7127 remove -Wno-missing-braces from Makefile.uts
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
23 *
24 * NOT a DDI compliant Sun Fibre Channel port driver(fp)
25 *
26 */
27
28 #include <sys/types.h>
29 #include <sys/varargs.h>
30 #include <sys/param.h>
31 #include <sys/errno.h>
32 #include <sys/uio.h>
33 #include <sys/buf.h>
34 #include <sys/modctl.h>
35 #include <sys/open.h>
36 #include <sys/file.h>
37 #include <sys/kmem.h>
38 #include <sys/poll.h>
39 #include <sys/conf.h>
40 #include <sys/thread.h>
41 #include <sys/var.h>
42 #include <sys/cmn_err.h>
43 #include <sys/stat.h>
44 #include <sys/ddi.h>
45 #include <sys/sunddi.h>
46 #include <sys/promif.h>
47 #include <sys/nvpair.h>
48 #include <sys/byteorder.h>
49 #include <sys/scsi/scsi.h>
50 #include <sys/fibre-channel/fc.h>
51 #include <sys/fibre-channel/impl/fc_ulpif.h>
52 #include <sys/fibre-channel/impl/fc_fcaif.h>
53 #include <sys/fibre-channel/impl/fctl_private.h>
54 #include <sys/fibre-channel/impl/fc_portif.h>
55 #include <sys/fibre-channel/impl/fp.h>
56
57 /* These are defined in fctl.c! */
58 extern int did_table_size;
59 extern int pwwn_table_size;
60
61 static struct cb_ops fp_cb_ops = {
62 fp_open, /* open */
63 fp_close, /* close */
64 nodev, /* strategy */
65 nodev, /* print */
66 nodev, /* dump */
67 nodev, /* read */
68 nodev, /* write */
69 fp_ioctl, /* ioctl */
70 nodev, /* devmap */
71 nodev, /* mmap */
72 nodev, /* segmap */
73 nochpoll, /* chpoll */
74 ddi_prop_op, /* cb_prop_op */
75 0, /* streamtab */
76 D_NEW | D_MP | D_HOTPLUG, /* cb_flag */
77 CB_REV, /* rev */
78 nodev, /* aread */
79 nodev /* awrite */
80 };
81
82 static struct dev_ops fp_ops = {
83 DEVO_REV, /* build revision */
84 0, /* reference count */
85 fp_getinfo, /* getinfo */
86 nulldev, /* identify - Obsoleted */
87 nulldev, /* probe */
88 fp_attach, /* attach */
89 fp_detach, /* detach */
90 nodev, /* reset */
91 &fp_cb_ops, /* cb_ops */
92 NULL, /* bus_ops */
93 fp_power, /* power */
94 ddi_quiesce_not_needed /* quiesce */
95 };
96
97 #define FP_VERSION "20091123-1.101"
98 #define FP_NAME_VERSION "SunFC Port v" FP_VERSION
99
100 char *fp_version = FP_NAME_VERSION;
101
102 static struct modldrv modldrv = {
103 &mod_driverops, /* Type of Module */
104 FP_NAME_VERSION, /* Name/Version of fp */
105 &fp_ops /* driver ops */
106 };
107
108 static struct modlinkage modlinkage = {
109 MODREV_1, /* Rev of the loadable modules system */
110 	{ &modldrv, NULL }	/* NULL terminated list of modules */
111 };
112
113
114
115 static uint16_t ns_reg_cmds[] = {
116 NS_RPN_ID,
117 NS_RNN_ID,
118 NS_RCS_ID,
119 NS_RFT_ID,
120 NS_RPT_ID,
121 NS_RSPN_ID,
122 NS_RSNN_NN
123 };
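/*
 * ns_reg_cmds: name server object registration requests, presumably
 * issued in this order when fp registers the local port with the
 * fabric name server.
 */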
124
125 struct fp_xlat {
126 uchar_t xlat_state;
127 int xlat_rval;
128 } fp_xlat [] = {
129 { FC_PKT_SUCCESS, FC_SUCCESS },
130 { FC_PKT_REMOTE_STOP, FC_FAILURE },
131 { FC_PKT_LOCAL_RJT, FC_FAILURE },
132 { FC_PKT_NPORT_RJT, FC_ELS_PREJECT },
133 { FC_PKT_FABRIC_RJT, FC_ELS_FREJECT },
134 { FC_PKT_LOCAL_BSY, FC_TRAN_BUSY },
135 { FC_PKT_TRAN_BSY, FC_TRAN_BUSY },
136 { FC_PKT_NPORT_BSY, FC_PBUSY },
137 { FC_PKT_FABRIC_BSY, FC_FBUSY },
138 { FC_PKT_LS_RJT, FC_FAILURE },
139 { FC_PKT_BA_RJT, FC_FAILURE },
140 { FC_PKT_TIMEOUT, FC_FAILURE },
141 { FC_PKT_TRAN_ERROR, FC_TRANSPORT_ERROR },
142 { FC_PKT_FAILURE, FC_FAILURE },
143 { FC_PKT_PORT_OFFLINE, FC_OFFLINE }
144 };
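/*
 * fp_xlat: translation table from FCA packet completion states
 * (pkt_state) to the FC_* return codes handed back to callers.
 */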
145
146 static uchar_t fp_valid_alpas[] = {
147 0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, 0x17, 0x18, 0x1B,
148 0x1D, 0x1E, 0x1F, 0x23, 0x25, 0x26, 0x27, 0x29, 0x2A,
149 0x2B, 0x2C, 0x2D, 0x2E, 0x31, 0x32, 0x33, 0x34, 0x35,
150 0x36, 0x39, 0x3A, 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49,
151 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54,
152 0x55, 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67,
153 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, 0x73,
154 0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, 0x81, 0x82,
155 0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, 0x9B, 0x9D, 0x9E,
156 0x9F, 0xA3, 0xA5, 0xA6, 0xA7, 0xA9, 0xAA, 0xAB, 0xAC,
157 0xAD, 0xAE, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB9,
158 0xBA, 0xBC, 0xC3, 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB,
159 0xCC, 0xCD, 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
160 0xD9, 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF
161 };
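/*
 * fp_valid_alpas: the set of valid arbitrated loop physical addresses
 * (AL_PAs) defined by FC-AL, listed in ascending order.
 */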
162
163 static struct fp_perms {
164 uint16_t fp_ioctl_cmd;
165 uchar_t fp_open_flag;
166 } fp_perm_list [] = {
167 { FCIO_GET_NUM_DEVS, FP_OPEN },
168 { FCIO_GET_DEV_LIST, FP_OPEN },
169 { FCIO_GET_SYM_PNAME, FP_OPEN },
170 { FCIO_GET_SYM_NNAME, FP_OPEN },
171 { FCIO_SET_SYM_PNAME, FP_EXCL },
172 { FCIO_SET_SYM_NNAME, FP_EXCL },
173 { FCIO_GET_LOGI_PARAMS, FP_OPEN },
174 { FCIO_DEV_LOGIN, FP_EXCL },
175 { FCIO_DEV_LOGOUT, FP_EXCL },
176 { FCIO_GET_STATE, FP_OPEN },
177 { FCIO_DEV_REMOVE, FP_EXCL },
178 { FCIO_GET_FCODE_REV, FP_OPEN },
179 { FCIO_GET_FW_REV, FP_OPEN },
180 { FCIO_GET_DUMP_SIZE, FP_OPEN },
181 { FCIO_FORCE_DUMP, FP_EXCL },
182 { FCIO_GET_DUMP, FP_OPEN },
183 { FCIO_GET_TOPOLOGY, FP_OPEN },
184 { FCIO_RESET_LINK, FP_EXCL },
185 { FCIO_RESET_HARD, FP_EXCL },
186 { FCIO_RESET_HARD_CORE, FP_EXCL },
187 { FCIO_DIAG, FP_OPEN },
188 { FCIO_NS, FP_EXCL },
189 { FCIO_DOWNLOAD_FW, FP_EXCL },
190 { FCIO_DOWNLOAD_FCODE, FP_EXCL },
191 { FCIO_LINK_STATUS, FP_OPEN },
192 { FCIO_GET_HOST_PARAMS, FP_OPEN },
193 { FCIO_GET_NODE_ID, FP_OPEN },
194 { FCIO_SET_NODE_ID, FP_EXCL },
195 { FCIO_SEND_NODE_ID, FP_OPEN },
196 { FCIO_GET_ADAPTER_ATTRIBUTES, FP_OPEN },
197 { FCIO_GET_OTHER_ADAPTER_PORTS, FP_OPEN },
198 { FCIO_GET_ADAPTER_PORT_ATTRIBUTES, FP_OPEN },
199 { FCIO_GET_DISCOVERED_PORT_ATTRIBUTES, FP_OPEN },
200 { FCIO_GET_PORT_ATTRIBUTES, FP_OPEN },
201 { FCIO_GET_ADAPTER_PORT_STATS, FP_OPEN },
202 { FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES, FP_OPEN },
203 { FCIO_GET_NPIV_PORT_LIST, FP_OPEN },
204 { FCIO_DELETE_NPIV_PORT, FP_OPEN },
205 { FCIO_GET_NPIV_ATTRIBUTES, FP_OPEN },
206 { FCIO_CREATE_NPIV_PORT, FP_OPEN },
207 { FCIO_NPIV_GET_ADAPTER_ATTRIBUTES, FP_OPEN }
208 };
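/*
 * fp_perm_list: minimum open mode required for each FCIO ioctl.
 * FP_OPEN entries allow any open of the port device; FP_EXCL entries
 * require an exclusive open.
 */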
209
210 static char *fp_pm_comps[] = {
211 "NAME=FC Port",
212 "0=Port Down",
213 "1=Port Up"
214 };
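/*
 * fp_pm_comps: power management component description strings,
 * published via the "pm-components" property in fp_attach_handler().
 */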
215
216
217 #ifdef _LITTLE_ENDIAN
218 #define MAKE_BE_32(x) { \
219 uint32_t *ptr1, i; \
220 ptr1 = (uint32_t *)(x); \
221 for (i = 0; i < sizeof (*(x)) / sizeof (uint32_t); i++) { \
222 *ptr1 = BE_32(*ptr1); \
223 ptr1++; \
224 } \
225 }
226 #else
227 #define MAKE_BE_32(x)
228 #endif
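/*
 * MAKE_BE_32(x): on little-endian hosts, byte-swap every 32-bit word of
 * the structure pointed to by x into big-endian (FC wire order) in
 * place; a no-op on big-endian hosts.
 */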
229
230 static uchar_t fp_verbosity = (FP_WARNING_MESSAGES | FP_FATAL_MESSAGES);
231 static uint32_t fp_options = 0;
232
233 static int fp_cmd_wait_cnt = FP_CMDWAIT_DELAY;
234 static int fp_retry_delay = FP_RETRY_DELAY; /* retry after this delay */
235 static int fp_retry_count = FP_RETRY_COUNT; /* number of retries */
236 unsigned int fp_offline_ticker; /* seconds */
237
238 /*
239 * Driver global variable to anchor the list of soft state structs for
240 * all fp driver instances. Used with the Solaris DDI soft state functions.
241 */
242 static void *fp_driver_softstate;
243
244 static clock_t fp_retry_ticks;
245 static clock_t fp_offline_ticks;
246
247 static int fp_retry_ticker;
248 static uint32_t fp_unsol_buf_count = FP_UNSOL_BUF_COUNT;
249 static uint32_t fp_unsol_buf_size = FP_UNSOL_BUF_SIZE;
250
251 static int fp_log_size = FP_LOG_SIZE;
252 static int fp_trace = FP_TRACE_DEFAULT;
253 static fc_trace_logq_t *fp_logq = NULL;
254
255 int fp_get_adapter_paths(char *pathList, int count);
256 static void fp_log_port_event(fc_local_port_t *port, char *subclass);
257 static void fp_log_target_event(fc_local_port_t *port, char *subclass,
258 la_wwn_t tgt_pwwn, uint32_t port_id);
259 static uint32_t fp_map_remote_port_state(uint32_t rm_state);
260 static void fp_init_symbolic_names(fc_local_port_t *port);
261
262
263 /*
264 * Perform global initialization
265 */
266 int
267 _init(void)
268 {
269 int ret;
270
271 if ((ret = ddi_soft_state_init(&fp_driver_softstate,
272 sizeof (struct fc_local_port), 8)) != 0) {
273 return (ret);
274 }
275
276 if ((ret = scsi_hba_init(&modlinkage)) != 0) {
277 ddi_soft_state_fini(&fp_driver_softstate);
278 return (ret);
279 }
280
281 fp_logq = fc_trace_alloc_logq(fp_log_size);
282
283 if ((ret = mod_install(&modlinkage)) != 0) {
284 fc_trace_free_logq(fp_logq);
285 ddi_soft_state_fini(&fp_driver_softstate);
286 scsi_hba_fini(&modlinkage);
287 }
288
289 return (ret);
290 }
291
292
293 /*
294 * Prepare for driver unload
295 */
296 int
297 _fini(void)
298 {
299 int ret;
300
301 if ((ret = mod_remove(&modlinkage)) == 0) {
302 fc_trace_free_logq(fp_logq);
303 ddi_soft_state_fini(&fp_driver_softstate);
304 scsi_hba_fini(&modlinkage);
305 }
306
307 return (ret);
308 }
309
310
311 /*
312 * Request mod_info() to handle all cases
313 */
314 int
315 _info(struct modinfo *modinfo)
316 {
317 return (mod_info(&modlinkage, modinfo));
318 }
319
320
321 /*
322 * fp_attach:
323 *
324 * The respective cmd handlers take care of performing
325 * ULP related invocations
326 */
327 static int
328 fp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
329 {
330 int rval;
331
332 /*
333 * We check the value of fp_offline_ticker at this
334 * point. The variable is global for the driver and
335 * not specific to an instance.
336 *
337 * If there is no user-defined value found in /etc/system
338 * or fp.conf, then we use 90 seconds (FP_OFFLINE_TICKER).
339 * The minimum setting for this offline timeout according
340 * to the FC-FS2 standard (Fibre Channel Framing and
341 * Signalling-2, see www.t11.org) is R_T_TOV == 100msec.
342 *
343 * We do not recommend setting the value to less than 10
344 * seconds (RA_TOV) or more than 90 seconds. If this
345 * variable is greater than 90 seconds then drivers above
346 * fp (fcp, sd, scsi_vhci, vxdmp et al) might complain.
347 */
348
349 fp_offline_ticker = ddi_prop_get_int(DDI_DEV_T_ANY,
350 dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fp_offline_ticker",
351 FP_OFFLINE_TICKER);
352
353 if ((fp_offline_ticker < 10) ||
354 (fp_offline_ticker > 90)) {
355 cmn_err(CE_WARN, "Setting fp_offline_ticker to "
356 "%d second(s). This is outside the "
357 "recommended range of 10..90 seconds",
358 fp_offline_ticker);
359 }
360
361 /*
362 * Tick every second when there are commands to retry.
363 * It should tick at the least granular value of pkt_timeout
364 * (which is one second)
365 */
366 fp_retry_ticker = 1;
367
368 fp_retry_ticks = drv_usectohz(fp_retry_ticker * 1000 * 1000);
369 fp_offline_ticks = drv_usectohz(fp_offline_ticker * 1000 * 1000);
370
371 switch (cmd) {
372 case DDI_ATTACH:
373 rval = fp_attach_handler(dip);
374 break;
375
376 case DDI_RESUME:
377 rval = fp_resume_handler(dip);
378 break;
379
380 default:
381 rval = DDI_FAILURE;
382 break;
383 }
384 return (rval);
385 }
386
387
388 /*
389 * fp_detach:
390 *
391  * If a ULP fails to handle the cmd request, the converse of
392  * cmd is invoked for the ULPs that had previously succeeded
393  * with the cmd request.
394 */
395 static int
396 fp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
397 {
398 int rval = DDI_FAILURE;
399 fc_local_port_t *port;
400 fc_attach_cmd_t converse;
401 uint8_t cnt;
402
403 if ((port = ddi_get_soft_state(fp_driver_softstate,
404 ddi_get_instance(dip))) == NULL) {
405 return (DDI_FAILURE);
406 }
407
408 mutex_enter(&port->fp_mutex);
409
410 if (port->fp_ulp_attach) {
411 mutex_exit(&port->fp_mutex);
412 return (DDI_FAILURE);
413 }
414
415 switch (cmd) {
416 case DDI_DETACH:
417 if (port->fp_task != FP_TASK_IDLE) {
418 mutex_exit(&port->fp_mutex);
419 return (DDI_FAILURE);
420 }
421
422 /* Let's attempt to quit the job handler gracefully */
423 port->fp_soft_state |= FP_DETACH_INPROGRESS;
424
425 mutex_exit(&port->fp_mutex);
426 converse = FC_CMD_ATTACH;
427 if (fctl_detach_ulps(port, FC_CMD_DETACH,
428 &modlinkage) != FC_SUCCESS) {
429 mutex_enter(&port->fp_mutex);
430 port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
431 mutex_exit(&port->fp_mutex);
432 rval = DDI_FAILURE;
433 break;
434 }
435
436 mutex_enter(&port->fp_mutex);
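		/*
		 * Give the job queue up to fp_cmd_wait_cnt seconds (one
		 * second per pass below) to drain before giving up on
		 * the detach.
		 */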
437 for (cnt = 0; (port->fp_job_head) && (cnt < fp_cmd_wait_cnt);
438 cnt++) {
439 mutex_exit(&port->fp_mutex);
440 delay(drv_usectohz(1000000));
441 mutex_enter(&port->fp_mutex);
442 }
443
444 if (port->fp_job_head) {
445 mutex_exit(&port->fp_mutex);
446 rval = DDI_FAILURE;
447 break;
448 }
449 mutex_exit(&port->fp_mutex);
450
451 rval = fp_detach_handler(port);
452 break;
453
454 case DDI_SUSPEND:
455 mutex_exit(&port->fp_mutex);
456 converse = FC_CMD_RESUME;
457 if (fctl_detach_ulps(port, FC_CMD_SUSPEND,
458 &modlinkage) != FC_SUCCESS) {
459 rval = DDI_FAILURE;
460 break;
461 }
462 if ((rval = fp_suspend_handler(port)) != DDI_SUCCESS) {
463 (void) callb_generic_cpr(&port->fp_cpr_info,
464 CB_CODE_CPR_RESUME);
465 }
466 break;
467
468 default:
469 mutex_exit(&port->fp_mutex);
470 break;
471 }
472
473 /*
474 * Use softint to perform reattach. Mark fp_ulp_attach so we
475 * don't attempt to do this repeatedly on behalf of some persistent
476 * caller.
477 */
478 if (rval != DDI_SUCCESS) {
479 mutex_enter(&port->fp_mutex);
480 port->fp_ulp_attach = 1;
481
482 /*
483  * If the port is in the low power mode then there is a
484  * possibility that the FCA too could be in low power mode.
485  * Try to raise the power before calling the ULP attach routines.
486 */
487
488 if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
489 (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
490 mutex_exit(&port->fp_mutex);
491 (void) pm_raise_power(port->fp_port_dip,
492 FP_PM_COMPONENT, FP_PM_PORT_UP);
493 } else {
494 mutex_exit(&port->fp_mutex);
495 }
496
497
498 fp_attach_ulps(port, converse);
499
500 mutex_enter(&port->fp_mutex);
501 while (port->fp_ulp_attach) {
502 cv_wait(&port->fp_attach_cv, &port->fp_mutex);
503 }
504
505 port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
506
507 /*
508 * Mark state as detach failed so asynchronous ULP attach
509 * events (downstream, not the ones we're initiating with
510 * the call to fp_attach_ulps) are not honored. We're
511 * really still in pending detach.
512 */
513 port->fp_soft_state |= FP_DETACH_FAILED;
514
515 mutex_exit(&port->fp_mutex);
516 }
517
518 return (rval);
519 }
520
521
522 /*
523 * fp_getinfo:
524 * Given the device number, return either the
525 * dev_info_t pointer or the instance number.
526 */
527
528 /* ARGSUSED */
529 static int
530 fp_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
531 {
532 int rval;
533 minor_t instance;
534 fc_local_port_t *port;
535
536 rval = DDI_SUCCESS;
537 instance = getminor((dev_t)arg);
538
539 switch (cmd) {
540 case DDI_INFO_DEVT2DEVINFO:
541 if ((port = ddi_get_soft_state(fp_driver_softstate,
542 instance)) == NULL) {
543 rval = DDI_FAILURE;
544 break;
545 }
546 *result = (void *)port->fp_port_dip;
547 break;
548
549 case DDI_INFO_DEVT2INSTANCE:
550 *result = (void *)(uintptr_t)instance;
551 break;
552
553 default:
554 rval = DDI_FAILURE;
555 break;
556 }
557
558 return (rval);
559 }
560
561
562 /*
563 * Entry point for power up and power down request from kernel
564 */
565 static int
566 fp_power(dev_info_t *dip, int comp, int level)
567 {
568 int rval = DDI_FAILURE;
569 fc_local_port_t *port;
570
571 port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));
572 if (port == NULL || comp != FP_PM_COMPONENT) {
573 return (rval);
574 }
575
576 switch (level) {
577 case FP_PM_PORT_UP:
578 rval = DDI_SUCCESS;
579
580 /*
581 * If the port is DDI_SUSPENDed, let the DDI_RESUME
582 * code complete the rediscovery.
583 */
584 mutex_enter(&port->fp_mutex);
585 if (port->fp_soft_state & FP_SOFT_SUSPEND) {
586 port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
587 port->fp_pm_level = FP_PM_PORT_UP;
588 mutex_exit(&port->fp_mutex);
589 fctl_attach_ulps(port, FC_CMD_POWER_UP, &modlinkage);
590 break;
591 }
592
593 if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
594 ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);
595
596 port->fp_pm_level = FP_PM_PORT_UP;
597 rval = fp_power_up(port);
598 if (rval != DDI_SUCCESS) {
599 port->fp_pm_level = FP_PM_PORT_DOWN;
600 }
601 } else {
602 port->fp_pm_level = FP_PM_PORT_UP;
603 }
604 mutex_exit(&port->fp_mutex);
605 break;
606
607 case FP_PM_PORT_DOWN:
608 mutex_enter(&port->fp_mutex);
609
610 ASSERT(!(port->fp_soft_state & FP_SOFT_NO_PMCOMP));
611 if (port->fp_soft_state & FP_SOFT_NO_PMCOMP) {
612 /*
613  * PM framework goofed up. We don't
614  * have any PM components. Let's never go down.
615 */
616 mutex_exit(&port->fp_mutex);
617 break;
618
619 }
620
621 if (port->fp_ulp_attach) {
622 /* We shouldn't let the power go down */
623 mutex_exit(&port->fp_mutex);
624 break;
625 }
626
627 /*
628 * Not a whole lot to do if we are detaching
629 */
630 if (port->fp_soft_state & FP_SOFT_IN_DETACH) {
631 port->fp_pm_level = FP_PM_PORT_DOWN;
632 mutex_exit(&port->fp_mutex);
633 rval = DDI_SUCCESS;
634 break;
635 }
636
637 if (!port->fp_pm_busy && !port->fp_pm_busy_nocomp) {
638 port->fp_pm_level = FP_PM_PORT_DOWN;
639
640 rval = fp_power_down(port);
641 if (rval != DDI_SUCCESS) {
642 port->fp_pm_level = FP_PM_PORT_UP;
643 ASSERT(!(port->fp_soft_state &
644 FP_SOFT_POWER_DOWN));
645 } else {
646 ASSERT(port->fp_soft_state &
647 FP_SOFT_POWER_DOWN);
648 }
649 }
650 mutex_exit(&port->fp_mutex);
651 break;
652
653 default:
654 break;
655 }
656
657 return (rval);
658 }
659
660
661 /*
662 * Open FC port devctl node
663 */
664 static int
665 fp_open(dev_t *devp, int flag, int otype, cred_t *credp)
666 {
667 int instance;
668 fc_local_port_t *port;
669
670 if (otype != OTYP_CHR) {
671 return (EINVAL);
672 }
673
674 /*
675 * This is not a toy to play with. Allow only powerful
676 * users (hopefully knowledgeable) to access the port
677 * (A hacker potentially could download a sick binary
678 * file into FCA)
679 */
680 if (drv_priv(credp)) {
681 return (EPERM);
682 }
683
684 instance = (int)getminor(*devp);
685
686 port = ddi_get_soft_state(fp_driver_softstate, instance);
687 if (port == NULL) {
688 return (ENXIO);
689 }
690
691 mutex_enter(&port->fp_mutex);
692 if (port->fp_flag & FP_EXCL) {
693 /*
694 * It is already open for exclusive access.
695 * So shut the door on this caller.
696 */
697 mutex_exit(&port->fp_mutex);
698 return (EBUSY);
699 }
700
701 if (flag & FEXCL) {
702 if (port->fp_flag & FP_OPEN) {
703 /*
704 * Exclusive operation not possible
705 * as it is already opened
706 */
707 mutex_exit(&port->fp_mutex);
708 return (EBUSY);
709 }
710 port->fp_flag |= FP_EXCL;
711 }
712 port->fp_flag |= FP_OPEN;
713 mutex_exit(&port->fp_mutex);
714
715 return (0);
716 }
717
718
719 /*
720 * The driver close entry point is called on the last close()
721 * of a device. So it is perfectly alright to just clobber the
722 * open flag and reset it to idle (instead of having to reset
723  * each flag bit). For any confusion, check out close(9E).
724 */
725
726 /* ARGSUSED */
727 static int
728 fp_close(dev_t dev, int flag, int otype, cred_t *credp)
729 {
730 int instance;
731 fc_local_port_t *port;
732
733 if (otype != OTYP_CHR) {
734 return (EINVAL);
735 }
736
737 instance = (int)getminor(dev);
738
739 port = ddi_get_soft_state(fp_driver_softstate, instance);
740 if (port == NULL) {
741 return (ENXIO);
742 }
743
744 mutex_enter(&port->fp_mutex);
745 if ((port->fp_flag & FP_OPEN) == 0) {
746 mutex_exit(&port->fp_mutex);
747 return (ENODEV);
748 }
749 port->fp_flag = FP_IDLE;
750 mutex_exit(&port->fp_mutex);
751
752 return (0);
753 }
754
755 /*
756 * Handle IOCTL requests
757 */
758
759 /* ARGSUSED */
760 static int
761 fp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval)
762 {
763 int instance;
764 int ret = 0;
765 fcio_t fcio;
766 fc_local_port_t *port;
767
768 instance = (int)getminor(dev);
769
770 port = ddi_get_soft_state(fp_driver_softstate, instance);
771 if (port == NULL) {
772 return (ENXIO);
773 }
774
775 mutex_enter(&port->fp_mutex);
776 if ((port->fp_flag & FP_OPEN) == 0) {
777 mutex_exit(&port->fp_mutex);
778 return (ENXIO);
779 }
780
781 if (port->fp_soft_state & FP_SOFT_SUSPEND) {
782 mutex_exit(&port->fp_mutex);
783 return (ENXIO);
784 }
785
786 mutex_exit(&port->fp_mutex);
787
788 /* this will raise power if necessary */
789 ret = fctl_busy_port(port);
790 if (ret != 0) {
791 return (ret);
792 }
793
794 ASSERT(port->fp_pm_level == FP_PM_PORT_UP);
795
796
797 switch (cmd) {
798 case FCIO_CMD: {
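		/*
		 * Copy in the fcio_t argument; when the caller is a 32-bit
		 * process on a 64-bit kernel, convert from the ILP32 layout
		 * (struct fcio32) field by field.
		 */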
799 #ifdef _MULTI_DATAMODEL
800 switch (ddi_model_convert_from(mode & FMODELS)) {
801 case DDI_MODEL_ILP32: {
802 struct fcio32 fcio32;
803
804 if (ddi_copyin((void *)data, (void *)&fcio32,
805 sizeof (struct fcio32), mode)) {
806 ret = EFAULT;
807 break;
808 }
809 fcio.fcio_xfer = fcio32.fcio_xfer;
810 fcio.fcio_cmd = fcio32.fcio_cmd;
811 fcio.fcio_flags = fcio32.fcio_flags;
812 fcio.fcio_cmd_flags = fcio32.fcio_cmd_flags;
813 fcio.fcio_ilen = (size_t)fcio32.fcio_ilen;
814 fcio.fcio_ibuf =
815 (caddr_t)(uintptr_t)fcio32.fcio_ibuf;
816 fcio.fcio_olen = (size_t)fcio32.fcio_olen;
817 fcio.fcio_obuf =
818 (caddr_t)(uintptr_t)fcio32.fcio_obuf;
819 fcio.fcio_alen = (size_t)fcio32.fcio_alen;
820 fcio.fcio_abuf =
821 (caddr_t)(uintptr_t)fcio32.fcio_abuf;
822 fcio.fcio_errno = fcio32.fcio_errno;
823 break;
824 }
825
826 case DDI_MODEL_NONE:
827 if (ddi_copyin((void *)data, (void *)&fcio,
828 sizeof (fcio_t), mode)) {
829 ret = EFAULT;
830 }
831 break;
832 }
833 #else /* _MULTI_DATAMODEL */
834 if (ddi_copyin((void *)data, (void *)&fcio,
835 sizeof (fcio_t), mode)) {
836 ret = EFAULT;
837 break;
838 }
839 #endif /* _MULTI_DATAMODEL */
840 if (!ret) {
841 ret = fp_fciocmd(port, data, mode, &fcio);
842 }
843 break;
844 }
845
846 default:
847 ret = fctl_ulp_port_ioctl(port, dev, cmd, data,
848 mode, credp, rval);
849 }
850
851 fctl_idle_port(port);
852
853 return (ret);
854 }
855
856
857 /*
858 * Init Symbolic Port Name and Node Name
859  * LV will try to get the symbolic names from the FCA driver
860  * and register them with the name server; if LV fails to get
861  * these, it will register its default symbolic names with the
862  * name server instead.
863  * The default symbolic node name format is:
864  * <hostname>:<hba driver name>(instance)
865  * The default symbolic port name format is:
866 * <fp path name>
867 */
868 static void
869 fp_init_symbolic_names(fc_local_port_t *port)
870 {
871 const char *vendorname = ddi_driver_name(port->fp_fca_dip);
872 char *sym_name;
873 char fcaname[50] = {0};
874 int hostnlen, fcanlen;
875
876 if (port->fp_sym_node_namelen == 0) {
877 hostnlen = strlen(utsname.nodename);
878 (void) snprintf(fcaname, sizeof (fcaname),
879 "%s%d", vendorname, ddi_get_instance(port->fp_fca_dip));
880 fcanlen = strlen(fcaname);
881
882 sym_name = kmem_zalloc(hostnlen + fcanlen + 2, KM_SLEEP);
883 (void) sprintf(sym_name, "%s:%s", utsname.nodename, fcaname);
884 port->fp_sym_node_namelen = strlen(sym_name);
885 if (port->fp_sym_node_namelen >= FCHBA_SYMB_NAME_LEN) {
886 port->fp_sym_node_namelen = FCHBA_SYMB_NAME_LEN;
887 }
888 (void) strncpy(port->fp_sym_node_name, sym_name,
889 port->fp_sym_node_namelen);
890 kmem_free(sym_name, hostnlen + fcanlen + 2);
891 }
892
893 if (port->fp_sym_port_namelen == 0) {
894 char *pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
895
896 (void) ddi_pathname(port->fp_port_dip, pathname);
897 port->fp_sym_port_namelen = strlen(pathname);
898 if (port->fp_sym_port_namelen >= FCHBA_SYMB_NAME_LEN) {
899 port->fp_sym_port_namelen = FCHBA_SYMB_NAME_LEN;
900 }
901 (void) strncpy(port->fp_sym_port_name, pathname,
902 port->fp_sym_port_namelen);
903 kmem_free(pathname, MAXPATHLEN);
904 }
905 }
906
907
908 /*
909 * Perform port attach
910 */
911 static int
912 fp_attach_handler(dev_info_t *dip)
913 {
914 int rval;
915 int instance;
916 int port_num;
917 int port_len;
918 char name[30];
919 char i_pwwn[17];
920 fp_cmd_t *pkt;
921 uint32_t ub_count;
922 fc_local_port_t *port;
923 job_request_t *job;
924 fc_local_port_t *phyport = NULL;
925 int portpro1;
926 char pwwn[17], nwwn[17];
927
928 instance = ddi_get_instance(dip);
929 port_len = sizeof (port_num);
930 rval = ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
931 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port",
932 (caddr_t)&port_num, &port_len);
933 if (rval != DDI_SUCCESS) {
934 cmn_err(CE_WARN, "fp(%d): No port property in devinfo",
935 instance);
936 return (DDI_FAILURE);
937 }
938
939 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, instance,
940 DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
941 cmn_err(CE_WARN, "fp(%d): failed to create devctl minor node",
942 instance);
943 return (DDI_FAILURE);
944 }
945
946 if (ddi_create_minor_node(dip, "fc", S_IFCHR, instance,
947 DDI_NT_FC_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
948 cmn_err(CE_WARN, "fp(%d): failed to create fc attachment"
949 " point minor node", instance);
950 ddi_remove_minor_node(dip, NULL);
951 return (DDI_FAILURE);
952 }
953
954 if (ddi_soft_state_zalloc(fp_driver_softstate, instance)
955 != DDI_SUCCESS) {
956 cmn_err(CE_WARN, "fp(%d): failed to alloc soft state",
957 instance);
958 ddi_remove_minor_node(dip, NULL);
959 return (DDI_FAILURE);
960 }
961 port = ddi_get_soft_state(fp_driver_softstate, instance);
962
963 (void) sprintf(port->fp_ibuf, "fp(%d)", instance);
964
965 port->fp_instance = instance;
966 port->fp_ulp_attach = 1;
967 port->fp_port_num = port_num;
968 port->fp_verbose = fp_verbosity;
969 port->fp_options = fp_options;
970
971 port->fp_fca_dip = ddi_get_parent(dip);
972 port->fp_port_dip = dip;
973 port->fp_fca_tran = (fc_fca_tran_t *)
974 ddi_get_driver_private(port->fp_fca_dip);
975
976 port->fp_task = port->fp_last_task = FP_TASK_IDLE;
977
978 /*
979 * Init the starting value of fp_rscn_count. Note that if
980 * FC_INVALID_RSCN_COUNT is 0 (which is what it currently is), the
981 * actual # of RSCNs will be (fp_rscn_count - 1)
982 */
983 port->fp_rscn_count = FC_INVALID_RSCN_COUNT + 1;
984
985 mutex_init(&port->fp_mutex, NULL, MUTEX_DRIVER, NULL);
986 cv_init(&port->fp_cv, NULL, CV_DRIVER, NULL);
987 cv_init(&port->fp_attach_cv, NULL, CV_DRIVER, NULL);
988
989 (void) sprintf(name, "fp%d_cache", instance);
990
991 if ((portpro1 = ddi_prop_get_int(DDI_DEV_T_ANY,
992 dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
993 "phyport-instance", -1)) != -1) {
994 phyport = ddi_get_soft_state(fp_driver_softstate, portpro1);
995 fc_wwn_to_str(&phyport->fp_service_params.nport_ww_name, pwwn);
996 fc_wwn_to_str(&phyport->fp_service_params.node_ww_name, nwwn);
997 port->fp_npiv_type = FC_NPIV_PORT;
998 }
999
1000 /*
1001 * Allocate the pool of fc_packet_t structs to be used with
1002 * this fp instance.
1003 */
1004 port->fp_pkt_cache = kmem_cache_create(name,
1005 (port->fp_fca_tran->fca_pkt_size) + sizeof (fp_cmd_t), 8,
1006 fp_cache_constructor, fp_cache_destructor, NULL, (void *)port,
1007 NULL, 0);
1008 port->fp_out_fpcmds = 0;
1009 if (port->fp_pkt_cache == NULL) {
1010 goto cache_alloc_failed;
1011 }
1012
1013
1014 /*
1015 * Allocate the d_id and pwwn hash tables for all remote ports
1016 * connected to this local port.
1017 */
1018 port->fp_did_table = kmem_zalloc(did_table_size *
1019 sizeof (struct d_id_hash), KM_SLEEP);
1020
1021 port->fp_pwwn_table = kmem_zalloc(pwwn_table_size *
1022 sizeof (struct pwwn_hash), KM_SLEEP);
1023
1024 port->fp_taskq = taskq_create("fp_ulp_callback", 1,
1025 MINCLSYSPRI, 1, 16, 0);
1026
1027 	/* Indicate that we don't have the PM components yet */
1028 port->fp_soft_state |= FP_SOFT_NO_PMCOMP;
1029
1030 /*
1031 * Bind the callbacks with the FCA driver. This will open the gate
1032 * for asynchronous callbacks, so after this call the fp_mutex
1033 * must be held when updating the fc_local_port_t struct.
1034 *
1035 * This is done _before_ setting up the job thread so we can avoid
1036 * cleaning up after the thread_create() in the error path. This
1037 * also means fp will be operating with fp_els_resp_pkt set to NULL.
1038 */
1039 if (fp_bind_callbacks(port) != DDI_SUCCESS) {
1040 goto bind_callbacks_failed;
1041 }
1042
1043 if (phyport) {
1044 mutex_enter(&phyport->fp_mutex);
1045 if (phyport->fp_port_next) {
1046 phyport->fp_port_next->fp_port_prev = port;
1047 port->fp_port_next = phyport->fp_port_next;
1048 phyport->fp_port_next = port;
1049 port->fp_port_prev = phyport;
1050 } else {
1051 phyport->fp_port_next = port;
1052 phyport->fp_port_prev = port;
1053 port->fp_port_next = phyport;
1054 port->fp_port_prev = phyport;
1055 }
1056 mutex_exit(&phyport->fp_mutex);
1057 }
1058
1059 /*
1060 * Init Symbolic Names
1061 */
1062 fp_init_symbolic_names(port);
1063
1064 pkt = fp_alloc_pkt(port, sizeof (la_els_logi_t), sizeof (la_els_logi_t),
1065 KM_SLEEP, NULL);
1066
1067 if (pkt == NULL) {
1068 cmn_err(CE_WARN, "fp(%d): failed to allocate ELS packet",
1069 instance);
1070 goto alloc_els_packet_failed;
1071 }
1072
1073 (void) thread_create(NULL, 0, fp_job_handler, port, 0, &p0, TS_RUN,
1074 v.v_maxsyspri - 2);
1075
1076 fc_wwn_to_str(&port->fp_service_params.nport_ww_name, i_pwwn);
1077 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-port",
1078 i_pwwn) != DDI_PROP_SUCCESS) {
1079 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
1080 "fp(%d): Updating 'initiator-port' property"
1081 " on fp dev_info node failed", instance);
1082 }
1083
1084 fc_wwn_to_str(&port->fp_service_params.node_ww_name, i_pwwn);
1085 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-node",
1086 i_pwwn) != DDI_PROP_SUCCESS) {
1087 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
1088 "fp(%d): Updating 'initiator-node' property"
1089 " on fp dev_info node failed", instance);
1090 }
1091
1092 mutex_enter(&port->fp_mutex);
1093 port->fp_els_resp_pkt = pkt;
1094 mutex_exit(&port->fp_mutex);
1095
1096 /*
1097 * Determine the count of unsolicited buffers this FCA can support
1098 */
1099 fp_retrieve_caps(port);
1100
1101 /*
1102 * Allocate unsolicited buffer tokens
1103 */
1104 if (port->fp_ub_count) {
1105 ub_count = port->fp_ub_count;
1106 port->fp_ub_tokens = kmem_zalloc(ub_count *
1107 sizeof (*port->fp_ub_tokens), KM_SLEEP);
1108 /*
1109 * Do not fail the attach if unsolicited buffer allocation
1110 * fails; Just try to get along with whatever the FCA can do.
1111 */
1112 if (fc_ulp_uballoc(port, &ub_count, fp_unsol_buf_size,
1113 FC_TYPE_EXTENDED_LS, port->fp_ub_tokens) !=
1114 FC_SUCCESS || ub_count != port->fp_ub_count) {
1115 cmn_err(CE_WARN, "fp(%d): failed to allocate "
1116 " Unsolicited buffers. proceeding with attach...",
1117 instance);
1118 kmem_free(port->fp_ub_tokens,
1119 sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
1120 port->fp_ub_tokens = NULL;
1121 }
1122 }
1123
1124 fp_load_ulp_modules(dip, port);
1125
1126 /*
1127 * Enable DDI_SUSPEND and DDI_RESUME for this instance.
1128 */
1129 (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
1130 "pm-hardware-state", "needs-suspend-resume",
1131 strlen("needs-suspend-resume") + 1);
1132
1133 /*
1134 * fctl maintains a list of all port handles, so
1135 * help fctl add this one to its list now.
1136 */
1137 mutex_enter(&port->fp_mutex);
1138 fctl_add_port(port);
1139
1140 /*
1141  * If a state change is already in progress, set the bind state to
1142  * OFFLINE as well, so that further state change callbacks into ULPs
1143  * will pass the appropriate states.
1144 */
1145 if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE ||
1146 port->fp_statec_busy) {
1147 port->fp_bind_state = FC_STATE_OFFLINE;
1148 mutex_exit(&port->fp_mutex);
1149
1150 fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
1151 } else {
1152 /*
1153 * Without dropping the mutex, ensure that the port
1154 * startup happens ahead of state change callback
1155 * processing
1156 */
1157 ASSERT(port->fp_job_tail == NULL && port->fp_job_head == NULL);
1158
1159 port->fp_last_task = port->fp_task;
1160 port->fp_task = FP_TASK_PORT_STARTUP;
1161
1162 job = fctl_alloc_job(JOB_PORT_STARTUP, JOB_TYPE_FCTL_ASYNC,
1163 fp_startup_done, (opaque_t)port, KM_SLEEP);
1164
1165 port->fp_job_head = port->fp_job_tail = job;
1166
1167 cv_signal(&port->fp_cv);
1168
1169 mutex_exit(&port->fp_mutex);
1170 }
1171
1172 mutex_enter(&port->fp_mutex);
1173 while (port->fp_ulp_attach) {
1174 cv_wait(&port->fp_attach_cv, &port->fp_mutex);
1175 }
1176 mutex_exit(&port->fp_mutex);
1177
1178 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
1179 "pm-components", fp_pm_comps,
1180 sizeof (fp_pm_comps) / sizeof (fp_pm_comps[0])) !=
1181 DDI_PROP_SUCCESS) {
1182 FP_TRACE(FP_NHEAD2(9, 0), "Failed to create PM"
1183 " components property, PM disabled on this port.");
1184 mutex_enter(&port->fp_mutex);
1185 port->fp_pm_level = FP_PM_PORT_UP;
1186 mutex_exit(&port->fp_mutex);
1187 } else {
1188 if (pm_raise_power(dip, FP_PM_COMPONENT,
1189 FP_PM_PORT_UP) != DDI_SUCCESS) {
1190 FP_TRACE(FP_NHEAD2(9, 0), "Failed to raise"
1191 " power level");
1192 mutex_enter(&port->fp_mutex);
1193 port->fp_pm_level = FP_PM_PORT_UP;
1194 mutex_exit(&port->fp_mutex);
1195 }
1196
1197 /*
1198 * Don't unset the FP_SOFT_NO_PMCOMP flag until after
1199 * the call to pm_raise_power. The PM framework can't
1200 * handle multiple threads calling into it during attach.
1201 */
1202
1203 mutex_enter(&port->fp_mutex);
1204 port->fp_soft_state &= ~FP_SOFT_NO_PMCOMP;
1205 mutex_exit(&port->fp_mutex);
1206 }
1207
1208 ddi_report_dev(dip);
1209
1210 fp_log_port_event(port, ESC_SUNFC_PORT_ATTACH);
1211
1212 return (DDI_SUCCESS);
1213
1214 /*
1215  * Unwind any/all preceding allocations in the event of an error.
1216 */
1217
1218 alloc_els_packet_failed:
1219
1220 if (port->fp_fca_handle != NULL) {
1221 port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
1222 port->fp_fca_handle = NULL;
1223 }
1224
1225 if (port->fp_ub_tokens != NULL) {
1226 (void) fc_ulp_ubfree(port, port->fp_ub_count,
1227 port->fp_ub_tokens);
1228 kmem_free(port->fp_ub_tokens,
1229 port->fp_ub_count * sizeof (*port->fp_ub_tokens));
1230 port->fp_ub_tokens = NULL;
1231 }
1232
1233 if (port->fp_els_resp_pkt != NULL) {
1234 fp_free_pkt(port->fp_els_resp_pkt);
1235 port->fp_els_resp_pkt = NULL;
1236 }
1237
1238 bind_callbacks_failed:
1239
1240 if (port->fp_taskq != NULL) {
1241 taskq_destroy(port->fp_taskq);
1242 }
1243
1244 if (port->fp_pwwn_table != NULL) {
1245 kmem_free(port->fp_pwwn_table,
1246 pwwn_table_size * sizeof (struct pwwn_hash));
1247 port->fp_pwwn_table = NULL;
1248 }
1249
1250 if (port->fp_did_table != NULL) {
1251 kmem_free(port->fp_did_table,
1252 did_table_size * sizeof (struct d_id_hash));
1253 port->fp_did_table = NULL;
1254 }
1255
1256 if (port->fp_pkt_cache != NULL) {
1257 kmem_cache_destroy(port->fp_pkt_cache);
1258 port->fp_pkt_cache = NULL;
1259 }
1260
1261 cache_alloc_failed:
1262
1263 cv_destroy(&port->fp_attach_cv);
1264 cv_destroy(&port->fp_cv);
1265 mutex_destroy(&port->fp_mutex);
1266 ddi_remove_minor_node(port->fp_port_dip, NULL);
1267 ddi_soft_state_free(fp_driver_softstate, instance);
1268 ddi_prop_remove_all(dip);
1269
1270 return (DDI_FAILURE);
1271 }
1272
1273
1274 /*
1275 * Handle DDI_RESUME request
1276 */
1277 static int
1278 fp_resume_handler(dev_info_t *dip)
1279 {
1280 int rval;
1281 fc_local_port_t *port;
1282
1283 port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));
1284
1285 ASSERT(port != NULL);
1286
1287 #ifdef DEBUG
1288 mutex_enter(&port->fp_mutex);
1289 ASSERT(port->fp_soft_state & FP_SOFT_SUSPEND);
1290 mutex_exit(&port->fp_mutex);
1291 #endif
1292
1293 /*
1294 * If the port was power suspended, raise the power level
1295 */
1296 mutex_enter(&port->fp_mutex);
1297 if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
1298 (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
1299 ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);
1300
1301 mutex_exit(&port->fp_mutex);
1302 if (pm_raise_power(dip, FP_PM_COMPONENT,
1303 FP_PM_PORT_UP) != DDI_SUCCESS) {
1304 FP_TRACE(FP_NHEAD2(9, 0),
1305 "Failed to raise the power level");
1306 return (DDI_FAILURE);
1307 }
1308 mutex_enter(&port->fp_mutex);
1309 }
1310 port->fp_soft_state &= ~FP_SOFT_SUSPEND;
1311 mutex_exit(&port->fp_mutex);
1312
1313 /*
1314  * All the discovery is initiated and handled by the per-port thread.
1315  * Further, all the discovery is handled in callback mode
1316  * (not polled mode); in a specific case such as this, the discovery
1317  * is required to happen in polled mode. The easiest way out is
1318  * to bail out the port thread and get it started. Come back and fix this
1319 * to do on demand discovery initiated by ULPs. ULPs such as FCP
1320 * will do on-demand discovery during pre-power-up busctl handling
1321 * which will only be possible when SCSA provides a new HBA vector
1322 * for sending down the PM busctl requests.
1323 */
1324 (void) callb_generic_cpr(&port->fp_cpr_info, CB_CODE_CPR_RESUME);
1325
1326 rval = fp_resume_all(port, FC_CMD_RESUME);
1327 if (rval != DDI_SUCCESS) {
1328 mutex_enter(&port->fp_mutex);
1329 port->fp_soft_state |= FP_SOFT_SUSPEND;
1330 mutex_exit(&port->fp_mutex);
1331 (void) callb_generic_cpr(&port->fp_cpr_info,
1332 CB_CODE_CPR_CHKPT);
1333 }
1334
1335 return (rval);
1336 }
1337
1338 /*
1339 * Perform FC Port power on initialization
1340 */
1341 static int
1342 fp_power_up(fc_local_port_t *port)
1343 {
1344 int rval;
1345
1346 ASSERT(MUTEX_HELD(&port->fp_mutex));
1347
1348 ASSERT((port->fp_soft_state & FP_SOFT_SUSPEND) == 0);
1349 ASSERT(port->fp_soft_state & FP_SOFT_POWER_DOWN);
1350
1351 port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
1352
1353 mutex_exit(&port->fp_mutex);
1354
1355 rval = fp_resume_all(port, FC_CMD_POWER_UP);
1356 if (rval != DDI_SUCCESS) {
1357 mutex_enter(&port->fp_mutex);
1358 port->fp_soft_state |= FP_SOFT_POWER_DOWN;
1359 } else {
1360 mutex_enter(&port->fp_mutex);
1361 }
1362
1363 return (rval);
1364 }
1365
1366
1367 /*
1368 * It is important to note that the power may possibly be removed between
1369 * SUSPEND and the ensuing RESUME operation. In such a context the underlying
1370 * FC port hardware would have gone through an OFFLINE to ONLINE transition
1371 * (hardware state). In this case, the port driver may need to rediscover the
1372 * topology, perform LOGINs, register with the name server again and perform
1373 * any such port initialization procedures. To perform LOGINs, the driver could
1374 * use the port device handle to see if a LOGIN needs to be performed and use
1375 * the D_ID and WWN in it. The LOGINs may fail (if the hardware is reconfigured
1376 * or removed) which will be reflected in the map the ULPs will see.
1377 */
1378 static int
1379 fp_resume_all(fc_local_port_t *port, fc_attach_cmd_t cmd)
1380 {
1381
1382 ASSERT(!MUTEX_HELD(&port->fp_mutex));
1383
1384 if (fp_bind_callbacks(port) != DDI_SUCCESS) {
1385 return (DDI_FAILURE);
1386 }
1387
1388 mutex_enter(&port->fp_mutex);
1389
1390 /*
1391  * If there are commands queued for delayed retry, just flush them
1392  * out instead of working the hard way to figure out which ones are
1393  * good for restart and which ones are not (ELSs are definitely not
1394  * good, as the port will have to go through a new spin of
1395  * rediscovery now).
1396 */
1397 if (port->fp_restore & FP_RESTORE_WAIT_TIMEOUT) {
1398 fp_cmd_t *cmd;
1399
1400 port->fp_restore &= ~FP_RESTORE_WAIT_TIMEOUT;
1401
1402 mutex_exit(&port->fp_mutex);
1403 while ((cmd = fp_deque_cmd(port)) != NULL) {
1404 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
1405 fp_iodone(cmd);
1406 }
1407 mutex_enter(&port->fp_mutex);
1408 }
1409
1410 if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE) {
1411 if ((port->fp_restore & FP_RESTORE_OFFLINE_TIMEOUT) ||
1412 port->fp_dev_count) {
1413 port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
1414 port->fp_offline_tid = timeout(fp_offline_timeout,
1415 (caddr_t)port, fp_offline_ticks);
1416 }
1417 if (port->fp_job_head) {
1418 cv_signal(&port->fp_cv);
1419 }
1420 mutex_exit(&port->fp_mutex);
1421 fctl_attach_ulps(port, cmd, &modlinkage);
1422 } else {
1423 struct job_request *job;
1424
1425 /*
1426 * If an OFFLINE timer was running at the time of
1427 * suspending, there is no need to restart it as
1428 * the port is ONLINE now.
1429 */
1430 port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
1431 if (port->fp_statec_busy == 0) {
1432 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
1433 }
1434 port->fp_statec_busy++;
1435 mutex_exit(&port->fp_mutex);
1436
1437 job = fctl_alloc_job(JOB_PORT_ONLINE,
1438 JOB_CANCEL_ULP_NOTIFICATION, NULL, NULL, KM_SLEEP);
1439 fctl_enque_job(port, job);
1440
1441 fctl_jobwait(job);
1442 fctl_remove_oldies(port);
1443
1444 fctl_attach_ulps(port, cmd, &modlinkage);
1445 fctl_dealloc_job(job);
1446 }
1447
1448 return (DDI_SUCCESS);
1449 }
1450
1451
1452 /*
1453 * At this time, there shouldn't be any I/O requests on this port.
1454 * But the unsolicited callbacks from the underlying FCA port need
1455 * to be handled very carefully. The steps followed to handle the
1456 * DDI_DETACH are:
1457 * + Grab the port driver mutex, check if the unsolicited
1458  * callback is currently being processed. If true, fail
1459  * the DDI_DETACH request by printing a message; if false,
1460  * mark the DDI_DETACH as in progress, so that any
1461 * further unsolicited callbacks get bounced.
1462 * + Perform PRLO/LOGO if necessary, cleanup all the data
1463 * structures.
1464 * + Get the job_handler thread to gracefully exit.
1465 * + Unregister callbacks with the FCA port.
1466 * + Now that some peace is found, notify all the ULPs of
1467 * DDI_DETACH request (using ulp_port_detach entry point)
1468 * + Free all mutexes, semaphores, conditional variables.
1469 * + Free the soft state, return success.
1470 *
1471 * Important considerations:
1472 * Port driver de-registers state change and unsolicited
1473 * callbacks before taking up the task of notifying ULPs
1474 * and performing PRLO and LOGOs.
1475 *
1476 * A port may go offline at the time PRLO/LOGO is being
1477 * requested. It is expected of all FCA drivers to fail
1478 * such requests either immediately with a FC_OFFLINE
1479 * return code to fc_fca_transport() or return the packet
1480 * asynchronously with pkt state set to FC_PKT_PORT_OFFLINE
1481 */
1482 static int
1483 fp_detach_handler(fc_local_port_t *port)
1484 {
1485 job_request_t *job;
1486 uint32_t delay_count;
1487 fc_orphan_t *orp, *tmporp;
1488
1489 /*
1490 * In a Fabric topology with many host ports connected to
1491 * a switch, another detaching instance of fp might have
1492 * triggered a LOGO (which is an unsolicited request to
1493  * this instance). So, in order to be able to detach
1494  * successfully while taking care of such cases, a delay of
1495  * about 30 seconds is introduced.
1496 */
1497 delay_count = 0;
1498 mutex_enter(&port->fp_mutex);
1499 if (port->fp_out_fpcmds != 0) {
1500 /*
1501 * At this time we can only check fp internal commands, because
1502  * sd/ssd/scsi_vhci should have finished all their commands and
1503  * fcp/fcip/fcsm should have finished all their commands.
1504 *
1505 * It seems that all fp internal commands are asynchronous now.
1506 */
1507 port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
1508 mutex_exit(&port->fp_mutex);
1509
1510 cmn_err(CE_WARN, "fp(%d): %d fp_cmd(s) is/are in progress"
1511 " Failing detach", port->fp_instance, port->fp_out_fpcmds);
1512 return (DDI_FAILURE);
1513 }
1514
1515 while ((port->fp_soft_state &
1516 (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) &&
1517 (delay_count < 30)) {
1518 mutex_exit(&port->fp_mutex);
1519 delay_count++;
1520 delay(drv_usectohz(1000000));
1521 mutex_enter(&port->fp_mutex);
1522 }
1523
1524 if (port->fp_soft_state &
1525 (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) {
1526 port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
1527 mutex_exit(&port->fp_mutex);
1528
1529 cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
1530 " Failing detach", port->fp_instance);
1531 return (DDI_FAILURE);
1532 }
1533
1534 port->fp_soft_state |= FP_SOFT_IN_DETACH;
1535 port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
1536 mutex_exit(&port->fp_mutex);
1537
1538 /*
1539 * If we're powered down, we need to raise power prior to submitting
1540 * the JOB_PORT_SHUTDOWN job. Otherwise, the job handler will never
1541 * process the shutdown job.
1542 */
1543 if (fctl_busy_port(port) != 0) {
1544 cmn_err(CE_WARN, "fp(%d): fctl_busy_port failed",
1545 port->fp_instance);
1546 mutex_enter(&port->fp_mutex);
1547 port->fp_soft_state &= ~FP_SOFT_IN_DETACH;
1548 mutex_exit(&port->fp_mutex);
1549 return (DDI_FAILURE);
1550 }
1551
1552 /*
1553 * This will deallocate data structs and cause the "job" thread
1554 * to exit, in preparation for DDI_DETACH on the instance.
1555 * This can sleep for an arbitrary duration, since it waits for
1556 * commands over the wire, timeout(9F) callbacks, etc.
1557 *
1558  * CAUTION: There is still a race here, where the "job" thread
1559  * can still be executing code even though the fctl_jobwait() call
1560  * below has returned to us. In theory the fp driver could even be
1561  * modunloaded even though the job thread isn't done executing,
1562  * and there is no way here to close that race condition.
1563 */
1564 job = fctl_alloc_job(JOB_PORT_SHUTDOWN, 0, NULL,
1565 (opaque_t)port, KM_SLEEP);
1566 fctl_enque_job(port, job);
1567 fctl_jobwait(job);
1568 fctl_dealloc_job(job);
1569
1570
1571 (void) pm_lower_power(port->fp_port_dip, FP_PM_COMPONENT,
1572 FP_PM_PORT_DOWN);
1573
1574 if (port->fp_taskq) {
1575 taskq_destroy(port->fp_taskq);
1576 }
1577
1578 ddi_prop_remove_all(port->fp_port_dip);
1579
1580 ddi_remove_minor_node(port->fp_port_dip, NULL);
1581
1582 fctl_remove_port(port);
1583
1584 fp_free_pkt(port->fp_els_resp_pkt);
1585
1586 if (port->fp_ub_tokens) {
1587 if (fc_ulp_ubfree(port, port->fp_ub_count,
1588 port->fp_ub_tokens) != FC_SUCCESS) {
1589 cmn_err(CE_WARN, "fp(%d): couldn't free "
1590 " unsolicited buffers", port->fp_instance);
1591 }
1592 kmem_free(port->fp_ub_tokens,
1593 sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
1594 port->fp_ub_tokens = NULL;
1595 }
1596
1597 if (port->fp_pkt_cache != NULL) {
1598 kmem_cache_destroy(port->fp_pkt_cache);
1599 }
1600
1601 port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
1602
1603 mutex_enter(&port->fp_mutex);
1604 if (port->fp_did_table) {
1605 kmem_free(port->fp_did_table, did_table_size *
1606 sizeof (struct d_id_hash));
1607 }
1608
1609 if (port->fp_pwwn_table) {
1610 kmem_free(port->fp_pwwn_table, pwwn_table_size *
1611 sizeof (struct pwwn_hash));
1612 }
1613 orp = port->fp_orphan_list;
1614 while (orp) {
1615 tmporp = orp;
1616 orp = orp->orp_next;
1617 kmem_free(tmporp, sizeof (*orp));
1618 }
1619
1620 mutex_exit(&port->fp_mutex);
1621
1622 fp_log_port_event(port, ESC_SUNFC_PORT_DETACH);
1623
1624 mutex_destroy(&port->fp_mutex);
1625 cv_destroy(&port->fp_attach_cv);
1626 cv_destroy(&port->fp_cv);
1627 ddi_soft_state_free(fp_driver_softstate, port->fp_instance);
1628
1629 return (DDI_SUCCESS);
1630 }
1631
1632
1633 /*
1634 * Steps to perform DDI_SUSPEND operation on a FC port
1635 *
1636 * - If already suspended return DDI_FAILURE
1637 * - If already power-suspended return DDI_SUCCESS
1638  * - If an unsolicited callback or state change handling is
1639  *   in progress, throw a warning message, return DDI_FAILURE
1640 * - Cancel timeouts
1641 * - SUSPEND the job_handler thread (means do nothing as it is
1642 * taken care of by the CPR frame work)
1643 */
1644 static int
1645 fp_suspend_handler(fc_local_port_t *port)
1646 {
1647 uint32_t delay_count;
1648
1649 mutex_enter(&port->fp_mutex);
1650
1651 /*
1652 * The following should never happen, but
1653 * let the driver be more defensive here
1654 */
1655 if (port->fp_soft_state & FP_SOFT_SUSPEND) {
1656 mutex_exit(&port->fp_mutex);
1657 return (DDI_FAILURE);
1658 }
1659
1660 /*
1661 * If the port is already power suspended, there
1662  * is nothing else to do, so return DDI_SUCCESS,
1663 * but mark the SUSPEND bit in the soft state
1664 * before leaving.
1665 */
1666 if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
1667 port->fp_soft_state |= FP_SOFT_SUSPEND;
1668 mutex_exit(&port->fp_mutex);
1669 return (DDI_SUCCESS);
1670 }
1671
1672 /*
1673 * Check if an unsolicited callback or state change handling is
1674 * in progress. If true, fail the suspend operation; also throw
1675 * a warning message notifying the failure. Note that Sun PCI
1676 * hotplug spec recommends messages in cases of failure (but
1677 * not flooding the console)
1678 *
1679 * Busy waiting for a short interval (500 millisecond ?) to see
1680 * if the callback processing completes may be another idea. Since
1681 * most of the callback processing involves a lot of work, it
1682 * is safe to just fail the SUSPEND operation. It is definitely
1683 * not bad to fail the SUSPEND operation if the driver is busy.
1684 */
1685 delay_count = 0;
1686 while ((port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
1687 FP_SOFT_IN_UNSOL_CB)) && (delay_count < 30)) {
1688 mutex_exit(&port->fp_mutex);
1689 delay_count++;
1690 delay(drv_usectohz(1000000));
1691 mutex_enter(&port->fp_mutex);
1692 }
1693
1694 if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
1695 FP_SOFT_IN_UNSOL_CB)) {
1696 mutex_exit(&port->fp_mutex);
1697 cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
1698 " Failing suspend", port->fp_instance);
1699 return (DDI_FAILURE);
1700 }
1701
1702 /*
1703  * Check if the FC port thread is busy
1704 */
1705 if (port->fp_job_head) {
1706 mutex_exit(&port->fp_mutex);
1707 FP_TRACE(FP_NHEAD2(9, 0),
1708 "FC port thread is busy: Failing suspend");
1709 return (DDI_FAILURE);
1710 }
1711 port->fp_soft_state |= FP_SOFT_SUSPEND;
1712
1713 fp_suspend_all(port);
1714 mutex_exit(&port->fp_mutex);
1715
1716 return (DDI_SUCCESS);
1717 }
1718
1719
1720 /*
1721 * Prepare for graceful power down of a FC port
1722 */
1723 static int
1724 fp_power_down(fc_local_port_t *port)
1725 {
1726 ASSERT(MUTEX_HELD(&port->fp_mutex));
1727
1728 /*
1729 * Power down request followed by a DDI_SUSPEND should
1730  * never happen; if it does, return DDI_SUCCESS.
1731 */
1732 if (port->fp_soft_state & FP_SOFT_SUSPEND) {
1733 port->fp_soft_state |= FP_SOFT_POWER_DOWN;
1734 return (DDI_SUCCESS);
1735 }
1736
1737 /*
1738 * If the port is already power suspended, there
1739  * is nothing else to do, so return DDI_SUCCESS.
1740 */
1741 if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
1742 return (DDI_SUCCESS);
1743 }
1744
1745 /*
1746 * Check if an unsolicited callback or state change handling
1747 * is in progress. If true, fail the PM suspend operation.
1748 * But don't print a message unless the verbosity of the
1749 * driver desires otherwise.
1750 */
1751 if ((port->fp_soft_state & FP_SOFT_IN_STATEC_CB) ||
1752 (port->fp_soft_state & FP_SOFT_IN_UNSOL_CB)) {
1753 FP_TRACE(FP_NHEAD2(9, 0),
1754 "Unsolicited callback in progress: Failing power down");
1755 return (DDI_FAILURE);
1756 }
1757
1758 /*
1759  * Check if the FC port thread is busy
1760 */
1761 if (port->fp_job_head) {
1762 FP_TRACE(FP_NHEAD2(9, 0),
1763 "FC port thread is busy: Failing power down");
1764 return (DDI_FAILURE);
1765 }
1766 port->fp_soft_state |= FP_SOFT_POWER_DOWN;
1767
1768 /*
1769 * check if the ULPs are ready for power down
1770 */
1771 mutex_exit(&port->fp_mutex);
1772 if (fctl_detach_ulps(port, FC_CMD_POWER_DOWN,
1773 &modlinkage) != FC_SUCCESS) {
1774 mutex_enter(&port->fp_mutex);
1775 port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
1776 mutex_exit(&port->fp_mutex);
1777
1778 /*
1779 * Power back up the obedient ULPs that went down
1780 */
1781 fp_attach_ulps(port, FC_CMD_POWER_UP);
1782
1783 FP_TRACE(FP_NHEAD2(9, 0),
1784 "ULP(s) busy, detach_ulps failed. Failing power down");
1785 mutex_enter(&port->fp_mutex);
1786 return (DDI_FAILURE);
1787 }
1788 mutex_enter(&port->fp_mutex);
1789
1790 fp_suspend_all(port);
1791
1792 return (DDI_SUCCESS);
1793 }
1794
1795
1796 /*
1797 * Suspend the entire FC port
1798 */
1799 static void
1800 fp_suspend_all(fc_local_port_t *port)
1801 {
1802 int index;
1803 struct pwwn_hash *head;
1804 fc_remote_port_t *pd;
1805
1806 ASSERT(MUTEX_HELD(&port->fp_mutex));
1807
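	/*
	 * Cancel any pending retry/offline timeouts. fp_mutex is dropped
	 * around untimeout() so that a timeout handler that is already
	 * running can acquire the mutex and finish; the FP_RESTORE_*
	 * flags record what was cancelled for fp_resume_all() to act on
	 * later.
	 */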
1808 if (port->fp_wait_tid != 0) {
1809 timeout_id_t tid;
1810
1811 tid = port->fp_wait_tid;
1812 port->fp_wait_tid = (timeout_id_t)NULL;
1813 mutex_exit(&port->fp_mutex);
1814 (void) untimeout(tid);
1815 mutex_enter(&port->fp_mutex);
1816 port->fp_restore |= FP_RESTORE_WAIT_TIMEOUT;
1817 }
1818
1819 if (port->fp_offline_tid) {
1820 timeout_id_t tid;
1821
1822 tid = port->fp_offline_tid;
1823 port->fp_offline_tid = (timeout_id_t)NULL;
1824 mutex_exit(&port->fp_mutex);
1825 (void) untimeout(tid);
1826 mutex_enter(&port->fp_mutex);
1827 port->fp_restore |= FP_RESTORE_OFFLINE_TIMEOUT;
1828 }
1829 mutex_exit(&port->fp_mutex);
1830 port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
1831 mutex_enter(&port->fp_mutex);
1832
1833 /*
1834 * Mark all devices as OLD, and reset the LOGIN state as well
1835 * (this will force the ULPs to perform a LOGIN after calling
1836 * fc_portgetmap() during RESUME/PM_RESUME)
1837 */
1838 for (index = 0; index < pwwn_table_size; index++) {
1839 head = &port->fp_pwwn_table[index];
1840 pd = head->pwwn_head;
1841 while (pd != NULL) {
1842 mutex_enter(&pd->pd_mutex);
1843 fp_remote_port_offline(pd);
1844 fctl_delist_did_table(port, pd);
1845 pd->pd_state = PORT_DEVICE_VALID;
1846 pd->pd_login_count = 0;
1847 mutex_exit(&pd->pd_mutex);
1848 pd = pd->pd_wwn_hnext;
1849 }
1850 }
1851 }
1852
1853
1854 /*
1855 * fp_cache_constructor: Constructor function for kmem_cache_create(9F).
1856  * Performs initializations for fc_packet_t structs.
1857 * Returns 0 for success or -1 for failure.
1858 *
1859 * This function allocates DMA handles for both command and responses.
1860 * Most of the ELSs used have both command and responses so it is strongly
1861 * desired to move them to cache constructor routine.
1862 *
1863 * Context: Can sleep iff called with KM_SLEEP flag.
1864 */
1865 static int
1866 fp_cache_constructor(void *buf, void *cdarg, int kmflags)
1867 {
1868 int (*cb) (caddr_t);
1869 fc_packet_t *pkt;
1870 fp_cmd_t *cmd = (fp_cmd_t *)buf;
1871 fc_local_port_t *port = (fc_local_port_t *)cdarg;
1872
1873 cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
1874
1875 cmd->cmd_next = NULL;
1876 cmd->cmd_flags = 0;
1877 cmd->cmd_dflags = 0;
1878 cmd->cmd_job = NULL;
1879 cmd->cmd_port = port;
1880 pkt = &cmd->cmd_pkt;
1881
1882 if (!(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) {
1883 if (ddi_dma_alloc_handle(port->fp_fca_dip,
1884 port->fp_fca_tran->fca_dma_attr, cb, NULL,
1885 &pkt->pkt_cmd_dma) != DDI_SUCCESS) {
1886 return (-1);
1887 }
1888
1889 if (ddi_dma_alloc_handle(port->fp_fca_dip,
1890 port->fp_fca_tran->fca_dma_attr, cb, NULL,
1891 &pkt->pkt_resp_dma) != DDI_SUCCESS) {
1892 ddi_dma_free_handle(&pkt->pkt_cmd_dma);
1893 return (-1);
1894 }
1895 } else {
1896 pkt->pkt_cmd_dma = 0;
1897 pkt->pkt_resp_dma = 0;
1898 }
1899
1900 pkt->pkt_cmd_acc = pkt->pkt_resp_acc = NULL;
1901 pkt->pkt_cmd_cookie_cnt = pkt->pkt_resp_cookie_cnt =
1902 pkt->pkt_data_cookie_cnt = 0;
1903 pkt->pkt_cmd_cookie = pkt->pkt_resp_cookie =
1904 pkt->pkt_data_cookie = NULL;
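	/*
	 * The FCA's per-packet private area sits immediately after the
	 * fp_cmd_t in the same cache buffer (the cache object was created
	 * with size fca_pkt_size + sizeof (fp_cmd_t) in fp_attach_handler()).
	 */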
1905 pkt->pkt_fca_private = (caddr_t)buf + sizeof (fp_cmd_t);
1906
1907 return (0);
1908 }
1909
1910
1911 /*
1912 * fp_cache_destructor: Destructor function for kmem_cache_create().
1913  * Performs un-initializations for fc_packet_t structs.
1914 */
1915 /* ARGSUSED */
1916 static void
1917 fp_cache_destructor(void *buf, void *cdarg)
1918 {
1919 fp_cmd_t *cmd = (fp_cmd_t *)buf;
1920 fc_packet_t *pkt;
1921
1922 pkt = &cmd->cmd_pkt;
1923 if (pkt->pkt_cmd_dma) {
1924 ddi_dma_free_handle(&pkt->pkt_cmd_dma);
1925 }
1926
1927 if (pkt->pkt_resp_dma) {
1928 ddi_dma_free_handle(&pkt->pkt_resp_dma);
1929 }
1930 }
1931
1932
1933 /*
1934 * Packet allocation for ELS and any other port driver commands
1935 *
1936  * Some ELSs like FLOGI and PLOGI are critical for topology and
1937  * device discovery, and a system's inability to allocate memory
1938  * or DVMA resources while performing some of these critical ELSs
1939  * causes a lot of problems. While memory allocation failures are
1940  * rare, DVMA resource failures are common as applications become
1941  * more and more demanding on huge servers. So it is desirable to
1942  * have framework support to reserve a fragment of DVMA. Until this
1943  * is fixed the correct way, the suffering is huge whenever a LIP
1944  * happens at a time DVMA resources are drained out completely - so
1945  * an attempt needs to be made to KM_SLEEP while requesting these
1946  * resources, hoping that the requests won't hang forever.
1948 *
1949 * The fc_remote_port_t argument is stored into the pkt_pd field in the
1950 * fc_packet_t struct prior to the fc_ulp_init_packet() call. This
1951 * ensures that the pd_ref_count for the fc_remote_port_t is valid.
1952 * If there is no fc_remote_port_t associated with the fc_packet_t, then
1953 * fp_alloc_pkt() must be called with pd set to NULL.
1954 *
1955 * fp/fctl will resue fp_cmd_t somewhere, and change pkt_cmdlen/rsplen,
1956 * actually, it's a design fault. But there's no problem for physical
1957 * FCAs. But it will cause memory leak or panic for virtual FCAs like fcoei.
1958 *
1959 * For FCAs that don't support DMA, such as fcoei, we will use
1960 * pkt_fctl_rsvd1/rsvd2 to keep the real cmd_len/resp_len.
1961 */
1962
1963 static fp_cmd_t *
1964 fp_alloc_pkt(fc_local_port_t *port, int cmd_len, int resp_len, int kmflags,
1965 fc_remote_port_t *pd)
1966 {
1967 int rval;
1968 ulong_t real_len;
1969 fp_cmd_t *cmd;
1970 fc_packet_t *pkt;
1971 int (*cb) (caddr_t);
1972 ddi_dma_cookie_t pkt_cookie;
1973 ddi_dma_cookie_t *cp;
1974 uint32_t cnt;
1975
1976 ASSERT(!MUTEX_HELD(&port->fp_mutex));
1977
1978 cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
1979
1980 cmd = (fp_cmd_t *)kmem_cache_alloc(port->fp_pkt_cache, kmflags);
1981 if (cmd == NULL) {
1982 return (cmd);
1983 }
1984
1985 cmd->cmd_ulp_pkt = NULL;
1986 cmd->cmd_flags = 0;
1987 pkt = &cmd->cmd_pkt;
1988 ASSERT(cmd->cmd_dflags == 0);
1989
1990 pkt->pkt_datalen = 0;
1991 pkt->pkt_data = NULL;
1992 pkt->pkt_state = 0;
1993 pkt->pkt_action = 0;
1994 pkt->pkt_reason = 0;
1995 pkt->pkt_expln = 0;
1996 pkt->pkt_cmd = NULL;
1997 pkt->pkt_resp = NULL;
1998 pkt->pkt_fctl_rsvd1 = NULL;
1999 pkt->pkt_fctl_rsvd2 = NULL;
2000
2001 /*
2002 * Init pkt_pd with the given pointer; this must be done _before_
2003 * the call to fc_ulp_init_packet().
2004 */
2005 pkt->pkt_pd = pd;
2006
2007 /* Now call the FCA driver to init its private, per-packet fields */
2008 if (fc_ulp_init_packet((opaque_t)port, pkt, kmflags) != FC_SUCCESS) {
2009 goto alloc_pkt_failed;
2010 }
2011
2012 if (cmd_len && !(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) {
2013 ASSERT(pkt->pkt_cmd_dma != NULL);
2014
2015 rval = ddi_dma_mem_alloc(pkt->pkt_cmd_dma, cmd_len,
2016 port->fp_fca_tran->fca_acc_attr, DDI_DMA_CONSISTENT,
2017 cb, NULL, (caddr_t *)&pkt->pkt_cmd, &real_len,
2018 &pkt->pkt_cmd_acc);
2019
2020 if (rval != DDI_SUCCESS) {
2021 goto alloc_pkt_failed;
2022 }
2023 cmd->cmd_dflags |= FP_CMD_VALID_DMA_MEM;
2024
2025 if (real_len < cmd_len) {
2026 goto alloc_pkt_failed;
2027 }
2028
2029 rval = ddi_dma_addr_bind_handle(pkt->pkt_cmd_dma, NULL,
2030 pkt->pkt_cmd, real_len, DDI_DMA_WRITE |
2031 DDI_DMA_CONSISTENT, cb, NULL,
2032 &pkt_cookie, &pkt->pkt_cmd_cookie_cnt);
2033
2034 if (rval != DDI_DMA_MAPPED) {
2035 goto alloc_pkt_failed;
2036 }
2037
2038 cmd->cmd_dflags |= FP_CMD_VALID_DMA_BIND;
2039
2040 if (pkt->pkt_cmd_cookie_cnt >
2041 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) {
2042 goto alloc_pkt_failed;
2043 }
2044
2045 ASSERT(pkt->pkt_cmd_cookie_cnt != 0);
2046
2047 cp = pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
2048 pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
2049 KM_NOSLEEP);
2050
2051 if (cp == NULL) {
2052 goto alloc_pkt_failed;
2053 }
2054
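/*
 * Save the first cookie returned by the bind, then walk the remaining
 * cookies with ddi_dma_nextcookie().
 */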
2055 *cp = pkt_cookie;
2056 cp++;
2057 for (cnt = 1; cnt < pkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
2058 ddi_dma_nextcookie(pkt->pkt_cmd_dma, &pkt_cookie);
2059 *cp = pkt_cookie;
2060 }
2061 } else if (cmd_len != 0) {
2062 pkt->pkt_cmd = kmem_alloc(cmd_len, KM_SLEEP);
2063 pkt->pkt_fctl_rsvd1 = (opaque_t)(uintptr_t)cmd_len;
2064 }
2065
2066 if (resp_len && !(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) {
2067 ASSERT(pkt->pkt_resp_dma != NULL);
2068
2069 rval = ddi_dma_mem_alloc(pkt->pkt_resp_dma, resp_len,
2070 port->fp_fca_tran->fca_acc_attr,
2071 DDI_DMA_CONSISTENT, cb, NULL,
2072 (caddr_t *)&pkt->pkt_resp, &real_len,
2073 &pkt->pkt_resp_acc);
2074
2075 if (rval != DDI_SUCCESS) {
2076 goto alloc_pkt_failed;
2077 }
2078 cmd->cmd_dflags |= FP_RESP_VALID_DMA_MEM;
2079
2080 if (real_len < resp_len) {
2081 goto alloc_pkt_failed;
2082 }
2083
2084 rval = ddi_dma_addr_bind_handle(pkt->pkt_resp_dma, NULL,
2085 pkt->pkt_resp, real_len, DDI_DMA_READ |
2086 DDI_DMA_CONSISTENT, cb, NULL,
2087 &pkt_cookie, &pkt->pkt_resp_cookie_cnt);
2088
2089 if (rval != DDI_DMA_MAPPED) {
2090 goto alloc_pkt_failed;
2091 }
2092
2093 cmd->cmd_dflags |= FP_RESP_VALID_DMA_BIND;
2094
2095 if (pkt->pkt_resp_cookie_cnt >
2096 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) {
2097 goto alloc_pkt_failed;
2098 }
2099
2100 ASSERT(pkt->pkt_resp_cookie_cnt != 0);
2101
2102 cp = pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
2103 pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
2104 KM_NOSLEEP);
2105
2106 if (cp == NULL) {
2107 goto alloc_pkt_failed;
2108 }
2109
2110 *cp = pkt_cookie;
2111 cp++;
2112 for (cnt = 1; cnt < pkt->pkt_resp_cookie_cnt; cnt++, cp++) {
2113 ddi_dma_nextcookie(pkt->pkt_resp_dma, &pkt_cookie);
2114 *cp = pkt_cookie;
2115 }
2116 } else if (resp_len != 0) {
2117 pkt->pkt_resp = kmem_alloc(resp_len, KM_SLEEP);
2118 pkt->pkt_fctl_rsvd2 = (opaque_t)(uintptr_t)resp_len;
2119 }
2120
2121 pkt->pkt_cmdlen = cmd_len;
2122 pkt->pkt_rsplen = resp_len;
2123 pkt->pkt_ulp_private = cmd;
2124
2125 return (cmd);
2126
2127 alloc_pkt_failed:
2128
2129 fp_free_dma(cmd);
2130
2131 if (pkt->pkt_cmd_cookie != NULL) {
2132 kmem_free(pkt->pkt_cmd_cookie,
2133 pkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
2134 pkt->pkt_cmd_cookie = NULL;
2135 }
2136
2137 if (pkt->pkt_resp_cookie != NULL) {
2138 kmem_free(pkt->pkt_resp_cookie,
2139 pkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
2140 pkt->pkt_resp_cookie = NULL;
2141 }
2142
2143 if (port->fp_soft_state & FP_SOFT_FCA_IS_NODMA) {
2144 if (pkt->pkt_cmd) {
2145 kmem_free(pkt->pkt_cmd, cmd_len);
2146 }
2147
2148 if (pkt->pkt_resp) {
2149 kmem_free(pkt->pkt_resp, resp_len);
2150 }
2151 }
2152
2153 kmem_cache_free(port->fp_pkt_cache, cmd);
2154
2155 return (NULL);
2156 }
2157
2158
2159 /*
2160 * Free FC packet
2161 */
2162 static void
2163 fp_free_pkt(fp_cmd_t *cmd)
2164 {
2165 fc_local_port_t *port;
2166 fc_packet_t *pkt;
2167
2168 ASSERT(!MUTEX_HELD(&cmd->cmd_port->fp_mutex));
2169
2170 cmd->cmd_next = NULL;
2171 cmd->cmd_job = NULL;
2172 pkt = &cmd->cmd_pkt;
2173 pkt->pkt_ulp_private = 0;
2174 pkt->pkt_tran_flags = 0;
2175 pkt->pkt_tran_type = 0;
2176 port = cmd->cmd_port;
2177
2178 if (pkt->pkt_cmd_cookie != NULL) {
2179 kmem_free(pkt->pkt_cmd_cookie, pkt->pkt_cmd_cookie_cnt *
2180 sizeof (ddi_dma_cookie_t));
2181 pkt->pkt_cmd_cookie = NULL;
2182 }
2183
2184 if (pkt->pkt_resp_cookie != NULL) {
2185 kmem_free(pkt->pkt_resp_cookie, pkt->pkt_resp_cookie_cnt *
2186 sizeof (ddi_dma_cookie_t));
2187 pkt->pkt_resp_cookie = NULL;
2188 }
2189
2190 if (port->fp_soft_state & FP_SOFT_FCA_IS_NODMA) {
2191 if (pkt->pkt_cmd) {
2192 kmem_free(pkt->pkt_cmd,
2193 (uint32_t)(uintptr_t)pkt->pkt_fctl_rsvd1);
2194 }
2195
2196 if (pkt->pkt_resp) {
2197 kmem_free(pkt->pkt_resp,
2198 (uint32_t)(uintptr_t)pkt->pkt_fctl_rsvd2);
2199 }
2200 }
2201
2202 fp_free_dma(cmd);
2203 (void) fc_ulp_uninit_packet((opaque_t)port, pkt);
2204 kmem_cache_free(port->fp_pkt_cache, (void *)cmd);
2205 }
2206
2207
2208 /*
2209 * Release DVMA resources
2210 */
2211 static void
2212 fp_free_dma(fp_cmd_t *cmd)
2213 {
2214 fc_packet_t *pkt = &cmd->cmd_pkt;
2215
2216 pkt->pkt_cmdlen = 0;
2217 pkt->pkt_rsplen = 0;
2218 pkt->pkt_tran_type = 0;
2219 pkt->pkt_tran_flags = 0;
2220
2221 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_BIND) {
2222 (void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma);
2223 }
2224
2225 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_MEM) {
2226 if (pkt->pkt_cmd_acc) {
2227 ddi_dma_mem_free(&pkt->pkt_cmd_acc);
2228 }
2229 }
2230
2231 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_BIND) {
2232 (void) ddi_dma_unbind_handle(pkt->pkt_resp_dma);
2233 }
2234
2235 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_MEM) {
2236 if (pkt->pkt_resp_acc) {
2237 ddi_dma_mem_free(&pkt->pkt_resp_acc);
2238 }
2239 }
2240 cmd->cmd_dflags = 0;
2241 }
2242
2243
2244 /*
2245 * Dedicated thread to perform various activities.  There is one such
2246 * thread for each fc_local_port_t (driver soft state) instance.
2247 * Note that in addition to this per-port thread, some Solaris taskq
2248 * threads are also in use on a per-local-port basis; those need to be
2249 * taken into consideration as well.
2250 */
2251 static void
2252 fp_job_handler(fc_local_port_t *port)
2253 {
2254 int rval;
2255 uint32_t *d_id;
2256 fc_remote_port_t *pd;
2257 job_request_t *job;
2258
2259 #ifndef __lock_lint
2260 /*
2261 * Solaris-internal stuff for proper operation of kernel threads
2262 * with Solaris CPR.
2263 */
2264 CALLB_CPR_INIT(&port->fp_cpr_info, &port->fp_mutex,
2265 callb_generic_cpr, "fp_job_handler");
2266 #endif
2267
2268
2269 /* Loop forever waiting for work to do */
2270 for (;;) {
2271
2272 mutex_enter(&port->fp_mutex);
2273
2274 /*
2275 * Sleep if no work to do right now, or if we want
2276 * to suspend or power-down.
2277 */
2278 while (port->fp_job_head == NULL ||
2279 (port->fp_soft_state & (FP_SOFT_POWER_DOWN |
2280 FP_SOFT_SUSPEND))) {
2281 CALLB_CPR_SAFE_BEGIN(&port->fp_cpr_info);
2282 cv_wait(&port->fp_cv, &port->fp_mutex);
2283 CALLB_CPR_SAFE_END(&port->fp_cpr_info, &port->fp_mutex);
2284 }
2285
2286 /*
2287 * OK, we've just been woken up, so retrieve the next entry
2288 * from the head of the job queue for this local port.
2289 */
2290 job = fctl_deque_job(port);
2291
2292 /*
2293 * Handle all the fp driver's supported job codes here
2294 * in this big honkin' switch.
2295 */
2296 switch (job->job_code) {
2297 case JOB_PORT_SHUTDOWN:
2298 /*
2299 * fp_port_shutdown() is only called from here. This
2300 * will prepare the local port instance (softstate)
2301 * for detaching. This cancels timeout callbacks,
2302 * executes LOGOs with remote ports, cleans up tables,
2303 * and deallocates data structs.
2304 */
2305 fp_port_shutdown(port, job);
2306
2307 /*
2308 * This will exit the job thread.
2309 */
2310 #ifndef __lock_lint
2311 CALLB_CPR_EXIT(&(port->fp_cpr_info));
2312 #else
2313 mutex_exit(&port->fp_mutex);
2314 #endif
2315 fctl_jobdone(job);
2316 thread_exit();
2317
2318 /* NOTREACHED */
2319
2320 case JOB_ATTACH_ULP: {
2321 /*
2322 * This job is spawned in response to a ULP calling
2323 * fc_ulp_add().
2324 */
2325
2326 boolean_t do_attach_ulps = B_TRUE;
2327
2328 /*
2329 * If fp is detaching, we don't want to call
2330 * fp_startup_done as this asynchronous
2331 * notification may interfere with the re-attach.
2332 */
2333
2334 if (port->fp_soft_state & (FP_DETACH_INPROGRESS |
2335 FP_SOFT_IN_DETACH | FP_DETACH_FAILED)) {
2336 do_attach_ulps = B_FALSE;
2337 } else {
2338 /*
2339 * We are going to force the transport
2340 * to attach to the ULPs, so set
2341 * fp_ulp_attach. This will keep any
2342 * potential detach from occurring until
2343 * we are done.
2344 */
2345 port->fp_ulp_attach = 1;
2346 }
2347
2348 mutex_exit(&port->fp_mutex);
2349
2350 /*
2351 * NOTE: Since we just dropped the mutex, there is now
2352 * a race window where the fp_soft_state check above
2353 * could change here. This race is covered because an
2354 * additional check was added in the functions hidden
2355 * under fp_startup_done().
2356 */
2357 if (do_attach_ulps == B_TRUE) {
2358 /*
2359 * This goes thru a bit of a convoluted call
2360 * chain before spawning off a DDI taskq
2361 * request to perform the actual attach
2362 * operations. Blocking can occur at a number
2363 * of points.
2364 */
2365 fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
2366 }
2367 job->job_result = FC_SUCCESS;
2368 fctl_jobdone(job);
2369 break;
2370 }
2371
2372 case JOB_ULP_NOTIFY: {
2373 /*
2374 * Pass state change notifications up to any/all
2375 * registered ULPs.
2376 */
2377 uint32_t statec;
2378
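/*
 * For this job type, job_ulp_listlen is overloaded to carry the new
 * port state being reported.
 */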
2379 statec = job->job_ulp_listlen;
2380 if (statec == FC_STATE_RESET_REQUESTED) {
2381 port->fp_last_task = port->fp_task;
2382 port->fp_task = FP_TASK_OFFLINE;
2383 fp_port_offline(port, 0);
2384 port->fp_task = port->fp_last_task;
2385 port->fp_last_task = FP_TASK_IDLE;
2386 }
2387
2388 if (--port->fp_statec_busy == 0) {
2389 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
2390 }
2391
2392 mutex_exit(&port->fp_mutex);
2393
2394 job->job_result = fp_ulp_notify(port, statec, KM_SLEEP);
2395 fctl_jobdone(job);
2396 break;
2397 }
2398
2399 case JOB_PLOGI_ONE:
2400 /*
2401 * Issue a PLOGI to a single remote port. Multiple
2402 * PLOGIs to different remote ports may occur in
2403 * parallel.
2404 * This can create the fc_remote_port_t if it does not
2405 * already exist.
2406 */
2407
2408 mutex_exit(&port->fp_mutex);
2409 d_id = (uint32_t *)job->job_private;
2410 pd = fctl_get_remote_port_by_did(port, *d_id);
2411
2412 if (pd) {
2413 mutex_enter(&pd->pd_mutex);
2414 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
2415 pd->pd_login_count++;
2416 mutex_exit(&pd->pd_mutex);
2417 job->job_result = FC_SUCCESS;
2418 fctl_jobdone(job);
2419 break;
2420 }
2421 mutex_exit(&pd->pd_mutex);
2422 } else {
2423 mutex_enter(&port->fp_mutex);
2424 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
2425 mutex_exit(&port->fp_mutex);
2426 pd = fp_create_remote_port_by_ns(port,
2427 *d_id, KM_SLEEP);
2428 if (pd == NULL) {
2429 job->job_result = FC_FAILURE;
2430 fctl_jobdone(job);
2431 break;
2432 }
2433 } else {
2434 mutex_exit(&port->fp_mutex);
2435 }
2436 }
2437
2438 job->job_flags |= JOB_TYPE_FP_ASYNC;
2439 job->job_counter = 1;
2440
2441 rval = fp_port_login(port, *d_id, job,
2442 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL);
2443
2444 if (rval != FC_SUCCESS) {
2445 job->job_result = rval;
2446 fctl_jobdone(job);
2447 }
2448 break;
2449
2450 case JOB_LOGO_ONE: {
2451 /*
2452 * Issue a PLOGO to a single remote port. Multiple
2453 * PLOGOs to different remote ports may occur in
2454 * parallel.
2455 */
2456 fc_remote_port_t *pd;
2457
2458 #ifndef __lock_lint
2459 ASSERT(job->job_counter > 0);
2460 #endif
2461
2462 pd = (fc_remote_port_t *)job->job_ulp_pkts;
2463
2464 mutex_enter(&pd->pd_mutex);
2465 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
2466 mutex_exit(&pd->pd_mutex);
2467 job->job_result = FC_LOGINREQ;
2468 mutex_exit(&port->fp_mutex);
2469 fctl_jobdone(job);
2470 break;
2471 }
2472 if (pd->pd_login_count > 1) {
2473 pd->pd_login_count--;
2474 mutex_exit(&pd->pd_mutex);
2475 job->job_result = FC_SUCCESS;
2476 mutex_exit(&port->fp_mutex);
2477 fctl_jobdone(job);
2478 break;
2479 }
2480 mutex_exit(&pd->pd_mutex);
2481 mutex_exit(&port->fp_mutex);
2482 job->job_flags |= JOB_TYPE_FP_ASYNC;
2483 (void) fp_logout(port, pd, job);
2484 break;
2485 }
2486
2487 case JOB_FCIO_LOGIN:
2488 /*
2489 * PLOGI initiated at ioctl request.
2490 */
2491 mutex_exit(&port->fp_mutex);
2492 job->job_result =
2493 fp_fcio_login(port, job->job_private, job);
2494 fctl_jobdone(job);
2495 break;
2496
2497 case JOB_FCIO_LOGOUT:
2498 /*
2499 * PLOGO initiated at ioctl request.
2500 */
2501 mutex_exit(&port->fp_mutex);
2502 job->job_result =
2503 fp_fcio_logout(port, job->job_private, job);
2504 fctl_jobdone(job);
2505 break;
2506
2507 case JOB_PORT_GETMAP:
2508 case JOB_PORT_GETMAP_PLOGI_ALL: {
2509 port->fp_last_task = port->fp_task;
2510 port->fp_task = FP_TASK_GETMAP;
2511
2512 switch (port->fp_topology) {
2513 case FC_TOP_PRIVATE_LOOP:
2514 job->job_counter = 1;
2515
2516 fp_get_loopmap(port, job);
2517 mutex_exit(&port->fp_mutex);
2518 fp_jobwait(job);
2519 fctl_fillout_map(port,
2520 (fc_portmap_t **)job->job_private,
2521 (uint32_t *)job->job_arg, 1, 0, 0);
2522 fctl_jobdone(job);
2523 mutex_enter(&port->fp_mutex);
2524 break;
2525
2526 case FC_TOP_PUBLIC_LOOP:
2527 case FC_TOP_FABRIC:
2528 mutex_exit(&port->fp_mutex);
2529 job->job_counter = 1;
2530
2531 job->job_result = fp_ns_getmap(port,
2532 job, (fc_portmap_t **)job->job_private,
2533 (uint32_t *)job->job_arg,
2534 FCTL_GAN_START_ID);
2535 fctl_jobdone(job);
2536 mutex_enter(&port->fp_mutex);
2537 break;
2538
2539 case FC_TOP_PT_PT:
2540 mutex_exit(&port->fp_mutex);
2541 fctl_fillout_map(port,
2542 (fc_portmap_t **)job->job_private,
2543 (uint32_t *)job->job_arg, 1, 0, 0);
2544 fctl_jobdone(job);
2545 mutex_enter(&port->fp_mutex);
2546 break;
2547
2548 default:
2549 mutex_exit(&port->fp_mutex);
2550 fctl_jobdone(job);
2551 mutex_enter(&port->fp_mutex);
2552 break;
2553 }
2554 port->fp_task = port->fp_last_task;
2555 port->fp_last_task = FP_TASK_IDLE;
2556 mutex_exit(&port->fp_mutex);
2557 break;
2558 }
2559
2560 case JOB_PORT_OFFLINE: {
2561 fp_log_port_event(port, ESC_SUNFC_PORT_OFFLINE);
2562
2563 port->fp_last_task = port->fp_task;
2564 port->fp_task = FP_TASK_OFFLINE;
2565
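/*
 * If more state changes are already queued behind this one, skip the
 * ULP notification and perform only the local offline processing.
 */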
2566 if (port->fp_statec_busy > 2) {
2567 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
2568 fp_port_offline(port, 0);
2569 if (--port->fp_statec_busy == 0) {
2570 port->fp_soft_state &=
2571 ~FP_SOFT_IN_STATEC_CB;
2572 }
2573 } else {
2574 fp_port_offline(port, 1);
2575 }
2576
2577 port->fp_task = port->fp_last_task;
2578 port->fp_last_task = FP_TASK_IDLE;
2579
2580 mutex_exit(&port->fp_mutex);
2581
2582 fctl_jobdone(job);
2583 break;
2584 }
2585
2586 case JOB_PORT_STARTUP: {
2587 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) {
2588 if (port->fp_statec_busy > 1) {
2589 mutex_exit(&port->fp_mutex);
2590 break;
2591 }
2592 mutex_exit(&port->fp_mutex);
2593
2594 FP_TRACE(FP_NHEAD2(9, rval),
2595 "Topology discovery failed");
2596 break;
2597 }
2598
2599 /*
2600 * Attempt building device handles in case
2601 * of private Loop.
2602 */
2603 if (port->fp_topology == FC_TOP_PRIVATE_LOOP) {
2604 job->job_counter = 1;
2605
2606 fp_get_loopmap(port, job);
2607 mutex_exit(&port->fp_mutex);
2608 fp_jobwait(job);
2609 mutex_enter(&port->fp_mutex);
2610 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) {
2611 ASSERT(port->fp_total_devices == 0);
2612 port->fp_total_devices =
2613 port->fp_dev_count;
2614 }
2615 } else if (FC_IS_TOP_SWITCH(port->fp_topology)) {
2616 /*
2617 * Hack to avoid state changes going up early
2618 */
2619 port->fp_statec_busy++;
2620 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
2621
2622 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
2623 fp_fabric_online(port, job);
2624 job->job_flags &= ~JOB_CANCEL_ULP_NOTIFICATION;
2625 }
2626 mutex_exit(&port->fp_mutex);
2627 fctl_jobdone(job);
2628 break;
2629 }
2630
2631 case JOB_PORT_ONLINE: {
2632 char *newtop;
2633 char *oldtop;
2634 uint32_t old_top;
2635
2636 fp_log_port_event(port, ESC_SUNFC_PORT_ONLINE);
2637
2638 /*
2639 * Bail out early if there are a lot of
2640 * state changes in the pipeline
2641 */
2642 if (port->fp_statec_busy > 1) {
2643 --port->fp_statec_busy;
2644 mutex_exit(&port->fp_mutex);
2645 fctl_jobdone(job);
2646 break;
2647 }
2648
2649 switch (old_top = port->fp_topology) {
2650 case FC_TOP_PRIVATE_LOOP:
2651 oldtop = "Private Loop";
2652 break;
2653
2654 case FC_TOP_PUBLIC_LOOP:
2655 oldtop = "Public Loop";
2656 break;
2657
2658 case FC_TOP_PT_PT:
2659 oldtop = "Point to Point";
2660 break;
2661
2662 case FC_TOP_FABRIC:
2663 oldtop = "Fabric";
2664 break;
2665
2666 default:
2667 oldtop = NULL;
2668 break;
2669 }
2670
2671 port->fp_last_task = port->fp_task;
2672 port->fp_task = FP_TASK_ONLINE;
2673
2674 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) {
2675
2676 port->fp_task = port->fp_last_task;
2677 port->fp_last_task = FP_TASK_IDLE;
2678
2679 if (port->fp_statec_busy > 1) {
2680 --port->fp_statec_busy;
2681 mutex_exit(&port->fp_mutex);
2682 break;
2683 }
2684
2685 port->fp_state = FC_STATE_OFFLINE;
2686
2687 FP_TRACE(FP_NHEAD2(9, rval),
2688 "Topology discovery failed");
2689
2690 if (--port->fp_statec_busy == 0) {
2691 port->fp_soft_state &=
2692 ~FP_SOFT_IN_STATEC_CB;
2693 }
2694
2695 if (port->fp_offline_tid == NULL) {
2696 port->fp_offline_tid =
2697 timeout(fp_offline_timeout,
2698 (caddr_t)port, fp_offline_ticks);
2699 }
2700
2701 mutex_exit(&port->fp_mutex);
2702 break;
2703 }
2704
2705 switch (port->fp_topology) {
2706 case FC_TOP_PRIVATE_LOOP:
2707 newtop = "Private Loop";
2708 break;
2709
2710 case FC_TOP_PUBLIC_LOOP:
2711 newtop = "Public Loop";
2712 break;
2713
2714 case FC_TOP_PT_PT:
2715 newtop = "Point to Point";
2716 break;
2717
2718 case FC_TOP_FABRIC:
2719 newtop = "Fabric";
2720 break;
2721
2722 default:
2723 newtop = NULL;
2724 break;
2725 }
2726
2727 if (oldtop && newtop && strcmp(oldtop, newtop)) {
2728 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
2729 "Change in FC Topology old = %s new = %s",
2730 oldtop, newtop);
2731 }
2732
2733 switch (port->fp_topology) {
2734 case FC_TOP_PRIVATE_LOOP: {
2735 int orphan = (old_top == FC_TOP_FABRIC ||
2736 old_top == FC_TOP_PUBLIC_LOOP) ? 1 : 0;
2737
2738 mutex_exit(&port->fp_mutex);
2739 fp_loop_online(port, job, orphan);
2740 break;
2741 }
2742
2743 case FC_TOP_PUBLIC_LOOP:
2744 /* FALLTHROUGH */
2745 case FC_TOP_FABRIC:
2746 fp_fabric_online(port, job);
2747 mutex_exit(&port->fp_mutex);
2748 break;
2749
2750 case FC_TOP_PT_PT:
2751 fp_p2p_online(port, job);
2752 mutex_exit(&port->fp_mutex);
2753 break;
2754
2755 default:
2756 if (--port->fp_statec_busy != 0) {
2757 /*
2758 * Wait and see what the next
2759 * state transition does.
2760 */
2761 mutex_exit(&port->fp_mutex);
2762 break;
2763 }
2764
2765 FP_TRACE(FP_NHEAD2(9, 0),
2766 "Topology Unknown, Offlining the port..");
2767
2768 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
2769 port->fp_state = FC_STATE_OFFLINE;
2770
2771 if (port->fp_offline_tid == NULL) {
2772 port->fp_offline_tid =
2773 timeout(fp_offline_timeout,
2774 (caddr_t)port, fp_offline_ticks);
2775 }
2776 mutex_exit(&port->fp_mutex);
2777 break;
2778 }
2779
2780 mutex_enter(&port->fp_mutex);
2781
2782 port->fp_task = port->fp_last_task;
2783 port->fp_last_task = FP_TASK_IDLE;
2784
2785 mutex_exit(&port->fp_mutex);
2786
2787 fctl_jobdone(job);
2788 break;
2789 }
2790
2791 case JOB_PLOGI_GROUP: {
2792 mutex_exit(&port->fp_mutex);
2793 fp_plogi_group(port, job);
2794 break;
2795 }
2796
2797 case JOB_UNSOL_REQUEST: {
2798 mutex_exit(&port->fp_mutex);
2799 fp_handle_unsol_buf(port,
2800 (fc_unsol_buf_t *)job->job_private, job);
2801 fctl_dealloc_job(job);
2802 break;
2803 }
2804
2805 case JOB_NS_CMD: {
2806 fctl_ns_req_t *ns_cmd;
2807
2808 mutex_exit(&port->fp_mutex);
2809
2810 job->job_flags |= JOB_TYPE_FP_ASYNC;
2811 ns_cmd = (fctl_ns_req_t *)job->job_private;
2812 if (ns_cmd->ns_cmd_code < NS_GA_NXT ||
2813 ns_cmd->ns_cmd_code > NS_DA_ID) {
2814 job->job_result = FC_BADCMD;
2815 fctl_jobdone(job);
2816 break;
2817 }
2818
2819 if (FC_IS_CMD_A_REG(ns_cmd->ns_cmd_code)) {
2820 if (ns_cmd->ns_pd != NULL) {
2821 job->job_result = FC_BADOBJECT;
2822 fctl_jobdone(job);
2823 break;
2824 }
2825
2826 job->job_counter = 1;
2827
2828 rval = fp_ns_reg(port, ns_cmd->ns_pd,
2829 ns_cmd->ns_cmd_code, job, 0, KM_SLEEP);
2830
2831 if (rval != FC_SUCCESS) {
2832 job->job_result = rval;
2833 fctl_jobdone(job);
2834 }
2835 break;
2836 }
2837 job->job_result = FC_SUCCESS;
2838 job->job_counter = 1;
2839
2840 rval = fp_ns_query(port, ns_cmd, job, 0, KM_SLEEP);
2841 if (rval != FC_SUCCESS) {
2842 fctl_jobdone(job);
2843 }
2844 break;
2845 }
2846
2847 case JOB_LINK_RESET: {
2848 la_wwn_t *pwwn;
2849 uint32_t topology;
2850
2851 pwwn = (la_wwn_t *)job->job_private;
2852 ASSERT(pwwn != NULL);
2853
2854 topology = port->fp_topology;
2855 mutex_exit(&port->fp_mutex);
2856
2857 if (fctl_is_wwn_zero(pwwn) == FC_SUCCESS ||
2858 topology == FC_TOP_PRIVATE_LOOP) {
2859 job->job_flags |= JOB_TYPE_FP_ASYNC;
2860 rval = port->fp_fca_tran->fca_reset(
2861 port->fp_fca_handle, FC_FCA_LINK_RESET);
2862 job->job_result = rval;
2863 fp_jobdone(job);
2864 } else {
2865 ASSERT((job->job_flags &
2866 JOB_TYPE_FP_ASYNC) == 0);
2867
2868 if (FC_IS_TOP_SWITCH(topology)) {
2869 rval = fp_remote_lip(port, pwwn,
2870 KM_SLEEP, job);
2871 } else {
2872 rval = FC_FAILURE;
2873 }
2874 if (rval != FC_SUCCESS) {
2875 job->job_result = rval;
2876 }
2877 fctl_jobdone(job);
2878 }
2879 break;
2880 }
2881
2882 default:
2883 mutex_exit(&port->fp_mutex);
2884 job->job_result = FC_BADCMD;
2885 fctl_jobdone(job);
2886 break;
2887 }
2888 }
2889 /* NOTREACHED */
2890 }
2891
2892
2893 /*
2894 * Perform FC port bring up initialization
2895 */
2896 static int
2897 fp_port_startup(fc_local_port_t *port, job_request_t *job)
2898 {
2899 int rval;
2900 uint32_t state;
2901 uint32_t src_id;
2902 fc_lilpmap_t *lilp_map;
2903
2904 ASSERT(MUTEX_HELD(&port->fp_mutex));
2905 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
2906
2907 FP_DTRACE(FP_NHEAD1(2, 0), "Entering fp_port_startup;"
2908 " port=%p, job=%p", port, job);
2909
2910 port->fp_topology = FC_TOP_UNKNOWN;
2911 port->fp_port_id.port_id = 0;
2912 state = FC_PORT_STATE_MASK(port->fp_state);
2913
2914 if (state == FC_STATE_OFFLINE) {
2915 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN;
2916 job->job_result = FC_OFFLINE;
2917 mutex_exit(&port->fp_mutex);
2918 fctl_jobdone(job);
2919 mutex_enter(&port->fp_mutex);
2920 return (FC_OFFLINE);
2921 }
2922
2923 if (state == FC_STATE_LOOP) {
2924 port->fp_port_type.port_type = FC_NS_PORT_NL;
2925 mutex_exit(&port->fp_mutex);
2926
2927 lilp_map = &port->fp_lilp_map;
2928 if ((rval = fp_get_lilpmap(port, lilp_map)) != FC_SUCCESS) {
2929 job->job_result = FC_FAILURE;
2930 fctl_jobdone(job);
2931
2932 FP_TRACE(FP_NHEAD1(9, rval),
2933 "LILP map Invalid or not present");
2934 mutex_enter(&port->fp_mutex);
2935 return (FC_FAILURE);
2936 }
2937
2938 if (lilp_map->lilp_length == 0) {
2939 job->job_result = FC_NO_MAP;
2940 fctl_jobdone(job);
2941 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
2942 "LILP map length zero");
2943 mutex_enter(&port->fp_mutex);
2944 return (FC_NO_MAP);
2945 }
2946 src_id = lilp_map->lilp_myalpa & 0xFF;
2947 } else {
2948 fc_remote_port_t *pd;
2949 fc_fca_pm_t pm;
2950 fc_fca_p2p_info_t p2p_info;
2951 int pd_recepient;
2952
2953 /*
2954 * Get P2P remote port info if possible
2955 */
2956 bzero((caddr_t)&pm, sizeof (pm));
2957
2958 pm.pm_cmd_flags = FC_FCA_PM_READ;
2959 pm.pm_cmd_code = FC_PORT_GET_P2P_INFO;
2960 pm.pm_data_len = sizeof (fc_fca_p2p_info_t);
2961 pm.pm_data_buf = (caddr_t)&p2p_info;
2962
2963 rval = port->fp_fca_tran->fca_port_manage(
2964 port->fp_fca_handle, &pm);
2965
2966 if (rval == FC_SUCCESS) {
2967 port->fp_port_id.port_id = p2p_info.fca_d_id;
2968 port->fp_port_type.port_type = FC_NS_PORT_N;
2969 port->fp_topology = FC_TOP_PT_PT;
2970 port->fp_total_devices = 1;
2971 pd_recepient = fctl_wwn_cmp(
2972 &port->fp_service_params.nport_ww_name,
2973 &p2p_info.pwwn) < 0 ?
2974 PD_PLOGI_RECEPIENT : PD_PLOGI_INITIATOR;
2975 mutex_exit(&port->fp_mutex);
2976 pd = fctl_create_remote_port(port,
2977 &p2p_info.nwwn,
2978 &p2p_info.pwwn,
2979 p2p_info.d_id,
2980 pd_recepient, KM_NOSLEEP);
2981 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup;"
2982 " P2P port=%p pd=%p fp %x pd %x", port, pd,
2983 port->fp_port_id.port_id, p2p_info.d_id);
2984 mutex_enter(&port->fp_mutex);
2985 return (FC_SUCCESS);
2986 }
2987 port->fp_port_type.port_type = FC_NS_PORT_N;
2988 mutex_exit(&port->fp_mutex);
2989 src_id = 0;
2990 }
2991
2992 job->job_counter = 1;
2993 job->job_result = FC_SUCCESS;
2994
2995 if ((rval = fp_fabric_login(port, src_id, job, FP_CMD_PLOGI_DONT_CARE,
2996 KM_SLEEP)) != FC_SUCCESS) {
2997 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN;
2998 job->job_result = FC_FAILURE;
2999 fctl_jobdone(job);
3000
3001 mutex_enter(&port->fp_mutex);
3002 if (port->fp_statec_busy <= 1) {
3003 mutex_exit(&port->fp_mutex);
3004 fp_printf(port, CE_NOTE, FP_LOG_ONLY, rval, NULL,
3005 "Couldn't transport FLOGI");
3006 mutex_enter(&port->fp_mutex);
3007 }
3008 return (FC_FAILURE);
3009 }
3010
3011 fp_jobwait(job);
3012
3013 mutex_enter(&port->fp_mutex);
3014 if (job->job_result == FC_SUCCESS) {
3015 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
3016 mutex_exit(&port->fp_mutex);
3017 fp_ns_init(port, job, KM_SLEEP);
3018 mutex_enter(&port->fp_mutex);
3019 }
3020 } else {
3021 if (state == FC_STATE_LOOP) {
3022 port->fp_topology = FC_TOP_PRIVATE_LOOP;
3023 port->fp_port_id.port_id =
3024 port->fp_lilp_map.lilp_myalpa & 0xFF;
3025 }
3026 }
3027
3028 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup; port=%p, job=%p",
3029 port, job);
3030
3031 return (FC_SUCCESS);
3032 }
3033
3034
3035 /*
3036 * Perform ULP invocations following FC port startup
3037 */
3038 /* ARGSUSED */
3039 static void
3040 fp_startup_done(opaque_t arg, uchar_t result)
3041 {
3042 fc_local_port_t *port = arg;
3043
3044 fp_attach_ulps(port, FC_CMD_ATTACH);
3045
3046 FP_DTRACE(FP_NHEAD1(2, 0), "fp_startup almost complete; port=%p", port);
3047 }
3048
3049
3050 /*
3051 * Perform ULP port attach
3052 */
3053 static void
3054 fp_ulp_port_attach(void *arg)
3055 {
3056 fp_soft_attach_t *att = (fp_soft_attach_t *)arg;
3057 fc_local_port_t *port = att->att_port;
3058
3059 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3060 " ULPs begin; port=%p, cmd=%x", port, att->att_cmd);
3061
3062 fctl_attach_ulps(att->att_port, att->att_cmd, &modlinkage);
3063
3064 if (att->att_need_pm_idle == B_TRUE) {
3065 fctl_idle_port(port);
3066 }
3067
3068 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3069 " ULPs end; port=%p, cmd=%x", port, att->att_cmd);
3070
3071 mutex_enter(&att->att_port->fp_mutex);
3072 att->att_port->fp_ulp_attach = 0;
3073
3074 port->fp_task = port->fp_last_task;
3075 port->fp_last_task = FP_TASK_IDLE;
3076
3077 cv_signal(&att->att_port->fp_attach_cv);
3078
3079 mutex_exit(&att->att_port->fp_mutex);
3080
3081 kmem_free(att, sizeof (fp_soft_attach_t));
3082 }
3083
3084 /*
3085 * Entry point to funnel all requests down to FCAs
3086 */
3087 static int
3088 fp_sendcmd(fc_local_port_t *port, fp_cmd_t *cmd, opaque_t fca_handle)
3089 {
3090 int rval;
3091
3092 mutex_enter(&port->fp_mutex);
3093 if (port->fp_statec_busy > 1 || (cmd->cmd_ulp_pkt != NULL &&
3094 (port->fp_statec_busy || FC_PORT_STATE_MASK(port->fp_state) ==
3095 FC_STATE_OFFLINE))) {
3096 /*
3097 * This means there is more than one state change pending
3098 * at this point in time.  Since they are processed
3099 * serially, any processing of the current one should be
3100 * failed so that we can move on to processing the next.
3101 */
3102 cmd->cmd_pkt.pkt_state = FC_PKT_ELS_IN_PROGRESS;
3103 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE;
3104 if (cmd->cmd_job) {
3105 /*
3106 * A state change that is going to be invalidated
3107 * by another one already in the port driver's queue
3108 * need not go up to all ULPs. This will minimize
3109 * needless processing and ripples in the ULP modules.
3110 */
3111 cmd->cmd_job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
3112 }
3113 mutex_exit(&port->fp_mutex);
3114 return (FC_STATEC_BUSY);
3115 }
3116
3117 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) {
3118 cmd->cmd_pkt.pkt_state = FC_PKT_PORT_OFFLINE;
3119 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE;
3120 mutex_exit(&port->fp_mutex);
3121
3122 return (FC_OFFLINE);
3123 }
3124 mutex_exit(&port->fp_mutex);
3125
3126 rval = cmd->cmd_transport(fca_handle, &cmd->cmd_pkt);
3127 if (rval != FC_SUCCESS) {
3128 if (rval == FC_TRAN_BUSY) {
3129 cmd->cmd_retry_interval = fp_retry_delay;
3130 rval = fp_retry_cmd(&cmd->cmd_pkt);
3131 if (rval == FC_FAILURE) {
3132 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_BSY;
3133 }
3134 }
3135 } else {
3136 mutex_enter(&port->fp_mutex);
3137 port->fp_out_fpcmds++;
3138 mutex_exit(&port->fp_mutex);
3139 }
3140
3141 return (rval);
3142 }
3143
3144
3145 /*
3146 * Each time a timeout kicks in, walk the wait queue and decrement
3147 * the retry_interval; when the retry_interval becomes less than
3148 * or equal to zero, re-transport the command.  If the re-transport
3149 * fails with BUSY, enqueue the command in the wait queue.
3150 *
3151 * In order to prevent looping forever because of commands enqueued
3152 * from within this function itself, save the current tail pointer
3153 * (in cur_tail) and exit the loop after serving this command.
3154 */
3155 static void
3156 fp_resendcmd(void *port_handle)
3157 {
3158 int rval;
3159 fc_local_port_t *port;
3160 fp_cmd_t *cmd;
3161 fp_cmd_t *cur_tail;
3162
3163 port = port_handle;
3164 mutex_enter(&port->fp_mutex);
3165 cur_tail = port->fp_wait_tail;
3166 mutex_exit(&port->fp_mutex);
3167
3168 while ((cmd = fp_deque_cmd(port)) != NULL) {
3169 cmd->cmd_retry_interval -= fp_retry_ticker;
3170 /* Check if we are detaching */
3171 if (port->fp_soft_state &
3172 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS)) {
3173 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
3174 cmd->cmd_pkt.pkt_reason = 0;
3175 fp_iodone(cmd);
3176 } else if (cmd->cmd_retry_interval <= 0) {
3177 rval = cmd->cmd_transport(port->fp_fca_handle,
3178 &cmd->cmd_pkt);
3179
3180 if (rval != FC_SUCCESS) {
3181 if (cmd->cmd_pkt.pkt_state == FC_PKT_TRAN_BSY) {
3182 if (--cmd->cmd_retry_count) {
3183 fp_enque_cmd(port, cmd);
3184 if (cmd == cur_tail) {
3185 break;
3186 }
3187 continue;
3188 }
3189 cmd->cmd_pkt.pkt_state =
3190 FC_PKT_TRAN_BSY;
3191 } else {
3192 cmd->cmd_pkt.pkt_state =
3193 FC_PKT_TRAN_ERROR;
3194 }
3195 cmd->cmd_pkt.pkt_reason = 0;
3196 fp_iodone(cmd);
3197 } else {
3198 mutex_enter(&port->fp_mutex);
3199 port->fp_out_fpcmds++;
3200 mutex_exit(&port->fp_mutex);
3201 }
3202 } else {
3203 fp_enque_cmd(port, cmd);
3204 }
3205
3206 if (cmd == cur_tail) {
3207 break;
3208 }
3209 }
3210
3211 mutex_enter(&port->fp_mutex);
3212 if (port->fp_wait_head) {
3213 timeout_id_t tid;
3214
3215 mutex_exit(&port->fp_mutex);
3216 tid = timeout(fp_resendcmd, (caddr_t)port,
3217 fp_retry_ticks);
3218 mutex_enter(&port->fp_mutex);
3219 port->fp_wait_tid = tid;
3220 } else {
3221 port->fp_wait_tid = NULL;
3222 }
3223 mutex_exit(&port->fp_mutex);
3224 }
3225
3226
3227 /*
3228 * Handle Local, Fabric, N_Port, Transport (whatever that means) BUSY here.
3229 *
3230 * Yes, as you can see below, cmd_retry_count is used here too. That means
3231 * the retries for BUSY are fewer if there were transport failures (transport
3232 * failure means fca_transport failure). The goal is not to exceed the overall
3233 * retry limit set in cmd_retry_count, whatever the reason for the retry may be.
3234 *
3235 * Return Values:
3236 * FC_SUCCESS
3237 * FC_FAILURE
3238 */
3239 static int
3240 fp_retry_cmd(fc_packet_t *pkt)
3241 {
3242 fp_cmd_t *cmd;
3243
3244 cmd = pkt->pkt_ulp_private;
3245
3246 if (--cmd->cmd_retry_count) {
3247 fp_enque_cmd(cmd->cmd_port, cmd);
3248 return (FC_SUCCESS);
3249 } else {
3250 return (FC_FAILURE);
3251 }
3252 }
3253
3254
3255 /*
3256 * Queue up FC packet for deferred retry
3257 */
3258 static void
3259 fp_enque_cmd(fc_local_port_t *port, fp_cmd_t *cmd)
3260 {
3261 timeout_id_t tid;
3262
3263 ASSERT(!MUTEX_HELD(&port->fp_mutex));
3264
3265 #ifdef DEBUG
3266 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, &cmd->cmd_pkt,
3267 "Retrying ELS for %x", cmd->cmd_pkt.pkt_cmd_fhdr.d_id);
3268 #endif
3269
3270 mutex_enter(&port->fp_mutex);
3271 if (port->fp_wait_tail) {
3272 port->fp_wait_tail->cmd_next = cmd;
3273 port->fp_wait_tail = cmd;
3274 } else {
3275 ASSERT(port->fp_wait_head == NULL);
3276 port->fp_wait_head = port->fp_wait_tail = cmd;
3277 if (port->fp_wait_tid == NULL) {
3278 mutex_exit(&port->fp_mutex);
3279 tid = timeout(fp_resendcmd, (caddr_t)port,
3280 fp_retry_ticks);
3281 mutex_enter(&port->fp_mutex);
3282 port->fp_wait_tid = tid;
3283 }
3284 }
3285 mutex_exit(&port->fp_mutex);
3286 }
3287
3288
3289 /*
3290 * Handle all RJT codes
3291 */
3292 static int
3293 fp_handle_reject(fc_packet_t *pkt)
3294 {
3295 int rval = FC_FAILURE;
3296 uchar_t next_class;
3297 fp_cmd_t *cmd;
3298 fc_local_port_t *port;
3299
3300 cmd = pkt->pkt_ulp_private;
3301 port = cmd->cmd_port;
3302
3303 switch (pkt->pkt_state) {
3304 case FC_PKT_FABRIC_RJT:
3305 case FC_PKT_NPORT_RJT:
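/*
 * If the reject reason is that the class of service isn't supported,
 * retry the ELS using the next class supported by the FCA, if any.
 */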
3306 if (pkt->pkt_reason == FC_REASON_CLASS_NOT_SUPP) {
3307 next_class = fp_get_nextclass(cmd->cmd_port,
3308 FC_TRAN_CLASS(pkt->pkt_tran_flags));
3309
3310 if (next_class == FC_TRAN_CLASS_INVALID) {
3311 return (rval);
3312 }
3313 pkt->pkt_tran_flags = FC_TRAN_INTR | next_class;
3314 pkt->pkt_tran_type = FC_PKT_EXCHANGE;
3315
3316 rval = fp_sendcmd(cmd->cmd_port, cmd,
3317 cmd->cmd_port->fp_fca_handle);
3318
3319 if (rval != FC_SUCCESS) {
3320 pkt->pkt_state = FC_PKT_TRAN_ERROR;
3321 }
3322 }
3323 break;
3324
3325 case FC_PKT_LS_RJT:
3326 case FC_PKT_BA_RJT:
3327 if ((pkt->pkt_reason == FC_REASON_LOGICAL_ERROR) ||
3328 (pkt->pkt_reason == FC_REASON_LOGICAL_BSY)) {
3329 cmd->cmd_retry_interval = fp_retry_delay;
3330 rval = fp_retry_cmd(pkt);
3331 }
3332 break;
3333
3334 case FC_PKT_FS_RJT:
3335 if ((pkt->pkt_reason == FC_REASON_FS_LOGICAL_BUSY) ||
3336 ((pkt->pkt_reason == FC_REASON_FS_CMD_UNABLE) &&
3337 (pkt->pkt_expln == 0x00))) {
3338 cmd->cmd_retry_interval = fp_retry_delay;
3339 rval = fp_retry_cmd(pkt);
3340 }
3341 break;
3342
3343 case FC_PKT_LOCAL_RJT:
3344 if (pkt->pkt_reason == FC_REASON_QFULL) {
3345 cmd->cmd_retry_interval = fp_retry_delay;
3346 rval = fp_retry_cmd(pkt);
3347 }
3348 break;
3349
3350 default:
3351 FP_TRACE(FP_NHEAD1(1, 0),
3352 "fp_handle_reject(): Invalid pkt_state");
3353 break;
3354 }
3355
3356 return (rval);
3357 }
3358
3359
3360 /*
3361 * Return the next class of service supported by the FCA
3362 */
3363 static uchar_t
3364 fp_get_nextclass(fc_local_port_t *port, uchar_t cur_class)
3365 {
3366 uchar_t next_class;
3367
3368 ASSERT(!MUTEX_HELD(&port->fp_mutex));
3369
3370 switch (cur_class) {
3371 case FC_TRAN_CLASS_INVALID:
3372 if (port->fp_cos & FC_NS_CLASS1) {
3373 next_class = FC_TRAN_CLASS1;
3374 break;
3375 }
3376 /* FALLTHROUGH */
3377
3378 case FC_TRAN_CLASS1:
3379 if (port->fp_cos & FC_NS_CLASS2) {
3380 next_class = FC_TRAN_CLASS2;
3381 break;
3382 }
3383 /* FALLTHROUGH */
3384
3385 case FC_TRAN_CLASS2:
3386 if (port->fp_cos & FC_NS_CLASS3) {
3387 next_class = FC_TRAN_CLASS3;
3388 break;
3389 }
3390 /* FALLTHROUGH */
3391
3392 case FC_TRAN_CLASS3:
3393 default:
3394 next_class = FC_TRAN_CLASS_INVALID;
3395 break;
3396 }
3397
3398 return (next_class);
3399 }
3400
3401
3402 /*
3403 * Determine if a class of service is supported by the FCA
3404 */
3405 static int
3406 fp_is_class_supported(uint32_t cos, uchar_t tran_class)
3407 {
3408 int rval;
3409
3410 switch (tran_class) {
3411 case FC_TRAN_CLASS1:
3412 if (cos & FC_NS_CLASS1) {
3413 rval = FC_SUCCESS;
3414 } else {
3415 rval = FC_FAILURE;
3416 }
3417 break;
3418
3419 case FC_TRAN_CLASS2:
3420 if (cos & FC_NS_CLASS2) {
3421 rval = FC_SUCCESS;
3422 } else {
3423 rval = FC_FAILURE;
3424 }
3425 break;
3426
3427 case FC_TRAN_CLASS3:
3428 if (cos & FC_NS_CLASS3) {
3429 rval = FC_SUCCESS;
3430 } else {
3431 rval = FC_FAILURE;
3432 }
3433 break;
3434
3435 default:
3436 rval = FC_FAILURE;
3437 break;
3438 }
3439
3440 return (rval);
3441 }
3442
3443
3444 /*
3445 * Dequeue FC packet for retry
3446 */
3447 static fp_cmd_t *
3448 fp_deque_cmd(fc_local_port_t *port)
3449 {
3450 fp_cmd_t *cmd;
3451
3452 ASSERT(!MUTEX_HELD(&port->fp_mutex));
3453
3454 mutex_enter(&port->fp_mutex);
3455
3456 if (port->fp_wait_head == NULL) {
3457 /*
3458 * To avoid races, NULL the fp_wait_tid as
3459 * we are about to exit the timeout thread.
3460 */
3461 port->fp_wait_tid = NULL;
3462 mutex_exit(&port->fp_mutex);
3463 return (NULL);
3464 }
3465
3466 cmd = port->fp_wait_head;
3467 port->fp_wait_head = cmd->cmd_next;
3468 cmd->cmd_next = NULL;
3469
3470 if (port->fp_wait_head == NULL) {
3471 port->fp_wait_tail = NULL;
3472 }
3473 mutex_exit(&port->fp_mutex);
3474
3475 return (cmd);
3476 }
3477
3478
3479 /*
3480 * Wait for job completion
3481 */
3482 static void
3483 fp_jobwait(job_request_t *job)
3484 {
3485 sema_p(&job->job_port_sema);
3486 }
3487
3488
3489 /*
3490 * Convert FC packet state to FC errno
3491 */
3492 int
3493 fp_state_to_rval(uchar_t state)
3494 {
3495 int count;
3496
3497 for (count = 0; count < sizeof (fp_xlat) /
3498 sizeof (fp_xlat[0]); count++) {
3499 if (fp_xlat[count].xlat_state == state) {
3500 return (fp_xlat[count].xlat_rval);
3501 }
3502 }
3503
3504 return (FC_FAILURE);
3505 }
3506
3507
3508 /*
3509 * For synchronous I/O requests, the caller is
3510 * expected to call fctl_jobdone() if necessary.
3511 *
3512 * We want to preserve at least one failure in the
3513 * job_result if one happens.
3514 *
3515 */
3516 static void
3517 fp_iodone(fp_cmd_t *cmd)
3518 {
3519 fc_packet_t *ulp_pkt = cmd->cmd_ulp_pkt;
3520 job_request_t *job = cmd->cmd_job;
3521 fc_remote_port_t *pd = cmd->cmd_pkt.pkt_pd;
3522
3523 ASSERT(job != NULL);
3524 ASSERT(cmd->cmd_port != NULL);
3525 ASSERT(&cmd->cmd_pkt != NULL);
3526
3527 mutex_enter(&job->job_mutex);
3528 if (job->job_result == FC_SUCCESS) {
3529 job->job_result = fp_state_to_rval(cmd->cmd_pkt.pkt_state);
3530 }
3531 mutex_exit(&job->job_mutex);
3532
3533 if (pd) {
3534 mutex_enter(&pd->pd_mutex);
3535 pd->pd_flags = PD_IDLE;
3536 mutex_exit(&pd->pd_mutex);
3537 }
3538
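/*
 * If this command carried a ULP packet and was marked to delete the
 * device on error, tear down the remote port (and possibly its node)
 * that was created on the ULP's behalf before completing the packet.
 */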
3539 if (ulp_pkt) {
3540 if (pd && cmd->cmd_flags & FP_CMD_DELDEV_ON_ERROR &&
3541 FP_IS_PKT_ERROR(ulp_pkt)) {
3542 fc_local_port_t *port;
3543 fc_remote_node_t *node;
3544
3545 port = cmd->cmd_port;
3546
3547 mutex_enter(&pd->pd_mutex);
3548 pd->pd_state = PORT_DEVICE_INVALID;
3549 pd->pd_ref_count--;
3550 node = pd->pd_remote_nodep;
3551 mutex_exit(&pd->pd_mutex);
3552
3553 ASSERT(node != NULL);
3554 ASSERT(port != NULL);
3555
3556 if (fctl_destroy_remote_port(port, pd) == 0) {
3557 fctl_destroy_remote_node(node);
3558 }
3559
3560 ulp_pkt->pkt_pd = NULL;
3561 }
3562
3563 ulp_pkt->pkt_comp(ulp_pkt);
3564 }
3565
3566 fp_free_pkt(cmd);
3567 fp_jobdone(job);
3568 }
3569
3570
3571 /*
3572 * Job completion handler
3573 */
3574 static void
3575 fp_jobdone(job_request_t *job)
3576 {
3577 mutex_enter(&job->job_mutex);
3578 ASSERT(job->job_counter > 0);
3579
3580 if (--job->job_counter != 0) {
3581 mutex_exit(&job->job_mutex);
3582 return;
3583 }
3584
3585 if (job->job_ulp_pkts) {
3586 ASSERT(job->job_ulp_listlen > 0);
3587 kmem_free(job->job_ulp_pkts,
3588 sizeof (fc_packet_t *) * job->job_ulp_listlen);
3589 }
3590
3591 if (job->job_flags & JOB_TYPE_FP_ASYNC) {
3592 mutex_exit(&job->job_mutex);
3593 fctl_jobdone(job);
3594 } else {
3595 mutex_exit(&job->job_mutex);
3596 sema_v(&job->job_port_sema);
3597 }
3598 }
3599
3600
3601 /*
3602 * Try to perform shutdown of a port during a detach. No return
3603 * value since the detach should not fail because the port shutdown
3604 * failed.
3605 */
3606 static void
3607 fp_port_shutdown(fc_local_port_t *port, job_request_t *job)
3608 {
3609 int index;
3610 int count;
3611 int flags;
3612 fp_cmd_t *cmd;
3613 struct pwwn_hash *head;
3614 fc_remote_port_t *pd;
3615
3616 ASSERT(MUTEX_HELD(&port->fp_mutex));
3617
3618 job->job_result = FC_SUCCESS;
3619
3620 if (port->fp_taskq) {
3621 /*
3622 * We must release the mutex here to ensure that other
3623 * potential jobs can complete their processing. Many
3624 * also need this mutex.
3625 */
3626 mutex_exit(&port->fp_mutex);
3627 taskq_wait(port->fp_taskq);
3628 mutex_enter(&port->fp_mutex);
3629 }
3630
3631 if (port->fp_offline_tid) {
3632 timeout_id_t tid;
3633
3634 tid = port->fp_offline_tid;
3635 port->fp_offline_tid = NULL;
3636 mutex_exit(&port->fp_mutex);
3637 (void) untimeout(tid);
3638 mutex_enter(&port->fp_mutex);
3639 }
3640
3641 if (port->fp_wait_tid) {
3642 timeout_id_t tid;
3643
3644 tid = port->fp_wait_tid;
3645 port->fp_wait_tid = NULL;
3646 mutex_exit(&port->fp_mutex);
3647 (void) untimeout(tid);
3648 } else {
3649 mutex_exit(&port->fp_mutex);
3650 }
3651
3652 /*
3653 * While we cancel the timeout, let's also return the
3654 * outstanding requests back to the callers.
3655 */
3656 while ((cmd = fp_deque_cmd(port)) != NULL) {
3657 ASSERT(cmd->cmd_job != NULL);
3658 cmd->cmd_job->job_result = FC_OFFLINE;
3659 fp_iodone(cmd);
3660 }
3661
3662 /*
3663 * Gracefully LOGO with all the devices logged in.
3664 */
3665 mutex_enter(&port->fp_mutex);
3666
3667 for (count = index = 0; index < pwwn_table_size; index++) {
3668 head = &port->fp_pwwn_table[index];
3669 pd = head->pwwn_head;
3670 while (pd != NULL) {
3671 mutex_enter(&pd->pd_mutex);
3672 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
3673 count++;
3674 }
3675 mutex_exit(&pd->pd_mutex);
3676 pd = pd->pd_wwn_hnext;
3677 }
3678 }
3679
3680 if (job->job_flags & JOB_TYPE_FP_ASYNC) {
3681 flags = job->job_flags;
3682 job->job_flags &= ~JOB_TYPE_FP_ASYNC;
3683 } else {
3684 flags = 0;
3685 }
3686 if (count) {
3687 job->job_counter = count;
3688
3689 for (index = 0; index < pwwn_table_size; index++) {
3690 head = &port->fp_pwwn_table[index];
3691 pd = head->pwwn_head;
3692 while (pd != NULL) {
3693 mutex_enter(&pd->pd_mutex);
3694 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
3695 ASSERT(pd->pd_login_count > 0);
3696 /*
3697 * Force the counter to ONE in order
3698 * for us to really send the LOGO ELS.
3699 */
3700 pd->pd_login_count = 1;
3701 mutex_exit(&pd->pd_mutex);
3702 mutex_exit(&port->fp_mutex);
3703 (void) fp_logout(port, pd, job);
3704 mutex_enter(&port->fp_mutex);
3705 } else {
3706 mutex_exit(&pd->pd_mutex);
3707 }
3708 pd = pd->pd_wwn_hnext;
3709 }
3710 }
3711 mutex_exit(&port->fp_mutex);
3712 fp_jobwait(job);
3713 } else {
3714 mutex_exit(&port->fp_mutex);
3715 }
3716
3717 if (job->job_result != FC_SUCCESS) {
3718 FP_TRACE(FP_NHEAD1(9, 0),
3719 "Can't logout all devices. Proceeding with"
3720 " port shutdown");
3721 job->job_result = FC_SUCCESS;
3722 }
3723
3724 fctl_destroy_all_remote_ports(port);
3725
3726 mutex_enter(&port->fp_mutex);
3727 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
3728 mutex_exit(&port->fp_mutex);
3729 fp_ns_fini(port, job);
3730 } else {
3731 mutex_exit(&port->fp_mutex);
3732 }
3733
3734 if (flags) {
3735 job->job_flags = flags;
3736 }
3737
3738 mutex_enter(&port->fp_mutex);
3739
3740 }
3741
3742
3743 /*
3744 * Build the port driver's data structures based on the AL_PA list
3745 */
3746 static void
3747 fp_get_loopmap(fc_local_port_t *port, job_request_t *job)
3748 {
3749 int rval;
3750 int flag;
3751 int count;
3752 uint32_t d_id;
3753 fc_remote_port_t *pd;
3754 fc_lilpmap_t *lilp_map;
3755
3756 ASSERT(MUTEX_HELD(&port->fp_mutex));
3757
3758 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) {
3759 job->job_result = FC_OFFLINE;
3760 mutex_exit(&port->fp_mutex);
3761 fp_jobdone(job);
3762 mutex_enter(&port->fp_mutex);
3763 return;
3764 }
3765
3766 if (port->fp_lilp_map.lilp_length == 0) {
3767 mutex_exit(&port->fp_mutex);
3768 job->job_result = FC_NO_MAP;
3769 fp_jobdone(job);
3770 mutex_enter(&port->fp_mutex);
3771 return;
3772 }
3773 mutex_exit(&port->fp_mutex);
3774
3775 lilp_map = &port->fp_lilp_map;
3776 job->job_counter = lilp_map->lilp_length;
3777
3778 if (job->job_code == JOB_PORT_GETMAP_PLOGI_ALL) {
3779 flag = FP_CMD_PLOGI_RETAIN;
3780 } else {
3781 flag = FP_CMD_PLOGI_DONT_CARE;
3782 }
3783
3784 for (count = 0; count < lilp_map->lilp_length; count++) {
3785 d_id = lilp_map->lilp_alpalist[count];
3786
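/* Skip our own AL_PA; there is no need to log in to ourselves. */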
3787 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) {
3788 fp_jobdone(job);
3789 continue;
3790 }
3791
3792 pd = fctl_get_remote_port_by_did(port, d_id);
3793 if (pd) {
3794 mutex_enter(&pd->pd_mutex);
3795 if (flag == FP_CMD_PLOGI_DONT_CARE ||
3796 pd->pd_state == PORT_DEVICE_LOGGED_IN) {
3797 mutex_exit(&pd->pd_mutex);
3798 fp_jobdone(job);
3799 continue;
3800 }
3801 mutex_exit(&pd->pd_mutex);
3802 }
3803
3804 rval = fp_port_login(port, d_id, job, flag,
3805 KM_SLEEP, pd, NULL);
3806 if (rval != FC_SUCCESS) {
3807 fp_jobdone(job);
3808 }
3809 }
3810
3811 mutex_enter(&port->fp_mutex);
3812 }
3813
3814
3815 /*
3816 * Perform loop ONLINE processing
3817 */
3818 static void
3819 fp_loop_online(fc_local_port_t *port, job_request_t *job, int orphan)
3820 {
3821 int count;
3822 int rval;
3823 uint32_t d_id;
3824 uint32_t listlen;
3825 fc_lilpmap_t *lilp_map;
3826 fc_remote_port_t *pd;
3827 fc_portmap_t *changelist;
3828
3829 ASSERT(!MUTEX_HELD(&port->fp_mutex));
3830
3831 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online begin; port=%p, job=%p",
3832 port, job);
3833
3834 lilp_map = &port->fp_lilp_map;
3835
3836 if (lilp_map->lilp_length) {
3837 mutex_enter(&port->fp_mutex);
3838 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) {
3839 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET;
3840 mutex_exit(&port->fp_mutex);
3841 delay(drv_usectohz(PLDA_RR_TOV * 1000 * 1000));
3842 } else {
3843 mutex_exit(&port->fp_mutex);
3844 }
3845
3846 job->job_counter = lilp_map->lilp_length;
3847
3848 for (count = 0; count < lilp_map->lilp_length; count++) {
3849 d_id = lilp_map->lilp_alpalist[count];
3850
3851 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) {
3852 fp_jobdone(job);
3853 continue;
3854 }
3855
3856 pd = fctl_get_remote_port_by_did(port, d_id);
3857 if (pd != NULL) {
3858 #ifdef DEBUG
3859 mutex_enter(&pd->pd_mutex);
3860 if (pd->pd_recepient == PD_PLOGI_INITIATOR) {
3861 ASSERT(pd->pd_type != PORT_DEVICE_OLD);
3862 }
3863 mutex_exit(&pd->pd_mutex);
3864 #endif
3865 fp_jobdone(job);
3866 continue;
3867 }
3868
3869 rval = fp_port_login(port, d_id, job,
3870 FP_CMD_PLOGI_DONT_CARE, KM_SLEEP, pd, NULL);
3871
3872 if (rval != FC_SUCCESS) {
3873 fp_jobdone(job);
3874 }
3875 }
3876 fp_jobwait(job);
3877 }
3878 listlen = 0;
3879 changelist = NULL;
3880
3881 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) {
3882 mutex_enter(&port->fp_mutex);
3883 ASSERT(port->fp_statec_busy > 0);
3884 if (port->fp_statec_busy == 1) {
3885 mutex_exit(&port->fp_mutex);
3886 fctl_fillout_map(port, &changelist, &listlen,
3887 1, 0, orphan);
3888
3889 mutex_enter(&port->fp_mutex);
3890 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) {
3891 ASSERT(port->fp_total_devices == 0);
3892 port->fp_total_devices = port->fp_dev_count;
3893 }
3894 } else {
3895 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
3896 }
3897 mutex_exit(&port->fp_mutex);
3898 }
3899
3900 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) {
3901 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist,
3902 listlen, listlen, KM_SLEEP);
3903 } else {
3904 mutex_enter(&port->fp_mutex);
3905 if (--port->fp_statec_busy == 0) {
3906 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
3907 }
3908 ASSERT(changelist == NULL && listlen == 0);
3909 mutex_exit(&port->fp_mutex);
3910 }
3911
3912 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online end; port=%p, job=%p",
3913 port, job);
3914 }
3915
3916
3917 /*
3918 * Get an Arbitrated Loop map from the underlying FCA
3919 */
3920 static int
3921 fp_get_lilpmap(fc_local_port_t *port, fc_lilpmap_t *lilp_map)
3922 {
3923 int rval;
3924
3925 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap Begin; port=%p, map=%p",
3926 port, lilp_map);
3927
3928 bzero((caddr_t)lilp_map, sizeof (fc_lilpmap_t));
3929 rval = port->fp_fca_tran->fca_getmap(port->fp_fca_handle, lilp_map);
3930 lilp_map->lilp_magic &= 0xFF; /* Ignore upper byte */
3931
3932 if (rval != FC_SUCCESS) {
3933 rval = FC_NO_MAP;
3934 } else if (lilp_map->lilp_length == 0 &&
3935 (lilp_map->lilp_magic >= MAGIC_LISM &&
3936 lilp_map->lilp_magic < MAGIC_LIRP)) {
3937 uchar_t lilp_length;
3938
3939 /*
3940 * Since the map length is zero, provide all
3941 * the valid AL_PAs for NL_Port discovery.
3942 */
3943 lilp_length = sizeof (fp_valid_alpas) /
3944 sizeof (fp_valid_alpas[0]);
3945 lilp_map->lilp_length = lilp_length;
3946 bcopy(fp_valid_alpas, lilp_map->lilp_alpalist,
3947 lilp_length);
3948 } else {
3949 rval = fp_validate_lilp_map(lilp_map);
3950
3951 if (rval == FC_SUCCESS) {
3952 mutex_enter(&port->fp_mutex);
3953 port->fp_total_devices = lilp_map->lilp_length - 1;
3954 mutex_exit(&port->fp_mutex);
3955 }
3956 }
3957
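/*
 * On the first bad map seen on this link, remember it and ask the FCA
 * to reset its core.
 */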
3958 mutex_enter(&port->fp_mutex);
3959 if (rval != FC_SUCCESS && !(port->fp_soft_state & FP_SOFT_BAD_LINK)) {
3960 port->fp_soft_state |= FP_SOFT_BAD_LINK;
3961 mutex_exit(&port->fp_mutex);
3962
3963 if (port->fp_fca_tran->fca_reset(port->fp_fca_handle,
3964 FC_FCA_RESET_CORE) != FC_SUCCESS) {
3965 FP_TRACE(FP_NHEAD1(9, 0),
3966 "FCA reset failed after LILP map was found"
3967 " to be invalid");
3968 }
3969 } else if (rval == FC_SUCCESS) {
3970 port->fp_soft_state &= ~FP_SOFT_BAD_LINK;
3971 mutex_exit(&port->fp_mutex);
3972 } else {
3973 mutex_exit(&port->fp_mutex);
3974 }
3975
3976 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap End; port=%p, map=%p", port,
3977 lilp_map);
3978
3979 return (rval);
3980 }
3981
3982
3983 /*
3984 * Perform Fabric Login:
3985 *
3986 * Return Values:
3987 * FC_SUCCESS
3988 * FC_FAILURE
3989 * FC_NOMEM
3990 * FC_TRANSPORT_ERROR
3991 * and a lot others defined in fc_error.h
3992 */
3993 static int
3994 fp_fabric_login(fc_local_port_t *port, uint32_t s_id, job_request_t *job,
3995 int flag, int sleep)
3996 {
3997 int rval;
3998 fp_cmd_t *cmd;
3999 uchar_t class;
4000
4001 ASSERT(!MUTEX_HELD(&port->fp_mutex));
4002
4003 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login Begin; port=%p, job=%p",
4004 port, job);
4005
4006 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID);
4007 if (class == FC_TRAN_CLASS_INVALID) {
4008 return (FC_ELS_BAD);
4009 }
4010
4011 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t),
4012 sizeof (la_els_logi_t), sleep, NULL);
4013 if (cmd == NULL) {
4014 return (FC_NOMEM);
4015 }
4016
4017 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class;
4018 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
4019 cmd->cmd_flags = flag;
4020 cmd->cmd_retry_count = fp_retry_count;
4021 cmd->cmd_ulp_pkt = NULL;
4022
4023 fp_xlogi_init(port, cmd, s_id, 0xFFFFFE, fp_flogi_intr,
4024 job, LA_ELS_FLOGI);
4025
4026 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
4027 if (rval != FC_SUCCESS) {
4028 fp_free_pkt(cmd);
4029 }
4030
4031 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login End; port=%p, job=%p",
4032 port, job);
4033
4034 return (rval);
4035 }
4036
4037
4038 /*
4039 * In some scenarios, such as the private loop device discovery period,
4040 * the fc_remote_port_t data structure isn't allocated yet; the allocation
4041 * is done when the PLOGI is successful. In other scenarios,
4042 * such as Fabric topology, the fc_remote_port_t is already created
4043 * and initialized with appropriate values (as the name server provides
4044 * them).
4045 */
4046 static int
4047 fp_port_login(fc_local_port_t *port, uint32_t d_id, job_request_t *job,
4048 int cmd_flag, int sleep, fc_remote_port_t *pd, fc_packet_t *ulp_pkt)
4049 {
4050 uchar_t class;
4051 fp_cmd_t *cmd;
4052 uint32_t src_id;
4053 fc_remote_port_t *tmp_pd;
4054 int relogin;
4055 int found = 0;
4056
4057 #ifdef DEBUG
4058 if (pd == NULL) {
4059 ASSERT(fctl_get_remote_port_by_did(port, d_id) == NULL);
4060 }
4061 #endif
4062 ASSERT(job->job_counter > 0);
4063
4064 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID);
4065 if (class == FC_TRAN_CLASS_INVALID) {
4066 return (FC_ELS_BAD);
4067 }
4068
4069 mutex_enter(&port->fp_mutex);
4070 tmp_pd = fctl_lookup_pd_by_did(port, d_id);
4071 mutex_exit(&port->fp_mutex);
4072
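/*
 * If the remote port already exists with relogin disabled and is not
 * marked logged out, skip the PLOGI and just validate it with an ADISC.
 */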
4073 relogin = 1;
4074 if (tmp_pd) {
4075 mutex_enter(&tmp_pd->pd_mutex);
4076 if ((tmp_pd->pd_aux_flags & PD_DISABLE_RELOGIN) &&
4077 !(tmp_pd->pd_aux_flags & PD_LOGGED_OUT)) {
4078 tmp_pd->pd_state = PORT_DEVICE_LOGGED_IN;
4079 relogin = 0;
4080 }
4081 mutex_exit(&tmp_pd->pd_mutex);
4082 }
4083
4084 if (!relogin) {
4085 mutex_enter(&tmp_pd->pd_mutex);
4086 if (tmp_pd->pd_state == PORT_DEVICE_LOGGED_IN) {
4087 cmd_flag |= FP_CMD_PLOGI_RETAIN;
4088 }
4089 mutex_exit(&tmp_pd->pd_mutex);
4090
4091 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t),
4092 sizeof (la_els_adisc_t), sleep, tmp_pd);
4093 if (cmd == NULL) {
4094 return (FC_NOMEM);
4095 }
4096
4097 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class;
4098 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
4099 cmd->cmd_flags = cmd_flag;
4100 cmd->cmd_retry_count = fp_retry_count;
4101 cmd->cmd_ulp_pkt = ulp_pkt;
4102
4103 mutex_enter(&port->fp_mutex);
4104 mutex_enter(&tmp_pd->pd_mutex);
4105 fp_adisc_init(cmd, job);
4106 mutex_exit(&tmp_pd->pd_mutex);
4107 mutex_exit(&port->fp_mutex);
4108
4109 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_adisc_t);
4110 cmd->cmd_pkt.pkt_rsplen = sizeof (la_els_adisc_t);
4111
4112 } else {
4113 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t),
4114 sizeof (la_els_logi_t), sleep, pd);
4115 if (cmd == NULL) {
4116 return (FC_NOMEM);
4117 }
4118
4119 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class;
4120 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
4121 cmd->cmd_flags = cmd_flag;
4122 cmd->cmd_retry_count = fp_retry_count;
4123 cmd->cmd_ulp_pkt = ulp_pkt;
4124
4125 mutex_enter(&port->fp_mutex);
4126 src_id = port->fp_port_id.port_id;
4127 mutex_exit(&port->fp_mutex);
4128
4129 fp_xlogi_init(port, cmd, src_id, d_id, fp_plogi_intr,
4130 job, LA_ELS_PLOGI);
4131 }
4132
4133 if (pd) {
4134 mutex_enter(&pd->pd_mutex);
4135 pd->pd_flags = PD_ELS_IN_PROGRESS;
4136 mutex_exit(&pd->pd_mutex);
4137 }
4138
4139 /* NPIV check to make sure we don't log into ourselves */
4140 if (relogin &&
4141 ((port->fp_npiv_type == FC_NPIV_PORT) ||
4142 (port->fp_npiv_flag == FC_NPIV_ENABLE))) {
4143 if ((d_id & 0xffff00) ==
4144 (port->fp_port_id.port_id & 0xffff00)) {
4145 found = 1;
4146 }
4147 }
4148
4149 if (found ||
4150 (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS)) {
4151 if (found) {
4152 fc_packet_t *pkt = &cmd->cmd_pkt;
4153 pkt->pkt_state = FC_PKT_NPORT_RJT;
4154 }
4155 if (pd) {
4156 mutex_enter(&pd->pd_mutex);
4157 pd->pd_flags = PD_IDLE;
4158 mutex_exit(&pd->pd_mutex);
4159 }
4160
4161 if (ulp_pkt) {
4162 fc_packet_t *pkt = &cmd->cmd_pkt;
4163
4164 ulp_pkt->pkt_state = pkt->pkt_state;
4165 ulp_pkt->pkt_reason = pkt->pkt_reason;
4166 ulp_pkt->pkt_action = pkt->pkt_action;
4167 ulp_pkt->pkt_expln = pkt->pkt_expln;
4168 }
4169
4170 fp_iodone(cmd);
4171 }
4172
4173 return (FC_SUCCESS);
4174 }
4175
4176
4177 /*
4178 * Register the LOGIN parameters with a port device
4179 */
4180 static void
4181 fp_register_login(ddi_acc_handle_t *handle, fc_remote_port_t *pd,
4182 la_els_logi_t *acc, uchar_t class)
4183 {
4184 fc_remote_node_t *node;
4185
4186 ASSERT(pd != NULL);
4187
4188 mutex_enter(&pd->pd_mutex);
4189 node = pd->pd_remote_nodep;
4190 if (pd->pd_login_count == 0) {
4191 pd->pd_login_count++;
4192 }
4193
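/*
 * When an access handle is supplied the ACC payload sits in a
 * DMA-mapped response buffer, so the service parameters are copied
 * out with FC_GET_RSP; otherwise they are copied directly.
 */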
4194 if (handle) {
4195 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_csp,
4196 (uint8_t *)&acc->common_service,
4197 sizeof (acc->common_service), DDI_DEV_AUTOINCR);
4198 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_clsp1,
4199 (uint8_t *)&acc->class_1, sizeof (acc->class_1),
4200 DDI_DEV_AUTOINCR);
4201 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_clsp2,
4202 (uint8_t *)&acc->class_2, sizeof (acc->class_2),
4203 DDI_DEV_AUTOINCR);
4204 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_clsp3,
4205 (uint8_t *)&acc->class_3, sizeof (acc->class_3),
4206 DDI_DEV_AUTOINCR);
4207 } else {
4208 pd->pd_csp = acc->common_service;
4209 pd->pd_clsp1 = acc->class_1;
4210 pd->pd_clsp2 = acc->class_2;
4211 pd->pd_clsp3 = acc->class_3;
4212 }
4213
4214 pd->pd_state = PORT_DEVICE_LOGGED_IN;
4215 pd->pd_login_class = class;
4216 mutex_exit(&pd->pd_mutex);
4217
4218 #ifndef __lock_lint
4219 ASSERT(fctl_get_remote_port_by_did(pd->pd_port,
4220 pd->pd_port_id.port_id) == pd);
4221 #endif
4222
4223 mutex_enter(&node->fd_mutex);
4224 if (handle) {
4225 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)node->fd_vv,
4226 (uint8_t *)acc->vendor_version, sizeof (node->fd_vv),
4227 DDI_DEV_AUTOINCR);
4228 } else {
4229 bcopy(acc->vendor_version, node->fd_vv, sizeof (node->fd_vv));
4230 }
4231 mutex_exit(&node->fd_mutex);
4232 }
4233
4234
4235 /*
4236 * Mark the remote port as OFFLINE
4237 */
4238 static void
4239 fp_remote_port_offline(fc_remote_port_t *pd)
4240 {
4241 ASSERT(MUTEX_HELD(&pd->pd_mutex));
4242 if (pd->pd_login_count &&
4243 ((pd->pd_aux_flags & PD_DISABLE_RELOGIN) == 0)) {
4244 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service));
4245 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param));
4246 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param));
4247 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param));
4248 pd->pd_login_class = 0;
4249 }
4250 pd->pd_type = PORT_DEVICE_OLD;
4251 pd->pd_flags = PD_IDLE;
4252 fctl_tc_reset(&pd->pd_logo_tc);
4253 }
4254
4255
4256 /*
4257 * Deregistration of a port device
4258 */
4259 static void
4260 fp_unregister_login(fc_remote_port_t *pd)
4261 {
4262 fc_remote_node_t *node;
4263
4264 ASSERT(pd != NULL);
4265
4266 mutex_enter(&pd->pd_mutex);
4267 pd->pd_login_count = 0;
4268 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service));
4269 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param));
4270 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param));
4271 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param));
4272
4273 pd->pd_state = PORT_DEVICE_VALID;
4274 pd->pd_login_class = 0;
4275 node = pd->pd_remote_nodep;
4276 mutex_exit(&pd->pd_mutex);
4277
4278 mutex_enter(&node->fd_mutex);
4279 bzero(node->fd_vv, sizeof (node->fd_vv));
4280 mutex_exit(&node->fd_mutex);
4281 }
4282
4283
4284 /*
4285 * Handle OFFLINE state of an FCA port
4286 */
4287 static void
4288 fp_port_offline(fc_local_port_t *port, int notify)
4289 {
4290 int index;
4291 int statec;
4292 timeout_id_t tid;
4293 struct pwwn_hash *head;
4294 fc_remote_port_t *pd;
4295
4296 ASSERT(MUTEX_HELD(&port->fp_mutex));
4297
4298 for (index = 0; index < pwwn_table_size; index++) {
4299 head = &port->fp_pwwn_table[index];
4300 pd = head->pwwn_head;
4301 while (pd != NULL) {
4302 mutex_enter(&pd->pd_mutex);
4303 fp_remote_port_offline(pd);
4304 fctl_delist_did_table(port, pd);
4305 mutex_exit(&pd->pd_mutex);
4306 pd = pd->pd_wwn_hnext;
4307 }
4308 }
4309 port->fp_total_devices = 0;
4310
4311 statec = 0;
4312 if (notify) {
4313 /*
4314 * Decrement the statec busy counter as we
4315 * are almost done with handling the state
4316 * change
4317 */
4318 ASSERT(port->fp_statec_busy > 0);
4319 if (--port->fp_statec_busy == 0) {
4320 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
4321 }
4322 mutex_exit(&port->fp_mutex);
4323 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, NULL,
4324 0, 0, KM_SLEEP);
4325 mutex_enter(&port->fp_mutex);
4326
4327 if (port->fp_statec_busy) {
4328 statec++;
4329 }
4330 } else if (port->fp_statec_busy > 1) {
4331 statec++;
4332 }
4333
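/*
 * Cancel any offline timer that is already pending; if no further
 * state change is in progress, arm a fresh one so fp_offline_timeout()
 * eventually notifies ULPs about the offlined devices.
 */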
4334 if ((tid = port->fp_offline_tid) != NULL) {
4335 mutex_exit(&port->fp_mutex);
4336 (void) untimeout(tid);
4337 mutex_enter(&port->fp_mutex);
4338 }
4339
4340 if (!statec) {
4341 port->fp_offline_tid = timeout(fp_offline_timeout,
4342 (caddr_t)port, fp_offline_ticks);
4343 }
4344 }
4345
4346
4347 /*
4348 * Offline devices and send up a state change notification to ULPs
4349 */
4350 static void
4351 fp_offline_timeout(void *port_handle)
4352 {
4353 int ret;
4354 fc_local_port_t *port = port_handle;
4355 uint32_t listlen = 0;
4356 fc_portmap_t *changelist = NULL;
4357
4358 mutex_enter(&port->fp_mutex);
4359
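/*
 * Nothing to do if the port has come back online, is detaching,
 * suspended or powered down, has no devices left, or another state
 * change is already being handled.
 */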
4360 if ((FC_PORT_STATE_MASK(port->fp_state) != FC_STATE_OFFLINE) ||
4361 (port->fp_soft_state &
4362 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) ||
4363 port->fp_dev_count == 0 || port->fp_statec_busy) {
4364 port->fp_offline_tid = NULL;
4365 mutex_exit(&port->fp_mutex);
4366 return;
4367 }
4368
4369 mutex_exit(&port->fp_mutex);
4370
4371 FP_TRACE(FP_NHEAD2(9, 0), "OFFLINE timeout");
4372
4373 if (port->fp_options & FP_CORE_ON_OFFLINE_TIMEOUT) {
4374 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle,
4375 FC_FCA_CORE)) != FC_SUCCESS) {
4376 FP_TRACE(FP_NHEAD1(9, ret),
4377 "Failed to force adapter dump");
4378 } else {
4379 FP_TRACE(FP_NHEAD1(9, 0),
4380 "Forced adapter dump successfully");
4381 }
4382 } else if (port->fp_options & FP_RESET_CORE_ON_OFFLINE_TIMEOUT) {
4383 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle,
4384 FC_FCA_RESET_CORE)) != FC_SUCCESS) {
4385 FP_TRACE(FP_NHEAD1(9, ret),
4386 "Failed to force adapter dump and reset");
4387 } else {
4388 FP_TRACE(FP_NHEAD1(9, 0),
4389 "Forced adapter dump and reset successfully");
4390 }
4391 }
4392
4393 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0);
4394 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, changelist,
4395 listlen, listlen, KM_SLEEP);
4396
4397 mutex_enter(&port->fp_mutex);
4398 port->fp_offline_tid = NULL;
4399 mutex_exit(&port->fp_mutex);
4400 }
4401
4402
4403 /*
4404 * Perform general purpose ELS request initialization
4405 */
4406 static void
4407 fp_els_init(fp_cmd_t *cmd, uint32_t s_id, uint32_t d_id,
4408 void (*comp) (), job_request_t *job)
4409 {
4410 fc_packet_t *pkt;
4411
4412 pkt = &cmd->cmd_pkt;
4413 cmd->cmd_job = job;
4414
4415 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_ELS_REQ;
4416 pkt->pkt_cmd_fhdr.d_id = d_id;
4417 pkt->pkt_cmd_fhdr.s_id = s_id;
4418 pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
4419 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
4420 pkt->pkt_cmd_fhdr.seq_id = 0;
4421 pkt->pkt_cmd_fhdr.df_ctl = 0;
4422 pkt->pkt_cmd_fhdr.seq_cnt = 0;
4423 pkt->pkt_cmd_fhdr.ox_id = 0xffff;
4424 pkt->pkt_cmd_fhdr.rx_id = 0xffff;
4425 pkt->pkt_cmd_fhdr.ro = 0;
4426 pkt->pkt_cmd_fhdr.rsvd = 0;
4427 pkt->pkt_comp = comp;
4428 pkt->pkt_timeout = FP_ELS_TIMEOUT;
4429 }
4430
4431
4432 /*
4433 * Initialize PLOGI/FLOGI ELS request
4434 */
4435 static void
4436 fp_xlogi_init(fc_local_port_t *port, fp_cmd_t *cmd, uint32_t s_id,
4437 uint32_t d_id, void (*intr) (), job_request_t *job, uchar_t ls_code)
4438 {
4439 ls_code_t payload;
4440
4441 fp_els_init(cmd, s_id, d_id, intr, job);
4442 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
4443
4444 payload.ls_code = ls_code;
4445 payload.mbz = 0;
4446
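/*
 * The xLOGI payload is this port's service parameters with the ELS
 * command code written over the first word.
 */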
4447 FC_SET_CMD(port, cmd->cmd_pkt.pkt_cmd_acc,
4448 (uint8_t *)&port->fp_service_params,
4449 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (port->fp_service_params),
4450 DDI_DEV_AUTOINCR);
4451
4452 FC_SET_CMD(port, cmd->cmd_pkt.pkt_cmd_acc, (uint8_t *)&payload,
4453 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (payload),
4454 DDI_DEV_AUTOINCR);
4455 }
4456
4457
4458 /*
4459 * Initialize LOGO ELS request
4460 */
4461 static void
4462 fp_logo_init(fc_remote_port_t *pd, fp_cmd_t *cmd, job_request_t *job)
4463 {
4464 fc_local_port_t *port;
4465 fc_packet_t *pkt;
4466 la_els_logo_t payload;
4467
4468 port = pd->pd_port;
4469 pkt = &cmd->cmd_pkt;
4470 ASSERT(MUTEX_HELD(&port->fp_mutex));
4471 ASSERT(MUTEX_HELD(&pd->pd_mutex));
4472
4473 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id,
4474 fp_logo_intr, job);
4475
4476 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
4477
4478 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
4479 pkt->pkt_tran_type = FC_PKT_EXCHANGE;
4480
4481 payload.ls_code.ls_code = LA_ELS_LOGO;
4482 payload.ls_code.mbz = 0;
4483 payload.nport_ww_name = port->fp_service_params.nport_ww_name;
4484 payload.nport_id = port->fp_port_id;
4485
4486 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
4487 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
4488 }
4489
4490 /*
4491 * Initialize RNID ELS request
4492 */
4493 static void
4494 fp_rnid_init(fp_cmd_t *cmd, uint16_t flag, job_request_t *job)
4495 {
4496 fc_local_port_t *port;
4497 fc_packet_t *pkt;
4498 la_els_rnid_t payload;
4499 fc_remote_port_t *pd;
4500
4501 pkt = &cmd->cmd_pkt;
4502 pd = pkt->pkt_pd;
4503 port = pd->pd_port;
4504
4505 ASSERT(MUTEX_HELD(&port->fp_mutex));
4506 ASSERT(MUTEX_HELD(&pd->pd_mutex));
4507
4508 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id,
4509 fp_rnid_intr, job);
4510
4511 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
4512 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
4513 pkt->pkt_tran_type = FC_PKT_EXCHANGE;
4514
4515 payload.ls_code.ls_code = LA_ELS_RNID;
4516 payload.ls_code.mbz = 0;
4517 payload.data_format = flag;
4518
4519 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
4520 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
4521 }
4522
4523 /*
4524 * Initialize RLS ELS request
4525 */
4526 static void
4527 fp_rls_init(fp_cmd_t *cmd, job_request_t *job)
4528 {
4529 fc_local_port_t *port;
4530 fc_packet_t *pkt;
4531 la_els_rls_t payload;
4532 fc_remote_port_t *pd;
4533
4534 pkt = &cmd->cmd_pkt;
4535 pd = pkt->pkt_pd;
4536 port = pd->pd_port;
4537
4538 ASSERT(MUTEX_HELD(&port->fp_mutex));
4539 ASSERT(MUTEX_HELD(&pd->pd_mutex));
4540
4541 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id,
4542 fp_rls_intr, job);
4543
4544 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
4545 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
4546 pkt->pkt_tran_type = FC_PKT_EXCHANGE;
4547
4548 payload.ls_code.ls_code = LA_ELS_RLS;
4549 payload.ls_code.mbz = 0;
4550 payload.rls_portid = port->fp_port_id;
4551
4552 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
4553 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
4554 }
4555
4556
4557 /*
4558 * Initialize an ADISC ELS request
4559 */
4560 static void
4561 fp_adisc_init(fp_cmd_t *cmd, job_request_t *job)
4562 {
4563 fc_local_port_t *port;
4564 fc_packet_t *pkt;
4565 la_els_adisc_t payload;
4566 fc_remote_port_t *pd;
4567
4568 pkt = &cmd->cmd_pkt;
4569 pd = pkt->pkt_pd;
4570 port = pd->pd_port;
4571
4572 ASSERT(MUTEX_HELD(&pd->pd_mutex));
4573 ASSERT(MUTEX_HELD(&pd->pd_port->fp_mutex));
4574
4575 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id,
4576 fp_adisc_intr, job);
4577
4578 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
4579 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
4580 pkt->pkt_tran_type = FC_PKT_EXCHANGE;
4581
4582 payload.ls_code.ls_code = LA_ELS_ADISC;
4583 payload.ls_code.mbz = 0;
4584 payload.nport_id = port->fp_port_id;
4585 payload.port_wwn = port->fp_service_params.nport_ww_name;
4586 payload.node_wwn = port->fp_service_params.node_ww_name;
4587 payload.hard_addr = port->fp_hard_addr;
4588
4589 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
4590 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
4591 }
4592
4593
4594 /*
4595 * Send up a state change notification to ULPs.
4596 * Spawns a call to fctl_ulp_statec_cb in a taskq thread.
4597 */
4598 static int
4599 fp_ulp_statec_cb(fc_local_port_t *port, uint32_t state,
4600 fc_portmap_t *changelist, uint32_t listlen, uint32_t alloc_len, int sleep)
4601 {
4602 fc_port_clist_t *clist;
4603 fc_remote_port_t *pd;
4604 int count;
4605
4606 ASSERT(!MUTEX_HELD(&port->fp_mutex));
4607
4608 clist = kmem_zalloc(sizeof (*clist), sleep);
4609 if (clist == NULL) {
4610 kmem_free(changelist, alloc_len * sizeof (*changelist));
4611 return (FC_NOMEM);
4612 }
4613
4614 clist->clist_state = state;
4615
4616 mutex_enter(&port->fp_mutex);
4617 clist->clist_flags = port->fp_topology;
4618 mutex_exit(&port->fp_mutex);
4619
4620 clist->clist_port = (opaque_t)port;
4621 clist->clist_len = listlen;
4622 clist->clist_size = alloc_len;
4623 clist->clist_map = changelist;
4624
4625 /*
4626 * Bump the reference count of each fc_remote_port_t in this changelist.
4627 * This is necessary since these devices will be sitting in a taskq
4628 * and referenced later. When the state change notification is
4629 * complete, the reference counts will be decremented.
4630 */
4631 for (count = 0; count < clist->clist_len; count++) {
4632 pd = clist->clist_map[count].map_pd;
4633
4634 if (pd != NULL) {
4635 mutex_enter(&pd->pd_mutex);
4636 ASSERT((pd->pd_ref_count >= 0) ||
4637 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS));
4638 pd->pd_ref_count++;
4639
4640 if (clist->clist_map[count].map_state !=
4641 PORT_DEVICE_INVALID) {
4642 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS;
4643 }
4644
4645 mutex_exit(&pd->pd_mutex);
4646 }
4647 }
4648
4649 #ifdef DEBUG
4650 /*
4651 * Sanity check for presence of OLD devices in the hash lists
4652 */
4653 if (clist->clist_size) {
4654 ASSERT(clist->clist_map != NULL);
4655 for (count = 0; count < clist->clist_len; count++) {
4656 if (clist->clist_map[count].map_state ==
4657 PORT_DEVICE_INVALID) {
4658 la_wwn_t pwwn;
4659 fc_portid_t d_id;
4660
4661 pd = clist->clist_map[count].map_pd;
4662 ASSERT(pd != NULL);
4663
4664 mutex_enter(&pd->pd_mutex);
4665 pwwn = pd->pd_port_name;
4666 d_id = pd->pd_port_id;
4667 mutex_exit(&pd->pd_mutex);
4668
4669 pd = fctl_get_remote_port_by_pwwn(port, &pwwn);
4670 ASSERT(pd != clist->clist_map[count].map_pd);
4671
4672 pd = fctl_get_remote_port_by_did(port,
4673 d_id.port_id);
4674 ASSERT(pd != clist->clist_map[count].map_pd);
4675 }
4676 }
4677 }
4678 #endif
4679
4680 mutex_enter(&port->fp_mutex);
4681
4682 if (state == FC_STATE_ONLINE) {
4683 if (--port->fp_statec_busy == 0) {
4684 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
4685 }
4686 }
4687 mutex_exit(&port->fp_mutex);
4688
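/*
 * Hand the changelist off to the taskq. The dispatch is done with
 * KM_SLEEP and its return value is ignored here; contrast
 * fp_ulp_devc_cb() below, which checks the result and cleans up on
 * failure.
 */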
4689 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb,
4690 clist, KM_SLEEP);
4691
4692 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_statec fired; Port=%p,"
4693 "state=%x, len=%d", port, state, listlen);
4694
4695 return (FC_SUCCESS);
4696 }
4697
4698
4699 /*
4700 * Send up a FC_STATE_DEVICE_CHANGE state notification to ULPs
4701 */
4702 static int
4703 fp_ulp_devc_cb(fc_local_port_t *port, fc_portmap_t *changelist,
4704 uint32_t listlen, uint32_t alloc_len, int sleep, int sync)
4705 {
4706 int ret;
4707 fc_port_clist_t *clist;
4708
4709 ASSERT(!MUTEX_HELD(&port->fp_mutex));
4710
4711 clist = kmem_zalloc(sizeof (*clist), sleep);
4712 if (clist == NULL) {
4713 kmem_free(changelist, alloc_len * sizeof (*changelist));
4714 return (FC_NOMEM);
4715 }
4716
4717 clist->clist_state = FC_STATE_DEVICE_CHANGE;
4718
4719 mutex_enter(&port->fp_mutex);
4720 clist->clist_flags = port->fp_topology;
4721 mutex_exit(&port->fp_mutex);
4722
4723 clist->clist_port = (opaque_t)port;
4724 clist->clist_len = listlen;
4725 clist->clist_size = alloc_len;
4726 clist->clist_map = changelist;
4727
4728 /* Send sysevents for target state changes */
4729
4730 if (clist->clist_size) {
4731 int count;
4732 fc_remote_port_t *pd;
4733
4734 ASSERT(clist->clist_map != NULL);
4735 for (count = 0; count < clist->clist_len; count++) {
4736 pd = clist->clist_map[count].map_pd;
4737
4738 /*
4739 * Bump reference counts on all fc_remote_port_t
4740 * structs in this list. We don't know when the task
4741 * will fire, and we can't have these fc_remote_port_t
4742 * structs going away behind our backs.
4743 */
4744 if (pd) {
4745 mutex_enter(&pd->pd_mutex);
4746 ASSERT((pd->pd_ref_count >= 0) ||
4747 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS));
4748 pd->pd_ref_count++;
4749 mutex_exit(&pd->pd_mutex);
4750 }
4751
4752 if (clist->clist_map[count].map_state ==
4753 PORT_DEVICE_VALID) {
4754 if (clist->clist_map[count].map_type ==
4755 PORT_DEVICE_NEW) {
4756 /* Update our state change counter */
4757 mutex_enter(&port->fp_mutex);
4758 port->fp_last_change++;
4759 mutex_exit(&port->fp_mutex);
4760
4761 /* Additions */
4762 fp_log_target_event(port,
4763 ESC_SUNFC_TARGET_ADD,
4764 clist->clist_map[count].map_pwwn,
4765 clist->clist_map[count].map_did.
4766 port_id);
4767 }
4768
4769 } else if ((clist->clist_map[count].map_type ==
4770 PORT_DEVICE_OLD) &&
4771 (clist->clist_map[count].map_state ==
4772 PORT_DEVICE_INVALID)) {
4773 /* Update our state change counter */
4774 mutex_enter(&port->fp_mutex);
4775 port->fp_last_change++;
4776 mutex_exit(&port->fp_mutex);
4777
4778 /*
4779 * For removals, we don't decrement
4780 * pd_ref_count until after the ULP's
4781 * state change callback function has
4782 * completed.
4783 */
4784
4785 /* Removals */
4786 fp_log_target_event(port,
4787 ESC_SUNFC_TARGET_REMOVE,
4788 clist->clist_map[count].map_pwwn,
4789 clist->clist_map[count].map_did.port_id);
4790 }
4791
4792 if (clist->clist_map[count].map_state !=
4793 PORT_DEVICE_INVALID) {
4794 /*
4795 * Indicate that the ULPs are now aware of
4796 * this device.
4797 */
4798
4799 mutex_enter(&pd->pd_mutex);
4800 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS;
4801 mutex_exit(&pd->pd_mutex);
4802 }
4803
4804 #ifdef DEBUG
4805 /*
4806 * Sanity check for OLD devices in the hash lists
4807 */
4808 if (pd && clist->clist_map[count].map_state ==
4809 PORT_DEVICE_INVALID) {
4810 la_wwn_t pwwn;
4811 fc_portid_t d_id;
4812
4813 mutex_enter(&pd->pd_mutex);
4814 pwwn = pd->pd_port_name;
4815 d_id = pd->pd_port_id;
4816 mutex_exit(&pd->pd_mutex);
4817
4818 /*
4819 * This overwrites the 'pd' local variable.
4820 * Beware of this if 'pd' ever gets
4821 * referenced below this block.
4822 */
4823 pd = fctl_get_remote_port_by_pwwn(port, &pwwn);
4824 ASSERT(pd != clist->clist_map[count].map_pd);
4825
4826 pd = fctl_get_remote_port_by_did(port,
4827 d_id.port_id);
4828 ASSERT(pd != clist->clist_map[count].map_pd);
4829 }
4830 #endif
4831 }
4832 }
4833
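/*
 * For synchronous callers, set up a condition variable which the
 * taskq callback is expected to signal once the ULPs have been
 * notified, and block on it below.
 */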
4834 if (sync) {
4835 clist->clist_wait = 1;
4836 mutex_init(&clist->clist_mutex, NULL, MUTEX_DRIVER, NULL);
4837 cv_init(&clist->clist_cv, NULL, CV_DRIVER, NULL);
4838 }
4839
4840 ret = taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, clist, sleep);
4841 if (sync && ret) {
4842 mutex_enter(&clist->clist_mutex);
4843 while (clist->clist_wait) {
4844 cv_wait(&clist->clist_cv, &clist->clist_mutex);
4845 }
4846 mutex_exit(&clist->clist_mutex);
4847
4848 mutex_destroy(&clist->clist_mutex);
4849 cv_destroy(&clist->clist_cv);
4850 kmem_free(clist, sizeof (*clist));
4851 }
4852
4853 if (!ret) {
4854 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc dispatch failed; "
4855 "port=%p", port);
4856 kmem_free(clist->clist_map,
4857 sizeof (*(clist->clist_map)) * clist->clist_size);
4858 kmem_free(clist, sizeof (*clist));
4859 } else {
4860 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc fired; port=%p, len=%d",
4861 port, listlen);
4862 }
4863
4864 return (FC_SUCCESS);
4865 }
4866
4867
4868 /*
4869 * Perform PLOGI to the group of devices for ULPs
4870 */
4871 static void
4872 fp_plogi_group(fc_local_port_t *port, job_request_t *job)
4873 {
4874 int offline;
4875 int count;
4876 int rval;
4877 uint32_t listlen;
4878 uint32_t done;
4879 uint32_t d_id;
4880 fc_remote_node_t *node;
4881 fc_remote_port_t *pd;
4882 fc_remote_port_t *tmp_pd;
4883 fc_packet_t *ulp_pkt;
4884 la_els_logi_t *els_data;
4885 ls_code_t ls_code;
4886
4887 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group begin; port=%p, job=%p",
4888 port, job);
4889
4890 done = 0;
4891 listlen = job->job_ulp_listlen;
4892 job->job_counter = job->job_ulp_listlen;
4893
4894 mutex_enter(&port->fp_mutex);
4895 offline = (port->fp_statec_busy ||
4896 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) ? 1 : 0;
4897 mutex_exit(&port->fp_mutex);
4898
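/*
 * First pass: complete any ULP packets that can be finished right
 * away (port offline, ELS already in progress, or the remote port is
 * already logged in). Packets left in FC_PKT_FAILURE are handled by
 * the second pass below, which issues the actual PLOGIs.
 */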
4899 for (count = 0; count < listlen; count++) {
4900 ASSERT(job->job_ulp_pkts[count]->pkt_rsplen >=
4901 sizeof (la_els_logi_t));
4902
4903 ulp_pkt = job->job_ulp_pkts[count];
4904 pd = ulp_pkt->pkt_pd;
4905 d_id = ulp_pkt->pkt_cmd_fhdr.d_id;
4906
4907 if (offline) {
4908 done++;
4909
4910 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE;
4911 ulp_pkt->pkt_reason = FC_REASON_OFFLINE;
4912 ulp_pkt->pkt_pd = NULL;
4913 ulp_pkt->pkt_comp(ulp_pkt);
4914
4915 job->job_ulp_pkts[count] = NULL;
4916
4917 fp_jobdone(job);
4918 continue;
4919 }
4920
4921 if (pd == NULL) {
4922 pd = fctl_get_remote_port_by_did(port, d_id);
4923 if (pd == NULL) {
4924 /* reset later */
4925 ulp_pkt->pkt_state = FC_PKT_FAILURE;
4926 continue;
4927 }
4928 mutex_enter(&pd->pd_mutex);
4929 if (pd->pd_flags == PD_ELS_IN_PROGRESS) {
4930 mutex_exit(&pd->pd_mutex);
4931 ulp_pkt->pkt_state = FC_PKT_ELS_IN_PROGRESS;
4932 done++;
4933 ulp_pkt->pkt_comp(ulp_pkt);
4934 job->job_ulp_pkts[count] = NULL;
4935 fp_jobdone(job);
4936 } else {
4937 ulp_pkt->pkt_state = FC_PKT_FAILURE;
4938 mutex_exit(&pd->pd_mutex);
4939 }
4940 continue;
4941 }
4942
4943 switch (ulp_pkt->pkt_state) {
4944 case FC_PKT_ELS_IN_PROGRESS:
4945 ulp_pkt->pkt_reason = FC_REASON_OFFLINE;
4946 /* FALLTHRU */
4947 case FC_PKT_LOCAL_RJT:
4948 done++;
4949 ulp_pkt->pkt_comp(ulp_pkt);
4950 job->job_ulp_pkts[count] = NULL;
4951 fp_jobdone(job);
4952 continue;
4953 default:
4954 break;
4955 }
4956
4957 /*
4958 * Validate the pd corresponding to the d_id passed
4959 * by the ULPs
4960 */
4961 tmp_pd = fctl_get_remote_port_by_did(port, d_id);
4962 if ((tmp_pd == NULL) || (pd != tmp_pd)) {
4963 done++;
4964 ulp_pkt->pkt_state = FC_PKT_FAILURE;
4965 ulp_pkt->pkt_reason = FC_REASON_NO_CONNECTION;
4966 ulp_pkt->pkt_pd = NULL;
4967 ulp_pkt->pkt_comp(ulp_pkt);
4968 job->job_ulp_pkts[count] = NULL;
4969 fp_jobdone(job);
4970 continue;
4971 }
4972
4973 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group contd; "
4974 "port=%p, pd=%p", port, pd);
4975
4976 mutex_enter(&pd->pd_mutex);
4977
4978 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
4979 done++;
4980 els_data = (la_els_logi_t *)ulp_pkt->pkt_resp;
4981
4982 ls_code.ls_code = LA_ELS_ACC;
4983 ls_code.mbz = 0;
4984
4985 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
4986 (uint8_t *)&ls_code, (uint8_t *)&els_data->ls_code,
4987 sizeof (ls_code_t), DDI_DEV_AUTOINCR);
4988
4989 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
4990 (uint8_t *)&pd->pd_csp,
4991 (uint8_t *)&els_data->common_service,
4992 sizeof (pd->pd_csp), DDI_DEV_AUTOINCR);
4993
4994 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
4995 (uint8_t *)&pd->pd_port_name,
4996 (uint8_t *)&els_data->nport_ww_name,
4997 sizeof (pd->pd_port_name), DDI_DEV_AUTOINCR);
4998
4999 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
5000 (uint8_t *)&pd->pd_clsp1,
5001 (uint8_t *)&els_data->class_1,
5002 sizeof (pd->pd_clsp1), DDI_DEV_AUTOINCR);
5003
5004 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
5005 (uint8_t *)&pd->pd_clsp2,
5006 (uint8_t *)&els_data->class_2,
5007 sizeof (pd->pd_clsp2), DDI_DEV_AUTOINCR);
5008
5009 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
5010 (uint8_t *)&pd->pd_clsp3,
5011 (uint8_t *)&els_data->class_3,
5012 sizeof (pd->pd_clsp3), DDI_DEV_AUTOINCR);
5013
5014 node = pd->pd_remote_nodep;
5015 pd->pd_login_count++;
5016 pd->pd_flags = PD_IDLE;
5017 ulp_pkt->pkt_pd = pd;
5018 mutex_exit(&pd->pd_mutex);
5019
5020 mutex_enter(&node->fd_mutex);
5021 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
5022 (uint8_t *)&node->fd_node_name,
5023 (uint8_t *)(&els_data->node_ww_name),
5024 sizeof (node->fd_node_name), DDI_DEV_AUTOINCR);
5025
5026 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc,
5027 (uint8_t *)&node->fd_vv,
5028 (uint8_t *)(&els_data->vendor_version),
5029 sizeof (node->fd_vv), DDI_DEV_AUTOINCR);
5030
5031 mutex_exit(&node->fd_mutex);
5032 ulp_pkt->pkt_state = FC_PKT_SUCCESS;
5033 } else {
5034
5035 ulp_pkt->pkt_state = FC_PKT_FAILURE; /* reset later */
5036 mutex_exit(&pd->pd_mutex);
5037 }
5038
5039 if (ulp_pkt->pkt_state != FC_PKT_FAILURE) {
5040 ulp_pkt->pkt_comp(ulp_pkt);
5041 job->job_ulp_pkts[count] = NULL;
5042 fp_jobdone(job);
5043 }
5044 }
5045
5046 if (done == listlen) {
5047 fp_jobwait(job);
5048 fctl_jobdone(job);
5049 return;
5050 }
5051
5052 job->job_counter = listlen - done;
5053
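/*
 * Second pass: everything still marked FC_PKT_FAILURE needs a real
 * PLOGI. Straighten out the remote port reference counts first so
 * that ulp_pkt->pkt_pd points at the pd the login is issued against.
 */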
5054 for (count = 0; count < listlen; count++) {
5055 int cmd_flags;
5056
5057 if ((ulp_pkt = job->job_ulp_pkts[count]) == NULL) {
5058 continue;
5059 }
5060
5061 ASSERT(ulp_pkt->pkt_state == FC_PKT_FAILURE);
5062
5063 cmd_flags = FP_CMD_PLOGI_RETAIN;
5064
5065 d_id = ulp_pkt->pkt_cmd_fhdr.d_id;
5066 ASSERT(d_id != 0);
5067
5068 pd = fctl_get_remote_port_by_did(port, d_id);
5069
5070 /*
5071 * We need to properly adjust the port device
5072 * reference counter before we assign the pd
5073 * to the ULP packet's port device pointer.
5074 */
5075 if (pd != NULL && ulp_pkt->pkt_pd == NULL) {
5076 mutex_enter(&pd->pd_mutex);
5077 pd->pd_ref_count++;
5078 mutex_exit(&pd->pd_mutex);
5079 FP_TRACE(FP_NHEAD1(3, 0),
5080 "fp_plogi_group: DID = 0x%x using new pd %p \
5081 old pd NULL\n", d_id, pd);
5082 } else if (pd != NULL && ulp_pkt->pkt_pd != NULL &&
5083 ulp_pkt->pkt_pd != pd) {
5084 mutex_enter(&pd->pd_mutex);
5085 pd->pd_ref_count++;
5086 mutex_exit(&pd->pd_mutex);
5087 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex);
5088 ulp_pkt->pkt_pd->pd_ref_count--;
5089 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex);
5090 FP_TRACE(FP_NHEAD1(3, 0),
5091 "fp_plogi_group: DID = 0x%x pkt_pd %p != pd %p\n",
5092 d_id, ulp_pkt->pkt_pd, pd);
5093 } else if (pd == NULL && ulp_pkt->pkt_pd != NULL) {
5094 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex);
5095 ulp_pkt->pkt_pd->pd_ref_count--;
5096 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex);
5097 FP_TRACE(FP_NHEAD1(3, 0),
5098 "fp_plogi_group: DID = 0x%x pd is NULL and \
5099 pkt_pd = %p\n", d_id, ulp_pkt->pkt_pd);
5100 }
5101
5102 ulp_pkt->pkt_pd = pd;
5103
5104 if (pd != NULL) {
5105 mutex_enter(&pd->pd_mutex);
5106 d_id = pd->pd_port_id.port_id;
5107 pd->pd_flags = PD_ELS_IN_PROGRESS;
5108 mutex_exit(&pd->pd_mutex);
5109 } else {
5110 d_id = ulp_pkt->pkt_cmd_fhdr.d_id;
5111 #ifdef DEBUG
5112 pd = fctl_get_remote_port_by_did(port, d_id);
5113 ASSERT(pd == NULL);
5114 #endif
5115 /*
5116 * In Fabric topology, use the NS to create the
5117 * port device; if that fails, still try PLOGI,
5118 * which will make another attempt to create the
5119 * port device after a successful PLOGI.
5120 */
5121 mutex_enter(&port->fp_mutex);
5122 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
5123 mutex_exit(&port->fp_mutex);
5124 pd = fp_create_remote_port_by_ns(port,
5125 d_id, KM_SLEEP);
5126 if (pd) {
5127 cmd_flags |= FP_CMD_DELDEV_ON_ERROR;
5128
5129 mutex_enter(&pd->pd_mutex);
5130 pd->pd_flags = PD_ELS_IN_PROGRESS;
5131 mutex_exit(&pd->pd_mutex);
5132
5133 FP_TRACE(FP_NHEAD1(3, 0),
5134 "fp_plogi_group;"
5135 " NS created PD port=%p, job=%p,"
5136 " pd=%p", port, job, pd);
5137 }
5138 } else {
5139 mutex_exit(&port->fp_mutex);
5140 }
5141 if ((ulp_pkt->pkt_pd == NULL) && (pd != NULL)) {
5142 FP_TRACE(FP_NHEAD1(3, 0),
5143 "fp_plogi_group;"
5144 "ulp_pkt's pd is NULL, get a pd %p",
5145 pd);
5146 mutex_enter(&pd->pd_mutex);
5147 pd->pd_ref_count++;
5148 mutex_exit(&pd->pd_mutex);
5149 }
5150 ulp_pkt->pkt_pd = pd;
5151 }
5152
5153 rval = fp_port_login(port, d_id, job, cmd_flags,
5154 KM_SLEEP, pd, ulp_pkt);
5155
5156 if (rval == FC_SUCCESS) {
5157 continue;
5158 }
5159
5160 if (rval == FC_STATEC_BUSY) {
5161 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE;
5162 ulp_pkt->pkt_reason = FC_REASON_OFFLINE;
5163 } else {
5164 ulp_pkt->pkt_state = FC_PKT_FAILURE;
5165 }
5166
5167 if (pd) {
5168 mutex_enter(&pd->pd_mutex);
5169 pd->pd_flags = PD_IDLE;
5170 mutex_exit(&pd->pd_mutex);
5171 }
5172
5173 if (cmd_flags & FP_CMD_DELDEV_ON_ERROR) {
5174 ASSERT(pd != NULL);
5175
5176 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group: NS created,"
5177 " PD removed; port=%p, job=%p", port, job);
5178
5179 mutex_enter(&pd->pd_mutex);
5180 pd->pd_ref_count--;
5181 node = pd->pd_remote_nodep;
5182 mutex_exit(&pd->pd_mutex);
5183
5184 ASSERT(node != NULL);
5185
5186 if (fctl_destroy_remote_port(port, pd) == 0) {
5187 fctl_destroy_remote_node(node);
5188 }
5189 ulp_pkt->pkt_pd = NULL;
5190 }
5191 ulp_pkt->pkt_comp(ulp_pkt);
5192 fp_jobdone(job);
5193 }
5194
5195 fp_jobwait(job);
5196 fctl_jobdone(job);
5197
5198 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group end: port=%p, job=%p",
5199 port, job);
5200 }
5201
5202
5203 /*
5204 * Name server request initialization
5205 */
5206 static void
5207 fp_ns_init(fc_local_port_t *port, job_request_t *job, int sleep)
5208 {
5209 int rval;
5210 int count;
5211 int size;
5212
5213 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
5214
5215 job->job_counter = 1;
5216 job->job_result = FC_SUCCESS;
5217
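/*
 * Log in to the directory (name) server at its well known address,
 * 0xFFFFFC. If this fails, the topology is treated as having no
 * name server (FC_TOP_NO_NS).
 */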
5218 rval = fp_port_login(port, 0xFFFFFC, job, FP_CMD_PLOGI_RETAIN,
5219 KM_SLEEP, NULL, NULL);
5220
5221 if (rval != FC_SUCCESS) {
5222 mutex_enter(&port->fp_mutex);
5223 port->fp_topology = FC_TOP_NO_NS;
5224 mutex_exit(&port->fp_mutex);
5225 return;
5226 }
5227
5228 fp_jobwait(job);
5229
5230 if (job->job_result != FC_SUCCESS) {
5231 mutex_enter(&port->fp_mutex);
5232 port->fp_topology = FC_TOP_NO_NS;
5233 mutex_exit(&port->fp_mutex);
5234 return;
5235 }
5236
5237 /*
5238 * At this time, we'll do NS registration for objects in the
5239 * ns_reg_cmds (see top of this file) array.
5240 *
5241 * Each time a ULP module registers with the transport, the
5242 * appropriate bit is set in the FC-4 types and registered with
5243 * the NS to reflect that support. Also, ULPs and FC admin utilities
5244 * may register objects such as the IP address, symbolic
5245 * port/node names, and the Initial Process Associator at run time.
5246 */
5247 size = sizeof (ns_reg_cmds) / sizeof (ns_reg_cmds[0]);
5248 job->job_counter = size;
5249 job->job_result = FC_SUCCESS;
5250
5251 for (count = 0; count < size; count++) {
5252 if (fp_ns_reg(port, NULL, ns_reg_cmds[count],
5253 job, 0, sleep) != FC_SUCCESS) {
5254 fp_jobdone(job);
5255 }
5256 }
5257 if (size) {
5258 fp_jobwait(job);
5259 }
5260
5261 job->job_result = FC_SUCCESS;
5262
5263 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP);
5264
5265 if (port->fp_dev_count < FP_MAX_DEVICES) {
5266 (void) fp_ns_get_devcount(port, job, 1, KM_SLEEP);
5267 }
5268
5269 job->job_counter = 1;
5270
5271 if (fp_ns_scr(port, job, FC_SCR_FULL_REGISTRATION,
5272 sleep) == FC_SUCCESS) {
5273 fp_jobwait(job);
5274 }
5275 }
5276
5277
5278 /*
5279 * Name server finish:
5280 * Unregister for RSCNs
5281 * Unregister all the host port objects in the Name Server
5282 * Perform LOGO with the NS
5283 */
5284 static void
5285 fp_ns_fini(fc_local_port_t *port, job_request_t *job)
5286 {
5287 fp_cmd_t *cmd;
5288 uchar_t class;
5289 uint32_t s_id;
5290 fc_packet_t *pkt;
5291 la_els_logo_t payload;
5292
5293 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
5294
5295 job->job_counter = 1;
5296
5297 if (fp_ns_scr(port, job, FC_SCR_CLEAR_REGISTRATION, KM_SLEEP) !=
5298 FC_SUCCESS) {
5299 fp_jobdone(job);
5300 }
5301 fp_jobwait(job);
5302
5303 job->job_counter = 1;
5304
5305 if (fp_ns_reg(port, NULL, NS_DA_ID, job, 0, KM_SLEEP) != FC_SUCCESS) {
5306 fp_jobdone(job);
5307 }
5308 fp_jobwait(job);
5309
5310 job->job_counter = 1;
5311
5312 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t),
5313 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, NULL);
5314 pkt = &cmd->cmd_pkt;
5315
5316 mutex_enter(&port->fp_mutex);
5317 class = port->fp_ns_login_class;
5318 s_id = port->fp_port_id.port_id;
5319 payload.nport_id = port->fp_port_id;
5320 mutex_exit(&port->fp_mutex);
5321
5322 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class;
5323 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
5324 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE;
5325 cmd->cmd_retry_count = 1;
5326 cmd->cmd_ulp_pkt = NULL;
5327
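/*
 * NPIV ports send the LOGO to the fabric login server at 0xFFFFFE;
 * otherwise the LOGO goes to the name server at 0xFFFFFC.
 */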
5328 if (port->fp_npiv_type == FC_NPIV_PORT) {
5329 fp_els_init(cmd, s_id, 0xFFFFFE, fp_logo_intr, job);
5330 } else {
5331 fp_els_init(cmd, s_id, 0xFFFFFC, fp_logo_intr, job);
5332 }
5333
5334 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
5335
5336 payload.ls_code.ls_code = LA_ELS_LOGO;
5337 payload.ls_code.mbz = 0;
5338 payload.nport_ww_name = port->fp_service_params.nport_ww_name;
5339
5340 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
5341 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
5342
5343 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
5344 fp_iodone(cmd);
5345 }
5346 fp_jobwait(job);
5347 }
5348
5349
5350 /*
5351 * NS Registration function.
5352 *
5353 * Note carefully that FC-GS-2 currently doesn't support an Object
5354 * Registration by a D_ID other than the owner of the object. What
5355 * we are aiming for currently is to at least allow Symbolic Node/Port
5356 * Name registration for any N_Port Identifier by the host software.
5357 *
5358 * Anyway, if the second argument (fc_remote_port_t *) is NULL, this
5359 * function treats the request as Host NS Object.
5360 */
5361 static int
5362 fp_ns_reg(fc_local_port_t *port, fc_remote_port_t *pd, uint16_t cmd_code,
5363 job_request_t *job, int polled, int sleep)
5364 {
5365 int rval;
5366 fc_portid_t s_id;
5367 fc_packet_t *pkt;
5368 fp_cmd_t *cmd;
5369
5370 if (pd == NULL) {
5371 mutex_enter(&port->fp_mutex);
5372 s_id = port->fp_port_id;
5373 mutex_exit(&port->fp_mutex);
5374 } else {
5375 mutex_enter(&pd->pd_mutex);
5376 s_id = pd->pd_port_id;
5377 mutex_exit(&pd->pd_mutex);
5378 }
5379
5380 if (polled) {
5381 job->job_counter = 1;
5382 }
5383
5384 switch (cmd_code) {
5385 case NS_RPN_ID:
5386 case NS_RNN_ID: {
5387 ns_rxn_req_t rxn;
5388
5389 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5390 sizeof (ns_rxn_req_t), sizeof (fc_reg_resp_t), sleep, NULL);
5391 if (cmd == NULL) {
5392 return (FC_NOMEM);
5393 }
5394 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5395 pkt = &cmd->cmd_pkt;
5396
5397 if (pd == NULL) {
5398 rxn.rxn_xname = ((cmd_code == NS_RPN_ID) ?
5399 (port->fp_service_params.nport_ww_name) :
5400 (port->fp_service_params.node_ww_name));
5401 } else {
5402 if (cmd_code == NS_RPN_ID) {
5403 mutex_enter(&pd->pd_mutex);
5404 rxn.rxn_xname = pd->pd_port_name;
5405 mutex_exit(&pd->pd_mutex);
5406 } else {
5407 fc_remote_node_t *node;
5408
5409 mutex_enter(&pd->pd_mutex);
5410 node = pd->pd_remote_nodep;
5411 mutex_exit(&pd->pd_mutex);
5412
5413 mutex_enter(&node->fd_mutex);
5414 rxn.rxn_xname = node->fd_node_name;
5415 mutex_exit(&node->fd_mutex);
5416 }
5417 }
5418 rxn.rxn_port_id = s_id;
5419
5420 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rxn,
5421 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5422 sizeof (rxn), DDI_DEV_AUTOINCR);
5423
5424 break;
5425 }
5426
5427 case NS_RCS_ID: {
5428 ns_rcos_t rcos;
5429
5430 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5431 sizeof (ns_rcos_t), sizeof (fc_reg_resp_t), sleep, NULL);
5432 if (cmd == NULL) {
5433 return (FC_NOMEM);
5434 }
5435 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5436 pkt = &cmd->cmd_pkt;
5437
5438 if (pd == NULL) {
5439 rcos.rcos_cos = port->fp_cos;
5440 } else {
5441 mutex_enter(&pd->pd_mutex);
5442 rcos.rcos_cos = pd->pd_cos;
5443 mutex_exit(&pd->pd_mutex);
5444 }
5445 rcos.rcos_port_id = s_id;
5446
5447 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rcos,
5448 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5449 sizeof (rcos), DDI_DEV_AUTOINCR);
5450
5451 break;
5452 }
5453
5454 case NS_RFT_ID: {
5455 ns_rfc_type_t rfc;
5456
5457 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5458 sizeof (ns_rfc_type_t), sizeof (fc_reg_resp_t), sleep,
5459 NULL);
5460 if (cmd == NULL) {
5461 return (FC_NOMEM);
5462 }
5463 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5464 pkt = &cmd->cmd_pkt;
5465
5466 if (pd == NULL) {
5467 mutex_enter(&port->fp_mutex);
5468 bcopy(port->fp_fc4_types, rfc.rfc_types,
5469 sizeof (port->fp_fc4_types));
5470 mutex_exit(&port->fp_mutex);
5471 } else {
5472 mutex_enter(&pd->pd_mutex);
5473 bcopy(pd->pd_fc4types, rfc.rfc_types,
5474 sizeof (pd->pd_fc4types));
5475 mutex_exit(&pd->pd_mutex);
5476 }
5477 rfc.rfc_port_id = s_id;
5478
5479 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rfc,
5480 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5481 sizeof (rfc), DDI_DEV_AUTOINCR);
5482
5483 break;
5484 }
5485
5486 case NS_RSPN_ID: {
5487 uchar_t name_len;
5488 int pl_size;
5489 fc_portid_t spn;
5490
5491 if (pd == NULL) {
5492 mutex_enter(&port->fp_mutex);
5493 name_len = port->fp_sym_port_namelen;
5494 mutex_exit(&port->fp_mutex);
5495 } else {
5496 mutex_enter(&pd->pd_mutex);
5497 name_len = pd->pd_spn_len;
5498 mutex_exit(&pd->pd_mutex);
5499 }
5500
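/*
 * The RSPN_ID payload is laid out as the 4-byte port ID, a one-byte
 * name length, and then the symbolic port name itself.
 */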
5501 pl_size = sizeof (fc_portid_t) + name_len + 1;
5502
5503 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + pl_size,
5504 sizeof (fc_reg_resp_t), sleep, NULL);
5505 if (cmd == NULL) {
5506 return (FC_NOMEM);
5507 }
5508
5509 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5510
5511 pkt = &cmd->cmd_pkt;
5512
5513 spn = s_id;
5514
5515 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&spn, (uint8_t *)
5516 (pkt->pkt_cmd + sizeof (fc_ct_header_t)), sizeof (spn),
5517 DDI_DEV_AUTOINCR);
5518 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&name_len,
5519 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)
5520 + sizeof (fc_portid_t)), 1, DDI_DEV_AUTOINCR);
5521
5522 if (pd == NULL) {
5523 mutex_enter(&port->fp_mutex);
5524 FC_SET_CMD(port, pkt->pkt_cmd_acc,
5525 (uint8_t *)port->fp_sym_port_name, (uint8_t *)
5526 (pkt->pkt_cmd + sizeof (fc_ct_header_t) +
5527 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR);
5528 mutex_exit(&port->fp_mutex);
5529 } else {
5530 mutex_enter(&pd->pd_mutex);
5531 FC_SET_CMD(port, pkt->pkt_cmd_acc,
5532 (uint8_t *)pd->pd_spn,
5533 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) +
5534 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR);
5535 mutex_exit(&pd->pd_mutex);
5536 }
5537 break;
5538 }
5539
5540 case NS_RPT_ID: {
5541 ns_rpt_t rpt;
5542
5543 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5544 sizeof (ns_rpt_t), sizeof (fc_reg_resp_t), sleep, NULL);
5545 if (cmd == NULL) {
5546 return (FC_NOMEM);
5547 }
5548 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5549 pkt = &cmd->cmd_pkt;
5550
5551 if (pd == NULL) {
5552 rpt.rpt_type = port->fp_port_type;
5553 } else {
5554 mutex_enter(&pd->pd_mutex);
5555 rpt.rpt_type = pd->pd_porttype;
5556 mutex_exit(&pd->pd_mutex);
5557 }
5558 rpt.rpt_port_id = s_id;
5559
5560 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rpt,
5561 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5562 sizeof (rpt), DDI_DEV_AUTOINCR);
5563
5564 break;
5565 }
5566
5567 case NS_RIP_NN: {
5568 ns_rip_t rip;
5569
5570 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5571 sizeof (ns_rip_t), sizeof (fc_reg_resp_t), sleep, NULL);
5572 if (cmd == NULL) {
5573 return (FC_NOMEM);
5574 }
5575 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5576 pkt = &cmd->cmd_pkt;
5577
5578 if (pd == NULL) {
5579 rip.rip_node_name =
5580 port->fp_service_params.node_ww_name;
5581 bcopy(port->fp_ip_addr, rip.rip_ip_addr,
5582 sizeof (port->fp_ip_addr));
5583 } else {
5584 fc_remote_node_t *node;
5585
5586 /*
5587 * The most correct implementation would keep the IP
5588 * address in the fc_remote_node_t structure, since Node
5589 * WWN and IP address should have a one-to-one
5590 * correlation (though this appears to be changing in
5591 * the latest FC-GS-2 draft).
5592 */
5593 mutex_enter(&pd->pd_mutex);
5594 node = pd->pd_remote_nodep;
5595 bcopy(pd->pd_ip_addr, rip.rip_ip_addr,
5596 sizeof (pd->pd_ip_addr));
5597 mutex_exit(&pd->pd_mutex);
5598
5599 mutex_enter(&node->fd_mutex);
5600 rip.rip_node_name = node->fd_node_name;
5601 mutex_exit(&node->fd_mutex);
5602 }
5603
5604 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rip,
5605 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5606 sizeof (rip), DDI_DEV_AUTOINCR);
5607
5608 break;
5609 }
5610
5611 case NS_RIPA_NN: {
5612 ns_ipa_t ipa;
5613
5614 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5615 sizeof (ns_ipa_t), sizeof (fc_reg_resp_t), sleep, NULL);
5616 if (cmd == NULL) {
5617 return (FC_NOMEM);
5618 }
5619 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5620 pkt = &cmd->cmd_pkt;
5621
5622 if (pd == NULL) {
5623 ipa.ipa_node_name =
5624 port->fp_service_params.node_ww_name;
5625 bcopy(port->fp_ipa, ipa.ipa_value,
5626 sizeof (port->fp_ipa));
5627 } else {
5628 fc_remote_node_t *node;
5629
5630 mutex_enter(&pd->pd_mutex);
5631 node = pd->pd_remote_nodep;
5632 mutex_exit(&pd->pd_mutex);
5633
5634 mutex_enter(&node->fd_mutex);
5635 ipa.ipa_node_name = node->fd_node_name;
5636 bcopy(node->fd_ipa, ipa.ipa_value,
5637 sizeof (node->fd_ipa));
5638 mutex_exit(&node->fd_mutex);
5639 }
5640
5641 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&ipa,
5642 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5643 sizeof (ipa), DDI_DEV_AUTOINCR);
5644
5645 break;
5646 }
5647
5648 case NS_RSNN_NN: {
5649 uchar_t name_len;
5650 int pl_size;
5651 la_wwn_t snn;
5652 fc_remote_node_t *node = NULL;
5653
5654 if (pd == NULL) {
5655 mutex_enter(&port->fp_mutex);
5656 name_len = port->fp_sym_node_namelen;
5657 mutex_exit(&port->fp_mutex);
5658 } else {
5659 mutex_enter(&pd->pd_mutex);
5660 node = pd->pd_remote_nodep;
5661 mutex_exit(&pd->pd_mutex);
5662
5663 mutex_enter(&node->fd_mutex);
5664 name_len = node->fd_snn_len;
5665 mutex_exit(&node->fd_mutex);
5666 }
5667
5668 pl_size = sizeof (la_wwn_t) + name_len + 1;
5669
5670 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5671 pl_size, sizeof (fc_reg_resp_t), sleep, NULL);
5672 if (cmd == NULL) {
5673 return (FC_NOMEM);
5674 }
5675 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5676
5677 pkt = &cmd->cmd_pkt;
5678
5679 bcopy(&port->fp_service_params.node_ww_name,
5680 &snn, sizeof (la_wwn_t));
5681
5682 if (pd == NULL) {
5683 mutex_enter(&port->fp_mutex);
5684 FC_SET_CMD(port, pkt->pkt_cmd_acc,
5685 (uint8_t *)port->fp_sym_node_name, (uint8_t *)
5686 (pkt->pkt_cmd + sizeof (fc_ct_header_t) +
5687 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR);
5688 mutex_exit(&port->fp_mutex);
5689 } else {
5690 ASSERT(node != NULL);
5691 mutex_enter(&node->fd_mutex);
5692 FC_SET_CMD(port, pkt->pkt_cmd_acc,
5693 (uint8_t *)node->fd_snn,
5694 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) +
5695 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR);
5696 mutex_exit(&node->fd_mutex);
5697 }
5698
5699 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&snn,
5700 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5701 sizeof (snn), DDI_DEV_AUTOINCR);
5702 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&name_len,
5703 (uint8_t *)(pkt->pkt_cmd
5704 + sizeof (fc_ct_header_t) + sizeof (snn)),
5705 1, DDI_DEV_AUTOINCR);
5706
5707 break;
5708 }
5709
5710 case NS_DA_ID: {
5711 ns_remall_t rall;
5712 char tmp[4] = {0};
5713 char *ptr;
5714
5715 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
5716 sizeof (ns_remall_t), sizeof (fc_reg_resp_t), sleep, NULL);
5717
5718 if (cmd == NULL) {
5719 return (FC_NOMEM);
5720 }
5721
5722 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job);
5723 pkt = &cmd->cmd_pkt;
5724
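/*
 * Build a wire (big-endian) image of the port ID so that the DA_ID
 * payload is correct on little-endian hosts; big-endian hosts can
 * use the native layout directly.
 */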
5725 ptr = (char *)(&s_id);
5726 tmp[3] = *ptr++;
5727 tmp[2] = *ptr++;
5728 tmp[1] = *ptr++;
5729 tmp[0] = *ptr;
5730 #if defined(_BIT_FIELDS_LTOH)
5731 bcopy((caddr_t)tmp, (caddr_t)(&rall.rem_port_id), 4);
5732 #else
5733 rall.rem_port_id = s_id;
5734 #endif
5735 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rall,
5736 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
5737 sizeof (rall), DDI_DEV_AUTOINCR);
5738
5739 break;
5740 }
5741
5742 default:
5743 return (FC_FAILURE);
5744 }
5745
5746 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
5747
5748 if (rval != FC_SUCCESS) {
5749 job->job_result = rval;
5750 fp_iodone(cmd);
5751 }
5752
5753 if (polled) {
5754 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
5755 fp_jobwait(job);
5756 } else {
5757 rval = FC_SUCCESS;
5758 }
5759
5760 return (rval);
5761 }
5762
5763
5764 /*
5765 * Common interrupt handler
5766 */
5767 static int
5768 fp_common_intr(fc_packet_t *pkt, int iodone)
5769 {
5770 int rval = FC_FAILURE;
5771 fp_cmd_t *cmd;
5772 fc_local_port_t *port;
5773
5774 cmd = pkt->pkt_ulp_private;
5775 port = cmd->cmd_port;
5776
5777 /*
5778 * Fail upper layer requests fast if a state
5779 * change has occurred in the meantime.
5780 */
5781 mutex_enter(&port->fp_mutex);
5782 if (cmd->cmd_ulp_pkt != NULL && port->fp_statec_busy) {
5783 mutex_exit(&port->fp_mutex);
5784 cmd->cmd_ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE;
5785 cmd->cmd_ulp_pkt->pkt_reason = FC_REASON_OFFLINE;
5786 } else if (!(port->fp_soft_state &
5787 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS))) {
5788 mutex_exit(&port->fp_mutex);
5789
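/*
 * Busy and timeout completions are retried (timeouts without any
 * delay); reject completions go to fp_handle_reject(); anything
 * else is retried only if the response came back short.
 */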
5790 switch (pkt->pkt_state) {
5791 case FC_PKT_LOCAL_BSY:
5792 case FC_PKT_FABRIC_BSY:
5793 case FC_PKT_NPORT_BSY:
5794 case FC_PKT_TIMEOUT:
5795 cmd->cmd_retry_interval = (pkt->pkt_state ==
5796 FC_PKT_TIMEOUT) ? 0 : fp_retry_delay;
5797 rval = fp_retry_cmd(pkt);
5798 break;
5799
5800 case FC_PKT_FABRIC_RJT:
5801 case FC_PKT_NPORT_RJT:
5802 case FC_PKT_LOCAL_RJT:
5803 case FC_PKT_LS_RJT:
5804 case FC_PKT_FS_RJT:
5805 case FC_PKT_BA_RJT:
5806 rval = fp_handle_reject(pkt);
5807 break;
5808
5809 default:
5810 if (pkt->pkt_resp_resid) {
5811 cmd->cmd_retry_interval = 0;
5812 rval = fp_retry_cmd(pkt);
5813 }
5814 break;
5815 }
5816 } else {
5817 mutex_exit(&port->fp_mutex);
5818 }
5819
5820 if (rval != FC_SUCCESS && iodone) {
5821 fp_iodone(cmd);
5822 rval = FC_SUCCESS;
5823 }
5824
5825 return (rval);
5826 }
5827
5828
5829 /*
5830 * Some not-so-long-winded theory on point-to-point topology:
5831 *
5832 * In the ACC payload, if the D_ID is ZERO and the common service
5833 * parameters indicate N_Port, then the topology is POINT TO POINT.
5834 *
5835 * In a point to point topology with an N_Port, during Fabric Login,
5836 * the destination N_Port will check with our WWN and decide if it
5837 * needs to issue PLOGI or not. That means, FLOGI could potentially
5838 * trigger an unsolicited PLOGI from an N_Port. The Unsolicited
5839 * PLOGI creates the device handles.
5840 *
5841 * If the host port WWN is greater than the other N_Port's
5842 * WWN, we become the master (be aware that this isn't the term
5843 * used in the FC standards) and initiate the PLOGI.
5844 *
5845 */
5846 static void
5847 fp_flogi_intr(fc_packet_t *pkt)
5848 {
5849 int state;
5850 int f_port;
5851 uint32_t s_id;
5852 uint32_t d_id;
5853 fp_cmd_t *cmd;
5854 fc_local_port_t *port;
5855 la_wwn_t *swwn;
5856 la_wwn_t dwwn;
5857 la_wwn_t nwwn;
5858 fc_remote_port_t *pd;
5859 la_els_logi_t *acc;
5860 com_svc_t csp;
5861 ls_code_t resp;
5862
5863 cmd = pkt->pkt_ulp_private;
5864 port = cmd->cmd_port;
5865
5866 mutex_enter(&port->fp_mutex);
5867 port->fp_out_fpcmds--;
5868 mutex_exit(&port->fp_mutex);
5869
5870 FP_TRACE(FP_NHEAD1(1, 0), "fp_flogi_intr; port=%p, pkt=%p, state=%x",
5871 port, pkt, pkt->pkt_state);
5872
5873 if (FP_IS_PKT_ERROR(pkt)) {
5874 (void) fp_common_intr(pkt, 1);
5875 return;
5876 }
5877
5878 /*
5879 * Currently, we don't need to swap bytes here because qlc fakes
5880 * the response for us, so endianness is already taken care of. But
5881 * this has to be fixed and generalized at some point.
5882 */
5883 acc = (la_els_logi_t *)pkt->pkt_resp;
5884
5885 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc,
5886 sizeof (resp), DDI_DEV_AUTOINCR);
5887
5888 ASSERT(resp.ls_code == LA_ELS_ACC);
5889 if (resp.ls_code != LA_ELS_ACC) {
5890 (void) fp_common_intr(pkt, 1);
5891 return;
5892 }
5893
5894 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&csp,
5895 (uint8_t *)&acc->common_service, sizeof (csp), DDI_DEV_AUTOINCR);
5896
5897 f_port = FP_IS_F_PORT(csp.cmn_features) ? 1 : 0;
5898
5899 ASSERT(!MUTEX_HELD(&port->fp_mutex));
5900
5901 mutex_enter(&port->fp_mutex);
5902 state = FC_PORT_STATE_MASK(port->fp_state);
5903 mutex_exit(&port->fp_mutex);
5904
5905 if (f_port == 0) {
5906 if (state != FC_STATE_LOOP) {
5907 swwn = &port->fp_service_params.nport_ww_name;
5908
5909 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&dwwn,
5910 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t),
5911 DDI_DEV_AUTOINCR);
5912
5913 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&nwwn,
5914 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t),
5915 DDI_DEV_AUTOINCR);
5916
5917 mutex_enter(&port->fp_mutex);
5918
5919 port->fp_topology = FC_TOP_PT_PT;
5920 port->fp_total_devices = 1;
5921 if (fctl_wwn_cmp(swwn, &dwwn) >= 0) {
5922 port->fp_ptpt_master = 1;
5923 /*
5924 * Let us choose 'X' as the S_ID and 'Y' as the
5925 * D_ID and hope that works. If not, it will
5926 * get changed.
5927 */
5928 s_id = port->fp_instance + FP_DEFAULT_SID;
5929 d_id = port->fp_instance + FP_DEFAULT_DID;
5930 port->fp_port_id.port_id = s_id;
5931 mutex_exit(&port->fp_mutex);
5932
5933 FP_TRACE(FP_NHEAD1(1, 0), "fp_flogi_intr: fp %x"
5934 "pd %x", port->fp_port_id.port_id, d_id);
5935 pd = fctl_create_remote_port(port,
5936 &nwwn, &dwwn, d_id, PD_PLOGI_INITIATOR,
5937 KM_NOSLEEP);
5938 if (pd == NULL) {
5939 fp_printf(port, CE_NOTE, FP_LOG_ONLY,
5940 0, NULL, "couldn't create device"
5941 " d_id=%X", d_id);
5942 fp_iodone(cmd);
5943 return;
5944 }
5945
5946 cmd->cmd_pkt.pkt_tran_flags =
5947 pkt->pkt_tran_flags;
5948 cmd->cmd_pkt.pkt_tran_type = pkt->pkt_tran_type;
5949 cmd->cmd_flags = FP_CMD_PLOGI_RETAIN;
5950 cmd->cmd_retry_count = fp_retry_count;
5951
5952 fp_xlogi_init(port, cmd, s_id, d_id,
5953 fp_plogi_intr, cmd->cmd_job, LA_ELS_PLOGI);
5954
5955 (&cmd->cmd_pkt)->pkt_pd = pd;
5956
5957 /*
5958 * We've just created this fc_remote_port_t, and
5959 * we're about to use it to send a PLOGI, so
5960 * bump the reference count right now. When
5961 * the packet is freed, the reference count will
5962 * be decremented. The ULP may also start using
5963 * it, so mark it as given away as well.
5964 */
5965 pd->pd_ref_count++;
5966 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS;
5967
5968 if (fp_sendcmd(port, cmd,
5969 port->fp_fca_handle) == FC_SUCCESS) {
5970 return;
5971 }
5972 } else {
5973 /*
5974 * The device handles will be created when the
5975 * unsolicited PLOGI is completed successfully
5976 */
5977 port->fp_ptpt_master = 0;
5978 mutex_exit(&port->fp_mutex);
5979 }
5980 }
5981 pkt->pkt_state = FC_PKT_FAILURE;
5982 } else {
5983 if (f_port) {
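/*
 * An F_Port answered the FLOGI: the topology is either public
 * loop or fabric, and the address assigned to us arrives as the
 * D_ID in the accept frame header.
 */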
5984 mutex_enter(&port->fp_mutex);
5985 if (state == FC_STATE_LOOP) {
5986 port->fp_topology = FC_TOP_PUBLIC_LOOP;
5987 } else {
5988 port->fp_topology = FC_TOP_FABRIC;
5989
5990 FC_GET_RSP(port, pkt->pkt_resp_acc,
5991 (uint8_t *)&port->fp_fabric_name,
5992 (uint8_t *)&acc->node_ww_name,
5993 sizeof (la_wwn_t),
5994 DDI_DEV_AUTOINCR);
5995 }
5996 port->fp_port_id.port_id = pkt->pkt_resp_fhdr.d_id;
5997 mutex_exit(&port->fp_mutex);
5998 } else {
5999 pkt->pkt_state = FC_PKT_FAILURE;
6000 }
6001 }
6002 fp_iodone(cmd);
6003 }
6004
6005
6006 /*
6007 * Handle solicited PLOGI response
6008 */
6009 static void
6010 fp_plogi_intr(fc_packet_t *pkt)
6011 {
6012 int nl_port;
6013 int bailout;
6014 uint32_t d_id;
6015 fp_cmd_t *cmd;
6016 la_els_logi_t *acc;
6017 fc_local_port_t *port;
6018 fc_remote_port_t *pd;
6019 la_wwn_t nwwn;
6020 la_wwn_t pwwn;
6021 ls_code_t resp;
6022
6023 nl_port = 0;
6024 cmd = pkt->pkt_ulp_private;
6025 port = cmd->cmd_port;
6026 d_id = pkt->pkt_cmd_fhdr.d_id;
6027
6028 #ifndef __lock_lint
6029 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter);
6030 #endif
6031
6032 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: port=%p, job=%p, d_id=%x,"
6033 " jcount=%d pkt=%p, state=%x", port, cmd->cmd_job, d_id,
6034 cmd->cmd_job->job_counter, pkt, pkt->pkt_state);
6035
6036 /*
6037 * Bail out early on ULP-initiated requests if a
6038 * state change has occurred.
6039 */
6040 mutex_enter(&port->fp_mutex);
6041 port->fp_out_fpcmds--;
6042 bailout = ((port->fp_statec_busy ||
6043 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) &&
6044 cmd->cmd_ulp_pkt) ? 1 : 0;
6045 mutex_exit(&port->fp_mutex);
6046
6047 if (FP_IS_PKT_ERROR(pkt) || bailout) {
6048 int skip_msg = 0;
6049 int giveup = 0;
6050
6051 if (cmd->cmd_ulp_pkt) {
6052 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state;
6053 cmd->cmd_ulp_pkt->pkt_reason = pkt->pkt_reason;
6054 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action;
6055 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln;
6056 }
6057
6058 /*
6059 * If an unsolicited cross login already created
6060 * a device, speed up the discovery by not retrying
6061 * the command mindlessly.
6062 */
6063 if (pkt->pkt_pd == NULL &&
6064 fctl_get_remote_port_by_did(port, d_id) != NULL) {
6065 fp_iodone(cmd);
6066 return;
6067 }
6068
6069 if (pkt->pkt_pd != NULL) {
6070 giveup = (pkt->pkt_pd->pd_recepient ==
6071 PD_PLOGI_RECEPIENT) ? 1 : 0;
6072 if (giveup) {
6073 /*
6074 * This pd is marked as plogi
6075 * recipient, stop retrying
6076 */
6077 FP_TRACE(FP_NHEAD1(3, 0),
6078 "fp_plogi_intr: stop retry as"
6079 " a cross login was accepted"
6080 " from d_id=%x, port=%p.",
6081 d_id, port);
6082 fp_iodone(cmd);
6083 return;
6084 }
6085 }
6086
6087 if (fp_common_intr(pkt, 0) == FC_SUCCESS) {
6088 return;
6089 }
6090
6091 if ((pd = fctl_get_remote_port_by_did(port, d_id)) != NULL) {
6092 mutex_enter(&pd->pd_mutex);
6093 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
6094 skip_msg++;
6095 }
6096 mutex_exit(&pd->pd_mutex);
6097 }
6098
6099 mutex_enter(&port->fp_mutex);
6100 if (!bailout && !(skip_msg && port->fp_statec_busy) &&
6101 port->fp_statec_busy <= 1 &&
6102 pkt->pkt_reason != FC_REASON_FCAL_OPN_FAIL) {
6103 mutex_exit(&port->fp_mutex);
6104 /*
6105 * In case of login collisions, JNI HBAs return the
6106 * FC pkt back to the initiator with the state set to
6107 * FC_PKT_LS_RJT and the reason to FC_REASON_LOGICAL_ERROR.
6108 * QLC HBAs handle such cases in the FW and do not
6109 * return the LS_RJT with a logical error when a
6110 * login collision happens.
6111 */
6112 if ((pkt->pkt_state != FC_PKT_LS_RJT) ||
6113 (pkt->pkt_reason != FC_REASON_LOGICAL_ERROR)) {
6114 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt,
6115 "PLOGI to %x failed", d_id);
6116 }
6117 FP_TRACE(FP_NHEAD2(9, 0),
6118 "PLOGI to %x failed. state=%x reason=%x.",
6119 d_id, pkt->pkt_state, pkt->pkt_reason);
6120 } else {
6121 mutex_exit(&port->fp_mutex);
6122 }
6123
6124 fp_iodone(cmd);
6125 return;
6126 }
6127
6128 acc = (la_els_logi_t *)pkt->pkt_resp;
6129
6130 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc,
6131 sizeof (resp), DDI_DEV_AUTOINCR);
6132
6133 ASSERT(resp.ls_code == LA_ELS_ACC);
6134 if (resp.ls_code != LA_ELS_ACC) {
6135 (void) fp_common_intr(pkt, 1);
6136 return;
6137 }
6138
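/*
 * Logins to the well known name server or fabric controller
 * addresses carry no remote port state here; just remember the
 * class of service the login went out with and finish.
 */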
6139 if (d_id == FS_NAME_SERVER || d_id == FS_FABRIC_CONTROLLER) {
6140 mutex_enter(&port->fp_mutex);
6141 port->fp_ns_login_class = FC_TRAN_CLASS(pkt->pkt_tran_flags);
6142 mutex_exit(&port->fp_mutex);
6143 fp_iodone(cmd);
6144 return;
6145 }
6146
6147 ASSERT(acc == (la_els_logi_t *)pkt->pkt_resp);
6148
6149 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&pwwn,
6150 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t),
6151 DDI_DEV_AUTOINCR);
6152
6153 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&nwwn,
6154 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t),
6155 DDI_DEV_AUTOINCR);
6156
6157 ASSERT(fctl_is_wwn_zero(&pwwn) == FC_FAILURE);
6158 ASSERT(fctl_is_wwn_zero(&nwwn) == FC_FAILURE);
6159
6160 if ((pd = pkt->pkt_pd) == NULL) {
6161 pd = fctl_get_remote_port_by_pwwn(port, &pwwn);
6162 if (pd == NULL) {
6163 FP_TRACE(FP_NHEAD2(1, 0), "fp_plogi_intr: fp %x pd %x",
6164 port->fp_port_id.port_id, d_id);
6165 pd = fctl_create_remote_port(port, &nwwn, &pwwn, d_id,
6166 PD_PLOGI_INITIATOR, KM_NOSLEEP);
6167 if (pd == NULL) {
6168 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
6169 "couldn't create port device handles"
6170 " d_id=%x", d_id);
6171 fp_iodone(cmd);
6172 return;
6173 }
6174 } else {
6175 fc_remote_port_t *tmp_pd;
6176
6177 tmp_pd = fctl_get_remote_port_by_did(port, d_id);
6178 if (tmp_pd != NULL) {
6179 fp_iodone(cmd);
6180 return;
6181 }
6182
6183 mutex_enter(&port->fp_mutex);
6184 mutex_enter(&pd->pd_mutex);
6185 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN) ||
6186 (pd->pd_aux_flags & PD_LOGGED_OUT)) {
6187 cmd->cmd_flags |= FP_CMD_PLOGI_RETAIN;
6188 }
6189
6190 if (pd->pd_type == PORT_DEVICE_OLD) {
6191 if (pd->pd_port_id.port_id != d_id) {
6192 fctl_delist_did_table(port, pd);
6193 pd->pd_type = PORT_DEVICE_CHANGED;
6194 pd->pd_port_id.port_id = d_id;
6195 } else {
6196 pd->pd_type = PORT_DEVICE_NOCHANGE;
6197 }
6198 }
6199
6200 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) {
6201 char ww_name[17];
6202
6203 fc_wwn_to_str(&pd->pd_port_name, ww_name);
6204
6205 mutex_exit(&pd->pd_mutex);
6206 mutex_exit(&port->fp_mutex);
6207 FP_TRACE(FP_NHEAD2(9, 0),
6208 "Possible Duplicate name or address"
6209 " identifiers in the PLOGI response"
6210 " D_ID=%x, PWWN=%s: Please check the"
6211 " configuration", d_id, ww_name);
6212 fp_iodone(cmd);
6213 return;
6214 }
6215 fctl_enlist_did_table(port, pd);
6216 pd->pd_aux_flags &= ~PD_LOGGED_OUT;
6217 mutex_exit(&pd->pd_mutex);
6218 mutex_exit(&port->fp_mutex);
6219 }
6220 } else {
6221 fc_remote_port_t *tmp_pd, *new_wwn_pd;
6222
6223 tmp_pd = fctl_get_remote_port_by_did(port, d_id);
6224 new_wwn_pd = fctl_get_remote_port_by_pwwn(port, &pwwn);
6225
6226 mutex_enter(&port->fp_mutex);
6227 mutex_enter(&pd->pd_mutex);
6228 if (fctl_wwn_cmp(&pd->pd_port_name, &pwwn) == 0) {
6229 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: d_id=%x,"
6230 " pd_state=%x pd_type=%x", d_id, pd->pd_state,
6231 pd->pd_type);
6232 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN &&
6233 pd->pd_type == PORT_DEVICE_OLD) ||
6234 (pd->pd_aux_flags & PD_LOGGED_OUT)) {
6235 pd->pd_type = PORT_DEVICE_NOCHANGE;
6236 } else if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
6237 pd->pd_type = PORT_DEVICE_NEW;
6238 }
6239 } else {
6240 char old_name[17];
6241 char new_name[17];
6242
6243 fc_wwn_to_str(&pd->pd_port_name, old_name);
6244 fc_wwn_to_str(&pwwn, new_name);
6245
6246 FP_TRACE(FP_NHEAD1(9, 0),
6247 "fp_plogi_intr: PWWN of a device with D_ID=%x "
6248 "changed. New PWWN = %s, OLD PWWN = %s ; tmp_pd:%p "
6249 "pd:%p new_wwn_pd:%p, cmd_ulp_pkt:%p, bailout:0x%x",
6250 d_id, new_name, old_name, tmp_pd, pd, new_wwn_pd,
6251 cmd->cmd_ulp_pkt, bailout);
6252
6253 FP_TRACE(FP_NHEAD2(9, 0),
6254 "PWWN of a device with D_ID=%x changed."
6255 " New PWWN = %s, OLD PWWN = %s", d_id,
6256 new_name, old_name);
6257
6258 if (cmd->cmd_ulp_pkt && !bailout) {
6259 fc_remote_node_t *rnodep;
6260 fc_portmap_t *changelist;
6261 fc_portmap_t *listptr;
6262 int len = 1;
6263 /* # entries in changelist */
6264
6265 fctl_delist_pwwn_table(port, pd);
6266
6267 /*
6268 * Lets now check if there already is a pd with
6269				 * Let's now check whether there is already a pd
6270				 * with this new WWN in the table. If so, we'll
6271				 * mark it as invalid.
6272
6273 if (new_wwn_pd) {
6274 /*
6275					 * There is another pd in the pwwn
6276					 * table with the same WWN that we got
6277					 * in the PLOGI payload. We have to get
6278					 * it out of the pwwn table, update the
6279					 * pd's state (fp_fillout_old_map_held()
6280					 * does this for us) and add it to the
6281					 * changelist that goes up to ULPs.
6282					 *
6283					 * len is the length of the changelist,
6284					 * so increment it.
6285 */
6286 len++;
6287
6288 if (tmp_pd != pd) {
6289 /*
6290 * Odd case where pwwn and did
6291 * tables are out of sync but
6292 * we will handle that too. See
6293 * more comments below.
6294 *
6295 * One more device that ULPs
6296 * should know about and so len
6297 * gets incremented again.
6298 */
6299 len++;
6300 }
6301
6302 listptr = changelist = kmem_zalloc(len *
6303 sizeof (*changelist), KM_SLEEP);
6304
6305 mutex_enter(&new_wwn_pd->pd_mutex);
6306 rnodep = new_wwn_pd->pd_remote_nodep;
6307 mutex_exit(&new_wwn_pd->pd_mutex);
6308
6309 /*
6310 * Hold the fd_mutex since
6311 * fctl_copy_portmap_held expects it.
6312 * Preserve lock hierarchy by grabbing
6313 * fd_mutex before pd_mutex
6314 */
6315 if (rnodep) {
6316 mutex_enter(&rnodep->fd_mutex);
6317 }
6318 mutex_enter(&new_wwn_pd->pd_mutex);
6319 fp_fillout_old_map_held(listptr++,
6320 new_wwn_pd, 0);
6321 mutex_exit(&new_wwn_pd->pd_mutex);
6322 if (rnodep) {
6323 mutex_exit(&rnodep->fd_mutex);
6324 }
6325
6326 /*
6327					 * Safety check:
6328					 * Let's ensure that the pwwn and did
6329					 * tables are in sync. Ideally, we
6330					 * should not find that these two pds
6331					 * are different.
6332 */
6333 if (tmp_pd != pd) {
6334 mutex_enter(&tmp_pd->pd_mutex);
6335 rnodep =
6336 tmp_pd->pd_remote_nodep;
6337 mutex_exit(&tmp_pd->pd_mutex);
6338
6339 /* As above grab fd_mutex */
6340 if (rnodep) {
6341 mutex_enter(&rnodep->
6342 fd_mutex);
6343 }
6344 mutex_enter(&tmp_pd->pd_mutex);
6345
6346 fp_fillout_old_map_held(
6347 listptr++, tmp_pd, 0);
6348
6349 mutex_exit(&tmp_pd->pd_mutex);
6350 if (rnodep) {
6351 mutex_exit(&rnodep->
6352 fd_mutex);
6353 }
6354
6355 /*
6356 * Now add "pd" (not tmp_pd)
6357 * to fp_did_table to sync it up
6358 * with fp_pwwn_table
6359 *
6360 * pd->pd_mutex is already held
6361 * at this point
6362 */
6363 fctl_enlist_did_table(port, pd);
6364 }
6365 } else {
6366 listptr = changelist = kmem_zalloc(
6367 sizeof (*changelist), KM_SLEEP);
6368 }
6369
6370 ASSERT(changelist != NULL);
6371
6372 fp_fillout_changed_map(listptr, pd, &d_id,
6373 &pwwn);
6374 fctl_enlist_pwwn_table(port, pd);
6375
6376 mutex_exit(&pd->pd_mutex);
6377 mutex_exit(&port->fp_mutex);
6378
6379 fp_iodone(cmd);
6380
6381 (void) fp_ulp_devc_cb(port, changelist, len,
6382 len, KM_NOSLEEP, 0);
6383
6384 return;
6385 }
6386 }
6387
6388 if (pd->pd_porttype.port_type == FC_NS_PORT_NL) {
6389 nl_port = 1;
6390 }
6391 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) {
6392 pd->pd_aux_flags &= ~PD_LOGGED_OUT;
6393 }
6394
6395 mutex_exit(&pd->pd_mutex);
6396 mutex_exit(&port->fp_mutex);
6397
6398 if (tmp_pd == NULL) {
6399 mutex_enter(&port->fp_mutex);
6400 mutex_enter(&pd->pd_mutex);
6401 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) {
6402 char ww_name[17];
6403
6404 fc_wwn_to_str(&pd->pd_port_name, ww_name);
6405 mutex_exit(&pd->pd_mutex);
6406 mutex_exit(&port->fp_mutex);
6407 FP_TRACE(FP_NHEAD2(9, 0),
6408 "Possible Duplicate name or address"
6409 " identifiers in the PLOGI response"
6410 " D_ID=%x, PWWN=%s: Please check the"
6411 " configuration", d_id, ww_name);
6412 fp_iodone(cmd);
6413 return;
6414 }
6415 fctl_enlist_did_table(port, pd);
6416 pd->pd_aux_flags &= ~PD_LOGGED_OUT;
6417 mutex_exit(&pd->pd_mutex);
6418 mutex_exit(&port->fp_mutex);
6419 }
6420 }
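	/*
	 * Record the service parameters from the LS_ACC against the remote
	 * port and mark the login as established.
	 */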
6421 fp_register_login(&pkt->pkt_resp_acc, pd, acc,
6422 FC_TRAN_CLASS(pkt->pkt_tran_flags));
6423
6424 if (cmd->cmd_ulp_pkt) {
6425 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state;
6426 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action;
6427 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln;
6428 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) {
6429 if (pd != NULL) {
6430 FP_TRACE(FP_NHEAD1(9, 0),
6431						    "fp_plogi_intr: "
6432 "ulp_pkt's pd is NULL, get a pd %p",
6433 pd);
6434 mutex_enter(&pd->pd_mutex);
6435 pd->pd_ref_count++;
6436 mutex_exit(&pd->pd_mutex);
6437 }
6438 cmd->cmd_ulp_pkt->pkt_pd = pd;
6439 }
6440 bcopy((caddr_t)&pkt->pkt_resp_fhdr,
6441 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr,
6442 sizeof (fc_frame_hdr_t));
6443 bcopy((caddr_t)pkt->pkt_resp,
6444 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp,
6445 sizeof (la_els_logi_t));
6446 }
6447
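	/*
	 * On a private loop (or for an NL_Port), follow the PLOGI with an
	 * ADISC by reusing this command, so the device's hard address can
	 * be picked up and validated.
	 */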
6448 mutex_enter(&port->fp_mutex);
6449 if (port->fp_topology == FC_TOP_PRIVATE_LOOP || nl_port) {
6450 mutex_enter(&pd->pd_mutex);
6451
6452 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
6453 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
6454 cmd->cmd_retry_count = fp_retry_count;
6455
6456 /*
6457 * If the fc_remote_port_t pointer is not set in the given
6458 * fc_packet_t, then this fc_remote_port_t must have just
6459 * been created. Save the pointer and also increment the
6460 * fc_remote_port_t reference count.
6461 */
6462 if (pkt->pkt_pd == NULL) {
6463 pkt->pkt_pd = pd;
6464 pd->pd_ref_count++; /* It's in use! */
6465 }
6466
6467 fp_adisc_init(cmd, cmd->cmd_job);
6468
6469 pkt->pkt_cmdlen = sizeof (la_els_adisc_t);
6470 pkt->pkt_rsplen = sizeof (la_els_adisc_t);
6471
6472 mutex_exit(&pd->pd_mutex);
6473 mutex_exit(&port->fp_mutex);
6474
6475 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) {
6476 return;
6477 }
6478 } else {
6479 mutex_exit(&port->fp_mutex);
6480 }
6481
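	/*
	 * If the login is not to be retained, reuse the command to send an
	 * explicit LOGO so the remote port is not left logged in.
	 */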
6482 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) {
6483 mutex_enter(&port->fp_mutex);
6484 mutex_enter(&pd->pd_mutex);
6485
6486 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
6487 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
6488 cmd->cmd_retry_count = fp_retry_count;
6489
6490 fp_logo_init(pd, cmd, cmd->cmd_job);
6491
6492 pkt->pkt_cmdlen = sizeof (la_els_logo_t);
6493 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN;
6494
6495 mutex_exit(&pd->pd_mutex);
6496 mutex_exit(&port->fp_mutex);
6497
6498 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) {
6499 return;
6500 }
6501
6502 }
6503 fp_iodone(cmd);
6504 }
6505
6506
6507 /*
6508 * Handle solicited ADISC response
6509 */
6510 static void
6511 fp_adisc_intr(fc_packet_t *pkt)
6512 {
6513 int rval;
6514 int bailout;
6515 fp_cmd_t *cmd, *logi_cmd;
6516 fc_local_port_t *port;
6517 fc_remote_port_t *pd;
6518 la_els_adisc_t *acc;
6519 ls_code_t resp;
6520 fc_hardaddr_t ha;
6521 fc_portmap_t *changelist;
6522 int initiator, adiscfail = 0;
6523
6524 pd = pkt->pkt_pd;
6525 cmd = pkt->pkt_ulp_private;
6526 port = cmd->cmd_port;
6527
6528 #ifndef __lock_lint
6529 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter);
6530 #endif
6531
6532 ASSERT(pd != NULL && port != NULL && cmd != NULL);
6533
6534 mutex_enter(&port->fp_mutex);
6535 port->fp_out_fpcmds--;
6536 bailout = ((port->fp_statec_busy ||
6537 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) &&
6538 cmd->cmd_ulp_pkt) ? 1 : 0;
6539 mutex_exit(&port->fp_mutex);
6540
6541 if (bailout) {
6542 fp_iodone(cmd);
6543 return;
6544 }
6545
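	/*
	 * On a clean ADISC ACC, compare the hard address in the response
	 * with what is on record to decide whether the device has changed;
	 * otherwise fall through to the retry and re-login logic below.
	 */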
6546 if (pkt->pkt_state == FC_PKT_SUCCESS && pkt->pkt_resp_resid == 0) {
6547 acc = (la_els_adisc_t *)pkt->pkt_resp;
6548
6549 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp,
6550 (uint8_t *)acc, sizeof (resp), DDI_DEV_AUTOINCR);
6551
6552 if (resp.ls_code == LA_ELS_ACC) {
6553 int is_private;
6554
6555 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&ha,
6556 (uint8_t *)&acc->hard_addr, sizeof (ha),
6557 DDI_DEV_AUTOINCR);
6558
6559 mutex_enter(&port->fp_mutex);
6560
6561 is_private =
6562 (port->fp_topology == FC_TOP_PRIVATE_LOOP) ? 1 : 0;
6563
6564 mutex_enter(&pd->pd_mutex);
6565 if ((pd->pd_aux_flags & PD_IN_DID_QUEUE) == 0) {
6566 fctl_enlist_did_table(port, pd);
6567 }
6568 mutex_exit(&pd->pd_mutex);
6569
6570 mutex_exit(&port->fp_mutex);
6571
6572 mutex_enter(&pd->pd_mutex);
6573 if (pd->pd_type != PORT_DEVICE_NEW) {
6574 if (is_private && (pd->pd_hard_addr.hard_addr !=
6575 ha.hard_addr)) {
6576 pd->pd_type = PORT_DEVICE_CHANGED;
6577 } else {
6578 pd->pd_type = PORT_DEVICE_NOCHANGE;
6579 }
6580 }
6581
6582 if (is_private && (ha.hard_addr &&
6583 pd->pd_port_id.port_id != ha.hard_addr)) {
6584 char ww_name[17];
6585
6586 fc_wwn_to_str(&pd->pd_port_name, ww_name);
6587
6588 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
6589 "NL_Port Identifier %x doesn't match"
6590 " with Hard Address %x, Will use Port"
6591 " WWN %s", pd->pd_port_id.port_id,
6592 ha.hard_addr, ww_name);
6593
6594 pd->pd_hard_addr.hard_addr = 0;
6595 } else {
6596 pd->pd_hard_addr.hard_addr = ha.hard_addr;
6597 }
6598 mutex_exit(&pd->pd_mutex);
6599 } else {
6600 if (fp_common_intr(pkt, 0) == FC_SUCCESS) {
6601 return;
6602 }
6603 }
6604 } else {
6605 if (fp_common_intr(pkt, 0) == FC_SUCCESS) {
6606 return;
6607 }
6608
6609 mutex_enter(&port->fp_mutex);
6610 if (port->fp_statec_busy <= 1) {
6611 mutex_exit(&port->fp_mutex);
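			/*
			 * An LS_RJT with reason "command unable" generally
			 * means the remote port no longer has us logged in;
			 * rather than give up, issue a fresh PLOGI (with the
			 * login retained) to re-establish the session.
			 */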
6612 if (pkt->pkt_state == FC_PKT_LS_RJT &&
6613 pkt->pkt_reason == FC_REASON_CMD_UNABLE) {
6614 uchar_t class;
6615 int cmd_flag;
6616 uint32_t src_id;
6617
6618 class = fp_get_nextclass(port,
6619 FC_TRAN_CLASS_INVALID);
6620 if (class == FC_TRAN_CLASS_INVALID) {
6621 fp_iodone(cmd);
6622 return;
6623 }
6624
6625 FP_TRACE(FP_NHEAD1(1, 0), "ADISC re-login; "
6626 "fp_state=0x%x, pkt_state=0x%x, "
6627 "reason=0x%x, class=0x%x",
6628 port->fp_state, pkt->pkt_state,
6629 pkt->pkt_reason, class);
6630 cmd_flag = FP_CMD_PLOGI_RETAIN;
6631
6632 logi_cmd = fp_alloc_pkt(port,
6633 sizeof (la_els_logi_t),
6634 sizeof (la_els_logi_t), KM_SLEEP, pd);
6635 if (logi_cmd == NULL) {
6636 fp_iodone(cmd);
6637 return;
6638 }
6639
6640 logi_cmd->cmd_pkt.pkt_tran_flags =
6641 FC_TRAN_INTR | class;
6642 logi_cmd->cmd_pkt.pkt_tran_type =
6643 FC_PKT_EXCHANGE;
6644 logi_cmd->cmd_flags = cmd_flag;
6645 logi_cmd->cmd_retry_count = fp_retry_count;
6646 logi_cmd->cmd_ulp_pkt = NULL;
6647
6648 mutex_enter(&port->fp_mutex);
6649 src_id = port->fp_port_id.port_id;
6650 mutex_exit(&port->fp_mutex);
6651
6652 fp_xlogi_init(port, logi_cmd, src_id,
6653 pkt->pkt_cmd_fhdr.d_id, fp_plogi_intr,
6654 cmd->cmd_job, LA_ELS_PLOGI);
6655 if (pd) {
6656 mutex_enter(&pd->pd_mutex);
6657 pd->pd_flags = PD_ELS_IN_PROGRESS;
6658 mutex_exit(&pd->pd_mutex);
6659 }
6660
6661 if (fp_sendcmd(port, logi_cmd,
6662 port->fp_fca_handle) == FC_SUCCESS) {
6663 fp_free_pkt(cmd);
6664 return;
6665 } else {
6666 fp_free_pkt(logi_cmd);
6667 }
6668 } else {
6669 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt,
6670 "ADISC to %x failed, cmd_flags=%x",
6671 pkt->pkt_cmd_fhdr.d_id, cmd->cmd_flags);
6672 cmd->cmd_flags &= ~FP_CMD_PLOGI_RETAIN;
6673 adiscfail = 1;
6674 }
6675 } else {
6676 mutex_exit(&port->fp_mutex);
6677 }
6678 }
6679
6680 if (cmd->cmd_ulp_pkt) {
6681 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state;
6682 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action;
6683 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln;
6684 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) {
6685 cmd->cmd_ulp_pkt->pkt_pd = pd;
6686 FP_TRACE(FP_NHEAD1(9, 0),
6687			    "fp_adisc_intr: "
6688 "ulp_pkt's pd is NULL, get a pd %p",
6689 pd);
6690
6691 }
6692 bcopy((caddr_t)&pkt->pkt_resp_fhdr,
6693 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr,
6694 sizeof (fc_frame_hdr_t));
6695 bcopy((caddr_t)pkt->pkt_resp,
6696 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp,
6697 sizeof (la_els_adisc_t));
6698 }
6699
6700 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) {
6701 FP_TRACE(FP_NHEAD1(9, 0),
6702 "fp_adisc_intr: Perform LOGO.cmd_flags=%x, "
6703 "fp_retry_count=%x, ulp_pkt=%p",
6704 cmd->cmd_flags, fp_retry_count, cmd->cmd_ulp_pkt);
6705
6706 mutex_enter(&port->fp_mutex);
6707 mutex_enter(&pd->pd_mutex);
6708
6709 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
6710 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
6711 cmd->cmd_retry_count = fp_retry_count;
6712
6713 fp_logo_init(pd, cmd, cmd->cmd_job);
6714
6715 pkt->pkt_cmdlen = sizeof (la_els_logo_t);
6716 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN;
6717
6718 mutex_exit(&pd->pd_mutex);
6719 mutex_exit(&port->fp_mutex);
6720
6721 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
6722 if (adiscfail) {
6723 mutex_enter(&pd->pd_mutex);
6724 initiator =
6725 ((pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0);
6726 pd->pd_state = PORT_DEVICE_VALID;
6727 pd->pd_aux_flags |= PD_LOGGED_OUT;
6728 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) {
6729 pd->pd_type = PORT_DEVICE_NEW;
6730 } else {
6731 pd->pd_type = PORT_DEVICE_NOCHANGE;
6732 }
6733 mutex_exit(&pd->pd_mutex);
6734
6735 changelist =
6736 kmem_zalloc(sizeof (*changelist), KM_SLEEP);
6737
6738 if (initiator) {
6739 fp_unregister_login(pd);
6740 fctl_copy_portmap(changelist, pd);
6741 } else {
6742 fp_fillout_old_map(changelist, pd, 0);
6743 }
6744
6745 FP_TRACE(FP_NHEAD1(9, 0),
6746 "fp_adisc_intr: Dev change notification "
6747 "to ULP port=%p, pd=%p, map_type=%x map_state=%x "
6748 "map_flags=%x initiator=%d", port, pd,
6749 changelist->map_type, changelist->map_state,
6750 changelist->map_flags, initiator);
6751
6752 (void) fp_ulp_devc_cb(port, changelist,
6753 1, 1, KM_SLEEP, 0);
6754 }
6755 if (rval == FC_SUCCESS) {
6756 return;
6757 }
6758 }
6759 fp_iodone(cmd);
6760 }
6761
6762
6763 /*
6764 * Handle solicited LOGO response
6765 */
6766 static void
6767 fp_logo_intr(fc_packet_t *pkt)
6768 {
6769 ls_code_t resp;
6770 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port;
6771
6772 mutex_enter(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex);
6773 ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_out_fpcmds--;
6774 mutex_exit(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex);
6775
6776 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp,
6777 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR);
6778
6779 if (FP_IS_PKT_ERROR(pkt)) {
6780 (void) fp_common_intr(pkt, 1);
6781 return;
6782 }
6783
6784 ASSERT(resp.ls_code == LA_ELS_ACC);
6785 if (resp.ls_code != LA_ELS_ACC) {
6786 (void) fp_common_intr(pkt, 1);
6787 return;
6788 }
6789
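	/*
	 * The LOGO was accepted; tear down the login state for the remote
	 * port, if one is attached to this packet.
	 */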
6790 if (pkt->pkt_pd != NULL) {
6791 fp_unregister_login(pkt->pkt_pd);
6792 }
6793
6794 fp_iodone(pkt->pkt_ulp_private);
6795 }
6796
6797
6798 /*
6799 * Handle solicited RNID response
6800 */
6801 static void
6802 fp_rnid_intr(fc_packet_t *pkt)
6803 {
6804 ls_code_t resp;
6805 job_request_t *job;
6806 fp_cmd_t *cmd;
6807 la_els_rnid_acc_t *acc;
6808 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port;
6809
6810 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp,
6811 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR);
6812 cmd = pkt->pkt_ulp_private;
6813
6814 mutex_enter(&cmd->cmd_port->fp_mutex);
6815 cmd->cmd_port->fp_out_fpcmds--;
6816 mutex_exit(&cmd->cmd_port->fp_mutex);
6817
6818 job = cmd->cmd_job;
6819 ASSERT(job->job_private != NULL);
6820
6821 /* If failure or LS_RJT then retry the packet, if needed */
6822 if (pkt->pkt_state != FC_PKT_SUCCESS || resp.ls_code != LA_ELS_ACC) {
6823 (void) fp_common_intr(pkt, 1);
6824 return;
6825 }
6826
6827	/* Copy the RNID response into the node_id memory allocated in the ioctl code */
6828 acc = (la_els_rnid_acc_t *)pkt->pkt_resp;
6829
6830 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)job->job_private,
6831 (uint8_t *)acc, sizeof (la_els_rnid_acc_t), DDI_DEV_AUTOINCR);
6832
6833	/* wake up the ioctl thread and free the pkt */
6834 fp_iodone(cmd);
6835 }
6836
6837
6838 /*
6839 * Handle solicited RLS response
6840 */
6841 static void
6842 fp_rls_intr(fc_packet_t *pkt)
6843 {
6844 ls_code_t resp;
6845 job_request_t *job;
6846 fp_cmd_t *cmd;
6847 la_els_rls_acc_t *acc;
6848 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port;
6849
6850 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp,
6851 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR);
6852 cmd = pkt->pkt_ulp_private;
6853
6854 mutex_enter(&cmd->cmd_port->fp_mutex);
6855 cmd->cmd_port->fp_out_fpcmds--;
6856 mutex_exit(&cmd->cmd_port->fp_mutex);
6857
6858 job = cmd->cmd_job;
6859 ASSERT(job->job_private != NULL);
6860
6861 /* If failure or LS_RJT then retry the packet, if needed */
6862 if (FP_IS_PKT_ERROR(pkt) || resp.ls_code != LA_ELS_ACC) {
6863 (void) fp_common_intr(pkt, 1);
6864 return;
6865 }
6866
6867 /* Save link error status block in memory allocated in ioctl code */
6868 acc = (la_els_rls_acc_t *)pkt->pkt_resp;
6869
6870 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)job->job_private,
6871 (uint8_t *)&acc->rls_link_params, sizeof (fc_rls_acc_t),
6872 DDI_DEV_AUTOINCR);
6873
6874	/* wake up the ioctl thread and free the pkt */
6875 fp_iodone(cmd);
6876 }
6877
6878
6879 /*
6880 * A solicited command completion interrupt (mostly for commands
6881 * that require almost no post processing such as SCR ELS)
6882 */
6883 static void
6884 fp_intr(fc_packet_t *pkt)
6885 {
6886 mutex_enter(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex);
6887 ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_out_fpcmds--;
6888 mutex_exit(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex);
6889
6890 if (FP_IS_PKT_ERROR(pkt)) {
6891 (void) fp_common_intr(pkt, 1);
6892 return;
6893 }
6894 fp_iodone(pkt->pkt_ulp_private);
6895 }
6896
6897
6898 /*
6899 * Handle the underlying port's state change
6900 */
6901 static void
6902 fp_statec_cb(opaque_t port_handle, uint32_t state)
6903 {
6904 fc_local_port_t *port = port_handle;
6905 job_request_t *job;
6906
6907 /*
6908	 * If it is not possible to process the callback,
6909	 * just drop it on the floor; don't bother
6910	 * to do something that isn't safe at this time.
6911 */
6912 mutex_enter(&port->fp_mutex);
6913 if ((port->fp_soft_state &
6914 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) ||
6915 (FC_PORT_STATE_MASK(port->fp_state) == FC_PORT_STATE_MASK(state))) {
6916 mutex_exit(&port->fp_mutex);
6917 return;
6918 }
6919
6920 if (port->fp_statec_busy == 0) {
6921 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
6922 #ifdef DEBUG
6923 } else {
6924 ASSERT(port->fp_soft_state & FP_SOFT_IN_STATEC_CB);
6925 #endif
6926 }
6927
6928 port->fp_statec_busy++;
6929
6930 /*
6931 * For now, force the trusted method of device authentication (by
6932 * PLOGI) when LIPs do not involve OFFLINE to ONLINE transition.
6933 */
6934 if (FC_PORT_STATE_MASK(state) == FC_STATE_LIP ||
6935 FC_PORT_STATE_MASK(state) == FC_STATE_LIP_LBIT_SET) {
6936 state = FC_PORT_SPEED_MASK(port->fp_state) | FC_STATE_LOOP;
6937 fp_port_offline(port, 0);
6938 }
6939 mutex_exit(&port->fp_mutex);
6940
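	/*
	 * The real work is done by the job thread; this callback only
	 * allocates and enqueues the appropriate job for the new state.
	 */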
6941 switch (FC_PORT_STATE_MASK(state)) {
6942 case FC_STATE_OFFLINE:
6943 job = fctl_alloc_job(JOB_PORT_OFFLINE,
6944 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP);
6945 if (job == NULL) {
6946 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
6947			    "fp_statec_cb() couldn't submit a job "
6948			    "to the thread: failing..");
6949 mutex_enter(&port->fp_mutex);
6950 if (--port->fp_statec_busy == 0) {
6951 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
6952 }
6953 mutex_exit(&port->fp_mutex);
6954 return;
6955 }
6956 mutex_enter(&port->fp_mutex);
6957 /*
6958 * Zero out this field so that we do not retain
6959		 * the fabric name, as it's no longer valid.
6960 */
6961 bzero(&port->fp_fabric_name, sizeof (la_wwn_t));
6962 port->fp_state = state;
6963 mutex_exit(&port->fp_mutex);
6964
6965 fctl_enque_job(port, job);
6966 break;
6967
6968 case FC_STATE_ONLINE:
6969 case FC_STATE_LOOP:
6970 mutex_enter(&port->fp_mutex);
6971 port->fp_state = state;
6972
6973 if (port->fp_offline_tid) {
6974 timeout_id_t tid;
6975
6976 tid = port->fp_offline_tid;
6977 port->fp_offline_tid = NULL;
6978 mutex_exit(&port->fp_mutex);
6979 (void) untimeout(tid);
6980 } else {
6981 mutex_exit(&port->fp_mutex);
6982 }
6983
6984 job = fctl_alloc_job(JOB_PORT_ONLINE,
6985 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP);
6986 if (job == NULL) {
6987 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
6988 "fp_statec_cb() couldn't submit a job "
6989 "to the thread: failing..");
6990
6991 mutex_enter(&port->fp_mutex);
6992 if (--port->fp_statec_busy == 0) {
6993 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
6994 }
6995 mutex_exit(&port->fp_mutex);
6996 return;
6997 }
6998 fctl_enque_job(port, job);
6999 break;
7000
7001 case FC_STATE_RESET_REQUESTED:
7002 mutex_enter(&port->fp_mutex);
7003 port->fp_state = FC_STATE_OFFLINE;
7004 port->fp_soft_state |= FP_SOFT_IN_FCA_RESET;
7005 mutex_exit(&port->fp_mutex);
7006 /* FALLTHROUGH */
7007
7008 case FC_STATE_RESET:
7009 job = fctl_alloc_job(JOB_ULP_NOTIFY,
7010 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP);
7011 if (job == NULL) {
7012 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
7013 "fp_statec_cb() couldn't submit a job"
7014 " to the thread: failing..");
7015
7016 mutex_enter(&port->fp_mutex);
7017 if (--port->fp_statec_busy == 0) {
7018 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
7019 }
7020 mutex_exit(&port->fp_mutex);
7021 return;
7022 }
7023
7024 /* squeeze into some field in the job structure */
7025 job->job_ulp_listlen = FC_PORT_STATE_MASK(state);
7026 fctl_enque_job(port, job);
7027 break;
7028
7029 case FC_STATE_TARGET_PORT_RESET:
7030 (void) fp_ulp_notify(port, state, KM_NOSLEEP);
7031 /* FALLTHROUGH */
7032
7033 case FC_STATE_NAMESERVICE:
7034 /* FALLTHROUGH */
7035
7036 default:
7037 mutex_enter(&port->fp_mutex);
7038 if (--port->fp_statec_busy == 0) {
7039 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
7040 }
7041 mutex_exit(&port->fp_mutex);
7042 break;
7043 }
7044 }
7045
7046
7047 /*
7048 * Register with the Fabric Controller (via SCR) for RSCNs
7049 */
7050 static int
7051 fp_ns_scr(fc_local_port_t *port, job_request_t *job, uchar_t scr_func,
7052 int sleep)
7053 {
7054 uint32_t s_id;
7055 uchar_t class;
7056 fc_scr_req_t payload;
7057 fp_cmd_t *cmd;
7058 fc_packet_t *pkt;
7059
7060 mutex_enter(&port->fp_mutex);
7061 s_id = port->fp_port_id.port_id;
7062 class = port->fp_ns_login_class;
7063 mutex_exit(&port->fp_mutex);
7064
7065 cmd = fp_alloc_pkt(port, sizeof (fc_scr_req_t),
7066 sizeof (fc_scr_resp_t), sleep, NULL);
7067 if (cmd == NULL) {
7068 return (FC_NOMEM);
7069 }
7070
7071 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class;
7072 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
7073 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
7074 cmd->cmd_retry_count = fp_retry_count;
7075 cmd->cmd_ulp_pkt = NULL;
7076
7077 pkt = &cmd->cmd_pkt;
7078 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
7079
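	/* 0xFFFFFD is the Fabric Controller's well-known address */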
7080 fp_els_init(cmd, s_id, 0xFFFFFD, fp_intr, job);
7081
7082 payload.ls_code.ls_code = LA_ELS_SCR;
7083 payload.ls_code.mbz = 0;
7084 payload.scr_rsvd = 0;
7085 payload.scr_func = scr_func;
7086
7087 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
7088 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
7089
7090 job->job_counter = 1;
7091
7092 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
7093 fp_iodone(cmd);
7094 }
7095
7096 return (FC_SUCCESS);
7097 }
7098
7099
7100 /*
7101 * There are basically two methods to determine the total number of
7102 * There are basically two methods to determine the total number of
7103 * devices in the NS database; reading the details of the two
7104 * methods described below, it shouldn't be hard to identify which
7105 * of the two is better.
7106 *
7107 * Method 1.
7108 * Iteratively issue GANs until all port identifiers are walked.
7109 *
7110 * Method 2.
7111 * Issue GID_PT (Get Port Identifiers) with the Maximum Residual
7112 * field in the request CT HEADER set to accommodate only the
7113 * CT HEADER in the response frame. If FC-GS-2 has been
7114 * carefully read, the NS here has a chance to FS_ACC the
7115 * request and indicate the residual size in the FS_ACC.
7116 *
7117 * Method 2 is wonderful, although it's not mandatory for the NS
7118 * to update the Maximum/Residual field, as can be seen in 4.3.1.6
7119 * (note with particular care the use of the auxiliary verb 'may').
7120 */
7121 static int
7122 fp_ns_get_devcount(fc_local_port_t *port, job_request_t *job, int create,
7123 int sleep)
7124 {
7125 int flags;
7126 int rval;
7127 uint32_t src_id;
7128 fctl_ns_req_t *ns_cmd;
7129
7130 ASSERT(!MUTEX_HELD(&port->fp_mutex));
7131
7132 mutex_enter(&port->fp_mutex);
7133 src_id = port->fp_port_id.port_id;
7134 mutex_exit(&port->fp_mutex);
7135
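	/*
	 * Method 2 (GID_PT with a minimal response buffer) is used when
	 * FP_NS_SMART_COUNT is set and devices need not be created;
	 * otherwise fall back to walking the Name Server with GA_NXT
	 * (Method 1).
	 */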
7136 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) {
7137 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pt_t),
7138 sizeof (ns_resp_gid_pt_t), 0,
7139 (FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF), sleep);
7140
7141 if (ns_cmd == NULL) {
7142 return (FC_NOMEM);
7143 }
7144
7145 ns_cmd->ns_cmd_code = NS_GID_PT;
7146 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.port_type
7147 = FC_NS_PORT_NX; /* All port types */
7148 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.rsvd = 0;
7149
7150 } else {
7151 uint32_t ns_flags;
7152
7153 ns_flags = FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF;
7154 if (create) {
7155 ns_flags |= FCTL_NS_CREATE_DEVICE;
7156 }
7157 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t),
7158 sizeof (ns_resp_gan_t), sizeof (int), ns_flags, sleep);
7159
7160 if (ns_cmd == NULL) {
7161 return (FC_NOMEM);
7162 }
7163 ns_cmd->ns_gan_index = 0;
7164 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID;
7165 ns_cmd->ns_cmd_code = NS_GA_NXT;
7166 ns_cmd->ns_gan_max = 0xFFFF;
7167
7168 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = src_id;
7169 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0;
7170 }
7171
7172 flags = job->job_flags;
7173 job->job_flags &= ~JOB_TYPE_FP_ASYNC;
7174 job->job_counter = 1;
7175
7176 rval = fp_ns_query(port, ns_cmd, job, 1, sleep);
7177 job->job_flags = flags;
7178
7179 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) {
7180 uint16_t max_resid;
7181
7182 /*
7183 * Revert to scanning the NS if NS_GID_PT isn't
7184 * helping us figure out total number of devices.
7185 */
7186 if (job->job_result != FC_SUCCESS ||
7187 ns_cmd->ns_resp_hdr.ct_cmdrsp != FS_ACC_IU) {
7188 mutex_enter(&port->fp_mutex);
7189 port->fp_options &= ~FP_NS_SMART_COUNT;
7190 mutex_exit(&port->fp_mutex);
7191
7192 fctl_free_ns_cmd(ns_cmd);
7193 return (fp_ns_get_devcount(port, job, create, sleep));
7194 }
7195
7196 mutex_enter(&port->fp_mutex);
7197 port->fp_total_devices = 1;
7198 max_resid = ns_cmd->ns_resp_hdr.ct_aiusize;
7199 if (max_resid) {
7200 /*
7201			 * Since a port identifier is 4 bytes and max_resid
7202			 * is also in words, max_resid simply indicates
7203			 * the total number of port identifiers not
7204			 * transferred.
7205 */
7206 port->fp_total_devices += max_resid;
7207 }
7208 mutex_exit(&port->fp_mutex);
7209	} else {
7210		mutex_enter(&port->fp_mutex);
7211		port->fp_total_devices = *((int *)ns_cmd->ns_data_buf);
7212		mutex_exit(&port->fp_mutex);
	}
7213 fctl_free_ns_cmd(ns_cmd);
7214
7215 return (rval);
7216 }
7217
7218 /*
7219 * One heck of a function to serve userland.
7220 */
7221 static int
7222 fp_fciocmd(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio)
7223 {
7224 int rval = 0;
7225 int jcode;
7226 uint32_t ret;
7227 uchar_t open_flag;
7228 fcio_t *kfcio;
7229 job_request_t *job;
7230 boolean_t use32 = B_FALSE;
7231
7232 #ifdef _MULTI_DATAMODEL
7233 switch (ddi_model_convert_from(mode & FMODELS)) {
7234 case DDI_MODEL_ILP32:
7235 use32 = B_TRUE;
7236 break;
7237
7238 case DDI_MODEL_NONE:
7239 default:
7240 break;
7241 }
7242 #endif
7243
7244 mutex_enter(&port->fp_mutex);
7245 if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
7246 FP_SOFT_IN_UNSOL_CB)) {
7247 fcio->fcio_errno = FC_STATEC_BUSY;
7248 mutex_exit(&port->fp_mutex);
7249 rval = EAGAIN;
7250 if (fp_fcio_copyout(fcio, data, mode)) {
7251 rval = EFAULT;
7252 }
7253 return (rval);
7254 }
7255 open_flag = port->fp_flag;
7256 mutex_exit(&port->fp_mutex);
7257
7258 if (fp_check_perms(open_flag, fcio->fcio_cmd) != FC_SUCCESS) {
7259 fcio->fcio_errno = FC_FAILURE;
7260 rval = EACCES;
7261 if (fp_fcio_copyout(fcio, data, mode)) {
7262 rval = EFAULT;
7263 }
7264 return (rval);
7265 }
7266
7267 /*
7268 * If an exclusive open was demanded during open, don't let
7269	 * either innocuous or devil threads share the file
7270	 * descriptor and fire down exclusive-access commands.
7271 */
7272 mutex_enter(&port->fp_mutex);
7273 if (port->fp_flag & FP_EXCL) {
7274 if (port->fp_flag & FP_EXCL_BUSY) {
7275 mutex_exit(&port->fp_mutex);
7276 fcio->fcio_errno = FC_FAILURE;
7277 return (EBUSY);
7278 }
7279 port->fp_flag |= FP_EXCL_BUSY;
7280 }
7281 mutex_exit(&port->fp_mutex);
7282
7283 fcio->fcio_errno = FC_SUCCESS;
7284
7285 switch (fcio->fcio_cmd) {
7286 case FCIO_GET_HOST_PARAMS: {
7287 fc_port_dev_t *val;
7288 fc_port_dev32_t *val32;
7289 int index;
7290 int lilp_device_count;
7291 fc_lilpmap_t *lilp_map;
7292 uchar_t *alpa_list;
7293
7294 if (use32 == B_TRUE) {
7295 if (fcio->fcio_olen != sizeof (*val32) ||
7296 fcio->fcio_xfer != FCIO_XFER_READ) {
7297 rval = EINVAL;
7298 break;
7299 }
7300 } else {
7301 if (fcio->fcio_olen != sizeof (*val) ||
7302 fcio->fcio_xfer != FCIO_XFER_READ) {
7303 rval = EINVAL;
7304 break;
7305 }
7306 }
7307
7308 val = kmem_zalloc(sizeof (*val), KM_SLEEP);
7309
7310 mutex_enter(&port->fp_mutex);
7311 val->dev_did = port->fp_port_id;
7312 val->dev_hard_addr = port->fp_hard_addr;
7313 val->dev_pwwn = port->fp_service_params.nport_ww_name;
7314 val->dev_nwwn = port->fp_service_params.node_ww_name;
7315 val->dev_state = port->fp_state;
7316
7317 lilp_map = &port->fp_lilp_map;
7318 alpa_list = &lilp_map->lilp_alpalist[0];
7319 lilp_device_count = lilp_map->lilp_length;
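		/*
		 * Locate our own AL_PA in the LILP map so the loop position
		 * can be reported back to the caller.
		 */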
7320 for (index = 0; index < lilp_device_count; index++) {
7321 uint32_t d_id;
7322
7323 d_id = alpa_list[index];
7324 if (d_id == port->fp_port_id.port_id) {
7325 break;
7326 }
7327 }
7328 val->dev_did.priv_lilp_posit = (uint8_t)(index & 0xff);
7329
7330 bcopy(port->fp_fc4_types, val->dev_type,
7331 sizeof (port->fp_fc4_types));
7332 mutex_exit(&port->fp_mutex);
7333
7334 if (use32 == B_TRUE) {
7335 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP);
7336
7337 val32->dev_did = val->dev_did;
7338 val32->dev_hard_addr = val->dev_hard_addr;
7339 val32->dev_pwwn = val->dev_pwwn;
7340 val32->dev_nwwn = val->dev_nwwn;
7341 val32->dev_state = val->dev_state;
7342 val32->dev_did.priv_lilp_posit =
7343 val->dev_did.priv_lilp_posit;
7344
7345 bcopy(val->dev_type, val32->dev_type,
7346 sizeof (port->fp_fc4_types));
7347
7348 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf,
7349 fcio->fcio_olen, mode) == 0) {
7350 if (fp_fcio_copyout(fcio, data, mode)) {
7351 rval = EFAULT;
7352 }
7353 } else {
7354 rval = EFAULT;
7355 }
7356
7357 kmem_free(val32, sizeof (*val32));
7358 } else {
7359 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf,
7360 fcio->fcio_olen, mode) == 0) {
7361 if (fp_fcio_copyout(fcio, data, mode)) {
7362 rval = EFAULT;
7363 }
7364 } else {
7365 rval = EFAULT;
7366 }
7367 }
7368
7369 /* need to free "val" here */
7370 kmem_free(val, sizeof (*val));
7371 break;
7372 }
7373
7374 case FCIO_GET_OTHER_ADAPTER_PORTS: {
7375 uint32_t index;
7376 char *tmpPath;
7377 fc_local_port_t *tmpPort;
7378
7379 if (fcio->fcio_olen < MAXPATHLEN ||
7380 fcio->fcio_ilen != sizeof (uint32_t)) {
7381 rval = EINVAL;
7382 break;
7383 }
7384 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof (index), mode)) {
7385 rval = EFAULT;
7386 break;
7387 }
7388
7389 tmpPort = fctl_get_adapter_port_by_index(port, index);
7390 if (tmpPort == NULL) {
7391 FP_TRACE(FP_NHEAD1(9, 0),
7392 "User supplied index out of range");
7393 fcio->fcio_errno = FC_BADPORT;
7394 rval = EFAULT;
7395 if (fp_fcio_copyout(fcio, data, mode)) {
7396 rval = EFAULT;
7397 }
7398 break;
7399 }
7400
7401 tmpPath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
7402 (void) ddi_pathname(tmpPort->fp_port_dip, tmpPath);
7403 if (fp_copyout((void *)tmpPath, (void *)fcio->fcio_obuf,
7404 MAXPATHLEN, mode) == 0) {
7405 if (fp_fcio_copyout(fcio, data, mode)) {
7406 rval = EFAULT;
7407 }
7408 } else {
7409 rval = EFAULT;
7410 }
7411 kmem_free(tmpPath, MAXPATHLEN);
7412 break;
7413 }
7414
7415 case FCIO_NPIV_GET_ADAPTER_ATTRIBUTES:
7416 case FCIO_GET_ADAPTER_ATTRIBUTES: {
7417 fc_hba_adapter_attributes_t *val;
7418 fc_hba_adapter_attributes32_t *val32;
7419
7420 if (use32 == B_TRUE) {
7421 if (fcio->fcio_olen < sizeof (*val32) ||
7422 fcio->fcio_xfer != FCIO_XFER_READ) {
7423 rval = EINVAL;
7424 break;
7425 }
7426 } else {
7427 if (fcio->fcio_olen < sizeof (*val) ||
7428 fcio->fcio_xfer != FCIO_XFER_READ) {
7429 rval = EINVAL;
7430 break;
7431 }
7432 }
7433
7434 val = kmem_zalloc(sizeof (*val), KM_SLEEP);
7435 val->version = FC_HBA_ADAPTER_ATTRIBUTES_VERSION;
7436 mutex_enter(&port->fp_mutex);
7437 bcopy(port->fp_hba_port_attrs.manufacturer,
7438 val->Manufacturer,
7439 sizeof (val->Manufacturer));
7440 bcopy(port->fp_hba_port_attrs.serial_number,
7441 val->SerialNumber,
7442 sizeof (val->SerialNumber));
7443 bcopy(port->fp_hba_port_attrs.model,
7444 val->Model,
7445 sizeof (val->Model));
7446 bcopy(port->fp_hba_port_attrs.model_description,
7447 val->ModelDescription,
7448 sizeof (val->ModelDescription));
7449 bcopy(port->fp_sym_node_name, val->NodeSymbolicName,
7450 port->fp_sym_node_namelen);
7451 bcopy(port->fp_hba_port_attrs.hardware_version,
7452 val->HardwareVersion,
7453 sizeof (val->HardwareVersion));
7454 bcopy(port->fp_hba_port_attrs.option_rom_version,
7455 val->OptionROMVersion,
7456 sizeof (val->OptionROMVersion));
7457 bcopy(port->fp_hba_port_attrs.firmware_version,
7458 val->FirmwareVersion,
7459 sizeof (val->FirmwareVersion));
7460 val->VendorSpecificID =
7461 port->fp_hba_port_attrs.vendor_specific_id;
7462 bcopy(&port->fp_service_params.node_ww_name.raw_wwn,
7463 &val->NodeWWN.raw_wwn,
7464 sizeof (val->NodeWWN.raw_wwn));
7465
7466
7467 bcopy(port->fp_hba_port_attrs.driver_name,
7468 val->DriverName,
7469 sizeof (val->DriverName));
7470 bcopy(port->fp_hba_port_attrs.driver_version,
7471 val->DriverVersion,
7472 sizeof (val->DriverVersion));
7473 mutex_exit(&port->fp_mutex);
7474
7475 if (fcio->fcio_cmd == FCIO_GET_ADAPTER_ATTRIBUTES) {
7476 val->NumberOfPorts = fctl_count_fru_ports(port, 0);
7477 } else {
7478 val->NumberOfPorts = fctl_count_fru_ports(port, 1);
7479 }
7480
7481 if (use32 == B_TRUE) {
7482 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP);
7483 val32->version = val->version;
7484 bcopy(val->Manufacturer, val32->Manufacturer,
7485 sizeof (val->Manufacturer));
7486 bcopy(val->SerialNumber, val32->SerialNumber,
7487 sizeof (val->SerialNumber));
7488 bcopy(val->Model, val32->Model,
7489 sizeof (val->Model));
7490 bcopy(val->ModelDescription, val32->ModelDescription,
7491 sizeof (val->ModelDescription));
7492 bcopy(val->NodeSymbolicName, val32->NodeSymbolicName,
7493 sizeof (val->NodeSymbolicName));
7494 bcopy(val->HardwareVersion, val32->HardwareVersion,
7495 sizeof (val->HardwareVersion));
7496 bcopy(val->OptionROMVersion, val32->OptionROMVersion,
7497 sizeof (val->OptionROMVersion));
7498 bcopy(val->FirmwareVersion, val32->FirmwareVersion,
7499 sizeof (val->FirmwareVersion));
7500 val32->VendorSpecificID = val->VendorSpecificID;
7501 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn,
7502 sizeof (val->NodeWWN.raw_wwn));
7503 bcopy(val->DriverName, val32->DriverName,
7504 sizeof (val->DriverName));
7505 bcopy(val->DriverVersion, val32->DriverVersion,
7506 sizeof (val->DriverVersion));
7507
7508 val32->NumberOfPorts = val->NumberOfPorts;
7509
7510 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf,
7511 fcio->fcio_olen, mode) == 0) {
7512 if (fp_fcio_copyout(fcio, data, mode)) {
7513 rval = EFAULT;
7514 }
7515 } else {
7516 rval = EFAULT;
7517 }
7518
7519 kmem_free(val32, sizeof (*val32));
7520 } else {
7521 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf,
7522 fcio->fcio_olen, mode) == 0) {
7523 if (fp_fcio_copyout(fcio, data, mode)) {
7524 rval = EFAULT;
7525 }
7526 } else {
7527 rval = EFAULT;
7528 }
7529 }
7530
7531 kmem_free(val, sizeof (*val));
7532 break;
7533 }
7534
7535 case FCIO_GET_NPIV_ATTRIBUTES: {
7536 fc_hba_npiv_attributes_t *attrs;
7537
7538 attrs = kmem_zalloc(sizeof (*attrs), KM_SLEEP);
7539 mutex_enter(&port->fp_mutex);
7540 bcopy(&port->fp_service_params.node_ww_name.raw_wwn,
7541 &attrs->NodeWWN.raw_wwn,
7542 sizeof (attrs->NodeWWN.raw_wwn));
7543 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn,
7544 &attrs->PortWWN.raw_wwn,
7545 sizeof (attrs->PortWWN.raw_wwn));
7546 mutex_exit(&port->fp_mutex);
7547 if (fp_copyout((void *)attrs, (void *)fcio->fcio_obuf,
7548 fcio->fcio_olen, mode) == 0) {
7549 if (fp_fcio_copyout(fcio, data, mode)) {
7550 rval = EFAULT;
7551 }
7552 } else {
7553 rval = EFAULT;
7554 }
7555 kmem_free(attrs, sizeof (*attrs));
7556 break;
7557 }
7558
7559 case FCIO_DELETE_NPIV_PORT: {
7560 fc_local_port_t *tmpport;
7561 char ww_pname[17];
7562 la_wwn_t vwwn[1];
7563
7564 FP_TRACE(FP_NHEAD1(1, 0), "Delete NPIV Port");
7565 if (ddi_copyin(fcio->fcio_ibuf,
7566 &vwwn, sizeof (la_wwn_t), mode)) {
7567 rval = EFAULT;
7568 break;
7569 }
7570
7571 fc_wwn_to_str(&vwwn[0], ww_pname);
7572 FP_TRACE(FP_NHEAD1(3, 0),
7573 "Delete NPIV Port %s", ww_pname);
7574 tmpport = fc_delete_npiv_port(port, &vwwn[0]);
7575 if (tmpport == NULL) {
7576 FP_TRACE(FP_NHEAD1(3, 0),
7577			    "Delete NPIV Port: not found");
7578 rval = EFAULT;
7579 } else {
7580 fc_local_port_t *nextport = tmpport->fp_port_next;
7581 fc_local_port_t *prevport = tmpport->fp_port_prev;
7582 int portlen, portindex, ret;
7583
7584 portlen = sizeof (portindex);
7585 ret = ddi_prop_op(DDI_DEV_T_ANY,
7586 tmpport->fp_port_dip, PROP_LEN_AND_VAL_BUF,
7587 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port",
7588 (caddr_t)&portindex, &portlen);
7589 if (ret != DDI_SUCCESS) {
7590 rval = EFAULT;
7591 break;
7592 }
7593 if (ndi_devi_offline(tmpport->fp_port_dip,
7594 NDI_DEVI_REMOVE) != DDI_SUCCESS) {
7595 FP_TRACE(FP_NHEAD1(1, 0),
7596 "Delete NPIV Port failed");
7597 mutex_enter(&port->fp_mutex);
7598 tmpport->fp_npiv_state = 0;
7599 mutex_exit(&port->fp_mutex);
7600 rval = EFAULT;
7601 } else {
7602 mutex_enter(&port->fp_mutex);
7603 nextport->fp_port_prev = prevport;
7604 prevport->fp_port_next = nextport;
7605 if (port == port->fp_port_next) {
7606 port->fp_port_next =
7607 port->fp_port_prev = NULL;
7608 }
7609 port->fp_npiv_portnum--;
7610 FP_TRACE(FP_NHEAD1(3, 0),
7611 "Delete NPIV Port %d", portindex);
7612 port->fp_npiv_portindex[portindex-1] = 0;
7613 mutex_exit(&port->fp_mutex);
7614 }
7615 }
7616 break;
7617 }
7618
7619 case FCIO_CREATE_NPIV_PORT: {
7620 char ww_nname[17], ww_pname[17];
7621 la_npiv_create_entry_t entrybuf;
7622 uint32_t vportindex = 0;
7623 int npiv_ret = 0;
7624 char *portname, *fcaname;
7625
7626 portname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
7627 (void) ddi_pathname(port->fp_port_dip, portname);
7628 fcaname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
7629 (void) ddi_pathname(port->fp_fca_dip, fcaname);
7630 FP_TRACE(FP_NHEAD1(1, 0),
7631 "Create NPIV port %s %s %s", portname, fcaname,
7632 ddi_driver_name(port->fp_fca_dip));
7633 kmem_free(portname, MAXPATHLEN);
7634 kmem_free(fcaname, MAXPATHLEN);
7635 if (ddi_copyin(fcio->fcio_ibuf,
7636 &entrybuf, sizeof (la_npiv_create_entry_t), mode)) {
7637 rval = EFAULT;
7638 break;
7639 }
7640
7641 fc_wwn_to_str(&entrybuf.VNodeWWN, ww_nname);
7642 fc_wwn_to_str(&entrybuf.VPortWWN, ww_pname);
7643 vportindex = entrybuf.vindex;
7644 FP_TRACE(FP_NHEAD1(3, 0),
7645 "Create NPIV Port %s %s %d",
7646 ww_nname, ww_pname, vportindex);
7647
7648 if (fc_get_npiv_port(port, &entrybuf.VPortWWN)) {
7649 rval = EFAULT;
7650 break;
7651 }
7652 npiv_ret = fctl_fca_create_npivport(port->fp_fca_dip,
7653 port->fp_port_dip, ww_nname, ww_pname, &vportindex);
7654 if (npiv_ret == NDI_SUCCESS) {
7655 mutex_enter(&port->fp_mutex);
7656 port->fp_npiv_portnum++;
7657 mutex_exit(&port->fp_mutex);
7658 if (fp_copyout((void *)&vportindex,
7659 (void *)fcio->fcio_obuf,
7660 fcio->fcio_olen, mode) == 0) {
7661 if (fp_fcio_copyout(fcio, data, mode)) {
7662 rval = EFAULT;
7663 }
7664 } else {
7665 rval = EFAULT;
7666 }
7667 } else {
7668 rval = EFAULT;
7669 }
7670 FP_TRACE(FP_NHEAD1(3, 0),
7671 "Create NPIV Port %d %d", npiv_ret, vportindex);
7672 break;
7673 }
7674
7675 case FCIO_GET_NPIV_PORT_LIST: {
7676 fc_hba_npiv_port_list_t *list;
7677 int count;
7678
7679 if ((fcio->fcio_xfer != FCIO_XFER_READ) ||
7680 (fcio->fcio_olen == 0) || (fcio->fcio_obuf == 0)) {
7681 rval = EINVAL;
7682 break;
7683 }
7684
7685 list = kmem_zalloc(fcio->fcio_olen, KM_SLEEP);
7686 list->version = FC_HBA_LIST_VERSION;
7687
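		/*
		 * Work out how many MAXPATHLEN-sized path entries fit in the
		 * caller's buffer.  If there are more NPIV ports than that,
		 * just report the count so the caller can retry with a
		 * larger buffer.
		 */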
7688 count = (fcio->fcio_olen -
7689 (int)sizeof (fc_hba_npiv_port_list_t))/MAXPATHLEN + 1;
7690 if (port->fp_npiv_portnum > count) {
7691 list->numAdapters = port->fp_npiv_portnum;
7692 } else {
7693 /* build npiv port list */
7694 count = fc_ulp_get_npiv_port_list(port,
7695 (char *)list->hbaPaths);
7696 if (count < 0) {
7697 rval = ENXIO;
7698 FP_TRACE(FP_NHEAD1(1, 0),
7699 "Build NPIV Port List error");
7700 kmem_free(list, fcio->fcio_olen);
7701 break;
7702 }
7703 list->numAdapters = count;
7704 }
7705
7706 if (fp_copyout((void *)list, (void *)fcio->fcio_obuf,
7707 fcio->fcio_olen, mode) == 0) {
7708 if (fp_fcio_copyout(fcio, data, mode)) {
7709 FP_TRACE(FP_NHEAD1(1, 0),
7710 "Copy NPIV Port data error");
7711 rval = EFAULT;
7712 }
7713 } else {
7714 FP_TRACE(FP_NHEAD1(1, 0), "Copy NPIV Port List error");
7715 rval = EFAULT;
7716 }
7717 kmem_free(list, fcio->fcio_olen);
7718 break;
7719 }
7720
7721 case FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES: {
7722 fc_hba_port_npiv_attributes_t *val;
7723
7724 val = kmem_zalloc(sizeof (*val), KM_SLEEP);
7725 val->version = FC_HBA_PORT_NPIV_ATTRIBUTES_VERSION;
7726
7727 mutex_enter(&port->fp_mutex);
7728 val->npivflag = port->fp_npiv_flag;
7729 val->lastChange = port->fp_last_change;
7730 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn,
7731 &val->PortWWN.raw_wwn,
7732 sizeof (val->PortWWN.raw_wwn));
7733 bcopy(&port->fp_service_params.node_ww_name.raw_wwn,
7734 &val->NodeWWN.raw_wwn,
7735 sizeof (val->NodeWWN.raw_wwn));
7736 mutex_exit(&port->fp_mutex);
7737
7738 val->NumberOfNPIVPorts = fc_ulp_get_npiv_port_num(port);
7739 if (port->fp_npiv_type != FC_NPIV_PORT) {
7740 val->MaxNumberOfNPIVPorts =
7741 port->fp_fca_tran->fca_num_npivports;
7742 } else {
7743 val->MaxNumberOfNPIVPorts = 0;
7744 }
7745
7746 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf,
7747 fcio->fcio_olen, mode) == 0) {
7748 if (fp_fcio_copyout(fcio, data, mode)) {
7749 rval = EFAULT;
7750 }
7751 } else {
7752 rval = EFAULT;
7753 }
7754 kmem_free(val, sizeof (*val));
7755 break;
7756 }
7757
7758 case FCIO_GET_ADAPTER_PORT_ATTRIBUTES: {
7759 fc_hba_port_attributes_t *val;
7760 fc_hba_port_attributes32_t *val32;
7761
7762 if (use32 == B_TRUE) {
7763 if (fcio->fcio_olen < sizeof (*val32) ||
7764 fcio->fcio_xfer != FCIO_XFER_READ) {
7765 rval = EINVAL;
7766 break;
7767 }
7768 } else {
7769 if (fcio->fcio_olen < sizeof (*val) ||
7770 fcio->fcio_xfer != FCIO_XFER_READ) {
7771 rval = EINVAL;
7772 break;
7773 }
7774 }
7775
7776 val = kmem_zalloc(sizeof (*val), KM_SLEEP);
7777 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION;
7778 mutex_enter(&port->fp_mutex);
7779 val->lastChange = port->fp_last_change;
7780 val->fp_minor = port->fp_instance;
7781
7782 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn,
7783 &val->PortWWN.raw_wwn,
7784 sizeof (val->PortWWN.raw_wwn));
7785 bcopy(&port->fp_service_params.node_ww_name.raw_wwn,
7786 &val->NodeWWN.raw_wwn,
7787 sizeof (val->NodeWWN.raw_wwn));
7788 bcopy(&port->fp_fabric_name, &val->FabricName.raw_wwn,
7789 sizeof (val->FabricName.raw_wwn));
7790
7791 val->PortFcId = port->fp_port_id.port_id;
7792
7793 switch (FC_PORT_STATE_MASK(port->fp_state)) {
7794 case FC_STATE_OFFLINE:
7795 val->PortState = FC_HBA_PORTSTATE_OFFLINE;
7796 break;
7797 case FC_STATE_ONLINE:
7798 case FC_STATE_LOOP:
7799 case FC_STATE_NAMESERVICE:
7800 val->PortState = FC_HBA_PORTSTATE_ONLINE;
7801 break;
7802 default:
7803 val->PortState = FC_HBA_PORTSTATE_UNKNOWN;
7804 break;
7805 }
7806
7807 /* Translate from LV to FC-HBA port type codes */
7808 switch (port->fp_port_type.port_type) {
7809 case FC_NS_PORT_N:
7810 val->PortType = FC_HBA_PORTTYPE_NPORT;
7811 break;
7812 case FC_NS_PORT_NL:
7813 /* Actually means loop for us */
7814 val->PortType = FC_HBA_PORTTYPE_LPORT;
7815 break;
7816 case FC_NS_PORT_F:
7817 val->PortType = FC_HBA_PORTTYPE_FPORT;
7818 break;
7819 case FC_NS_PORT_FL:
7820 val->PortType = FC_HBA_PORTTYPE_FLPORT;
7821 break;
7822 case FC_NS_PORT_E:
7823 val->PortType = FC_HBA_PORTTYPE_EPORT;
7824 break;
7825 default:
7826 val->PortType = FC_HBA_PORTTYPE_OTHER;
7827 break;
7828 }
7829
7830
7831 /*
7832 * If fp has decided that the topology is public loop,
7833 * we will indicate that using the appropriate
7834 * FC HBA API constant.
7835 */
7836 switch (port->fp_topology) {
7837 case FC_TOP_PUBLIC_LOOP:
7838 val->PortType = FC_HBA_PORTTYPE_NLPORT;
7839 break;
7840
7841 case FC_TOP_PT_PT:
7842 val->PortType = FC_HBA_PORTTYPE_PTP;
7843 break;
7844
7845 case FC_TOP_UNKNOWN:
7846 /*
7847 * This should cover the case where nothing is connected
7848			 * to the port. Crystal+ is probably an exception here.
7849			 * For Crystal+, port 0 will come up as private loop
7850			 * (i.e. fp_bind_state will be FC_STATE_LOOP) even when
7851 * nothing is connected to it.
7852 * Current plan is to let userland handle this.
7853 */
7854 if (port->fp_bind_state == FC_STATE_OFFLINE) {
7855 val->PortType = FC_HBA_PORTTYPE_UNKNOWN;
7856 }
7857 break;
7858
7859 default:
7860 /*
7861 * Do Nothing.
7862 * Unused:
7863 * val->PortType = FC_HBA_PORTTYPE_GPORT;
7864 */
7865 break;
7866 }
7867
7868 val->PortSupportedClassofService =
7869 port->fp_hba_port_attrs.supported_cos;
7870 val->PortSupportedFc4Types[0] = 0;
7871 bcopy(port->fp_fc4_types, val->PortActiveFc4Types,
7872 sizeof (val->PortActiveFc4Types));
7873 bcopy(port->fp_sym_port_name, val->PortSymbolicName,
7874 port->fp_sym_port_namelen);
7875 val->PortSupportedSpeed =
7876 port->fp_hba_port_attrs.supported_speed;
7877
7878 switch (FC_PORT_SPEED_MASK(port->fp_state)) {
7879 case FC_STATE_1GBIT_SPEED:
7880 val->PortSpeed = FC_HBA_PORTSPEED_1GBIT;
7881 break;
7882 case FC_STATE_2GBIT_SPEED:
7883 val->PortSpeed = FC_HBA_PORTSPEED_2GBIT;
7884 break;
7885 case FC_STATE_4GBIT_SPEED:
7886 val->PortSpeed = FC_HBA_PORTSPEED_4GBIT;
7887 break;
7888 case FC_STATE_8GBIT_SPEED:
7889 val->PortSpeed = FC_HBA_PORTSPEED_8GBIT;
7890 break;
7891 case FC_STATE_10GBIT_SPEED:
7892 val->PortSpeed = FC_HBA_PORTSPEED_10GBIT;
7893 break;
7894 case FC_STATE_16GBIT_SPEED:
7895 val->PortSpeed = FC_HBA_PORTSPEED_16GBIT;
7896 break;
7897 default:
7898 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN;
7899 break;
7900 }
7901 val->PortMaxFrameSize = port->fp_hba_port_attrs.max_frame_size;
7902 val->NumberofDiscoveredPorts = port->fp_dev_count;
7903 mutex_exit(&port->fp_mutex);
7904
7905 if (use32 == B_TRUE) {
7906 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP);
7907 val32->version = val->version;
7908 val32->lastChange = val->lastChange;
7909 val32->fp_minor = val->fp_minor;
7910
7911 bcopy(&val->PortWWN.raw_wwn, &val32->PortWWN.raw_wwn,
7912 sizeof (val->PortWWN.raw_wwn));
7913 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn,
7914 sizeof (val->NodeWWN.raw_wwn));
7915 val32->PortFcId = val->PortFcId;
7916 val32->PortState = val->PortState;
7917 val32->PortType = val->PortType;
7918
7919 val32->PortSupportedClassofService =
7920 val->PortSupportedClassofService;
7921 bcopy(val->PortActiveFc4Types,
7922 val32->PortActiveFc4Types,
7923 sizeof (val->PortActiveFc4Types));
7924 bcopy(val->PortSymbolicName, val32->PortSymbolicName,
7925 sizeof (val->PortSymbolicName));
7926 bcopy(&val->FabricName, &val32->FabricName,
7927 sizeof (val->FabricName.raw_wwn));
7928 val32->PortSupportedSpeed = val->PortSupportedSpeed;
7929 val32->PortSpeed = val->PortSpeed;
7930
7931 val32->PortMaxFrameSize = val->PortMaxFrameSize;
7932 val32->NumberofDiscoveredPorts =
7933 val->NumberofDiscoveredPorts;
7934
7935 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf,
7936 fcio->fcio_olen, mode) == 0) {
7937 if (fp_fcio_copyout(fcio, data, mode)) {
7938 rval = EFAULT;
7939 }
7940 } else {
7941 rval = EFAULT;
7942 }
7943
7944 kmem_free(val32, sizeof (*val32));
7945 } else {
7946 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf,
7947 fcio->fcio_olen, mode) == 0) {
7948 if (fp_fcio_copyout(fcio, data, mode)) {
7949 rval = EFAULT;
7950 }
7951 } else {
7952 rval = EFAULT;
7953 }
7954 }
7955
7956 kmem_free(val, sizeof (*val));
7957 break;
7958 }
7959
7960 case FCIO_GET_DISCOVERED_PORT_ATTRIBUTES: {
7961 fc_hba_port_attributes_t *val;
7962 fc_hba_port_attributes32_t *val32;
7963 uint32_t index = 0;
7964 fc_remote_port_t *tmp_pd;
7965
7966 if (use32 == B_TRUE) {
7967 if (fcio->fcio_olen < sizeof (*val32) ||
7968 fcio->fcio_xfer != FCIO_XFER_READ) {
7969 rval = EINVAL;
7970 break;
7971 }
7972 } else {
7973 if (fcio->fcio_olen < sizeof (*val) ||
7974 fcio->fcio_xfer != FCIO_XFER_READ) {
7975 rval = EINVAL;
7976 break;
7977 }
7978 }
7979
7980 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof (index), mode)) {
7981 rval = EFAULT;
7982 break;
7983 }
7984
7985 if (index >= port->fp_dev_count) {
7986 FP_TRACE(FP_NHEAD1(9, 0),
7987 "User supplied index out of range");
7988 fcio->fcio_errno = FC_OUTOFBOUNDS;
7989 rval = EINVAL;
7990 if (fp_fcio_copyout(fcio, data, mode)) {
7991 rval = EFAULT;
7992 }
7993 break;
7994 }
7995
7996 val = kmem_zalloc(sizeof (*val), KM_SLEEP);
7997 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION;
7998
7999 mutex_enter(&port->fp_mutex);
8000 tmp_pd = fctl_lookup_pd_by_index(port, index);
8001
8002 if (tmp_pd == NULL) {
8003 fcio->fcio_errno = FC_BADPORT;
8004 rval = EINVAL;
8005 } else {
8006 val->lastChange = port->fp_last_change;
8007 val->fp_minor = port->fp_instance;
8008
8009 mutex_enter(&tmp_pd->pd_mutex);
8010 bcopy(&tmp_pd->pd_port_name.raw_wwn,
8011 &val->PortWWN.raw_wwn,
8012 sizeof (val->PortWWN.raw_wwn));
8013 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn,
8014 &val->NodeWWN.raw_wwn,
8015 sizeof (val->NodeWWN.raw_wwn));
8016 val->PortFcId = tmp_pd->pd_port_id.port_id;
8017 bcopy(tmp_pd->pd_spn, val->PortSymbolicName,
8018 tmp_pd->pd_spn_len);
8019 val->PortSupportedClassofService = tmp_pd->pd_cos;
8020 /*
8021			 * We assume the sizes of pd_fc4types and
8022			 * PortActiveFc4Types remain the same. We could
8023			 * add a check for it, but decided it was unneeded.
8024 */
8025 bcopy((caddr_t)tmp_pd->pd_fc4types,
8026 val->PortActiveFc4Types,
8027 sizeof (tmp_pd->pd_fc4types));
8028 val->PortState =
8029 fp_map_remote_port_state(tmp_pd->pd_state);
8030 mutex_exit(&tmp_pd->pd_mutex);
8031
8032 val->PortType = FC_HBA_PORTTYPE_UNKNOWN;
8033 val->PortSupportedFc4Types[0] = 0;
8034 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN;
8035 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN;
8036 val->PortMaxFrameSize = 0;
8037 val->NumberofDiscoveredPorts = 0;
8038
8039 if (use32 == B_TRUE) {
8040 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP);
8041 val32->version = val->version;
8042 val32->lastChange = val->lastChange;
8043 val32->fp_minor = val->fp_minor;
8044
8045 bcopy(&val->PortWWN.raw_wwn,
8046 &val32->PortWWN.raw_wwn,
8047 sizeof (val->PortWWN.raw_wwn));
8048 bcopy(&val->NodeWWN.raw_wwn,
8049 &val32->NodeWWN.raw_wwn,
8050 sizeof (val->NodeWWN.raw_wwn));
8051 val32->PortFcId = val->PortFcId;
8052 bcopy(val->PortSymbolicName,
8053 val32->PortSymbolicName,
8054 sizeof (val->PortSymbolicName));
8055 val32->PortSupportedClassofService =
8056 val->PortSupportedClassofService;
8057 bcopy(val->PortActiveFc4Types,
8058 val32->PortActiveFc4Types,
8059 sizeof (tmp_pd->pd_fc4types));
8060
8061 val32->PortType = val->PortType;
8062 val32->PortState = val->PortState;
8063 val32->PortSupportedFc4Types[0] =
8064 val->PortSupportedFc4Types[0];
8065 val32->PortSupportedSpeed =
8066 val->PortSupportedSpeed;
8067 val32->PortSpeed = val->PortSpeed;
8068 val32->PortMaxFrameSize =
8069 val->PortMaxFrameSize;
8070 val32->NumberofDiscoveredPorts =
8071 val->NumberofDiscoveredPorts;
8072
8073 if (fp_copyout((void *)val32,
8074 (void *)fcio->fcio_obuf,
8075 fcio->fcio_olen, mode) == 0) {
8076 if (fp_fcio_copyout(fcio,
8077 data, mode)) {
8078 rval = EFAULT;
8079 }
8080 } else {
8081 rval = EFAULT;
8082 }
8083
8084 kmem_free(val32, sizeof (*val32));
8085 } else {
8086 if (fp_copyout((void *)val,
8087 (void *)fcio->fcio_obuf,
8088 fcio->fcio_olen, mode) == 0) {
8089 if (fp_fcio_copyout(fcio, data, mode)) {
8090 rval = EFAULT;
8091 }
8092 } else {
8093 rval = EFAULT;
8094 }
8095 }
8096 }
8097
8098 mutex_exit(&port->fp_mutex);
8099 kmem_free(val, sizeof (*val));
8100 break;
8101 }
8102
8103 case FCIO_GET_PORT_ATTRIBUTES: {
8104 fc_hba_port_attributes_t *val;
8105 fc_hba_port_attributes32_t *val32;
8106 la_wwn_t wwn;
8107 fc_remote_port_t *tmp_pd;
8108
8109 if (use32 == B_TRUE) {
8110 if (fcio->fcio_olen < sizeof (*val32) ||
8111 fcio->fcio_xfer != FCIO_XFER_READ) {
8112 rval = EINVAL;
8113 break;
8114 }
8115 } else {
8116 if (fcio->fcio_olen < sizeof (*val) ||
8117 fcio->fcio_xfer != FCIO_XFER_READ) {
8118 rval = EINVAL;
8119 break;
8120 }
8121 }
8122
8123 if (ddi_copyin(fcio->fcio_ibuf, &wwn, sizeof (wwn), mode)) {
8124 rval = EFAULT;
8125 break;
8126 }
8127
8128 val = kmem_zalloc(sizeof (*val), KM_SLEEP);
8129 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION;
8130
8131 mutex_enter(&port->fp_mutex);
8132 tmp_pd = fctl_lookup_pd_by_wwn(port, wwn);
8133 val->lastChange = port->fp_last_change;
8134 val->fp_minor = port->fp_instance;
8135 mutex_exit(&port->fp_mutex);
8136
8137 if (tmp_pd == NULL) {
8138 fcio->fcio_errno = FC_BADWWN;
8139 rval = EINVAL;
8140 } else {
8141 mutex_enter(&tmp_pd->pd_mutex);
8142 bcopy(&tmp_pd->pd_port_name.raw_wwn,
8143 &val->PortWWN.raw_wwn,
8144 sizeof (val->PortWWN.raw_wwn));
8145 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn,
8146 &val->NodeWWN.raw_wwn,
8147 sizeof (val->NodeWWN.raw_wwn));
8148 val->PortFcId = tmp_pd->pd_port_id.port_id;
8149 bcopy(tmp_pd->pd_spn, val->PortSymbolicName,
8150 tmp_pd->pd_spn_len);
8151 val->PortSupportedClassofService = tmp_pd->pd_cos;
8152 val->PortType = FC_HBA_PORTTYPE_UNKNOWN;
8153 val->PortState =
8154 fp_map_remote_port_state(tmp_pd->pd_state);
8155 val->PortSupportedFc4Types[0] = 0;
8156 /*
8157			 * We assume the sizes of pd_fc4types and
8158			 * PortActiveFc4Types remain the same. We could
8159			 * add a check for it, but decided it was unneeded.
8160 */
8161 bcopy((caddr_t)tmp_pd->pd_fc4types,
8162 val->PortActiveFc4Types,
8163 sizeof (tmp_pd->pd_fc4types));
8164 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN;
8165 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN;
8166 val->PortMaxFrameSize = 0;
8167 val->NumberofDiscoveredPorts = 0;
8168 mutex_exit(&tmp_pd->pd_mutex);
8169
8170 if (use32 == B_TRUE) {
8171 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP);
8172 val32->version = val->version;
8173 val32->lastChange = val->lastChange;
8174 val32->fp_minor = val->fp_minor;
8175 bcopy(&val->PortWWN.raw_wwn,
8176 &val32->PortWWN.raw_wwn,
8177 sizeof (val->PortWWN.raw_wwn));
8178 bcopy(&val->NodeWWN.raw_wwn,
8179 &val32->NodeWWN.raw_wwn,
8180 sizeof (val->NodeWWN.raw_wwn));
8181 val32->PortFcId = val->PortFcId;
8182 bcopy(val->PortSymbolicName,
8183 val32->PortSymbolicName,
8184 sizeof (val->PortSymbolicName));
8185 val32->PortSupportedClassofService =
8186 val->PortSupportedClassofService;
8187 val32->PortType = val->PortType;
8188 val32->PortState = val->PortState;
8189 val32->PortSupportedFc4Types[0] =
8190 val->PortSupportedFc4Types[0];
8191 bcopy(val->PortActiveFc4Types,
8192 val32->PortActiveFc4Types,
8193 sizeof (tmp_pd->pd_fc4types));
8194 val32->PortSupportedSpeed =
8195 val->PortSupportedSpeed;
8196 val32->PortSpeed = val->PortSpeed;
8197 val32->PortMaxFrameSize = val->PortMaxFrameSize;
8198 val32->NumberofDiscoveredPorts =
8199 val->NumberofDiscoveredPorts;
8200
8201 if (fp_copyout((void *)val32,
8202 (void *)fcio->fcio_obuf,
8203 fcio->fcio_olen, mode) == 0) {
8204 if (fp_fcio_copyout(fcio, data, mode)) {
8205 rval = EFAULT;
8206 }
8207 } else {
8208 rval = EFAULT;
8209 }
8210
8211 kmem_free(val32, sizeof (*val32));
8212 } else {
8213 if (fp_copyout((void *)val,
8214 (void *)fcio->fcio_obuf,
8215 fcio->fcio_olen, mode) == 0) {
8216 if (fp_fcio_copyout(fcio, data, mode)) {
8217 rval = EFAULT;
8218 }
8219 } else {
8220 rval = EFAULT;
8221 }
8222 }
8223 }
8224 kmem_free(val, sizeof (*val));
8225 break;
8226 }
8227
8228 case FCIO_GET_NUM_DEVS: {
8229 int num_devices;
8230
8231 if (fcio->fcio_olen != sizeof (num_devices) ||
8232 fcio->fcio_xfer != FCIO_XFER_READ) {
8233 rval = EINVAL;
8234 break;
8235 }
8236
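		/*
		 * The device count is topology dependent: for fabric and
		 * public loop topologies the name server is queried to
		 * refresh fp_total_devices before the count is reported.
		 */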
8237 mutex_enter(&port->fp_mutex);
8238 switch (port->fp_topology) {
8239 case FC_TOP_PRIVATE_LOOP:
8240 case FC_TOP_PT_PT:
8241 num_devices = port->fp_total_devices;
8242 fcio->fcio_errno = FC_SUCCESS;
8243 break;
8244
8245 case FC_TOP_PUBLIC_LOOP:
8246 case FC_TOP_FABRIC:
8247 mutex_exit(&port->fp_mutex);
8248 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL,
8249 NULL, KM_SLEEP);
8250 ASSERT(job != NULL);
8251
8252 			/*
8253 			 * In FC-GS-2 the Name Server doesn't send out
8254 			 * RSCNs for Name Server database updates. Once
8255 			 * that is fixed, the probe below is no longer
8256 			 * needed and should be removed.
8257 			 */
8258 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP);
8259 fctl_dealloc_job(job);
8260
8261 mutex_enter(&port->fp_mutex);
8262 num_devices = port->fp_total_devices;
8263 fcio->fcio_errno = FC_SUCCESS;
8264 break;
8265
8266 case FC_TOP_NO_NS:
8267 /* FALLTHROUGH */
8268 case FC_TOP_UNKNOWN:
8269 /* FALLTHROUGH */
8270 default:
8271 num_devices = 0;
8272 fcio->fcio_errno = FC_SUCCESS;
8273 break;
8274 }
8275 mutex_exit(&port->fp_mutex);
8276
8277 if (fp_copyout((void *)&num_devices,
8278 (void *)fcio->fcio_obuf, fcio->fcio_olen,
8279 mode) == 0) {
8280 if (fp_fcio_copyout(fcio, data, mode)) {
8281 rval = EFAULT;
8282 }
8283 } else {
8284 rval = EFAULT;
8285 }
8286 break;
8287 }
8288
8289 case FCIO_GET_DEV_LIST: {
8290 int num_devices;
8291 int new_count;
8292 int map_size;
8293
8294 if (fcio->fcio_xfer != FCIO_XFER_READ ||
8295 fcio->fcio_alen != sizeof (new_count)) {
8296 rval = EINVAL;
8297 break;
8298 }
8299
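		/*
		 * The output buffer size determines how many fc_port_dev_t
		 * entries can be returned; if it cannot hold all known
		 * devices, the required count is returned in fcio_abuf
		 * along with FC_TOOMANY.
		 */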
8300 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t);
8301
8302 mutex_enter(&port->fp_mutex);
8303 if (num_devices < port->fp_total_devices) {
8304 fcio->fcio_errno = FC_TOOMANY;
8305 new_count = port->fp_total_devices;
8306 mutex_exit(&port->fp_mutex);
8307
8308 if (fp_copyout((void *)&new_count,
8309 (void *)fcio->fcio_abuf,
8310 sizeof (new_count), mode)) {
8311 rval = EFAULT;
8312 break;
8313 }
8314
8315 if (fp_fcio_copyout(fcio, data, mode)) {
8316 rval = EFAULT;
8317 break;
8318 }
8319 rval = EINVAL;
8320 break;
8321 }
8322
8323 if (port->fp_total_devices <= 0) {
8324 fcio->fcio_errno = FC_NO_MAP;
8325 new_count = port->fp_total_devices;
8326 mutex_exit(&port->fp_mutex);
8327
8328 if (fp_copyout((void *)&new_count,
8329 (void *)fcio->fcio_abuf,
8330 sizeof (new_count), mode)) {
8331 rval = EFAULT;
8332 break;
8333 }
8334
8335 if (fp_fcio_copyout(fcio, data, mode)) {
8336 rval = EFAULT;
8337 break;
8338 }
8339 rval = EINVAL;
8340 break;
8341 }
8342
8343 switch (port->fp_topology) {
8344 case FC_TOP_PRIVATE_LOOP:
8345 if (fp_fillout_loopmap(port, fcio,
8346 mode) != FC_SUCCESS) {
8347 rval = EFAULT;
8348 break;
8349 }
8350 if (fp_fcio_copyout(fcio, data, mode)) {
8351 rval = EFAULT;
8352 }
8353 break;
8354
8355 case FC_TOP_PT_PT:
8356 if (fp_fillout_p2pmap(port, fcio,
8357 mode) != FC_SUCCESS) {
8358 rval = EFAULT;
8359 break;
8360 }
8361 if (fp_fcio_copyout(fcio, data, mode)) {
8362 rval = EFAULT;
8363 }
8364 break;
8365
8366 case FC_TOP_PUBLIC_LOOP:
8367 case FC_TOP_FABRIC: {
8368 fctl_ns_req_t *ns_cmd;
8369
8370 map_size =
8371 sizeof (fc_port_dev_t) * port->fp_total_devices;
8372
8373 mutex_exit(&port->fp_mutex);
8374
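			/*
			 * Fabric/public loop: walk the name server with
			 * GA_NXT to build the device list, then copy the
			 * resulting map out to the caller.
			 */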
8375 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t),
8376 sizeof (ns_resp_gan_t), map_size,
8377 (FCTL_NS_FILL_NS_MAP | FCTL_NS_BUF_IS_USERLAND),
8378 KM_SLEEP);
8379 ASSERT(ns_cmd != NULL);
8380
8381 ns_cmd->ns_gan_index = 0;
8382 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID;
8383 ns_cmd->ns_cmd_code = NS_GA_NXT;
8384 ns_cmd->ns_gan_max = map_size / sizeof (fc_port_dev_t);
8385
8386 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL,
8387 NULL, KM_SLEEP);
8388 ASSERT(job != NULL);
8389
8390 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);
8391
8392 if (ret != FC_SUCCESS ||
8393 job->job_result != FC_SUCCESS) {
8394 fctl_free_ns_cmd(ns_cmd);
8395
8396 fcio->fcio_errno = job->job_result;
8397 new_count = 0;
8398 if (fp_copyout((void *)&new_count,
8399 (void *)fcio->fcio_abuf,
8400 sizeof (new_count), mode)) {
8401 fctl_dealloc_job(job);
8402 mutex_enter(&port->fp_mutex);
8403 rval = EFAULT;
8404 break;
8405 }
8406
8407 if (fp_fcio_copyout(fcio, data, mode)) {
8408 fctl_dealloc_job(job);
8409 mutex_enter(&port->fp_mutex);
8410 rval = EFAULT;
8411 break;
8412 }
8413 rval = EIO;
8414 mutex_enter(&port->fp_mutex);
8415 break;
8416 }
8417 fctl_dealloc_job(job);
8418
8419 new_count = ns_cmd->ns_gan_index;
8420 if (fp_copyout((void *)&new_count,
8421 (void *)fcio->fcio_abuf, sizeof (new_count),
8422 mode)) {
8423 rval = EFAULT;
8424 fctl_free_ns_cmd(ns_cmd);
8425 mutex_enter(&port->fp_mutex);
8426 break;
8427 }
8428
8429 if (fp_copyout((void *)ns_cmd->ns_data_buf,
8430 (void *)fcio->fcio_obuf, sizeof (fc_port_dev_t) *
8431 ns_cmd->ns_gan_index, mode)) {
8432 rval = EFAULT;
8433 fctl_free_ns_cmd(ns_cmd);
8434 mutex_enter(&port->fp_mutex);
8435 break;
8436 }
8437 fctl_free_ns_cmd(ns_cmd);
8438
8439 if (fp_fcio_copyout(fcio, data, mode)) {
8440 rval = EFAULT;
8441 }
8442 mutex_enter(&port->fp_mutex);
8443 break;
8444 }
8445
8446 case FC_TOP_NO_NS:
8447 /* FALLTHROUGH */
8448 case FC_TOP_UNKNOWN:
8449 /* FALLTHROUGH */
8450 default:
8451 fcio->fcio_errno = FC_NO_MAP;
8452 			new_count = port->fp_total_devices;
8453
8454 if (fp_copyout((void *)&new_count,
8455 (void *)fcio->fcio_abuf,
8456 sizeof (new_count), mode)) {
8457 rval = EFAULT;
8458 break;
8459 }
8460
8461 if (fp_fcio_copyout(fcio, data, mode)) {
8462 rval = EFAULT;
8463 break;
8464 }
8465 rval = EINVAL;
8466 break;
8467 }
8468 mutex_exit(&port->fp_mutex);
8469 break;
8470 }
8471
8472 case FCIO_GET_SYM_PNAME: {
8473 rval = ENOTSUP;
8474 break;
8475 }
8476
8477 case FCIO_GET_SYM_NNAME: {
8478 rval = ENOTSUP;
8479 break;
8480 }
8481
8482 case FCIO_SET_SYM_PNAME: {
8483 rval = ENOTSUP;
8484 break;
8485 }
8486
8487 case FCIO_SET_SYM_NNAME: {
8488 rval = ENOTSUP;
8489 break;
8490 }
8491
8492 case FCIO_GET_LOGI_PARAMS: {
8493 la_wwn_t pwwn;
8494 la_wwn_t *my_pwwn;
8495 la_els_logi_t *params;
8496 la_els_logi32_t *params32;
8497 fc_remote_node_t *node;
8498 fc_remote_port_t *pd;
8499
8500 if (fcio->fcio_ilen != sizeof (la_wwn_t) ||
8501 (fcio->fcio_xfer & FCIO_XFER_READ) == 0 ||
8502 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0) {
8503 rval = EINVAL;
8504 break;
8505 }
8506
8507 if (use32 == B_TRUE) {
8508 if (fcio->fcio_olen != sizeof (la_els_logi32_t)) {
8509 rval = EINVAL;
8510 break;
8511 }
8512 } else {
8513 if (fcio->fcio_olen != sizeof (la_els_logi_t)) {
8514 rval = EINVAL;
8515 break;
8516 }
8517 }
8518
8519 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) {
8520 rval = EFAULT;
8521 break;
8522 }
8523
8524 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
8525 if (pd == NULL) {
8526 mutex_enter(&port->fp_mutex);
8527 my_pwwn = &port->fp_service_params.nport_ww_name;
8528 mutex_exit(&port->fp_mutex);
8529
8530 if (fctl_wwn_cmp(&pwwn, my_pwwn) != 0) {
8531 rval = ENXIO;
8532 break;
8533 }
8534
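			/*
			 * The WWN names the local port itself, so return
			 * our own service parameters.
			 */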
8535 params = kmem_zalloc(sizeof (*params), KM_SLEEP);
8536 mutex_enter(&port->fp_mutex);
8537 *params = port->fp_service_params;
8538 mutex_exit(&port->fp_mutex);
8539 } else {
8540 params = kmem_zalloc(sizeof (*params), KM_SLEEP);
8541
8542 mutex_enter(&pd->pd_mutex);
8543 params->ls_code.mbz = params->ls_code.ls_code = 0;
8544 params->common_service = pd->pd_csp;
8545 params->nport_ww_name = pd->pd_port_name;
8546 params->class_1 = pd->pd_clsp1;
8547 params->class_2 = pd->pd_clsp2;
8548 params->class_3 = pd->pd_clsp3;
8549 node = pd->pd_remote_nodep;
8550 mutex_exit(&pd->pd_mutex);
8551
8552 bzero(params->reserved, sizeof (params->reserved));
8553
8554 mutex_enter(&node->fd_mutex);
8555 bcopy(node->fd_vv, params->vendor_version,
8556 sizeof (node->fd_vv));
8557 params->node_ww_name = node->fd_node_name;
8558 mutex_exit(&node->fd_mutex);
8559
8560 fctl_release_remote_port(pd);
8561 }
8562
8563 if (use32 == B_TRUE) {
8564 params32 = kmem_zalloc(sizeof (*params32), KM_SLEEP);
8565
8566 params32->ls_code.mbz = params->ls_code.mbz;
8567 params32->common_service = params->common_service;
8568 params32->nport_ww_name = params->nport_ww_name;
8569 params32->class_1 = params->class_1;
8570 params32->class_2 = params->class_2;
8571 params32->class_3 = params->class_3;
8572 bzero(params32->reserved, sizeof (params32->reserved));
8573 bcopy(params->vendor_version, params32->vendor_version,
8574 sizeof (node->fd_vv));
8575 params32->node_ww_name = params->node_ww_name;
8576
8577 if (ddi_copyout((void *)params32,
8578 (void *)fcio->fcio_obuf,
8579 sizeof (*params32), mode)) {
8580 rval = EFAULT;
8581 }
8582
8583 kmem_free(params32, sizeof (*params32));
8584 } else {
8585 if (ddi_copyout((void *)params, (void *)fcio->fcio_obuf,
8586 sizeof (*params), mode)) {
8587 rval = EFAULT;
8588 }
8589 }
8590
8591 kmem_free(params, sizeof (*params));
8592 if (fp_fcio_copyout(fcio, data, mode)) {
8593 rval = EFAULT;
8594 }
8595 break;
8596 }
8597
8598 case FCIO_DEV_LOGOUT:
8599 case FCIO_DEV_LOGIN:
8600 if (fcio->fcio_ilen != sizeof (la_wwn_t) ||
8601 fcio->fcio_xfer != FCIO_XFER_WRITE) {
8602 rval = EINVAL;
8603
8604 if (fp_fcio_copyout(fcio, data, mode)) {
8605 rval = EFAULT;
8606 }
8607 break;
8608 }
8609
8610 if (fcio->fcio_cmd == FCIO_DEV_LOGIN) {
8611 jcode = JOB_FCIO_LOGIN;
8612 } else {
8613 jcode = JOB_FCIO_LOGOUT;
8614 }
8615
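		/*
		 * Stage a kernel copy of the fcio (and of its input buffer)
		 * in kfcio; the login/logout job consumes it through
		 * job_private and the results are copied back afterwards.
		 */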
8616 kfcio = kmem_zalloc(sizeof (*kfcio), KM_SLEEP);
8617 bcopy(fcio, kfcio, sizeof (*fcio));
8618
8619 if (kfcio->fcio_ilen) {
8620 kfcio->fcio_ibuf = kmem_zalloc(kfcio->fcio_ilen,
8621 KM_SLEEP);
8622
8623 if (ddi_copyin((void *)fcio->fcio_ibuf,
8624 (void *)kfcio->fcio_ibuf, kfcio->fcio_ilen,
8625 mode)) {
8626 rval = EFAULT;
8627
8628 kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen);
8629 kmem_free(kfcio, sizeof (*kfcio));
8630 				fcio->fcio_errno = FC_FAILURE;
8631 if (fp_fcio_copyout(fcio, data, mode)) {
8632 rval = EFAULT;
8633 }
8634 break;
8635 }
8636 }
8637
8638 job = fctl_alloc_job(jcode, 0, NULL, NULL, KM_SLEEP);
8639 job->job_private = kfcio;
8640
8641 fctl_enque_job(port, job);
8642 fctl_jobwait(job);
8643
8644 rval = job->job_result;
8645
8646 fcio->fcio_errno = kfcio->fcio_errno;
8647 if (fp_fcio_copyout(fcio, data, mode)) {
8648 rval = EFAULT;
8649 }
8650
8651 kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen);
8652 kmem_free(kfcio, sizeof (*kfcio));
8653 fctl_dealloc_job(job);
8654 break;
8655
8656 case FCIO_GET_STATE: {
8657 la_wwn_t pwwn;
8658 uint32_t state;
8659 fc_remote_port_t *pd;
8660 fctl_ns_req_t *ns_cmd;
8661
8662 if (fcio->fcio_ilen != sizeof (la_wwn_t) ||
8663 fcio->fcio_olen != sizeof (state) ||
8664 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0 ||
8665 (fcio->fcio_xfer & FCIO_XFER_READ) == 0) {
8666 rval = EINVAL;
8667 break;
8668 }
8669
8670 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) {
8671 rval = EFAULT;
8672 break;
8673 }
8674 fcio->fcio_errno = 0;
8675
8676 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
8677 if (pd == NULL) {
8678 mutex_enter(&port->fp_mutex);
8679 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
8680 mutex_exit(&port->fp_mutex);
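				/*
				 * The WWN is not known locally; on a switched
				 * topology, query the name server (GID_PN) to
				 * see whether the fabric knows about it.
				 */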
8681 job = fctl_alloc_job(JOB_PLOGI_ONE, 0,
8682 NULL, NULL, KM_SLEEP);
8683
8684 job->job_counter = 1;
8685 job->job_result = FC_SUCCESS;
8686
8687 ns_cmd = fctl_alloc_ns_cmd(
8688 sizeof (ns_req_gid_pn_t),
8689 sizeof (ns_resp_gid_pn_t),
8690 sizeof (ns_resp_gid_pn_t),
8691 FCTL_NS_BUF_IS_USERLAND, KM_SLEEP);
8692 ASSERT(ns_cmd != NULL);
8693
8694 ns_cmd->ns_cmd_code = NS_GID_PN;
8695 ((ns_req_gid_pn_t *)
8696 (ns_cmd->ns_cmd_buf))->pwwn = pwwn;
8697
8698 ret = fp_ns_query(port, ns_cmd, job,
8699 1, KM_SLEEP);
8700
8701 if (ret != FC_SUCCESS || job->job_result !=
8702 FC_SUCCESS) {
8703 if (ret != FC_SUCCESS) {
8704 fcio->fcio_errno = ret;
8705 } else {
8706 fcio->fcio_errno =
8707 job->job_result;
8708 }
8709 rval = EIO;
8710 } else {
8711 state = PORT_DEVICE_INVALID;
8712 }
8713 fctl_free_ns_cmd(ns_cmd);
8714 fctl_dealloc_job(job);
8715 } else {
8716 mutex_exit(&port->fp_mutex);
8717 fcio->fcio_errno = FC_BADWWN;
8718 rval = ENXIO;
8719 }
8720 } else {
8721 mutex_enter(&pd->pd_mutex);
8722 state = pd->pd_state;
8723 mutex_exit(&pd->pd_mutex);
8724
8725 fctl_release_remote_port(pd);
8726 }
8727
8728 if (!rval) {
8729 if (ddi_copyout((void *)&state,
8730 (void *)fcio->fcio_obuf, sizeof (state),
8731 mode)) {
8732 rval = EFAULT;
8733 }
8734 }
8735 if (fp_fcio_copyout(fcio, data, mode)) {
8736 rval = EFAULT;
8737 }
8738 break;
8739 }
8740
8741 case FCIO_DEV_REMOVE: {
8742 la_wwn_t pwwn;
8743 fc_portmap_t *changelist;
8744 fc_remote_port_t *pd;
8745
8746 if (fcio->fcio_ilen != sizeof (la_wwn_t) ||
8747 fcio->fcio_xfer != FCIO_XFER_WRITE) {
8748 rval = EINVAL;
8749 break;
8750 }
8751
8752 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) {
8753 rval = EFAULT;
8754 break;
8755 }
8756
8757 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
8758 if (pd == NULL) {
8759 rval = ENXIO;
8760 fcio->fcio_errno = FC_BADWWN;
8761 if (fp_fcio_copyout(fcio, data, mode)) {
8762 rval = EFAULT;
8763 }
8764 break;
8765 }
8766
8767 mutex_enter(&pd->pd_mutex);
8768 if (pd->pd_ref_count > 1) {
8769 mutex_exit(&pd->pd_mutex);
8770
8771 rval = EBUSY;
8772 fcio->fcio_errno = FC_FAILURE;
8773 fctl_release_remote_port(pd);
8774
8775 if (fp_fcio_copyout(fcio, data, mode)) {
8776 rval = EFAULT;
8777 }
8778 break;
8779 }
8780 mutex_exit(&pd->pd_mutex);
8781
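		/*
		 * Build a single-entry changelist marked
		 * PORT_DEVICE_USER_LOGOUT and hand it to the ULPs via
		 * fp_ulp_devc_cb().
		 */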
8782 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);
8783
8784 fctl_copy_portmap(changelist, pd);
8785 changelist->map_type = PORT_DEVICE_USER_LOGOUT;
8786 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);
8787
8788 fctl_release_remote_port(pd);
8789 break;
8790 }
8791
8792 case FCIO_GET_FCODE_REV: {
8793 caddr_t fcode_rev;
8794 fc_fca_pm_t pm;
8795
8796 if (fcio->fcio_olen < FC_FCODE_REV_SIZE ||
8797 fcio->fcio_xfer != FCIO_XFER_READ) {
8798 rval = EINVAL;
8799 break;
8800 }
8801 bzero((caddr_t)&pm, sizeof (pm));
8802
8803 fcode_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP);
8804
8805 pm.pm_cmd_flags = FC_FCA_PM_READ;
8806 pm.pm_cmd_code = FC_PORT_GET_FCODE_REV;
8807 pm.pm_data_len = fcio->fcio_olen;
8808 pm.pm_data_buf = fcode_rev;
8809
8810 ret = port->fp_fca_tran->fca_port_manage(
8811 port->fp_fca_handle, &pm);
8812
8813 if (ret == FC_SUCCESS) {
8814 if (ddi_copyout((void *)fcode_rev,
8815 (void *)fcio->fcio_obuf,
8816 fcio->fcio_olen, mode) == 0) {
8817 if (fp_fcio_copyout(fcio, data, mode)) {
8818 rval = EFAULT;
8819 }
8820 } else {
8821 rval = EFAULT;
8822 }
8823 } else {
8824 			/*
8825 			 * Check whether the buffer was too small to hold
8826 			 * the FCODE version.
8827 			 */
8828 if (pm.pm_data_len > fcio->fcio_olen) {
8829 rval = ENOMEM;
8830 } else {
8831 rval = EIO;
8832 }
8833 fcio->fcio_errno = ret;
8834 if (fp_fcio_copyout(fcio, data, mode)) {
8835 rval = EFAULT;
8836 }
8837 }
8838 kmem_free(fcode_rev, fcio->fcio_olen);
8839 break;
8840 }
8841
8842 case FCIO_GET_FW_REV: {
8843 caddr_t fw_rev;
8844 fc_fca_pm_t pm;
8845
8846 if (fcio->fcio_olen < FC_FW_REV_SIZE ||
8847 fcio->fcio_xfer != FCIO_XFER_READ) {
8848 rval = EINVAL;
8849 break;
8850 }
8851 bzero((caddr_t)&pm, sizeof (pm));
8852
8853 fw_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP);
8854
8855 pm.pm_cmd_flags = FC_FCA_PM_READ;
8856 pm.pm_cmd_code = FC_PORT_GET_FW_REV;
8857 pm.pm_data_len = fcio->fcio_olen;
8858 pm.pm_data_buf = fw_rev;
8859
8860 ret = port->fp_fca_tran->fca_port_manage(
8861 port->fp_fca_handle, &pm);
8862
8863 if (ret == FC_SUCCESS) {
8864 if (ddi_copyout((void *)fw_rev,
8865 (void *)fcio->fcio_obuf,
8866 fcio->fcio_olen, mode) == 0) {
8867 if (fp_fcio_copyout(fcio, data, mode)) {
8868 rval = EFAULT;
8869 }
8870 } else {
8871 rval = EFAULT;
8872 }
8873 } else {
8874 if (fp_fcio_copyout(fcio, data, mode)) {
8875 rval = EFAULT;
8876 }
8877 rval = EIO;
8878 }
8879 kmem_free(fw_rev, fcio->fcio_olen);
8880 break;
8881 }
8882
8883 case FCIO_GET_DUMP_SIZE: {
8884 uint32_t dump_size;
8885 fc_fca_pm_t pm;
8886
8887 if (fcio->fcio_olen != sizeof (dump_size) ||
8888 fcio->fcio_xfer != FCIO_XFER_READ) {
8889 rval = EINVAL;
8890 break;
8891 }
8892 bzero((caddr_t)&pm, sizeof (pm));
8893 pm.pm_cmd_flags = FC_FCA_PM_READ;
8894 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE;
8895 pm.pm_data_len = sizeof (dump_size);
8896 pm.pm_data_buf = (caddr_t)&dump_size;
8897
8898 ret = port->fp_fca_tran->fca_port_manage(
8899 port->fp_fca_handle, &pm);
8900
8901 if (ret == FC_SUCCESS) {
8902 if (ddi_copyout((void *)&dump_size,
8903 (void *)fcio->fcio_obuf, sizeof (dump_size),
8904 mode) == 0) {
8905 if (fp_fcio_copyout(fcio, data, mode)) {
8906 rval = EFAULT;
8907 }
8908 } else {
8909 rval = EFAULT;
8910 }
8911 } else {
8912 fcio->fcio_errno = ret;
8913 rval = EIO;
8914 if (fp_fcio_copyout(fcio, data, mode)) {
8915 rval = EFAULT;
8916 }
8917 }
8918 break;
8919 }
8920
8921 case FCIO_DOWNLOAD_FW: {
8922 caddr_t firmware;
8923 fc_fca_pm_t pm;
8924
8925 if (fcio->fcio_ilen <= 0 ||
8926 fcio->fcio_xfer != FCIO_XFER_WRITE) {
8927 rval = EINVAL;
8928 break;
8929 }
8930
8931 firmware = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP);
8932 if (ddi_copyin(fcio->fcio_ibuf, firmware,
8933 fcio->fcio_ilen, mode)) {
8934 rval = EFAULT;
8935 kmem_free(firmware, fcio->fcio_ilen);
8936 break;
8937 }
8938
8939 bzero((caddr_t)&pm, sizeof (pm));
8940 pm.pm_cmd_flags = FC_FCA_PM_WRITE;
8941 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FW;
8942 pm.pm_data_len = fcio->fcio_ilen;
8943 pm.pm_data_buf = firmware;
8944
8945 ret = port->fp_fca_tran->fca_port_manage(
8946 port->fp_fca_handle, &pm);
8947
8948 kmem_free(firmware, fcio->fcio_ilen);
8949
8950 if (ret != FC_SUCCESS) {
8951 fcio->fcio_errno = ret;
8952 rval = EIO;
8953 if (fp_fcio_copyout(fcio, data, mode)) {
8954 rval = EFAULT;
8955 }
8956 }
8957 break;
8958 }
8959
8960 case FCIO_DOWNLOAD_FCODE: {
8961 caddr_t fcode;
8962 fc_fca_pm_t pm;
8963
8964 if (fcio->fcio_ilen <= 0 ||
8965 fcio->fcio_xfer != FCIO_XFER_WRITE) {
8966 rval = EINVAL;
8967 break;
8968 }
8969
8970 fcode = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP);
8971 if (ddi_copyin(fcio->fcio_ibuf, fcode,
8972 fcio->fcio_ilen, mode)) {
8973 rval = EFAULT;
8974 kmem_free(fcode, fcio->fcio_ilen);
8975 break;
8976 }
8977
8978 bzero((caddr_t)&pm, sizeof (pm));
8979 pm.pm_cmd_flags = FC_FCA_PM_WRITE;
8980 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FCODE;
8981 pm.pm_data_len = fcio->fcio_ilen;
8982 pm.pm_data_buf = fcode;
8983
8984 ret = port->fp_fca_tran->fca_port_manage(
8985 port->fp_fca_handle, &pm);
8986
8987 kmem_free(fcode, fcio->fcio_ilen);
8988
8989 if (ret != FC_SUCCESS) {
8990 fcio->fcio_errno = ret;
8991 rval = EIO;
8992 if (fp_fcio_copyout(fcio, data, mode)) {
8993 rval = EFAULT;
8994 }
8995 }
8996 break;
8997 }
8998
8999 case FCIO_FORCE_DUMP:
9000 ret = port->fp_fca_tran->fca_reset(
9001 port->fp_fca_handle, FC_FCA_CORE);
9002
9003 if (ret != FC_SUCCESS) {
9004 fcio->fcio_errno = ret;
9005 rval = EIO;
9006 if (fp_fcio_copyout(fcio, data, mode)) {
9007 rval = EFAULT;
9008 }
9009 }
9010 break;
9011
9012 case FCIO_GET_DUMP: {
9013 caddr_t dump;
9014 uint32_t dump_size;
9015 fc_fca_pm_t pm;
9016
9017 if (fcio->fcio_xfer != FCIO_XFER_READ) {
9018 rval = EINVAL;
9019 break;
9020 }
9021 bzero((caddr_t)&pm, sizeof (pm));
9022
9023 pm.pm_cmd_flags = FC_FCA_PM_READ;
9024 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE;
9025 pm.pm_data_len = sizeof (dump_size);
9026 pm.pm_data_buf = (caddr_t)&dump_size;
9027
9028 ret = port->fp_fca_tran->fca_port_manage(
9029 port->fp_fca_handle, &pm);
9030
9031 if (ret != FC_SUCCESS) {
9032 fcio->fcio_errno = ret;
9033 rval = EIO;
9034 if (fp_fcio_copyout(fcio, data, mode)) {
9035 rval = EFAULT;
9036 }
9037 break;
9038 }
9039 if (fcio->fcio_olen != dump_size) {
9040 fcio->fcio_errno = FC_NOMEM;
9041 rval = EINVAL;
9042 if (fp_fcio_copyout(fcio, data, mode)) {
9043 rval = EFAULT;
9044 }
9045 break;
9046 }
9047
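		/*
		 * The caller's buffer matches the reported dump size;
		 * fetch the dump from the FCA into a kernel buffer and
		 * copy it out.
		 */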
9048 dump = kmem_zalloc(dump_size, KM_SLEEP);
9049
9050 bzero((caddr_t)&pm, sizeof (pm));
9051 pm.pm_cmd_flags = FC_FCA_PM_READ;
9052 pm.pm_cmd_code = FC_PORT_GET_DUMP;
9053 pm.pm_data_len = dump_size;
9054 pm.pm_data_buf = dump;
9055
9056 ret = port->fp_fca_tran->fca_port_manage(
9057 port->fp_fca_handle, &pm);
9058
9059 if (ret == FC_SUCCESS) {
9060 if (ddi_copyout((void *)dump, (void *)fcio->fcio_obuf,
9061 dump_size, mode) == 0) {
9062 if (fp_fcio_copyout(fcio, data, mode)) {
9063 rval = EFAULT;
9064 }
9065 } else {
9066 rval = EFAULT;
9067 }
9068 } else {
9069 fcio->fcio_errno = ret;
9070 rval = EIO;
9071 if (fp_fcio_copyout(fcio, data, mode)) {
9072 rval = EFAULT;
9073 }
9074 }
9075 kmem_free(dump, dump_size);
9076 break;
9077 }
9078
9079 case FCIO_GET_TOPOLOGY: {
9080 uint32_t user_topology;
9081
9082 if (fcio->fcio_xfer != FCIO_XFER_READ ||
9083 fcio->fcio_olen != sizeof (user_topology)) {
9084 rval = EINVAL;
9085 break;
9086 }
9087
9088 mutex_enter(&port->fp_mutex);
9089 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) {
9090 user_topology = FC_TOP_UNKNOWN;
9091 } else {
9092 user_topology = port->fp_topology;
9093 }
9094 mutex_exit(&port->fp_mutex);
9095
9096 if (ddi_copyout((void *)&user_topology,
9097 (void *)fcio->fcio_obuf, sizeof (user_topology),
9098 mode)) {
9099 rval = EFAULT;
9100 }
9101 break;
9102 }
9103
9104 case FCIO_RESET_LINK: {
9105 la_wwn_t pwwn;
9106
9107 		/*
9108 		 * Look at the WWN supplied in fcio_ibuf: if it is zero,
9109 		 * attempt to reset the local link/loop. If the
9110 		 * fcio_ibuf field names an NL_Port, determine the
9111 		 * LFA and reset the remote loop by sending a
9112 		 * LINIT ELS.
9113 		 */
9114
9115 if (fcio->fcio_xfer != FCIO_XFER_WRITE ||
9116 fcio->fcio_ilen != sizeof (pwwn)) {
9117 rval = EINVAL;
9118 break;
9119 }
9120
9121 if (ddi_copyin(fcio->fcio_ibuf, &pwwn,
9122 sizeof (pwwn), mode)) {
9123 rval = EFAULT;
9124 break;
9125 }
9126
9127 mutex_enter(&port->fp_mutex);
9128 if (port->fp_soft_state & FP_SOFT_IN_LINK_RESET) {
9129 mutex_exit(&port->fp_mutex);
9130 break;
9131 }
9132 port->fp_soft_state |= FP_SOFT_IN_LINK_RESET;
9133 mutex_exit(&port->fp_mutex);
9134
9135 job = fctl_alloc_job(JOB_LINK_RESET, 0, NULL, NULL, KM_SLEEP);
9136 if (job == NULL) {
9137 rval = ENOMEM;
9138 break;
9139 }
9140 job->job_counter = 1;
9141 job->job_private = (void *)&pwwn;
9142
9143 fctl_enque_job(port, job);
9144 fctl_jobwait(job);
9145
9146 mutex_enter(&port->fp_mutex);
9147 port->fp_soft_state &= ~FP_SOFT_IN_LINK_RESET;
9148 mutex_exit(&port->fp_mutex);
9149
9150 if (job->job_result != FC_SUCCESS) {
9151 fcio->fcio_errno = job->job_result;
9152 rval = EIO;
9153 if (fp_fcio_copyout(fcio, data, mode)) {
9154 rval = EFAULT;
9155 }
9156 }
9157 fctl_dealloc_job(job);
9158 break;
9159 }
9160
9161 case FCIO_RESET_HARD:
9162 ret = port->fp_fca_tran->fca_reset(
9163 port->fp_fca_handle, FC_FCA_RESET);
9164 if (ret != FC_SUCCESS) {
9165 fcio->fcio_errno = ret;
9166 rval = EIO;
9167 if (fp_fcio_copyout(fcio, data, mode)) {
9168 rval = EFAULT;
9169 }
9170 }
9171 break;
9172
9173 case FCIO_RESET_HARD_CORE:
9174 ret = port->fp_fca_tran->fca_reset(
9175 port->fp_fca_handle, FC_FCA_RESET_CORE);
9176 if (ret != FC_SUCCESS) {
9177 rval = EIO;
9178 fcio->fcio_errno = ret;
9179 if (fp_fcio_copyout(fcio, data, mode)) {
9180 rval = EFAULT;
9181 }
9182 }
9183 break;
9184
9185 case FCIO_DIAG: {
9186 fc_fca_pm_t pm;
9187
9188 bzero((caddr_t)&pm, sizeof (fc_fca_pm_t));
9189
9190 /* Validate user buffer from ioctl call. */
9191 if (((fcio->fcio_ilen > 0) && (fcio->fcio_ibuf == NULL)) ||
9192 ((fcio->fcio_ilen <= 0) && (fcio->fcio_ibuf != NULL)) ||
9193 ((fcio->fcio_alen > 0) && (fcio->fcio_abuf == NULL)) ||
9194 ((fcio->fcio_alen <= 0) && (fcio->fcio_abuf != NULL)) ||
9195 ((fcio->fcio_olen > 0) && (fcio->fcio_obuf == NULL)) ||
9196 ((fcio->fcio_olen <= 0) && (fcio->fcio_obuf != NULL))) {
9197 rval = EFAULT;
9198 break;
9199 }
9200
9201 if ((pm.pm_cmd_len = fcio->fcio_ilen) > 0) {
9202 pm.pm_cmd_buf = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP);
9203 if (ddi_copyin(fcio->fcio_ibuf, pm.pm_cmd_buf,
9204 fcio->fcio_ilen, mode)) {
9205 rval = EFAULT;
9206 goto fp_fcio_diag_cleanup;
9207 }
9208 }
9209
9210 if ((pm.pm_data_len = fcio->fcio_alen) > 0) {
9211 pm.pm_data_buf = kmem_zalloc(fcio->fcio_alen, KM_SLEEP);
9212 if (ddi_copyin(fcio->fcio_abuf, pm.pm_data_buf,
9213 fcio->fcio_alen, mode)) {
9214 rval = EFAULT;
9215 goto fp_fcio_diag_cleanup;
9216 }
9217 }
9218
9219 if ((pm.pm_stat_len = fcio->fcio_olen) > 0) {
9220 pm.pm_stat_buf = kmem_zalloc(fcio->fcio_olen, KM_SLEEP);
9221 }
9222
9223 pm.pm_cmd_code = FC_PORT_DIAG;
9224 pm.pm_cmd_flags = fcio->fcio_cmd_flags;
9225
9226 ret = port->fp_fca_tran->fca_port_manage(
9227 port->fp_fca_handle, &pm);
9228
9229 if (ret != FC_SUCCESS) {
9230 if (ret == FC_INVALID_REQUEST) {
9231 rval = ENOTTY;
9232 } else {
9233 rval = EIO;
9234 }
9235
9236 fcio->fcio_errno = ret;
9237 if (fp_fcio_copyout(fcio, data, mode)) {
9238 rval = EFAULT;
9239 }
9240 goto fp_fcio_diag_cleanup;
9241 }
9242
9243 		/*
9244 		 * pm_stat_len contains the number of status bytes the
9245 		 * FCA driver needs to return the complete status of the
9246 		 * requested diag operation. If the user buffer is not
9247 		 * large enough to hold the entire status, copy only the
9248 		 * portion that fits in the buffer and return ENOMEM to
9249 		 * the user application.
9250 		 */
9251 if (pm.pm_stat_len > fcio->fcio_olen) {
9252 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
9253 "fp:FCIO_DIAG:status buffer too small\n");
9254
9255 rval = ENOMEM;
9256 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf,
9257 fcio->fcio_olen, mode)) {
9258 rval = EFAULT;
9259 goto fp_fcio_diag_cleanup;
9260 }
9261 } else {
9262 			/*
9263 			 * Copy only pm_stat_len bytes of data.
9264 			 */
9265 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf,
9266 pm.pm_stat_len, mode)) {
9267 rval = EFAULT;
9268 goto fp_fcio_diag_cleanup;
9269 }
9270 }
9271
9272 if (fp_fcio_copyout(fcio, data, mode)) {
9273 rval = EFAULT;
9274 }
9275
9276 fp_fcio_diag_cleanup:
9277 if (pm.pm_cmd_buf != NULL) {
9278 kmem_free(pm.pm_cmd_buf, fcio->fcio_ilen);
9279 }
9280 if (pm.pm_data_buf != NULL) {
9281 kmem_free(pm.pm_data_buf, fcio->fcio_alen);
9282 }
9283 if (pm.pm_stat_buf != NULL) {
9284 kmem_free(pm.pm_stat_buf, fcio->fcio_olen);
9285 }
9286
9287 break;
9288 }
9289
9290 case FCIO_GET_NODE_ID: {
9291 /* validate parameters */
9292 if (fcio->fcio_xfer != FCIO_XFER_READ ||
9293 fcio->fcio_olen < sizeof (fc_rnid_t)) {
9294 rval = EINVAL;
9295 break;
9296 }
9297
9298 rval = fp_get_rnid(port, data, mode, fcio);
9299
9300 /* ioctl handling is over */
9301 break;
9302 }
9303
9304 case FCIO_SEND_NODE_ID: {
9305 la_wwn_t pwwn;
9306
9307 /* validate parameters */
9308 if (fcio->fcio_ilen != sizeof (la_wwn_t) ||
9309 fcio->fcio_xfer != FCIO_XFER_READ) {
9310 rval = EINVAL;
9311 break;
9312 }
9313
9314 if (ddi_copyin(fcio->fcio_ibuf, &pwwn,
9315 sizeof (la_wwn_t), mode)) {
9316 rval = EFAULT;
9317 break;
9318 }
9319
9320 rval = fp_send_rnid(port, data, mode, fcio, &pwwn);
9321
9322 /* ioctl handling is over */
9323 break;
9324 }
9325
9326 case FCIO_SET_NODE_ID: {
9327 if (fcio->fcio_ilen != sizeof (fc_rnid_t) ||
9328 (fcio->fcio_xfer != FCIO_XFER_WRITE)) {
9329 rval = EINVAL;
9330 break;
9331 }
9332
9333 rval = fp_set_rnid(port, data, mode, fcio);
9334 break;
9335 }
9336
9337 case FCIO_LINK_STATUS: {
9338 fc_portid_t rls_req;
9339 fc_rls_acc_t *rls_acc;
9340 fc_fca_pm_t pm;
9341 uint32_t dest, src_id;
9342 fp_cmd_t *cmd;
9343 fc_remote_port_t *pd;
9344 uchar_t pd_flags;
9345
9346 /* validate parameters */
9347 if (fcio->fcio_ilen != sizeof (fc_portid_t) ||
9348 fcio->fcio_olen != sizeof (fc_rls_acc_t) ||
9349 fcio->fcio_xfer != FCIO_XFER_RW) {
9350 rval = EINVAL;
9351 break;
9352 }
9353
9354 if ((fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_FPORT) &&
9355 (fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_NPORT)) {
9356 rval = EINVAL;
9357 break;
9358 }
9359
9360 if (ddi_copyin((void *)fcio->fcio_ibuf, (void *)&rls_req,
9361 sizeof (fc_portid_t), mode)) {
9362 rval = EFAULT;
9363 break;
9364 }
9365
9366
9367 /* Determine the destination of the RLS frame */
9368 if (fcio->fcio_cmd_flags == FCIO_CFLAGS_RLS_DEST_FPORT) {
9369 dest = FS_FABRIC_F_PORT;
9370 } else {
9371 dest = rls_req.port_id;
9372 }
9373
9374 mutex_enter(&port->fp_mutex);
9375 src_id = port->fp_port_id.port_id;
9376 mutex_exit(&port->fp_mutex);
9377
9378 /* If dest is zero OR same as FCA ID, then use port_manage() */
9379 if (dest == 0 || dest == src_id) {
9380
9381 /* Allocate memory for link error status block */
9382 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);
9383 ASSERT(rls_acc != NULL);
9384
9385 /* Prepare the port management structure */
9386 bzero((caddr_t)&pm, sizeof (pm));
9387
9388 pm.pm_cmd_flags = FC_FCA_PM_READ;
9389 pm.pm_cmd_code = FC_PORT_RLS;
9390 pm.pm_data_len = sizeof (*rls_acc);
9391 pm.pm_data_buf = (caddr_t)rls_acc;
9392
9393 /* Get the adapter's link error status block */
9394 ret = port->fp_fca_tran->fca_port_manage(
9395 port->fp_fca_handle, &pm);
9396
9397 if (ret == FC_SUCCESS) {
9398 /* xfer link status block to userland */
9399 if (ddi_copyout((void *)rls_acc,
9400 (void *)fcio->fcio_obuf,
9401 sizeof (*rls_acc), mode) == 0) {
9402 if (fp_fcio_copyout(fcio, data,
9403 mode)) {
9404 rval = EFAULT;
9405 }
9406 } else {
9407 rval = EFAULT;
9408 }
9409 } else {
9410 rval = EIO;
9411 fcio->fcio_errno = ret;
9412 if (fp_fcio_copyout(fcio, data, mode)) {
9413 rval = EFAULT;
9414 }
9415 }
9416
9417 kmem_free(rls_acc, sizeof (*rls_acc));
9418
9419 /* ioctl handling is over */
9420 break;
9421 }
9422
9423 		/*
9424 		 * Send RLS to the destination port.
9425 		 * Using the F_Port as the RLS destination is not yet
9426 		 * supported and will be implemented in the future, if
9427 		 * needed. The call below to get "pd" fails if dest is the F_Port.
9428 		 */
9429 pd = fctl_hold_remote_port_by_did(port, dest);
9430 if (pd == NULL) {
9431 fcio->fcio_errno = FC_BADOBJECT;
9432 rval = ENXIO;
9433 if (fp_fcio_copyout(fcio, data, mode)) {
9434 rval = EFAULT;
9435 }
9436 break;
9437 }
9438
9439 mutex_enter(&pd->pd_mutex);
9440 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
9441 mutex_exit(&pd->pd_mutex);
9442 fctl_release_remote_port(pd);
9443
9444 fcio->fcio_errno = FC_LOGINREQ;
9445 rval = EINVAL;
9446 if (fp_fcio_copyout(fcio, data, mode)) {
9447 rval = EFAULT;
9448 }
9449 break;
9450 }
9451 ASSERT(pd->pd_login_count >= 1);
9452 mutex_exit(&pd->pd_mutex);
9453
9454 /*
9455 * Allocate job structure and set job_code as DUMMY,
9456 * because we will not go through the job thread.
9457 * Instead fp_sendcmd() is called directly here.
9458 */
9459 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC,
9460 NULL, NULL, KM_SLEEP);
9461 ASSERT(job != NULL);
9462
9463 job->job_counter = 1;
9464
9465 cmd = fp_alloc_pkt(port, sizeof (la_els_rls_t),
9466 sizeof (la_els_rls_acc_t), KM_SLEEP, pd);
9467 if (cmd == NULL) {
9468 fcio->fcio_errno = FC_NOMEM;
9469 rval = ENOMEM;
9470
9471 fctl_release_remote_port(pd);
9472
9473 fctl_dealloc_job(job);
9474 if (fp_fcio_copyout(fcio, data, mode)) {
9475 rval = EFAULT;
9476 }
9477 break;
9478 }
9479
9480 /* Allocate memory for link error status block */
9481 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);
9482
9483 mutex_enter(&port->fp_mutex);
9484 mutex_enter(&pd->pd_mutex);
9485
9486 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
9487 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
9488 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
9489 cmd->cmd_retry_count = 1;
9490 cmd->cmd_ulp_pkt = NULL;
9491
9492 fp_rls_init(cmd, job);
9493
9494 job->job_private = (void *)rls_acc;
9495
9496 pd_flags = pd->pd_flags;
9497 pd->pd_flags = PD_ELS_IN_PROGRESS;
9498
9499 mutex_exit(&pd->pd_mutex);
9500 mutex_exit(&port->fp_mutex);
9501
9502 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) {
9503 fctl_jobwait(job);
9504
9505 fcio->fcio_errno = job->job_result;
9506 if (job->job_result == FC_SUCCESS) {
9507 ASSERT(pd != NULL);
9508 /*
9509 * link error status block is now available.
9510 * Copy it to userland
9511 */
9512 ASSERT(job->job_private == (void *)rls_acc);
9513 if (ddi_copyout((void *)rls_acc,
9514 (void *)fcio->fcio_obuf,
9515 sizeof (*rls_acc), mode) == 0) {
9516 if (fp_fcio_copyout(fcio, data,
9517 mode)) {
9518 rval = EFAULT;
9519 }
9520 } else {
9521 rval = EFAULT;
9522 }
9523 } else {
9524 rval = EIO;
9525 }
9526 } else {
9527 rval = EIO;
9528 fp_free_pkt(cmd);
9529 }
9530
9531 if (rval) {
9532 mutex_enter(&port->fp_mutex);
9533 mutex_enter(&pd->pd_mutex);
9534 if (pd->pd_flags == PD_ELS_IN_PROGRESS) {
9535 pd->pd_flags = pd_flags;
9536 }
9537 mutex_exit(&pd->pd_mutex);
9538 mutex_exit(&port->fp_mutex);
9539 }
9540
9541 fctl_release_remote_port(pd);
9542 fctl_dealloc_job(job);
9543 kmem_free(rls_acc, sizeof (*rls_acc));
9544
9545 if (fp_fcio_copyout(fcio, data, mode)) {
9546 rval = EFAULT;
9547 }
9548 break;
9549 }
9550
9551 case FCIO_NS: {
9552 fc_ns_cmd_t *ns_req;
9553 fc_ns_cmd32_t *ns_req32;
9554 fctl_ns_req_t *ns_cmd;
9555
9556 if (use32 == B_TRUE) {
9557 if (fcio->fcio_ilen != sizeof (*ns_req32)) {
9558 rval = EINVAL;
9559 break;
9560 }
9561
9562 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP);
9563 ns_req32 = kmem_zalloc(sizeof (*ns_req32), KM_SLEEP);
9564
9565 if (ddi_copyin(fcio->fcio_ibuf, ns_req32,
9566 sizeof (*ns_req32), mode)) {
9567 rval = EFAULT;
9568 kmem_free(ns_req, sizeof (*ns_req));
9569 kmem_free(ns_req32, sizeof (*ns_req32));
9570 break;
9571 }
9572
9573 ns_req->ns_flags = ns_req32->ns_flags;
9574 ns_req->ns_cmd = ns_req32->ns_cmd;
9575 ns_req->ns_req_len = ns_req32->ns_req_len;
9576 ns_req->ns_req_payload = ns_req32->ns_req_payload;
9577 ns_req->ns_resp_len = ns_req32->ns_resp_len;
9578 ns_req->ns_resp_payload = ns_req32->ns_resp_payload;
9579 ns_req->ns_fctl_private = ns_req32->ns_fctl_private;
9580 ns_req->ns_resp_hdr = ns_req32->ns_resp_hdr;
9581
9582 kmem_free(ns_req32, sizeof (*ns_req32));
9583 } else {
9584 if (fcio->fcio_ilen != sizeof (*ns_req)) {
9585 rval = EINVAL;
9586 break;
9587 }
9588
9589 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP);
9590
9591 if (ddi_copyin(fcio->fcio_ibuf, ns_req,
9592 sizeof (fc_ns_cmd_t), mode)) {
9593 rval = EFAULT;
9594 kmem_free(ns_req, sizeof (*ns_req));
9595 break;
9596 }
9597 }
9598
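		/*
		 * Issue the requested name server command through the job
		 * thread and copy the response payload back to the caller's
		 * ns_resp_payload buffer.
		 */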
9599 if (ns_req->ns_req_len <= 0) {
9600 rval = EINVAL;
9601 kmem_free(ns_req, sizeof (*ns_req));
9602 break;
9603 }
9604
9605 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, NULL, KM_SLEEP);
9606 ASSERT(job != NULL);
9607
9608 ns_cmd = fctl_alloc_ns_cmd(ns_req->ns_req_len,
9609 ns_req->ns_resp_len, ns_req->ns_resp_len,
9610 FCTL_NS_FILL_NS_MAP, KM_SLEEP);
9611 ASSERT(ns_cmd != NULL);
9612 ns_cmd->ns_cmd_code = ns_req->ns_cmd;
9613
9614 if (ns_cmd->ns_cmd_code == NS_GA_NXT) {
9615 ns_cmd->ns_gan_max = 1;
9616 ns_cmd->ns_gan_index = 0;
9617 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID;
9618 }
9619
9620 if (ddi_copyin(ns_req->ns_req_payload,
9621 ns_cmd->ns_cmd_buf, ns_req->ns_req_len, mode)) {
9622 rval = EFAULT;
9623 fctl_free_ns_cmd(ns_cmd);
9624 fctl_dealloc_job(job);
9625 kmem_free(ns_req, sizeof (*ns_req));
9626 break;
9627 }
9628
9629 job->job_private = (void *)ns_cmd;
9630 fctl_enque_job(port, job);
9631 fctl_jobwait(job);
9632 rval = job->job_result;
9633
9634 if (rval == FC_SUCCESS) {
9635 if (ns_req->ns_resp_len) {
9636 if (ddi_copyout(ns_cmd->ns_data_buf,
9637 ns_req->ns_resp_payload,
9638 ns_cmd->ns_data_len, mode)) {
9639 rval = EFAULT;
9640 fctl_free_ns_cmd(ns_cmd);
9641 fctl_dealloc_job(job);
9642 kmem_free(ns_req, sizeof (*ns_req));
9643 break;
9644 }
9645 }
9646 } else {
9647 rval = EIO;
9648 }
9649 ns_req->ns_resp_hdr = ns_cmd->ns_resp_hdr;
9650 fctl_free_ns_cmd(ns_cmd);
9651 fctl_dealloc_job(job);
9652 kmem_free(ns_req, sizeof (*ns_req));
9653
9654 if (fp_fcio_copyout(fcio, data, mode)) {
9655 rval = EFAULT;
9656 }
9657 break;
9658 }
9659
9660 default:
9661 rval = ENOTTY;
9662 break;
9663 }
9664
9665 	/*
9666 	 * If set, clear the EXCL busy bit so that other
9667 	 * exclusive-access commands can be received.
9668 	 */
9669 mutex_enter(&port->fp_mutex);
9670 if (port->fp_flag & FP_EXCL_BUSY) {
9671 port->fp_flag &= ~FP_EXCL_BUSY;
9672 }
9673 mutex_exit(&port->fp_mutex);
9674
9675 return (rval);
9676 }
9677
9678
9679 /*
9680  * This function assumes that the response length
9681  * is the same regardless of data model (ILP32 or
9682  * LP64), which is true for all the ioctls
9683  * currently supported.
9684  */
9685 static int
9686 fp_copyout(void *from, void *to, size_t len, int mode)
9687 {
9688 return (ddi_copyout(from, to, len, mode));
9689 }
9690
9691 /*
9692  * This function handles the set-RNID (FC_PORT_SET_NODE_ID) request.
9693  */
9694 static int
9695 fp_set_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio)
9696 {
9697 int rval = 0;
9698 fc_rnid_t *rnid;
9699 fc_fca_pm_t pm;
9700
9701 /* Allocate memory for node id block */
9702 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP);
9703
9704 if (ddi_copyin(fcio->fcio_ibuf, rnid, sizeof (fc_rnid_t), mode)) {
9705 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", EFAULT);
9706 kmem_free(rnid, sizeof (fc_rnid_t));
9707 return (EFAULT);
9708 }
9709
9710 /* Prepare the port management structure */
9711 bzero((caddr_t)&pm, sizeof (pm));
9712
9713 pm.pm_cmd_flags = FC_FCA_PM_WRITE;
9714 pm.pm_cmd_code = FC_PORT_SET_NODE_ID;
9715 pm.pm_data_len = sizeof (*rnid);
9716 pm.pm_data_buf = (caddr_t)rnid;
9717
9718 /* Get the adapter's node data */
9719 rval = port->fp_fca_tran->fca_port_manage(
9720 port->fp_fca_handle, &pm);
9721
9722 if (rval != FC_SUCCESS) {
9723 fcio->fcio_errno = rval;
9724 rval = EIO;
9725 if (fp_fcio_copyout(fcio, data, mode)) {
9726 rval = EFAULT;
9727 }
9728 } else {
9729 mutex_enter(&port->fp_mutex);
9730 /* copy to the port structure */
9731 bcopy(rnid, &port->fp_rnid_params,
9732 sizeof (port->fp_rnid_params));
9733 mutex_exit(&port->fp_mutex);
9734 }
9735
9736 kmem_free(rnid, sizeof (fc_rnid_t));
9737
9738 if (rval != FC_SUCCESS) {
9739 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", rval);
9740 }
9741
9742 return (rval);
9743 }
9744
9745 /*
9746  * This function gets the RNID (node ID) data of the local port.
9747  */
9748 static int
9749 fp_get_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio)
9750 {
9751 fc_rnid_t *rnid;
9752 fc_fca_pm_t pm;
9753 int rval = 0;
9754 uint32_t ret;
9755
9756 /* Allocate memory for rnid data block */
9757 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP);
9758
9759 mutex_enter(&port->fp_mutex);
9760 if (port->fp_rnid_init == 1) {
9761 bcopy(&port->fp_rnid_params, rnid, sizeof (fc_rnid_t));
9762 mutex_exit(&port->fp_mutex);
9763 /* xfer node info to userland */
9764 if (ddi_copyout((void *)rnid, (void *)fcio->fcio_obuf,
9765 sizeof (*rnid), mode) == 0) {
9766 if (fp_fcio_copyout(fcio, data, mode)) {
9767 rval = EFAULT;
9768 }
9769 } else {
9770 rval = EFAULT;
9771 }
9772
9773 kmem_free(rnid, sizeof (fc_rnid_t));
9774
9775 if (rval != FC_SUCCESS) {
9776 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d",
9777 rval);
9778 }
9779
9780 return (rval);
9781 }
9782 mutex_exit(&port->fp_mutex);
9783
9784 /* Prepare the port management structure */
9785 bzero((caddr_t)&pm, sizeof (pm));
9786
9787 pm.pm_cmd_flags = FC_FCA_PM_READ;
9788 pm.pm_cmd_code = FC_PORT_GET_NODE_ID;
9789 pm.pm_data_len = sizeof (fc_rnid_t);
9790 pm.pm_data_buf = (caddr_t)rnid;
9791
9792 /* Get the adapter's node data */
9793 ret = port->fp_fca_tran->fca_port_manage(
9794 port->fp_fca_handle,
9795 &pm);
9796
9797 if (ret == FC_SUCCESS) {
9798 /* initialize in the port_info */
9799 mutex_enter(&port->fp_mutex);
9800 port->fp_rnid_init = 1;
9801 bcopy(rnid, &port->fp_rnid_params, sizeof (*rnid));
9802 mutex_exit(&port->fp_mutex);
9803
9804 /* xfer node info to userland */
9805 if (ddi_copyout((void *)rnid,
9806 (void *)fcio->fcio_obuf,
9807 sizeof (*rnid), mode) == 0) {
9808 if (fp_fcio_copyout(fcio, data,
9809 mode)) {
9810 rval = EFAULT;
9811 }
9812 } else {
9813 rval = EFAULT;
9814 }
9815 } else {
9816 rval = EIO;
9817 fcio->fcio_errno = ret;
9818 if (fp_fcio_copyout(fcio, data, mode)) {
9819 rval = EFAULT;
9820 }
9821 }
9822
9823 kmem_free(rnid, sizeof (fc_rnid_t));
9824
9825 if (rval != FC_SUCCESS) {
9826 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d", rval);
9827 }
9828
9829 return (rval);
9830 }
9831
9832 static int
9833 fp_send_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio,
9834 la_wwn_t *pwwn)
9835 {
9836 int rval = 0;
9837 fc_remote_port_t *pd;
9838 fp_cmd_t *cmd;
9839 job_request_t *job;
9840 la_els_rnid_acc_t *rnid_acc;
9841
9842 pd = fctl_get_remote_port_by_pwwn(port, pwwn);
9843 if (pd == NULL) {
9844 		/*
9845 		 * We can safely assume that the destination port
9846 		 * is logged in: either userland explicitly logs in
9847 		 * before issuing the RNID ioctl, or the device has
9848 		 * been configured and is therefore already logged in.
9849 		 */
9850
9851 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", ENXIO);
9852
9853 return (ENXIO);
9854 }
9855 /*
9856 * Allocate job structure and set job_code as DUMMY,
9857 	 * because we will not go through the job thread.
9858 * Instead fp_sendcmd() is called directly here.
9859 */
9860 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC,
9861 NULL, NULL, KM_SLEEP);
9862
9863 ASSERT(job != NULL);
9864
9865 job->job_counter = 1;
9866
9867 cmd = fp_alloc_pkt(port, sizeof (la_els_rnid_t),
9868 sizeof (la_els_rnid_acc_t), KM_SLEEP, pd);
9869 if (cmd == NULL) {
9870 fcio->fcio_errno = FC_NOMEM;
9871 rval = ENOMEM;
9872
9873 fctl_dealloc_job(job);
9874 if (fp_fcio_copyout(fcio, data, mode)) {
9875 rval = EFAULT;
9876 }
9877
9878 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval);
9879
9880 return (rval);
9881 }
9882
9883 /* Allocate memory for node id accept block */
9884 rnid_acc = kmem_zalloc(sizeof (la_els_rnid_acc_t), KM_SLEEP);
9885
9886 mutex_enter(&port->fp_mutex);
9887 mutex_enter(&pd->pd_mutex);
9888
9889 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
9890 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
9891 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
9892 cmd->cmd_retry_count = 1;
9893 cmd->cmd_ulp_pkt = NULL;
9894
9895 fp_rnid_init(cmd, fcio->fcio_cmd_flags, job);
9896
9897 job->job_private = (void *)rnid_acc;
9898
9899 pd->pd_flags = PD_ELS_IN_PROGRESS;
9900
9901 mutex_exit(&pd->pd_mutex);
9902 mutex_exit(&port->fp_mutex);
9903
9904 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) {
9905 fctl_jobwait(job);
9906 fcio->fcio_errno = job->job_result;
9907 if (job->job_result == FC_SUCCESS) {
9908 int rnid_cnt;
9909 ASSERT(pd != NULL);
9910 /*
9911 * node id block is now available.
9912 * Copy it to userland
9913 */
9914 ASSERT(job->job_private == (void *)rnid_acc);
9915
9916 /* get the response length */
9917 rnid_cnt = sizeof (ls_code_t) + sizeof (fc_rnid_hdr_t) +
9918 rnid_acc->hdr.cmn_len +
9919 rnid_acc->hdr.specific_len;
9920
9921 if (fcio->fcio_olen < rnid_cnt) {
9922 rval = EINVAL;
9923 } else if (ddi_copyout((void *)rnid_acc,
9924 (void *)fcio->fcio_obuf,
9925 rnid_cnt, mode) == 0) {
9926 if (fp_fcio_copyout(fcio, data,
9927 mode)) {
9928 rval = EFAULT;
9929 }
9930 } else {
9931 rval = EFAULT;
9932 }
9933 } else {
9934 rval = EIO;
9935 }
9936 } else {
9937 rval = EIO;
9938 if (pd) {
9939 mutex_enter(&pd->pd_mutex);
9940 pd->pd_flags = PD_IDLE;
9941 mutex_exit(&pd->pd_mutex);
9942 }
9943 fp_free_pkt(cmd);
9944 }
9945
9946 fctl_dealloc_job(job);
9947 kmem_free(rnid_acc, sizeof (la_els_rnid_acc_t));
9948
9949 if (fp_fcio_copyout(fcio, data, mode)) {
9950 rval = EFAULT;
9951 }
9952
9953 if (rval != FC_SUCCESS) {
9954 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval);
9955 }
9956
9957 return (rval);
9958 }
9959
9960 /*
9961 * Copy out to userland
9962 */
9963 static int
9964 fp_fcio_copyout(fcio_t *fcio, intptr_t data, int mode)
9965 {
9966 int rval;
9967
9968 #ifdef _MULTI_DATAMODEL
9969 switch (ddi_model_convert_from(mode & FMODELS)) {
9970 case DDI_MODEL_ILP32: {
9971 struct fcio32 fcio32;
9972
9973 fcio32.fcio_xfer = fcio->fcio_xfer;
9974 fcio32.fcio_cmd = fcio->fcio_cmd;
9975 fcio32.fcio_flags = fcio->fcio_flags;
9976 fcio32.fcio_cmd_flags = fcio->fcio_cmd_flags;
9977 fcio32.fcio_ilen = fcio->fcio_ilen;
9978 fcio32.fcio_ibuf =
9979 (caddr32_t)(uintptr_t)fcio->fcio_ibuf;
9980 fcio32.fcio_olen = fcio->fcio_olen;
9981 fcio32.fcio_obuf =
9982 (caddr32_t)(uintptr_t)fcio->fcio_obuf;
9983 fcio32.fcio_alen = fcio->fcio_alen;
9984 fcio32.fcio_abuf =
9985 (caddr32_t)(uintptr_t)fcio->fcio_abuf;
9986 fcio32.fcio_errno = fcio->fcio_errno;
9987
9988 rval = ddi_copyout((void *)&fcio32, (void *)data,
9989 sizeof (struct fcio32), mode);
9990 break;
9991 }
9992 case DDI_MODEL_NONE:
9993 rval = ddi_copyout((void *)fcio, (void *)data,
9994 sizeof (fcio_t), mode);
9995 break;
9996 }
9997 #else
9998 rval = ddi_copyout((void *)fcio, (void *)data, sizeof (fcio_t), mode);
9999 #endif
10000
10001 return (rval);
10002 }
10003
10004
10005 static void
10006 fp_p2p_online(fc_local_port_t *port, job_request_t *job)
10007 {
10008 uint32_t listlen;
10009 fc_portmap_t *changelist;
10010
10011 ASSERT(MUTEX_HELD(&port->fp_mutex));
10012 ASSERT(port->fp_topology == FC_TOP_PT_PT);
10013 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
10014
10015 listlen = 0;
10016 changelist = NULL;
10017
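	/*
	 * If another state change is already pending (fp_statec_busy > 1),
	 * skip the ULP notification for this one; otherwise build the full
	 * port map and report FC_STATE_ONLINE to the ULPs.
	 */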
10018 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) {
10019 if (port->fp_statec_busy > 1) {
10020 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
10021 }
10022 }
10023 mutex_exit(&port->fp_mutex);
10024
10025 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) {
10026 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0);
10027 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist,
10028 listlen, listlen, KM_SLEEP);
10029
10030 mutex_enter(&port->fp_mutex);
10031 } else {
10032 ASSERT(changelist == NULL && listlen == 0);
10033 mutex_enter(&port->fp_mutex);
10034 if (--port->fp_statec_busy == 0) {
10035 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
10036 }
10037 }
10038 }
10039
10040 static int
10041 fp_fillout_p2pmap(fc_local_port_t *port, fcio_t *fcio, int mode)
10042 {
10043 int rval;
10044 int count;
10045 int index;
10046 int num_devices;
10047 fc_remote_node_t *node;
10048 fc_port_dev_t *devlist;
10049 struct pwwn_hash *head;
10050 fc_remote_port_t *pd;
10051
10052 ASSERT(MUTEX_HELD(&port->fp_mutex));
10053
10054 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t);
10055
10056 devlist = kmem_zalloc(sizeof (fc_port_dev_t) * num_devices, KM_SLEEP);
10057
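	/*
	 * Walk the PWWN hash table and collect every remote port that is
	 * not in the INVALID state into devlist, up to num_devices entries.
	 */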
10058 for (count = index = 0; index < pwwn_table_size; index++) {
10059 head = &port->fp_pwwn_table[index];
10060 pd = head->pwwn_head;
10061 while (pd != NULL) {
10062 mutex_enter(&pd->pd_mutex);
10063 if (pd->pd_state == PORT_DEVICE_INVALID) {
10064 mutex_exit(&pd->pd_mutex);
10065 pd = pd->pd_wwn_hnext;
10066 continue;
10067 }
10068
10069 devlist[count].dev_state = pd->pd_state;
10070 devlist[count].dev_hard_addr = pd->pd_hard_addr;
10071 devlist[count].dev_did = pd->pd_port_id;
10072 devlist[count].dev_did.priv_lilp_posit =
10073 (uint8_t)(index & 0xff);
10074 bcopy((caddr_t)pd->pd_fc4types,
10075 (caddr_t)devlist[count].dev_type,
10076 sizeof (pd->pd_fc4types));
10077
10078 bcopy((caddr_t)&pd->pd_port_name,
10079 (caddr_t)&devlist[count].dev_pwwn,
10080 sizeof (la_wwn_t));
10081
10082 node = pd->pd_remote_nodep;
10083 mutex_exit(&pd->pd_mutex);
10084
10085 if (node) {
10086 mutex_enter(&node->fd_mutex);
10087 bcopy((caddr_t)&node->fd_node_name,
10088 (caddr_t)&devlist[count].dev_nwwn,
10089 sizeof (la_wwn_t));
10090 mutex_exit(&node->fd_mutex);
10091 }
10092 count++;
10093 if (count >= num_devices) {
10094 goto found;
10095 }
10096 }
10097 }
10098 found:
10099 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf,
10100 sizeof (count), mode)) {
10101 rval = FC_FAILURE;
10102 } else if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf,
10103 sizeof (fc_port_dev_t) * num_devices, mode)) {
10104 rval = FC_FAILURE;
10105 } else {
10106 rval = FC_SUCCESS;
10107 }
10108
10109 kmem_free(devlist, sizeof (fc_port_dev_t) * num_devices);
10110
10111 return (rval);
10112 }
10113
10114
10115 /*
10116 * Handle Fabric ONLINE
10117 */
10118 static void
10119 fp_fabric_online(fc_local_port_t *port, job_request_t *job)
10120 {
10121 int index;
10122 int rval;
10123 int dbg_count;
10124 int count = 0;
10125 char ww_name[17];
10126 uint32_t d_id;
10127 uint32_t listlen;
10128 fctl_ns_req_t *ns_cmd;
10129 struct pwwn_hash *head;
10130 fc_remote_port_t *pd;
10131 fc_remote_port_t *npd;
10132 fc_portmap_t *changelist;
10133
10134 ASSERT(MUTEX_HELD(&port->fp_mutex));
10135 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology));
10136 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
10137
10138 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
10139 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
10140 0, KM_SLEEP);
10141
10142 ASSERT(ns_cmd != NULL);
10143
10144 ns_cmd->ns_cmd_code = NS_GID_PN;
10145
10146 /*
10147 * Check if orphans are showing up now
10148 */
10149 if (port->fp_orphan_count) {
10150 fc_orphan_t *orp;
10151 fc_orphan_t *norp = NULL;
10152 fc_orphan_t *prev = NULL;
10153
10154 for (orp = port->fp_orphan_list; orp; orp = norp) {
10155 norp = orp->orp_next;
10156 mutex_exit(&port->fp_mutex);
10157 orp->orp_nscan++;
10158
10159 job->job_counter = 1;
10160 job->job_result = FC_SUCCESS;
10161
10162 ((ns_req_gid_pn_t *)
10163 (ns_cmd->ns_cmd_buf))->pwwn = orp->orp_pwwn;
10164 ((ns_resp_gid_pn_t *)
10165 ns_cmd->ns_data_buf)->pid.port_id = 0;
10166 ((ns_resp_gid_pn_t *)
10167 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0;
10168
10169 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);
10170 if (rval == FC_SUCCESS) {
10171 d_id =
10172 BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
10173 pd = fp_create_remote_port_by_ns(port,
10174 d_id, KM_SLEEP);
10175
10176 if (pd != NULL) {
10177 fc_wwn_to_str(&orp->orp_pwwn, ww_name);
10178
10179 fp_printf(port, CE_WARN, FP_LOG_ONLY,
10180 0, NULL, "N_x Port with D_ID=%x,"
10181 " PWWN=%s reappeared in fabric",
10182 d_id, ww_name);
10183
10184 mutex_enter(&port->fp_mutex);
10185 if (prev) {
10186 prev->orp_next = orp->orp_next;
10187 } else {
10188 ASSERT(orp ==
10189 port->fp_orphan_list);
10190 port->fp_orphan_list =
10191 orp->orp_next;
10192 }
10193 port->fp_orphan_count--;
10194 mutex_exit(&port->fp_mutex);
10195 kmem_free(orp, sizeof (*orp));
10196 count++;
10197
10198 mutex_enter(&pd->pd_mutex);
10199 pd->pd_flags = PD_ELS_MARK;
10200
10201 mutex_exit(&pd->pd_mutex);
10202 } else {
10203 prev = orp;
10204 }
10205 } else {
10206 if (orp->orp_nscan == FC_ORPHAN_SCAN_LIMIT) {
10207 fc_wwn_to_str(&orp->orp_pwwn, ww_name);
10208
10209 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0,
10210 NULL,
10211 " Port WWN %s removed from orphan"
10212 " list after %d scans", ww_name,
10213 orp->orp_nscan);
10214
10215 mutex_enter(&port->fp_mutex);
10216 if (prev) {
10217 prev->orp_next = orp->orp_next;
10218 } else {
10219 ASSERT(orp ==
10220 port->fp_orphan_list);
10221 port->fp_orphan_list =
10222 orp->orp_next;
10223 }
10224 port->fp_orphan_count--;
10225 mutex_exit(&port->fp_mutex);
10226
10227 kmem_free(orp, sizeof (*orp));
10228 } else {
10229 prev = orp;
10230 }
10231 }
10232 mutex_enter(&port->fp_mutex);
10233 }
10234 }
10235
10236 	/*
10237 	 * Walk the Port WWN hash table and reestablish the
10238 	 * LOGIN for any device on which a LOGIN was already
10239 	 * performed; any failure to LOGIN should mark the
10240 	 * port device OLD.
10241 	 */
10242 for (index = 0; index < pwwn_table_size; index++) {
10243 head = &port->fp_pwwn_table[index];
10244 npd = head->pwwn_head;
10245
10246 while ((pd = npd) != NULL) {
10247 la_wwn_t *pwwn;
10248
10249 npd = pd->pd_wwn_hnext;
10250
10251 			/*
10252 			 * Don't count port devices that are new unless
10253 			 * the total number of devices visible through
10254 			 * this port is less than FP_MAX_DEVICES.
10255 			 */
10256 mutex_enter(&pd->pd_mutex);
10257 if (port->fp_dev_count >= FP_MAX_DEVICES ||
10258 (port->fp_options & FP_TARGET_MODE)) {
10259 if (pd->pd_type == PORT_DEVICE_NEW ||
10260 pd->pd_flags == PD_ELS_MARK ||
10261 pd->pd_recepient != PD_PLOGI_INITIATOR) {
10262 mutex_exit(&pd->pd_mutex);
10263 continue;
10264 }
10265 } else {
10266 if (pd->pd_flags == PD_ELS_MARK ||
10267 pd->pd_recepient != PD_PLOGI_INITIATOR) {
10268 mutex_exit(&pd->pd_mutex);
10269 continue;
10270 }
10271 pd->pd_type = PORT_DEVICE_OLD;
10272 }
10273 count++;
10274
10275 /*
10276 * Consult with the name server about D_ID changes
10277 */
10278 job->job_counter = 1;
10279 job->job_result = FC_SUCCESS;
10280
10281 ((ns_req_gid_pn_t *)
10282 (ns_cmd->ns_cmd_buf))->pwwn = pd->pd_port_name;
10283 ((ns_resp_gid_pn_t *)
10284 ns_cmd->ns_data_buf)->pid.port_id = 0;
10285
10286 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->
10287 pid.priv_lilp_posit = 0;
10288
10289 pwwn = &pd->pd_port_name;
10290 pd->pd_flags = PD_ELS_MARK;
10291
10292 mutex_exit(&pd->pd_mutex);
10293 mutex_exit(&port->fp_mutex);
10294
10295 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);
10296 if (rval != FC_SUCCESS) {
10297 fc_wwn_to_str(pwwn, ww_name);
10298
10299 mutex_enter(&pd->pd_mutex);
10300 d_id = pd->pd_port_id.port_id;
10301 pd->pd_type = PORT_DEVICE_DELETE;
10302 mutex_exit(&pd->pd_mutex);
10303
10304 FP_TRACE(FP_NHEAD1(3, 0),
10305 "fp_fabric_online: PD "
10306 "disappeared; d_id=%x, PWWN=%s",
10307 d_id, ww_name);
10308
10309 FP_TRACE(FP_NHEAD2(9, 0),
10310 "N_x Port with D_ID=%x, PWWN=%s"
10311 " disappeared from fabric", d_id,
10312 ww_name);
10313
10314 mutex_enter(&port->fp_mutex);
10315 continue;
10316 }
10317
10318 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
10319
10320 mutex_enter(&port->fp_mutex);
10321 mutex_enter(&pd->pd_mutex);
10322 if (d_id != pd->pd_port_id.port_id) {
10323 fctl_delist_did_table(port, pd);
10324 fc_wwn_to_str(pwwn, ww_name);
10325
10326 FP_TRACE(FP_NHEAD2(9, 0),
10327 "D_ID of a device with PWWN %s changed."
10328 " New D_ID = %x, OLD D_ID = %x", ww_name,
10329 d_id, pd->pd_port_id.port_id);
10330
10331 				pd->pd_port_id.port_id = d_id;
10332 pd->pd_type = PORT_DEVICE_CHANGED;
10333 fctl_enlist_did_table(port, pd);
10334 }
10335 mutex_exit(&pd->pd_mutex);
10336
10337 }
10338 }
10339
10340 if (ns_cmd) {
10341 fctl_free_ns_cmd(ns_cmd);
10342 }
10343
10344 listlen = 0;
10345 changelist = NULL;
10346 if (count) {
10347 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) {
10348 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET;
10349 mutex_exit(&port->fp_mutex);
10350 delay(drv_usectohz(FLA_RR_TOV * 1000 * 1000));
10351 mutex_enter(&port->fp_mutex);
10352 }
10353
10354 dbg_count = 0;
10355
10356 job->job_counter = count;
10357
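		/*
		 * Second pass: for every device marked PD_ELS_MARK above,
		 * either complete the job immediately (deleted or freshly
		 * discovered entries) or re-PLOGI with FP_CMD_PLOGI_RETAIN.
		 */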
10358 for (index = 0; index < pwwn_table_size; index++) {
10359 head = &port->fp_pwwn_table[index];
10360 npd = head->pwwn_head;
10361
10362 while ((pd = npd) != NULL) {
10363 npd = pd->pd_wwn_hnext;
10364
10365 mutex_enter(&pd->pd_mutex);
10366 if (pd->pd_flags != PD_ELS_MARK) {
10367 mutex_exit(&pd->pd_mutex);
10368 continue;
10369 }
10370
10371 dbg_count++;
10372
10373 				/*
10374 				 * If it is already marked for deletion, there
10375 				 * is nothing else to do.
10376 				 */
10377 if (pd->pd_type == PORT_DEVICE_DELETE) {
10378 pd->pd_type = PORT_DEVICE_OLD;
10379
10380 mutex_exit(&pd->pd_mutex);
10381 mutex_exit(&port->fp_mutex);
10382 fp_jobdone(job);
10383 mutex_enter(&port->fp_mutex);
10384
10385 continue;
10386 }
10387
10388 				/*
10389 				 * If it was freshly discovered off the orphan
10390 				 * list, there is nothing else to do.
10391 				 */
10392 if (pd->pd_type == PORT_DEVICE_NEW) {
10393 pd->pd_flags = PD_IDLE;
10394
10395 mutex_exit(&pd->pd_mutex);
10396 mutex_exit(&port->fp_mutex);
10397 fp_jobdone(job);
10398 mutex_enter(&port->fp_mutex);
10399
10400 continue;
10401 }
10402
10403 pd->pd_flags = PD_IDLE;
10404 d_id = pd->pd_port_id.port_id;
10405
10406 /*
10407 * Explicitly mark all devices OLD; successful
10408 * PLOGI should reset this to either NO_CHANGE
10409 * or CHANGED.
10410 */
10411 if (pd->pd_type != PORT_DEVICE_CHANGED) {
10412 pd->pd_type = PORT_DEVICE_OLD;
10413 }
10414
10415 mutex_exit(&pd->pd_mutex);
10416 mutex_exit(&port->fp_mutex);
10417
10418 rval = fp_port_login(port, d_id, job,
10419 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL);
10420
10421 if (rval != FC_SUCCESS) {
10422 fp_jobdone(job);
10423 }
10424 mutex_enter(&port->fp_mutex);
10425 }
10426 }
10427 mutex_exit(&port->fp_mutex);
10428
10429 ASSERT(dbg_count == count);
10430 fp_jobwait(job);
10431
10432 mutex_enter(&port->fp_mutex);
10433
10434 ASSERT(port->fp_statec_busy > 0);
10435 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) {
10436 if (port->fp_statec_busy > 1) {
10437 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
10438 }
10439 }
10440 mutex_exit(&port->fp_mutex);
10441 } else {
10442 ASSERT(port->fp_statec_busy > 0);
10443 if (port->fp_statec_busy > 1) {
10444 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
10445 }
10446 mutex_exit(&port->fp_mutex);
10447 }
10448
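/*
 * If another state change arrived while this one was being
 * handled (fp_statec_busy > 1), JOB_CANCEL_ULP_NOTIFICATION was
 * set above and the ULP callback for this ONLINE pass is
 * skipped; the newer state change will drive its own
 * fctl_fillout_map()/fp_ulp_statec_cb() when it is processed.
 */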
10449 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) {
10450 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0);
10451
10452 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist,
10453 listlen, listlen, KM_SLEEP);
10454
10455 mutex_enter(&port->fp_mutex);
10456 } else {
10457 ASSERT(changelist == NULL && listlen == 0);
10458 mutex_enter(&port->fp_mutex);
10459 if (--port->fp_statec_busy == 0) {
10460 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
10461 }
10462 }
10463 }
10464
10465
10466 /*
10467 * Fill out device list for userland ioctl in private loop
10468 */
10469 static int
10470 fp_fillout_loopmap(fc_local_port_t *port, fcio_t *fcio, int mode)
10471 {
10472 int rval;
10473 int count;
10474 int index;
10475 int num_devices;
10476 fc_remote_node_t *node;
10477 fc_port_dev_t *devlist;
10478 int lilp_device_count;
10479 fc_lilpmap_t *lilp_map;
10480 uchar_t *alpa_list;
10481
10482 ASSERT(MUTEX_HELD(&port->fp_mutex));
10483
10484 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t);
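/*
 * The caller's output buffer size bounds how many entries can be
 * returned. If more devices appear to exist on the loop than are
 * currently in our tables (fp_total_devices > fp_dev_count) and
 * the buffer could hold them all, refresh the loop map with a
 * JOB_PORT_GETMAP job before filling out the list.
 */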
10485 if (port->fp_total_devices > port->fp_dev_count &&
10486 num_devices >= port->fp_total_devices) {
10487 job_request_t *job;
10488
10489 mutex_exit(&port->fp_mutex);
10490 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, NULL, KM_SLEEP);
10491 job->job_counter = 1;
10492
10493 mutex_enter(&port->fp_mutex);
10494 fp_get_loopmap(port, job);
10495 mutex_exit(&port->fp_mutex);
10496
10497 fp_jobwait(job);
10498 fctl_dealloc_job(job);
10499 } else {
10500 mutex_exit(&port->fp_mutex);
10501 }
10502 devlist = kmem_zalloc(sizeof (*devlist) * num_devices, KM_SLEEP);
10503
10504 mutex_enter(&port->fp_mutex);
10505
10506 /*
10507 * Applications are accustomed to getting the device list in
10508 * LILP map order. The HBA firmware usually returns the device
10509 * map in the LILP map order and diagnostic applications would
10510 * prefer to receive the device list in that order too.
10511 */
10512 lilp_map = &port->fp_lilp_map;
10513 alpa_list = &lilp_map->lilp_alpalist[0];
10514
10515 /*
10516 * The length field corresponds to the offset in the LILP frame,
10517 * which begins at 1. The thing to note here is that
10518 * lilp_device_count is 1 more than port->fp_total_devices since
10519 * the host adapter's ALPA also shows up in the LILP map. We
10520 * don't, however, return details of the host adapter since
10521 * fctl_get_remote_port_by_did fails for the host adapter's ALPA
10522 * and applications are required to issue the FCIO_GET_HOST_PARAMS
10523 * ioctl to obtain details about the host adapter port.
10524 */
10525 lilp_device_count = lilp_map->lilp_length;
10526
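/*
 * Walk the ALPA list in LILP order. A purely illustrative example
 * (the ALPA values are made up, not taken from this code): with
 * lilp_alpalist[] = { 0x01, 0xE8, 0xE4 } and lilp_length == 3,
 * the entry holding the host adapter's own ALPA is skipped
 * because fctl_get_remote_port_by_did() returns NULL for it,
 * while the remaining entries are reported in map order with
 * dev_did.priv_lilp_posit recording each one's position.
 */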
10527 for (count = index = 0; index < lilp_device_count &&
10528 count < num_devices; index++) {
10529 uint32_t d_id;
10530 fc_remote_port_t *pd;
10531
10532 d_id = alpa_list[index];
10533
10534 mutex_exit(&port->fp_mutex);
10535 pd = fctl_get_remote_port_by_did(port, d_id);
10536 mutex_enter(&port->fp_mutex);
10537
10538 if (pd != NULL) {
10539 mutex_enter(&pd->pd_mutex);
10540
10541 if (pd->pd_state == PORT_DEVICE_INVALID) {
10542 mutex_exit(&pd->pd_mutex);
10543 continue;
10544 }
10545
10546 devlist[count].dev_state = pd->pd_state;
10547 devlist[count].dev_hard_addr = pd->pd_hard_addr;
10548 devlist[count].dev_did = pd->pd_port_id;
10549 devlist[count].dev_did.priv_lilp_posit =
10550 (uint8_t)(index & 0xff);
10551 bcopy((caddr_t)pd->pd_fc4types,
10552 (caddr_t)devlist[count].dev_type,
10553 sizeof (pd->pd_fc4types));
10554
10555 bcopy((caddr_t)&pd->pd_port_name,
10556 (caddr_t)&devlist[count].dev_pwwn,
10557 sizeof (la_wwn_t));
10558
10559 node = pd->pd_remote_nodep;
10560 mutex_exit(&pd->pd_mutex);
10561
10562 if (node) {
10563 mutex_enter(&node->fd_mutex);
10564 bcopy((caddr_t)&node->fd_node_name,
10565 (caddr_t)&devlist[count].dev_nwwn,
10566 sizeof (la_wwn_t));
10567 mutex_exit(&node->fd_mutex);
10568 }
10569 count++;
10570 }
10571 }
10572
10573 rval = FC_SUCCESS;
10574 
10575 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf,
10576 sizeof (count), mode)) {
10577 rval = FC_FAILURE;
10578 }
10579 
10580 if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf,
10581 sizeof (fc_port_dev_t) * num_devices, mode)) {
10582 rval = FC_FAILURE;
10583 }
10584
10585 kmem_free(devlist, sizeof (*devlist) * num_devices);
10586 ASSERT(MUTEX_HELD(&port->fp_mutex));
10587
10588 return (rval);
10589 }
10590
10591
10592 /*
10593 * Completion function for responses to unsolicited commands
10594 */
10595 static void
10596 fp_unsol_intr(fc_packet_t *pkt)
10597 {
10598 fp_cmd_t *cmd;
10599 fc_local_port_t *port;
10600
10601 cmd = pkt->pkt_ulp_private;
10602 port = cmd->cmd_port;
10603
10604 mutex_enter(&port->fp_mutex);
10605 port->fp_out_fpcmds--;
10606 mutex_exit(&port->fp_mutex);
10607
10608 if (pkt->pkt_state != FC_PKT_SUCCESS) {
10609 fp_printf(port, CE_WARN, FP_LOG_ONLY, 0, pkt,
10610 "couldn't post response to unsolicited request;"
10611 " ox_id=%x rx_id=%x", pkt->pkt_cmd_fhdr.ox_id,
10612 pkt->pkt_resp_fhdr.rx_id);
10613 }
10614
10615 if (cmd == port->fp_els_resp_pkt) {
10616 mutex_enter(&port->fp_mutex);
10617 port->fp_els_resp_pkt_busy = 0;
10618 mutex_exit(&port->fp_mutex);
10619 return;
10620 }
10621
10622 fp_free_pkt(cmd);
10623 }
10624
10625
10626 /*
10627 * solicited LINIT ELS completion function
10628 */
10629 static void
10630 fp_linit_intr(fc_packet_t *pkt)
10631 {
10632 fp_cmd_t *cmd;
10633 job_request_t *job;
10634 fc_linit_resp_t acc;
10635 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port;
10636
10637 cmd = (fp_cmd_t *)pkt->pkt_ulp_private;
10638
10639 mutex_enter(&cmd->cmd_port->fp_mutex);
10640 cmd->cmd_port->fp_out_fpcmds--;
10641 mutex_exit(&cmd->cmd_port->fp_mutex);
10642
10643 if (FP_IS_PKT_ERROR(pkt)) {
10644 (void) fp_common_intr(pkt, 1);
10645 return;
10646 }
10647
10648 job = cmd->cmd_job;
10649
10650 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&acc,
10651 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
10652 if (acc.status != FC_LINIT_SUCCESS) {
10653 job->job_result = FC_FAILURE;
10654 } else {
10655 job->job_result = FC_SUCCESS;
10656 }
10657
10658 fp_iodone(cmd);
10659 }
10660
10661
10662 /*
10663 * Decode the unsolicited request; For FC-4 Device and Link data frames
10664 * notify the registered ULP of this FC-4 type right here. For Unsolicited
10665 * ELS requests, submit a request to the job_handler thread to work on it.
10666 * The intent is to act quickly on the FC-4 unsolicited link and data frames
10667 * and save much of the interrupt time processing of unsolicited ELS requests
10668 * and hand it off to the job_handler thread.
10669 */
10670 static void
10671 fp_unsol_cb(opaque_t port_handle, fc_unsol_buf_t *buf, uint32_t type)
10672 {
10673 uchar_t r_ctl;
10674 uchar_t ls_code;
10675 uint32_t s_id;
10676 uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
10677 uint32_t cb_arg;
10678 fp_cmd_t *cmd;
10679 fc_local_port_t *port;
10680 job_request_t *job;
10681 fc_remote_port_t *pd;
10682
10683 port = port_handle;
10684
10685 FP_TRACE(FP_NHEAD1(1, 0), "fp_unsol_cb: s_id=%x,"
10686 " d_id=%x, type=%x, r_ctl=%x, f_ctl=%x"
10687 " seq_id=%x, df_ctl=%x, seq_cnt=%x, ox_id=%x, rx_id=%x"
10688 " ro=%x, buffer[0]:%x", buf->ub_frame.s_id, buf->ub_frame.d_id,
10689 buf->ub_frame.type, buf->ub_frame.r_ctl, buf->ub_frame.f_ctl,
10690 buf->ub_frame.seq_id, buf->ub_frame.df_ctl, buf->ub_frame.seq_cnt,
10691 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro,
10692 buf->ub_buffer[0]);
10693
10694 if (type & 0x80000000) {
10695 /*
10696 * Huh ? Nothing much can be done without
10697 * a valid buffer. So just exit.
10698 */
10699 return;
10700 }
10701 /*
10702 * If unsolicited interrupts arrive while it isn't
10703 * safe to handle unsolicited callbacks, drop them; yes,
10704 * drop them on the floor.
10705 */
10706 mutex_enter(&port->fp_mutex);
10707 port->fp_active_ubs++;
10708 if ((port->fp_soft_state &
10709 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) ||
10710 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) {
10711
10712 FP_TRACE(FP_NHEAD1(3, 0), "fp_unsol_cb: port state is "
10713 "not ONLINE. s_id=%x, d_id=%x, type=%x, "
10714 "seq_id=%x, ox_id=%x, rx_id=%x"
10715 "ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id,
10716 buf->ub_frame.type, buf->ub_frame.seq_id,
10717 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro);
10718
10719 ASSERT(port->fp_active_ubs > 0);
10720 if (--(port->fp_active_ubs) == 0) {
10721 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB;
10722 }
10723
10724 mutex_exit(&port->fp_mutex);
10725
10726 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle,
10727 1, &buf->ub_token);
10728
10729 return;
10730 }
10731
10732 r_ctl = buf->ub_frame.r_ctl;
10733 s_id = buf->ub_frame.s_id;
10734 if (port->fp_active_ubs == 1) {
10735 port->fp_soft_state |= FP_SOFT_IN_UNSOL_CB;
10736 }
10737
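/*
 * A LOGO that arrives while a state change is already being
 * processed is not queued to the job thread: the remote port is
 * simply downgraded from LOGGED_IN to VALID, the buffer is
 * released and we bail out, since the state change in progress
 * will re-evaluate the login state anyway.
 */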
10738 if (r_ctl == R_CTL_ELS_REQ && buf->ub_buffer[0] == LA_ELS_LOGO &&
10739 port->fp_statec_busy) {
10740 mutex_exit(&port->fp_mutex);
10741 pd = fctl_get_remote_port_by_did(port, s_id);
10742 if (pd) {
10743 mutex_enter(&pd->pd_mutex);
10744 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
10745 FP_TRACE(FP_NHEAD1(3, 0),
10746 "LOGO for LOGGED IN D_ID %x",
10747 buf->ub_frame.s_id);
10748 pd->pd_state = PORT_DEVICE_VALID;
10749 }
10750 mutex_exit(&pd->pd_mutex);
10751 }
10752
10753 mutex_enter(&port->fp_mutex);
10754 ASSERT(port->fp_active_ubs > 0);
10755 if (--(port->fp_active_ubs) == 0) {
10756 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB;
10757 }
10758 mutex_exit(&port->fp_mutex);
10759
10760 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle,
10761 1, &buf->ub_token);
10762
10763 FP_TRACE(FP_NHEAD1(3, 0),
10764 "fp_unsol_cb() bailing out LOGO for D_ID %x",
10765 buf->ub_frame.s_id);
10766 return;
10767 }
10768
10769 if (port->fp_els_resp_pkt_busy == 0) {
10770 if (r_ctl == R_CTL_ELS_REQ) {
10771 ls_code = buf->ub_buffer[0];
10772
10773 switch (ls_code) {
10774 case LA_ELS_PLOGI:
10775 case LA_ELS_FLOGI:
10776 port->fp_els_resp_pkt_busy = 1;
10777 mutex_exit(&port->fp_mutex);
10778 fp_i_handle_unsol_els(port, buf);
10779
10780 mutex_enter(&port->fp_mutex);
10781 ASSERT(port->fp_active_ubs > 0);
10782 if (--(port->fp_active_ubs) == 0) {
10783 port->fp_soft_state &=
10784 ~FP_SOFT_IN_UNSOL_CB;
10785 }
10786 mutex_exit(&port->fp_mutex);
10787 port->fp_fca_tran->fca_ub_release(
10788 port->fp_fca_handle, 1, &buf->ub_token);
10789
10790 return;
10791 case LA_ELS_RSCN:
10792 if (++(port)->fp_rscn_count ==
10793 FC_INVALID_RSCN_COUNT) {
10794 ++(port)->fp_rscn_count;
10795 }
10796 rscn_count = port->fp_rscn_count;
10797 break;
10798
10799 default:
10800 break;
10801 }
10802 }
10803 } else if ((r_ctl == R_CTL_ELS_REQ) &&
10804 (buf->ub_buffer[0] == LA_ELS_RSCN)) {
10805 if (++port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
10806 ++port->fp_rscn_count;
10807 }
10808 rscn_count = port->fp_rscn_count;
10809 }
10810
10811 mutex_exit(&port->fp_mutex);
10812
10813 switch (r_ctl & R_CTL_ROUTING) {
10814 case R_CTL_DEVICE_DATA:
10815 /*
10816 * If the unsolicited buffer is a CT IU,
10817 * have the job_handler thread work on it.
10818 */
10819 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) {
10820 break;
10821 }
10822 /* FALLTHROUGH */
10823
10824 case R_CTL_FC4_SVC: {
10825 int sendup = 0;
10826
10827 /*
10828 * If a LOGIN isn't performed before this request,
10829 * shut the door on this port with a reply that a
10830 * LOGIN is required. We make an exception however
10831 * for IP broadcast packets and pass them through
10832 * to the IP ULP(s) to handle broadcast requests.
10833 * This is not a problem for private loop devices
10834 * but for fabric topologies we don't log into the
10835 * remote ports during port initialization and
10836 * the ULPs need to log into requesting ports on
10837 * demand.
10838 */
10839 pd = fctl_get_remote_port_by_did(port, s_id);
10840 if (pd) {
10841 mutex_enter(&pd->pd_mutex);
10842 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
10843 sendup++;
10844 }
10845 mutex_exit(&pd->pd_mutex);
10846 } else if ((pd == NULL) &&
10847 (buf->ub_frame.type == FC_TYPE_IS8802_SNAP) &&
10848 (buf->ub_frame.d_id == 0xffffff ||
10849 buf->ub_frame.d_id == 0x00)) {
10850 /* broadcast IP frame - so send up via job thread */
10851 break;
10852 }
10853
10854 /*
10855 * Send all FC4 services via job thread too
10856 */
10857 if ((r_ctl & R_CTL_ROUTING) == R_CTL_FC4_SVC) {
10858 break;
10859 }
10860
10861 if (sendup || !FC_IS_REAL_DEVICE(s_id)) {
10862 fctl_ulp_unsol_cb(port, buf, buf->ub_frame.type);
10863 return;
10864 }
10865
10866 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
10867 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
10868 0, KM_NOSLEEP, pd);
10869 if (cmd != NULL) {
10870 fp_els_rjt_init(port, cmd, buf,
10871 FC_ACTION_NON_RETRYABLE,
10872 FC_REASON_LOGIN_REQUIRED, NULL);
10873
10874 if (fp_sendcmd(port, cmd,
10875 port->fp_fca_handle) != FC_SUCCESS) {
10876 fp_free_pkt(cmd);
10877 }
10878 }
10879 }
10880
10881 mutex_enter(&port->fp_mutex);
10882 ASSERT(port->fp_active_ubs > 0);
10883 if (--(port->fp_active_ubs) == 0) {
10884 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB;
10885 }
10886 mutex_exit(&port->fp_mutex);
10887 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle,
10888 1, &buf->ub_token);
10889
10890 return;
10891 }
10892
10893 default:
10894 break;
10895 }
10896
10897 /*
10898 * Submit a Request to the job_handler thread to work
10899 * on the unsolicited request. The potential side effect
10900 * of this is that the unsolicited buffer takes a little
10901 * longer to get released but we save interrupt time in
10902 * the bargain.
10903 */
10904 cb_arg = (rscn_count == FC_INVALID_RSCN_COUNT) ? NULL : rscn_count;
10905
10906 /*
10907 * One way that the rscn_count will get used is described below:
10908 *
10909 * 1. fp_unsol_cb() gets an RSCN and updates fp_rscn_count.
10910 * 2. Before mutex is released, a copy of it is stored in rscn_count.
10911 * 3. The count is passed to job thread as JOB_UNSOL_REQUEST (below)
10912 * by overloading the job_cb_arg to pass the rscn_count
10913 * 4. When one of the routines processing the RSCN picks it up (ex:
10914 * fp_validate_rscn_page()), it passes this count in the map
10915 * structure (as part of the map_rscn_info structure member) to the
10916 * ULPs.
10917 * 5. When ULPs make calls back to the transport (example interfaces for
10918 * this are fc_ulp_transport(), fc_ulp_login(), fc_issue_els()), they
10919 * can now pass back this count as part of the fc_packet's
10920 * pkt_ulp_rscn_count member. fcp does this currently.
10921 * 6. When transport gets a call to transport a command on the wire, it
10922 * will check to see if there is a valid pkt_ulp_rsvd1 field in the
10923 * fc_packet. If there is, it will match that info with the current
10924 * rscn_count on that instance of the port. If they don't match up
10925 * then there was a newer RSCN. The ULP gets back an error code which
10926 * informs it about it - FC_DEVICE_BUSY_NEW_RSCN.
10927 * 7. At this point the ULP is free to make up its own mind as to how to
10928 * handle this. Currently, fcp will reset its retry counters and keep
10929 * retrying the operation it was doing in anticipation of getting a
10930 * new state change call back for the new RSCN.
10931 */
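/*
 * A hypothetical sketch of steps 4 through 7 above from the ULP
 * side (illustration only, not code in this driver):
 *
 *	save the count delivered in the map's map_rscn_info;
 *	place it in the fc_packet's pkt_ulp_rscn_count before
 *	    calling fc_ulp_transport()/fc_ulp_login();
 *	on FC_DEVICE_BUSY_NEW_RSCN, reset retries and wait for
 *	    the state change callback driven by the newer RSCN.
 *
 * The member and interface names are the ones mentioned in the
 * description above; fcp is the ULP that currently does this.
 */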
10932 job = fctl_alloc_job(JOB_UNSOL_REQUEST, 0, NULL,
10933 (opaque_t)(uintptr_t)cb_arg, KM_NOSLEEP);
10934 if (job == NULL) {
10935 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, "fp_unsol_cb() "
10936 "couldn't submit a job to the thread, failing..");
10937
10938 mutex_enter(&port->fp_mutex);
10939
10940 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
10941 --port->fp_rscn_count;
10942 }
10943
10944 ASSERT(port->fp_active_ubs > 0);
10945 if (--(port->fp_active_ubs) == 0) {
10946 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB;
10947 }
10948
10949 mutex_exit(&port->fp_mutex);
10950 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle,
10951 1, &buf->ub_token);
10952
10953 return;
10954 }
10955 job->job_private = (void *)buf;
10956 fctl_enque_job(port, job);
10957 }
10958
10959
10960 /*
10961 * Handle unsolicited requests
10962 */
10963 static void
10964 fp_handle_unsol_buf(fc_local_port_t *port, fc_unsol_buf_t *buf,
10965 job_request_t *job)
10966 {
10967 uchar_t r_ctl;
10968 uchar_t ls_code;
10969 uint32_t s_id;
10970 fp_cmd_t *cmd;
10971 fc_remote_port_t *pd;
10972 fp_unsol_spec_t *ub_spec;
10973
10974 r_ctl = buf->ub_frame.r_ctl;
10975 s_id = buf->ub_frame.s_id;
10976
10977 switch (r_ctl & R_CTL_ROUTING) {
10978 case R_CTL_EXTENDED_SVC:
10979 if (r_ctl != R_CTL_ELS_REQ) {
10980 break;
10981 }
10982
10983 ls_code = buf->ub_buffer[0];
10984 switch (ls_code) {
10985 case LA_ELS_LOGO:
10986 case LA_ELS_ADISC:
10987 case LA_ELS_PRLO:
10988 pd = fctl_get_remote_port_by_did(port, s_id);
10989 if (pd == NULL) {
10990 if (!FC_IS_REAL_DEVICE(s_id)) {
10991 break;
10992 }
10993 if (!FP_IS_CLASS_1_OR_2(buf->ub_class)) {
10994 break;
10995 }
10996 if ((cmd = fp_alloc_pkt(port,
10997 sizeof (la_els_rjt_t), 0, KM_SLEEP,
10998 NULL)) == NULL) {
10999 /*
11000 * Can this actually fail when
11001 * given KM_SLEEP? (Could be used
11002 * this way in a number of places.)
11003 */
11004 break;
11005 }
11006
11007 fp_els_rjt_init(port, cmd, buf,
11008 FC_ACTION_NON_RETRYABLE,
11009 FC_REASON_INVALID_LINK_CTRL, job);
11010
11011 if (fp_sendcmd(port, cmd,
11012 port->fp_fca_handle) != FC_SUCCESS) {
11013 fp_free_pkt(cmd);
11014 }
11015
11016 break;
11017 }
11018 if (ls_code == LA_ELS_LOGO) {
11019 fp_handle_unsol_logo(port, buf, pd, job);
11020 } else if (ls_code == LA_ELS_ADISC) {
11021 fp_handle_unsol_adisc(port, buf, pd, job);
11022 } else {
11023 fp_handle_unsol_prlo(port, buf, pd, job);
11024 }
11025 break;
11026
11027 case LA_ELS_PLOGI:
11028 fp_handle_unsol_plogi(port, buf, job, KM_SLEEP);
11029 break;
11030
11031 case LA_ELS_FLOGI:
11032 fp_handle_unsol_flogi(port, buf, job, KM_SLEEP);
11033 break;
11034
11035 case LA_ELS_RSCN:
11036 fp_handle_unsol_rscn(port, buf, job, KM_SLEEP);
11037 break;
11038
11039 default:
11040 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP);
11041 ub_spec->port = port;
11042 ub_spec->buf = buf;
11043
11044 (void) taskq_dispatch(port->fp_taskq,
11045 fp_ulp_unsol_cb, ub_spec, KM_SLEEP);
11046 return;
11047 }
11048 break;
11049
11050 case R_CTL_BASIC_SVC:
11051 /*
11052 * The unsolicited basic link services could be ABTS
11053 * and RMC (or even a NOP). Just BA_RJT them until
11054 * such time as there arises a need to handle them more
11055 * carefully.
11056 */
11057 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11058 cmd = fp_alloc_pkt(port, sizeof (la_ba_rjt_t),
11059 0, KM_SLEEP, NULL);
11060 if (cmd != NULL) {
11061 fp_ba_rjt_init(port, cmd, buf, job);
11062 if (fp_sendcmd(port, cmd,
11063 port->fp_fca_handle) != FC_SUCCESS) {
11064 fp_free_pkt(cmd);
11065 }
11066 }
11067 }
11068 break;
11069
11070 case R_CTL_DEVICE_DATA:
11071 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) {
11072 /*
11073 * Mostly this is of type FC_TYPE_FC_SERVICES.
11074 * As we don't like any Unsolicited FC services
11075 * requests, we would do well to RJT them as
11076 * well.
11077 */
11078 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11079 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
11080 0, KM_SLEEP, NULL);
11081 if (cmd != NULL) {
11082 fp_els_rjt_init(port, cmd, buf,
11083 FC_ACTION_NON_RETRYABLE,
11084 FC_REASON_INVALID_LINK_CTRL, job);
11085
11086 if (fp_sendcmd(port, cmd,
11087 port->fp_fca_handle) !=
11088 FC_SUCCESS) {
11089 fp_free_pkt(cmd);
11090 }
11091 }
11092 }
11093 break;
11094 }
11095 /* FALLTHROUGH */
11096
11097 case R_CTL_FC4_SVC:
11098 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP);
11099 ub_spec->port = port;
11100 ub_spec->buf = buf;
11101
11102 (void) taskq_dispatch(port->fp_taskq,
11103 fp_ulp_unsol_cb, ub_spec, KM_SLEEP);
11104 return;
11105
11106 case R_CTL_LINK_CTL:
11107 /*
11108 * Turn a deaf ear to unsolicited link control frames.
11109 * Typical unsolicited link control Frame is an LCR
11110 * (to reset End to End credit to the default login
11111 * value and abort current sequences for all classes)
11112 * An intelligent microcode/firmware should handle
11113 * this transparently at its level and not pass all
11114 * the way up here.
11115 *
11116 * Possible responses to LCR are R_RDY, F_RJT, P_RJT
11117 * or F_BSY. P_RJT is chosen to be the most appropriate
11118 * at this time.
11119 */
11120 /* FALLTHROUGH */
11121
11122 default:
11123 /*
11124 * Just reject everything else as an invalid request.
11125 */
11126 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11127 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
11128 0, KM_SLEEP, NULL);
11129 if (cmd != NULL) {
11130 fp_els_rjt_init(port, cmd, buf,
11131 FC_ACTION_NON_RETRYABLE,
11132 FC_REASON_INVALID_LINK_CTRL, job);
11133
11134 if (fp_sendcmd(port, cmd,
11135 port->fp_fca_handle) != FC_SUCCESS) {
11136 fp_free_pkt(cmd);
11137 }
11138 }
11139 }
11140 break;
11141 }
11142
11143 mutex_enter(&port->fp_mutex);
11144 ASSERT(port->fp_active_ubs > 0);
11145 if (--(port->fp_active_ubs) == 0) {
11146 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB;
11147 }
11148 mutex_exit(&port->fp_mutex);
11149 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle,
11150 1, &buf->ub_token);
11151 }
11152
11153
11154 /*
11155 * Prepare a BA_RJT and send it over.
11156 */
11157 static void
11158 fp_ba_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf,
11159 job_request_t *job)
11160 {
11161 fc_packet_t *pkt;
11162 la_ba_rjt_t payload;
11163
11164 ASSERT(!MUTEX_HELD(&port->fp_mutex));
11165
11166 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class;
11167 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND;
11168 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
11169 cmd->cmd_retry_count = 1;
11170 cmd->cmd_ulp_pkt = NULL;
11171
11172 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
11173 cmd->cmd_job = job;
11174
11175 pkt = &cmd->cmd_pkt;
11176
11177 fp_unsol_resp_init(pkt, buf, R_CTL_LS_BA_RJT, FC_TYPE_BASIC_LS);
11178
11179 payload.reserved = 0;
11180 payload.reason_code = FC_REASON_CMD_UNSUPPORTED;
11181 payload.explanation = FC_EXPLN_NONE;
11182 payload.vendor = 0;
11183
11184 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
11185 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
11186 }
11187
11188
11189 /*
11190 * Prepare an LS_RJT and send it over
11191 */
11192 static void
11193 fp_els_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf,
11194 uchar_t action, uchar_t reason, job_request_t *job)
11195 {
11196 fc_packet_t *pkt;
11197 la_els_rjt_t payload;
11198
11199 ASSERT(!MUTEX_HELD(&port->fp_mutex));
11200
11201 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class;
11202 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND;
11203 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
11204 cmd->cmd_retry_count = 1;
11205 cmd->cmd_ulp_pkt = NULL;
11206
11207 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
11208 cmd->cmd_job = job;
11209
11210 pkt = &cmd->cmd_pkt;
11211
11212 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
11213
11214 payload.ls_code.ls_code = LA_ELS_RJT;
11215 payload.ls_code.mbz = 0;
11216 payload.action = action;
11217 payload.reason = reason;
11218 payload.reserved = 0;
11219 payload.vu = 0;
11220
11221 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
11222 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
11223 }
11224
11225 /*
11226 * Function: fp_prlo_acc_init
11227 *
11228 * Description: Initializes a Link Service Accept for a PRLO.
11229 *
11230 * Arguments: *port Local port through which the PRLO was
11231 * received.
11232 * cmd Command that will carry the accept.
11233 * *buf Unsolicited buffer containing the PRLO
11234 * request.
11235 * job Job request.
11236 * sleep Allocation mode.
11237 *
11238 * Return Value: *cmd Command containing the response.
11239 *
11240 * Context: Depends on the parameter sleep.
11241 */
11242 fp_cmd_t *
11243 fp_prlo_acc_init(fc_local_port_t *port, fc_remote_port_t *pd,
11244 fc_unsol_buf_t *buf, job_request_t *job, int sleep)
11245 {
11246 fp_cmd_t *cmd;
11247 fc_packet_t *pkt;
11248 la_els_prlo_t *req;
11249 size_t len;
11250 uint16_t flags;
11251
11252 req = (la_els_prlo_t *)buf->ub_buffer;
11253 len = (size_t)ntohs(req->payload_length);
11254
11255 /*
11256 * The payload of the accept to a PRLO has to be the exact match of
11257 * the payload of the request (with the exception of the code).
11258 */
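/*
 * Because of that, the request buffer itself is reused as the
 * accept payload below: only the ls_code and the response-code
 * bits of the flags word are rewritten before the payload is
 * copied out to pkt_cmd.
 */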
11259 cmd = fp_alloc_pkt(port, (int)len, 0, sleep, pd);
11260
11261 if (cmd) {
11262 /*
11263 * The fp command was successfully allocated.
11264 */
11265 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class;
11266 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND;
11267 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
11268 cmd->cmd_retry_count = 1;
11269 cmd->cmd_ulp_pkt = NULL;
11270
11271 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
11272 cmd->cmd_job = job;
11273
11274 pkt = &cmd->cmd_pkt;
11275
11276 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP,
11277 FC_TYPE_EXTENDED_LS);
11278
11279 /* The code is overwritten for the copy. */
11280 req->ls_code = LA_ELS_ACC;
11281 /* Response code is set. */
11282 flags = ntohs(req->flags);
11283 flags &= ~SP_RESP_CODE_MASK;
11284 flags |= SP_RESP_CODE_REQ_EXECUTED;
11285 req->flags = htons(flags);
11286
11287 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)req,
11288 (uint8_t *)pkt->pkt_cmd, len, DDI_DEV_AUTOINCR);
11289 }
11290 return (cmd);
11291 }
11292
11293 /*
11294 * Prepare an ACC response to an ELS request
11295 */
11296 static void
11297 fp_els_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf,
11298 job_request_t *job)
11299 {
11300 fc_packet_t *pkt;
11301 ls_code_t payload;
11302
11303 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class;
11304 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND;
11305 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
11306 cmd->cmd_retry_count = 1;
11307 cmd->cmd_ulp_pkt = NULL;
11308
11309 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
11310 cmd->cmd_job = job;
11311
11312 pkt = &cmd->cmd_pkt;
11313
11314 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
11315
11316 payload.ls_code = LA_ELS_ACC;
11317 payload.mbz = 0;
11318
11319 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
11320 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
11321 }
11322
11323 /*
11324 * Unsolicited PRLO handler
11325 *
11326 * A Process Logout should be handled by the ULP that established it. However,
11327 * some devices send a PRLO to trigger a PLOGI followed by a PRLI. This happens
11328 * when a device implicitly logs out an initiator (for whatever reason) and
11329 * tries to get that initiator to re-establish the connection (PLOGI and PRLI).
11330 * The logical thing to do for the device would be to send a LOGO in response
11331 * to any FC4 frame sent by the initiator. Some devices choose, however, to send
11332 * a PRLO instead.
11333 *
11334 * From a Fibre Channel standpoint a PRLO calls for a PRLI. There's no reason to
11335 * think that the Port Login has been lost. If we follow the Fibre Channel
11336 * protocol to the letter a PRLI should be sent after accepting the PRLO. If
11337 * the Port Login has also been lost, the remote port will reject the PRLI
11338 * indicating that we must PLOGI first. The initiator will then turn around and
11339 * send a PLOGI. The way Leadville is layered and the way the ULP interface
11340 * is defined don't allow this scenario to be followed easily. If FCP were to
11341 * handle the PRLO and attempt the PRLI, the reject indicating that a PLOGI is
11342 * needed would be received by FCP. FCP would have, then, to tell the transport
11343 * (fp) to PLOGI. The problem is, the transport would still think the Port
11344 * Login is valid and there is no way for FCP to tell the transport: "PLOGI even
11345 * if you think it's not necessary". To work around that difficulty, the PRLO
11346 * is treated by the transport as a LOGO. The downside is that a Port Login
11347 * may be disrupted (if a PLOGI wasn't actually needed) and another ULP (that
11348 * has nothing to do with the PRLO) may be impacted. However, this is a
11349 * scenario very unlikely to happen. As of today the only ULP in Leadville
11350 * using PRLI/PRLOs is FCP. For a PRLO to disrupt another ULP (that would be
11351 * FCIP), a SCSI target would have to be running FCP and FCIP (which is very
11352 * unlikely).
11353 */
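/*
 * In line with the above, the handler below checks that the PRLO carries
 * exactly one service parameter page, ACCs it via fp_prlo_acc_init(), and then
 * applies the same retain-or-mark-old logic that fp_handle_unsol_logo() uses
 * for a real LOGO.
 */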
11354 static void
11355 fp_handle_unsol_prlo(fc_local_port_t *port, fc_unsol_buf_t *buf,
11356 fc_remote_port_t *pd, job_request_t *job)
11357 {
11358 int busy;
11359 int rval;
11360 int retain;
11361 fp_cmd_t *cmd;
11362 fc_portmap_t *listptr;
11363 boolean_t tolerance;
11364 la_els_prlo_t *req;
11365
11366 req = (la_els_prlo_t *)buf->ub_buffer;
11367
11368 if ((ntohs(req->payload_length) !=
11369 (sizeof (service_parameter_page_t) + sizeof (ls_code_t))) ||
11370 (req->page_length != sizeof (service_parameter_page_t))) {
11371 /*
11372 * We are being very restrictive: only one page per
11373 * payload. If that is not the case we reject the ELS, although
11374 * we should reply indicating we handle only a single page
11375 * per PRLO.
11376 */
11377 goto fp_reject_prlo;
11378 }
11379
11380 if (ntohs(req->payload_length) > buf->ub_bufsize) {
11381 /*
11382 * This is in case the payload advertises a size bigger than
11383 * what it really is.
11384 */
11385 goto fp_reject_prlo;
11386 }
11387
11388 mutex_enter(&port->fp_mutex);
11389 busy = port->fp_statec_busy;
11390 mutex_exit(&port->fp_mutex);
11391
11392 mutex_enter(&pd->pd_mutex);
11393 tolerance = fctl_tc_increment(&pd->pd_logo_tc);
11394 if (!busy) {
11395 if (pd->pd_state != PORT_DEVICE_LOGGED_IN ||
11396 pd->pd_state == PORT_DEVICE_INVALID ||
11397 pd->pd_flags == PD_ELS_IN_PROGRESS ||
11398 pd->pd_type == PORT_DEVICE_OLD) {
11399 busy++;
11400 }
11401 }
11402
11403 if (busy) {
11404 mutex_exit(&pd->pd_mutex);
11405
11406 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x,"
11407 "pd=%p - busy",
11408 pd->pd_port_id.port_id, pd);
11409
11410 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11411 goto fp_reject_prlo;
11412 }
11413 } else {
11414 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0;
11415
11416 if (tolerance) {
11417 fctl_tc_reset(&pd->pd_logo_tc);
11418 retain = 0;
11419 pd->pd_state = PORT_DEVICE_INVALID;
11420 }
11421
11422 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p,"
11423 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd,
11424 tolerance, retain);
11425
11426 pd->pd_aux_flags |= PD_LOGGED_OUT;
11427 mutex_exit(&pd->pd_mutex);
11428
11429 cmd = fp_prlo_acc_init(port, pd, buf, job, KM_SLEEP);
11430 if (cmd == NULL) {
11431 return;
11432 }
11433
11434 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
11435 if (rval != FC_SUCCESS) {
11436 fp_free_pkt(cmd);
11437 return;
11438 }
11439
11440 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP);
11441
11442 if (retain) {
11443 fp_unregister_login(pd);
11444 fctl_copy_portmap(listptr, pd);
11445 } else {
11446 uint32_t d_id;
11447 char ww_name[17];
11448
11449 mutex_enter(&pd->pd_mutex);
11450 d_id = pd->pd_port_id.port_id;
11451 fc_wwn_to_str(&pd->pd_port_name, ww_name);
11452 mutex_exit(&pd->pd_mutex);
11453
11454 FP_TRACE(FP_NHEAD2(9, 0),
11455 "N_x Port with D_ID=%x, PWWN=%s logged out"
11456 " %d times in %d us; Giving up", d_id, ww_name,
11457 FC_LOGO_TOLERANCE_LIMIT,
11458 FC_LOGO_TOLERANCE_TIME_LIMIT);
11459
11460 fp_fillout_old_map(listptr, pd, 0);
11461 listptr->map_type = PORT_DEVICE_OLD;
11462 }
11463
11464 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0);
11465 return;
11466 }
11467
11468 fp_reject_prlo:
11469
11470 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 0, KM_SLEEP, pd);
11471 if (cmd != NULL) {
11472 fp_els_rjt_init(port, cmd, buf, FC_ACTION_NON_RETRYABLE,
11473 FC_REASON_INVALID_LINK_CTRL, job);
11474
11475 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
11476 fp_free_pkt(cmd);
11477 }
11478 }
11479 }
11480
11481 /*
11482 * Unsolicited LOGO handler
11483 */
11484 static void
11485 fp_handle_unsol_logo(fc_local_port_t *port, fc_unsol_buf_t *buf,
11486 fc_remote_port_t *pd, job_request_t *job)
11487 {
11488 int busy;
11489 int rval;
11490 int retain;
11491 fp_cmd_t *cmd;
11492 fc_portmap_t *listptr;
11493 boolean_t tolerance;
11494
11495 mutex_enter(&port->fp_mutex);
11496 busy = port->fp_statec_busy;
11497 mutex_exit(&port->fp_mutex);
11498
11499 mutex_enter(&pd->pd_mutex);
11500 tolerance = fctl_tc_increment(&pd->pd_logo_tc);
11501 if (!busy) {
11502 if (pd->pd_state != PORT_DEVICE_LOGGED_IN ||
11503 pd->pd_state == PORT_DEVICE_INVALID ||
11504 pd->pd_flags == PD_ELS_IN_PROGRESS ||
11505 pd->pd_type == PORT_DEVICE_OLD) {
11506 busy++;
11507 }
11508 }
11509
11510 if (busy) {
11511 mutex_exit(&pd->pd_mutex);
11512
11513 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x,"
11514 "pd=%p - busy",
11515 pd->pd_port_id.port_id, pd);
11516
11517 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11518 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
11519 0, KM_SLEEP, pd);
11520 if (cmd != NULL) {
11521 fp_els_rjt_init(port, cmd, buf,
11522 FC_ACTION_NON_RETRYABLE,
11523 FC_REASON_INVALID_LINK_CTRL, job);
11524
11525 if (fp_sendcmd(port, cmd,
11526 port->fp_fca_handle) != FC_SUCCESS) {
11527 fp_free_pkt(cmd);
11528 }
11529 }
11530 }
11531 } else {
11532 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0;
11533
11534 if (tolerance) {
11535 fctl_tc_reset(&pd->pd_logo_tc);
11536 retain = 0;
11537 pd->pd_state = PORT_DEVICE_INVALID;
11538 }
11539
11540 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p,"
11541 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd,
11542 tolerance, retain);
11543
11544 pd->pd_aux_flags |= PD_LOGGED_OUT;
11545 mutex_exit(&pd->pd_mutex);
11546
11547 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0,
11548 KM_SLEEP, pd);
11549 if (cmd == NULL) {
11550 return;
11551 }
11552
11553 fp_els_acc_init(port, cmd, buf, job);
11554
11555 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
11556 if (rval != FC_SUCCESS) {
11557 fp_free_pkt(cmd);
11558 return;
11559 }
11560
11561 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP);
11562
11563 if (retain) {
11564 job_request_t *job;
11565 fctl_ns_req_t *ns_cmd;
11566
11567 /*
11568 * When we get a LOGO, first try to get the PID from the
11569 * name server; if that fails, there is no need to
11570 * send a PLOGI to that remote port.
11571 */
11572 job = fctl_alloc_job(
11573 JOB_NS_CMD, 0, NULL, (opaque_t)port, KM_SLEEP);
11574
11575 if (job != NULL) {
11576 ns_cmd = fctl_alloc_ns_cmd(
11577 sizeof (ns_req_gid_pn_t),
11578 sizeof (ns_resp_gid_pn_t),
11579 sizeof (ns_resp_gid_pn_t),
11580 0, KM_SLEEP);
11581 if (ns_cmd != NULL) {
11582 int ret;
11583 job->job_result = FC_SUCCESS;
11584 ns_cmd->ns_cmd_code = NS_GID_PN;
11585 ((ns_req_gid_pn_t *)
11586 (ns_cmd->ns_cmd_buf))->pwwn =
11587 pd->pd_port_name;
11588 ret = fp_ns_query(
11589 port, ns_cmd, job, 1, KM_SLEEP);
11590 if ((ret != FC_SUCCESS) ||
11591 (job->job_result != FC_SUCCESS)) {
11592 fctl_free_ns_cmd(ns_cmd);
11593 fctl_dealloc_job(job);
11594 FP_TRACE(FP_NHEAD2(9, 0),
11595 "NS query failed,",
11596 " delete pd");
11597 goto delete_pd;
11598 }
11599 fctl_free_ns_cmd(ns_cmd);
11600 }
11601 fctl_dealloc_job(job);
11602 }
11603 fp_unregister_login(pd);
11604 fctl_copy_portmap(listptr, pd);
11605 } else {
11606 uint32_t d_id;
11607 char ww_name[17];
11608
11609 delete_pd:
11610 mutex_enter(&pd->pd_mutex);
11611 d_id = pd->pd_port_id.port_id;
11612 fc_wwn_to_str(&pd->pd_port_name, ww_name);
11613 mutex_exit(&pd->pd_mutex);
11614
11615 FP_TRACE(FP_NHEAD2(9, 0),
11616 "N_x Port with D_ID=%x, PWWN=%s logged out"
11617 " %d times in %d us; Giving up", d_id, ww_name,
11618 FC_LOGO_TOLERANCE_LIMIT,
11619 FC_LOGO_TOLERANCE_TIME_LIMIT);
11620
11621 fp_fillout_old_map(listptr, pd, 0);
11622 listptr->map_type = PORT_DEVICE_OLD;
11623 }
11624
11625 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0);
11626 }
11627 }
11628
11629
11630 /*
11631 * Perform general purpose preparation of a response to an unsolicited request
11632 */
11633 static void
11634 fp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
11635 uchar_t r_ctl, uchar_t type)
11636 {
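/*
 * Build the response header by reflecting the unsolicited frame:
 * the originator's S_ID becomes our D_ID (and vice versa), and its
 * seq_id, seq_cnt, ox_id and rx_id are echoed so the reply is
 * delivered within the originator's open exchange.
 */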
11637 pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
11638 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
11639 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
11640 pkt->pkt_cmd_fhdr.type = type;
11641 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
11642 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
11643 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl;
11644 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
11645 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
11646 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
11647 pkt->pkt_cmd_fhdr.ro = 0;
11648 pkt->pkt_cmd_fhdr.rsvd = 0;
11649 pkt->pkt_comp = fp_unsol_intr;
11650 pkt->pkt_timeout = FP_ELS_TIMEOUT;
11651 pkt->pkt_ub_resp_token = (opaque_t)buf;
11652 }
11653
11654 /*
11655 * Immediate handling of unsolicited FLOGI and PLOGI requests. In the
11656 * early development days of public loop soc+ firmware, numerous problems
11657 * were encountered (the details are undocumented and history now) which
11658 * led to the birth of this function.
11659 *
11660 * If a pre-allocated unsolicited response packet is free, send out an
11661 * immediate response, otherwise submit the request to the port thread
11662 * to do the deferred processing.
11663 */
11664 static void
11665 fp_i_handle_unsol_els(fc_local_port_t *port, fc_unsol_buf_t *buf)
11666 {
11667 int sent;
11668 int f_port;
11669 int do_acc;
11670 fp_cmd_t *cmd;
11671 la_els_logi_t *payload;
11672 fc_remote_port_t *pd;
11673 char dww_name[17];
11674
11675 ASSERT(!MUTEX_HELD(&port->fp_mutex));
11676
11677 cmd = port->fp_els_resp_pkt;
11678
11679 mutex_enter(&port->fp_mutex);
11680 do_acc = (port->fp_statec_busy == 0) ? 1 : 0;
11681 mutex_exit(&port->fp_mutex);
11682
11683 switch (buf->ub_buffer[0]) {
11684 case LA_ELS_PLOGI: {
11685 int small;
11686
11687 payload = (la_els_logi_t *)buf->ub_buffer;
11688
11689 f_port = FP_IS_F_PORT(payload->
11690 common_service.cmn_features) ? 1 : 0;
11691
11692 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name,
11693 &payload->nport_ww_name);
11694 pd = fctl_get_remote_port_by_pwwn(port,
11695 &payload->nport_ww_name);
11696 if (pd) {
11697 mutex_enter(&pd->pd_mutex);
11698 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0;
11699 /*
11700 * Most likely this means a cross login is in
11701 * progress or a device is about to be yanked out.
11702 * Only accept the plogi if my wwn is smaller.
11703 */
11704 if (pd->pd_type == PORT_DEVICE_OLD) {
11705 sent = 1;
11706 }
11707 /*
11708 * Stop any PLOGI attempt (if one is
11709 * pending) from the local side to speed
11710 * up discovery.
11711 * Mark the pd as PD_PLOGI_RECEPIENT.
11712 */
11713 if (f_port == 0 && small < 0) {
11714 pd->pd_recepient = PD_PLOGI_RECEPIENT;
11715 }
11716 fc_wwn_to_str(&pd->pd_port_name, dww_name);
11717
11718 mutex_exit(&pd->pd_mutex);
11719
11720 FP_TRACE(FP_NHEAD1(3, 0), "fp_i_handle_unsol_els: "
11721 "Unsol PLOGI received. PD still exists in the "
11722 "PWWN list. pd=%p PWWN=%s, sent=%x",
11723 pd, dww_name, sent);
11724
11725 if (f_port == 0 && small < 0) {
11726 FP_TRACE(FP_NHEAD1(3, 0),
11727 "fp_i_handle_unsol_els: Mark the pd"
11728 " as plogi recipient, pd=%p, PWWN=%s"
11729 ", sent=%x",
11730 pd, dww_name, sent);
11731 }
11732 } else {
11733 sent = 0;
11734 }
11735
11736 /*
11737 * To avoid Login collisions, accept only if my WWN
11738 * is smaller than the requester (A curious side note
11739 * would be that this rule may not satisfy the PLOGIs
11740 * initiated by the switch from not-so-well known
11741 * ports such as 0xFFFC41)
11742 */
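/*
 * Decoding the test below: f_port == 0 && small < 0 means the
 * requester is an N_Port and the local PWWN compares lower, so
 * the PLOGI is accepted outright. Otherwise it is accepted only
 * when sent == 0 (no ELS already in progress for the pd and the
 * pd isn't marked OLD) and either the local PWWN is higher with
 * no state change in progress (small > 0 && do_acc) or the
 * request came from a well-known address that must be accepted
 * (FC_MUST_ACCEPT_D_ID).
 */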
11743 if ((f_port == 0 && small < 0) ||
11744 (((small > 0 && do_acc) ||
11745 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) {
11746 if (fp_is_class_supported(port->fp_cos,
11747 buf->ub_class) == FC_FAILURE) {
11748 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11749 cmd->cmd_pkt.pkt_cmdlen =
11750 sizeof (la_els_rjt_t);
11751 cmd->cmd_pkt.pkt_rsplen = 0;
11752 fp_els_rjt_init(port, cmd, buf,
11753 FC_ACTION_NON_RETRYABLE,
11754 FC_REASON_CLASS_NOT_SUPP, NULL);
11755 FP_TRACE(FP_NHEAD1(3, 0),
11756 "fp_i_handle_unsol_els: "
11757 "Unsupported class. "
11758 "Rejecting PLOGI");
11759
11760 } else {
11761 mutex_enter(&port->fp_mutex);
11762 port->fp_els_resp_pkt_busy = 0;
11763 mutex_exit(&port->fp_mutex);
11764 return;
11765 }
11766 } else {
11767 cmd->cmd_pkt.pkt_cmdlen =
11768 sizeof (la_els_logi_t);
11769 cmd->cmd_pkt.pkt_rsplen = 0;
11770
11771 /*
11772 * If fp_port_id is zero and topology is
11773 * Point-to-Point, get the local port id from
11774 * the d_id in the PLOGI request.
11775 * If the outgoing FLOGI hasn't been accepted,
11776 * the topology will be unknown here. But it's
11777 * still safe to save the d_id to fp_port_id,
11778 * just because it will be overwritten later
11779 * if the topology is not Point-to-Point.
11780 */
11781 mutex_enter(&port->fp_mutex);
11782 if ((port->fp_port_id.port_id == 0) &&
11783 (port->fp_topology == FC_TOP_PT_PT ||
11784 port->fp_topology == FC_TOP_UNKNOWN)) {
11785 port->fp_port_id.port_id =
11786 buf->ub_frame.d_id;
11787 }
11788 mutex_exit(&port->fp_mutex);
11789
11790 /*
11791 * Sometime later, we should validate
11792 * the service parameters instead of
11793 * just accepting them.
11794 */
11795 fp_login_acc_init(port, cmd, buf, NULL,
11796 KM_NOSLEEP);
11797 FP_TRACE(FP_NHEAD1(3, 0),
11798 "fp_i_handle_unsol_els: Accepting PLOGI,"
11799 " f_port=%d, small=%d, do_acc=%d,"
11800 " sent=%d.", f_port, small, do_acc,
11801 sent);
11802 }
11803 } else {
11804 if (FP_IS_CLASS_1_OR_2(buf->ub_class) ||
11805 port->fp_options & FP_SEND_RJT) {
11806 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t);
11807 cmd->cmd_pkt.pkt_rsplen = 0;
11808 fp_els_rjt_init(port, cmd, buf,
11809 FC_ACTION_NON_RETRYABLE,
11810 FC_REASON_LOGICAL_BSY, NULL);
11811 FP_TRACE(FP_NHEAD1(3, 0),
11812 "fp_i_handle_unsol_els: "
11813 "Rejecting PLOGI with Logical Busy."
11814 "Possible Login collision.");
11815 } else {
11816 mutex_enter(&port->fp_mutex);
11817 port->fp_els_resp_pkt_busy = 0;
11818 mutex_exit(&port->fp_mutex);
11819 return;
11820 }
11821 }
11822 break;
11823 }
11824
11825 case LA_ELS_FLOGI:
11826 if (fp_is_class_supported(port->fp_cos,
11827 buf->ub_class) == FC_FAILURE) {
11828 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11829 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t);
11830 cmd->cmd_pkt.pkt_rsplen = 0;
11831 fp_els_rjt_init(port, cmd, buf,
11832 FC_ACTION_NON_RETRYABLE,
11833 FC_REASON_CLASS_NOT_SUPP, NULL);
11834 FP_TRACE(FP_NHEAD1(3, 0),
11835 "fp_i_handle_unsol_els: "
11836 "Unsupported Class. Rejecting FLOGI.");
11837 } else {
11838 mutex_enter(&port->fp_mutex);
11839 port->fp_els_resp_pkt_busy = 0;
11840 mutex_exit(&port->fp_mutex);
11841 return;
11842 }
11843 } else {
11844 mutex_enter(&port->fp_mutex);
11845 if (FC_PORT_STATE_MASK(port->fp_state) !=
11846 FC_STATE_ONLINE || (port->fp_port_id.port_id &&
11847 buf->ub_frame.s_id == port->fp_port_id.port_id)) {
11848 mutex_exit(&port->fp_mutex);
11849 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
11850 cmd->cmd_pkt.pkt_cmdlen =
11851 sizeof (la_els_rjt_t);
11852 cmd->cmd_pkt.pkt_rsplen = 0;
11853 fp_els_rjt_init(port, cmd, buf,
11854 FC_ACTION_NON_RETRYABLE,
11855 FC_REASON_INVALID_LINK_CTRL,
11856 NULL);
11857 FP_TRACE(FP_NHEAD1(3, 0),
11858 "fp_i_handle_unsol_els: "
11859 "Invalid Link Ctrl. "
11860 "Rejecting FLOGI.");
11861 } else {
11862 mutex_enter(&port->fp_mutex);
11863 port->fp_els_resp_pkt_busy = 0;
11864 mutex_exit(&port->fp_mutex);
11865 return;
11866 }
11867 } else {
11868 mutex_exit(&port->fp_mutex);
11869 cmd->cmd_pkt.pkt_cmdlen =
11870 sizeof (la_els_logi_t);
11871 cmd->cmd_pkt.pkt_rsplen = 0;
11872 /*
11873 * Let's not aggressively validate the N_Port's
11874 * service parameters until PLOGI. Suffice it
11875 * to give a hint that we are an N_Port and we
11876 * are game for some serious stuff here.
11877 */
11878 fp_login_acc_init(port, cmd, buf,
11879 NULL, KM_NOSLEEP);
11880 FP_TRACE(FP_NHEAD1(3, 0),
11881 "fp_i_handle_unsol_els: "
11882 "Accepting FLOGI.");
11883 }
11884 }
11885 break;
11886
11887 default:
11888 return;
11889 }
11890
11891 if ((fp_sendcmd(port, cmd, port->fp_fca_handle)) != FC_SUCCESS) {
11892 mutex_enter(&port->fp_mutex);
11893 port->fp_els_resp_pkt_busy = 0;
11894 mutex_exit(&port->fp_mutex);
11895 }
11896 }
11897
11898
11899 /*
11900 * Handle unsolicited PLOGI request
11901 */
11902 static void
11903 fp_handle_unsol_plogi(fc_local_port_t *port, fc_unsol_buf_t *buf,
11904 job_request_t *job, int sleep)
11905 {
11906 int sent;
11907 int small;
11908 int f_port;
11909 int do_acc;
11910 fp_cmd_t *cmd;
11911 la_wwn_t *swwn;
11912 la_wwn_t *dwwn;
11913 la_els_logi_t *payload;
11914 fc_remote_port_t *pd;
11915 char dww_name[17];
11916
11917 payload = (la_els_logi_t *)buf->ub_buffer;
11918 f_port = FP_IS_F_PORT(payload->common_service.cmn_features) ? 1 : 0;
11919
11920 mutex_enter(&port->fp_mutex);
11921 do_acc = (port->fp_statec_busy == 0) ? 1 : 0;
11922 mutex_exit(&port->fp_mutex);
11923
11924 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: s_id=%x, d_id=%x,"
11925 "type=%x, f_ctl=%x"
11926 " seq_id=%x, ox_id=%x, rx_id=%x"
11927 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id,
11928 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id,
11929 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro);
11930
11931 swwn = &port->fp_service_params.nport_ww_name;
11932 dwwn = &payload->nport_ww_name;
11933 small = fctl_wwn_cmp(swwn, dwwn);
11934 pd = fctl_get_remote_port_by_pwwn(port, dwwn);
11935 if (pd) {
11936 mutex_enter(&pd->pd_mutex);
11937 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0;
11938 /*
11939 * Most likely this means a cross login is in
11940 * progress or a device is about to be yanked out.
11941 * Only accept the plogi if my wwn is smaller.
11942 */
11943
11944 if (pd->pd_type == PORT_DEVICE_OLD) {
11945 sent = 1;
11946 }
11947 /*
11948 * Stop any PLOGI attempt (if one is
11949 * pending) from the local side to speed
11950 * up discovery.
11951 * Mark the pd as PD_PLOGI_RECEPIENT.
11952 */
11953 if (f_port == 0 && small < 0) {
11954 pd->pd_recepient = PD_PLOGI_RECEPIENT;
11955 }
11956 fc_wwn_to_str(&pd->pd_port_name, dww_name);
11957
11958 mutex_exit(&pd->pd_mutex);
11959
11960 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: Unsol PLOGI"
11961 " received. PD still exists in the PWWN list. pd=%p "
11962 "PWWN=%s, sent=%x", pd, dww_name, sent);
11963
11964 if (f_port == 0 && small < 0) {
11965 FP_TRACE(FP_NHEAD1(3, 0),
11966 "fp_handle_unsol_plogi: Mark the pd"
11967 " as plogi recipient, pd=%p, PWWN=%s"
11968 ", sent=%x",
11969 pd, dww_name, sent);
11970 }
11971 } else {
11972 sent = 0;
11973 }
11974
11975 /*
11976 * Avoid Login collisions by accepting only if my WWN is smaller.
11977 *
11978 * A side note: There is no need to start a PLOGI from this end in
11979 * this context if login isn't going to be accepted for the
11980 * above reason as either a LIP (in private loop), RSCN (in
11981 * fabric topology), or an FLOGI (in point to point - Huh ?
11982 * check FC-PH) would normally drive the PLOGI from this end.
11983 * At this point of time there is no need for an inbound PLOGI
11984 * to kick an outbound PLOGI when it is going to be rejected
11985 * for the reason of WWN being smaller. However it isn't hard
11986 * to do that either (when such a need arises, start a timer
11987 * for a duration that extends beyond a normal device discovery
11988 * time and check if an outbound PLOGI did go before that, if
11989 * none fire one)
11990 *
11991 * Unfortunately, as it turned out, during booting, it is possible
11992 * to miss another initiator in the same loop as port driver
11993 * instances are serially attached. While preserving the above
11994 * comments for belly laughs, please kick an outbound PLOGI in
11995 * a non-switch environment (which is a pt pt between N_Ports or
11996 * a private loop)
11997 *
11998 * While preserving the above comments for amusement, send an
11999 * ACC if the PLOGI is going to be rejected for WWN being smaller
12000 * when no discovery is in progress at this end. Turn around
12001 * and make the port device as the PLOGI initiator, so that
12002 * during subsequent link/loop initialization, this end drives
12003 * the PLOGI (In fact both ends do in this particular case, but
12004 * only one wins)
12005 *
12006 * Make sure the PLOGIs initiated by the switch from not-so-well-known
12007 * ports (such as 0xFFFC41) are accepted too.
12008 */
12009 if ((f_port == 0 && small < 0) || (((small > 0 && do_acc) ||
12010 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) {
12011 if (fp_is_class_supported(port->fp_cos,
12012 buf->ub_class) == FC_FAILURE) {
12013 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
12014 cmd = fp_alloc_pkt(port,
12015 sizeof (la_els_logi_t), 0, sleep, pd);
12016 if (cmd == NULL) {
12017 return;
12018 }
12019 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t);
12020 cmd->cmd_pkt.pkt_rsplen = 0;
12021 fp_els_rjt_init(port, cmd, buf,
12022 FC_ACTION_NON_RETRYABLE,
12023 FC_REASON_CLASS_NOT_SUPP, job);
12024 FP_TRACE(FP_NHEAD1(3, 0),
12025 "fp_handle_unsol_plogi: "
12026 "Unsupported class. rejecting PLOGI");
12027 }
12028 } else {
12029 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t),
12030 0, sleep, pd);
12031 if (cmd == NULL) {
12032 return;
12033 }
12034 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_logi_t);
12035 cmd->cmd_pkt.pkt_rsplen = 0;
12036
12037 /*
12038 * Sometime later, we should validate the service
12039 * parameters instead of just accepting them.
12040 */
12041 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP);
12042 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: "
12043 "Accepting PLOGI, f_port=%d, small=%d, "
12044 "do_acc=%d, sent=%d.", f_port, small, do_acc,
12045 sent);
12046
12047 /*
12048 * If fp_port_id is zero and topology is
12049 * Point-to-Point, get the local port id from
12050 * the d_id in the PLOGI request.
12051 * If the outgoing FLOGI hasn't been accepted,
12052 * the topology will be unknown here. But it's
12053 * still safe to save the d_id to fp_port_id,
12054 * just because it will be overwritten later
12055 * if the topology is not Point-to-Point.
12056 */
12057 mutex_enter(&port->fp_mutex);
12058 if ((port->fp_port_id.port_id == 0) &&
12059 (port->fp_topology == FC_TOP_PT_PT ||
12060 port->fp_topology == FC_TOP_UNKNOWN)) {
12061 port->fp_port_id.port_id =
12062 buf->ub_frame.d_id;
12063 }
12064 mutex_exit(&port->fp_mutex);
12065 }
12066 } else {
12067 if (FP_IS_CLASS_1_OR_2(buf->ub_class) ||
12068 port->fp_options & FP_SEND_RJT) {
12069 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t),
12070 0, sleep, pd);
12071 if (cmd == NULL) {
12072 return;
12073 }
12074 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t);
12075 cmd->cmd_pkt.pkt_rsplen = 0;
12076 /*
12077 * Send out Logical busy to indicate
12078 * the detection of PLOGI collision
12079 */
12080 fp_els_rjt_init(port, cmd, buf,
12081 FC_ACTION_NON_RETRYABLE,
12082 FC_REASON_LOGICAL_BSY, job);
12083
12084 fc_wwn_to_str(dwwn, dww_name);
12085 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: "
12086 "Rejecting Unsol PLOGI with Logical Busy."
12087 "possible PLOGI collision. PWWN=%s, sent=%x",
12088 dww_name, sent);
12089 } else {
12090 return;
12091 }
12092 }
12093
12094 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
12095 fp_free_pkt(cmd);
12096 }
12097 }
12098
12099
12100 /*
12101 * Handle mischievous turning over of our own FLOGI requests back to
12102 * us by the SOC+ microcode. In other words, look at the class of such
12103 * bone-headed requests: if 1 or 2, bluntly P_RJT them; if 3, drop them
12104 * on the floor.
12105 */
12106 static void
12107 fp_handle_unsol_flogi(fc_local_port_t *port, fc_unsol_buf_t *buf,
12108 job_request_t *job, int sleep)
12109 {
12110 uint32_t state;
12111 uint32_t s_id;
12112 fp_cmd_t *cmd;
12113
12114 if (fp_is_class_supported(port->fp_cos, buf->ub_class) == FC_FAILURE) {
12115 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
12116 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
12117 0, sleep, NULL);
12118 if (cmd == NULL) {
12119 return;
12120 }
12121 fp_els_rjt_init(port, cmd, buf,
12122 FC_ACTION_NON_RETRYABLE,
12123 FC_REASON_CLASS_NOT_SUPP, job);
12124 } else {
12125 return;
12126 }
12127 } else {
12128
12129 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi:"
12130 " s_id=%x, d_id=%x, type=%x, f_ctl=%x"
12131 " seq_id=%x, ox_id=%x, rx_id=%x, ro=%x",
12132 buf->ub_frame.s_id, buf->ub_frame.d_id,
12133 buf->ub_frame.type, buf->ub_frame.f_ctl,
12134 buf->ub_frame.seq_id, buf->ub_frame.ox_id,
12135 buf->ub_frame.rx_id, buf->ub_frame.ro);
12136
12137 mutex_enter(&port->fp_mutex);
12138 state = FC_PORT_STATE_MASK(port->fp_state);
12139 s_id = port->fp_port_id.port_id;
12140 mutex_exit(&port->fp_mutex);
12141
12142 if (state != FC_STATE_ONLINE ||
12143 (s_id && buf->ub_frame.s_id == s_id)) {
12144 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
12145 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
12146 0, sleep, NULL);
12147 if (cmd == NULL) {
12148 return;
12149 }
12150 fp_els_rjt_init(port, cmd, buf,
12151 FC_ACTION_NON_RETRYABLE,
12152 FC_REASON_INVALID_LINK_CTRL, job);
12153 FP_TRACE(FP_NHEAD1(3, 0),
12154 "fp_handle_unsol_flogi: "
12155 "Rejecting PLOGI. Invalid Link CTRL");
12156 } else {
12157 return;
12158 }
12159 } else {
12160 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t),
12161 0, sleep, NULL);
12162 if (cmd == NULL) {
12163 return;
12164 }
12165 /*
12166 * Let's not aggressively validate the N_Port's
12167 * service parameters until PLOGI. Suffice it
12168 * to give a hint that we are an N_Port and we
12169 * are game for some serious stuff here.
12170 */
12171 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP);
12172 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi: "
12173 "Accepting PLOGI");
12174 }
12175 }
12176
12177 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
12178 fp_free_pkt(cmd);
12179 }
12180 }
12181
12182
12183 /*
12184 * Perform PLOGI accept
12185 */
12186 static void
12187 fp_login_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf,
12188 job_request_t *job, int sleep)
12189 {
12190 fc_packet_t *pkt;
12191 fc_portmap_t *listptr;
12192 la_els_logi_t payload;
12193
12194 ASSERT(buf != NULL);
12195
12196 /*
12197 * If we are sending ACC to PLOGI and we haven't already
12198 * created port and node device handles, let's create them
12199 * here.
12200 */
12201 if (buf->ub_buffer[0] == LA_ELS_PLOGI &&
12202 FC_IS_REAL_DEVICE(buf->ub_frame.s_id)) {
12203 int small;
12204 int do_acc;
12205 fc_remote_port_t *pd;
12206 la_els_logi_t *req;
12207
12208 req = (la_els_logi_t *)buf->ub_buffer;
12209 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name,
12210 &req->nport_ww_name);
12211
12212 mutex_enter(&port->fp_mutex);
12213 do_acc = (port->fp_statec_busy == 0) ? 1 : 0;
12214 mutex_exit(&port->fp_mutex);
12215
12216 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_acc_init fp %x, pd %x",
12217 port->fp_port_id.port_id, buf->ub_frame.s_id);
12218 pd = fctl_create_remote_port(port, &req->node_ww_name,
12219 &req->nport_ww_name, buf->ub_frame.s_id,
12220 PD_PLOGI_RECEPIENT, sleep);
12221 if (pd == NULL) {
12222 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: "
12223 "Couldn't create port device for d_id:0x%x",
12224 buf->ub_frame.s_id);
12225
12226 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
12227 "couldn't create port device d_id=%x",
12228 buf->ub_frame.s_id);
12229 } else {
12230 		/*
12231 		 * usoc currently returns PLOGIs inline and
12232 		 * the maximum buffer size is 60 bytes or so,
12233 		 * so don't attempt to look beyond what is in
12234 		 * the unsolicited buffer.
12235 		 *
12236 		 * JNI also traverses this path sometimes.
12237 		 */
12238 if (buf->ub_bufsize >= sizeof (la_els_logi_t)) {
12239 fp_register_login(NULL, pd, req, buf->ub_class);
12240 } else {
12241 mutex_enter(&pd->pd_mutex);
12242 if (pd->pd_login_count == 0) {
12243 pd->pd_login_count++;
12244 }
12245 pd->pd_state = PORT_DEVICE_LOGGED_IN;
12246 pd->pd_login_class = buf->ub_class;
12247 mutex_exit(&pd->pd_mutex);
12248 }
12249
12250 listptr = kmem_zalloc(sizeof (fc_portmap_t), sleep);
12251 if (listptr != NULL) {
12252 fctl_copy_portmap(listptr, pd);
12253 (void) fp_ulp_devc_cb(port, listptr,
12254 1, 1, sleep, 0);
12255 }
12256
12257 if (small > 0 && do_acc) {
12258 mutex_enter(&pd->pd_mutex);
12259 pd->pd_recepient = PD_PLOGI_INITIATOR;
12260 mutex_exit(&pd->pd_mutex);
12261 }
12262 }
12263 }
12264
12265 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class;
12266 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND;
12267 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
12268 cmd->cmd_retry_count = 1;
12269 cmd->cmd_ulp_pkt = NULL;
12270
12271 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
12272 cmd->cmd_job = job;
12273
12274 pkt = &cmd->cmd_pkt;
12275
12276 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
12277
12278 payload = port->fp_service_params;
12279 payload.ls_code.ls_code = LA_ELS_ACC;
12280
12281 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
12282 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
12283
12284 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: ELS:0x%x d_id:0x%x "
12285 "bufsize:0x%x sizeof (la_els_logi):0x%x "
12286 "port's wwn:0x%01x%03x%04x%08x requestor's wwn:0x%01x%03x%04x%08x "
12287 "statec_busy:0x%x", buf->ub_buffer[0], buf->ub_frame.s_id,
12288 buf->ub_bufsize, sizeof (la_els_logi_t),
12289 port->fp_service_params.nport_ww_name.w.naa_id,
12290 port->fp_service_params.nport_ww_name.w.nport_id,
12291 port->fp_service_params.nport_ww_name.w.wwn_hi,
12292 port->fp_service_params.nport_ww_name.w.wwn_lo,
12293 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.naa_id,
12294 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.nport_id,
12295 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_hi,
12296 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_lo,
12297 port->fp_statec_busy);
12298 }
12299
12300
12301 #define RSCN_EVENT_NAME_LEN 256
12302
12303 /*
12304 * Handle RSCNs
12305 */
12306 static void
12307 fp_handle_unsol_rscn(fc_local_port_t *port, fc_unsol_buf_t *buf,
12308 job_request_t *job, int sleep)
12309 {
12310 uint32_t mask;
12311 fp_cmd_t *cmd;
12312 uint32_t count;
12313 int listindex;
12314 int16_t len;
12315 fc_rscn_t *payload;
12316 fc_portmap_t *listptr;
12317 fctl_ns_req_t *ns_cmd;
12318 fc_affected_id_t *page;
12319 caddr_t nvname;
12320 nvlist_t *attr_list = NULL;
12321
12322 mutex_enter(&port->fp_mutex);
12323 if (!FC_IS_TOP_SWITCH(port->fp_topology)) {
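		/*
		 * Note: if the decrement below lands on the
		 * FC_INVALID_RSCN_COUNT sentinel, the counter is
		 * decremented once more so it never holds that
		 * reserved value; the same idiom is used throughout
		 * this function.
		 */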
12324 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
12325 --port->fp_rscn_count;
12326 }
12327 mutex_exit(&port->fp_mutex);
12328 return;
12329 }
12330 mutex_exit(&port->fp_mutex);
12331
12332 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0, sleep, NULL);
12333 if (cmd != NULL) {
12334 fp_els_acc_init(port, cmd, buf, job);
12335 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
12336 fp_free_pkt(cmd);
12337 }
12338 }
12339
12340 payload = (fc_rscn_t *)buf->ub_buffer;
12341 ASSERT(payload->rscn_code == LA_ELS_RSCN);
12342 ASSERT(payload->rscn_len == FP_PORT_IDENTIFIER_LEN);
12343
12344 len = payload->rscn_payload_len - FP_PORT_IDENTIFIER_LEN;
12345
12346 if (len <= 0) {
12347 mutex_enter(&port->fp_mutex);
12348 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
12349 --port->fp_rscn_count;
12350 }
12351 mutex_exit(&port->fp_mutex);
12352
12353 return;
12354 }
12355
12356 	ASSERT((len & 0x3) == 0);	/* Must be a multiple of 4 */
12357 count = (len >> 2) << 1; /* number of pages multiplied by 2 */
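	/*
	 * The portmap list allocated below is sized from this count,
	 * i.e. two entries per 4-byte affected-ID page; the doubling
	 * presumably leaves headroom for pages that expand to more
	 * than one portmap entry during validation.
	 */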
12358
12359 listptr = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep);
12360 page = (fc_affected_id_t *)(buf->ub_buffer + sizeof (fc_rscn_t));
12361
12362 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
12363
12364 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpn_id_t),
12365 sizeof (ns_resp_gpn_id_t), sizeof (ns_resp_gpn_id_t),
12366 0, sleep);
12367 if (ns_cmd == NULL) {
12368 kmem_free(listptr, sizeof (fc_portmap_t) * count);
12369
12370 mutex_enter(&port->fp_mutex);
12371 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
12372 --port->fp_rscn_count;
12373 }
12374 mutex_exit(&port->fp_mutex);
12375
12376 return;
12377 }
12378
12379 ns_cmd->ns_cmd_code = NS_GPN_ID;
12380
12381 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_rscn: s_id=%x, d_id=%x,"
12382 "type=%x, f_ctl=%x seq_id=%x, ox_id=%x, rx_id=%x"
12383 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id,
12384 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id,
12385 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro);
12386
12387 /* Only proceed if we can allocate nvname and the nvlist */
12388 if ((nvname = kmem_zalloc(RSCN_EVENT_NAME_LEN, KM_NOSLEEP)) != NULL &&
12389 nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
12390 KM_NOSLEEP) == DDI_SUCCESS) {
12391 if (!(attr_list && nvlist_add_uint32(attr_list, "instance",
12392 port->fp_instance) == DDI_SUCCESS &&
12393 nvlist_add_byte_array(attr_list, "port-wwn",
12394 port->fp_service_params.nport_ww_name.raw_wwn,
12395 sizeof (la_wwn_t)) == DDI_SUCCESS)) {
12396 nvlist_free(attr_list);
12397 attr_list = NULL;
12398 }
12399 }
12400
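	/*
	 * Walk each 4-byte affected-ID page in the RSCN payload:
	 * record the page in the sysevent payload (if it could be
	 * allocated) and then validate the affected ports according
	 * to the page's address format.
	 */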
12401 for (listindex = 0; len; len -= FP_PORT_IDENTIFIER_LEN, page++) {
12402 /* Add affected page to the event payload */
12403 if (attr_list != NULL) {
12404 (void) snprintf(nvname, RSCN_EVENT_NAME_LEN,
12405 "affected_page_%d", listindex);
12406 if (attr_list && nvlist_add_uint32(attr_list, nvname,
12407 ntohl(*(uint32_t *)page)) != DDI_SUCCESS) {
12408 /* We don't send a partial event, so dump it */
12409 nvlist_free(attr_list);
12410 attr_list = NULL;
12411 }
12412 }
12413 /*
12414 * Query the NS to get the Port WWN for this
12415 * affected D_ID.
12416 */
12417 mask = 0;
12418 switch (page->aff_format & FC_RSCN_ADDRESS_MASK) {
12419 case FC_RSCN_PORT_ADDRESS:
12420 fp_validate_rscn_page(port, page, job, ns_cmd,
12421 listptr, &listindex, sleep);
12422
12423 if (listindex == 0) {
12424 				/*
12425 				 * We essentially did not process this RSCN, so
12426 				 * ULPs are not going to be called and we
12427 				 * decrement the rscn_count.
12428 				 */
12429 mutex_enter(&port->fp_mutex);
12430 if (--port->fp_rscn_count ==
12431 FC_INVALID_RSCN_COUNT) {
12432 --port->fp_rscn_count;
12433 }
12434 mutex_exit(&port->fp_mutex);
12435 }
12436 break;
12437
12438 case FC_RSCN_AREA_ADDRESS:
12439 mask = 0xFFFF00;
12440 /* FALLTHROUGH */
12441
12442 case FC_RSCN_DOMAIN_ADDRESS:
12443 if (!mask) {
12444 mask = 0xFF0000;
12445 }
12446 fp_validate_area_domain(port, page->aff_d_id, mask,
12447 job, sleep);
12448 break;
12449
12450 case FC_RSCN_FABRIC_ADDRESS:
12451 			/*
12452 			 * The RSCN affects the entire fabric; rediscover
12453 			 * all the devices visible through this port.
12454 			 */
12455 fp_validate_area_domain(port, 0, 0, job, sleep);
12456 break;
12457
12458 default:
12459 break;
12460 }
12461 }
12462 if (attr_list != NULL) {
12463 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW,
12464 EC_SUNFC, ESC_SUNFC_PORT_RSCN, attr_list,
12465 NULL, DDI_SLEEP);
12466 nvlist_free(attr_list);
12467 } else {
12468 FP_TRACE(FP_NHEAD1(9, 0),
12469 "RSCN handled, but event not sent to userland");
12470 }
12471 if (nvname != NULL) {
12472 kmem_free(nvname, RSCN_EVENT_NAME_LEN);
12473 }
12474
12475 if (ns_cmd) {
12476 fctl_free_ns_cmd(ns_cmd);
12477 }
12478
12479 if (listindex) {
12480 #ifdef DEBUG
12481 page = (fc_affected_id_t *)(buf->ub_buffer +
12482 sizeof (fc_rscn_t));
12483
12484 if (listptr->map_did.port_id != page->aff_d_id) {
12485 FP_TRACE(FP_NHEAD1(9, 0),
12486 "PORT RSCN: processed=%x, reporting=%x",
12487 listptr->map_did.port_id, page->aff_d_id);
12488 }
12489 #endif
12490
12491 (void) fp_ulp_devc_cb(port, listptr, listindex, count,
12492 sleep, 0);
12493 } else {
12494 kmem_free(listptr, sizeof (fc_portmap_t) * count);
12495 }
12496 }
12497
12498
12499 /*
12500 * Fill out old map for ULPs with fp_mutex, fd_mutex and pd_mutex held
12501 */
12502 static void
12503 fp_fillout_old_map_held(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag)
12504 {
12505 int is_switch;
12506 int initiator;
12507 fc_local_port_t *port;
12508
12509 port = pd->pd_port;
12510
12511 	/* This function makes the following assumptions */
12512 ASSERT(port != NULL);
12513 ASSERT(MUTEX_HELD(&port->fp_mutex));
12514 ASSERT(MUTEX_HELD(&pd->pd_remote_nodep->fd_mutex));
12515 ASSERT(MUTEX_HELD(&pd->pd_mutex));
12516
12517 pd->pd_state = PORT_DEVICE_INVALID;
12518 pd->pd_type = PORT_DEVICE_OLD;
12519 initiator = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0;
12520 is_switch = FC_IS_TOP_SWITCH(port->fp_topology);
12521
12522 fctl_delist_did_table(port, pd);
12523 fctl_delist_pwwn_table(port, pd);
12524
12525 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map_held: port=%p, d_id=%x"
12526 " removed the PD=%p from DID and PWWN tables",
12527 port, pd->pd_port_id.port_id, pd);
12528
12529 if ((!flag) && port && initiator && is_switch) {
12530 (void) fctl_add_orphan_held(port, pd);
12531 }
12532 fctl_copy_portmap_held(map, pd);
12533 map->map_pd = pd;
12534 }
12535
12536 /*
12537 * Fill out old map for ULPs
12538 */
12539 static void
12540 fp_fillout_old_map(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag)
12541 {
12542 int is_switch;
12543 int initiator;
12544 fc_local_port_t *port;
12545
12546 mutex_enter(&pd->pd_mutex);
12547 port = pd->pd_port;
12548 mutex_exit(&pd->pd_mutex);
12549
12550 mutex_enter(&port->fp_mutex);
12551 mutex_enter(&pd->pd_mutex);
12552
12553 pd->pd_state = PORT_DEVICE_INVALID;
12554 pd->pd_type = PORT_DEVICE_OLD;
12555 initiator = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0;
12556 is_switch = FC_IS_TOP_SWITCH(port->fp_topology);
12557
12558 fctl_delist_did_table(port, pd);
12559 fctl_delist_pwwn_table(port, pd);
12560
12561 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map: port=%p, d_id=%x"
12562 " removed the PD=%p from DID and PWWN tables",
12563 port, pd->pd_port_id.port_id, pd);
12564
12565 mutex_exit(&pd->pd_mutex);
12566 mutex_exit(&port->fp_mutex);
12567
12568 ASSERT(port != NULL);
12569 if ((!flag) && port && initiator && is_switch) {
12570 (void) fctl_add_orphan(port, pd, KM_NOSLEEP);
12571 }
12572 fctl_copy_portmap(map, pd);
12573 map->map_pd = pd;
12574 }
12575
12576
12577 /*
12578  * Fill out changed map for ULPs
12579  */
12580 static void
12581 fp_fillout_changed_map(fc_portmap_t *map, fc_remote_port_t *pd,
12582 uint32_t *new_did, la_wwn_t *new_pwwn)
12583 {
12584 ASSERT(MUTEX_HELD(&pd->pd_mutex));
12585
12586 pd->pd_type = PORT_DEVICE_CHANGED;
12587 if (new_did) {
12588 pd->pd_port_id.port_id = *new_did;
12589 }
12590 if (new_pwwn) {
12591 pd->pd_port_name = *new_pwwn;
12592 }
12593 mutex_exit(&pd->pd_mutex);
12594
12595 fctl_copy_portmap(map, pd);
12596
12597 mutex_enter(&pd->pd_mutex);
12598 pd->pd_type = PORT_DEVICE_NOCHANGE;
12599 }
12600
12601
12602 /*
12603  * Fill out new Name Server map
12604  */
12605 static void
12606 fp_fillout_new_nsmap(fc_local_port_t *port, ddi_acc_handle_t *handle,
12607 fc_portmap_t *port_map, ns_resp_gan_t *gan_resp, uint32_t d_id)
12608 {
12609 ASSERT(!MUTEX_HELD(&port->fp_mutex));
12610
12611 if (handle) {
12612 FC_GET_RSP(port, *handle, (uint8_t *)&port_map->map_pwwn,
12613 (uint8_t *)&gan_resp->gan_pwwn, sizeof (gan_resp->gan_pwwn),
12614 DDI_DEV_AUTOINCR);
12615 FC_GET_RSP(port, *handle, (uint8_t *)&port_map->map_nwwn,
12616 (uint8_t *)&gan_resp->gan_nwwn, sizeof (gan_resp->gan_nwwn),
12617 DDI_DEV_AUTOINCR);
12618 FC_GET_RSP(port, *handle, (uint8_t *)port_map->map_fc4_types,
12619 (uint8_t *)gan_resp->gan_fc4types,
12620 sizeof (gan_resp->gan_fc4types), DDI_DEV_AUTOINCR);
12621 } else {
12622 bcopy(&gan_resp->gan_pwwn, &port_map->map_pwwn,
12623 sizeof (gan_resp->gan_pwwn));
12624 bcopy(&gan_resp->gan_nwwn, &port_map->map_nwwn,
12625 sizeof (gan_resp->gan_nwwn));
12626 bcopy(gan_resp->gan_fc4types, port_map->map_fc4_types,
12627 sizeof (gan_resp->gan_fc4types));
12628 }
12629 port_map->map_did.port_id = d_id;
12630 port_map->map_did.priv_lilp_posit = 0;
12631 port_map->map_hard_addr.hard_addr = 0;
12632 port_map->map_hard_addr.rsvd = 0;
12633 port_map->map_state = PORT_DEVICE_INVALID;
12634 port_map->map_type = PORT_DEVICE_NEW;
12635 port_map->map_flags = 0;
12636 port_map->map_pd = NULL;
12637
12638 (void) fctl_remove_if_orphan(port, &port_map->map_pwwn);
12639
12640 ASSERT(port != NULL);
12641 }
12642
12643
12644 /*
12645 * Perform LINIT ELS
12646 */
12647 static int
12648 fp_remote_lip(fc_local_port_t *port, la_wwn_t *pwwn, int sleep,
12649 job_request_t *job)
12650 {
12651 int rval;
12652 uint32_t d_id;
12653 uint32_t s_id;
12654 uint32_t lfa;
12655 uchar_t class;
12656 uint32_t ret;
12657 fp_cmd_t *cmd;
12658 fc_porttype_t ptype;
12659 fc_packet_t *pkt;
12660 fc_linit_req_t payload;
12661 fc_remote_port_t *pd;
12662
12663 rval = 0;
12664
12665 ASSERT(job != NULL);
12666 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
12667
12668 pd = fctl_get_remote_port_by_pwwn(port, pwwn);
12669 if (pd == NULL) {
12670 fctl_ns_req_t *ns_cmd;
12671
12672 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
12673 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
12674 0, sleep);
12675
12676 if (ns_cmd == NULL) {
12677 return (FC_NOMEM);
12678 }
12679 job->job_result = FC_SUCCESS;
12680 ns_cmd->ns_cmd_code = NS_GID_PN;
12681 ((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = *pwwn;
12682
12683 ret = fp_ns_query(port, ns_cmd, job, 1, sleep);
12684 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) {
12685 fctl_free_ns_cmd(ns_cmd);
12686 return (FC_FAILURE);
12687 }
12688 bcopy(ns_cmd->ns_data_buf, (caddr_t)&d_id, sizeof (d_id));
12689 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
12690
12691 fctl_free_ns_cmd(ns_cmd);
12692 lfa = d_id & 0xFFFF00;
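		/*
		 * Masking off the low (AL_PA) byte of the D_ID yields
		 * the LFA (Loop Fabric Address) to which the LINIT
		 * request built below will be addressed.
		 */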
12693
12694 /*
12695 * Given this D_ID, get the port type to see if
12696 * we can do LINIT on the LFA
12697 */
12698 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpt_id_t),
12699 sizeof (ns_resp_gpt_id_t), sizeof (ns_resp_gpt_id_t),
12700 0, sleep);
12701
12702 if (ns_cmd == NULL) {
12703 return (FC_NOMEM);
12704 }
12705
12706 job->job_result = FC_SUCCESS;
12707 ns_cmd->ns_cmd_code = NS_GPT_ID;
12708
12709 ((ns_req_gpt_id_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id;
12710 ((ns_req_gpt_id_t *)
12711 (ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0;
12712
12713 ret = fp_ns_query(port, ns_cmd, job, 1, sleep);
12714 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) {
12715 fctl_free_ns_cmd(ns_cmd);
12716 return (FC_FAILURE);
12717 }
12718 bcopy(ns_cmd->ns_data_buf, (caddr_t)&ptype, sizeof (ptype));
12719
12720 fctl_free_ns_cmd(ns_cmd);
12721
12722 switch (ptype.port_type) {
12723 case FC_NS_PORT_NL:
12724 case FC_NS_PORT_F_NL:
12725 case FC_NS_PORT_FL:
12726 break;
12727
12728 default:
12729 return (FC_FAILURE);
12730 }
12731 } else {
12732 mutex_enter(&pd->pd_mutex);
12733 ptype = pd->pd_porttype;
12734
12735 switch (pd->pd_porttype.port_type) {
12736 case FC_NS_PORT_NL:
12737 case FC_NS_PORT_F_NL:
12738 case FC_NS_PORT_FL:
12739 lfa = pd->pd_port_id.port_id & 0xFFFF00;
12740 break;
12741
12742 default:
12743 mutex_exit(&pd->pd_mutex);
12744 return (FC_FAILURE);
12745 }
12746 mutex_exit(&pd->pd_mutex);
12747 }
12748
12749 mutex_enter(&port->fp_mutex);
12750 s_id = port->fp_port_id.port_id;
12751 class = port->fp_ns_login_class;
12752 mutex_exit(&port->fp_mutex);
12753
12754 cmd = fp_alloc_pkt(port, sizeof (fc_linit_req_t),
12755 sizeof (fc_linit_resp_t), sleep, pd);
12756 if (cmd == NULL) {
12757 return (FC_NOMEM);
12758 }
12759
12760 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class;
12761 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
12762 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
12763 cmd->cmd_retry_count = fp_retry_count;
12764 cmd->cmd_ulp_pkt = NULL;
12765
12766 pkt = &cmd->cmd_pkt;
12767 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
12768
12769 fp_els_init(cmd, s_id, lfa, fp_linit_intr, job);
12770
12771 	/*
12772 	 * How does LIP work, by the way?
12773 	 * If the L_Port receives three consecutive identical ordered
12774 	 * sets whose first two characters (fully decoded) are equal to
12775 	 * the values shown in Table 3 of FC-AL-2 then the L_Port shall
12776 	 * recognize a Loop Initialization Primitive sequence. The
12777 	 * third character determines the type of LIP:
12778 	 * LIP(F7)	Normal LIP
12779 	 * LIP(F8)	Loop Failure LIP
12780 	 *
12781 	 * The possible combinations for the 3rd and 4th bytes are:
12782 	 * F7, F7	Normal LIP	- No valid AL_PA
12783 	 * F8, F8	Loop Failure	- No valid AL_PA
12784 	 * F7, AL_PS	Normal LIP	- Valid source AL_PA
12785 	 * F8, AL_PS	Loop Failure	- Valid source AL_PA
12786 	 * AL_PD AL_PS	Loop reset of AL_PD originated by AL_PS
12787 	 *		and Normal LIP for all other loop members
12788 	 * 0xFF AL_PS	Vendor specific reset of all loop members
12789 	 *
12790 	 * We, as the source, may not always have an AL_PS (the source
12791 	 * AL_PA) to put in the 4th character slot, so we choose
12792 	 * (Normal LIP, no valid AL_PA); that is, in the LINIT
12793 	 * payload we set:
12794 	 * lip_b3 = 0xF7;	Normal LIP
12795 	 * lip_b4 = 0xF7;	No valid source AL_PA
12796 	 */
12797 payload.ls_code.ls_code = LA_ELS_LINIT;
12798 payload.ls_code.mbz = 0;
12799 payload.rsvd = 0;
12800 payload.func = 0; /* Let Fabric determine the best way */
12801 payload.lip_b3 = 0xF7; /* Normal LIP */
12802 payload.lip_b4 = 0xF7; /* No valid source AL_PA */
12803
12804 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
12805 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
12806
12807 job->job_counter = 1;
12808
12809 ret = fp_sendcmd(port, cmd, port->fp_fca_handle);
12810 if (ret == FC_SUCCESS) {
12811 fp_jobwait(job);
12812 rval = job->job_result;
12813 } else {
12814 rval = FC_FAILURE;
12815 fp_free_pkt(cmd);
12816 }
12817
12818 return (rval);
12819 }
12820
12821
12822 /*
12823 * Fill out the device handles with GAN response
12824 */
12825 static void
12826 fp_stuff_device_with_gan(ddi_acc_handle_t *handle, fc_remote_port_t *pd,
12827 ns_resp_gan_t *gan_resp)
12828 {
12829 fc_remote_node_t *node;
12830 fc_porttype_t type;
12831 fc_local_port_t *port;
12832
12833 ASSERT(pd != NULL);
12834 ASSERT(handle != NULL);
12835
12836 port = pd->pd_port;
12837
12838 FP_TRACE(FP_NHEAD1(1, 0), "GAN PD stuffing; pd=%p,"
12839 " port_id=%x, sym_len=%d fc4-type=%x",
12840 pd, gan_resp->gan_type_id.rsvd,
12841 gan_resp->gan_spnlen, gan_resp->gan_fc4types[0]);
12842
12843 mutex_enter(&pd->pd_mutex);
12844
12845 FC_GET_RSP(port, *handle, (uint8_t *)&type,
12846 (uint8_t *)&gan_resp->gan_type_id, sizeof (type), DDI_DEV_AUTOINCR);
12847
12848 pd->pd_porttype.port_type = type.port_type;
12849 pd->pd_porttype.rsvd = 0;
12850
12851 pd->pd_spn_len = gan_resp->gan_spnlen;
12852 if (pd->pd_spn_len) {
12853 FC_GET_RSP(port, *handle, (uint8_t *)pd->pd_spn,
12854 (uint8_t *)gan_resp->gan_spname, pd->pd_spn_len,
12855 DDI_DEV_AUTOINCR);
12856 }
12857
12858 FC_GET_RSP(port, *handle, (uint8_t *)pd->pd_ip_addr,
12859 (uint8_t *)gan_resp->gan_ip, sizeof (pd->pd_ip_addr),
12860 DDI_DEV_AUTOINCR);
12861 FC_GET_RSP(port, *handle, (uint8_t *)&pd->pd_cos,
12862 (uint8_t *)&gan_resp->gan_cos, sizeof (pd->pd_cos),
12863 DDI_DEV_AUTOINCR);
12864 FC_GET_RSP(port, *handle, (uint8_t *)pd->pd_fc4types,
12865 (uint8_t *)gan_resp->gan_fc4types, sizeof (pd->pd_fc4types),
12866 DDI_DEV_AUTOINCR);
12867
12868 node = pd->pd_remote_nodep;
12869 mutex_exit(&pd->pd_mutex);
12870
12871 mutex_enter(&node->fd_mutex);
12872
12873 FC_GET_RSP(port, *handle, (uint8_t *)node->fd_ipa,
12874 (uint8_t *)gan_resp->gan_ipa, sizeof (node->fd_ipa),
12875 DDI_DEV_AUTOINCR);
12876
12877 node->fd_snn_len = gan_resp->gan_snnlen;
12878 if (node->fd_snn_len) {
12879 FC_GET_RSP(port, *handle, (uint8_t *)node->fd_snn,
12880 (uint8_t *)gan_resp->gan_snname, node->fd_snn_len,
12881 DDI_DEV_AUTOINCR);
12882 }
12883
12884 mutex_exit(&node->fd_mutex);
12885 }
12886
12887
12888 /*
12889  * Handle all NS queries (NS object registration is
12890  * handled elsewhere)
12891  */
12892 static int
12893 fp_ns_query(fc_local_port_t *port, fctl_ns_req_t *ns_cmd, job_request_t *job,
12894 int polled, int sleep)
12895 {
12896 int rval;
12897 fp_cmd_t *cmd;
12898
12899 ASSERT(!MUTEX_HELD(&port->fp_mutex));
12900
12901 if (ns_cmd->ns_cmd_code == NS_GA_NXT) {
12902 FP_TRACE(FP_NHEAD1(1, 0), "fp_ns_query GA_NXT fp %x pd %x",
12903 port->fp_port_id.port_id, ns_cmd->ns_gan_sid);
12904 }
12905
12906 if (ns_cmd->ns_cmd_size == 0) {
12907 return (FC_FAILURE);
12908 }
12909
12910 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) +
12911 ns_cmd->ns_cmd_size, sizeof (fc_ct_header_t) +
12912 ns_cmd->ns_resp_size, sleep, NULL);
12913 if (cmd == NULL) {
12914 return (FC_NOMEM);
12915 }
12916
12917 fp_ct_init(port, cmd, ns_cmd, ns_cmd->ns_cmd_code, ns_cmd->ns_cmd_buf,
12918 ns_cmd->ns_cmd_size, ns_cmd->ns_resp_size, job);
12919
12920 if (polled) {
12921 job->job_counter = 1;
12922 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
12923 }
12924 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
12925 if (rval != FC_SUCCESS) {
12926 job->job_result = rval;
12927 fp_iodone(cmd);
12928 if (polled == 0) {
12929 /*
12930 * Return FC_SUCCESS to indicate that
12931 * fp_iodone is performed already.
12932 */
12933 rval = FC_SUCCESS;
12934 }
12935 }
12936
12937 if (polled) {
12938 fp_jobwait(job);
12939 rval = job->job_result;
12940 }
12941
12942 return (rval);
12943 }
12944
12945
12946 /*
12947 * Initialize Common Transport request
12948 */
12949 static void
12950 fp_ct_init(fc_local_port_t *port, fp_cmd_t *cmd, fctl_ns_req_t *ns_cmd,
12951 uint16_t cmd_code, caddr_t cmd_buf, uint16_t cmd_len,
12952 uint16_t resp_len, job_request_t *job)
12953 {
12954 uint32_t s_id;
12955 uchar_t class;
12956 fc_packet_t *pkt;
12957 fc_ct_header_t ct;
12958
12959 ASSERT(!MUTEX_HELD(&port->fp_mutex));
12960
12961 mutex_enter(&port->fp_mutex);
12962 s_id = port->fp_port_id.port_id;
12963 class = port->fp_ns_login_class;
12964 mutex_exit(&port->fp_mutex);
12965
12966 cmd->cmd_job = job;
12967 cmd->cmd_private = ns_cmd;
12968 pkt = &cmd->cmd_pkt;
12969
12970 ct.ct_rev = CT_REV;
12971 ct.ct_inid = 0;
12972 ct.ct_fcstype = FCSTYPE_DIRECTORY;
12973 ct.ct_fcssubtype = FCSSUB_DS_NAME_SERVER;
12974 ct.ct_options = 0;
12975 ct.ct_reserved1 = 0;
12976 ct.ct_cmdrsp = cmd_code;
12977 ct.ct_aiusize = resp_len >> 2;
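	/* ct_aiusize above is expressed in 4-byte words, hence the >> 2 */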
12978 ct.ct_reserved2 = 0;
12979 ct.ct_reason = 0;
12980 ct.ct_expln = 0;
12981 ct.ct_vendor = 0;
12982
12983 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&ct,
12984 (uint8_t *)pkt->pkt_cmd, sizeof (ct), DDI_DEV_AUTOINCR);
12985
12986 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_UNSOL_CONTROL;
12987 pkt->pkt_cmd_fhdr.d_id = 0xFFFFFC;
12988 pkt->pkt_cmd_fhdr.s_id = s_id;
12989 pkt->pkt_cmd_fhdr.type = FC_TYPE_FC_SERVICES;
12990 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE |
12991 F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
12992 pkt->pkt_cmd_fhdr.seq_id = 0;
12993 pkt->pkt_cmd_fhdr.df_ctl = 0;
12994 pkt->pkt_cmd_fhdr.seq_cnt = 0;
12995 pkt->pkt_cmd_fhdr.ox_id = 0xffff;
12996 pkt->pkt_cmd_fhdr.rx_id = 0xffff;
12997 pkt->pkt_cmd_fhdr.ro = 0;
12998 pkt->pkt_cmd_fhdr.rsvd = 0;
12999
13000 pkt->pkt_comp = fp_ns_intr;
13001 pkt->pkt_ulp_private = (opaque_t)cmd;
13002 pkt->pkt_timeout = FP_NS_TIMEOUT;
13003
13004 if (cmd_buf) {
13005 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)cmd_buf,
13006 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
13007 cmd_len, DDI_DEV_AUTOINCR);
13008 }
13009
13010 cmd->cmd_transport = port->fp_fca_tran->fca_transport;
13011
13012 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class;
13013 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
13014 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE;
13015 cmd->cmd_retry_count = fp_retry_count;
13016 cmd->cmd_ulp_pkt = NULL;
13017 }
13018
13019
13020 /*
13021 * Name Server request interrupt routine
13022 */
13023 static void
13024 fp_ns_intr(fc_packet_t *pkt)
13025 {
13026 fp_cmd_t *cmd;
13027 fc_local_port_t *port;
13028 fc_ct_header_t resp_hdr;
13029 fc_ct_header_t cmd_hdr;
13030 fctl_ns_req_t *ns_cmd;
13031
13032 cmd = pkt->pkt_ulp_private;
13033 port = cmd->cmd_port;
13034
13035 mutex_enter(&port->fp_mutex);
13036 port->fp_out_fpcmds--;
13037 mutex_exit(&port->fp_mutex);
13038
13039 FC_GET_RSP(port, pkt->pkt_cmd_acc, (uint8_t *)&cmd_hdr,
13040 (uint8_t *)pkt->pkt_cmd, sizeof (cmd_hdr), DDI_DEV_AUTOINCR);
13041 ns_cmd = (fctl_ns_req_t *)
13042 (((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private);
13043 if (!FP_IS_PKT_ERROR(pkt)) {
13044 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp_hdr,
13045 (uint8_t *)pkt->pkt_resp, sizeof (resp_hdr),
13046 DDI_DEV_AUTOINCR);
13047
13048 		/*
13049 		 * On x86 architectures, make sure the resp_hdr is big endian.
13050 		 * The macro is a NOP on sparc architectures, where the header
13051 		 * is already big endian and the conversion would not change
13052 		 * the result, so no time is wasted there.
13053 		 */
13054 MAKE_BE_32(&resp_hdr);
13055
13056 if (ns_cmd) {
13057 /*
13058 * Always copy out the response CT_HDR
13059 */
13060 bcopy(&resp_hdr, &ns_cmd->ns_resp_hdr,
13061 sizeof (resp_hdr));
13062 }
13063
13064 if (resp_hdr.ct_cmdrsp == FS_RJT_IU) {
13065 pkt->pkt_state = FC_PKT_FS_RJT;
13066 pkt->pkt_reason = resp_hdr.ct_reason;
13067 pkt->pkt_expln = resp_hdr.ct_expln;
13068 }
13069 }
13070
13071 if (FP_IS_PKT_ERROR(pkt)) {
13072 if (ns_cmd) {
13073 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) {
13074 ASSERT(ns_cmd->ns_pd != NULL);
13075
13076 /* Mark it OLD if not already done */
13077 mutex_enter(&ns_cmd->ns_pd->pd_mutex);
13078 ns_cmd->ns_pd->pd_type = PORT_DEVICE_OLD;
13079 mutex_exit(&ns_cmd->ns_pd->pd_mutex);
13080 }
13081
13082 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) {
13083 fctl_free_ns_cmd(ns_cmd);
13084 ((fp_cmd_t *)
13085 (pkt->pkt_ulp_private))->cmd_private = NULL;
13086 }
13087
13088 }
13089
13090 FP_TRACE(FP_NHEAD2(1, 0), "%x NS failure pkt state=%x "
13091 "reason=%x, expln=%x, NSCMD=%04X, NSRSP=%04X",
13092 port->fp_port_id.port_id, pkt->pkt_state,
13093 pkt->pkt_reason, pkt->pkt_expln,
13094 cmd_hdr.ct_cmdrsp, resp_hdr.ct_cmdrsp);
13095
13096 (void) fp_common_intr(pkt, 1);
13097
13098 return;
13099 }
13100
13101 if (resp_hdr.ct_cmdrsp != FS_ACC_IU) {
13102 uint32_t d_id;
13103 fc_local_port_t *port;
13104 fp_cmd_t *cmd;
13105
13106 d_id = pkt->pkt_cmd_fhdr.d_id;
13107 cmd = pkt->pkt_ulp_private;
13108 port = cmd->cmd_port;
13109 FP_TRACE(FP_NHEAD2(9, 0),
13110 "Bogus NS response received for D_ID=%x", d_id);
13111 }
13112
13113 if (cmd_hdr.ct_cmdrsp == NS_GA_NXT) {
13114 fp_gan_handler(pkt, ns_cmd);
13115 return;
13116 }
13117
13118 if (cmd_hdr.ct_cmdrsp >= NS_GPN_ID &&
13119 cmd_hdr.ct_cmdrsp <= NS_GID_PT) {
13120 if (ns_cmd) {
13121 if ((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0) {
13122 fp_ns_query_handler(pkt, ns_cmd);
13123 return;
13124 }
13125 }
13126 }
13127
13128 fp_iodone(pkt->pkt_ulp_private);
13129 }
13130
13131
13132 /*
13133 * Process NS_GAN response
13134 */
13135 static void
13136 fp_gan_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd)
13137 {
13138 int my_did;
13139 fc_portid_t d_id;
13140 fp_cmd_t *cmd;
13141 fc_local_port_t *port;
13142 fc_remote_port_t *pd;
13143 ns_req_gan_t gan_req;
13144 ns_resp_gan_t *gan_resp;
13145
13146 ASSERT(ns_cmd != NULL);
13147
13148 cmd = pkt->pkt_ulp_private;
13149 port = cmd->cmd_port;
13150
13151 gan_resp = (ns_resp_gan_t *)(pkt->pkt_resp + sizeof (fc_ct_header_t));
13152
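	/*
	 * The first word of the GA_NXT response carries the port type
	 * in its top byte and the 24-bit port identifier below it; copy
	 * it out and byte-swap it to host order before use.
	 */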
13153 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&d_id,
13154 (uint8_t *)&gan_resp->gan_type_id, sizeof (d_id), DDI_DEV_AUTOINCR);
13155
13156 *(uint32_t *)&d_id = BE_32(*(uint32_t *)&d_id);
13157
13158 	/*
13159 	 * In this case the priv_lilp_posit field actually represents
13160 	 * the relative position on a private loop. So zero it while
13161 	 * dealing with Port Identifiers.
13162 	 */
13163 d_id.priv_lilp_posit = 0;
13164 pd = fctl_get_remote_port_by_did(port, d_id.port_id);
13165 if (ns_cmd->ns_gan_sid == d_id.port_id) {
13166 /*
13167 * We've come a full circle; time to get out.
13168 */
13169 fp_iodone(cmd);
13170 return;
13171 }
13172
13173 if (ns_cmd->ns_gan_sid == FCTL_GAN_START_ID) {
13174 ns_cmd->ns_gan_sid = d_id.port_id;
13175 }
13176
13177 mutex_enter(&port->fp_mutex);
13178 my_did = (d_id.port_id == port->fp_port_id.port_id) ? 1 : 0;
13179 mutex_exit(&port->fp_mutex);
13180
13181 FP_TRACE(FP_NHEAD1(1, 0), "GAN response; port=%p, fp %x pd %x", port,
13182 port->fp_port_id.port_id, d_id.port_id);
13183 if (my_did == 0) {
13184 la_wwn_t pwwn;
13185 la_wwn_t nwwn;
13186
13187 FP_TRACE(FP_NHEAD1(1, 0), "GAN response details; "
13188 "port=%p, d_id=%x, type_id=%x, "
13189 "pwwn=%x %x %x %x %x %x %x %x, "
13190 "nwwn=%x %x %x %x %x %x %x %x",
13191 port, d_id.port_id, gan_resp->gan_type_id,
13192
13193 gan_resp->gan_pwwn.raw_wwn[0],
13194 gan_resp->gan_pwwn.raw_wwn[1],
13195 gan_resp->gan_pwwn.raw_wwn[2],
13196 gan_resp->gan_pwwn.raw_wwn[3],
13197 gan_resp->gan_pwwn.raw_wwn[4],
13198 gan_resp->gan_pwwn.raw_wwn[5],
13199 gan_resp->gan_pwwn.raw_wwn[6],
13200 gan_resp->gan_pwwn.raw_wwn[7],
13201
13202 gan_resp->gan_nwwn.raw_wwn[0],
13203 gan_resp->gan_nwwn.raw_wwn[1],
13204 gan_resp->gan_nwwn.raw_wwn[2],
13205 gan_resp->gan_nwwn.raw_wwn[3],
13206 gan_resp->gan_nwwn.raw_wwn[4],
13207 gan_resp->gan_nwwn.raw_wwn[5],
13208 gan_resp->gan_nwwn.raw_wwn[6],
13209 gan_resp->gan_nwwn.raw_wwn[7]);
13210
13211 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&nwwn,
13212 (uint8_t *)&gan_resp->gan_nwwn, sizeof (nwwn),
13213 DDI_DEV_AUTOINCR);
13214
13215 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&pwwn,
13216 (uint8_t *)&gan_resp->gan_pwwn, sizeof (pwwn),
13217 DDI_DEV_AUTOINCR);
13218
13219 if (ns_cmd->ns_flags & FCTL_NS_CREATE_DEVICE && pd == NULL) {
13220 			FP_TRACE(FP_NHEAD1(1, 0), "fp %x gan_handler create"
13221 			    " pd %x", port->fp_port_id.port_id, d_id.port_id);
13222 pd = fctl_create_remote_port(port, &nwwn, &pwwn,
13223 d_id.port_id, PD_PLOGI_INITIATOR, KM_NOSLEEP);
13224 }
13225 if (pd != NULL) {
13226 fp_stuff_device_with_gan(&pkt->pkt_resp_acc,
13227 pd, gan_resp);
13228 }
13229
13230 if (ns_cmd->ns_flags & FCTL_NS_GET_DEV_COUNT) {
13231 *((int *)ns_cmd->ns_data_buf) += 1;
13232 }
13233
13234 if (ns_cmd->ns_flags & FCTL_NS_FILL_NS_MAP) {
13235 ASSERT((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0);
13236
13237 if (ns_cmd->ns_flags & FCTL_NS_BUF_IS_USERLAND) {
13238 fc_port_dev_t *userbuf;
13239
13240 userbuf = ((fc_port_dev_t *)
13241 ns_cmd->ns_data_buf) +
13242 ns_cmd->ns_gan_index++;
13243
13244 userbuf->dev_did = d_id;
13245
13246 FC_GET_RSP(port, pkt->pkt_resp_acc,
13247 (uint8_t *)userbuf->dev_type,
13248 (uint8_t *)gan_resp->gan_fc4types,
13249 sizeof (userbuf->dev_type),
13250 DDI_DEV_AUTOINCR);
13251
13252 userbuf->dev_nwwn = nwwn;
13253 userbuf->dev_pwwn = pwwn;
13254
13255 if (pd != NULL) {
13256 mutex_enter(&pd->pd_mutex);
13257 userbuf->dev_state = pd->pd_state;
13258 userbuf->dev_hard_addr =
13259 pd->pd_hard_addr;
13260 mutex_exit(&pd->pd_mutex);
13261 } else {
13262 userbuf->dev_state =
13263 PORT_DEVICE_INVALID;
13264 }
13265 } else if (ns_cmd->ns_flags &
13266 FCTL_NS_BUF_IS_FC_PORTMAP) {
13267 fc_portmap_t *map;
13268
13269 map = ((fc_portmap_t *)
13270 ns_cmd->ns_data_buf) +
13271 ns_cmd->ns_gan_index++;
13272
13273 /*
13274 * First fill it like any new map
13275 * and update the port device info
13276 * below.
13277 */
13278 fp_fillout_new_nsmap(port, &pkt->pkt_resp_acc,
13279 map, gan_resp, d_id.port_id);
13280 if (pd != NULL) {
13281 fctl_copy_portmap(map, pd);
13282 } else {
13283 map->map_state = PORT_DEVICE_INVALID;
13284 map->map_type = PORT_DEVICE_NOCHANGE;
13285 }
13286 } else {
13287 caddr_t dst_ptr;
13288
13289 dst_ptr = ns_cmd->ns_data_buf +
13290 (NS_GAN_RESP_LEN) * ns_cmd->ns_gan_index++;
13291
13292 FC_GET_RSP(port, pkt->pkt_resp_acc,
13293 (uint8_t *)dst_ptr, (uint8_t *)gan_resp,
13294 NS_GAN_RESP_LEN, DDI_DEV_AUTOINCR);
13295 }
13296 } else {
13297 ns_cmd->ns_gan_index++;
13298 }
13299 if (ns_cmd->ns_gan_index >= ns_cmd->ns_gan_max) {
13300 fp_iodone(cmd);
13301 return;
13302 }
13303 }
13304
13305 gan_req.pid = d_id;
13306
13307 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&gan_req,
13308 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)),
13309 sizeof (gan_req), DDI_DEV_AUTOINCR);
13310
13311 if (cmd->cmd_transport(port->fp_fca_handle, pkt) != FC_SUCCESS) {
13312 pkt->pkt_state = FC_PKT_TRAN_ERROR;
13313 fp_iodone(cmd);
13314 } else {
13315 mutex_enter(&port->fp_mutex);
13316 port->fp_out_fpcmds++;
13317 mutex_exit(&port->fp_mutex);
13318 }
13319 }
13320
13321
13322 /*
13323 * Handle NS Query interrupt
13324 */
13325 static void
13326 fp_ns_query_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd)
13327 {
13328 fp_cmd_t *cmd;
13329 fc_local_port_t *port;
13330 caddr_t src_ptr;
13331 uint32_t xfer_len;
13332
13333 cmd = pkt->pkt_ulp_private;
13334 port = cmd->cmd_port;
13335
13336 xfer_len = ns_cmd->ns_resp_size;
13337
13338 FP_TRACE(FP_NHEAD1(1, 0), "NS Query response, cmd_code=%x, xfer_len=%x",
13339 ns_cmd->ns_cmd_code, xfer_len);
13340
13341 if (ns_cmd->ns_cmd_code == NS_GPN_ID) {
13342 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t);
13343
13344 FP_TRACE(FP_NHEAD1(6, 0), "GPN_ID results; %x %x %x %x %x",
13345 src_ptr[0], src_ptr[1], src_ptr[2], src_ptr[3], src_ptr[4]);
13346 }
13347
13348 if (xfer_len <= ns_cmd->ns_data_len) {
13349 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t);
13350 FC_GET_RSP(port, pkt->pkt_resp_acc,
13351 (uint8_t *)ns_cmd->ns_data_buf,
13352 (uint8_t *)src_ptr, xfer_len, DDI_DEV_AUTOINCR);
13353 }
13354
13355 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) {
13356 ASSERT(ns_cmd->ns_pd != NULL);
13357
13358 mutex_enter(&ns_cmd->ns_pd->pd_mutex);
13359 if (ns_cmd->ns_pd->pd_type == PORT_DEVICE_OLD) {
13360 ns_cmd->ns_pd->pd_type = PORT_DEVICE_NOCHANGE;
13361 }
13362 mutex_exit(&ns_cmd->ns_pd->pd_mutex);
13363 }
13364
13365 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) {
13366 fctl_free_ns_cmd(ns_cmd);
13367 ((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private = NULL;
13368 }
13369 fp_iodone(cmd);
13370 }
13371
13372
13373 /*
13374 * Handle unsolicited ADISC ELS request
13375 */
13376 static void
13377 fp_handle_unsol_adisc(fc_local_port_t *port, fc_unsol_buf_t *buf,
13378 fc_remote_port_t *pd, job_request_t *job)
13379 {
13380 int rval;
13381 fp_cmd_t *cmd;
13382
13383 FP_TRACE(FP_NHEAD1(5, 0), "ADISC; port=%p, D_ID=%x state=%x, pd=%p",
13384 port, pd->pd_port_id.port_id, pd->pd_state, pd);
13385 mutex_enter(&pd->pd_mutex);
13386 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
13387 mutex_exit(&pd->pd_mutex);
13388 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
13389 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
13390 0, KM_SLEEP, pd);
13391 if (cmd != NULL) {
13392 fp_els_rjt_init(port, cmd, buf,
13393 FC_ACTION_NON_RETRYABLE,
13394 FC_REASON_INVALID_LINK_CTRL, job);
13395
13396 if (fp_sendcmd(port, cmd,
13397 port->fp_fca_handle) != FC_SUCCESS) {
13398 fp_free_pkt(cmd);
13399 }
13400 }
13401 }
13402 } else {
13403 mutex_exit(&pd->pd_mutex);
13404 		/*
13405 		 * Even though we don't have a hard address, we should
13406 		 * still respond. See section 21.19.2 of FC-PH-2, which
13407 		 * essentially says that if an NL_Port doesn't have a
13408 		 * hard address, or if a port does not have FC-AL
13409 		 * capability, it shall report zeroes in this
13410 		 * field.
13411 		 */
13412 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t),
13413 0, KM_SLEEP, pd);
13414 if (cmd == NULL) {
13415 return;
13416 }
13417 fp_adisc_acc_init(port, cmd, buf, job);
13418 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
13419 if (rval != FC_SUCCESS) {
13420 fp_free_pkt(cmd);
13421 }
13422 }
13423 }
13424
13425
13426 /*
13427 * Initialize ADISC response.
13428 */
13429 static void
13430 fp_adisc_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf,
13431 job_request_t *job)
13432 {
13433 fc_packet_t *pkt;
13434 la_els_adisc_t payload;
13435
13436 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class;
13437 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND;
13438 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
13439 cmd->cmd_retry_count = 1;
13440 cmd->cmd_ulp_pkt = NULL;
13441
13442 cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
13443 cmd->cmd_job = job;
13444
13445 pkt = &cmd->cmd_pkt;
13446
13447 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
13448
13449 payload.ls_code.ls_code = LA_ELS_ACC;
13450 payload.ls_code.mbz = 0;
13451
13452 mutex_enter(&port->fp_mutex);
13453 payload.nport_id = port->fp_port_id;
13454 payload.hard_addr = port->fp_hard_addr;
13455 mutex_exit(&port->fp_mutex);
13456
13457 payload.port_wwn = port->fp_service_params.nport_ww_name;
13458 payload.node_wwn = port->fp_service_params.node_ww_name;
13459
13460 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload,
13461 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
13462 }
13463
13464
13465 /*
13466 * Hold and Install the requested ULP drivers
13467 */
13468 static void
13469 fp_load_ulp_modules(dev_info_t *dip, fc_local_port_t *port)
13470 {
13471 int len;
13472 int count;
13473 int data_len;
13474 major_t ulp_major;
13475 caddr_t ulp_name;
13476 caddr_t data_ptr;
13477 caddr_t data_buf;
13478
13479 ASSERT(!MUTEX_HELD(&port->fp_mutex));
13480
13481 data_buf = NULL;
13482 if (ddi_getlongprop(DDI_DEV_T_ANY, dip,
13483 DDI_PROP_DONTPASS, "load-ulp-list",
13484 (caddr_t)&data_buf, &data_len) != DDI_PROP_SUCCESS) {
13485 return;
13486 }
13487
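	/*
	 * The load-ulp-list property is a packed set of NUL-terminated
	 * strings: the first string is the number of ULP driver names
	 * that follow, one name per string.
	 */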
13488 len = strlen(data_buf);
13489 port->fp_ulp_nload = fctl_atoi(data_buf, 10);
13490
13491 data_ptr = data_buf + len + 1;
13492 for (count = 0; count < port->fp_ulp_nload; count++) {
13493 len = strlen(data_ptr) + 1;
13494 ulp_name = kmem_zalloc(len, KM_SLEEP);
13495 bcopy(data_ptr, ulp_name, len);
13496
13497 ulp_major = ddi_name_to_major(ulp_name);
13498
13499 if (ulp_major != (major_t)-1) {
13500 if (modload("drv", ulp_name) < 0) {
13501 fp_printf(port, CE_NOTE, FP_LOG_ONLY,
13502 0, NULL, "failed to load %s",
13503 ulp_name);
13504 }
13505 } else {
13506 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
13507 "%s isn't a valid driver", ulp_name);
13508 }
13509
13510 kmem_free(ulp_name, len);
13511 data_ptr += len; /* Skip to next field */
13512 }
13513
13514 /*
13515 * Free the memory allocated by DDI
13516 */
13517 if (data_buf != NULL) {
13518 kmem_free(data_buf, data_len);
13519 }
13520 }
13521
13522
13523 /*
13524 * Perform LOGO operation
13525 */
13526 static int
13527 fp_logout(fc_local_port_t *port, fc_remote_port_t *pd, job_request_t *job)
13528 {
13529 int rval;
13530 fp_cmd_t *cmd;
13531
13532 ASSERT(!MUTEX_HELD(&port->fp_mutex));
13533 ASSERT(!MUTEX_HELD(&pd->pd_mutex));
13534
13535 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t),
13536 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd);
13537
13538 mutex_enter(&port->fp_mutex);
13539 mutex_enter(&pd->pd_mutex);
13540
13541 ASSERT(pd->pd_state == PORT_DEVICE_LOGGED_IN);
13542 ASSERT(pd->pd_login_count == 1);
13543
13544 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
13545 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
13546 cmd->cmd_flags = 0;
13547 cmd->cmd_retry_count = 1;
13548 cmd->cmd_ulp_pkt = NULL;
13549
13550 fp_logo_init(pd, cmd, job);
13551
13552 mutex_exit(&pd->pd_mutex);
13553 mutex_exit(&port->fp_mutex);
13554
13555 rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
13556 if (rval != FC_SUCCESS) {
13557 fp_iodone(cmd);
13558 }
13559
13560 return (rval);
13561 }
13562
13563
13564 /*
13565 * Perform Port attach callbacks to registered ULPs
13566 */
13567 static void
13568 fp_attach_ulps(fc_local_port_t *port, fc_attach_cmd_t cmd)
13569 {
13570 fp_soft_attach_t *att;
13571
13572 att = kmem_zalloc(sizeof (*att), KM_SLEEP);
13573 att->att_cmd = cmd;
13574 att->att_port = port;
13575
13576 /*
13577 * We need to remember whether or not fctl_busy_port
13578 * succeeded so we know whether or not to call
13579 * fctl_idle_port when the task is complete.
13580 */
13581
13582 if (fctl_busy_port(port) == 0) {
13583 att->att_need_pm_idle = B_TRUE;
13584 } else {
13585 att->att_need_pm_idle = B_FALSE;
13586 }
13587
13588 (void) taskq_dispatch(port->fp_taskq, fp_ulp_port_attach,
13589 att, KM_SLEEP);
13590 }
13591
13592
13593 /*
13594 * Forward state change notifications on to interested ULPs.
13595 * Spawns a call to fctl_ulp_statec_cb() in a taskq thread to do all the
13596 * real work.
13597 */
13598 static int
13599 fp_ulp_notify(fc_local_port_t *port, uint32_t statec, int sleep)
13600 {
13601 fc_port_clist_t *clist;
13602
13603 clist = kmem_zalloc(sizeof (*clist), sleep);
13604 if (clist == NULL) {
13605 return (FC_NOMEM);
13606 }
13607
13608 clist->clist_state = statec;
13609
13610 mutex_enter(&port->fp_mutex);
13611 clist->clist_flags = port->fp_topology;
13612 mutex_exit(&port->fp_mutex);
13613
13614 clist->clist_port = (opaque_t)port;
13615 clist->clist_len = 0;
13616 clist->clist_size = 0;
13617 clist->clist_map = NULL;
13618
13619 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb,
13620 clist, KM_SLEEP);
13621
13622 return (FC_SUCCESS);
13623 }
13624
13625
13626 /*
13627 * Get name server map
13628 */
13629 static int
13630 fp_ns_getmap(fc_local_port_t *port, job_request_t *job, fc_portmap_t **map,
13631 uint32_t *len, uint32_t sid)
13632 {
13633 int ret;
13634 fctl_ns_req_t *ns_cmd;
13635
13636 	/*
13637 	 * Don't let the allocator do anything for the response;
13638 	 * we have a buffer ready to fill out.
13639 	 */
13640 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t),
13641 sizeof (ns_resp_gan_t), 0, (FCTL_NS_FILL_NS_MAP |
13642 FCTL_NS_BUF_IS_FC_PORTMAP), KM_SLEEP);
13643
13644 	ASSERT(ns_cmd != NULL);
13645
13646 	ns_cmd->ns_data_len = sizeof (**map) * (*len);
13647 	ns_cmd->ns_data_buf = (caddr_t)*map;
13648
13649 ns_cmd->ns_gan_index = 0;
13650 ns_cmd->ns_gan_sid = sid;
13651 ns_cmd->ns_cmd_code = NS_GA_NXT;
13652 ns_cmd->ns_gan_max = *len;
13653
13654 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);
13655
13656 if (ns_cmd->ns_gan_index != *len) {
13657 *len = ns_cmd->ns_gan_index;
13658 }
13659 ns_cmd->ns_data_len = 0;
13660 ns_cmd->ns_data_buf = NULL;
13661 fctl_free_ns_cmd(ns_cmd);
13662
13663 return (ret);
13664 }
13665
13666
13667 /*
13668 * Create a remote port in Fabric topology by using NS services
13669 */
13670 static fc_remote_port_t *
13671 fp_create_remote_port_by_ns(fc_local_port_t *port, uint32_t d_id, int sleep)
13672 {
13673 int rval;
13674 job_request_t *job;
13675 fctl_ns_req_t *ns_cmd;
13676 fc_remote_port_t *pd;
13677
13678 ASSERT(!MUTEX_HELD(&port->fp_mutex));
13679
13680 FP_TRACE(FP_NHEAD1(1, 0), "PD creation begin; port=%p, d_id=%x",
13681 port, d_id);
13682
13683 #ifdef DEBUG
13684 mutex_enter(&port->fp_mutex);
13685 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology));
13686 mutex_exit(&port->fp_mutex);
13687 #endif
13688
13689 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, (opaque_t)port, sleep);
13690 if (job == NULL) {
13691 return (NULL);
13692 }
13693
13694 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t),
13695 sizeof (ns_resp_gan_t), 0, (FCTL_NS_CREATE_DEVICE |
13696 FCTL_NS_NO_DATA_BUF), sleep);
13697 if (ns_cmd == NULL) {
13698 return (NULL);
13699 }
13700
13701 job->job_result = FC_SUCCESS;
13702 ns_cmd->ns_gan_max = 1;
13703 ns_cmd->ns_cmd_code = NS_GA_NXT;
13704 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID;
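	/*
	 * GA_NXT returns the name server entry that follows the port ID
	 * supplied in the request, so querying (d_id - 1) retrieves the
	 * entry for d_id itself.
	 */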
13705 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id - 1;
13706 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0;
13707
13708 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
13709 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);
13710 fctl_free_ns_cmd(ns_cmd);
13711
13712 if (rval != FC_SUCCESS || job->job_result != FC_SUCCESS) {
13713 fctl_dealloc_job(job);
13714 return (NULL);
13715 }
13716 fctl_dealloc_job(job);
13717
13718 pd = fctl_get_remote_port_by_did(port, d_id);
13719
13720 FP_TRACE(FP_NHEAD1(1, 0), "PD creation end; port=%p, d_id=%x, pd=%p",
13721 port, d_id, pd);
13722
13723 return (pd);
13724 }
13725
13726
13727 /*
13728  * Check the permissions on an ioctl command. If the command requires an
13729  * EXCLUSIVE open, return FAILURE to just shut the door on it. If the
13730  * ioctl command isn't in the permission list at all, shut the door on that too.
13731  *
13732  * Certain ioctls perform hardware accesses in FCA drivers, and we need to
13733  * make sure that users open the port for exclusive access while
13734  * performing those operations.
13735  *
13736  * This can prevent a casual user from inflicting damage on the port by
13737  * sending these ioctls from multiple processes/threads (there is no good
13738  * reason why one would need to do that) without actually realizing how
13739  * expensive such commands could turn out to be.
13740  *
13741  * It is also important to note that, even with exclusive access,
13742  * multiple threads can share the same file descriptor and fire down
13743  * commands in parallel. To prevent that the driver needs to make sure
13744  * that such commands aren't already in progress. This is taken care of
13745  * by the FP_EXCL_BUSY bit of fp_flag.
13746  */
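/*
 * A minimal usage sketch (variable and ioctl names here are
 * hypothetical; the real caller lives elsewhere in this file):
 *
 *	if (fp_check_perms(port_flag, FCIO_SOME_CMD) != FC_SUCCESS)
 *		return (EACCES);
 */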
13747 static int
13748 fp_check_perms(uchar_t open_flag, uint16_t ioctl_cmd)
13749 {
13750 int ret = FC_FAILURE;
13751 int count;
13752
13753 for (count = 0;
13754 count < sizeof (fp_perm_list) / sizeof (fp_perm_list[0]);
13755 count++) {
13756 if (fp_perm_list[count].fp_ioctl_cmd == ioctl_cmd) {
13757 if (fp_perm_list[count].fp_open_flag & open_flag) {
13758 ret = FC_SUCCESS;
13759 }
13760 break;
13761 }
13762 }
13763
13764 return (ret);
13765 }
13766
13767
13768 /*
13769  * Bind the port driver's unsolicited and state change callbacks
13770  */
13771 static int
13772 fp_bind_callbacks(fc_local_port_t *port)
13773 {
13774 fc_fca_bind_info_t bind_info = {0};
13775 fc_fca_port_info_t *port_info;
13776 int rval = DDI_SUCCESS;
13777 uint16_t class;
13778 int node_namelen, port_namelen;
13779 char *nname = NULL, *pname = NULL;
13780
13781 ASSERT(!MUTEX_HELD(&port->fp_mutex));
13782
13783 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip,
13784 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
13785 "node-name", &nname) != DDI_PROP_SUCCESS) {
13786 FP_TRACE(FP_NHEAD1(1, 0),
13787 "fp_bind_callback fail to get node-name");
13788 }
13789 if (nname) {
13790 fc_str_to_wwn(nname, &(bind_info.port_nwwn));
13791 }
13792
13793 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip,
13794 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
13795 "port-name", &pname) != DDI_PROP_SUCCESS) {
13796 FP_TRACE(FP_NHEAD1(1, 0),
13797 "fp_bind_callback fail to get port-name");
13798 }
13799 if (pname) {
13800 fc_str_to_wwn(pname, &(bind_info.port_pwwn));
13801 }
13802
13803 if (port->fp_npiv_type == FC_NPIV_PORT) {
13804 bind_info.port_npiv = 1;
13805 }
13806
13807 	/*
13808 	 * fca_bind_port returns the FCA driver's handle for the local
13809 	 * port instance. If the port number isn't supported it returns NULL.
13810 	 * It also sets up callbacks in the FCA for various
13811 	 * things like state changes, ELS, etc.
13812 	 */
13813 bind_info.port_statec_cb = fp_statec_cb;
13814 bind_info.port_unsol_cb = fp_unsol_cb;
13815 bind_info.port_num = port->fp_port_num;
13816 bind_info.port_handle = (opaque_t)port;
13817
13818 port_info = kmem_zalloc(sizeof (*port_info), KM_SLEEP);
13819
13820 /*
13821 * Hold the port driver mutex as the callbacks are bound until the
13822 * service parameters are properly filled in (in order to be able to
13823 * properly respond to unsolicited ELS requests)
13824 */
13825 mutex_enter(&port->fp_mutex);
13826
13827 port->fp_fca_handle = port->fp_fca_tran->fca_bind_port(
13828 port->fp_fca_dip, port_info, &bind_info);
13829
13830 if (port->fp_fca_handle == NULL) {
13831 rval = DDI_FAILURE;
13832 goto exit;
13833 }
13834
13835 /*
13836 * Only fcoei will set this bit
13837 */
13838 if (port_info->pi_port_state & FC_STATE_FCA_IS_NODMA) {
13839 port->fp_soft_state |= FP_SOFT_FCA_IS_NODMA;
13840 port_info->pi_port_state &= ~(FC_STATE_FCA_IS_NODMA);
13841 }
13842
13843 port->fp_bind_state = port->fp_state = port_info->pi_port_state;
13844 port->fp_service_params = port_info->pi_login_params;
13845 port->fp_hard_addr = port_info->pi_hard_addr;
13846
13847 /* Copy from the FCA structure to the FP structure */
13848 port->fp_hba_port_attrs = port_info->pi_attrs;
13849
13850 if (port_info->pi_rnid_params.status == FC_SUCCESS) {
13851 port->fp_rnid_init = 1;
13852 bcopy(&port_info->pi_rnid_params.params,
13853 &port->fp_rnid_params,
13854 sizeof (port->fp_rnid_params));
13855 } else {
13856 port->fp_rnid_init = 0;
13857 }
13858
13859 node_namelen = strlen((char *)&port_info->pi_attrs.sym_node_name);
13860 if (node_namelen) {
13861 bcopy(&port_info->pi_attrs.sym_node_name,
13862 &port->fp_sym_node_name,
13863 node_namelen);
13864 port->fp_sym_node_namelen = node_namelen;
13865 }
13866 port_namelen = strlen((char *)&port_info->pi_attrs.sym_port_name);
13867 if (port_namelen) {
13868 bcopy(&port_info->pi_attrs.sym_port_name,
13869 &port->fp_sym_port_name,
13870 port_namelen);
13871 port->fp_sym_port_namelen = port_namelen;
13872 }
13873
13874 /* zero out the normally unused fields right away */
13875 port->fp_service_params.ls_code.mbz = 0;
13876 port->fp_service_params.ls_code.ls_code = 0;
13877 bzero(&port->fp_service_params.reserved,
13878 sizeof (port->fp_service_params.reserved));
13879
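	/*
	 * Bit 0x8000 of each class_opt word is the class validity bit
	 * from the FCA's login (service) parameters; it is used below
	 * to build the port's class-of-service mask in fp_cos.
	 */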
13880 class = port_info->pi_login_params.class_1.class_opt;
13881 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS1 : 0;
13882
13883 class = port_info->pi_login_params.class_2.class_opt;
13884 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS2 : 0;
13885
13886 class = port_info->pi_login_params.class_3.class_opt;
13887 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS3 : 0;
13888
13889 exit:
13890 if (nname) {
13891 ddi_prop_free(nname);
13892 }
13893 if (pname) {
13894 ddi_prop_free(pname);
13895 }
13896 mutex_exit(&port->fp_mutex);
13897 kmem_free(port_info, sizeof (*port_info));
13898
13899 return (rval);
13900 }
13901
13902
13903 /*
13904 * Retrieve FCA capabilities
13905 */
13906 static void
13907 fp_retrieve_caps(fc_local_port_t *port)
13908 {
13909 int rval;
13910 int ub_count;
13911 fc_fcp_dma_t fcp_dma;
13912 fc_reset_action_t action;
13913 fc_dma_behavior_t dma_behavior;
13914
13915 ASSERT(!MUTEX_HELD(&port->fp_mutex));
13916
13917 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle,
13918 FC_CAP_UNSOL_BUF, &ub_count);
13919
13920 switch (rval) {
13921 case FC_CAP_FOUND:
13922 case FC_CAP_SETTABLE:
13923 switch (ub_count) {
13924 case 0:
13925 break;
13926
13927 case -1:
13928 ub_count = fp_unsol_buf_count;
13929 break;
13930
13931 default:
13932 			/* Take 1/4th of this port's share of the FCA's buffers */
13933 ub_count =
13934 (ub_count / port->fp_fca_tran->fca_numports) >> 2;
13935 break;
13936 }
13937 break;
13938
13939 default:
13940 ub_count = 0;
13941 break;
13942 }
13943
13944 mutex_enter(&port->fp_mutex);
13945 port->fp_ub_count = ub_count;
13946 mutex_exit(&port->fp_mutex);
13947
13948 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle,
13949 FC_CAP_POST_RESET_BEHAVIOR, &action);
13950
13951 switch (rval) {
13952 case FC_CAP_FOUND:
13953 case FC_CAP_SETTABLE:
13954 switch (action) {
13955 case FC_RESET_RETURN_NONE:
13956 case FC_RESET_RETURN_ALL:
13957 case FC_RESET_RETURN_OUTSTANDING:
13958 break;
13959
13960 default:
13961 action = FC_RESET_RETURN_NONE;
13962 break;
13963 }
13964 break;
13965
13966 default:
13967 action = FC_RESET_RETURN_NONE;
13968 break;
13969 }
13970 mutex_enter(&port->fp_mutex);
13971 port->fp_reset_action = action;
13972 mutex_exit(&port->fp_mutex);
13973
13974 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle,
13975 FC_CAP_NOSTREAM_ON_UNALIGN_BUF, &dma_behavior);
13976
13977 switch (rval) {
13978 case FC_CAP_FOUND:
13979 switch (dma_behavior) {
13980 case FC_ALLOW_STREAMING:
13981 /* FALLTHROUGH */
13982 case FC_NO_STREAMING:
13983 break;
13984
13985 default:
13986 /*
13987 * If capability was found and the value
13988 * was incorrect assume the worst
13989 */
13990 dma_behavior = FC_NO_STREAMING;
13991 break;
13992 }
13993 break;
13994
13995 default:
13996 /*
13997 * If capability was not defined - allow streaming; existing
13998 * FCAs should not be affected.
13999 */
14000 dma_behavior = FC_ALLOW_STREAMING;
14001 break;
14002 }
14003 mutex_enter(&port->fp_mutex);
14004 port->fp_dma_behavior = dma_behavior;
14005 mutex_exit(&port->fp_mutex);
14006
14007 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle,
14008 FC_CAP_FCP_DMA, &fcp_dma);
14009
14010 if (rval != FC_CAP_FOUND || (fcp_dma != FC_NO_DVMA_SPACE &&
14011 fcp_dma != FC_DVMA_SPACE)) {
14012 fcp_dma = FC_DVMA_SPACE;
14013 }
14014
14015 mutex_enter(&port->fp_mutex);
14016 port->fp_fcp_dma = fcp_dma;
14017 mutex_exit(&port->fp_mutex);
14018 }
14019
14020
14021 /*
14022 * Handle Domain, Area changes in the Fabric.
14023 */
14024 static void
14025 fp_validate_area_domain(fc_local_port_t *port, uint32_t id, uint32_t mask,
14026 job_request_t *job, int sleep)
14027 {
14028 #ifdef DEBUG
14029 uint32_t dcnt;
14030 #endif
14031 int rval;
14032 int send;
14033 int index;
14034 int listindex;
14035 int login;
14036 int job_flags;
14037 char ww_name[17];
14038 uint32_t d_id;
14039 uint32_t count;
14040 fctl_ns_req_t *ns_cmd;
14041 fc_portmap_t *list;
14042 fc_orphan_t *orp;
14043 fc_orphan_t *norp;
14044 fc_orphan_t *prev;
14045 fc_remote_port_t *pd;
14046 fc_remote_port_t *npd;
14047 struct pwwn_hash *head;
14048
14049 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
14050 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
14051 0, sleep);
14052 if (ns_cmd == NULL) {
14053 mutex_enter(&port->fp_mutex);
14054 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
14055 --port->fp_rscn_count;
14056 }
14057 mutex_exit(&port->fp_mutex);
14058
14059 return;
14060 }
14061 ns_cmd->ns_cmd_code = NS_GID_PN;
14062
14063 /*
14064 * We need to get a new count of devices from the
14065 * name server, which will also create any new devices
14066 * as needed.
14067 */
14068
14069 (void) fp_ns_get_devcount(port, job, 1, sleep);
14070
14071 FP_TRACE(FP_NHEAD1(3, 0),
14072 "fp_validate_area_domain: get_devcount found %d devices",
14073 port->fp_total_devices);
14074
14075 mutex_enter(&port->fp_mutex);
14076
14077 for (count = index = 0; index < pwwn_table_size; index++) {
14078 head = &port->fp_pwwn_table[index];
14079 pd = head->pwwn_head;
14080 while (pd != NULL) {
14081 mutex_enter(&pd->pd_mutex);
14082 if (pd->pd_flags != PD_ELS_IN_PROGRESS) {
14083 if ((pd->pd_port_id.port_id & mask) == id &&
14084 pd->pd_recepient == PD_PLOGI_INITIATOR) {
14085 count++;
14086 pd->pd_type = PORT_DEVICE_OLD;
14087 pd->pd_flags = PD_ELS_MARK;
14088 }
14089 }
14090 mutex_exit(&pd->pd_mutex);
14091 pd = pd->pd_wwn_hnext;
14092 }
14093 }
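	/*
	 * At this point count holds the number of initiator-mode PDs in
	 * the affected area/domain that were just marked (PD_ELS_MARK)
	 * for revalidation.
	 */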
14094
14095 #ifdef DEBUG
14096 dcnt = count;
14097 #endif /* DEBUG */
14098
14099 /*
14100 * Since port->fp_orphan_count is declared an 'int' it is
14101 * theoretically possible that the count could go negative.
14102 *
14103 * This would be bad and if that happens we really do want
14104 * to know.
14105 */
14106
14107 ASSERT(port->fp_orphan_count >= 0);
14108
14109 count += port->fp_orphan_count;
14110
14111 /*
14112 * We add the port->fp_total_devices value to the count
14113 * in the case where our port is newly attached. This is
14114 * because we haven't done any discovery and we don't have
14115 * any orphans in the port's orphan list. If we do not do
14116 * this addition to count then we won't alloc enough kmem
14117 * to do discovery with.
14118 */
14119
14120 if (count == 0) {
14121 count += port->fp_total_devices;
14122 FP_TRACE(FP_NHEAD1(3, 0), "fp_validate_area_domain: "
14123 "0x%x orphans found, using 0x%x",
14124 port->fp_orphan_count, count);
14125 }
14126
14127 mutex_exit(&port->fp_mutex);
14128
14129 /*
14130 * Allocate the change list
14131 */
14132
14133 list = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep);
14134 if (list == NULL) {
14135 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
14136 " Not enough memory to service RSCNs"
14137 " for %d ports, continuing...", count);
14138
14139 fctl_free_ns_cmd(ns_cmd);
14140
14141 mutex_enter(&port->fp_mutex);
14142 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
14143 --port->fp_rscn_count;
14144 }
14145 mutex_exit(&port->fp_mutex);
14146
14147 return;
14148 }
14149
14150 /*
14151 * Attempt to validate or invalidate the devices that were
14152 * already in the pwwn hash table.
14153 */
14154
14155 mutex_enter(&port->fp_mutex);
14156 for (listindex = 0, index = 0; index < pwwn_table_size; index++) {
14157 head = &port->fp_pwwn_table[index];
14158 npd = head->pwwn_head;
14159
14160 while ((pd = npd) != NULL) {
14161 npd = pd->pd_wwn_hnext;
14162
14163 mutex_enter(&pd->pd_mutex);
14164 if ((pd->pd_port_id.port_id & mask) == id &&
14165 pd->pd_flags == PD_ELS_MARK) {
14166 la_wwn_t *pwwn;
14167
14168 job->job_result = FC_SUCCESS;
14169
14170 ((ns_req_gid_pn_t *)
14171 (ns_cmd->ns_cmd_buf))->pwwn =
14172 pd->pd_port_name;
14173
14174 pwwn = &pd->pd_port_name;
14175 d_id = pd->pd_port_id.port_id;
14176
14177 mutex_exit(&pd->pd_mutex);
14178 mutex_exit(&port->fp_mutex);
14179
14180 rval = fp_ns_query(port, ns_cmd, job, 1,
14181 sleep);
14182 if (rval != FC_SUCCESS) {
14183 fc_wwn_to_str(pwwn, ww_name);
14184
14185 FP_TRACE(FP_NHEAD1(3, 0),
14186 "AREA RSCN: PD disappeared; "
14187 "d_id=%x, PWWN=%s", d_id, ww_name);
14188
14189 FP_TRACE(FP_NHEAD2(9, 0),
14190 "N_x Port with D_ID=%x,"
14191 " PWWN=%s disappeared from fabric",
14192 d_id, ww_name);
14193
14194 fp_fillout_old_map(list + listindex++,
14195 pd, 1);
14196 } else {
14197 fctl_copy_portmap(list + listindex++,
14198 pd);
14199
14200 mutex_enter(&pd->pd_mutex);
14201 pd->pd_flags = PD_ELS_IN_PROGRESS;
14202 mutex_exit(&pd->pd_mutex);
14203 }
14204
14205 mutex_enter(&port->fp_mutex);
14206 } else {
14207 mutex_exit(&pd->pd_mutex);
14208 }
14209 }
14210 }
14211
14212 mutex_exit(&port->fp_mutex);
14213
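/*
 * Every device marked in the first pass should have produced exactly
 * one change-list entry; dcnt is only maintained (and this ASSERT is
 * only compiled) in DEBUG kernels.
 */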
14214 ASSERT(listindex == dcnt);
14215
14216 job->job_counter = listindex;
14217 job_flags = job->job_flags;
14218 job->job_flags |= JOB_TYPE_FP_ASYNC;
14219
14220 /*
14221 * Login (if we were the initiator) or validate devices in the
14222 * port map.
14223 */
14224
14225 for (index = 0; index < listindex; index++) {
14226 pd = list[index].map_pd;
14227
14228 mutex_enter(&pd->pd_mutex);
14229 ASSERT((pd->pd_port_id.port_id & mask) == id);
14230
14231 if (pd->pd_flags != PD_ELS_IN_PROGRESS) {
14232 ASSERT(pd->pd_type == PORT_DEVICE_OLD);
14233 mutex_exit(&pd->pd_mutex);
14234 fp_jobdone(job);
14235 continue;
14236 }
14237
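/*
 * Capture the login state, the PLOGI role and the D_ID before
 * dropping pd_mutex for the calls below.
 */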
14238 login = (pd->pd_state == PORT_DEVICE_LOGGED_IN) ? 1 : 0;
14239 send = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0;
14240 d_id = pd->pd_port_id.port_id;
14241 mutex_exit(&pd->pd_mutex);
14242
14243 if ((d_id & mask) == id && send) {
14244 if (login) {
14245 FP_TRACE(FP_NHEAD1(6, 0),
14246 "RSCN and PLOGI request;"
14247 " pd=%p, job=%p d_id=%x, index=%d", pd,
14248 job, d_id, index);
14249
14250 rval = fp_port_login(port, d_id, job,
14251 FP_CMD_PLOGI_RETAIN, sleep, pd, NULL);
14252 if (rval != FC_SUCCESS) {
14253 mutex_enter(&pd->pd_mutex);
14254 pd->pd_flags = PD_IDLE;
14255 mutex_exit(&pd->pd_mutex);
14256
14257 job->job_result = rval;
14258 fp_jobdone(job);
14259 }
14260 FP_TRACE(FP_NHEAD1(1, 0),
14261 "PLOGI succeeded:no skip(1) for "
14262 "D_ID %x", d_id);
14263 list[index].map_flags |=
14264 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY;
14265 } else {
14266 FP_TRACE(FP_NHEAD1(6, 0), "RSCN and NS request;"
14267 " pd=%p, job=%p d_id=%x, index=%d", pd,
14268 job, d_id, index);
14269
14270 rval = fp_ns_validate_device(port, pd, job,
14271 0, sleep);
14272 if (rval != FC_SUCCESS) {
14273 fp_jobdone(job);
14274 }
14275 mutex_enter(&pd->pd_mutex);
14276 pd->pd_flags = PD_IDLE;
14277 mutex_exit(&pd->pd_mutex);
14278 }
14279 } else {
14280 FP_TRACE(FP_NHEAD1(6, 0),
14281 "RSCN and NO request sent; pd=%p,"
14282 " d_id=%x, index=%d", pd, d_id, index);
14283
14284 mutex_enter(&pd->pd_mutex);
14285 pd->pd_flags = PD_IDLE;
14286 mutex_exit(&pd->pd_mutex);
14287
14288 fp_jobdone(job);
14289 }
14290 }
14291
14292 if (listindex) {
14293 fctl_jobwait(job);
14294 }
14295 job->job_flags = job_flags;
14296
14297 /*
14298 * Orphan list validation.
14299 */
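/*
 * For each orphan, issue a GID_PN query to the name server.  If the
 * PWWN has reappeared, create a remote port for the returned D_ID,
 * unlink the orphan and append the device to the change list.
 * fp_mutex is dropped across the query and reacquired before moving
 * to the next entry.
 */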
14300 mutex_enter(&port->fp_mutex);
14301 for (prev = NULL, orp = port->fp_orphan_list; port->fp_orphan_count &&
14302 orp != NULL; orp = norp) {
14303 norp = orp->orp_next;
14304 mutex_exit(&port->fp_mutex);
14305
14306 job->job_counter = 1;
14307 job->job_result = FC_SUCCESS;
14308 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0);
14309
14310 ((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = orp->orp_pwwn;
14311
14312 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0;
14313 ((ns_resp_gid_pn_t *)
14314 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0;
14315
14316 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);
14317 if (rval == FC_SUCCESS) {
14318 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
14319 pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP);
14320 if (pd != NULL) {
14321 fc_wwn_to_str(&orp->orp_pwwn, ww_name);
14322
14323 FP_TRACE(FP_NHEAD1(6, 0),
14324 "RSCN and ORPHAN list "
14325 "success; d_id=%x, PWWN=%s", d_id, ww_name);
14326
14327 FP_TRACE(FP_NHEAD2(6, 0),
14328 "N_x Port with D_ID=%x, PWWN=%s reappeared"
14329 " in fabric", d_id, ww_name);
14330
14331 mutex_enter(&port->fp_mutex);
14332 if (prev) {
14333 prev->orp_next = orp->orp_next;
14334 } else {
14335 ASSERT(orp == port->fp_orphan_list);
14336 port->fp_orphan_list = orp->orp_next;
14337 }
14338 port->fp_orphan_count--;
14339 mutex_exit(&port->fp_mutex);
14340
14341 kmem_free(orp, sizeof (*orp));
14342 fctl_copy_portmap(list + listindex++, pd);
14343 } else {
14344 prev = orp;
14345 }
14346 } else {
14347 prev = orp;
14348 }
14349 mutex_enter(&port->fp_mutex);
14350 }
14351 mutex_exit(&port->fp_mutex);
14352
14353 /*
14354 * One more pass through the list to delist old devices from
14355 * the d_id and pwwn tables and possibly add to the orphan list.
14356 */
14357
14358 for (index = 0; index < listindex; index++) {
14359 pd = list[index].map_pd;
14360 ASSERT(pd != NULL);
14361
14362 /*
14363 * Refresh each entry with the PLOGI results from above; for
14364 * entries added by the orphan-list NS validation this copy is
14365 * redundant.  Take care to preserve
14366 * PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY where appropriate, as
14367 * fctl_copy_portmap() will clear map_flags.
14368 */
14369 if (list[index].map_flags &
14370 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) {
14371 fctl_copy_portmap(list + index, pd);
14372 list[index].map_flags |=
14373 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY;
14374 } else {
14375 fctl_copy_portmap(list + index, pd);
14376 }
14377
14378 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN "
14379 "results; pd=%p, d_id=%x pwwn=%x %x %x %x %x %x %x %x",
14380 pd, pd->pd_port_id.port_id,
14381 pd->pd_port_name.raw_wwn[0],
14382 pd->pd_port_name.raw_wwn[1],
14383 pd->pd_port_name.raw_wwn[2],
14384 pd->pd_port_name.raw_wwn[3],
14385 pd->pd_port_name.raw_wwn[4],
14386 pd->pd_port_name.raw_wwn[5],
14387 pd->pd_port_name.raw_wwn[6],
14388 pd->pd_port_name.raw_wwn[7]);
14389
14390 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN "
14391 "results continued, pd=%p type=%x, flags=%x, state=%x",
14392 pd, pd->pd_type, pd->pd_flags, pd->pd_state);
14393
14394 mutex_enter(&pd->pd_mutex);
14395 if (pd->pd_type == PORT_DEVICE_OLD) {
14396 int initiator;
14397
14398 pd->pd_flags = PD_IDLE;
14399 initiator = (pd->pd_recepient ==
14400 PD_PLOGI_INITIATOR) ? 1 : 0;
14401
14402 mutex_exit(&pd->pd_mutex);
14403
14404 mutex_enter(&port->fp_mutex);
14405 mutex_enter(&pd->pd_mutex);
14406
14407 pd->pd_state = PORT_DEVICE_INVALID;
14408 fctl_delist_did_table(port, pd);
14409 fctl_delist_pwwn_table(port, pd);
14410
14411 mutex_exit(&pd->pd_mutex);
14412 mutex_exit(&port->fp_mutex);
14413
14414 if (initiator) {
14415 (void) fctl_add_orphan(port, pd, sleep);
14416 }
14417 list[index].map_pd = pd;
14418 } else {
14419 ASSERT(pd->pd_flags == PD_IDLE);
14420 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
14421 /*
14422 * Reset LOGO tolerance to zero
14423 */
14424 fctl_tc_reset(&pd->pd_logo_tc);
14425 }
14426 mutex_exit(&pd->pd_mutex);
14427 }
14428 }
14429
14430 if (ns_cmd) {
14431 fctl_free_ns_cmd(ns_cmd);
14432 }
14433 if (listindex) {
14434 (void) fp_ulp_devc_cb(port, list, listindex, count,
14435 sleep, 0);
14436 } else {
14437 kmem_free(list, sizeof (*list) * count);
14438
14439 mutex_enter(&port->fp_mutex);
14440 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) {
14441 --port->fp_rscn_count;
14442 }
14443 mutex_exit(&port->fp_mutex);
14444 }
14445 }
14446
14447
14448 /*
14449 * Work hard to make sense out of an RSCN page.
14450 */
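/*
 * The affected D_ID is looked up with a GPN_ID name server query.  If
 * the query fails, the device has disappeared from the fabric;
 * otherwise the result is reconciled against the remote ports already
 * known by D_ID and by PWWN, which yields the four cases traced below
 * (ONE: no change, TWO: entirely new device, THREE: known PWWN with a
 * new D_ID, WEIRD: known D_ID with a new PWWN).
 */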
14451 static void
14452 fp_validate_rscn_page(fc_local_port_t *port, fc_affected_id_t *page,
14453 job_request_t *job, fctl_ns_req_t *ns_cmd, fc_portmap_t *listptr,
14454 int *listindex, int sleep)
14455 {
14456 int rval;
14457 char ww_name[17];
14458 la_wwn_t *pwwn;
14459 fc_remote_port_t *pwwn_pd;
14460 fc_remote_port_t *did_pd;
14461
14462 did_pd = fctl_get_remote_port_by_did(port, page->aff_d_id);
14463
14464 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; "
14465 "port=%p, d_id=%x, pd=%p, rscn_count:0x%x", port, page->aff_d_id,
14466 did_pd, (uint32_t)(uintptr_t)job->job_cb_arg);
14467
14468 if (did_pd != NULL) {
14469 mutex_enter(&did_pd->pd_mutex);
14470 if (did_pd->pd_flags != PD_IDLE) {
14471 mutex_exit(&did_pd->pd_mutex);
14472 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page: "
14473 "PD is BUSY; port=%p, d_id=%x, pd=%p",
14474 port, page->aff_d_id, did_pd);
14475 return;
14476 }
14477 did_pd->pd_flags = PD_ELS_IN_PROGRESS;
14478 mutex_exit(&did_pd->pd_mutex);
14479 }
14480
14481 job->job_counter = 1;
14482
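/*
 * Ask the name server (GPN_ID) which Port WWN is currently bound to
 * the affected D_ID.
 */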
14483 pwwn = &((ns_resp_gpn_id_t *)ns_cmd->ns_data_buf)->pwwn;
14484
14485 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.port_id = page->aff_d_id;
14486 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.priv_lilp_posit = 0;
14487
14488 bzero(ns_cmd->ns_data_buf, sizeof (la_wwn_t));
14489 rval = fp_ns_query(port, ns_cmd, job, 1, sleep);
14490
14491 FP_TRACE(FP_NHEAD1(1, 0), "NS Query Response for D_ID page; rev=%x,"
14492 " in_id=%x, cmdrsp=%x, reason=%x, expln=%x",
14493 ns_cmd->ns_resp_hdr.ct_rev, ns_cmd->ns_resp_hdr.ct_inid,
14494 ns_cmd->ns_resp_hdr.ct_cmdrsp, ns_cmd->ns_resp_hdr.ct_reason,
14495 ns_cmd->ns_resp_hdr.ct_expln);
14496
14497 job->job_counter = 1;
14498
14499 if (rval != FC_SUCCESS || fctl_is_wwn_zero(pwwn) == FC_SUCCESS) {
14500 /*
14501 * The GPN_ID query failed or returned a zero PWWN, which
14502 * means the D_ID has disappeared from the fabric.
14503 */
14504 if (did_pd == NULL) {
14505 FP_TRACE(FP_NHEAD1(1, 0), "RSCN with D_ID page;"
14506 " NULL PD disappeared, rval=%x", rval);
14507 return;
14508 }
14509
14510 fc_wwn_to_str(&did_pd->pd_port_name, ww_name);
14511
14512 (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
14513 (uint32_t)(uintptr_t)job->job_cb_arg;
14514
14515 fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0);
14516
14517 FP_TRACE(FP_NHEAD1(3, 0), "RSCN: PD disappeared; "
14518 "d_id=%x, PWWN=%s", page->aff_d_id, ww_name);
14519
14520 FP_TRACE(FP_NHEAD2(9, 0),
14521 "GPN_ID for D_ID=%x failed", page->aff_d_id);
14522
14523 FP_TRACE(FP_NHEAD2(9, 0),
14524 "N_x Port with D_ID=%x, PWWN=%s disappeared from"
14525 " fabric", page->aff_d_id, ww_name);
14526
14527 mutex_enter(&did_pd->pd_mutex);
14528 did_pd->pd_flags = PD_IDLE;
14529 mutex_exit(&did_pd->pd_mutex);
14530
14531 FP_TRACE(FP_NHEAD1(3, 0), "RSCN with D_ID (%x) page; "
14532 "PD disappeared, pd=%p", page->aff_d_id, did_pd);
14533
14534 return;
14535 }
14536
14537 pwwn_pd = fctl_get_remote_port_by_pwwn(port, pwwn);
14538
14539 if (did_pd != NULL && pwwn_pd != NULL && did_pd == pwwn_pd) {
14540 /*
14541 * No change: the D_ID and the PWWN still refer to the same remote
14542 * port.  Redo the PLOGI, add the device to the portmap handed to
14543 * the ULPs and return.  Note: when RSCNs arrive with per-page
14544 * states, the need for a PLOGI can be determined precisely.
14545 */
14546 mutex_enter(&pwwn_pd->pd_mutex);
14547 pwwn_pd->pd_type = PORT_DEVICE_NOCHANGE;
14548 mutex_exit(&pwwn_pd->pd_mutex);
14549
14550 (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
14551 (uint32_t)(uintptr_t)job->job_cb_arg;
14552
14553 fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd);
14554
14555 mutex_enter(&pwwn_pd->pd_mutex);
14556 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) ||
14557 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) {
14558 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name);
14559 mutex_exit(&pwwn_pd->pd_mutex);
14560
14561 rval = fp_port_login(port, page->aff_d_id, job,
14562 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL);
14563 if (rval == FC_SUCCESS) {
14564 fp_jobwait(job);
14565 rval = job->job_result;
14566
14567 /*
14568 * Reset the LOGO tolerance to zero; also note that
14569 * we are the PLOGI initiator now.
14570 */
14571 mutex_enter(&pwwn_pd->pd_mutex);
14572 fctl_tc_reset(&pwwn_pd->pd_logo_tc);
14573 pwwn_pd->pd_recepient = PD_PLOGI_INITIATOR;
14574 mutex_exit(&pwwn_pd->pd_mutex);
14575 }
14576
14577 if (rval == FC_SUCCESS) {
14578 struct fc_portmap *map =
14579 listptr + *listindex - 1;
14580
14581 FP_TRACE(FP_NHEAD1(1, 0),
14582 "PLOGI succeeded: no skip(2)"
14583 " for D_ID %x", page->aff_d_id);
14584 map->map_flags |=
14585 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY;
14586 } else {
14587 FP_TRACE(FP_NHEAD2(9, rval),
14588 "PLOGI to D_ID=%x failed", page->aff_d_id);
14589
14590 FP_TRACE(FP_NHEAD2(9, 0),
14591 "N_x Port with D_ID=%x, PWWN=%s"
14592 " disappeared from fabric",
14593 page->aff_d_id, ww_name);
14594
14595 fp_fillout_old_map(listptr +
14596 *listindex - 1, pwwn_pd, 0);
14597 }
14598 } else {
14599 mutex_exit(&pwwn_pd->pd_mutex);
14600 }
14601
14602 mutex_enter(&did_pd->pd_mutex);
14603 did_pd->pd_flags = PD_IDLE;
14604 mutex_exit(&did_pd->pd_mutex);
14605
14606 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; "
14607 "Case ONE, rval=%x, result=%x pd=%p", page->aff_d_id, rval,
14608 job->job_result, pwwn_pd);
14609
14610 return;
14611 }
14612
14613 if (did_pd == NULL && pwwn_pd == NULL) {
14614
14615 fc_orphan_t *orp = NULL;
14616 fc_orphan_t *norp = NULL;
14617 fc_orphan_t *prev = NULL;
14618
14619 /*
14620 * Search the orphan list for this PWWN before creating a new remote port.
14621 */
14622
14623 mutex_enter(&port->fp_mutex);
14624 if (port->fp_orphan_count) {
14625
14626 for (orp = port->fp_orphan_list; orp; orp = norp) {
14627 norp = orp->orp_next;
14628
14629 if (fctl_wwn_cmp(&orp->orp_pwwn, pwwn) != 0) {
14630 prev = orp;
14631 continue;
14632 }
14633
14634 if (prev) {
14635 prev->orp_next = orp->orp_next;
14636 } else {
14637 ASSERT(orp ==
14638 port->fp_orphan_list);
14639 port->fp_orphan_list =
14640 orp->orp_next;
14641 }
14642 port->fp_orphan_count--;
14643 break;
14644 }
14645 }
14646
14647 mutex_exit(&port->fp_mutex);
14648 pwwn_pd = fp_create_remote_port_by_ns(port,
14649 page->aff_d_id, sleep);
14650
14651 if (pwwn_pd != NULL) {
14652
14653 if (orp) {
14654 fc_wwn_to_str(&orp->orp_pwwn,
14655 ww_name);
14656
14657 FP_TRACE(FP_NHEAD2(9, 0),
14658 "N_x Port with D_ID=%x,"
14659 " PWWN=%s reappeared in fabric",
14660 page->aff_d_id, ww_name);
14661
14662 kmem_free(orp, sizeof (*orp));
14663 }
14664
14665 (listptr + *listindex)->
14666 map_rscn_info.ulp_rscn_count =
14667 (uint32_t)(uintptr_t)job->job_cb_arg;
14668
14669 fctl_copy_portmap(listptr +
14670 (*listindex)++, pwwn_pd);
14671 }
14672
14673 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; "
14674 "Case TWO", page->aff_d_id);
14675
14676 return;
14677 }
14678
14679 if (pwwn_pd != NULL && did_pd == NULL) {
14680 uint32_t old_d_id;
14681 uint32_t d_id = page->aff_d_id;
14682
14683 /*
14684 * The Port WWN is known but under a different D_ID: the device
14685 * has moved.  Take the port device off the D_ID table, re-enlist
14686 * it under its new D_ID, and redo the PLOGI if it was already
14687 * logged in.
14688 */
14689 mutex_enter(&port->fp_mutex);
14690 mutex_enter(&pwwn_pd->pd_mutex);
14691
14692 old_d_id = pwwn_pd->pd_port_id.port_id;
14693
14694 fctl_delist_did_table(port, pwwn_pd);
14695
14696 (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
14697 (uint32_t)(uintptr_t)job->job_cb_arg;
14698
14699 fp_fillout_changed_map(listptr + (*listindex)++, pwwn_pd,
14700 &d_id, NULL);
14701 fctl_enlist_did_table(port, pwwn_pd);
14702
14703 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page;"
14704 " Case THREE, pd=%p,"
14705 " state=%x", pwwn_pd, pwwn_pd->pd_state);
14706
14707 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) ||
14708 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) {
14709 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name);
14710
14711 mutex_exit(&pwwn_pd->pd_mutex);
14712 mutex_exit(&port->fp_mutex);
14713
14714 FP_TRACE(FP_NHEAD2(9, 0),
14715 "N_x Port with D_ID=%x, PWWN=%s has a new"
14716 " D_ID=%x now", old_d_id, ww_name, d_id);
14717
14718 rval = fp_port_login(port, page->aff_d_id, job,
14719 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL);
14720 if (rval == FC_SUCCESS) {
14721 fp_jobwait(job);
14722 rval = job->job_result;
14723 }
14724
14725 if (rval != FC_SUCCESS) {
14726 fp_fillout_old_map(listptr +
14727 *listindex - 1, pwwn_pd, 0);
14728 }
14729 } else {
14730 mutex_exit(&pwwn_pd->pd_mutex);
14731 mutex_exit(&port->fp_mutex);
14732 }
14733
14734 return;
14735 }
14736
14737 if (pwwn_pd == NULL && did_pd != NULL) {
14738 fc_portmap_t *ptr;
14739 uint32_t len = 1;
14740 char old_ww_name[17];
14741
14742 mutex_enter(&did_pd->pd_mutex);
14743 fc_wwn_to_str(&did_pd->pd_port_name, old_ww_name);
14744 mutex_exit(&did_pd->pd_mutex);
14745
14746 fc_wwn_to_str(pwwn, ww_name);
14747
14748 (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
14749 (uint32_t)(uintptr_t)job->job_cb_arg;
14750
14751 /*
14752 * The D_ID now reports a new Port WWN.  Mark the existing port
14753 * device as old and report the new PWWN and D_ID combination as
14754 * a new device.
14755 */
14756 fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0);
14757
14758 FP_TRACE(FP_NHEAD2(9, 0),
14759 "N_x Port with D_ID=%x, PWWN=%s has a new PWWN=%s now",
14760 page->aff_d_id, old_ww_name, ww_name);
14761
14762 (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
14763 (uint32_t)(uintptr_t)job->job_cb_arg;
14764
14765 ptr = listptr + (*listindex)++;
14766
14767 job->job_counter = 1;
14768
14769 if (fp_ns_getmap(port, job, &ptr, &len,
14770 page->aff_d_id - 1) != FC_SUCCESS) {
14771 (*listindex)--;
14772 }
14773
14774 mutex_enter(&did_pd->pd_mutex);
14775 did_pd->pd_flags = PD_IDLE;
14776 mutex_exit(&did_pd->pd_mutex);
14777
14778 return;
14779 }
14780
14781 /*
14782 * Both the Port WWN and the D_ID are known, but they belong to
14783 * different port devices.  Take the D_ID's port device off the
14784 * Port WWN table, fix it up with the new Port WWN and re-enlist
14785 * it; in the meantime, mark the port device that owned the old
14786 * Port WWN as OLD and invalid.
14787 */
14788 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; Case WEIRD, pwwn_pd=%p,"
14789 " did_pd=%p", pwwn_pd, did_pd);
14790
14791 mutex_enter(&port->fp_mutex);
14792 mutex_enter(&pwwn_pd->pd_mutex);
14793
14794 pwwn_pd->pd_type = PORT_DEVICE_OLD;
14795 pwwn_pd->pd_state = PORT_DEVICE_INVALID;
14796 fctl_delist_did_table(port, pwwn_pd);
14797 fctl_delist_pwwn_table(port, pwwn_pd);
14798
14799 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued,"
14800 " pwwn-d_id=%x pwwn-wwn=%x %x %x %x %x %x %x %x",
14801 pwwn_pd->pd_port_id.port_id,
14802
14803 pwwn_pd->pd_port_name.raw_wwn[0],
14804 pwwn_pd->pd_port_name.raw_wwn[1],
14805 pwwn_pd->pd_port_name.raw_wwn[2],
14806 pwwn_pd->pd_port_name.raw_wwn[3],
14807 pwwn_pd->pd_port_name.raw_wwn[4],
14808 pwwn_pd->pd_port_name.raw_wwn[5],
14809 pwwn_pd->pd_port_name.raw_wwn[6],
14810 pwwn_pd->pd_port_name.raw_wwn[7]);
14811
14812 mutex_exit(&pwwn_pd->pd_mutex);
14813 mutex_exit(&port->fp_mutex);
14814
14815 (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
14816 (uint32_t)(uintptr_t)job->job_cb_arg;
14817
14818 fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd);
14819
14820 mutex_enter(&port->fp_mutex);
14821 mutex_enter(&did_pd->pd_mutex);
14822
14823 fctl_delist_pwwn_table(port, did_pd);
14824
14825 (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
14826 (uint32_t)(uintptr_t)job->job_cb_arg;
14827
14828 fp_fillout_changed_map(listptr + (*listindex)++, did_pd, NULL, pwwn);
14829 fctl_enlist_pwwn_table(port, did_pd);
14830
14831 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued,"
14832 " d_id=%x, state=%x, did-wwn=%x %x %x %x %x %x %x %x",
14833 did_pd->pd_port_id.port_id, did_pd->pd_state,
14834
14835 did_pd->pd_port_name.raw_wwn[0],
14836 did_pd->pd_port_name.raw_wwn[1],
14837 did_pd->pd_port_name.raw_wwn[2],
14838 did_pd->pd_port_name.raw_wwn[3],
14839 did_pd->pd_port_name.raw_wwn[4],
14840 did_pd->pd_port_name.raw_wwn[5],
14841 did_pd->pd_port_name.raw_wwn[6],
14842 did_pd->pd_port_name.raw_wwn[7]);
14843
14844 if ((did_pd->pd_state == PORT_DEVICE_LOGGED_IN) ||
14845 (did_pd->pd_aux_flags & PD_LOGGED_OUT)) {
14846 mutex_exit(&did_pd->pd_mutex);
14847 mutex_exit(&port->fp_mutex);
14848
14849 rval = fp_port_login(port, page->aff_d_id, job,
14850 FP_CMD_PLOGI_RETAIN, sleep, did_pd, NULL);
14851 if (rval == FC_SUCCESS) {
14852 fp_jobwait(job);
14853 if (job->job_result != FC_SUCCESS) {
14854 fp_fillout_old_map(listptr +
14855 *listindex - 1, did_pd, 0);
14856 }
14857 } else {
14858 fp_fillout_old_map(listptr + *listindex - 1, did_pd, 0);
14859 }
14860 } else {
14861 mutex_exit(&did_pd->pd_mutex);
14862 mutex_exit(&port->fp_mutex);
14863 }
14864
14865 mutex_enter(&did_pd->pd_mutex);
14866 did_pd->pd_flags = PD_IDLE;
14867 mutex_exit(&did_pd->pd_mutex);
14868 }
14869
14870
14871 /*
14872 * Check with NS for the presence of this port WWN
14873 */
14874 static int
14875 fp_ns_validate_device(fc_local_port_t *port, fc_remote_port_t *pd,
14876 job_request_t *job, int polled, int sleep)
14877 {
14878 la_wwn_t pwwn;
14879 uint32_t flags;
14880 fctl_ns_req_t *ns_cmd;
14881
14882 flags = FCTL_NS_VALIDATE_PD | ((polled) ? 0: FCTL_NS_ASYNC_REQUEST);
14883 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
14884 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
14885 flags, sleep);
14886 if (ns_cmd == NULL) {
14887 return (FC_NOMEM);
14888 }
14889
14890 mutex_enter(&pd->pd_mutex);
14891 pwwn = pd->pd_port_name;
14892 mutex_exit(&pd->pd_mutex);
14893
14894 ns_cmd->ns_cmd_code = NS_GID_PN;
14895 ns_cmd->ns_pd = pd;
14896 ((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = pwwn;
14897 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0;
14898 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0;
14899
14900 return (fp_ns_query(port, ns_cmd, job, polled, sleep));
14901 }
14902
14903
14904 /*
14905 * Sanity check the LILP map returned by the FCA
14906 */
14907 static int
14908 fp_validate_lilp_map(fc_lilpmap_t *lilp_map)
14909 {
14910 int count;
14911
14912 if (lilp_map->lilp_length == 0) {
14913 return (FC_FAILURE);
14914 }
14915
14916 for (count = 0; count < lilp_map->lilp_length; count++) {
14917 if (fp_is_valid_alpa(lilp_map->lilp_alpalist[count]) !=
14918 FC_SUCCESS) {
14919 return (FC_FAILURE);
14920 }
14921 }
14922
14923 return (FC_SUCCESS);
14924 }
14925
14926
14927 /*
14928 * Sanity check if the AL_PA is a valid address
14929 */
14930 static int
14931 fp_is_valid_alpa(uchar_t al_pa)
14932 {
14933 int count;
14934
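/*
 * An AL_PA of 0 is accepted in addition to the entries in the
 * fp_valid_alpas[] table.
 */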
14935 for (count = 0; count < sizeof (fp_valid_alpas); count++) {
14936 if (al_pa == fp_valid_alpas[count] || al_pa == 0) {
14937 return (FC_SUCCESS);
14938 }
14939 }
14940
14941 return (FC_FAILURE);
14942 }
14943
14944
14945 /*
14946 * Post unsolicited callbacks to ULPs
14947 */
14948 static void
14949 fp_ulp_unsol_cb(void *arg)
14950 {
14951 fp_unsol_spec_t *ub_spec = (fp_unsol_spec_t *)arg;
14952
14953 fctl_ulp_unsol_cb(ub_spec->port, ub_spec->buf,
14954 ub_spec->buf->ub_frame.type);
14955 kmem_free(ub_spec, sizeof (*ub_spec));
14956 }
14957
14958
14959 /*
14960 * Perform message reporting in a consistent manner.  Unless there
14961 * is a strong reason not to (which is rare), all message reporting
14962 * in this driver should go through this function.
14963 */
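/*
 * A representative call, mirroring the usage elsewhere in this file
 * (fc_errno and pkt may be 0/NULL when there is nothing to decode):
 *
 *	fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
 *	    "Not enough memory to service RSCNs for %d ports", count);
 */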
14964 static void
14965 fp_printf(fc_local_port_t *port, int level, fp_mesg_dest_t dest, int fc_errno,
14966 fc_packet_t *pkt, const char *fmt, ...)
14967 {
14968 caddr_t buf;
14969 va_list ap;
14970
14971 switch (level) {
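/*
 * Filter by the port's verbosity mask: CE_NOTE messages require
 * FP_WARNING_MESSAGES and CE_WARN messages require FP_FATAL_MESSAGES.
 */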
14972 case CE_NOTE:
14973 if ((port->fp_verbose & FP_WARNING_MESSAGES) == 0) {
14974 return;
14975 }
14976 break;
14977
14978 case CE_WARN:
14979 if ((port->fp_verbose & FP_FATAL_MESSAGES) == 0) {
14980 return;
14981 }
14982 break;
14983 }
14984
14985 buf = kmem_zalloc(256, KM_NOSLEEP);
14986 if (buf == NULL) {
14987 return;
14988 }
14989
14990 (void) sprintf(buf, "fp(%d): ", port->fp_instance);
14991
14992 va_start(ap, fmt);
14993 (void) vsprintf(buf + strlen(buf), fmt, ap);
14994 va_end(ap);
14995
14996 if (fc_errno) {
14997 char *errmsg;
14998
14999 (void) fc_ulp_error(fc_errno, &errmsg);
15000 (void) sprintf(buf + strlen(buf), " FC Error=%s", errmsg);
15001 } else {
15002 if (pkt) {
15003 caddr_t state, reason, action, expln;
15004
15005 (void) fc_ulp_pkt_error(pkt, &state, &reason,
15006 &action, &expln);
15007
15008 (void) sprintf(buf + strlen(buf),
15009 " state=%s, reason=%s", state, reason);
15010
15011 if (pkt->pkt_resp_resid) {
15012 (void) sprintf(buf + strlen(buf),
15013 " resp resid=%x\n", pkt->pkt_resp_resid);
15014 }
15015 }
15016 }
15017
15018 switch (dest) {
15019 case FP_CONSOLE_ONLY:
15020 cmn_err(level, "^%s", buf);
15021 break;
15022
15023 case FP_LOG_ONLY:
15024 cmn_err(level, "!%s", buf);
15025 break;
15026
15027 default:
15028 cmn_err(level, "%s", buf);
15029 break;
15030 }
15031
15032 kmem_free(buf, 256);
15033 }
15034
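/*
 * Handle a user-initiated login to the remote port whose PWWN is
 * passed in fcio_ibuf: resolve the PWWN to a D_ID (through a GID_PN
 * name server query on fabric topologies, otherwise through the
 * existing remote port entry), perform a PLOGI with the RETAIN
 * semantics and notify the ULPs of the new login.
 */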
15035 static int
15036 fp_fcio_login(fc_local_port_t *port, fcio_t *fcio, job_request_t *job)
15037 {
15038 int ret;
15039 uint32_t d_id;
15040 la_wwn_t pwwn;
15041 fc_remote_port_t *pd = NULL;
15042 fc_remote_port_t *held_pd = NULL;
15043 fctl_ns_req_t *ns_cmd;
15044 fc_portmap_t *changelist;
15045
15046 bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn));
15047
15048 mutex_enter(&port->fp_mutex);
15049 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
15050 mutex_exit(&port->fp_mutex);
15051 job->job_counter = 1;
15052
15053 job->job_result = FC_SUCCESS;
15054
15055 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
15056 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
15057 FCTL_NS_BUF_IS_USERLAND, KM_SLEEP);
15058
15059 ASSERT(ns_cmd != NULL);
15060
15061 ns_cmd->ns_cmd_code = NS_GID_PN;
15062 ((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = pwwn;
15063
15064 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);
15065
15066 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) {
15067 if (ret != FC_SUCCESS) {
15068 fcio->fcio_errno = ret;
15069 } else {
15070 fcio->fcio_errno = job->job_result;
15071 }
15072 fctl_free_ns_cmd(ns_cmd);
15073 return (EIO);
15074 }
15075 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
15076 fctl_free_ns_cmd(ns_cmd);
15077 } else {
15078 mutex_exit(&port->fp_mutex);
15079
15080 held_pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
15081 if (held_pd == NULL) {
15082 fcio->fcio_errno = FC_BADWWN;
15083 return (EIO);
15084 }
15085 pd = held_pd;
15086
15087 mutex_enter(&pd->pd_mutex);
15088 d_id = pd->pd_port_id.port_id;
15089 mutex_exit(&pd->pd_mutex);
15090 }
15091
15092 job->job_counter = 1;
15093
15094 pd = fctl_get_remote_port_by_did(port, d_id);
15095
15096 if (pd) {
15097 mutex_enter(&pd->pd_mutex);
15098 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
15099 pd->pd_login_count++;
15100 mutex_exit(&pd->pd_mutex);
15101
15102 fcio->fcio_errno = FC_SUCCESS;
15103 if (held_pd) {
15104 fctl_release_remote_port(held_pd);
15105 }
15106
15107 return (0);
15108 }
15109 mutex_exit(&pd->pd_mutex);
15110 } else {
15111 mutex_enter(&port->fp_mutex);
15112 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
15113 mutex_exit(&port->fp_mutex);
15114 pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP);
15115 if (pd == NULL) {
15116 fcio->fcio_errno = FC_FAILURE;
15117 if (held_pd) {
15118 fctl_release_remote_port(held_pd);
15119 }
15120 return (EIO);
15121 }
15122 } else {
15123 mutex_exit(&port->fp_mutex);
15124 }
15125 }
15126
15127 job->job_flags &= ~JOB_TYPE_FP_ASYNC;
15128 job->job_counter = 1;
15129
15130 ret = fp_port_login(port, d_id, job, FP_CMD_PLOGI_RETAIN,
15131 KM_SLEEP, pd, NULL);
15132
15133 if (ret != FC_SUCCESS) {
15134 fcio->fcio_errno = ret;
15135 if (held_pd) {
15136 fctl_release_remote_port(held_pd);
15137 }
15138 return (EIO);
15139 }
15140 fp_jobwait(job);
15141
15142 fcio->fcio_errno = job->job_result;
15143
15144 if (held_pd) {
15145 fctl_release_remote_port(held_pd);
15146 }
15147
15148 if (job->job_result != FC_SUCCESS) {
15149 return (EIO);
15150 }
15151
15152 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
15153 if (pd == NULL) {
15154 fcio->fcio_errno = FC_BADDEV;
15155 return (ENODEV);
15156 }
15157
15158 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);
15159
15160 fctl_copy_portmap(changelist, pd);
15161 changelist->map_type = PORT_DEVICE_USER_LOGIN;
15162
15163 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);
15164
15165 mutex_enter(&pd->pd_mutex);
15166 pd->pd_type = PORT_DEVICE_NOCHANGE;
15167 mutex_exit(&pd->pd_mutex);
15168
15169 fctl_release_remote_port(pd);
15170
15171 return (0);
15172 }
15173
15174
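/*
 * Handle a user-initiated logout of the remote port whose PWWN is
 * passed in fcio_ibuf.  If other logins are outstanding only the
 * login count is dropped; otherwise an explicit LOGO is sent, the
 * remote port is delisted and the ULPs are notified.
 */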
15175 static int
15176 fp_fcio_logout(fc_local_port_t *port, fcio_t *fcio, job_request_t *job)
15177 {
15178 la_wwn_t pwwn;
15179 fp_cmd_t *cmd;
15180 fc_portmap_t *changelist;
15181 fc_remote_port_t *pd;
15182
15183 bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn));
15184
15185 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
15186 if (pd == NULL) {
15187 fcio->fcio_errno = FC_BADWWN;
15188 return (ENXIO);
15189 }
15190
15191 mutex_enter(&pd->pd_mutex);
15192 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
15193 fcio->fcio_errno = FC_LOGINREQ;
15194 mutex_exit(&pd->pd_mutex);
15195
15196 fctl_release_remote_port(pd);
15197
15198 return (EINVAL);
15199 }
15200
15201 ASSERT(pd->pd_login_count >= 1);
15202
15203 if (pd->pd_flags == PD_ELS_IN_PROGRESS) {
15204 fcio->fcio_errno = FC_FAILURE;
15205 mutex_exit(&pd->pd_mutex);
15206
15207 fctl_release_remote_port(pd);
15208
15209 return (EBUSY);
15210 }
15211
15212 if (pd->pd_login_count > 1) {
15213 pd->pd_login_count--;
15214 fcio->fcio_errno = FC_SUCCESS;
15215 mutex_exit(&pd->pd_mutex);
15216
15217 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);
15218
15219 fctl_copy_portmap(changelist, pd);
15220 changelist->map_type = PORT_DEVICE_USER_LOGOUT;
15221
15222 fctl_release_remote_port(pd);
15223
15224 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);
15225
15226 return (0);
15227 }
15228
15229 pd->pd_flags = PD_ELS_IN_PROGRESS;
15230 mutex_exit(&pd->pd_mutex);
15231
15232 job->job_counter = 1;
15233
15234 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t),
15235 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd);
15236 if (cmd == NULL) {
15237 fcio->fcio_errno = FC_NOMEM;
15238 fctl_release_remote_port(pd);
15239
15240 mutex_enter(&pd->pd_mutex);
15241 pd->pd_flags = PD_IDLE;
15242 mutex_exit(&pd->pd_mutex);
15243
15244 return (ENOMEM);
15245 }
15246
15247 mutex_enter(&port->fp_mutex);
15248 mutex_enter(&pd->pd_mutex);
15249
15250 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
15251 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
15252 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE;
15253 cmd->cmd_retry_count = 1;
15254 cmd->cmd_ulp_pkt = NULL;
15255
15256 fp_logo_init(pd, cmd, job);
15257
15258 mutex_exit(&pd->pd_mutex);
15259 mutex_exit(&port->fp_mutex);
15260
15261 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
15262 mutex_enter(&pd->pd_mutex);
15263 pd->pd_flags = PD_IDLE;
15264 mutex_exit(&pd->pd_mutex);
15265
15266 fp_free_pkt(cmd);
15267 fctl_release_remote_port(pd);
15268
15269 return (EIO);
15270 }
15271
15272 fp_jobwait(job);
15273
15274 fcio->fcio_errno = job->job_result;
15275 if (job->job_result != FC_SUCCESS) {
15276 mutex_enter(&pd->pd_mutex);
15277 pd->pd_flags = PD_IDLE;
15278 mutex_exit(&pd->pd_mutex);
15279
15280 fctl_release_remote_port(pd);
15281
15282 return (EIO);
15283 }
15284
15285 ASSERT(pd != NULL);
15286
15287 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);
15288
15289 fctl_copy_portmap(changelist, pd);
15290 changelist->map_type = PORT_DEVICE_USER_LOGOUT;
15291 changelist->map_state = PORT_DEVICE_INVALID;
15292
15293 mutex_enter(&port->fp_mutex);
15294 mutex_enter(&pd->pd_mutex);
15295
15296 fctl_delist_did_table(port, pd);
15297 fctl_delist_pwwn_table(port, pd);
15298 pd->pd_flags = PD_IDLE;
15299
15300 mutex_exit(&pd->pd_mutex);
15301 mutex_exit(&port->fp_mutex);
15302
15303 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);
15304
15305 fctl_release_remote_port(pd);
15306
15307 return (0);
15308 }
15309
15310
15311
15312 /*
15313 * Log a sysevent for adapter port-level events.
15314 */
15315 static void
15316 fp_log_port_event(fc_local_port_t *port, char *subclass)
15317 {
15318 nvlist_t *attr_list;
15319
15320 if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
15321 KM_SLEEP) != DDI_SUCCESS) {
15322 goto alloc_failed;
15323 }
15324
15325 if (nvlist_add_uint32(attr_list, "instance",
15326 port->fp_instance) != DDI_SUCCESS) {
15327 goto error;
15328 }
15329
15330 if (nvlist_add_byte_array(attr_list, "port-wwn",
15331 port->fp_service_params.nport_ww_name.raw_wwn,
15332 sizeof (la_wwn_t)) != DDI_SUCCESS) {
15333 goto error;
15334 }
15335
15336 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC,
15337 subclass, attr_list, NULL, DDI_SLEEP);
15338
15339 nvlist_free(attr_list);
15340 return;
15341
15342 error:
15343 nvlist_free(attr_list);
15344 alloc_failed:
15345 FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass);
15346 }
15347
15348
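/*
 * Log a sysevent for target (remote port) level events, identifying
 * both the local port and the affected target by WWN and port ID.
 */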
15349 static void
15350 fp_log_target_event(fc_local_port_t *port, char *subclass, la_wwn_t tgt_pwwn,
15351 uint32_t port_id)
15352 {
15353 nvlist_t *attr_list;
15354
15355 if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
15356 KM_SLEEP) != DDI_SUCCESS) {
15357 goto alloc_failed;
15358 }
15359
15360 if (nvlist_add_uint32(attr_list, "instance",
15361 port->fp_instance) != DDI_SUCCESS) {
15362 goto error;
15363 }
15364
15365 if (nvlist_add_byte_array(attr_list, "port-wwn",
15366 port->fp_service_params.nport_ww_name.raw_wwn,
15367 sizeof (la_wwn_t)) != DDI_SUCCESS) {
15368 goto error;
15369 }
15370
15371 if (nvlist_add_byte_array(attr_list, "target-port-wwn",
15372 tgt_pwwn.raw_wwn, sizeof (la_wwn_t)) != DDI_SUCCESS) {
15373 goto error;
15374 }
15375
15376 if (nvlist_add_uint32(attr_list, "target-port-id",
15377 port_id) != DDI_SUCCESS) {
15378 goto error;
15379 }
15380
15381 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC,
15382 subclass, attr_list, NULL, DDI_SLEEP);
15383
15384 nvlist_free(attr_list);
15385 return;
15386
15387 error:
15388 nvlist_free(attr_list);
15389 alloc_failed:
15390 FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass);
15391 }
15392
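/*
 * Map a remote port state to the corresponding FC-HBA port state;
 * anything other than PORT_DEVICE_LOGGED_IN maps to UNKNOWN.
 */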
15393 static uint32_t
15394 fp_map_remote_port_state(uint32_t rm_state)
15395 {
15396 switch (rm_state) {
15397 case PORT_DEVICE_LOGGED_IN:
15398 return (FC_HBA_PORTSTATE_ONLINE);
15399 case PORT_DEVICE_VALID:
15400 case PORT_DEVICE_INVALID:
15401 default:
15402 return (FC_HBA_PORTSTATE_UNKNOWN);
15403 }
15404 }
--- EOF ---