/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * NOT a DDI compliant Sun Fibre Channel port driver (fp)
 *
 */

#include <sys/types.h>
#include <sys/varargs.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/modctl.h>
#include <sys/open.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/poll.h>
#include <sys/conf.h>
#include <sys/thread.h>
#include <sys/var.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/promif.h>
#include <sys/nvpair.h>
#include <sys/byteorder.h>
#include <sys/scsi/scsi.h>
#include <sys/fibre-channel/fc.h>
#include <sys/fibre-channel/impl/fc_ulpif.h>
#include <sys/fibre-channel/impl/fc_fcaif.h>
#include <sys/fibre-channel/impl/fctl_private.h>
#include <sys/fibre-channel/impl/fc_portif.h>
#include <sys/fibre-channel/impl/fp.h>

/* These are defined in fctl.c! */
extern int did_table_size;
extern int pwwn_table_size;

static struct cb_ops fp_cb_ops = {
	fp_open,			/* open */
	fp_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	fp_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};

static struct dev_ops fp_ops = {
	DEVO_REV,			/* build revision */
	0,				/* reference count */
	fp_getinfo,			/* getinfo */
	nulldev,			/* identify - Obsoleted */
	nulldev,			/* probe */
	fp_attach,			/* attach */
	fp_detach,			/* detach */
	nodev,				/* reset */
	&fp_cb_ops,			/* cb_ops */
	NULL,				/* bus_ops */
	fp_power,			/* power */
	ddi_quiesce_not_needed		/* quiesce */
};

#define	FP_VERSION		"20091123-1.101"
#define	FP_NAME_VERSION		"SunFC Port v" FP_VERSION

char *fp_version = FP_NAME_VERSION;

static struct modldrv modldrv = {
	&mod_driverops,			/* Type of Module */
	FP_NAME_VERSION,		/* Name/Version of fp */
	&fp_ops				/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,			/* Rev of the loadable modules system */
	&modldrv,			/* NULL terminated list of */
	NULL				/* Linkage structures */
};


static uint16_t ns_reg_cmds[] = {
	NS_RPN_ID,
	NS_RNN_ID,
	NS_RCS_ID,
	NS_RFT_ID,
	NS_RPT_ID,
	NS_RSPN_ID,
	NS_RSNN_NN
};

struct fp_xlat {
	uchar_t	xlat_state;
	int	xlat_rval;
} fp_xlat[] = {
	{ FC_PKT_SUCCESS,	FC_SUCCESS },
	{ FC_PKT_REMOTE_STOP,	FC_FAILURE },
	{ FC_PKT_LOCAL_RJT,	FC_FAILURE },
	{ FC_PKT_NPORT_RJT,	FC_ELS_PREJECT },
	{ FC_PKT_FABRIC_RJT,	FC_ELS_FREJECT },
	{ FC_PKT_LOCAL_BSY,	FC_TRAN_BUSY },
	{ FC_PKT_TRAN_BSY,	FC_TRAN_BUSY },
	{ FC_PKT_NPORT_BSY,	FC_PBUSY },
	{ FC_PKT_FABRIC_BSY,	FC_FBUSY },
	{ FC_PKT_LS_RJT,	FC_FAILURE },
	{ FC_PKT_BA_RJT,	FC_FAILURE },
	{ FC_PKT_TIMEOUT,	FC_FAILURE },
	{ FC_PKT_TRAN_ERROR,	FC_TRANSPORT_ERROR },
	{ FC_PKT_FAILURE,	FC_FAILURE },
	{ FC_PKT_PORT_OFFLINE,	FC_OFFLINE }
};

static uchar_t fp_valid_alpas[] = {
	0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, 0x17, 0x18, 0x1B,
	0x1D, 0x1E, 0x1F, 0x23, 0x25, 0x26, 0x27, 0x29, 0x2A,
	0x2B, 0x2C, 0x2D, 0x2E, 0x31, 0x32, 0x33, 0x34, 0x35,
	0x36, 0x39, 0x3A, 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49,
	0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54,
	0x55, 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67,
	0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, 0x73,
	0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, 0x81, 0x82,
	0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, 0x9B, 0x9D, 0x9E,
	0x9F, 0xA3, 0xA5, 0xA6, 0xA7, 0xA9, 0xAA, 0xAB, 0xAC,
	0xAD, 0xAE, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB9,
	0xBA, 0xBC, 0xC3, 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB,
	0xCC, 0xCD, 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
	0xD9, 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF
};

static struct fp_perms {
	uint16_t	fp_ioctl_cmd;
	uchar_t		fp_open_flag;
} fp_perm_list[] = {
	{ FCIO_GET_NUM_DEVS,		FP_OPEN },
	{ FCIO_GET_DEV_LIST,		FP_OPEN },
	{ FCIO_GET_SYM_PNAME,		FP_OPEN },
	{ FCIO_GET_SYM_NNAME,		FP_OPEN },
	{ FCIO_SET_SYM_PNAME,		FP_EXCL },
	{ FCIO_SET_SYM_NNAME,		FP_EXCL },
	{ FCIO_GET_LOGI_PARAMS,		FP_OPEN },
	{ FCIO_DEV_LOGIN,		FP_EXCL },
	{ FCIO_DEV_LOGOUT,		FP_EXCL },
	{ FCIO_GET_STATE,		FP_OPEN },
	{ FCIO_DEV_REMOVE,		FP_EXCL },
	{ FCIO_GET_FCODE_REV,		FP_OPEN },
	{ FCIO_GET_FW_REV,		FP_OPEN },
	{ FCIO_GET_DUMP_SIZE,		FP_OPEN },
	{ FCIO_FORCE_DUMP,		FP_EXCL },
	{ FCIO_GET_DUMP,		FP_OPEN },
	{ FCIO_GET_TOPOLOGY,		FP_OPEN },
	{ FCIO_RESET_LINK,		FP_EXCL },
	{ FCIO_RESET_HARD,		FP_EXCL },
	{ FCIO_RESET_HARD_CORE,		FP_EXCL },
	{ FCIO_DIAG,			FP_OPEN },
	{ FCIO_NS,			FP_EXCL },
	{ FCIO_DOWNLOAD_FW,		FP_EXCL },
	{ FCIO_DOWNLOAD_FCODE,		FP_EXCL },
	{ FCIO_LINK_STATUS,		FP_OPEN },
	{ FCIO_GET_HOST_PARAMS,		FP_OPEN },
	{ FCIO_GET_NODE_ID,		FP_OPEN },
	{ FCIO_SET_NODE_ID,		FP_EXCL },
	{ FCIO_SEND_NODE_ID,		FP_OPEN },
	{ FCIO_GET_ADAPTER_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_OTHER_ADAPTER_PORTS,	FP_OPEN },
	{ FCIO_GET_ADAPTER_PORT_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_DISCOVERED_PORT_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_PORT_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_ADAPTER_PORT_STATS,	FP_OPEN },
	{ FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES,	FP_OPEN },
	{ FCIO_GET_NPIV_PORT_LIST,	FP_OPEN },
	{ FCIO_DELETE_NPIV_PORT,	FP_OPEN },
	{ FCIO_GET_NPIV_ATTRIBUTES,	FP_OPEN },
	{ FCIO_CREATE_NPIV_PORT,	FP_OPEN },
	{ FCIO_NPIV_GET_ADAPTER_ATTRIBUTES,	FP_OPEN }
};

static char *fp_pm_comps[] = {
	"NAME=FC Port",
	"0=Port Down",
	"1=Port Up"
};


#ifdef	_LITTLE_ENDIAN
#define	MAKE_BE_32(x)	{						\
		uint32_t *ptr1, i;					\
		ptr1 = (uint32_t *)(x);					\
		for (i = 0; i < sizeof (*(x)) / sizeof (uint32_t); i++) {\
			*ptr1 = BE_32(*ptr1);				\
			ptr1++;						\
		}							\
	}
#else
#define	MAKE_BE_32(x)
#endif
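
/*
 * Illustrative only (not part of the original source): MAKE_BE_32 swaps
 * every 32-bit word of the object that its argument points to into
 * big-endian order on little-endian hosts, and compiles away entirely on
 * big-endian hosts. A hypothetical caller could convert a small structure
 * in place like this:
 *
 *	struct { uint32_t a; uint32_t b; } hdr;
 *	MAKE_BE_32(&hdr);	// both words now in FC wire (big-endian) order
 *
 * Note that sizeof (*(x)) should be a multiple of sizeof (uint32_t) for the
 * loop to cover the whole object.
 */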

static uchar_t fp_verbosity = (FP_WARNING_MESSAGES | FP_FATAL_MESSAGES);
static uint32_t fp_options = 0;

static int fp_cmd_wait_cnt = FP_CMDWAIT_DELAY;
static int fp_retry_delay = FP_RETRY_DELAY;	/* retry after this delay */
static int fp_retry_count = FP_RETRY_COUNT;	/* number of retries */
unsigned int fp_offline_ticker;			/* seconds */

/*
 * Driver global variable to anchor the list of soft state structs for
 * all fp driver instances. Used with the Solaris DDI soft state functions.
 */
static void *fp_driver_softstate;

static clock_t	fp_retry_ticks;
static clock_t	fp_offline_ticks;

static int fp_retry_ticker;
static uint32_t fp_unsol_buf_count = FP_UNSOL_BUF_COUNT;
static uint32_t fp_unsol_buf_size = FP_UNSOL_BUF_SIZE;

static int		fp_log_size = FP_LOG_SIZE;
static int		fp_trace = FP_TRACE_DEFAULT;
static fc_trace_logq_t	*fp_logq = NULL;

int fp_get_adapter_paths(char *pathList, int count);
static void fp_log_port_event(fc_local_port_t *port, char *subclass);
static void fp_log_target_event(fc_local_port_t *port, char *subclass,
    la_wwn_t tgt_pwwn, uint32_t port_id);
static uint32_t fp_map_remote_port_state(uint32_t rm_state);
static void fp_init_symbolic_names(fc_local_port_t *port);


/*
 * Perform global initialization
 */
int
_init(void)
{
	int ret;

	if ((ret = ddi_soft_state_init(&fp_driver_softstate,
	    sizeof (struct fc_local_port), 8)) != 0) {
		return (ret);
	}

	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
		ddi_soft_state_fini(&fp_driver_softstate);
		return (ret);
	}

	fp_logq = fc_trace_alloc_logq(fp_log_size);

	if ((ret = mod_install(&modlinkage)) != 0) {
		fc_trace_free_logq(fp_logq);
		ddi_soft_state_fini(&fp_driver_softstate);
		scsi_hba_fini(&modlinkage);
	}

	return (ret);
}


/*
 * Prepare for driver unload
 */
int
_fini(void)
{
	int ret;

	if ((ret = mod_remove(&modlinkage)) == 0) {
		fc_trace_free_logq(fp_logq);
		ddi_soft_state_fini(&fp_driver_softstate);
		scsi_hba_fini(&modlinkage);
	}

	return (ret);
}


/*
 * Request mod_info() to handle all cases
 */
int
_info(struct modinfo *modinfo)
{
	return (mod_info(&modlinkage, modinfo));
}


/*
 * fp_attach:
 *
 * The respective cmd handlers take care of performing
 * ULP related invocations
 */
static int
fp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int rval;

	/*
	 * We check the value of fp_offline_ticker at this
	 * point. The variable is global for the driver and
	 * not specific to an instance.
	 *
	 * If there is no user-defined value found in /etc/system
	 * or fp.conf, then we use 90 seconds (FP_OFFLINE_TICKER).
	 * The minimum setting for this offline timeout according
	 * to the FC-FS2 standard (Fibre Channel Framing and
	 * Signalling-2, see www.t11.org) is R_T_TOV == 100msec.
	 *
	 * We do not recommend setting the value to less than 10
	 * seconds (RA_TOV) or more than 90 seconds. If this
	 * variable is greater than 90 seconds then drivers above
	 * fp (fcp, sd, scsi_vhci, vxdmp et al) might complain.
	 */
	fp_offline_ticker = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fp_offline_ticker",
	    FP_OFFLINE_TICKER);

	if ((fp_offline_ticker < 10) ||
	    (fp_offline_ticker > 90)) {
		cmn_err(CE_WARN, "Setting fp_offline_ticker to "
		    "%d second(s). This is outside the "
		    "recommended range of 10..90 seconds",
		    fp_offline_ticker);
	}
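
	/*
	 * Illustrative only (not part of the original source): the timeout
	 * above is read as a driver property, so a hypothetical override
	 * could be placed in fp.conf, e.g.:
	 *
	 *	fp_offline_ticker=45;
	 *
	 * A value outside the 10..90 second range only triggers the warning
	 * above; it is not clamped here.
	 */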

	/*
	 * Tick every second when there are commands to retry.
	 * It should tick at the least granular value of pkt_timeout
	 * (which is one second)
	 */
	fp_retry_ticker = 1;

	fp_retry_ticks = drv_usectohz(fp_retry_ticker * 1000 * 1000);
	fp_offline_ticks = drv_usectohz(fp_offline_ticker * 1000 * 1000);

	switch (cmd) {
	case DDI_ATTACH:
		rval = fp_attach_handler(dip);
		break;

	case DDI_RESUME:
		rval = fp_resume_handler(dip);
		break;

	default:
		rval = DDI_FAILURE;
		break;
	}
	return (rval);
}


/*
 * fp_detach:
 *
 * If a ULP fails to handle the cmd request, the converse of cmd is
 * invoked for the ULPs that had already handled the cmd successfully.
 */
static int
fp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		rval = DDI_FAILURE;
	fc_local_port_t	*port;
	fc_attach_cmd_t	converse;
	uint8_t		cnt;

	if ((port = ddi_get_soft_state(fp_driver_softstate,
	    ddi_get_instance(dip))) == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&port->fp_mutex);

	if (port->fp_ulp_attach) {
		mutex_exit(&port->fp_mutex);
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:
		if (port->fp_task != FP_TASK_IDLE) {
			mutex_exit(&port->fp_mutex);
			return (DDI_FAILURE);
		}

		/* Let's attempt to quit the job handler gracefully */
		port->fp_soft_state |= FP_DETACH_INPROGRESS;

		mutex_exit(&port->fp_mutex);
		converse = FC_CMD_ATTACH;
		if (fctl_detach_ulps(port, FC_CMD_DETACH,
		    &modlinkage) != FC_SUCCESS) {
			mutex_enter(&port->fp_mutex);
			port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
			mutex_exit(&port->fp_mutex);
			rval = DDI_FAILURE;
			break;
		}

		mutex_enter(&port->fp_mutex);
		for (cnt = 0; (port->fp_job_head) && (cnt < fp_cmd_wait_cnt);
		    cnt++) {
			mutex_exit(&port->fp_mutex);
			delay(drv_usectohz(1000000));
			mutex_enter(&port->fp_mutex);
		}

		if (port->fp_job_head) {
			mutex_exit(&port->fp_mutex);
			rval = DDI_FAILURE;
			break;
		}
		mutex_exit(&port->fp_mutex);

		rval = fp_detach_handler(port);
		break;

	case DDI_SUSPEND:
		mutex_exit(&port->fp_mutex);
		converse = FC_CMD_RESUME;
		if (fctl_detach_ulps(port, FC_CMD_SUSPEND,
		    &modlinkage) != FC_SUCCESS) {
			rval = DDI_FAILURE;
			break;
		}
		if ((rval = fp_suspend_handler(port)) != DDI_SUCCESS) {
			(void) callb_generic_cpr(&port->fp_cpr_info,
			    CB_CODE_CPR_RESUME);
		}
		break;

	default:
		mutex_exit(&port->fp_mutex);
		break;
	}

	/*
	 * Use softint to perform reattach. Mark fp_ulp_attach so we
	 * don't attempt to do this repeatedly on behalf of some persistent
	 * caller.
	 */
	if (rval != DDI_SUCCESS) {
		mutex_enter(&port->fp_mutex);
		port->fp_ulp_attach = 1;

		/*
		 * If the port is in low power mode, there is a possibility
		 * that the FCA too is in low power mode. Try to raise the
		 * power before calling attach ulps.
		 */

		if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
		    (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
			mutex_exit(&port->fp_mutex);
			(void) pm_raise_power(port->fp_port_dip,
			    FP_PM_COMPONENT, FP_PM_PORT_UP);
		} else {
			mutex_exit(&port->fp_mutex);
		}

		fp_attach_ulps(port, converse);

		mutex_enter(&port->fp_mutex);
		while (port->fp_ulp_attach) {
			cv_wait(&port->fp_attach_cv, &port->fp_mutex);
		}

		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;

		/*
		 * Mark state as detach failed so asynchronous ULP attach
		 * events (downstream, not the ones we're initiating with
		 * the call to fp_attach_ulps) are not honored. We're
		 * really still in pending detach.
		 */
		port->fp_soft_state |= FP_DETACH_FAILED;

		mutex_exit(&port->fp_mutex);
	}

	return (rval);
}


/*
 * fp_getinfo:
 *   Given the device number, return either the
 *   dev_info_t pointer or the instance number.
 */
/* ARGSUSED */
static int
fp_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	int		rval;
	minor_t		instance;
	fc_local_port_t	*port;

	rval = DDI_SUCCESS;
	instance = getminor((dev_t)arg);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if ((port = ddi_get_soft_state(fp_driver_softstate,
		    instance)) == NULL) {
			rval = DDI_FAILURE;
			break;
		}
		*result = (void *)port->fp_port_dip;
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;

	default:
		rval = DDI_FAILURE;
		break;
	}

	return (rval);
}


/*
 * Entry point for power up and power down request from kernel
 */
static int
fp_power(dev_info_t *dip, int comp, int level)
{
	int		rval = DDI_FAILURE;
	fc_local_port_t	*port;

	port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));
	if (port == NULL || comp != FP_PM_COMPONENT) {
		return (rval);
	}

	switch (level) {
	case FP_PM_PORT_UP:
		rval = DDI_SUCCESS;

		/*
		 * If the port is DDI_SUSPENDed, let the DDI_RESUME
		 * code complete the rediscovery.
		 */
		mutex_enter(&port->fp_mutex);
		if (port->fp_soft_state & FP_SOFT_SUSPEND) {
			port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
			port->fp_pm_level = FP_PM_PORT_UP;
			mutex_exit(&port->fp_mutex);
			fctl_attach_ulps(port, FC_CMD_POWER_UP, &modlinkage);
			break;
		}

		if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
			ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);

			port->fp_pm_level = FP_PM_PORT_UP;
			rval = fp_power_up(port);
			if (rval != DDI_SUCCESS) {
				port->fp_pm_level = FP_PM_PORT_DOWN;
			}
		} else {
			port->fp_pm_level = FP_PM_PORT_UP;
		}
		mutex_exit(&port->fp_mutex);
		break;

	case FP_PM_PORT_DOWN:
		mutex_enter(&port->fp_mutex);

		ASSERT(!(port->fp_soft_state & FP_SOFT_NO_PMCOMP));
		if (port->fp_soft_state & FP_SOFT_NO_PMCOMP) {
			/*
			 * The PM framework goofed up. We don't have
			 * any PM components. Let's never go down.
			 */
			mutex_exit(&port->fp_mutex);
			break;
		}

		if (port->fp_ulp_attach) {
			/* We shouldn't let the power go down */
			mutex_exit(&port->fp_mutex);
			break;
		}

		/*
		 * Not a whole lot to do if we are detaching
		 */
		if (port->fp_soft_state & FP_SOFT_IN_DETACH) {
			port->fp_pm_level = FP_PM_PORT_DOWN;
			mutex_exit(&port->fp_mutex);
			rval = DDI_SUCCESS;
			break;
		}

		if (!port->fp_pm_busy && !port->fp_pm_busy_nocomp) {
			port->fp_pm_level = FP_PM_PORT_DOWN;

			rval = fp_power_down(port);
			if (rval != DDI_SUCCESS) {
				port->fp_pm_level = FP_PM_PORT_UP;
				ASSERT(!(port->fp_soft_state &
				    FP_SOFT_POWER_DOWN));
			} else {
				ASSERT(port->fp_soft_state &
				    FP_SOFT_POWER_DOWN);
			}
		}
		mutex_exit(&port->fp_mutex);
		break;

	default:
		break;
	}

	return (rval);
}


/*
 * Open FC port devctl node
 */
static int
fp_open(dev_t *devp, int flag, int otype, cred_t *credp)
{
	int		instance;
	fc_local_port_t	*port;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	/*
	 * This is not a toy to play with. Allow only powerful
	 * users (hopefully knowledgeable) to access the port
	 * (A hacker potentially could download a sick binary
	 * file into FCA)
	 */
	if (drv_priv(credp)) {
		return (EPERM);
	}

	instance = (int)getminor(*devp);

	port = ddi_get_soft_state(fp_driver_softstate, instance);
	if (port == NULL) {
		return (ENXIO);
	}

	mutex_enter(&port->fp_mutex);
	if (port->fp_flag & FP_EXCL) {
		/*
		 * It is already open for exclusive access.
		 * So shut the door on this caller.
		 */
		mutex_exit(&port->fp_mutex);
		return (EBUSY);
	}

	if (flag & FEXCL) {
		if (port->fp_flag & FP_OPEN) {
			/*
			 * Exclusive operation not possible
			 * as it is already opened
			 */
			mutex_exit(&port->fp_mutex);
			return (EBUSY);
		}
		port->fp_flag |= FP_EXCL;
	}
	port->fp_flag |= FP_OPEN;
	mutex_exit(&port->fp_mutex);

	return (0);
}


/*
 * The driver close entry point is called on the last close()
 * of a device. So it is perfectly alright to just clobber the
 * open flag and reset it to idle (instead of having to reset
 * each flag bit). For any confusion, check out close(9E).
 */

/* ARGSUSED */
static int
fp_close(dev_t dev, int flag, int otype, cred_t *credp)
{
	int		instance;
	fc_local_port_t	*port;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	instance = (int)getminor(dev);

	port = ddi_get_soft_state(fp_driver_softstate, instance);
	if (port == NULL) {
		return (ENXIO);
	}

	mutex_enter(&port->fp_mutex);
	if ((port->fp_flag & FP_OPEN) == 0) {
		mutex_exit(&port->fp_mutex);
		return (ENODEV);
	}
	port->fp_flag = FP_IDLE;
	mutex_exit(&port->fp_mutex);

	return (0);
}

/*
 * Handle IOCTL requests
 */
/* ARGSUSED */
static int
fp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval)
{
	int		instance;
	int		ret = 0;
	fcio_t		fcio;
	fc_local_port_t	*port;

	instance = (int)getminor(dev);

	port = ddi_get_soft_state(fp_driver_softstate, instance);
	if (port == NULL) {
		return (ENXIO);
	}

	mutex_enter(&port->fp_mutex);
	if ((port->fp_flag & FP_OPEN) == 0) {
		mutex_exit(&port->fp_mutex);
		return (ENXIO);
	}

	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
		mutex_exit(&port->fp_mutex);
		return (ENXIO);
	}

	mutex_exit(&port->fp_mutex);

	/* this will raise power if necessary */
	ret = fctl_busy_port(port);
	if (ret != 0) {
		return (ret);
	}

	ASSERT(port->fp_pm_level == FP_PM_PORT_UP);

	switch (cmd) {
	case FCIO_CMD: {
#ifdef	_MULTI_DATAMODEL
		switch (ddi_model_convert_from(mode & FMODELS)) {
		case DDI_MODEL_ILP32: {
			struct fcio32 fcio32;

			if (ddi_copyin((void *)data, (void *)&fcio32,
			    sizeof (struct fcio32), mode)) {
				ret = EFAULT;
				break;
			}
			fcio.fcio_xfer = fcio32.fcio_xfer;
			fcio.fcio_cmd = fcio32.fcio_cmd;
			fcio.fcio_flags = fcio32.fcio_flags;
			fcio.fcio_cmd_flags = fcio32.fcio_cmd_flags;
			fcio.fcio_ilen = (size_t)fcio32.fcio_ilen;
			fcio.fcio_ibuf =
			    (caddr_t)(uintptr_t)fcio32.fcio_ibuf;
			fcio.fcio_olen = (size_t)fcio32.fcio_olen;
			fcio.fcio_obuf =
			    (caddr_t)(uintptr_t)fcio32.fcio_obuf;
			fcio.fcio_alen = (size_t)fcio32.fcio_alen;
			fcio.fcio_abuf =
			    (caddr_t)(uintptr_t)fcio32.fcio_abuf;
			fcio.fcio_errno = fcio32.fcio_errno;
			break;
		}

		case DDI_MODEL_NONE:
			if (ddi_copyin((void *)data, (void *)&fcio,
			    sizeof (fcio_t), mode)) {
				ret = EFAULT;
			}
			break;
		}
#else	/* _MULTI_DATAMODEL */
		if (ddi_copyin((void *)data, (void *)&fcio,
		    sizeof (fcio_t), mode)) {
			ret = EFAULT;
			break;
		}
#endif	/* _MULTI_DATAMODEL */
		if (!ret) {
			ret = fp_fciocmd(port, data, mode, &fcio);
		}
		break;
	}

	default:
		ret = fctl_ulp_port_ioctl(port, dev, cmd, data,
		    mode, credp, rval);
	}

	fctl_idle_port(port);

	return (ret);
}


/*
 * Init Symbolic Port Name and Node Name
 * LV will try to get symbolic names from the FCA driver
 * and register them with the name server; if LV fails to
 * get them, LV will register its default symbolic names
 * with the name server.
 * The default symbolic node name format is:
 *	<hostname>:<hba driver name>(instance)
 * The default symbolic port name format is:
 *	<fp path name>
 */
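/*
 * Illustrative only (not part of the original source): on a hypothetical
 * host named "host1" whose HBA is driven by qlc instance 0, the defaults
 * built below would look roughly like:
 *
 *	symbolic node name:	host1:qlc(0)
 *	symbolic port name:	/pci@0,0/pci1077,141@4/fp@0,0
 *
 * Both strings are truncated to FCHBA_SYMB_NAME_LEN if necessary.
 */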
static void
fp_init_symbolic_names(fc_local_port_t *port)
{
	const char	*vendorname = ddi_driver_name(port->fp_fca_dip);
	char		*sym_name;
	char		fcaname[50] = {0};
	int		hostnlen, fcanlen;

	if (port->fp_sym_node_namelen == 0) {
		hostnlen = strlen(utsname.nodename);
		(void) snprintf(fcaname, sizeof (fcaname),
		    "%s%d", vendorname, ddi_get_instance(port->fp_fca_dip));
		fcanlen = strlen(fcaname);

		sym_name = kmem_zalloc(hostnlen + fcanlen + 2, KM_SLEEP);
		(void) sprintf(sym_name, "%s:%s", utsname.nodename, fcaname);
		port->fp_sym_node_namelen = strlen(sym_name);
		if (port->fp_sym_node_namelen >= FCHBA_SYMB_NAME_LEN) {
			port->fp_sym_node_namelen = FCHBA_SYMB_NAME_LEN;
		}
		(void) strncpy(port->fp_sym_node_name, sym_name,
		    port->fp_sym_node_namelen);
		kmem_free(sym_name, hostnlen + fcanlen + 2);
	}

	if (port->fp_sym_port_namelen == 0) {
		char *pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) ddi_pathname(port->fp_port_dip, pathname);
		port->fp_sym_port_namelen = strlen(pathname);
		if (port->fp_sym_port_namelen >= FCHBA_SYMB_NAME_LEN) {
			port->fp_sym_port_namelen = FCHBA_SYMB_NAME_LEN;
		}
		(void) strncpy(port->fp_sym_port_name, pathname,
		    port->fp_sym_port_namelen);
		kmem_free(pathname, MAXPATHLEN);
	}
}


/*
 * Perform port attach
 */
static int
fp_attach_handler(dev_info_t *dip)
{
	int			rval;
	int			instance;
	int			port_num;
	int			port_len;
	char			name[30];
	char			i_pwwn[17];
	fp_cmd_t		*pkt;
	uint32_t		ub_count;
	fc_local_port_t		*port;
	job_request_t		*job;
	fc_local_port_t		*phyport = NULL;
	int			portpro1;
	char			pwwn[17], nwwn[17];

	instance = ddi_get_instance(dip);
	port_len = sizeof (port_num);
	rval = ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port",
	    (caddr_t)&port_num, &port_len);
	if (rval != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fp(%d): No port property in devinfo",
		    instance);
		return (DDI_FAILURE);
	}

	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, instance,
	    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fp(%d): failed to create devctl minor node",
		    instance);
		return (DDI_FAILURE);
	}

	if (ddi_create_minor_node(dip, "fc", S_IFCHR, instance,
	    DDI_NT_FC_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fp(%d): failed to create fc attachment"
		    " point minor node", instance);
		ddi_remove_minor_node(dip, NULL);
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(fp_driver_softstate, instance)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fp(%d): failed to alloc soft state",
		    instance);
		ddi_remove_minor_node(dip, NULL);
		return (DDI_FAILURE);
	}
	port = ddi_get_soft_state(fp_driver_softstate, instance);

	(void) sprintf(port->fp_ibuf, "fp(%d)", instance);

	port->fp_instance = instance;
	port->fp_ulp_attach = 1;
	port->fp_port_num = port_num;
	port->fp_verbose = fp_verbosity;
	port->fp_options = fp_options;

	port->fp_fca_dip = ddi_get_parent(dip);
	port->fp_port_dip = dip;
	port->fp_fca_tran = (fc_fca_tran_t *)
	    ddi_get_driver_private(port->fp_fca_dip);

	port->fp_task = port->fp_last_task = FP_TASK_IDLE;

	/*
	 * Init the starting value of fp_rscn_count. Note that if
	 * FC_INVALID_RSCN_COUNT is 0 (which is what it currently is), the
	 * actual # of RSCNs will be (fp_rscn_count - 1)
	 */
	port->fp_rscn_count = FC_INVALID_RSCN_COUNT + 1;

	mutex_init(&port->fp_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&port->fp_cv, NULL, CV_DRIVER, NULL);
	cv_init(&port->fp_attach_cv, NULL, CV_DRIVER, NULL);

	(void) sprintf(name, "fp%d_cache", instance);

	if ((portpro1 = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    "phyport-instance", -1)) != -1) {
		phyport = ddi_get_soft_state(fp_driver_softstate, portpro1);
		fc_wwn_to_str(&phyport->fp_service_params.nport_ww_name, pwwn);
		fc_wwn_to_str(&phyport->fp_service_params.node_ww_name, nwwn);
		port->fp_npiv_type = FC_NPIV_PORT;
	}

	/*
	 * Allocate the pool of fc_packet_t structs to be used with
	 * this fp instance.
	 */
	port->fp_pkt_cache = kmem_cache_create(name,
	    (port->fp_fca_tran->fca_pkt_size) + sizeof (fp_cmd_t), 8,
	    fp_cache_constructor, fp_cache_destructor, NULL, (void *)port,
	    NULL, 0);
	port->fp_out_fpcmds = 0;
	if (port->fp_pkt_cache == NULL) {
		goto cache_alloc_failed;
	}

	/*
	 * Allocate the d_id and pwwn hash tables for all remote ports
	 * connected to this local port.
	 */
	port->fp_did_table = kmem_zalloc(did_table_size *
	    sizeof (struct d_id_hash), KM_SLEEP);

	port->fp_pwwn_table = kmem_zalloc(pwwn_table_size *
	    sizeof (struct pwwn_hash), KM_SLEEP);

	port->fp_taskq = taskq_create("fp_ulp_callback", 1,
	    MINCLSYSPRI, 1, 16, 0);

	/* Indicate that we don't have the PM components yet */
	port->fp_soft_state |= FP_SOFT_NO_PMCOMP;

	/*
	 * Bind the callbacks with the FCA driver. This will open the gate
	 * for asynchronous callbacks, so after this call the fp_mutex
	 * must be held when updating the fc_local_port_t struct.
	 *
	 * This is done _before_ setting up the job thread so we can avoid
	 * cleaning up after the thread_create() in the error path. This
	 * also means fp will be operating with fp_els_resp_pkt set to NULL.
	 */
	if (fp_bind_callbacks(port) != DDI_SUCCESS) {
		goto bind_callbacks_failed;
	}

	if (phyport) {
		mutex_enter(&phyport->fp_mutex);
		if (phyport->fp_port_next) {
			phyport->fp_port_next->fp_port_prev = port;
			port->fp_port_next = phyport->fp_port_next;
			phyport->fp_port_next = port;
			port->fp_port_prev = phyport;
		} else {
			phyport->fp_port_next = port;
			phyport->fp_port_prev = port;
			port->fp_port_next = phyport;
			port->fp_port_prev = phyport;
		}
		mutex_exit(&phyport->fp_mutex);
	}

	/*
	 * Init Symbolic Names
	 */
	fp_init_symbolic_names(port);

	pkt = fp_alloc_pkt(port, sizeof (la_els_logi_t), sizeof (la_els_logi_t),
	    KM_SLEEP, NULL);

	if (pkt == NULL) {
		cmn_err(CE_WARN, "fp(%d): failed to allocate ELS packet",
		    instance);
		goto alloc_els_packet_failed;
	}

	(void) thread_create(NULL, 0, fp_job_handler, port, 0, &p0, TS_RUN,
	    v.v_maxsyspri - 2);

	fc_wwn_to_str(&port->fp_service_params.nport_ww_name, i_pwwn);
	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-port",
	    i_pwwn) != DDI_PROP_SUCCESS) {
		fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
		    "fp(%d): Updating 'initiator-port' property"
		    " on fp dev_info node failed", instance);
	}

	fc_wwn_to_str(&port->fp_service_params.node_ww_name, i_pwwn);
	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-node",
	    i_pwwn) != DDI_PROP_SUCCESS) {
		fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
		    "fp(%d): Updating 'initiator-node' property"
		    " on fp dev_info node failed", instance);
	}

	mutex_enter(&port->fp_mutex);
	port->fp_els_resp_pkt = pkt;
	mutex_exit(&port->fp_mutex);

	/*
	 * Determine the count of unsolicited buffers this FCA can support
	 */
	fp_retrieve_caps(port);

	/*
	 * Allocate unsolicited buffer tokens
	 */
	if (port->fp_ub_count) {
		ub_count = port->fp_ub_count;
		port->fp_ub_tokens = kmem_zalloc(ub_count *
		    sizeof (*port->fp_ub_tokens), KM_SLEEP);
		/*
		 * Do not fail the attach if unsolicited buffer allocation
		 * fails; Just try to get along with whatever the FCA can do.
		 */
		if (fc_ulp_uballoc(port, &ub_count, fp_unsol_buf_size,
		    FC_TYPE_EXTENDED_LS, port->fp_ub_tokens) !=
		    FC_SUCCESS || ub_count != port->fp_ub_count) {
			cmn_err(CE_WARN, "fp(%d): failed to allocate "
			    " Unsolicited buffers. proceeding with attach...",
			    instance);
			kmem_free(port->fp_ub_tokens,
			    sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
			port->fp_ub_tokens = NULL;
		}
	}

	fp_load_ulp_modules(dip, port);

	/*
	 * Enable DDI_SUSPEND and DDI_RESUME for this instance.
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    "pm-hardware-state", "needs-suspend-resume",
	    strlen("needs-suspend-resume") + 1);

	/*
	 * fctl maintains a list of all port handles, so
	 * help fctl add this one to its list now.
	 */
	mutex_enter(&port->fp_mutex);
	fctl_add_port(port);

	/*
	 * If a state change is already in progress, set the bind state to
	 * OFFLINE as well, so further state change callbacks into ULPs
	 * will pass the appropriate states
	 */
	if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE ||
	    port->fp_statec_busy) {
		port->fp_bind_state = FC_STATE_OFFLINE;
		mutex_exit(&port->fp_mutex);

		fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
	} else {
		/*
		 * Without dropping the mutex, ensure that the port
		 * startup happens ahead of state change callback
		 * processing
		 */
		ASSERT(port->fp_job_tail == NULL && port->fp_job_head == NULL);

		port->fp_last_task = port->fp_task;
		port->fp_task = FP_TASK_PORT_STARTUP;

		job = fctl_alloc_job(JOB_PORT_STARTUP, JOB_TYPE_FCTL_ASYNC,
		    fp_startup_done, (opaque_t)port, KM_SLEEP);

		port->fp_job_head = port->fp_job_tail = job;

		cv_signal(&port->fp_cv);

		mutex_exit(&port->fp_mutex);
	}

	mutex_enter(&port->fp_mutex);
	while (port->fp_ulp_attach) {
		cv_wait(&port->fp_attach_cv, &port->fp_mutex);
	}
	mutex_exit(&port->fp_mutex);

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
	    "pm-components", fp_pm_comps,
	    sizeof (fp_pm_comps) / sizeof (fp_pm_comps[0])) !=
	    DDI_PROP_SUCCESS) {
		FP_TRACE(FP_NHEAD2(9, 0), "Failed to create PM"
		    " components property, PM disabled on this port.");
		mutex_enter(&port->fp_mutex);
		port->fp_pm_level = FP_PM_PORT_UP;
		mutex_exit(&port->fp_mutex);
	} else {
		if (pm_raise_power(dip, FP_PM_COMPONENT,
		    FP_PM_PORT_UP) != DDI_SUCCESS) {
			FP_TRACE(FP_NHEAD2(9, 0), "Failed to raise"
			    " power level");
			mutex_enter(&port->fp_mutex);
			port->fp_pm_level = FP_PM_PORT_UP;
			mutex_exit(&port->fp_mutex);
		}

		/*
		 * Don't unset the FP_SOFT_NO_PMCOMP flag until after
		 * the call to pm_raise_power. The PM framework can't
		 * handle multiple threads calling into it during attach.
		 */
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_NO_PMCOMP;
		mutex_exit(&port->fp_mutex);
	}

	ddi_report_dev(dip);

	fp_log_port_event(port, ESC_SUNFC_PORT_ATTACH);

	return (DDI_SUCCESS);

	/*
	 * Unwind any/all preceding allocations in the event of an error.
	 */

alloc_els_packet_failed:

	if (port->fp_fca_handle != NULL) {
		port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
		port->fp_fca_handle = NULL;
	}

	if (port->fp_ub_tokens != NULL) {
		(void) fc_ulp_ubfree(port, port->fp_ub_count,
		    port->fp_ub_tokens);
		kmem_free(port->fp_ub_tokens,
		    port->fp_ub_count * sizeof (*port->fp_ub_tokens));
		port->fp_ub_tokens = NULL;
	}

	if (port->fp_els_resp_pkt != NULL) {
		fp_free_pkt(port->fp_els_resp_pkt);
		port->fp_els_resp_pkt = NULL;
	}

bind_callbacks_failed:

	if (port->fp_taskq != NULL) {
		taskq_destroy(port->fp_taskq);
	}

	if (port->fp_pwwn_table != NULL) {
		kmem_free(port->fp_pwwn_table,
		    pwwn_table_size * sizeof (struct pwwn_hash));
		port->fp_pwwn_table = NULL;
	}

	if (port->fp_did_table != NULL) {
		kmem_free(port->fp_did_table,
		    did_table_size * sizeof (struct d_id_hash));
		port->fp_did_table = NULL;
	}

	if (port->fp_pkt_cache != NULL) {
		kmem_cache_destroy(port->fp_pkt_cache);
		port->fp_pkt_cache = NULL;
	}

cache_alloc_failed:

	cv_destroy(&port->fp_attach_cv);
	cv_destroy(&port->fp_cv);
	mutex_destroy(&port->fp_mutex);
	ddi_remove_minor_node(port->fp_port_dip, NULL);
	ddi_soft_state_free(fp_driver_softstate, instance);
	ddi_prop_remove_all(dip);

	return (DDI_FAILURE);
}


/*
 * Handle DDI_RESUME request
 */
static int
fp_resume_handler(dev_info_t *dip)
{
	int		rval;
	fc_local_port_t	*port;

	port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));

	ASSERT(port != NULL);

#ifdef	DEBUG
	mutex_enter(&port->fp_mutex);
	ASSERT(port->fp_soft_state & FP_SOFT_SUSPEND);
	mutex_exit(&port->fp_mutex);
#endif

	/*
	 * If the port was power suspended, raise the power level
	 */
	mutex_enter(&port->fp_mutex);
	if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
	    (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
		ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);

		mutex_exit(&port->fp_mutex);
		if (pm_raise_power(dip, FP_PM_COMPONENT,
		    FP_PM_PORT_UP) != DDI_SUCCESS) {
			FP_TRACE(FP_NHEAD2(9, 0),
			    "Failed to raise the power level");
			return (DDI_FAILURE);
		}
		mutex_enter(&port->fp_mutex);
	}
	port->fp_soft_state &= ~FP_SOFT_SUSPEND;
	mutex_exit(&port->fp_mutex);

	/*
	 * All the discovery is initiated and handled by the per-port thread.
	 * Further, all the discovery is done in callback mode (not polled
	 * mode); in a specific case such as this, the discovery is required
	 * to happen in polled mode. The easiest way out is to bail out the
	 * port thread and get started. Come back and fix this to do
	 * on-demand discovery initiated by ULPs. ULPs such as FCP will do
	 * on-demand discovery during pre-power-up busctl handling, which
	 * will only be possible when SCSA provides a new HBA vector for
	 * sending down the PM busctl requests.
	 */
	(void) callb_generic_cpr(&port->fp_cpr_info, CB_CODE_CPR_RESUME);

	rval = fp_resume_all(port, FC_CMD_RESUME);
	if (rval != DDI_SUCCESS) {
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state |= FP_SOFT_SUSPEND;
		mutex_exit(&port->fp_mutex);
		(void) callb_generic_cpr(&port->fp_cpr_info,
		    CB_CODE_CPR_CHKPT);
	}

	return (rval);
}

/*
 * Perform FC Port power on initialization
 */
static int
fp_power_up(fc_local_port_t *port)
{
	int	rval;

	ASSERT(MUTEX_HELD(&port->fp_mutex));

	ASSERT((port->fp_soft_state & FP_SOFT_SUSPEND) == 0);
	ASSERT(port->fp_soft_state & FP_SOFT_POWER_DOWN);

	port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;

	mutex_exit(&port->fp_mutex);

	rval = fp_resume_all(port, FC_CMD_POWER_UP);
	if (rval != DDI_SUCCESS) {
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state |= FP_SOFT_POWER_DOWN;
	} else {
		mutex_enter(&port->fp_mutex);
	}

	return (rval);
}


/*
 * It is important to note that the power may possibly be removed between
 * SUSPEND and the ensuing RESUME operation. In such a context the underlying
 * FC port hardware would have gone through an OFFLINE to ONLINE transition
 * (hardware state). In this case, the port driver may need to rediscover the
 * topology, perform LOGINs, register with the name server again and perform
 * any such port initialization procedures. To perform LOGINs, the driver could
 * use the port device handle to see if a LOGIN needs to be performed and use
 * the D_ID and WWN in it. The LOGINs may fail (if the hardware is reconfigured
 * or removed) which will be reflected in the map the ULPs will see.
 */
static int
fp_resume_all(fc_local_port_t *port, fc_attach_cmd_t cmd)
{

	ASSERT(!MUTEX_HELD(&port->fp_mutex));

	if (fp_bind_callbacks(port) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	mutex_enter(&port->fp_mutex);

	/*
	 * If there are commands queued for delayed retry, instead of
	 * working the hard way to figure out which ones are good for
	 * restart and which ones are not (ELSs are definitely not good
	 * as the port will have to go through a new spin of rediscovery
	 * now), just flush them out.
	 */
	if (port->fp_restore & FP_RESTORE_WAIT_TIMEOUT) {
		fp_cmd_t *cmd;

		port->fp_restore &= ~FP_RESTORE_WAIT_TIMEOUT;

		mutex_exit(&port->fp_mutex);
		while ((cmd = fp_deque_cmd(port)) != NULL) {
			cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
			fp_iodone(cmd);
		}
		mutex_enter(&port->fp_mutex);
	}

	if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE) {
		if ((port->fp_restore & FP_RESTORE_OFFLINE_TIMEOUT) ||
		    port->fp_dev_count) {
			port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
			port->fp_offline_tid = timeout(fp_offline_timeout,
			    (caddr_t)port, fp_offline_ticks);
		}
		if (port->fp_job_head) {
			cv_signal(&port->fp_cv);
		}
		mutex_exit(&port->fp_mutex);
		fctl_attach_ulps(port, cmd, &modlinkage);
	} else {
		struct job_request *job;

		/*
		 * If an OFFLINE timer was running at the time of
		 * suspending, there is no need to restart it as
		 * the port is ONLINE now.
		 */
		port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
		if (port->fp_statec_busy == 0) {
			port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
		}
		port->fp_statec_busy++;
		mutex_exit(&port->fp_mutex);

		job = fctl_alloc_job(JOB_PORT_ONLINE,
		    JOB_CANCEL_ULP_NOTIFICATION, NULL, NULL, KM_SLEEP);
		fctl_enque_job(port, job);

		fctl_jobwait(job);
		fctl_remove_oldies(port);

		fctl_attach_ulps(port, cmd, &modlinkage);
		fctl_dealloc_job(job);
	}

	return (DDI_SUCCESS);
}


/*
 * At this time, there shouldn't be any I/O requests on this port.
 * But the unsolicited callbacks from the underlying FCA port need
 * to be handled very carefully. The steps followed to handle the
 * DDI_DETACH are:
 *	+	Grab the port driver mutex, check if the unsolicited
 *		callback is currently under processing. If true, fail
 *		the DDI_DETACH request by printing a message; If false
 *		mark the DDI_DETACH as under progress, so that any
 *		further unsolicited callbacks get bounced.
 *	+	Perform PRLO/LOGO if necessary, cleanup all the data
 *		structures.
 *	+	Get the job_handler thread to gracefully exit.
 *	+	Unregister callbacks with the FCA port.
 *	+	Now that some peace is found, notify all the ULPs of
 *		DDI_DETACH request (using ulp_port_detach entry point)
 *	+	Free all mutexes, semaphores, conditional variables.
 *	+	Free the soft state, return success.
 *
 * Important considerations:
 *	Port driver de-registers state change and unsolicited
 *	callbacks before taking up the task of notifying ULPs
 *	and performing PRLO and LOGOs.
 *
 *	A port may go offline at the time PRLO/LOGO is being
 *	requested. It is expected of all FCA drivers to fail
 *	such requests either immediately with a FC_OFFLINE
 *	return code to fc_fca_transport() or return the packet
 *	asynchronously with pkt state set to FC_PKT_PORT_OFFLINE
 */
static int
fp_detach_handler(fc_local_port_t *port)
{
	job_request_t	*job;
	uint32_t	delay_count;
	fc_orphan_t	*orp, *tmporp;

	/*
	 * In a Fabric topology with many host ports connected to
	 * a switch, another detaching instance of fp might have
	 * triggered a LOGO (which is an unsolicited request to
	 * this instance). So in order to be able to successfully
	 * detach by taking care of such cases a delay of about
	 * 30 seconds is introduced.
	 */
	delay_count = 0;
	mutex_enter(&port->fp_mutex);
	if (port->fp_out_fpcmds != 0) {
		/*
		 * At this time we can only check fp internal commands, because
		 * sd/ssd/scsi_vhci should have finished all their commands,
		 * and fcp/fcip/fcsm should have finished all their commands.
		 *
		 * It seems that all fp internal commands are asynchronous now.
		 */
		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
		mutex_exit(&port->fp_mutex);

		cmn_err(CE_WARN, "fp(%d): %d fp_cmd(s) is/are in progress"
		    " Failing detach", port->fp_instance, port->fp_out_fpcmds);
		return (DDI_FAILURE);
	}

	while ((port->fp_soft_state &
	    (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) &&
	    (delay_count < 30)) {
		mutex_exit(&port->fp_mutex);
		delay_count++;
		delay(drv_usectohz(1000000));
		mutex_enter(&port->fp_mutex);
	}

	if (port->fp_soft_state &
	    (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) {
		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
		mutex_exit(&port->fp_mutex);

		cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
		    " Failing detach", port->fp_instance);
		return (DDI_FAILURE);
	}

	port->fp_soft_state |= FP_SOFT_IN_DETACH;
	port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
	mutex_exit(&port->fp_mutex);

	/*
	 * If we're powered down, we need to raise power prior to submitting
	 * the JOB_PORT_SHUTDOWN job. Otherwise, the job handler will never
	 * process the shutdown job.
	 */
	if (fctl_busy_port(port) != 0) {
		cmn_err(CE_WARN, "fp(%d): fctl_busy_port failed",
		    port->fp_instance);
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_IN_DETACH;
		mutex_exit(&port->fp_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * This will deallocate data structs and cause the "job" thread
	 * to exit, in preparation for DDI_DETACH on the instance.
	 * This can sleep for an arbitrary duration, since it waits for
	 * commands over the wire, timeout(9F) callbacks, etc.
	 *
	 * CAUTION: There is still a race here, where the "job" thread
	 * can still be executing code even though the fctl_jobwait() call
	 * below has returned to us. In theory the fp driver could even be
	 * modunloaded even though the job thread isn't done executing.
	 */
	job = fctl_alloc_job(JOB_PORT_SHUTDOWN, 0, NULL,
	    (opaque_t)port, KM_SLEEP);
	fctl_enque_job(port, job);
	fctl_jobwait(job);
	fctl_dealloc_job(job);


	(void) pm_lower_power(port->fp_port_dip, FP_PM_COMPONENT,
	    FP_PM_PORT_DOWN);

	if (port->fp_taskq) {
		taskq_destroy(port->fp_taskq);
	}

	ddi_prop_remove_all(port->fp_port_dip);

	ddi_remove_minor_node(port->fp_port_dip, NULL);

	fctl_remove_port(port);

	fp_free_pkt(port->fp_els_resp_pkt);

	if (port->fp_ub_tokens) {
		if (fc_ulp_ubfree(port, port->fp_ub_count,
		    port->fp_ub_tokens) != FC_SUCCESS) {
			cmn_err(CE_WARN, "fp(%d): couldn't free "
			    " unsolicited buffers", port->fp_instance);
		}
		kmem_free(port->fp_ub_tokens,
		    sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
		port->fp_ub_tokens = NULL;
	}

	if (port->fp_pkt_cache != NULL) {
		kmem_cache_destroy(port->fp_pkt_cache);
	}

	port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);

	mutex_enter(&port->fp_mutex);
	if (port->fp_did_table) {
		kmem_free(port->fp_did_table, did_table_size *
		    sizeof (struct d_id_hash));
	}

	if (port->fp_pwwn_table) {
		kmem_free(port->fp_pwwn_table, pwwn_table_size *
		    sizeof (struct pwwn_hash));
	}
	orp = port->fp_orphan_list;
	while (orp) {
		tmporp = orp;
		orp = orp->orp_next;
		kmem_free(tmporp, sizeof (*orp));
	}

	mutex_exit(&port->fp_mutex);

	fp_log_port_event(port, ESC_SUNFC_PORT_DETACH);

	mutex_destroy(&port->fp_mutex);
	cv_destroy(&port->fp_attach_cv);
	cv_destroy(&port->fp_cv);
	ddi_soft_state_free(fp_driver_softstate, port->fp_instance);

	return (DDI_SUCCESS);
}


/*
 * Steps to perform DDI_SUSPEND operation on a FC port
 *
 *	- If already suspended return DDI_FAILURE
 *	- If already power-suspended return DDI_SUCCESS
 *	- If an unsolicited callback or state change handling is
 *	    in progress, throw a warning message, return DDI_FAILURE
 *	- Cancel timeouts
 *	- SUSPEND the job_handler thread (means do nothing as it is
 *	    taken care of by the CPR framework)
 */
static int
fp_suspend_handler(fc_local_port_t *port)
{
	uint32_t	delay_count;

	mutex_enter(&port->fp_mutex);

	/*
	 * The following should never happen, but
	 * let the driver be more defensive here
	 */
	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
		mutex_exit(&port->fp_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * If the port is already power suspended, there
	 * is nothing else to do, so return DDI_SUCCESS,
	 * but mark the SUSPEND bit in the soft state
	 * before leaving.
	 */
	if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
		port->fp_soft_state |= FP_SOFT_SUSPEND;
		mutex_exit(&port->fp_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Check if an unsolicited callback or state change handling is
	 * in progress. If true, fail the suspend operation; also throw
	 * a warning message notifying the failure. Note that the Sun PCI
	 * hotplug spec recommends messages in cases of failure (but
	 * not flooding the console).
	 *
	 * Busy waiting for a short interval (500 milliseconds?) to see
	 * if the callback processing completes may be another idea. Since
	 * most of the callback processing involves a lot of work, it
	 * is safe to just fail the SUSPEND operation. It is definitely
	 * not bad to fail the SUSPEND operation if the driver is busy.
	 */
	delay_count = 0;
	while ((port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
	    FP_SOFT_IN_UNSOL_CB)) && (delay_count < 30)) {
		mutex_exit(&port->fp_mutex);
		delay_count++;
		delay(drv_usectohz(1000000));
		mutex_enter(&port->fp_mutex);
	}

	if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
	    FP_SOFT_IN_UNSOL_CB)) {
		mutex_exit(&port->fp_mutex);
		cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
		    " Failing suspend", port->fp_instance);
		return (DDI_FAILURE);
	}

	/*
	 * Check if the FC port thread is busy
	 */
	if (port->fp_job_head) {
		mutex_exit(&port->fp_mutex);
		FP_TRACE(FP_NHEAD2(9, 0),
		    "FC port thread is busy: Failing suspend");
		return (DDI_FAILURE);
	}
	port->fp_soft_state |= FP_SOFT_SUSPEND;

	fp_suspend_all(port);
	mutex_exit(&port->fp_mutex);

	return (DDI_SUCCESS);
}


/*
 * Prepare for graceful power down of a FC port
 */
static int
fp_power_down(fc_local_port_t *port)
{
	ASSERT(MUTEX_HELD(&port->fp_mutex));

	/*
	 * Power down request followed by a DDI_SUSPEND should
	 * never happen; If it does return DDI_SUCCESS
	 */
	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
		port->fp_soft_state |= FP_SOFT_POWER_DOWN;
		return (DDI_SUCCESS);
	}

	/*
	 * If the port is already power suspended, there
	 * is nothing else to do, so return DDI_SUCCESS.
	 */
	if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
		return (DDI_SUCCESS);
	}

	/*
	 * Check if an unsolicited callback or state change handling
	 * is in progress. If true, fail the PM suspend operation.
	 * But don't print a message unless the verbosity of the
	 * driver desires otherwise.
	 */
	if ((port->fp_soft_state & FP_SOFT_IN_STATEC_CB) ||
	    (port->fp_soft_state & FP_SOFT_IN_UNSOL_CB)) {
		FP_TRACE(FP_NHEAD2(9, 0),
		    "Unsolicited callback in progress: Failing power down");
		return (DDI_FAILURE);
	}

	/*
	 * Check if the FC port thread is busy
	 */
	if (port->fp_job_head) {
		FP_TRACE(FP_NHEAD2(9, 0),
		    "FC port thread is busy: Failing power down");
		return (DDI_FAILURE);
	}
	port->fp_soft_state |= FP_SOFT_POWER_DOWN;

	/*
	 * Check if the ULPs are ready for power down
	 */
	mutex_exit(&port->fp_mutex);
	if (fctl_detach_ulps(port, FC_CMD_POWER_DOWN,
	    &modlinkage) != FC_SUCCESS) {
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
		mutex_exit(&port->fp_mutex);

		/*
		 * Power back up the obedient ULPs that went down
		 */
		fp_attach_ulps(port, FC_CMD_POWER_UP);

		FP_TRACE(FP_NHEAD2(9, 0),
		    "ULP(s) busy, detach_ulps failed. Failing power down");
		mutex_enter(&port->fp_mutex);
		return (DDI_FAILURE);
	}
	mutex_enter(&port->fp_mutex);

	fp_suspend_all(port);

	return (DDI_SUCCESS);
}


/*
 * Suspend the entire FC port
 */
static void
fp_suspend_all(fc_local_port_t *port)
{
	int			index;
	struct pwwn_hash	*head;
	fc_remote_port_t	*pd;

	ASSERT(MUTEX_HELD(&port->fp_mutex));

	if (port->fp_wait_tid != 0) {
		timeout_id_t tid;

		tid = port->fp_wait_tid;
		port->fp_wait_tid = (timeout_id_t)NULL;
		mutex_exit(&port->fp_mutex);
		(void) untimeout(tid);
		mutex_enter(&port->fp_mutex);
		port->fp_restore |= FP_RESTORE_WAIT_TIMEOUT;
	}

	if (port->fp_offline_tid) {
		timeout_id_t tid;

		tid = port->fp_offline_tid;
		port->fp_offline_tid = (timeout_id_t)NULL;
		mutex_exit(&port->fp_mutex);
		(void) untimeout(tid);
		mutex_enter(&port->fp_mutex);
		port->fp_restore |= FP_RESTORE_OFFLINE_TIMEOUT;
	}
	mutex_exit(&port->fp_mutex);
	port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
	mutex_enter(&port->fp_mutex);

	/*
	 * Mark all devices as OLD, and reset the LOGIN state as well
	 * (this will force the ULPs to perform a LOGIN after calling
	 * fc_portgetmap() during RESUME/PM_RESUME)
	 */
	for (index = 0; index < pwwn_table_size; index++) {
		head = &port->fp_pwwn_table[index];
		pd = head->pwwn_head;
		while (pd != NULL) {
			mutex_enter(&pd->pd_mutex);
			fp_remote_port_offline(pd);
			fctl_delist_did_table(port, pd);
			pd->pd_state = PORT_DEVICE_VALID;
			pd->pd_login_count = 0;
			mutex_exit(&pd->pd_mutex);
			pd = pd->pd_wwn_hnext;
		}
	}
}


/*
 * fp_cache_constructor: Constructor function for kmem_cache_create(9F).
 * Performs initializations for fc_packet_t structs.
 * Returns 0 for success or -1 for failure.
 *
 * This function allocates DMA handles for both command and responses.
 * Most of the ELSs used have both command and responses so it is strongly
 * desired to move them to the cache constructor routine.
 *
 * Context: Can sleep iff called with KM_SLEEP flag.
 */
static int
fp_cache_constructor(void *buf, void *cdarg, int kmflags)
{
	int (*cb) (caddr_t);
	fc_packet_t *pkt;
	fp_cmd_t *cmd = (fp_cmd_t *)buf;
	fc_local_port_t *port = (fc_local_port_t *)cdarg;

	cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	cmd->cmd_next = NULL;
	cmd->cmd_flags = 0;
	cmd->cmd_dflags = 0;
	cmd->cmd_job = NULL;
	cmd->cmd_port = port;
	pkt = &cmd->cmd_pkt;

	if (!(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) {
		if (ddi_dma_alloc_handle(port->fp_fca_dip,
		    port->fp_fca_tran->fca_dma_attr, cb, NULL,
		    &pkt->pkt_cmd_dma) != DDI_SUCCESS) {
			return (-1);
		}

		if (ddi_dma_alloc_handle(port->fp_fca_dip,
		    port->fp_fca_tran->fca_dma_attr, cb, NULL,
		    &pkt->pkt_resp_dma) != DDI_SUCCESS) {
			ddi_dma_free_handle(&pkt->pkt_cmd_dma);
			return (-1);
		}
	} else {
		pkt->pkt_cmd_dma = 0;
		pkt->pkt_resp_dma = 0;
	}

	pkt->pkt_cmd_acc = pkt->pkt_resp_acc = NULL;
	pkt->pkt_cmd_cookie_cnt = pkt->pkt_resp_cookie_cnt =
	    pkt->pkt_data_cookie_cnt = 0;
	pkt->pkt_cmd_cookie = pkt->pkt_resp_cookie =
	    pkt->pkt_data_cookie = NULL;
	pkt->pkt_fca_private = (caddr_t)buf + sizeof (fp_cmd_t);

	return (0);
}


/*
 * fp_cache_destructor: Destructor function for kmem_cache_create().
 * Performs un-initializations for fc_packet_t structs.
 */
/* ARGSUSED */
static void
fp_cache_destructor(void *buf, void *cdarg)
{
	fp_cmd_t	*cmd = (fp_cmd_t *)buf;
	fc_packet_t	*pkt;

	pkt = &cmd->cmd_pkt;
	if (pkt->pkt_cmd_dma) {
		ddi_dma_free_handle(&pkt->pkt_cmd_dma);
	}

	if (pkt->pkt_resp_dma) {
		ddi_dma_free_handle(&pkt->pkt_resp_dma);
	}
}


/*
 * Packet allocation for ELS and any other port driver commands
 *
 * Some ELSs like FLOGI and PLOGI are critical for topology and
 * device discovery and a system's inability to allocate memory
 * or DVMA resources while performing some of these critical ELSs
 * causes a lot of problems. While memory allocation failures are
 * rare, DVMA resource failures are common as the applications
 * are becoming more and more powerful on huge servers. So it
 * is desirable to have a framework support to reserve a fragment
 * of DVMA. So until this is fixed the correct way, the suffering
 * is huge whenever a LIP happens at a time DVMA resources are
 * drained out completely - so an attempt needs to be made to
 * KM_SLEEP while requesting these resources, hoping that the
 * requests won't hang forever.
 *
 * The fc_remote_port_t argument is stored into the pkt_pd field in the
 * fc_packet_t struct prior to the fc_ulp_init_packet() call. This
 * ensures that the pd_ref_count for the fc_remote_port_t is valid.
 * If there is no fc_remote_port_t associated with the fc_packet_t, then
 * fp_alloc_pkt() must be called with pd set to NULL.
 *
 * fp/fctl will reuse fp_cmd_t somewhere, and change pkt_cmdlen/rsplen;
 * actually, it's a design fault. There's no problem for physical FCAs,
 * but it will cause a memory leak or panic for virtual FCAs like fcoei.
 *
 * For FCAs that don't support DMA, such as fcoei, we will use
 * pkt_fctl_rsvd1/rsvd2 to keep the real cmd_len/resp_len.
 */
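/*
 * Illustrative only (not part of the original source): a typical caller in
 * this file allocates an ELS packet with both payloads sized for the ELS in
 * question and no remote port association, e.g.
 *
 *	pkt = fp_alloc_pkt(port, sizeof (la_els_logi_t),
 *	    sizeof (la_els_logi_t), KM_SLEEP, NULL);
 *
 * and releases it with fp_free_pkt(pkt) when done.
 */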
1962 */ 1963 1964 static fp_cmd_t * 1965 fp_alloc_pkt(fc_local_port_t *port, int cmd_len, int resp_len, int kmflags, 1966 fc_remote_port_t *pd) 1967 { 1968 int rval; 1969 ulong_t real_len; 1970 fp_cmd_t *cmd; 1971 fc_packet_t *pkt; 1972 int (*cb) (caddr_t); 1973 ddi_dma_cookie_t pkt_cookie; 1974 ddi_dma_cookie_t *cp; 1975 uint32_t cnt; 1976 1977 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 1978 1979 cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT; 1980 1981 cmd = (fp_cmd_t *)kmem_cache_alloc(port->fp_pkt_cache, kmflags); 1982 if (cmd == NULL) { 1983 return (cmd); 1984 } 1985 1986 cmd->cmd_ulp_pkt = NULL; 1987 cmd->cmd_flags = 0; 1988 pkt = &cmd->cmd_pkt; 1989 ASSERT(cmd->cmd_dflags == 0); 1990 1991 pkt->pkt_datalen = 0; 1992 pkt->pkt_data = NULL; 1993 pkt->pkt_state = 0; 1994 pkt->pkt_action = 0; 1995 pkt->pkt_reason = 0; 1996 pkt->pkt_expln = 0; 1997 pkt->pkt_cmd = NULL; 1998 pkt->pkt_resp = NULL; 1999 pkt->pkt_fctl_rsvd1 = NULL; 2000 pkt->pkt_fctl_rsvd2 = NULL; 2001 2002 /* 2003 * Init pkt_pd with the given pointer; this must be done _before_ 2004 * the call to fc_ulp_init_packet(). 2005 */ 2006 pkt->pkt_pd = pd; 2007 2008 /* Now call the FCA driver to init its private, per-packet fields */ 2009 if (fc_ulp_init_packet((opaque_t)port, pkt, kmflags) != FC_SUCCESS) { 2010 goto alloc_pkt_failed; 2011 } 2012 2013 if (cmd_len && !(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) { 2014 ASSERT(pkt->pkt_cmd_dma != NULL); 2015 2016 rval = ddi_dma_mem_alloc(pkt->pkt_cmd_dma, cmd_len, 2017 port->fp_fca_tran->fca_acc_attr, DDI_DMA_CONSISTENT, 2018 cb, NULL, (caddr_t *)&pkt->pkt_cmd, &real_len, 2019 &pkt->pkt_cmd_acc); 2020 2021 if (rval != DDI_SUCCESS) { 2022 goto alloc_pkt_failed; 2023 } 2024 cmd->cmd_dflags |= FP_CMD_VALID_DMA_MEM; 2025 2026 if (real_len < cmd_len) { 2027 goto alloc_pkt_failed; 2028 } 2029 2030 rval = ddi_dma_addr_bind_handle(pkt->pkt_cmd_dma, NULL, 2031 pkt->pkt_cmd, real_len, DDI_DMA_WRITE | 2032 DDI_DMA_CONSISTENT, cb, NULL, 2033 &pkt_cookie, &pkt->pkt_cmd_cookie_cnt); 2034 2035 if (rval != DDI_DMA_MAPPED) { 2036 goto alloc_pkt_failed; 2037 } 2038 2039 cmd->cmd_dflags |= FP_CMD_VALID_DMA_BIND; 2040 2041 if (pkt->pkt_cmd_cookie_cnt > 2042 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) { 2043 goto alloc_pkt_failed; 2044 } 2045 2046 ASSERT(pkt->pkt_cmd_cookie_cnt != 0); 2047 2048 cp = pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc( 2049 pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie), 2050 KM_NOSLEEP); 2051 2052 if (cp == NULL) { 2053 goto alloc_pkt_failed; 2054 } 2055 2056 *cp = pkt_cookie; 2057 cp++; 2058 for (cnt = 1; cnt < pkt->pkt_cmd_cookie_cnt; cnt++, cp++) { 2059 ddi_dma_nextcookie(pkt->pkt_cmd_dma, &pkt_cookie); 2060 *cp = pkt_cookie; 2061 } 2062 } else if (cmd_len != 0) { 2063 pkt->pkt_cmd = kmem_alloc(cmd_len, KM_SLEEP); 2064 pkt->pkt_fctl_rsvd1 = (opaque_t)(uintptr_t)cmd_len; 2065 } 2066 2067 if (resp_len && !(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) { 2068 ASSERT(pkt->pkt_resp_dma != NULL); 2069 2070 rval = ddi_dma_mem_alloc(pkt->pkt_resp_dma, resp_len, 2071 port->fp_fca_tran->fca_acc_attr, 2072 DDI_DMA_CONSISTENT, cb, NULL, 2073 (caddr_t *)&pkt->pkt_resp, &real_len, 2074 &pkt->pkt_resp_acc); 2075 2076 if (rval != DDI_SUCCESS) { 2077 goto alloc_pkt_failed; 2078 } 2079 cmd->cmd_dflags |= FP_RESP_VALID_DMA_MEM; 2080 2081 if (real_len < resp_len) { 2082 goto alloc_pkt_failed; 2083 } 2084 2085 rval = ddi_dma_addr_bind_handle(pkt->pkt_resp_dma, NULL, 2086 pkt->pkt_resp, real_len, DDI_DMA_READ | 2087 DDI_DMA_CONSISTENT, cb, NULL, 2088 &pkt_cookie, 
&pkt->pkt_resp_cookie_cnt); 2089 2090 if (rval != DDI_DMA_MAPPED) { 2091 goto alloc_pkt_failed; 2092 } 2093 2094 cmd->cmd_dflags |= FP_RESP_VALID_DMA_BIND; 2095 2096 if (pkt->pkt_resp_cookie_cnt > 2097 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) { 2098 goto alloc_pkt_failed; 2099 } 2100 2101 ASSERT(pkt->pkt_cmd_cookie_cnt != 0); 2102 2103 cp = pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc( 2104 pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie), 2105 KM_NOSLEEP); 2106 2107 if (cp == NULL) { 2108 goto alloc_pkt_failed; 2109 } 2110 2111 *cp = pkt_cookie; 2112 cp++; 2113 for (cnt = 1; cnt < pkt->pkt_resp_cookie_cnt; cnt++, cp++) { 2114 ddi_dma_nextcookie(pkt->pkt_resp_dma, &pkt_cookie); 2115 *cp = pkt_cookie; 2116 } 2117 } else if (resp_len != 0) { 2118 pkt->pkt_resp = kmem_alloc(resp_len, KM_SLEEP); 2119 pkt->pkt_fctl_rsvd2 = (opaque_t)(uintptr_t)resp_len; 2120 } 2121 2122 pkt->pkt_cmdlen = cmd_len; 2123 pkt->pkt_rsplen = resp_len; 2124 pkt->pkt_ulp_private = cmd; 2125 2126 return (cmd); 2127 2128 alloc_pkt_failed: 2129 2130 fp_free_dma(cmd); 2131 2132 if (pkt->pkt_cmd_cookie != NULL) { 2133 kmem_free(pkt->pkt_cmd_cookie, 2134 pkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t)); 2135 pkt->pkt_cmd_cookie = NULL; 2136 } 2137 2138 if (pkt->pkt_resp_cookie != NULL) { 2139 kmem_free(pkt->pkt_resp_cookie, 2140 pkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t)); 2141 pkt->pkt_resp_cookie = NULL; 2142 } 2143 2144 if (port->fp_soft_state & FP_SOFT_FCA_IS_NODMA) { 2145 if (pkt->pkt_cmd) { 2146 kmem_free(pkt->pkt_cmd, cmd_len); 2147 } 2148 2149 if (pkt->pkt_resp) { 2150 kmem_free(pkt->pkt_resp, resp_len); 2151 } 2152 } 2153 2154 kmem_cache_free(port->fp_pkt_cache, cmd); 2155 2156 return (NULL); 2157 } 2158 2159 2160 /* 2161 * Free FC packet 2162 */ 2163 static void 2164 fp_free_pkt(fp_cmd_t *cmd) 2165 { 2166 fc_local_port_t *port; 2167 fc_packet_t *pkt; 2168 2169 ASSERT(!MUTEX_HELD(&cmd->cmd_port->fp_mutex)); 2170 2171 cmd->cmd_next = NULL; 2172 cmd->cmd_job = NULL; 2173 pkt = &cmd->cmd_pkt; 2174 pkt->pkt_ulp_private = 0; 2175 pkt->pkt_tran_flags = 0; 2176 pkt->pkt_tran_type = 0; 2177 port = cmd->cmd_port; 2178 2179 if (pkt->pkt_cmd_cookie != NULL) { 2180 kmem_free(pkt->pkt_cmd_cookie, pkt->pkt_cmd_cookie_cnt * 2181 sizeof (ddi_dma_cookie_t)); 2182 pkt->pkt_cmd_cookie = NULL; 2183 } 2184 2185 if (pkt->pkt_resp_cookie != NULL) { 2186 kmem_free(pkt->pkt_resp_cookie, pkt->pkt_resp_cookie_cnt * 2187 sizeof (ddi_dma_cookie_t)); 2188 pkt->pkt_resp_cookie = NULL; 2189 } 2190 2191 if (port->fp_soft_state & FP_SOFT_FCA_IS_NODMA) { 2192 if (pkt->pkt_cmd) { 2193 kmem_free(pkt->pkt_cmd, 2194 (uint32_t)(uintptr_t)pkt->pkt_fctl_rsvd1); 2195 } 2196 2197 if (pkt->pkt_resp) { 2198 kmem_free(pkt->pkt_resp, 2199 (uint32_t)(uintptr_t)pkt->pkt_fctl_rsvd2); 2200 } 2201 } 2202 2203 fp_free_dma(cmd); 2204 (void) fc_ulp_uninit_packet((opaque_t)port, pkt); 2205 kmem_cache_free(port->fp_pkt_cache, (void *)cmd); 2206 } 2207 2208 2209 /* 2210 * Release DVMA resources 2211 */ 2212 static void 2213 fp_free_dma(fp_cmd_t *cmd) 2214 { 2215 fc_packet_t *pkt = &cmd->cmd_pkt; 2216 2217 pkt->pkt_cmdlen = 0; 2218 pkt->pkt_rsplen = 0; 2219 pkt->pkt_tran_type = 0; 2220 pkt->pkt_tran_flags = 0; 2221 2222 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_BIND) { 2223 (void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma); 2224 } 2225 2226 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_MEM) { 2227 if (pkt->pkt_cmd_acc) { 2228 ddi_dma_mem_free(&pkt->pkt_cmd_acc); 2229 } 2230 } 2231 2232 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_BIND) { 2233 (void) 
ddi_dma_unbind_handle(pkt->pkt_resp_dma); 2234 } 2235 2236 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_MEM) { 2237 if (pkt->pkt_resp_acc) { 2238 ddi_dma_mem_free(&pkt->pkt_resp_acc); 2239 } 2240 } 2241 cmd->cmd_dflags = 0; 2242 } 2243 2244 2245 /* 2246 * Dedicated thread to perform various activities. One thread for 2247 * each fc_local_port_t (driver soft state) instance. 2248 * Note, this effectively works out to one thread for each local 2249 * port, but there are also some Solaris taskq threads in use on a per-local 2250 * port basis; these also need to be taken into consideration. 2251 */ 2252 static void 2253 fp_job_handler(fc_local_port_t *port) 2254 { 2255 int rval; 2256 uint32_t *d_id; 2257 fc_remote_port_t *pd; 2258 job_request_t *job; 2259 2260 /* 2261 * Solaris-internal stuff for proper operation of kernel threads 2262 * with Solaris CPR. 2263 */ 2264 CALLB_CPR_INIT(&port->fp_cpr_info, &port->fp_mutex, 2265 callb_generic_cpr, "fp_job_handler"); 2266 2267 /* Loop forever waiting for work to do */ 2268 for (;;) { 2269 2270 mutex_enter(&port->fp_mutex); 2271 2272 /* 2273 * Sleep if no work to do right now, or if we want 2274 * to suspend or power-down. 2275 */ 2276 while (port->fp_job_head == NULL || 2277 (port->fp_soft_state & (FP_SOFT_POWER_DOWN | 2278 FP_SOFT_SUSPEND))) { 2279 CALLB_CPR_SAFE_BEGIN(&port->fp_cpr_info); 2280 cv_wait(&port->fp_cv, &port->fp_mutex); 2281 CALLB_CPR_SAFE_END(&port->fp_cpr_info, &port->fp_mutex); 2282 } 2283 2284 /* 2285 * OK, we've just been woken up, so retrieve the next entry 2286 * from the head of the job queue for this local port. 2287 */ 2288 job = fctl_deque_job(port); 2289 2290 /* 2291 * Handle all the fp driver's supported job codes here 2292 * in this big honkin' switch. 2293 */ 2294 switch (job->job_code) { 2295 case JOB_PORT_SHUTDOWN: 2296 /* 2297 * fp_port_shutdown() is only called from here. This 2298 * will prepare the local port instance (softstate) 2299 * for detaching. This cancels timeout callbacks, 2300 * executes LOGOs with remote ports, cleans up tables, 2301 * and deallocates data structs. 2302 */ 2303 fp_port_shutdown(port, job); 2304 2305 /* 2306 * This will exit the job thread. 2307 */ 2308 CALLB_CPR_EXIT(&(port->fp_cpr_info)); 2309 fctl_jobdone(job); 2310 thread_exit(); 2311 2312 /* NOTREACHED */ 2313 2314 case JOB_ATTACH_ULP: { 2315 /* 2316 * This job is spawned in response to a ULP calling 2317 * fc_ulp_add(). 2318 */ 2319 2320 boolean_t do_attach_ulps = B_TRUE; 2321 2322 /* 2323 * If fp is detaching, we don't want to call 2324 * fp_startup_done as this asynchronous 2325 * notification may interfere with the re-attach. 2326 */ 2327 2328 if (port->fp_soft_state & (FP_DETACH_INPROGRESS | 2329 FP_SOFT_IN_DETACH | FP_DETACH_FAILED)) { 2330 do_attach_ulps = B_FALSE; 2331 } else { 2332 /* 2333 * We are going to force the transport 2334 * to attach to the ULPs, so set 2335 * fp_ulp_attach. This will keep any 2336 * potential detach from occurring until 2337 * we are done. 2338 */ 2339 port->fp_ulp_attach = 1; 2340 } 2341 2342 mutex_exit(&port->fp_mutex); 2343 2344 /* 2345 * NOTE: Since we just dropped the mutex, there is now 2346 * a race window where the fp_soft_state check above 2347 * could change here. This race is covered because an 2348 * additional check was added in the functions hidden 2349 * under fp_startup_done(). 2350 */ 2351 if (do_attach_ulps == B_TRUE) { 2352 /* 2353 * This goes thru a bit of a convoluted call 2354 * chain before spawning off a DDI taskq 2355 * request to perform the actual attach 2356 * operations. 
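 * (Roughly: fp_startup_done() calls fp_attach_ulps(), which queues
 * fp_ulp_port_attach() on the port's taskq, and that in turn calls
 * fctl_attach_ulps() for the registered ULPs.)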
Blocking can occur at a number 2357 * of points. 2358 */ 2359 fp_startup_done((opaque_t)port, FC_PKT_SUCCESS); 2360 } 2361 job->job_result = FC_SUCCESS; 2362 fctl_jobdone(job); 2363 break; 2364 } 2365 2366 case JOB_ULP_NOTIFY: { 2367 /* 2368 * Pass state change notifications up to any/all 2369 * registered ULPs. 2370 */ 2371 uint32_t statec; 2372 2373 statec = job->job_ulp_listlen; 2374 if (statec == FC_STATE_RESET_REQUESTED) { 2375 port->fp_last_task = port->fp_task; 2376 port->fp_task = FP_TASK_OFFLINE; 2377 fp_port_offline(port, 0); 2378 port->fp_task = port->fp_last_task; 2379 port->fp_last_task = FP_TASK_IDLE; 2380 } 2381 2382 if (--port->fp_statec_busy == 0) { 2383 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 2384 } 2385 2386 mutex_exit(&port->fp_mutex); 2387 2388 job->job_result = fp_ulp_notify(port, statec, KM_SLEEP); 2389 fctl_jobdone(job); 2390 break; 2391 } 2392 2393 case JOB_PLOGI_ONE: 2394 /* 2395 * Issue a PLOGI to a single remote port. Multiple 2396 * PLOGIs to different remote ports may occur in 2397 * parallel. 2398 * This can create the fc_remote_port_t if it does not 2399 * already exist. 2400 */ 2401 2402 mutex_exit(&port->fp_mutex); 2403 d_id = (uint32_t *)job->job_private; 2404 pd = fctl_get_remote_port_by_did(port, *d_id); 2405 2406 if (pd) { 2407 mutex_enter(&pd->pd_mutex); 2408 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 2409 pd->pd_login_count++; 2410 mutex_exit(&pd->pd_mutex); 2411 job->job_result = FC_SUCCESS; 2412 fctl_jobdone(job); 2413 break; 2414 } 2415 mutex_exit(&pd->pd_mutex); 2416 } else { 2417 mutex_enter(&port->fp_mutex); 2418 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 2419 mutex_exit(&port->fp_mutex); 2420 pd = fp_create_remote_port_by_ns(port, 2421 *d_id, KM_SLEEP); 2422 if (pd == NULL) { 2423 job->job_result = FC_FAILURE; 2424 fctl_jobdone(job); 2425 break; 2426 } 2427 } else { 2428 mutex_exit(&port->fp_mutex); 2429 } 2430 } 2431 2432 job->job_flags |= JOB_TYPE_FP_ASYNC; 2433 job->job_counter = 1; 2434 2435 rval = fp_port_login(port, *d_id, job, 2436 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL); 2437 2438 if (rval != FC_SUCCESS) { 2439 job->job_result = rval; 2440 fctl_jobdone(job); 2441 } 2442 break; 2443 2444 case JOB_LOGO_ONE: { 2445 /* 2446 * Issue a PLOGO to a single remote port. Multiple 2447 * PLOGOs to different remote ports may occur in 2448 * parallel. 2449 */ 2450 fc_remote_port_t *pd; 2451 2452 ASSERT(job->job_counter > 0); 2453 2454 pd = (fc_remote_port_t *)job->job_ulp_pkts; 2455 2456 mutex_enter(&pd->pd_mutex); 2457 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 2458 mutex_exit(&pd->pd_mutex); 2459 job->job_result = FC_LOGINREQ; 2460 mutex_exit(&port->fp_mutex); 2461 fctl_jobdone(job); 2462 break; 2463 } 2464 if (pd->pd_login_count > 1) { 2465 pd->pd_login_count--; 2466 mutex_exit(&pd->pd_mutex); 2467 job->job_result = FC_SUCCESS; 2468 mutex_exit(&port->fp_mutex); 2469 fctl_jobdone(job); 2470 break; 2471 } 2472 mutex_exit(&pd->pd_mutex); 2473 mutex_exit(&port->fp_mutex); 2474 job->job_flags |= JOB_TYPE_FP_ASYNC; 2475 (void) fp_logout(port, pd, job); 2476 break; 2477 } 2478 2479 case JOB_FCIO_LOGIN: 2480 /* 2481 * PLOGI initiated at ioctl request. 2482 */ 2483 mutex_exit(&port->fp_mutex); 2484 job->job_result = 2485 fp_fcio_login(port, job->job_private, job); 2486 fctl_jobdone(job); 2487 break; 2488 2489 case JOB_FCIO_LOGOUT: 2490 /* 2491 * PLOGO initiated at ioctl request. 
2492 */ 2493 mutex_exit(&port->fp_mutex); 2494 job->job_result = 2495 fp_fcio_logout(port, job->job_private, job); 2496 fctl_jobdone(job); 2497 break; 2498 2499 case JOB_PORT_GETMAP: 2500 case JOB_PORT_GETMAP_PLOGI_ALL: { 2501 port->fp_last_task = port->fp_task; 2502 port->fp_task = FP_TASK_GETMAP; 2503 2504 switch (port->fp_topology) { 2505 case FC_TOP_PRIVATE_LOOP: 2506 job->job_counter = 1; 2507 2508 fp_get_loopmap(port, job); 2509 mutex_exit(&port->fp_mutex); 2510 fp_jobwait(job); 2511 fctl_fillout_map(port, 2512 (fc_portmap_t **)job->job_private, 2513 (uint32_t *)job->job_arg, 1, 0, 0); 2514 fctl_jobdone(job); 2515 mutex_enter(&port->fp_mutex); 2516 break; 2517 2518 case FC_TOP_PUBLIC_LOOP: 2519 case FC_TOP_FABRIC: 2520 mutex_exit(&port->fp_mutex); 2521 job->job_counter = 1; 2522 2523 job->job_result = fp_ns_getmap(port, 2524 job, (fc_portmap_t **)job->job_private, 2525 (uint32_t *)job->job_arg, 2526 FCTL_GAN_START_ID); 2527 fctl_jobdone(job); 2528 mutex_enter(&port->fp_mutex); 2529 break; 2530 2531 case FC_TOP_PT_PT: 2532 mutex_exit(&port->fp_mutex); 2533 fctl_fillout_map(port, 2534 (fc_portmap_t **)job->job_private, 2535 (uint32_t *)job->job_arg, 1, 0, 0); 2536 fctl_jobdone(job); 2537 mutex_enter(&port->fp_mutex); 2538 break; 2539 2540 default: 2541 mutex_exit(&port->fp_mutex); 2542 fctl_jobdone(job); 2543 mutex_enter(&port->fp_mutex); 2544 break; 2545 } 2546 port->fp_task = port->fp_last_task; 2547 port->fp_last_task = FP_TASK_IDLE; 2548 mutex_exit(&port->fp_mutex); 2549 break; 2550 } 2551 2552 case JOB_PORT_OFFLINE: { 2553 fp_log_port_event(port, ESC_SUNFC_PORT_OFFLINE); 2554 2555 port->fp_last_task = port->fp_task; 2556 port->fp_task = FP_TASK_OFFLINE; 2557 2558 if (port->fp_statec_busy > 2) { 2559 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 2560 fp_port_offline(port, 0); 2561 if (--port->fp_statec_busy == 0) { 2562 port->fp_soft_state &= 2563 ~FP_SOFT_IN_STATEC_CB; 2564 } 2565 } else { 2566 fp_port_offline(port, 1); 2567 } 2568 2569 port->fp_task = port->fp_last_task; 2570 port->fp_last_task = FP_TASK_IDLE; 2571 2572 mutex_exit(&port->fp_mutex); 2573 2574 fctl_jobdone(job); 2575 break; 2576 } 2577 2578 case JOB_PORT_STARTUP: { 2579 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) { 2580 if (port->fp_statec_busy > 1) { 2581 mutex_exit(&port->fp_mutex); 2582 break; 2583 } 2584 mutex_exit(&port->fp_mutex); 2585 2586 FP_TRACE(FP_NHEAD2(9, rval), 2587 "Topology discovery failed"); 2588 break; 2589 } 2590 2591 /* 2592 * Attempt building device handles in case 2593 * of private Loop. 
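 *
 * A sketch of the synchronous job pattern used here (see fp_jobdone()
 * and fp_jobwait() further below):
 *
 *	job->job_counter = n;	(one count per outstanding command)
 *	... issue the commands; each completion path calls fp_jobdone(job) ...
 *	fp_jobwait(job);	(sema_p() until the counter drains to zero)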
2594 */ 2595 if (port->fp_topology == FC_TOP_PRIVATE_LOOP) { 2596 job->job_counter = 1; 2597 2598 fp_get_loopmap(port, job); 2599 mutex_exit(&port->fp_mutex); 2600 fp_jobwait(job); 2601 mutex_enter(&port->fp_mutex); 2602 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) { 2603 ASSERT(port->fp_total_devices == 0); 2604 port->fp_total_devices = 2605 port->fp_dev_count; 2606 } 2607 } else if (FC_IS_TOP_SWITCH(port->fp_topology)) { 2608 /* 2609 * Hack to avoid state changes going up early 2610 */ 2611 port->fp_statec_busy++; 2612 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB; 2613 2614 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 2615 fp_fabric_online(port, job); 2616 job->job_flags &= ~JOB_CANCEL_ULP_NOTIFICATION; 2617 } 2618 mutex_exit(&port->fp_mutex); 2619 fctl_jobdone(job); 2620 break; 2621 } 2622 2623 case JOB_PORT_ONLINE: { 2624 char *newtop; 2625 char *oldtop; 2626 uint32_t old_top; 2627 2628 fp_log_port_event(port, ESC_SUNFC_PORT_ONLINE); 2629 2630 /* 2631 * Bail out early if there are a lot of 2632 * state changes in the pipeline 2633 */ 2634 if (port->fp_statec_busy > 1) { 2635 --port->fp_statec_busy; 2636 mutex_exit(&port->fp_mutex); 2637 fctl_jobdone(job); 2638 break; 2639 } 2640 2641 switch (old_top = port->fp_topology) { 2642 case FC_TOP_PRIVATE_LOOP: 2643 oldtop = "Private Loop"; 2644 break; 2645 2646 case FC_TOP_PUBLIC_LOOP: 2647 oldtop = "Public Loop"; 2648 break; 2649 2650 case FC_TOP_PT_PT: 2651 oldtop = "Point to Point"; 2652 break; 2653 2654 case FC_TOP_FABRIC: 2655 oldtop = "Fabric"; 2656 break; 2657 2658 default: 2659 oldtop = NULL; 2660 break; 2661 } 2662 2663 port->fp_last_task = port->fp_task; 2664 port->fp_task = FP_TASK_ONLINE; 2665 2666 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) { 2667 2668 port->fp_task = port->fp_last_task; 2669 port->fp_last_task = FP_TASK_IDLE; 2670 2671 if (port->fp_statec_busy > 1) { 2672 --port->fp_statec_busy; 2673 mutex_exit(&port->fp_mutex); 2674 break; 2675 } 2676 2677 port->fp_state = FC_STATE_OFFLINE; 2678 2679 FP_TRACE(FP_NHEAD2(9, rval), 2680 "Topology discovery failed"); 2681 2682 if (--port->fp_statec_busy == 0) { 2683 port->fp_soft_state &= 2684 ~FP_SOFT_IN_STATEC_CB; 2685 } 2686 2687 if (port->fp_offline_tid == NULL) { 2688 port->fp_offline_tid = 2689 timeout(fp_offline_timeout, 2690 (caddr_t)port, fp_offline_ticks); 2691 } 2692 2693 mutex_exit(&port->fp_mutex); 2694 break; 2695 } 2696 2697 switch (port->fp_topology) { 2698 case FC_TOP_PRIVATE_LOOP: 2699 newtop = "Private Loop"; 2700 break; 2701 2702 case FC_TOP_PUBLIC_LOOP: 2703 newtop = "Public Loop"; 2704 break; 2705 2706 case FC_TOP_PT_PT: 2707 newtop = "Point to Point"; 2708 break; 2709 2710 case FC_TOP_FABRIC: 2711 newtop = "Fabric"; 2712 break; 2713 2714 default: 2715 newtop = NULL; 2716 break; 2717 } 2718 2719 if (oldtop && newtop && strcmp(oldtop, newtop)) { 2720 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 2721 "Change in FC Topology old = %s new = %s", 2722 oldtop, newtop); 2723 } 2724 2725 switch (port->fp_topology) { 2726 case FC_TOP_PRIVATE_LOOP: { 2727 int orphan = (old_top == FC_TOP_FABRIC || 2728 old_top == FC_TOP_PUBLIC_LOOP) ? 
1 : 0; 2729 2730 mutex_exit(&port->fp_mutex); 2731 fp_loop_online(port, job, orphan); 2732 break; 2733 } 2734 2735 case FC_TOP_PUBLIC_LOOP: 2736 /* FALLTHROUGH */ 2737 case FC_TOP_FABRIC: 2738 fp_fabric_online(port, job); 2739 mutex_exit(&port->fp_mutex); 2740 break; 2741 2742 case FC_TOP_PT_PT: 2743 fp_p2p_online(port, job); 2744 mutex_exit(&port->fp_mutex); 2745 break; 2746 2747 default: 2748 if (--port->fp_statec_busy != 0) { 2749 /* 2750 * Watch curiously at what the next 2751 * state transition can do. 2752 */ 2753 mutex_exit(&port->fp_mutex); 2754 break; 2755 } 2756 2757 FP_TRACE(FP_NHEAD2(9, 0), 2758 "Topology Unknown, Offlining the port.."); 2759 2760 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 2761 port->fp_state = FC_STATE_OFFLINE; 2762 2763 if (port->fp_offline_tid == NULL) { 2764 port->fp_offline_tid = 2765 timeout(fp_offline_timeout, 2766 (caddr_t)port, fp_offline_ticks); 2767 } 2768 mutex_exit(&port->fp_mutex); 2769 break; 2770 } 2771 2772 mutex_enter(&port->fp_mutex); 2773 2774 port->fp_task = port->fp_last_task; 2775 port->fp_last_task = FP_TASK_IDLE; 2776 2777 mutex_exit(&port->fp_mutex); 2778 2779 fctl_jobdone(job); 2780 break; 2781 } 2782 2783 case JOB_PLOGI_GROUP: { 2784 mutex_exit(&port->fp_mutex); 2785 fp_plogi_group(port, job); 2786 break; 2787 } 2788 2789 case JOB_UNSOL_REQUEST: { 2790 mutex_exit(&port->fp_mutex); 2791 fp_handle_unsol_buf(port, 2792 (fc_unsol_buf_t *)job->job_private, job); 2793 fctl_dealloc_job(job); 2794 break; 2795 } 2796 2797 case JOB_NS_CMD: { 2798 fctl_ns_req_t *ns_cmd; 2799 2800 mutex_exit(&port->fp_mutex); 2801 2802 job->job_flags |= JOB_TYPE_FP_ASYNC; 2803 ns_cmd = (fctl_ns_req_t *)job->job_private; 2804 if (ns_cmd->ns_cmd_code < NS_GA_NXT || 2805 ns_cmd->ns_cmd_code > NS_DA_ID) { 2806 job->job_result = FC_BADCMD; 2807 fctl_jobdone(job); 2808 break; 2809 } 2810 2811 if (FC_IS_CMD_A_REG(ns_cmd->ns_cmd_code)) { 2812 if (ns_cmd->ns_pd != NULL) { 2813 job->job_result = FC_BADOBJECT; 2814 fctl_jobdone(job); 2815 break; 2816 } 2817 2818 job->job_counter = 1; 2819 2820 rval = fp_ns_reg(port, ns_cmd->ns_pd, 2821 ns_cmd->ns_cmd_code, job, 0, KM_SLEEP); 2822 2823 if (rval != FC_SUCCESS) { 2824 job->job_result = rval; 2825 fctl_jobdone(job); 2826 } 2827 break; 2828 } 2829 job->job_result = FC_SUCCESS; 2830 job->job_counter = 1; 2831 2832 rval = fp_ns_query(port, ns_cmd, job, 0, KM_SLEEP); 2833 if (rval != FC_SUCCESS) { 2834 fctl_jobdone(job); 2835 } 2836 break; 2837 } 2838 2839 case JOB_LINK_RESET: { 2840 la_wwn_t *pwwn; 2841 uint32_t topology; 2842 2843 pwwn = (la_wwn_t *)job->job_private; 2844 ASSERT(pwwn != NULL); 2845 2846 topology = port->fp_topology; 2847 mutex_exit(&port->fp_mutex); 2848 2849 if (fctl_is_wwn_zero(pwwn) == FC_SUCCESS || 2850 topology == FC_TOP_PRIVATE_LOOP) { 2851 job->job_flags |= JOB_TYPE_FP_ASYNC; 2852 rval = port->fp_fca_tran->fca_reset( 2853 port->fp_fca_handle, FC_FCA_LINK_RESET); 2854 job->job_result = rval; 2855 fp_jobdone(job); 2856 } else { 2857 ASSERT((job->job_flags & 2858 JOB_TYPE_FP_ASYNC) == 0); 2859 2860 if (FC_IS_TOP_SWITCH(topology)) { 2861 rval = fp_remote_lip(port, pwwn, 2862 KM_SLEEP, job); 2863 } else { 2864 rval = FC_FAILURE; 2865 } 2866 if (rval != FC_SUCCESS) { 2867 job->job_result = rval; 2868 } 2869 fctl_jobdone(job); 2870 } 2871 break; 2872 } 2873 2874 default: 2875 mutex_exit(&port->fp_mutex); 2876 job->job_result = FC_BADCMD; 2877 fctl_jobdone(job); 2878 break; 2879 } 2880 } 2881 /* NOTREACHED */ 2882 } 2883 2884 2885 /* 2886 * Perform FC port bring up initialization 2887 */ 2888 static int 2889 
fp_port_startup(fc_local_port_t *port, job_request_t *job) 2890 { 2891 int rval; 2892 uint32_t state; 2893 uint32_t src_id; 2894 fc_lilpmap_t *lilp_map; 2895 2896 ASSERT(MUTEX_HELD(&port->fp_mutex)); 2897 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 2898 2899 FP_DTRACE(FP_NHEAD1(2, 0), "Entering fp_port_startup;" 2900 " port=%p, job=%p", port, job); 2901 2902 port->fp_topology = FC_TOP_UNKNOWN; 2903 port->fp_port_id.port_id = 0; 2904 state = FC_PORT_STATE_MASK(port->fp_state); 2905 2906 if (state == FC_STATE_OFFLINE) { 2907 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN; 2908 job->job_result = FC_OFFLINE; 2909 mutex_exit(&port->fp_mutex); 2910 fctl_jobdone(job); 2911 mutex_enter(&port->fp_mutex); 2912 return (FC_OFFLINE); 2913 } 2914 2915 if (state == FC_STATE_LOOP) { 2916 port->fp_port_type.port_type = FC_NS_PORT_NL; 2917 mutex_exit(&port->fp_mutex); 2918 2919 lilp_map = &port->fp_lilp_map; 2920 if ((rval = fp_get_lilpmap(port, lilp_map)) != FC_SUCCESS) { 2921 job->job_result = FC_FAILURE; 2922 fctl_jobdone(job); 2923 2924 FP_TRACE(FP_NHEAD1(9, rval), 2925 "LILP map Invalid or not present"); 2926 mutex_enter(&port->fp_mutex); 2927 return (FC_FAILURE); 2928 } 2929 2930 if (lilp_map->lilp_length == 0) { 2931 job->job_result = FC_NO_MAP; 2932 fctl_jobdone(job); 2933 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 2934 "LILP map length zero"); 2935 mutex_enter(&port->fp_mutex); 2936 return (FC_NO_MAP); 2937 } 2938 src_id = lilp_map->lilp_myalpa & 0xFF; 2939 } else { 2940 fc_remote_port_t *pd; 2941 fc_fca_pm_t pm; 2942 fc_fca_p2p_info_t p2p_info; 2943 int pd_recepient; 2944 2945 /* 2946 * Get P2P remote port info if possible 2947 */ 2948 bzero((caddr_t)&pm, sizeof (pm)); 2949 2950 pm.pm_cmd_flags = FC_FCA_PM_READ; 2951 pm.pm_cmd_code = FC_PORT_GET_P2P_INFO; 2952 pm.pm_data_len = sizeof (fc_fca_p2p_info_t); 2953 pm.pm_data_buf = (caddr_t)&p2p_info; 2954 2955 rval = port->fp_fca_tran->fca_port_manage( 2956 port->fp_fca_handle, &pm); 2957 2958 if (rval == FC_SUCCESS) { 2959 port->fp_port_id.port_id = p2p_info.fca_d_id; 2960 port->fp_port_type.port_type = FC_NS_PORT_N; 2961 port->fp_topology = FC_TOP_PT_PT; 2962 port->fp_total_devices = 1; 2963 pd_recepient = fctl_wwn_cmp( 2964 &port->fp_service_params.nport_ww_name, 2965 &p2p_info.pwwn) < 0 ? 
2966 PD_PLOGI_RECEPIENT : PD_PLOGI_INITIATOR; 2967 mutex_exit(&port->fp_mutex); 2968 pd = fctl_create_remote_port(port, 2969 &p2p_info.nwwn, 2970 &p2p_info.pwwn, 2971 p2p_info.d_id, 2972 pd_recepient, KM_NOSLEEP); 2973 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup;" 2974 " P2P port=%p pd=%p fp %x pd %x", port, pd, 2975 port->fp_port_id.port_id, p2p_info.d_id); 2976 mutex_enter(&port->fp_mutex); 2977 return (FC_SUCCESS); 2978 } 2979 port->fp_port_type.port_type = FC_NS_PORT_N; 2980 mutex_exit(&port->fp_mutex); 2981 src_id = 0; 2982 } 2983 2984 job->job_counter = 1; 2985 job->job_result = FC_SUCCESS; 2986 2987 if ((rval = fp_fabric_login(port, src_id, job, FP_CMD_PLOGI_DONT_CARE, 2988 KM_SLEEP)) != FC_SUCCESS) { 2989 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN; 2990 job->job_result = FC_FAILURE; 2991 fctl_jobdone(job); 2992 2993 mutex_enter(&port->fp_mutex); 2994 if (port->fp_statec_busy <= 1) { 2995 mutex_exit(&port->fp_mutex); 2996 fp_printf(port, CE_NOTE, FP_LOG_ONLY, rval, NULL, 2997 "Couldn't transport FLOGI"); 2998 mutex_enter(&port->fp_mutex); 2999 } 3000 return (FC_FAILURE); 3001 } 3002 3003 fp_jobwait(job); 3004 3005 mutex_enter(&port->fp_mutex); 3006 if (job->job_result == FC_SUCCESS) { 3007 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 3008 mutex_exit(&port->fp_mutex); 3009 fp_ns_init(port, job, KM_SLEEP); 3010 mutex_enter(&port->fp_mutex); 3011 } 3012 } else { 3013 if (state == FC_STATE_LOOP) { 3014 port->fp_topology = FC_TOP_PRIVATE_LOOP; 3015 port->fp_port_id.port_id = 3016 port->fp_lilp_map.lilp_myalpa & 0xFF; 3017 } 3018 } 3019 3020 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup; port=%p, job=%p", 3021 port, job); 3022 3023 return (FC_SUCCESS); 3024 } 3025 3026 3027 /* 3028 * Perform ULP invocations following FC port startup 3029 */ 3030 /* ARGSUSED */ 3031 static void 3032 fp_startup_done(opaque_t arg, uchar_t result) 3033 { 3034 fc_local_port_t *port = arg; 3035 3036 fp_attach_ulps(port, FC_CMD_ATTACH); 3037 3038 FP_DTRACE(FP_NHEAD1(2, 0), "fp_startup almost complete; port=%p", port); 3039 } 3040 3041 3042 /* 3043 * Perform ULP port attach 3044 */ 3045 static void 3046 fp_ulp_port_attach(void *arg) 3047 { 3048 fp_soft_attach_t *att = (fp_soft_attach_t *)arg; 3049 fc_local_port_t *port = att->att_port; 3050 3051 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of" 3052 " ULPs begin; port=%p, cmd=%x", port, att->att_cmd); 3053 3054 fctl_attach_ulps(att->att_port, att->att_cmd, &modlinkage); 3055 3056 if (att->att_need_pm_idle == B_TRUE) { 3057 fctl_idle_port(port); 3058 } 3059 3060 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of" 3061 " ULPs end; port=%p, cmd=%x", port, att->att_cmd); 3062 3063 mutex_enter(&att->att_port->fp_mutex); 3064 att->att_port->fp_ulp_attach = 0; 3065 3066 port->fp_task = port->fp_last_task; 3067 port->fp_last_task = FP_TASK_IDLE; 3068 3069 cv_signal(&att->att_port->fp_attach_cv); 3070 3071 mutex_exit(&att->att_port->fp_mutex); 3072 3073 kmem_free(att, sizeof (fp_soft_attach_t)); 3074 } 3075 3076 /* 3077 * Entry point to funnel all requests down to FCAs 3078 */ 3079 static int 3080 fp_sendcmd(fc_local_port_t *port, fp_cmd_t *cmd, opaque_t fca_handle) 3081 { 3082 int rval; 3083 3084 mutex_enter(&port->fp_mutex); 3085 if (port->fp_statec_busy > 1 || (cmd->cmd_ulp_pkt != NULL && 3086 (port->fp_statec_busy || FC_PORT_STATE_MASK(port->fp_state) == 3087 FC_STATE_OFFLINE))) { 3088 /* 3089 * This means there is more than one state change 3090 * at this point of time - Since they are processed 3091 * serially, any processing of the current one should 3092 * be 
failed, failed and move up in processing the next 3093 */ 3094 cmd->cmd_pkt.pkt_state = FC_PKT_ELS_IN_PROGRESS; 3095 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE; 3096 if (cmd->cmd_job) { 3097 /* 3098 * A state change that is going to be invalidated 3099 * by another one already in the port driver's queue 3100 * need not go up to all ULPs. This will minimize 3101 * needless processing and ripples in ULP modules 3102 */ 3103 cmd->cmd_job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 3104 } 3105 mutex_exit(&port->fp_mutex); 3106 return (FC_STATEC_BUSY); 3107 } 3108 3109 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 3110 cmd->cmd_pkt.pkt_state = FC_PKT_PORT_OFFLINE; 3111 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE; 3112 mutex_exit(&port->fp_mutex); 3113 3114 return (FC_OFFLINE); 3115 } 3116 mutex_exit(&port->fp_mutex); 3117 3118 rval = cmd->cmd_transport(fca_handle, &cmd->cmd_pkt); 3119 if (rval != FC_SUCCESS) { 3120 if (rval == FC_TRAN_BUSY) { 3121 cmd->cmd_retry_interval = fp_retry_delay; 3122 rval = fp_retry_cmd(&cmd->cmd_pkt); 3123 if (rval == FC_FAILURE) { 3124 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_BSY; 3125 } 3126 } 3127 } else { 3128 mutex_enter(&port->fp_mutex); 3129 port->fp_out_fpcmds++; 3130 mutex_exit(&port->fp_mutex); 3131 } 3132 3133 return (rval); 3134 } 3135 3136 3137 /* 3138 * Each time a timeout kicks in, walk the wait queue, decrement the 3139 * the retry_interval, when the retry_interval becomes less than 3140 * or equal to zero, re-transport the command: If the re-transport 3141 * fails with BUSY, enqueue the command in the wait queue. 3142 * 3143 * In order to prevent looping forever because of commands enqueued 3144 * from within this function itself, save the current tail pointer 3145 * (in cur_tail) and exit the loop after serving this command. 
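 *
 * In outline (matching the code below):
 *
 *	cur_tail = port->fp_wait_tail;
 *	while ((cmd = fp_deque_cmd(port)) != NULL) {
 *		cmd->cmd_retry_interval -= fp_retry_ticker;
 *		... retransmit, complete, or re-enqueue cmd ...
 *		if (cmd == cur_tail)
 *			break;
 *	}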
3146 */ 3147 static void 3148 fp_resendcmd(void *port_handle) 3149 { 3150 int rval; 3151 fc_local_port_t *port; 3152 fp_cmd_t *cmd; 3153 fp_cmd_t *cur_tail; 3154 3155 port = port_handle; 3156 mutex_enter(&port->fp_mutex); 3157 cur_tail = port->fp_wait_tail; 3158 mutex_exit(&port->fp_mutex); 3159 3160 while ((cmd = fp_deque_cmd(port)) != NULL) { 3161 cmd->cmd_retry_interval -= fp_retry_ticker; 3162 /* Check if we are detaching */ 3163 if (port->fp_soft_state & 3164 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS)) { 3165 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR; 3166 cmd->cmd_pkt.pkt_reason = 0; 3167 fp_iodone(cmd); 3168 } else if (cmd->cmd_retry_interval <= 0) { 3169 rval = cmd->cmd_transport(port->fp_fca_handle, 3170 &cmd->cmd_pkt); 3171 3172 if (rval != FC_SUCCESS) { 3173 if (cmd->cmd_pkt.pkt_state == FC_PKT_TRAN_BSY) { 3174 if (--cmd->cmd_retry_count) { 3175 fp_enque_cmd(port, cmd); 3176 if (cmd == cur_tail) { 3177 break; 3178 } 3179 continue; 3180 } 3181 cmd->cmd_pkt.pkt_state = 3182 FC_PKT_TRAN_BSY; 3183 } else { 3184 cmd->cmd_pkt.pkt_state = 3185 FC_PKT_TRAN_ERROR; 3186 } 3187 cmd->cmd_pkt.pkt_reason = 0; 3188 fp_iodone(cmd); 3189 } else { 3190 mutex_enter(&port->fp_mutex); 3191 port->fp_out_fpcmds++; 3192 mutex_exit(&port->fp_mutex); 3193 } 3194 } else { 3195 fp_enque_cmd(port, cmd); 3196 } 3197 3198 if (cmd == cur_tail) { 3199 break; 3200 } 3201 } 3202 3203 mutex_enter(&port->fp_mutex); 3204 if (port->fp_wait_head) { 3205 timeout_id_t tid; 3206 3207 mutex_exit(&port->fp_mutex); 3208 tid = timeout(fp_resendcmd, (caddr_t)port, 3209 fp_retry_ticks); 3210 mutex_enter(&port->fp_mutex); 3211 port->fp_wait_tid = tid; 3212 } else { 3213 port->fp_wait_tid = NULL; 3214 } 3215 mutex_exit(&port->fp_mutex); 3216 } 3217 3218 3219 /* 3220 * Handle Local, Fabric, N_Port, Transport (whatever that means) BUSY here. 3221 * 3222 * Yes, as you can see below, cmd_retry_count is used here too. That means 3223 * the retries for BUSY are less if there were transport failures (transport 3224 * failure means fca_transport failure). 
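 * Each BUSY retry decrements that same counter before the command is
 * queued again:
 *
 *	if (--cmd->cmd_retry_count) {
 *		fp_enque_cmd(cmd->cmd_port, cmd);
 *		return (FC_SUCCESS);
 *	}
 *	return (FC_FAILURE);
 *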
The goal is not to exceed overall 3225 * retries set in the cmd_retry_count (whatever may be the reason for retry) 3226 * 3227 * Return Values: 3228 * FC_SUCCESS 3229 * FC_FAILURE 3230 */ 3231 static int 3232 fp_retry_cmd(fc_packet_t *pkt) 3233 { 3234 fp_cmd_t *cmd; 3235 3236 cmd = pkt->pkt_ulp_private; 3237 3238 if (--cmd->cmd_retry_count) { 3239 fp_enque_cmd(cmd->cmd_port, cmd); 3240 return (FC_SUCCESS); 3241 } else { 3242 return (FC_FAILURE); 3243 } 3244 } 3245 3246 3247 /* 3248 * Queue up FC packet for deferred retry 3249 */ 3250 static void 3251 fp_enque_cmd(fc_local_port_t *port, fp_cmd_t *cmd) 3252 { 3253 timeout_id_t tid; 3254 3255 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3256 3257 #ifdef DEBUG 3258 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, &cmd->cmd_pkt, 3259 "Retrying ELS for %x", cmd->cmd_pkt.pkt_cmd_fhdr.d_id); 3260 #endif 3261 3262 mutex_enter(&port->fp_mutex); 3263 if (port->fp_wait_tail) { 3264 port->fp_wait_tail->cmd_next = cmd; 3265 port->fp_wait_tail = cmd; 3266 } else { 3267 ASSERT(port->fp_wait_head == NULL); 3268 port->fp_wait_head = port->fp_wait_tail = cmd; 3269 if (port->fp_wait_tid == NULL) { 3270 mutex_exit(&port->fp_mutex); 3271 tid = timeout(fp_resendcmd, (caddr_t)port, 3272 fp_retry_ticks); 3273 mutex_enter(&port->fp_mutex); 3274 port->fp_wait_tid = tid; 3275 } 3276 } 3277 mutex_exit(&port->fp_mutex); 3278 } 3279 3280 3281 /* 3282 * Handle all RJT codes 3283 */ 3284 static int 3285 fp_handle_reject(fc_packet_t *pkt) 3286 { 3287 int rval = FC_FAILURE; 3288 uchar_t next_class; 3289 fp_cmd_t *cmd; 3290 fc_local_port_t *port; 3291 3292 cmd = pkt->pkt_ulp_private; 3293 port = cmd->cmd_port; 3294 3295 switch (pkt->pkt_state) { 3296 case FC_PKT_FABRIC_RJT: 3297 case FC_PKT_NPORT_RJT: 3298 if (pkt->pkt_reason == FC_REASON_CLASS_NOT_SUPP) { 3299 next_class = fp_get_nextclass(cmd->cmd_port, 3300 FC_TRAN_CLASS(pkt->pkt_tran_flags)); 3301 3302 if (next_class == FC_TRAN_CLASS_INVALID) { 3303 return (rval); 3304 } 3305 pkt->pkt_tran_flags = FC_TRAN_INTR | next_class; 3306 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 3307 3308 rval = fp_sendcmd(cmd->cmd_port, cmd, 3309 cmd->cmd_port->fp_fca_handle); 3310 3311 if (rval != FC_SUCCESS) { 3312 pkt->pkt_state = FC_PKT_TRAN_ERROR; 3313 } 3314 } 3315 break; 3316 3317 case FC_PKT_LS_RJT: 3318 case FC_PKT_BA_RJT: 3319 if ((pkt->pkt_reason == FC_REASON_LOGICAL_ERROR) || 3320 (pkt->pkt_reason == FC_REASON_LOGICAL_BSY)) { 3321 cmd->cmd_retry_interval = fp_retry_delay; 3322 rval = fp_retry_cmd(pkt); 3323 } 3324 break; 3325 3326 case FC_PKT_FS_RJT: 3327 if ((pkt->pkt_reason == FC_REASON_FS_LOGICAL_BUSY) || 3328 ((pkt->pkt_reason == FC_REASON_FS_CMD_UNABLE) && 3329 (pkt->pkt_expln == 0x00))) { 3330 cmd->cmd_retry_interval = fp_retry_delay; 3331 rval = fp_retry_cmd(pkt); 3332 } 3333 break; 3334 3335 case FC_PKT_LOCAL_RJT: 3336 if (pkt->pkt_reason == FC_REASON_QFULL) { 3337 cmd->cmd_retry_interval = fp_retry_delay; 3338 rval = fp_retry_cmd(pkt); 3339 } 3340 break; 3341 3342 default: 3343 FP_TRACE(FP_NHEAD1(1, 0), 3344 "fp_handle_reject(): Invalid pkt_state"); 3345 break; 3346 } 3347 3348 return (rval); 3349 } 3350 3351 3352 /* 3353 * Return the next class of service supported by the FCA 3354 */ 3355 static uchar_t 3356 fp_get_nextclass(fc_local_port_t *port, uchar_t cur_class) 3357 { 3358 uchar_t next_class; 3359 3360 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3361 3362 switch (cur_class) { 3363 case FC_TRAN_CLASS_INVALID: 3364 if (port->fp_cos & FC_NS_CLASS1) { 3365 next_class = FC_TRAN_CLASS1; 3366 break; 3367 } 3368 /* FALLTHROUGH */ 3369 3370 case 
FC_TRAN_CLASS1: 3371 if (port->fp_cos & FC_NS_CLASS2) { 3372 next_class = FC_TRAN_CLASS2; 3373 break; 3374 } 3375 /* FALLTHROUGH */ 3376 3377 case FC_TRAN_CLASS2: 3378 if (port->fp_cos & FC_NS_CLASS3) { 3379 next_class = FC_TRAN_CLASS3; 3380 break; 3381 } 3382 /* FALLTHROUGH */ 3383 3384 case FC_TRAN_CLASS3: 3385 default: 3386 next_class = FC_TRAN_CLASS_INVALID; 3387 break; 3388 } 3389 3390 return (next_class); 3391 } 3392 3393 3394 /* 3395 * Determine if a class of service is supported by the FCA 3396 */ 3397 static int 3398 fp_is_class_supported(uint32_t cos, uchar_t tran_class) 3399 { 3400 int rval; 3401 3402 switch (tran_class) { 3403 case FC_TRAN_CLASS1: 3404 if (cos & FC_NS_CLASS1) { 3405 rval = FC_SUCCESS; 3406 } else { 3407 rval = FC_FAILURE; 3408 } 3409 break; 3410 3411 case FC_TRAN_CLASS2: 3412 if (cos & FC_NS_CLASS2) { 3413 rval = FC_SUCCESS; 3414 } else { 3415 rval = FC_FAILURE; 3416 } 3417 break; 3418 3419 case FC_TRAN_CLASS3: 3420 if (cos & FC_NS_CLASS3) { 3421 rval = FC_SUCCESS; 3422 } else { 3423 rval = FC_FAILURE; 3424 } 3425 break; 3426 3427 default: 3428 rval = FC_FAILURE; 3429 break; 3430 } 3431 3432 return (rval); 3433 } 3434 3435 3436 /* 3437 * Dequeue FC packet for retry 3438 */ 3439 static fp_cmd_t * 3440 fp_deque_cmd(fc_local_port_t *port) 3441 { 3442 fp_cmd_t *cmd; 3443 3444 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3445 3446 mutex_enter(&port->fp_mutex); 3447 3448 if (port->fp_wait_head == NULL) { 3449 /* 3450 * To avoid races, NULL the fp_wait_tid as 3451 * we are about to exit the timeout thread. 3452 */ 3453 port->fp_wait_tid = NULL; 3454 mutex_exit(&port->fp_mutex); 3455 return (NULL); 3456 } 3457 3458 cmd = port->fp_wait_head; 3459 port->fp_wait_head = cmd->cmd_next; 3460 cmd->cmd_next = NULL; 3461 3462 if (port->fp_wait_head == NULL) { 3463 port->fp_wait_tail = NULL; 3464 } 3465 mutex_exit(&port->fp_mutex); 3466 3467 return (cmd); 3468 } 3469 3470 3471 /* 3472 * Wait for job completion 3473 */ 3474 static void 3475 fp_jobwait(job_request_t *job) 3476 { 3477 sema_p(&job->job_port_sema); 3478 } 3479 3480 3481 /* 3482 * Convert FC packet state to FC errno 3483 */ 3484 int 3485 fp_state_to_rval(uchar_t state) 3486 { 3487 int count; 3488 3489 for (count = 0; count < sizeof (fp_xlat) / 3490 sizeof (fp_xlat[0]); count++) { 3491 if (fp_xlat[count].xlat_state == state) { 3492 return (fp_xlat[count].xlat_rval); 3493 } 3494 } 3495 3496 return (FC_FAILURE); 3497 } 3498 3499 3500 /* 3501 * For Synchronous I/O requests, the caller is 3502 * expected to do fctl_jobdone(if necessary) 3503 * 3504 * We want to preserve at least one failure in the 3505 * job_result if it happens. 
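 * That is done by overwriting job_result only while it still reads
 * FC_SUCCESS:
 *
 *	mutex_enter(&job->job_mutex);
 *	if (job->job_result == FC_SUCCESS)
 *		job->job_result = fp_state_to_rval(cmd->cmd_pkt.pkt_state);
 *	mutex_exit(&job->job_mutex);
 *
 * so a later success never masks an earlier failure.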
3506 * 3507 */ 3508 static void 3509 fp_iodone(fp_cmd_t *cmd) 3510 { 3511 fc_packet_t *ulp_pkt = cmd->cmd_ulp_pkt; 3512 job_request_t *job = cmd->cmd_job; 3513 fc_remote_port_t *pd = cmd->cmd_pkt.pkt_pd; 3514 3515 ASSERT(job != NULL); 3516 ASSERT(cmd->cmd_port != NULL); 3517 ASSERT(&cmd->cmd_pkt != NULL); 3518 3519 mutex_enter(&job->job_mutex); 3520 if (job->job_result == FC_SUCCESS) { 3521 job->job_result = fp_state_to_rval(cmd->cmd_pkt.pkt_state); 3522 } 3523 mutex_exit(&job->job_mutex); 3524 3525 if (pd) { 3526 mutex_enter(&pd->pd_mutex); 3527 pd->pd_flags = PD_IDLE; 3528 mutex_exit(&pd->pd_mutex); 3529 } 3530 3531 if (ulp_pkt) { 3532 if (pd && cmd->cmd_flags & FP_CMD_DELDEV_ON_ERROR && 3533 FP_IS_PKT_ERROR(ulp_pkt)) { 3534 fc_local_port_t *port; 3535 fc_remote_node_t *node; 3536 3537 port = cmd->cmd_port; 3538 3539 mutex_enter(&pd->pd_mutex); 3540 pd->pd_state = PORT_DEVICE_INVALID; 3541 pd->pd_ref_count--; 3542 node = pd->pd_remote_nodep; 3543 mutex_exit(&pd->pd_mutex); 3544 3545 ASSERT(node != NULL); 3546 ASSERT(port != NULL); 3547 3548 if (fctl_destroy_remote_port(port, pd) == 0) { 3549 fctl_destroy_remote_node(node); 3550 } 3551 3552 ulp_pkt->pkt_pd = NULL; 3553 } 3554 3555 ulp_pkt->pkt_comp(ulp_pkt); 3556 } 3557 3558 fp_free_pkt(cmd); 3559 fp_jobdone(job); 3560 } 3561 3562 3563 /* 3564 * Job completion handler 3565 */ 3566 static void 3567 fp_jobdone(job_request_t *job) 3568 { 3569 mutex_enter(&job->job_mutex); 3570 ASSERT(job->job_counter > 0); 3571 3572 if (--job->job_counter != 0) { 3573 mutex_exit(&job->job_mutex); 3574 return; 3575 } 3576 3577 if (job->job_ulp_pkts) { 3578 ASSERT(job->job_ulp_listlen > 0); 3579 kmem_free(job->job_ulp_pkts, 3580 sizeof (fc_packet_t *) * job->job_ulp_listlen); 3581 } 3582 3583 if (job->job_flags & JOB_TYPE_FP_ASYNC) { 3584 mutex_exit(&job->job_mutex); 3585 fctl_jobdone(job); 3586 } else { 3587 mutex_exit(&job->job_mutex); 3588 sema_v(&job->job_port_sema); 3589 } 3590 } 3591 3592 3593 /* 3594 * Try to perform shutdown of a port during a detach. No return 3595 * value since the detach should not fail because the port shutdown 3596 * failed. 3597 */ 3598 static void 3599 fp_port_shutdown(fc_local_port_t *port, job_request_t *job) 3600 { 3601 int index; 3602 int count; 3603 int flags; 3604 fp_cmd_t *cmd; 3605 struct pwwn_hash *head; 3606 fc_remote_port_t *pd; 3607 3608 ASSERT(MUTEX_HELD(&port->fp_mutex)); 3609 3610 job->job_result = FC_SUCCESS; 3611 3612 if (port->fp_taskq) { 3613 /* 3614 * We must release the mutex here to ensure that other 3615 * potential jobs can complete their processing. Many 3616 * also need this mutex. 3617 */ 3618 mutex_exit(&port->fp_mutex); 3619 taskq_wait(port->fp_taskq); 3620 mutex_enter(&port->fp_mutex); 3621 } 3622 3623 if (port->fp_offline_tid) { 3624 timeout_id_t tid; 3625 3626 tid = port->fp_offline_tid; 3627 port->fp_offline_tid = NULL; 3628 mutex_exit(&port->fp_mutex); 3629 (void) untimeout(tid); 3630 mutex_enter(&port->fp_mutex); 3631 } 3632 3633 if (port->fp_wait_tid) { 3634 timeout_id_t tid; 3635 3636 tid = port->fp_wait_tid; 3637 port->fp_wait_tid = NULL; 3638 mutex_exit(&port->fp_mutex); 3639 (void) untimeout(tid); 3640 } else { 3641 mutex_exit(&port->fp_mutex); 3642 } 3643 3644 /* 3645 * While we cancel the timeout, let's also return the 3646 * the outstanding requests back to the callers. 
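 * Each command still sitting on the retry (wait) queue is dequeued and
 * completed with FC_OFFLINE so that the job which issued it can finish.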
3647 */ 3648 while ((cmd = fp_deque_cmd(port)) != NULL) { 3649 ASSERT(cmd->cmd_job != NULL); 3650 cmd->cmd_job->job_result = FC_OFFLINE; 3651 fp_iodone(cmd); 3652 } 3653 3654 /* 3655 * Gracefully LOGO with all the devices logged in. 3656 */ 3657 mutex_enter(&port->fp_mutex); 3658 3659 for (count = index = 0; index < pwwn_table_size; index++) { 3660 head = &port->fp_pwwn_table[index]; 3661 pd = head->pwwn_head; 3662 while (pd != NULL) { 3663 mutex_enter(&pd->pd_mutex); 3664 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 3665 count++; 3666 } 3667 mutex_exit(&pd->pd_mutex); 3668 pd = pd->pd_wwn_hnext; 3669 } 3670 } 3671 3672 if (job->job_flags & JOB_TYPE_FP_ASYNC) { 3673 flags = job->job_flags; 3674 job->job_flags &= ~JOB_TYPE_FP_ASYNC; 3675 } else { 3676 flags = 0; 3677 } 3678 if (count) { 3679 job->job_counter = count; 3680 3681 for (index = 0; index < pwwn_table_size; index++) { 3682 head = &port->fp_pwwn_table[index]; 3683 pd = head->pwwn_head; 3684 while (pd != NULL) { 3685 mutex_enter(&pd->pd_mutex); 3686 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 3687 ASSERT(pd->pd_login_count > 0); 3688 /* 3689 * Force the counter to ONE in order 3690 * for us to really send LOGO els. 3691 */ 3692 pd->pd_login_count = 1; 3693 mutex_exit(&pd->pd_mutex); 3694 mutex_exit(&port->fp_mutex); 3695 (void) fp_logout(port, pd, job); 3696 mutex_enter(&port->fp_mutex); 3697 } else { 3698 mutex_exit(&pd->pd_mutex); 3699 } 3700 pd = pd->pd_wwn_hnext; 3701 } 3702 } 3703 mutex_exit(&port->fp_mutex); 3704 fp_jobwait(job); 3705 } else { 3706 mutex_exit(&port->fp_mutex); 3707 } 3708 3709 if (job->job_result != FC_SUCCESS) { 3710 FP_TRACE(FP_NHEAD1(9, 0), 3711 "Can't logout all devices. Proceeding with" 3712 " port shutdown"); 3713 job->job_result = FC_SUCCESS; 3714 } 3715 3716 fctl_destroy_all_remote_ports(port); 3717 3718 mutex_enter(&port->fp_mutex); 3719 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 3720 mutex_exit(&port->fp_mutex); 3721 fp_ns_fini(port, job); 3722 } else { 3723 mutex_exit(&port->fp_mutex); 3724 } 3725 3726 if (flags) { 3727 job->job_flags = flags; 3728 } 3729 3730 mutex_enter(&port->fp_mutex); 3731 3732 } 3733 3734 3735 /* 3736 * Build the port driver's data structures based on the AL_PA list 3737 */ 3738 static void 3739 fp_get_loopmap(fc_local_port_t *port, job_request_t *job) 3740 { 3741 int rval; 3742 int flag; 3743 int count; 3744 uint32_t d_id; 3745 fc_remote_port_t *pd; 3746 fc_lilpmap_t *lilp_map; 3747 3748 ASSERT(MUTEX_HELD(&port->fp_mutex)); 3749 3750 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 3751 job->job_result = FC_OFFLINE; 3752 mutex_exit(&port->fp_mutex); 3753 fp_jobdone(job); 3754 mutex_enter(&port->fp_mutex); 3755 return; 3756 } 3757 3758 if (port->fp_lilp_map.lilp_length == 0) { 3759 mutex_exit(&port->fp_mutex); 3760 job->job_result = FC_NO_MAP; 3761 fp_jobdone(job); 3762 mutex_enter(&port->fp_mutex); 3763 return; 3764 } 3765 mutex_exit(&port->fp_mutex); 3766 3767 lilp_map = &port->fp_lilp_map; 3768 job->job_counter = lilp_map->lilp_length; 3769 3770 if (job->job_code == JOB_PORT_GETMAP_PLOGI_ALL) { 3771 flag = FP_CMD_PLOGI_RETAIN; 3772 } else { 3773 flag = FP_CMD_PLOGI_DONT_CARE; 3774 } 3775 3776 for (count = 0; count < lilp_map->lilp_length; count++) { 3777 d_id = lilp_map->lilp_alpalist[count]; 3778 3779 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) { 3780 fp_jobdone(job); 3781 continue; 3782 } 3783 3784 pd = fctl_get_remote_port_by_did(port, d_id); 3785 if (pd) { 3786 mutex_enter(&pd->pd_mutex); 3787 if (flag == FP_CMD_PLOGI_DONT_CARE || 3788 pd->pd_state == 
PORT_DEVICE_LOGGED_IN) { 3789 mutex_exit(&pd->pd_mutex); 3790 fp_jobdone(job); 3791 continue; 3792 } 3793 mutex_exit(&pd->pd_mutex); 3794 } 3795 3796 rval = fp_port_login(port, d_id, job, flag, 3797 KM_SLEEP, pd, NULL); 3798 if (rval != FC_SUCCESS) { 3799 fp_jobdone(job); 3800 } 3801 } 3802 3803 mutex_enter(&port->fp_mutex); 3804 } 3805 3806 3807 /* 3808 * Perform loop ONLINE processing 3809 */ 3810 static void 3811 fp_loop_online(fc_local_port_t *port, job_request_t *job, int orphan) 3812 { 3813 int count; 3814 int rval; 3815 uint32_t d_id; 3816 uint32_t listlen; 3817 fc_lilpmap_t *lilp_map; 3818 fc_remote_port_t *pd; 3819 fc_portmap_t *changelist; 3820 3821 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3822 3823 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online begin; port=%p, job=%p", 3824 port, job); 3825 3826 lilp_map = &port->fp_lilp_map; 3827 3828 if (lilp_map->lilp_length) { 3829 mutex_enter(&port->fp_mutex); 3830 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) { 3831 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET; 3832 mutex_exit(&port->fp_mutex); 3833 delay(drv_usectohz(PLDA_RR_TOV * 1000 * 1000)); 3834 } else { 3835 mutex_exit(&port->fp_mutex); 3836 } 3837 3838 job->job_counter = lilp_map->lilp_length; 3839 3840 for (count = 0; count < lilp_map->lilp_length; count++) { 3841 d_id = lilp_map->lilp_alpalist[count]; 3842 3843 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) { 3844 fp_jobdone(job); 3845 continue; 3846 } 3847 3848 pd = fctl_get_remote_port_by_did(port, d_id); 3849 if (pd != NULL) { 3850 #ifdef DEBUG 3851 mutex_enter(&pd->pd_mutex); 3852 if (pd->pd_recepient == PD_PLOGI_INITIATOR) { 3853 ASSERT(pd->pd_type != PORT_DEVICE_OLD); 3854 } 3855 mutex_exit(&pd->pd_mutex); 3856 #endif 3857 fp_jobdone(job); 3858 continue; 3859 } 3860 3861 rval = fp_port_login(port, d_id, job, 3862 FP_CMD_PLOGI_DONT_CARE, KM_SLEEP, pd, NULL); 3863 3864 if (rval != FC_SUCCESS) { 3865 fp_jobdone(job); 3866 } 3867 } 3868 fp_jobwait(job); 3869 } 3870 listlen = 0; 3871 changelist = NULL; 3872 3873 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 3874 mutex_enter(&port->fp_mutex); 3875 ASSERT(port->fp_statec_busy > 0); 3876 if (port->fp_statec_busy == 1) { 3877 mutex_exit(&port->fp_mutex); 3878 fctl_fillout_map(port, &changelist, &listlen, 3879 1, 0, orphan); 3880 3881 mutex_enter(&port->fp_mutex); 3882 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) { 3883 ASSERT(port->fp_total_devices == 0); 3884 port->fp_total_devices = port->fp_dev_count; 3885 } 3886 } else { 3887 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 3888 } 3889 mutex_exit(&port->fp_mutex); 3890 } 3891 3892 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 3893 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 3894 listlen, listlen, KM_SLEEP); 3895 } else { 3896 mutex_enter(&port->fp_mutex); 3897 if (--port->fp_statec_busy == 0) { 3898 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 3899 } 3900 ASSERT(changelist == NULL && listlen == 0); 3901 mutex_exit(&port->fp_mutex); 3902 } 3903 3904 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online end; port=%p, job=%p", 3905 port, job); 3906 } 3907 3908 3909 /* 3910 * Get an Arbitrated Loop map from the underlying FCA 3911 */ 3912 static int 3913 fp_get_lilpmap(fc_local_port_t *port, fc_lilpmap_t *lilp_map) 3914 { 3915 int rval; 3916 3917 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap Begin; port=%p, map=%p", 3918 port, lilp_map); 3919 3920 bzero((caddr_t)lilp_map, sizeof (fc_lilpmap_t)); 3921 rval = port->fp_fca_tran->fca_getmap(port->fp_fca_handle, lilp_map); 3922 lilp_map->lilp_magic &= 0xFF; /* Ignore 
upper byte */ 3923 3924 if (rval != FC_SUCCESS) { 3925 rval = FC_NO_MAP; 3926 } else if (lilp_map->lilp_length == 0 && 3927 (lilp_map->lilp_magic >= MAGIC_LISM && 3928 lilp_map->lilp_magic < MAGIC_LIRP)) { 3929 uchar_t lilp_length; 3930 3931 /* 3932 * Since the map length is zero, provide all 3933 * the valid AL_PAs for NL_ports discovery. 3934 */ 3935 lilp_length = sizeof (fp_valid_alpas) / 3936 sizeof (fp_valid_alpas[0]); 3937 lilp_map->lilp_length = lilp_length; 3938 bcopy(fp_valid_alpas, lilp_map->lilp_alpalist, 3939 lilp_length); 3940 } else { 3941 rval = fp_validate_lilp_map(lilp_map); 3942 3943 if (rval == FC_SUCCESS) { 3944 mutex_enter(&port->fp_mutex); 3945 port->fp_total_devices = lilp_map->lilp_length - 1; 3946 mutex_exit(&port->fp_mutex); 3947 } 3948 } 3949 3950 mutex_enter(&port->fp_mutex); 3951 if (rval != FC_SUCCESS && !(port->fp_soft_state & FP_SOFT_BAD_LINK)) { 3952 port->fp_soft_state |= FP_SOFT_BAD_LINK; 3953 mutex_exit(&port->fp_mutex); 3954 3955 if (port->fp_fca_tran->fca_reset(port->fp_fca_handle, 3956 FC_FCA_RESET_CORE) != FC_SUCCESS) { 3957 FP_TRACE(FP_NHEAD1(9, 0), 3958 "FCA reset failed after LILP map was found" 3959 " to be invalid"); 3960 } 3961 } else if (rval == FC_SUCCESS) { 3962 port->fp_soft_state &= ~FP_SOFT_BAD_LINK; 3963 mutex_exit(&port->fp_mutex); 3964 } else { 3965 mutex_exit(&port->fp_mutex); 3966 } 3967 3968 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap End; port=%p, map=%p", port, 3969 lilp_map); 3970 3971 return (rval); 3972 } 3973 3974 3975 /* 3976 * Perform Fabric Login: 3977 * 3978 * Return Values: 3979 * FC_SUCCESS 3980 * FC_FAILURE 3981 * FC_NOMEM 3982 * FC_TRANSPORT_ERROR 3983 * and a lot others defined in fc_error.h 3984 */ 3985 static int 3986 fp_fabric_login(fc_local_port_t *port, uint32_t s_id, job_request_t *job, 3987 int flag, int sleep) 3988 { 3989 int rval; 3990 fp_cmd_t *cmd; 3991 uchar_t class; 3992 3993 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3994 3995 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login Begin; port=%p, job=%p", 3996 port, job); 3997 3998 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID); 3999 if (class == FC_TRAN_CLASS_INVALID) { 4000 return (FC_ELS_BAD); 4001 } 4002 4003 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 4004 sizeof (la_els_logi_t), sleep, NULL); 4005 if (cmd == NULL) { 4006 return (FC_NOMEM); 4007 } 4008 4009 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 4010 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 4011 cmd->cmd_flags = flag; 4012 cmd->cmd_retry_count = fp_retry_count; 4013 cmd->cmd_ulp_pkt = NULL; 4014 4015 fp_xlogi_init(port, cmd, s_id, 0xFFFFFE, fp_flogi_intr, 4016 job, LA_ELS_FLOGI); 4017 4018 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 4019 if (rval != FC_SUCCESS) { 4020 fp_free_pkt(cmd); 4021 } 4022 4023 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login End; port=%p, job=%p", 4024 port, job); 4025 4026 return (rval); 4027 } 4028 4029 4030 /* 4031 * In some scenarios such as private loop device discovery period 4032 * the fc_remote_port_t data structure isn't allocated. The allocation 4033 * is done when the PLOGI is successful. 
In some other scenarios 4034 * such as Fabric topology, the fc_remote_port_t is already created 4035 * and initialized with appropriate values (as the NS provides 4036 * them) 4037 */ 4038 static int 4039 fp_port_login(fc_local_port_t *port, uint32_t d_id, job_request_t *job, 4040 int cmd_flag, int sleep, fc_remote_port_t *pd, fc_packet_t *ulp_pkt) 4041 { 4042 uchar_t class; 4043 fp_cmd_t *cmd; 4044 uint32_t src_id; 4045 fc_remote_port_t *tmp_pd; 4046 int relogin; 4047 int found = 0; 4048 4049 #ifdef DEBUG 4050 if (pd == NULL) { 4051 ASSERT(fctl_get_remote_port_by_did(port, d_id) == NULL); 4052 } 4053 #endif 4054 ASSERT(job->job_counter > 0); 4055 4056 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID); 4057 if (class == FC_TRAN_CLASS_INVALID) { 4058 return (FC_ELS_BAD); 4059 } 4060 4061 mutex_enter(&port->fp_mutex); 4062 tmp_pd = fctl_lookup_pd_by_did(port, d_id); 4063 mutex_exit(&port->fp_mutex); 4064 4065 relogin = 1; 4066 if (tmp_pd) { 4067 mutex_enter(&tmp_pd->pd_mutex); 4068 if ((tmp_pd->pd_aux_flags & PD_DISABLE_RELOGIN) && 4069 !(tmp_pd->pd_aux_flags & PD_LOGGED_OUT)) { 4070 tmp_pd->pd_state = PORT_DEVICE_LOGGED_IN; 4071 relogin = 0; 4072 } 4073 mutex_exit(&tmp_pd->pd_mutex); 4074 } 4075 4076 if (!relogin) { 4077 mutex_enter(&tmp_pd->pd_mutex); 4078 if (tmp_pd->pd_state == PORT_DEVICE_LOGGED_IN) { 4079 cmd_flag |= FP_CMD_PLOGI_RETAIN; 4080 } 4081 mutex_exit(&tmp_pd->pd_mutex); 4082 4083 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t), 4084 sizeof (la_els_adisc_t), sleep, tmp_pd); 4085 if (cmd == NULL) { 4086 return (FC_NOMEM); 4087 } 4088 4089 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 4090 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 4091 cmd->cmd_flags = cmd_flag; 4092 cmd->cmd_retry_count = fp_retry_count; 4093 cmd->cmd_ulp_pkt = ulp_pkt; 4094 4095 mutex_enter(&port->fp_mutex); 4096 mutex_enter(&tmp_pd->pd_mutex); 4097 fp_adisc_init(cmd, job); 4098 mutex_exit(&tmp_pd->pd_mutex); 4099 mutex_exit(&port->fp_mutex); 4100 4101 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_adisc_t); 4102 cmd->cmd_pkt.pkt_rsplen = sizeof (la_els_adisc_t); 4103 4104 } else { 4105 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 4106 sizeof (la_els_logi_t), sleep, pd); 4107 if (cmd == NULL) { 4108 return (FC_NOMEM); 4109 } 4110 4111 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 4112 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 4113 cmd->cmd_flags = cmd_flag; 4114 cmd->cmd_retry_count = fp_retry_count; 4115 cmd->cmd_ulp_pkt = ulp_pkt; 4116 4117 mutex_enter(&port->fp_mutex); 4118 src_id = port->fp_port_id.port_id; 4119 mutex_exit(&port->fp_mutex); 4120 4121 fp_xlogi_init(port, cmd, src_id, d_id, fp_plogi_intr, 4122 job, LA_ELS_PLOGI); 4123 } 4124 4125 if (pd) { 4126 mutex_enter(&pd->pd_mutex); 4127 pd->pd_flags = PD_ELS_IN_PROGRESS; 4128 mutex_exit(&pd->pd_mutex); 4129 } 4130 4131 /* npiv check to make sure we don't log into ourself */ 4132 if (relogin && 4133 ((port->fp_npiv_type == FC_NPIV_PORT) || 4134 (port->fp_npiv_flag == FC_NPIV_ENABLE))) { 4135 if ((d_id & 0xffff00) == 4136 (port->fp_port_id.port_id & 0xffff00)) { 4137 found = 1; 4138 } 4139 } 4140 4141 if (found || 4142 (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS)) { 4143 if (found) { 4144 fc_packet_t *pkt = &cmd->cmd_pkt; 4145 pkt->pkt_state = FC_PKT_NPORT_RJT; 4146 } 4147 if (pd) { 4148 mutex_enter(&pd->pd_mutex); 4149 pd->pd_flags = PD_IDLE; 4150 mutex_exit(&pd->pd_mutex); 4151 } 4152 4153 if (ulp_pkt) { 4154 fc_packet_t *pkt = &cmd->cmd_pkt; 4155 4156 ulp_pkt->pkt_state = pkt->pkt_state; 4157 ulp_pkt->pkt_reason = 
pkt->pkt_reason; 4158 ulp_pkt->pkt_action = pkt->pkt_action; 4159 ulp_pkt->pkt_expln = pkt->pkt_expln; 4160 } 4161 4162 fp_iodone(cmd); 4163 } 4164 4165 return (FC_SUCCESS); 4166 } 4167 4168 4169 /* 4170 * Register the LOGIN parameters with a port device 4171 */ 4172 static void 4173 fp_register_login(ddi_acc_handle_t *handle, fc_remote_port_t *pd, 4174 la_els_logi_t *acc, uchar_t class) 4175 { 4176 fc_remote_node_t *node; 4177 4178 ASSERT(pd != NULL); 4179 4180 mutex_enter(&pd->pd_mutex); 4181 node = pd->pd_remote_nodep; 4182 if (pd->pd_login_count == 0) { 4183 pd->pd_login_count++; 4184 } 4185 4186 if (handle) { 4187 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_csp, 4188 (uint8_t *)&acc->common_service, 4189 sizeof (acc->common_service), DDI_DEV_AUTOINCR); 4190 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_clsp1, 4191 (uint8_t *)&acc->class_1, sizeof (acc->class_1), 4192 DDI_DEV_AUTOINCR); 4193 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_clsp2, 4194 (uint8_t *)&acc->class_2, sizeof (acc->class_2), 4195 DDI_DEV_AUTOINCR); 4196 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_clsp3, 4197 (uint8_t *)&acc->class_3, sizeof (acc->class_3), 4198 DDI_DEV_AUTOINCR); 4199 } else { 4200 pd->pd_csp = acc->common_service; 4201 pd->pd_clsp1 = acc->class_1; 4202 pd->pd_clsp2 = acc->class_2; 4203 pd->pd_clsp3 = acc->class_3; 4204 } 4205 4206 pd->pd_state = PORT_DEVICE_LOGGED_IN; 4207 pd->pd_login_class = class; 4208 mutex_exit(&pd->pd_mutex); 4209 4210 ASSERT(fctl_get_remote_port_by_did(pd->pd_port, 4211 pd->pd_port_id.port_id) == pd); 4212 4213 mutex_enter(&node->fd_mutex); 4214 if (handle) { 4215 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)node->fd_vv, 4216 (uint8_t *)acc->vendor_version, sizeof (node->fd_vv), 4217 DDI_DEV_AUTOINCR); 4218 } else { 4219 bcopy(acc->vendor_version, node->fd_vv, sizeof (node->fd_vv)); 4220 } 4221 mutex_exit(&node->fd_mutex); 4222 } 4223 4224 4225 /* 4226 * Mark the remote port as OFFLINE 4227 */ 4228 static void 4229 fp_remote_port_offline(fc_remote_port_t *pd) 4230 { 4231 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4232 if (pd->pd_login_count && 4233 ((pd->pd_aux_flags & PD_DISABLE_RELOGIN) == 0)) { 4234 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service)); 4235 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param)); 4236 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param)); 4237 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param)); 4238 pd->pd_login_class = 0; 4239 } 4240 pd->pd_type = PORT_DEVICE_OLD; 4241 pd->pd_flags = PD_IDLE; 4242 fctl_tc_reset(&pd->pd_logo_tc); 4243 } 4244 4245 4246 /* 4247 * Deregistration of a port device 4248 */ 4249 static void 4250 fp_unregister_login(fc_remote_port_t *pd) 4251 { 4252 fc_remote_node_t *node; 4253 4254 ASSERT(pd != NULL); 4255 4256 mutex_enter(&pd->pd_mutex); 4257 pd->pd_login_count = 0; 4258 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service)); 4259 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param)); 4260 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param)); 4261 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param)); 4262 4263 pd->pd_state = PORT_DEVICE_VALID; 4264 pd->pd_login_class = 0; 4265 node = pd->pd_remote_nodep; 4266 mutex_exit(&pd->pd_mutex); 4267 4268 mutex_enter(&node->fd_mutex); 4269 bzero(node->fd_vv, sizeof (node->fd_vv)); 4270 mutex_exit(&node->fd_mutex); 4271 } 4272 4273 4274 /* 4275 * Handle OFFLINE state of an FCA port 4276 */ 4277 static void 4278 fp_port_offline(fc_local_port_t *port, int notify) 4279 { 4280 int index; 4281 int 
statec; 4282 timeout_id_t tid; 4283 struct pwwn_hash *head; 4284 fc_remote_port_t *pd; 4285 4286 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4287 4288 for (index = 0; index < pwwn_table_size; index++) { 4289 head = &port->fp_pwwn_table[index]; 4290 pd = head->pwwn_head; 4291 while (pd != NULL) { 4292 mutex_enter(&pd->pd_mutex); 4293 fp_remote_port_offline(pd); 4294 fctl_delist_did_table(port, pd); 4295 mutex_exit(&pd->pd_mutex); 4296 pd = pd->pd_wwn_hnext; 4297 } 4298 } 4299 port->fp_total_devices = 0; 4300 4301 statec = 0; 4302 if (notify) { 4303 /* 4304 * Decrement the statec busy counter as we 4305 * are almost done with handling the state 4306 * change 4307 */ 4308 ASSERT(port->fp_statec_busy > 0); 4309 if (--port->fp_statec_busy == 0) { 4310 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 4311 } 4312 mutex_exit(&port->fp_mutex); 4313 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, NULL, 4314 0, 0, KM_SLEEP); 4315 mutex_enter(&port->fp_mutex); 4316 4317 if (port->fp_statec_busy) { 4318 statec++; 4319 } 4320 } else if (port->fp_statec_busy > 1) { 4321 statec++; 4322 } 4323 4324 if ((tid = port->fp_offline_tid) != NULL) { 4325 mutex_exit(&port->fp_mutex); 4326 (void) untimeout(tid); 4327 mutex_enter(&port->fp_mutex); 4328 } 4329 4330 if (!statec) { 4331 port->fp_offline_tid = timeout(fp_offline_timeout, 4332 (caddr_t)port, fp_offline_ticks); 4333 } 4334 } 4335 4336 4337 /* 4338 * Offline devices and send up a state change notification to ULPs 4339 */ 4340 static void 4341 fp_offline_timeout(void *port_handle) 4342 { 4343 int ret; 4344 fc_local_port_t *port = port_handle; 4345 uint32_t listlen = 0; 4346 fc_portmap_t *changelist = NULL; 4347 4348 mutex_enter(&port->fp_mutex); 4349 4350 if ((FC_PORT_STATE_MASK(port->fp_state) != FC_STATE_OFFLINE) || 4351 (port->fp_soft_state & 4352 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 4353 port->fp_dev_count == 0 || port->fp_statec_busy) { 4354 port->fp_offline_tid = NULL; 4355 mutex_exit(&port->fp_mutex); 4356 return; 4357 } 4358 4359 mutex_exit(&port->fp_mutex); 4360 4361 FP_TRACE(FP_NHEAD2(9, 0), "OFFLINE timeout"); 4362 4363 if (port->fp_options & FP_CORE_ON_OFFLINE_TIMEOUT) { 4364 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle, 4365 FC_FCA_CORE)) != FC_SUCCESS) { 4366 FP_TRACE(FP_NHEAD1(9, ret), 4367 "Failed to force adapter dump"); 4368 } else { 4369 FP_TRACE(FP_NHEAD1(9, 0), 4370 "Forced adapter dump successfully"); 4371 } 4372 } else if (port->fp_options & FP_RESET_CORE_ON_OFFLINE_TIMEOUT) { 4373 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle, 4374 FC_FCA_RESET_CORE)) != FC_SUCCESS) { 4375 FP_TRACE(FP_NHEAD1(9, ret), 4376 "Failed to force adapter dump and reset"); 4377 } else { 4378 FP_TRACE(FP_NHEAD1(9, 0), 4379 "Forced adapter dump and reset successfully"); 4380 } 4381 } 4382 4383 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 4384 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, changelist, 4385 listlen, listlen, KM_SLEEP); 4386 4387 mutex_enter(&port->fp_mutex); 4388 port->fp_offline_tid = NULL; 4389 mutex_exit(&port->fp_mutex); 4390 } 4391 4392 4393 /* 4394 * Perform general purpose ELS request initialization 4395 */ 4396 static void 4397 fp_els_init(fp_cmd_t *cmd, uint32_t s_id, uint32_t d_id, 4398 void (*comp) (), job_request_t *job) 4399 { 4400 fc_packet_t *pkt; 4401 4402 pkt = &cmd->cmd_pkt; 4403 cmd->cmd_job = job; 4404 4405 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_ELS_REQ; 4406 pkt->pkt_cmd_fhdr.d_id = d_id; 4407 pkt->pkt_cmd_fhdr.s_id = s_id; 4408 pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS; 
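/*
 * The remaining frame header fields are set up for a new ELS exchange:
 * this is the first sequence, we hold sequence initiative, and both
 * exchange IDs start out as 0xffff below.
 */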
4409 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ; 4410 pkt->pkt_cmd_fhdr.seq_id = 0; 4411 pkt->pkt_cmd_fhdr.df_ctl = 0; 4412 pkt->pkt_cmd_fhdr.seq_cnt = 0; 4413 pkt->pkt_cmd_fhdr.ox_id = 0xffff; 4414 pkt->pkt_cmd_fhdr.rx_id = 0xffff; 4415 pkt->pkt_cmd_fhdr.ro = 0; 4416 pkt->pkt_cmd_fhdr.rsvd = 0; 4417 pkt->pkt_comp = comp; 4418 pkt->pkt_timeout = FP_ELS_TIMEOUT; 4419 } 4420 4421 4422 /* 4423 * Initialize PLOGI/FLOGI ELS request 4424 */ 4425 static void 4426 fp_xlogi_init(fc_local_port_t *port, fp_cmd_t *cmd, uint32_t s_id, 4427 uint32_t d_id, void (*intr) (), job_request_t *job, uchar_t ls_code) 4428 { 4429 ls_code_t payload; 4430 4431 fp_els_init(cmd, s_id, d_id, intr, job); 4432 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4433 4434 payload.ls_code = ls_code; 4435 payload.mbz = 0; 4436 4437 FC_SET_CMD(port, cmd->cmd_pkt.pkt_cmd_acc, 4438 (uint8_t *)&port->fp_service_params, 4439 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (port->fp_service_params), 4440 DDI_DEV_AUTOINCR); 4441 4442 FC_SET_CMD(port, cmd->cmd_pkt.pkt_cmd_acc, (uint8_t *)&payload, 4443 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (payload), 4444 DDI_DEV_AUTOINCR); 4445 } 4446 4447 4448 /* 4449 * Initialize LOGO ELS request 4450 */ 4451 static void 4452 fp_logo_init(fc_remote_port_t *pd, fp_cmd_t *cmd, job_request_t *job) 4453 { 4454 fc_local_port_t *port; 4455 fc_packet_t *pkt; 4456 la_els_logo_t payload; 4457 4458 port = pd->pd_port; 4459 pkt = &cmd->cmd_pkt; 4460 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4461 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4462 4463 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4464 fp_logo_intr, job); 4465 4466 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4467 4468 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4469 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4470 4471 payload.ls_code.ls_code = LA_ELS_LOGO; 4472 payload.ls_code.mbz = 0; 4473 payload.nport_ww_name = port->fp_service_params.nport_ww_name; 4474 payload.nport_id = port->fp_port_id; 4475 4476 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 4477 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4478 } 4479 4480 /* 4481 * Initialize RNID ELS request 4482 */ 4483 static void 4484 fp_rnid_init(fp_cmd_t *cmd, uint16_t flag, job_request_t *job) 4485 { 4486 fc_local_port_t *port; 4487 fc_packet_t *pkt; 4488 la_els_rnid_t payload; 4489 fc_remote_port_t *pd; 4490 4491 pkt = &cmd->cmd_pkt; 4492 pd = pkt->pkt_pd; 4493 port = pd->pd_port; 4494 4495 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4496 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4497 4498 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4499 fp_rnid_intr, job); 4500 4501 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4502 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4503 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4504 4505 payload.ls_code.ls_code = LA_ELS_RNID; 4506 payload.ls_code.mbz = 0; 4507 payload.data_format = flag; 4508 4509 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 4510 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4511 } 4512 4513 /* 4514 * Initialize RLS ELS request 4515 */ 4516 static void 4517 fp_rls_init(fp_cmd_t *cmd, job_request_t *job) 4518 { 4519 fc_local_port_t *port; 4520 fc_packet_t *pkt; 4521 la_els_rls_t payload; 4522 fc_remote_port_t *pd; 4523 4524 pkt = &cmd->cmd_pkt; 4525 pd = pkt->pkt_pd; 4526 port = pd->pd_port; 4527 4528 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4529 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4530 4531 fp_els_init(cmd, 
port->fp_port_id.port_id, pd->pd_port_id.port_id, 4532 fp_rls_intr, job); 4533 4534 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4535 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4536 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4537 4538 payload.ls_code.ls_code = LA_ELS_RLS; 4539 payload.ls_code.mbz = 0; 4540 payload.rls_portid = port->fp_port_id; 4541 4542 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 4543 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4544 } 4545 4546 4547 /* 4548 * Initialize an ADISC ELS request 4549 */ 4550 static void 4551 fp_adisc_init(fp_cmd_t *cmd, job_request_t *job) 4552 { 4553 fc_local_port_t *port; 4554 fc_packet_t *pkt; 4555 la_els_adisc_t payload; 4556 fc_remote_port_t *pd; 4557 4558 pkt = &cmd->cmd_pkt; 4559 pd = pkt->pkt_pd; 4560 port = pd->pd_port; 4561 4562 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4563 ASSERT(MUTEX_HELD(&pd->pd_port->fp_mutex)); 4564 4565 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4566 fp_adisc_intr, job); 4567 4568 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4569 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4570 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4571 4572 payload.ls_code.ls_code = LA_ELS_ADISC; 4573 payload.ls_code.mbz = 0; 4574 payload.nport_id = port->fp_port_id; 4575 payload.port_wwn = port->fp_service_params.nport_ww_name; 4576 payload.node_wwn = port->fp_service_params.node_ww_name; 4577 payload.hard_addr = port->fp_hard_addr; 4578 4579 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 4580 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4581 } 4582 4583 4584 /* 4585 * Send up a state change notification to ULPs. 4586 * Spawns a call to fctl_ulp_statec_cb in a taskq thread. 4587 */ 4588 static int 4589 fp_ulp_statec_cb(fc_local_port_t *port, uint32_t state, 4590 fc_portmap_t *changelist, uint32_t listlen, uint32_t alloc_len, int sleep) 4591 { 4592 fc_port_clist_t *clist; 4593 fc_remote_port_t *pd; 4594 int count; 4595 4596 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 4597 4598 clist = kmem_zalloc(sizeof (*clist), sleep); 4599 if (clist == NULL) { 4600 kmem_free(changelist, alloc_len * sizeof (*changelist)); 4601 return (FC_NOMEM); 4602 } 4603 4604 clist->clist_state = state; 4605 4606 mutex_enter(&port->fp_mutex); 4607 clist->clist_flags = port->fp_topology; 4608 mutex_exit(&port->fp_mutex); 4609 4610 clist->clist_port = (opaque_t)port; 4611 clist->clist_len = listlen; 4612 clist->clist_size = alloc_len; 4613 clist->clist_map = changelist; 4614 4615 /* 4616 * Bump the reference count of each fc_remote_port_t in this changelist. 4617 * This is necessary since these devices will be sitting in a taskq 4618 * and referenced later. When the state change notification is 4619 * complete, the reference counts will be decremented. 
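 * Entries whose map_state is not PORT_DEVICE_INVALID are also flagged
 * PD_GIVEN_TO_ULPS in the same pass.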
4620 */ 4621 for (count = 0; count < clist->clist_len; count++) { 4622 pd = clist->clist_map[count].map_pd; 4623 4624 if (pd != NULL) { 4625 mutex_enter(&pd->pd_mutex); 4626 ASSERT((pd->pd_ref_count >= 0) || 4627 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS)); 4628 pd->pd_ref_count++; 4629 4630 if (clist->clist_map[count].map_state != 4631 PORT_DEVICE_INVALID) { 4632 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 4633 } 4634 4635 mutex_exit(&pd->pd_mutex); 4636 } 4637 } 4638 4639 #ifdef DEBUG 4640 /* 4641 * Sanity check for presence of OLD devices in the hash lists 4642 */ 4643 if (clist->clist_size) { 4644 ASSERT(clist->clist_map != NULL); 4645 for (count = 0; count < clist->clist_len; count++) { 4646 if (clist->clist_map[count].map_state == 4647 PORT_DEVICE_INVALID) { 4648 la_wwn_t pwwn; 4649 fc_portid_t d_id; 4650 4651 pd = clist->clist_map[count].map_pd; 4652 ASSERT(pd != NULL); 4653 4654 mutex_enter(&pd->pd_mutex); 4655 pwwn = pd->pd_port_name; 4656 d_id = pd->pd_port_id; 4657 mutex_exit(&pd->pd_mutex); 4658 4659 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 4660 ASSERT(pd != clist->clist_map[count].map_pd); 4661 4662 pd = fctl_get_remote_port_by_did(port, 4663 d_id.port_id); 4664 ASSERT(pd != clist->clist_map[count].map_pd); 4665 } 4666 } 4667 } 4668 #endif 4669 4670 mutex_enter(&port->fp_mutex); 4671 4672 if (state == FC_STATE_ONLINE) { 4673 if (--port->fp_statec_busy == 0) { 4674 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 4675 } 4676 } 4677 mutex_exit(&port->fp_mutex); 4678 4679 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, 4680 clist, KM_SLEEP); 4681 4682 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_statec fired; Port=%p," 4683 "state=%x, len=%d", port, state, listlen); 4684 4685 return (FC_SUCCESS); 4686 } 4687 4688 4689 /* 4690 * Send up a FC_STATE_DEVICE_CHANGE state notification to ULPs 4691 */ 4692 static int 4693 fp_ulp_devc_cb(fc_local_port_t *port, fc_portmap_t *changelist, 4694 uint32_t listlen, uint32_t alloc_len, int sleep, int sync) 4695 { 4696 int ret; 4697 fc_port_clist_t *clist; 4698 4699 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 4700 4701 clist = kmem_zalloc(sizeof (*clist), sleep); 4702 if (clist == NULL) { 4703 kmem_free(changelist, alloc_len * sizeof (*changelist)); 4704 return (FC_NOMEM); 4705 } 4706 4707 clist->clist_state = FC_STATE_DEVICE_CHANGE; 4708 4709 mutex_enter(&port->fp_mutex); 4710 clist->clist_flags = port->fp_topology; 4711 mutex_exit(&port->fp_mutex); 4712 4713 clist->clist_port = (opaque_t)port; 4714 clist->clist_len = listlen; 4715 clist->clist_size = alloc_len; 4716 clist->clist_map = changelist; 4717 4718 /* Send sysevents for target state changes */ 4719 4720 if (clist->clist_size) { 4721 int count; 4722 fc_remote_port_t *pd; 4723 4724 ASSERT(clist->clist_map != NULL); 4725 for (count = 0; count < clist->clist_len; count++) { 4726 pd = clist->clist_map[count].map_pd; 4727 4728 /* 4729 * Bump reference counts on all fc_remote_port_t 4730 * structs in this list. We don't know when the task 4731 * will fire, and we don't need these fc_remote_port_t 4732 * structs going away behind our back. 
4733 */ 4734 if (pd) { 4735 mutex_enter(&pd->pd_mutex); 4736 ASSERT((pd->pd_ref_count >= 0) || 4737 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS)); 4738 pd->pd_ref_count++; 4739 mutex_exit(&pd->pd_mutex); 4740 } 4741 4742 if (clist->clist_map[count].map_state == 4743 PORT_DEVICE_VALID) { 4744 if (clist->clist_map[count].map_type == 4745 PORT_DEVICE_NEW) { 4746 /* Update our state change counter */ 4747 mutex_enter(&port->fp_mutex); 4748 port->fp_last_change++; 4749 mutex_exit(&port->fp_mutex); 4750 4751 /* Additions */ 4752 fp_log_target_event(port, 4753 ESC_SUNFC_TARGET_ADD, 4754 clist->clist_map[count].map_pwwn, 4755 clist->clist_map[count].map_did. 4756 port_id); 4757 } 4758 4759 } else if ((clist->clist_map[count].map_type == 4760 PORT_DEVICE_OLD) && 4761 (clist->clist_map[count].map_state == 4762 PORT_DEVICE_INVALID)) { 4763 /* Update our state change counter */ 4764 mutex_enter(&port->fp_mutex); 4765 port->fp_last_change++; 4766 mutex_exit(&port->fp_mutex); 4767 4768 /* 4769 * For removals, we don't decrement 4770 * pd_ref_count until after the ULP's 4771 * state change callback function has 4772 * completed. 4773 */ 4774 4775 /* Removals */ 4776 fp_log_target_event(port, 4777 ESC_SUNFC_TARGET_REMOVE, 4778 clist->clist_map[count].map_pwwn, 4779 clist->clist_map[count].map_did.port_id); 4780 } 4781 4782 if (clist->clist_map[count].map_state != 4783 PORT_DEVICE_INVALID) { 4784 /* 4785 * Indicate that the ULPs are now aware of 4786 * this device. 4787 */ 4788 4789 mutex_enter(&pd->pd_mutex); 4790 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 4791 mutex_exit(&pd->pd_mutex); 4792 } 4793 4794 #ifdef DEBUG 4795 /* 4796 * Sanity check for OLD devices in the hash lists 4797 */ 4798 if (pd && clist->clist_map[count].map_state == 4799 PORT_DEVICE_INVALID) { 4800 la_wwn_t pwwn; 4801 fc_portid_t d_id; 4802 4803 mutex_enter(&pd->pd_mutex); 4804 pwwn = pd->pd_port_name; 4805 d_id = pd->pd_port_id; 4806 mutex_exit(&pd->pd_mutex); 4807 4808 /* 4809 * This overwrites the 'pd' local variable. 4810 * Beware of this if 'pd' ever gets 4811 * referenced below this block. 
4812 */ 4813 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 4814 ASSERT(pd != clist->clist_map[count].map_pd); 4815 4816 pd = fctl_get_remote_port_by_did(port, 4817 d_id.port_id); 4818 ASSERT(pd != clist->clist_map[count].map_pd); 4819 } 4820 #endif 4821 } 4822 } 4823 4824 if (sync) { 4825 clist->clist_wait = 1; 4826 mutex_init(&clist->clist_mutex, NULL, MUTEX_DRIVER, NULL); 4827 cv_init(&clist->clist_cv, NULL, CV_DRIVER, NULL); 4828 } 4829 4830 ret = taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, clist, sleep); 4831 if (sync && ret) { 4832 mutex_enter(&clist->clist_mutex); 4833 while (clist->clist_wait) { 4834 cv_wait(&clist->clist_cv, &clist->clist_mutex); 4835 } 4836 mutex_exit(&clist->clist_mutex); 4837 4838 mutex_destroy(&clist->clist_mutex); 4839 cv_destroy(&clist->clist_cv); 4840 kmem_free(clist, sizeof (*clist)); 4841 } 4842 4843 if (!ret) { 4844 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc dispatch failed; " 4845 "port=%p", port); 4846 kmem_free(clist->clist_map, 4847 sizeof (*(clist->clist_map)) * clist->clist_size); 4848 kmem_free(clist, sizeof (*clist)); 4849 } else { 4850 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc fired; port=%p, len=%d", 4851 port, listlen); 4852 } 4853 4854 return (FC_SUCCESS); 4855 } 4856 4857 4858 /* 4859 * Perform PLOGI to the group of devices for ULPs 4860 */ 4861 static void 4862 fp_plogi_group(fc_local_port_t *port, job_request_t *job) 4863 { 4864 int offline; 4865 int count; 4866 int rval; 4867 uint32_t listlen; 4868 uint32_t done; 4869 uint32_t d_id; 4870 fc_remote_node_t *node; 4871 fc_remote_port_t *pd; 4872 fc_remote_port_t *tmp_pd; 4873 fc_packet_t *ulp_pkt; 4874 la_els_logi_t *els_data; 4875 ls_code_t ls_code; 4876 4877 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group begin; port=%p, job=%p", 4878 port, job); 4879 4880 done = 0; 4881 listlen = job->job_ulp_listlen; 4882 job->job_counter = job->job_ulp_listlen; 4883 4884 mutex_enter(&port->fp_mutex); 4885 offline = (port->fp_statec_busy || 4886 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) ? 
1 : 0; 4887 mutex_exit(&port->fp_mutex); 4888 4889 for (count = 0; count < listlen; count++) { 4890 ASSERT(job->job_ulp_pkts[count]->pkt_rsplen >= 4891 sizeof (la_els_logi_t)); 4892 4893 ulp_pkt = job->job_ulp_pkts[count]; 4894 pd = ulp_pkt->pkt_pd; 4895 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 4896 4897 if (offline) { 4898 done++; 4899 4900 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 4901 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 4902 ulp_pkt->pkt_pd = NULL; 4903 ulp_pkt->pkt_comp(ulp_pkt); 4904 4905 job->job_ulp_pkts[count] = NULL; 4906 4907 fp_jobdone(job); 4908 continue; 4909 } 4910 4911 if (pd == NULL) { 4912 pd = fctl_get_remote_port_by_did(port, d_id); 4913 if (pd == NULL) { 4914 /* reset later */ 4915 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4916 continue; 4917 } 4918 mutex_enter(&pd->pd_mutex); 4919 if (pd->pd_flags == PD_ELS_IN_PROGRESS) { 4920 mutex_exit(&pd->pd_mutex); 4921 ulp_pkt->pkt_state = FC_PKT_ELS_IN_PROGRESS; 4922 done++; 4923 ulp_pkt->pkt_comp(ulp_pkt); 4924 job->job_ulp_pkts[count] = NULL; 4925 fp_jobdone(job); 4926 } else { 4927 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4928 mutex_exit(&pd->pd_mutex); 4929 } 4930 continue; 4931 } 4932 4933 switch (ulp_pkt->pkt_state) { 4934 case FC_PKT_ELS_IN_PROGRESS: 4935 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 4936 /* FALLTHRU */ 4937 case FC_PKT_LOCAL_RJT: 4938 done++; 4939 ulp_pkt->pkt_comp(ulp_pkt); 4940 job->job_ulp_pkts[count] = NULL; 4941 fp_jobdone(job); 4942 continue; 4943 default: 4944 break; 4945 } 4946 4947 /* 4948 * Validate the pd corresponding to the d_id passed 4949 * by the ULPs 4950 */ 4951 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 4952 if ((tmp_pd == NULL) || (pd != tmp_pd)) { 4953 done++; 4954 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4955 ulp_pkt->pkt_reason = FC_REASON_NO_CONNECTION; 4956 ulp_pkt->pkt_pd = NULL; 4957 ulp_pkt->pkt_comp(ulp_pkt); 4958 job->job_ulp_pkts[count] = NULL; 4959 fp_jobdone(job); 4960 continue; 4961 } 4962 4963 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group contd; " 4964 "port=%p, pd=%p", port, pd); 4965 4966 mutex_enter(&pd->pd_mutex); 4967 4968 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 4969 done++; 4970 els_data = (la_els_logi_t *)ulp_pkt->pkt_resp; 4971 4972 ls_code.ls_code = LA_ELS_ACC; 4973 ls_code.mbz = 0; 4974 4975 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 4976 (uint8_t *)&ls_code, (uint8_t *)&els_data->ls_code, 4977 sizeof (ls_code_t), DDI_DEV_AUTOINCR); 4978 4979 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 4980 (uint8_t *)&pd->pd_csp, 4981 (uint8_t *)&els_data->common_service, 4982 sizeof (pd->pd_csp), DDI_DEV_AUTOINCR); 4983 4984 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 4985 (uint8_t *)&pd->pd_port_name, 4986 (uint8_t *)&els_data->nport_ww_name, 4987 sizeof (pd->pd_port_name), DDI_DEV_AUTOINCR); 4988 4989 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 4990 (uint8_t *)&pd->pd_clsp1, 4991 (uint8_t *)&els_data->class_1, 4992 sizeof (pd->pd_clsp1), DDI_DEV_AUTOINCR); 4993 4994 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 4995 (uint8_t *)&pd->pd_clsp2, 4996 (uint8_t *)&els_data->class_2, 4997 sizeof (pd->pd_clsp2), DDI_DEV_AUTOINCR); 4998 4999 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 5000 (uint8_t *)&pd->pd_clsp3, 5001 (uint8_t *)&els_data->class_3, 5002 sizeof (pd->pd_clsp3), DDI_DEV_AUTOINCR); 5003 5004 node = pd->pd_remote_nodep; 5005 pd->pd_login_count++; 5006 pd->pd_flags = PD_IDLE; 5007 ulp_pkt->pkt_pd = pd; 5008 mutex_exit(&pd->pd_mutex); 5009 5010 mutex_enter(&node->fd_mutex); 5011 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 5012 (uint8_t *)&node->fd_node_name, 
5013 (uint8_t *)(&els_data->node_ww_name), 5014 sizeof (node->fd_node_name), DDI_DEV_AUTOINCR); 5015 5016 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 5017 (uint8_t *)&node->fd_vv, 5018 (uint8_t *)(&els_data->vendor_version), 5019 sizeof (node->fd_vv), DDI_DEV_AUTOINCR); 5020 5021 mutex_exit(&node->fd_mutex); 5022 ulp_pkt->pkt_state = FC_PKT_SUCCESS; 5023 } else { 5024 5025 ulp_pkt->pkt_state = FC_PKT_FAILURE; /* reset later */ 5026 mutex_exit(&pd->pd_mutex); 5027 } 5028 5029 if (ulp_pkt->pkt_state != FC_PKT_FAILURE) { 5030 ulp_pkt->pkt_comp(ulp_pkt); 5031 job->job_ulp_pkts[count] = NULL; 5032 fp_jobdone(job); 5033 } 5034 } 5035 5036 if (done == listlen) { 5037 fp_jobwait(job); 5038 fctl_jobdone(job); 5039 return; 5040 } 5041 5042 job->job_counter = listlen - done; 5043 5044 for (count = 0; count < listlen; count++) { 5045 int cmd_flags; 5046 5047 if ((ulp_pkt = job->job_ulp_pkts[count]) == NULL) { 5048 continue; 5049 } 5050 5051 ASSERT(ulp_pkt->pkt_state == FC_PKT_FAILURE); 5052 5053 cmd_flags = FP_CMD_PLOGI_RETAIN; 5054 5055 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 5056 ASSERT(d_id != 0); 5057 5058 pd = fctl_get_remote_port_by_did(port, d_id); 5059 5060 /* 5061 * We need to properly adjust the port device 5062 * reference counter before we assign the pd 5063 * to the ULP packets port device pointer. 5064 */ 5065 if (pd != NULL && ulp_pkt->pkt_pd == NULL) { 5066 mutex_enter(&pd->pd_mutex); 5067 pd->pd_ref_count++; 5068 mutex_exit(&pd->pd_mutex); 5069 FP_TRACE(FP_NHEAD1(3, 0), 5070 "fp_plogi_group: DID = 0x%x using new pd %p \ 5071 old pd NULL\n", d_id, pd); 5072 } else if (pd != NULL && ulp_pkt->pkt_pd != NULL && 5073 ulp_pkt->pkt_pd != pd) { 5074 mutex_enter(&pd->pd_mutex); 5075 pd->pd_ref_count++; 5076 mutex_exit(&pd->pd_mutex); 5077 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex); 5078 ulp_pkt->pkt_pd->pd_ref_count--; 5079 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex); 5080 FP_TRACE(FP_NHEAD1(3, 0), 5081 "fp_plogi_group: DID = 0x%x pkt_pd %p != pd %p\n", 5082 d_id, ulp_pkt->pkt_pd, pd); 5083 } else if (pd == NULL && ulp_pkt->pkt_pd != NULL) { 5084 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex); 5085 ulp_pkt->pkt_pd->pd_ref_count--; 5086 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex); 5087 FP_TRACE(FP_NHEAD1(3, 0), 5088 "fp_plogi_group: DID = 0x%x pd is NULL and \ 5089 pkt_pd = %p\n", d_id, ulp_pkt->pkt_pd); 5090 } 5091 5092 ulp_pkt->pkt_pd = pd; 5093 5094 if (pd != NULL) { 5095 mutex_enter(&pd->pd_mutex); 5096 d_id = pd->pd_port_id.port_id; 5097 pd->pd_flags = PD_ELS_IN_PROGRESS; 5098 mutex_exit(&pd->pd_mutex); 5099 } else { 5100 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 5101 #ifdef DEBUG 5102 pd = fctl_get_remote_port_by_did(port, d_id); 5103 ASSERT(pd == NULL); 5104 #endif 5105 /* 5106 * In the Fabric topology, use NS to create 5107 * port device, and if that fails still try 5108 * with PLOGI - which will make yet another 5109 * attempt to create after successful PLOGI 5110 */ 5111 mutex_enter(&port->fp_mutex); 5112 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 5113 mutex_exit(&port->fp_mutex); 5114 pd = fp_create_remote_port_by_ns(port, 5115 d_id, KM_SLEEP); 5116 if (pd) { 5117 cmd_flags |= FP_CMD_DELDEV_ON_ERROR; 5118 5119 mutex_enter(&pd->pd_mutex); 5120 pd->pd_flags = PD_ELS_IN_PROGRESS; 5121 mutex_exit(&pd->pd_mutex); 5122 5123 FP_TRACE(FP_NHEAD1(3, 0), 5124 "fp_plogi_group;" 5125 " NS created PD port=%p, job=%p," 5126 " pd=%p", port, job, pd); 5127 } 5128 } else { 5129 mutex_exit(&port->fp_mutex); 5130 } 5131 if ((ulp_pkt->pkt_pd == NULL) && (pd != NULL)) { 5132 FP_TRACE(FP_NHEAD1(3, 0), 5133 "fp_plogi_group;" 5134 
"ulp_pkt's pd is NULL, get a pd %p", 5135 pd); 5136 mutex_enter(&pd->pd_mutex); 5137 pd->pd_ref_count++; 5138 mutex_exit(&pd->pd_mutex); 5139 } 5140 ulp_pkt->pkt_pd = pd; 5141 } 5142 5143 rval = fp_port_login(port, d_id, job, cmd_flags, 5144 KM_SLEEP, pd, ulp_pkt); 5145 5146 if (rval == FC_SUCCESS) { 5147 continue; 5148 } 5149 5150 if (rval == FC_STATEC_BUSY) { 5151 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 5152 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 5153 } else { 5154 ulp_pkt->pkt_state = FC_PKT_FAILURE; 5155 } 5156 5157 if (pd) { 5158 mutex_enter(&pd->pd_mutex); 5159 pd->pd_flags = PD_IDLE; 5160 mutex_exit(&pd->pd_mutex); 5161 } 5162 5163 if (cmd_flags & FP_CMD_DELDEV_ON_ERROR) { 5164 ASSERT(pd != NULL); 5165 5166 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group: NS created," 5167 " PD removed; port=%p, job=%p", port, job); 5168 5169 mutex_enter(&pd->pd_mutex); 5170 pd->pd_ref_count--; 5171 node = pd->pd_remote_nodep; 5172 mutex_exit(&pd->pd_mutex); 5173 5174 ASSERT(node != NULL); 5175 5176 if (fctl_destroy_remote_port(port, pd) == 0) { 5177 fctl_destroy_remote_node(node); 5178 } 5179 ulp_pkt->pkt_pd = NULL; 5180 } 5181 ulp_pkt->pkt_comp(ulp_pkt); 5182 fp_jobdone(job); 5183 } 5184 5185 fp_jobwait(job); 5186 fctl_jobdone(job); 5187 5188 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group end: port=%p, job=%p", 5189 port, job); 5190 } 5191 5192 5193 /* 5194 * Name server request initialization 5195 */ 5196 static void 5197 fp_ns_init(fc_local_port_t *port, job_request_t *job, int sleep) 5198 { 5199 int rval; 5200 int count; 5201 int size; 5202 5203 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5204 5205 job->job_counter = 1; 5206 job->job_result = FC_SUCCESS; 5207 5208 rval = fp_port_login(port, 0xFFFFFC, job, FP_CMD_PLOGI_RETAIN, 5209 KM_SLEEP, NULL, NULL); 5210 5211 if (rval != FC_SUCCESS) { 5212 mutex_enter(&port->fp_mutex); 5213 port->fp_topology = FC_TOP_NO_NS; 5214 mutex_exit(&port->fp_mutex); 5215 return; 5216 } 5217 5218 fp_jobwait(job); 5219 5220 if (job->job_result != FC_SUCCESS) { 5221 mutex_enter(&port->fp_mutex); 5222 port->fp_topology = FC_TOP_NO_NS; 5223 mutex_exit(&port->fp_mutex); 5224 return; 5225 } 5226 5227 /* 5228 * At this time, we'll do NS registration for objects in the 5229 * ns_reg_cmds (see top of this file) array. 5230 * 5231 * Each time a ULP module registers with the transport, the 5232 * appropriate fc4 bit is set fc4 types and registered with 5233 * the NS for this support. Also, ULPs and FC admin utilities 5234 * may do registration for objects like IP address, symbolic 5235 * port/node name, Initial process associator at run time. 
5236 */ 5237 size = sizeof (ns_reg_cmds) / sizeof (ns_reg_cmds[0]); 5238 job->job_counter = size; 5239 job->job_result = FC_SUCCESS; 5240 5241 for (count = 0; count < size; count++) { 5242 if (fp_ns_reg(port, NULL, ns_reg_cmds[count], 5243 job, 0, sleep) != FC_SUCCESS) { 5244 fp_jobdone(job); 5245 } 5246 } 5247 if (size) { 5248 fp_jobwait(job); 5249 } 5250 5251 job->job_result = FC_SUCCESS; 5252 5253 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP); 5254 5255 if (port->fp_dev_count < FP_MAX_DEVICES) { 5256 (void) fp_ns_get_devcount(port, job, 1, KM_SLEEP); 5257 } 5258 5259 job->job_counter = 1; 5260 5261 if (fp_ns_scr(port, job, FC_SCR_FULL_REGISTRATION, 5262 sleep) == FC_SUCCESS) { 5263 fp_jobwait(job); 5264 } 5265 } 5266 5267 5268 /* 5269 * Name server finish: 5270 * Unregister for RSCNs 5271 * Unregister all the host port objects in the Name Server 5272 * Perform LOGO with the NS; 5273 */ 5274 static void 5275 fp_ns_fini(fc_local_port_t *port, job_request_t *job) 5276 { 5277 fp_cmd_t *cmd; 5278 uchar_t class; 5279 uint32_t s_id; 5280 fc_packet_t *pkt; 5281 la_els_logo_t payload; 5282 5283 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5284 5285 job->job_counter = 1; 5286 5287 if (fp_ns_scr(port, job, FC_SCR_CLEAR_REGISTRATION, KM_SLEEP) != 5288 FC_SUCCESS) { 5289 fp_jobdone(job); 5290 } 5291 fp_jobwait(job); 5292 5293 job->job_counter = 1; 5294 5295 if (fp_ns_reg(port, NULL, NS_DA_ID, job, 0, KM_SLEEP) != FC_SUCCESS) { 5296 fp_jobdone(job); 5297 } 5298 fp_jobwait(job); 5299 5300 job->job_counter = 1; 5301 5302 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t), 5303 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, NULL); 5304 pkt = &cmd->cmd_pkt; 5305 5306 mutex_enter(&port->fp_mutex); 5307 class = port->fp_ns_login_class; 5308 s_id = port->fp_port_id.port_id; 5309 payload.nport_id = port->fp_port_id; 5310 mutex_exit(&port->fp_mutex); 5311 5312 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 5313 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 5314 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE; 5315 cmd->cmd_retry_count = 1; 5316 cmd->cmd_ulp_pkt = NULL; 5317 5318 if (port->fp_npiv_type == FC_NPIV_PORT) { 5319 fp_els_init(cmd, s_id, 0xFFFFFE, fp_logo_intr, job); 5320 } else { 5321 fp_els_init(cmd, s_id, 0xFFFFFC, fp_logo_intr, job); 5322 } 5323 5324 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 5325 5326 payload.ls_code.ls_code = LA_ELS_LOGO; 5327 payload.ls_code.mbz = 0; 5328 payload.nport_ww_name = port->fp_service_params.nport_ww_name; 5329 5330 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 5331 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 5332 5333 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 5334 fp_iodone(cmd); 5335 } 5336 fp_jobwait(job); 5337 } 5338 5339 5340 /* 5341 * NS Registration function. 5342 * 5343 * It should be seriously noted that FC-GS-2 currently doesn't support 5344 * an Object Registration by a D_ID other than the owner of the object. 5345 * What we are aiming at currently is to at least allow Symbolic Node/Port 5346 * Name registration for any N_Port Identifier by the host software. 5347 * 5348 * Anyway, if the second argument (fc_remote_port_t *) is NULL, this 5349 * function treats the request as Host NS Object. 
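 *
 * The switch below builds the CT payload for the object being
 * registered (RPN_ID, RNN_ID, RCS_ID, RFT_ID, RSPN_ID, RPT_ID,
 * RIP_NN, RIPA_NN, RSNN_NN or DA_ID) and hands it to fp_sendcmd().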
5350 */ 5351 static int 5352 fp_ns_reg(fc_local_port_t *port, fc_remote_port_t *pd, uint16_t cmd_code, 5353 job_request_t *job, int polled, int sleep) 5354 { 5355 int rval; 5356 fc_portid_t s_id; 5357 fc_packet_t *pkt; 5358 fp_cmd_t *cmd; 5359 5360 if (pd == NULL) { 5361 mutex_enter(&port->fp_mutex); 5362 s_id = port->fp_port_id; 5363 mutex_exit(&port->fp_mutex); 5364 } else { 5365 mutex_enter(&pd->pd_mutex); 5366 s_id = pd->pd_port_id; 5367 mutex_exit(&pd->pd_mutex); 5368 } 5369 5370 if (polled) { 5371 job->job_counter = 1; 5372 } 5373 5374 switch (cmd_code) { 5375 case NS_RPN_ID: 5376 case NS_RNN_ID: { 5377 ns_rxn_req_t rxn; 5378 5379 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5380 sizeof (ns_rxn_req_t), sizeof (fc_reg_resp_t), sleep, NULL); 5381 if (cmd == NULL) { 5382 return (FC_NOMEM); 5383 } 5384 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5385 pkt = &cmd->cmd_pkt; 5386 5387 if (pd == NULL) { 5388 rxn.rxn_xname = ((cmd_code == NS_RPN_ID) ? 5389 (port->fp_service_params.nport_ww_name) : 5390 (port->fp_service_params.node_ww_name)); 5391 } else { 5392 if (cmd_code == NS_RPN_ID) { 5393 mutex_enter(&pd->pd_mutex); 5394 rxn.rxn_xname = pd->pd_port_name; 5395 mutex_exit(&pd->pd_mutex); 5396 } else { 5397 fc_remote_node_t *node; 5398 5399 mutex_enter(&pd->pd_mutex); 5400 node = pd->pd_remote_nodep; 5401 mutex_exit(&pd->pd_mutex); 5402 5403 mutex_enter(&node->fd_mutex); 5404 rxn.rxn_xname = node->fd_node_name; 5405 mutex_exit(&node->fd_mutex); 5406 } 5407 } 5408 rxn.rxn_port_id = s_id; 5409 5410 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rxn, 5411 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5412 sizeof (rxn), DDI_DEV_AUTOINCR); 5413 5414 break; 5415 } 5416 5417 case NS_RCS_ID: { 5418 ns_rcos_t rcos; 5419 5420 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5421 sizeof (ns_rcos_t), sizeof (fc_reg_resp_t), sleep, NULL); 5422 if (cmd == NULL) { 5423 return (FC_NOMEM); 5424 } 5425 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5426 pkt = &cmd->cmd_pkt; 5427 5428 if (pd == NULL) { 5429 rcos.rcos_cos = port->fp_cos; 5430 } else { 5431 mutex_enter(&pd->pd_mutex); 5432 rcos.rcos_cos = pd->pd_cos; 5433 mutex_exit(&pd->pd_mutex); 5434 } 5435 rcos.rcos_port_id = s_id; 5436 5437 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rcos, 5438 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5439 sizeof (rcos), DDI_DEV_AUTOINCR); 5440 5441 break; 5442 } 5443 5444 case NS_RFT_ID: { 5445 ns_rfc_type_t rfc; 5446 5447 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5448 sizeof (ns_rfc_type_t), sizeof (fc_reg_resp_t), sleep, 5449 NULL); 5450 if (cmd == NULL) { 5451 return (FC_NOMEM); 5452 } 5453 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5454 pkt = &cmd->cmd_pkt; 5455 5456 if (pd == NULL) { 5457 mutex_enter(&port->fp_mutex); 5458 bcopy(port->fp_fc4_types, rfc.rfc_types, 5459 sizeof (port->fp_fc4_types)); 5460 mutex_exit(&port->fp_mutex); 5461 } else { 5462 mutex_enter(&pd->pd_mutex); 5463 bcopy(pd->pd_fc4types, rfc.rfc_types, 5464 sizeof (pd->pd_fc4types)); 5465 mutex_exit(&pd->pd_mutex); 5466 } 5467 rfc.rfc_port_id = s_id; 5468 5469 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rfc, 5470 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5471 sizeof (rfc), DDI_DEV_AUTOINCR); 5472 5473 break; 5474 } 5475 5476 case NS_RSPN_ID: { 5477 uchar_t name_len; 5478 int pl_size; 5479 fc_portid_t spn; 5480 5481 if (pd == NULL) { 5482 mutex_enter(&port->fp_mutex); 5483 name_len = port->fp_sym_port_namelen; 5484 mutex_exit(&port->fp_mutex); 5485 } else { 5486 
mutex_enter(&pd->pd_mutex); 5487 name_len = pd->pd_spn_len; 5488 mutex_exit(&pd->pd_mutex); 5489 } 5490 5491 pl_size = sizeof (fc_portid_t) + name_len + 1; 5492 5493 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + pl_size, 5494 sizeof (fc_reg_resp_t), sleep, NULL); 5495 if (cmd == NULL) { 5496 return (FC_NOMEM); 5497 } 5498 5499 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5500 5501 pkt = &cmd->cmd_pkt; 5502 5503 spn = s_id; 5504 5505 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&spn, (uint8_t *) 5506 (pkt->pkt_cmd + sizeof (fc_ct_header_t)), sizeof (spn), 5507 DDI_DEV_AUTOINCR); 5508 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&name_len, 5509 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) 5510 + sizeof (fc_portid_t)), 1, DDI_DEV_AUTOINCR); 5511 5512 if (pd == NULL) { 5513 mutex_enter(&port->fp_mutex); 5514 FC_SET_CMD(port, pkt->pkt_cmd_acc, 5515 (uint8_t *)port->fp_sym_port_name, (uint8_t *) 5516 (pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5517 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR); 5518 mutex_exit(&port->fp_mutex); 5519 } else { 5520 mutex_enter(&pd->pd_mutex); 5521 FC_SET_CMD(port, pkt->pkt_cmd_acc, 5522 (uint8_t *)pd->pd_spn, 5523 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5524 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR); 5525 mutex_exit(&pd->pd_mutex); 5526 } 5527 break; 5528 } 5529 5530 case NS_RPT_ID: { 5531 ns_rpt_t rpt; 5532 5533 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5534 sizeof (ns_rpt_t), sizeof (fc_reg_resp_t), sleep, NULL); 5535 if (cmd == NULL) { 5536 return (FC_NOMEM); 5537 } 5538 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5539 pkt = &cmd->cmd_pkt; 5540 5541 if (pd == NULL) { 5542 rpt.rpt_type = port->fp_port_type; 5543 } else { 5544 mutex_enter(&pd->pd_mutex); 5545 rpt.rpt_type = pd->pd_porttype; 5546 mutex_exit(&pd->pd_mutex); 5547 } 5548 rpt.rpt_port_id = s_id; 5549 5550 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rpt, 5551 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5552 sizeof (rpt), DDI_DEV_AUTOINCR); 5553 5554 break; 5555 } 5556 5557 case NS_RIP_NN: { 5558 ns_rip_t rip; 5559 5560 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5561 sizeof (ns_rip_t), sizeof (fc_reg_resp_t), sleep, NULL); 5562 if (cmd == NULL) { 5563 return (FC_NOMEM); 5564 } 5565 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5566 pkt = &cmd->cmd_pkt; 5567 5568 if (pd == NULL) { 5569 rip.rip_node_name = 5570 port->fp_service_params.node_ww_name; 5571 bcopy(port->fp_ip_addr, rip.rip_ip_addr, 5572 sizeof (port->fp_ip_addr)); 5573 } else { 5574 fc_remote_node_t *node; 5575 5576 /* 5577 * The most correct implementation should have the IP 5578 * address in the fc_remote_node_t structure; I believe 5579 * Node WWN and IP address should have one to one 5580 * correlation (but guess what this is changing in 5581 * FC-GS-2 latest draft) 5582 */ 5583 mutex_enter(&pd->pd_mutex); 5584 node = pd->pd_remote_nodep; 5585 bcopy(pd->pd_ip_addr, rip.rip_ip_addr, 5586 sizeof (pd->pd_ip_addr)); 5587 mutex_exit(&pd->pd_mutex); 5588 5589 mutex_enter(&node->fd_mutex); 5590 rip.rip_node_name = node->fd_node_name; 5591 mutex_exit(&node->fd_mutex); 5592 } 5593 5594 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rip, 5595 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5596 sizeof (rip), DDI_DEV_AUTOINCR); 5597 5598 break; 5599 } 5600 5601 case NS_RIPA_NN: { 5602 ns_ipa_t ipa; 5603 5604 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5605 sizeof (ns_ipa_t), sizeof (fc_reg_resp_t), sleep, NULL); 5606 if (cmd == NULL) { 5607 return 
(FC_NOMEM); 5608 } 5609 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5610 pkt = &cmd->cmd_pkt; 5611 5612 if (pd == NULL) { 5613 ipa.ipa_node_name = 5614 port->fp_service_params.node_ww_name; 5615 bcopy(port->fp_ipa, ipa.ipa_value, 5616 sizeof (port->fp_ipa)); 5617 } else { 5618 fc_remote_node_t *node; 5619 5620 mutex_enter(&pd->pd_mutex); 5621 node = pd->pd_remote_nodep; 5622 mutex_exit(&pd->pd_mutex); 5623 5624 mutex_enter(&node->fd_mutex); 5625 ipa.ipa_node_name = node->fd_node_name; 5626 bcopy(node->fd_ipa, ipa.ipa_value, 5627 sizeof (node->fd_ipa)); 5628 mutex_exit(&node->fd_mutex); 5629 } 5630 5631 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&ipa, 5632 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5633 sizeof (ipa), DDI_DEV_AUTOINCR); 5634 5635 break; 5636 } 5637 5638 case NS_RSNN_NN: { 5639 uchar_t name_len; 5640 int pl_size; 5641 la_wwn_t snn; 5642 fc_remote_node_t *node = NULL; 5643 5644 if (pd == NULL) { 5645 mutex_enter(&port->fp_mutex); 5646 name_len = port->fp_sym_node_namelen; 5647 mutex_exit(&port->fp_mutex); 5648 } else { 5649 mutex_enter(&pd->pd_mutex); 5650 node = pd->pd_remote_nodep; 5651 mutex_exit(&pd->pd_mutex); 5652 5653 mutex_enter(&node->fd_mutex); 5654 name_len = node->fd_snn_len; 5655 mutex_exit(&node->fd_mutex); 5656 } 5657 5658 pl_size = sizeof (la_wwn_t) + name_len + 1; 5659 5660 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5661 pl_size, sizeof (fc_reg_resp_t), sleep, NULL); 5662 if (cmd == NULL) { 5663 return (FC_NOMEM); 5664 } 5665 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5666 5667 pkt = &cmd->cmd_pkt; 5668 5669 bcopy(&port->fp_service_params.node_ww_name, 5670 &snn, sizeof (la_wwn_t)); 5671 5672 if (pd == NULL) { 5673 mutex_enter(&port->fp_mutex); 5674 FC_SET_CMD(port, pkt->pkt_cmd_acc, 5675 (uint8_t *)port->fp_sym_node_name, (uint8_t *) 5676 (pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5677 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR); 5678 mutex_exit(&port->fp_mutex); 5679 } else { 5680 ASSERT(node != NULL); 5681 mutex_enter(&node->fd_mutex); 5682 FC_SET_CMD(port, pkt->pkt_cmd_acc, 5683 (uint8_t *)node->fd_snn, 5684 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5685 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR); 5686 mutex_exit(&node->fd_mutex); 5687 } 5688 5689 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&snn, 5690 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5691 sizeof (snn), DDI_DEV_AUTOINCR); 5692 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&name_len, 5693 (uint8_t *)(pkt->pkt_cmd 5694 + sizeof (fc_ct_header_t) + sizeof (snn)), 5695 1, DDI_DEV_AUTOINCR); 5696 5697 break; 5698 } 5699 5700 case NS_DA_ID: { 5701 ns_remall_t rall; 5702 char tmp[4] = {0}; 5703 char *ptr; 5704 5705 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5706 sizeof (ns_remall_t), sizeof (fc_reg_resp_t), sleep, NULL); 5707 5708 if (cmd == NULL) { 5709 return (FC_NOMEM); 5710 } 5711 5712 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5713 pkt = &cmd->cmd_pkt; 5714 5715 ptr = (char *)(&s_id); 5716 tmp[3] = *ptr++; 5717 tmp[2] = *ptr++; 5718 tmp[1] = *ptr++; 5719 tmp[0] = *ptr; 5720 #if defined(_BIT_FIELDS_LTOH) 5721 bcopy((caddr_t)tmp, (caddr_t)(&rall.rem_port_id), 4); 5722 #else 5723 rall.rem_port_id = s_id; 5724 #endif 5725 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rall, 5726 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5727 sizeof (rall), DDI_DEV_AUTOINCR); 5728 5729 break; 5730 } 5731 5732 default: 5733 return (FC_FAILURE); 5734 } 5735 5736 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 5737 
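	/*
	 * If the send fails, record the result in the job and complete
	 * the command right away; polled callers then wait for the
	 * whole job to finish before this function returns.
	 */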
5738 if (rval != FC_SUCCESS) { 5739 job->job_result = rval; 5740 fp_iodone(cmd); 5741 } 5742 5743 if (polled) { 5744 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5745 fp_jobwait(job); 5746 } else { 5747 rval = FC_SUCCESS; 5748 } 5749 5750 return (rval); 5751 } 5752 5753 5754 /* 5755 * Common interrupt handler 5756 */ 5757 static int 5758 fp_common_intr(fc_packet_t *pkt, int iodone) 5759 { 5760 int rval = FC_FAILURE; 5761 fp_cmd_t *cmd; 5762 fc_local_port_t *port; 5763 5764 cmd = pkt->pkt_ulp_private; 5765 port = cmd->cmd_port; 5766 5767 /* 5768 * Fail fast the upper layer requests if 5769 * a state change has occurred amidst. 5770 */ 5771 mutex_enter(&port->fp_mutex); 5772 if (cmd->cmd_ulp_pkt != NULL && port->fp_statec_busy) { 5773 mutex_exit(&port->fp_mutex); 5774 cmd->cmd_ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 5775 cmd->cmd_ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 5776 } else if (!(port->fp_soft_state & 5777 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS))) { 5778 mutex_exit(&port->fp_mutex); 5779 5780 switch (pkt->pkt_state) { 5781 case FC_PKT_LOCAL_BSY: 5782 case FC_PKT_FABRIC_BSY: 5783 case FC_PKT_NPORT_BSY: 5784 case FC_PKT_TIMEOUT: 5785 cmd->cmd_retry_interval = (pkt->pkt_state == 5786 FC_PKT_TIMEOUT) ? 0 : fp_retry_delay; 5787 rval = fp_retry_cmd(pkt); 5788 break; 5789 5790 case FC_PKT_FABRIC_RJT: 5791 case FC_PKT_NPORT_RJT: 5792 case FC_PKT_LOCAL_RJT: 5793 case FC_PKT_LS_RJT: 5794 case FC_PKT_FS_RJT: 5795 case FC_PKT_BA_RJT: 5796 rval = fp_handle_reject(pkt); 5797 break; 5798 5799 default: 5800 if (pkt->pkt_resp_resid) { 5801 cmd->cmd_retry_interval = 0; 5802 rval = fp_retry_cmd(pkt); 5803 } 5804 break; 5805 } 5806 } else { 5807 mutex_exit(&port->fp_mutex); 5808 } 5809 5810 if (rval != FC_SUCCESS && iodone) { 5811 fp_iodone(cmd); 5812 rval = FC_SUCCESS; 5813 } 5814 5815 return (rval); 5816 } 5817 5818 5819 /* 5820 * Some not so long winding theory on point to point topology: 5821 * 5822 * In the ACC payload, if the D_ID is ZERO and the common service 5823 * parameters indicate N_Port, then the topology is POINT TO POINT. 5824 * 5825 * In a point to point topology with an N_Port, during Fabric Login, 5826 * the destination N_Port will check with our WWN and decide if it 5827 * needs to issue PLOGI or not. That means, FLOGI could potentially 5828 * trigger an unsolicited PLOGI from an N_Port. The Unsolicited 5829 * PLOGI creates the device handles. 5830 * 5831 * Assuming that the host port WWN is greater than the other N_Port 5832 * WWN, then we become the master (be aware that this isn't the word 5833 * used in the FC standards) and initiate the PLOGI. 5834 * 5835 */ 5836 static void 5837 fp_flogi_intr(fc_packet_t *pkt) 5838 { 5839 int state; 5840 int f_port; 5841 uint32_t s_id; 5842 uint32_t d_id; 5843 fp_cmd_t *cmd; 5844 fc_local_port_t *port; 5845 la_wwn_t *swwn; 5846 la_wwn_t dwwn; 5847 la_wwn_t nwwn; 5848 fc_remote_port_t *pd; 5849 la_els_logi_t *acc; 5850 com_svc_t csp; 5851 ls_code_t resp; 5852 5853 cmd = pkt->pkt_ulp_private; 5854 port = cmd->cmd_port; 5855 5856 mutex_enter(&port->fp_mutex); 5857 port->fp_out_fpcmds--; 5858 mutex_exit(&port->fp_mutex); 5859 5860 FP_TRACE(FP_NHEAD1(1, 0), "fp_flogi_intr; port=%p, pkt=%p, state=%x", 5861 port, pkt, pkt->pkt_state); 5862 5863 if (FP_IS_PKT_ERROR(pkt)) { 5864 (void) fp_common_intr(pkt, 1); 5865 return; 5866 } 5867 5868 /* 5869 * Currently, we don't need to swap bytes here because qlc is faking the 5870 * response for us and so endianness is getting taken care of. 
But we 5871 have to fix this and generalize it at some point. 5872 */ 5873 acc = (la_els_logi_t *)pkt->pkt_resp; 5874 5875 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc, 5876 sizeof (resp), DDI_DEV_AUTOINCR); 5877 5878 ASSERT(resp.ls_code == LA_ELS_ACC); 5879 if (resp.ls_code != LA_ELS_ACC) { 5880 (void) fp_common_intr(pkt, 1); 5881 return; 5882 } 5883 5884 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&csp, 5885 (uint8_t *)&acc->common_service, sizeof (csp), DDI_DEV_AUTOINCR); 5886 5887 f_port = FP_IS_F_PORT(csp.cmn_features) ? 1 : 0; 5888 5889 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 5890 5891 mutex_enter(&port->fp_mutex); 5892 state = FC_PORT_STATE_MASK(port->fp_state); 5893 mutex_exit(&port->fp_mutex); 5894 5895 if (f_port == 0) { 5896 if (state != FC_STATE_LOOP) { 5897 swwn = &port->fp_service_params.nport_ww_name; 5898 5899 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&dwwn, 5900 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t), 5901 DDI_DEV_AUTOINCR); 5902 5903 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&nwwn, 5904 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t), 5905 DDI_DEV_AUTOINCR); 5906 5907 mutex_enter(&port->fp_mutex); 5908 5909 port->fp_topology = FC_TOP_PT_PT; 5910 port->fp_total_devices = 1; 5911 if (fctl_wwn_cmp(swwn, &dwwn) >= 0) { 5912 port->fp_ptpt_master = 1; 5913 /* 5914 * Let us choose 'X' as S_ID and 'Y' 5915 * as D_ID and hopefully that'll work. 5916 * If not, it will get changed. 5917 */ 5918 s_id = port->fp_instance + FP_DEFAULT_SID; 5919 d_id = port->fp_instance + FP_DEFAULT_DID; 5920 port->fp_port_id.port_id = s_id; 5921 mutex_exit(&port->fp_mutex); 5922 5923 FP_TRACE(FP_NHEAD1(1, 0), "fp_flogi_intr: fp %x" 5924 "pd %x", port->fp_port_id.port_id, d_id); 5925 pd = fctl_create_remote_port(port, 5926 &nwwn, &dwwn, d_id, PD_PLOGI_INITIATOR, 5927 KM_NOSLEEP); 5928 if (pd == NULL) { 5929 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 5930 0, NULL, "couldn't create device" 5931 " d_id=%X", d_id); 5932 fp_iodone(cmd); 5933 return; 5934 } 5935 5936 cmd->cmd_pkt.pkt_tran_flags = 5937 pkt->pkt_tran_flags; 5938 cmd->cmd_pkt.pkt_tran_type = pkt->pkt_tran_type; 5939 cmd->cmd_flags = FP_CMD_PLOGI_RETAIN; 5940 cmd->cmd_retry_count = fp_retry_count; 5941 5942 fp_xlogi_init(port, cmd, s_id, d_id, 5943 fp_plogi_intr, cmd->cmd_job, LA_ELS_PLOGI); 5944 5945 (&cmd->cmd_pkt)->pkt_pd = pd; 5946 5947 /* 5948 * We've just created this fc_remote_port_t, and 5949 * we're about to use it to send a PLOGI, so 5950 * bump the reference count right now. When 5951 * the packet is freed, the reference count will 5952 * be decremented. The ULP may also start using 5953 * it, so mark it as given away as well.
5954 */ 5955 pd->pd_ref_count++; 5956 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 5957 5958 if (fp_sendcmd(port, cmd, 5959 port->fp_fca_handle) == FC_SUCCESS) { 5960 return; 5961 } 5962 } else { 5963 /* 5964 * The device handles will be created when the 5965 * unsolicited PLOGI is completed successfully 5966 */ 5967 port->fp_ptpt_master = 0; 5968 mutex_exit(&port->fp_mutex); 5969 } 5970 } 5971 pkt->pkt_state = FC_PKT_FAILURE; 5972 } else { 5973 if (f_port) { 5974 mutex_enter(&port->fp_mutex); 5975 if (state == FC_STATE_LOOP) { 5976 port->fp_topology = FC_TOP_PUBLIC_LOOP; 5977 } else { 5978 port->fp_topology = FC_TOP_FABRIC; 5979 5980 FC_GET_RSP(port, pkt->pkt_resp_acc, 5981 (uint8_t *)&port->fp_fabric_name, 5982 (uint8_t *)&acc->node_ww_name, 5983 sizeof (la_wwn_t), 5984 DDI_DEV_AUTOINCR); 5985 } 5986 port->fp_port_id.port_id = pkt->pkt_resp_fhdr.d_id; 5987 mutex_exit(&port->fp_mutex); 5988 } else { 5989 pkt->pkt_state = FC_PKT_FAILURE; 5990 } 5991 } 5992 fp_iodone(cmd); 5993 } 5994 5995 5996 /* 5997 * Handle solicited PLOGI response 5998 */ 5999 static void 6000 fp_plogi_intr(fc_packet_t *pkt) 6001 { 6002 int nl_port; 6003 int bailout; 6004 uint32_t d_id; 6005 fp_cmd_t *cmd; 6006 la_els_logi_t *acc; 6007 fc_local_port_t *port; 6008 fc_remote_port_t *pd; 6009 la_wwn_t nwwn; 6010 la_wwn_t pwwn; 6011 ls_code_t resp; 6012 6013 nl_port = 0; 6014 cmd = pkt->pkt_ulp_private; 6015 port = cmd->cmd_port; 6016 d_id = pkt->pkt_cmd_fhdr.d_id; 6017 6018 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter); 6019 6020 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: port=%p, job=%p, d_id=%x," 6021 " jcount=%d pkt=%p, state=%x", port, cmd->cmd_job, d_id, 6022 cmd->cmd_job->job_counter, pkt, pkt->pkt_state); 6023 6024 /* 6025 * Bail out early on ULP initiated requests if the 6026 * state change has occurred 6027 */ 6028 mutex_enter(&port->fp_mutex); 6029 port->fp_out_fpcmds--; 6030 bailout = ((port->fp_statec_busy || 6031 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) && 6032 cmd->cmd_ulp_pkt) ? 1 : 0; 6033 mutex_exit(&port->fp_mutex); 6034 6035 if (FP_IS_PKT_ERROR(pkt) || bailout) { 6036 int skip_msg = 0; 6037 int giveup = 0; 6038 6039 if (cmd->cmd_ulp_pkt) { 6040 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6041 cmd->cmd_ulp_pkt->pkt_reason = pkt->pkt_reason; 6042 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6043 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6044 } 6045 6046 /* 6047 * If an unsolicited cross login already created 6048 * a device speed up the discovery by not retrying 6049 * the command mindlessly. 6050 */ 6051 if (pkt->pkt_pd == NULL && 6052 fctl_get_remote_port_by_did(port, d_id) != NULL) { 6053 fp_iodone(cmd); 6054 return; 6055 } 6056 6057 if (pkt->pkt_pd != NULL) { 6058 giveup = (pkt->pkt_pd->pd_recepient == 6059 PD_PLOGI_RECEPIENT) ? 
1 : 0; 6060 if (giveup) { 6061 /* 6062 * This pd is marked as plogi 6063 * recipient, so stop retrying 6064 */ 6065 FP_TRACE(FP_NHEAD1(3, 0), 6066 "fp_plogi_intr: stop retry as" 6067 " a cross login was accepted" 6068 " from d_id=%x, port=%p.", 6069 d_id, port); 6070 fp_iodone(cmd); 6071 return; 6072 } 6073 } 6074 6075 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6076 return; 6077 } 6078 6079 if ((pd = fctl_get_remote_port_by_did(port, d_id)) != NULL) { 6080 mutex_enter(&pd->pd_mutex); 6081 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 6082 skip_msg++; 6083 } 6084 mutex_exit(&pd->pd_mutex); 6085 } 6086 6087 mutex_enter(&port->fp_mutex); 6088 if (!bailout && !(skip_msg && port->fp_statec_busy) && 6089 port->fp_statec_busy <= 1 && 6090 pkt->pkt_reason != FC_REASON_FCAL_OPN_FAIL) { 6091 mutex_exit(&port->fp_mutex); 6092 /* 6093 * In case of Login Collisions, JNI HBAs return the 6094 * FC pkt back to the Initiator with the state set to 6095 * FC_PKT_LS_RJT and the reason to FC_REASON_LOGICAL_ERROR. 6096 * QLC HBAs handle such cases in the FW and do not 6097 * return the LS_RJT with Logical error when 6098 * a login collision happens. 6099 */ 6100 if ((pkt->pkt_state != FC_PKT_LS_RJT) || 6101 (pkt->pkt_reason != FC_REASON_LOGICAL_ERROR)) { 6102 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt, 6103 "PLOGI to %x failed", d_id); 6104 } 6105 FP_TRACE(FP_NHEAD2(9, 0), 6106 "PLOGI to %x failed. state=%x reason=%x.", 6107 d_id, pkt->pkt_state, pkt->pkt_reason); 6108 } else { 6109 mutex_exit(&port->fp_mutex); 6110 } 6111 6112 fp_iodone(cmd); 6113 return; 6114 } 6115 6116 acc = (la_els_logi_t *)pkt->pkt_resp; 6117 6118 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc, 6119 sizeof (resp), DDI_DEV_AUTOINCR); 6120 6121 ASSERT(resp.ls_code == LA_ELS_ACC); 6122 if (resp.ls_code != LA_ELS_ACC) { 6123 (void) fp_common_intr(pkt, 1); 6124 return; 6125 } 6126 6127 if (d_id == FS_NAME_SERVER || d_id == FS_FABRIC_CONTROLLER) { 6128 mutex_enter(&port->fp_mutex); 6129 port->fp_ns_login_class = FC_TRAN_CLASS(pkt->pkt_tran_flags); 6130 mutex_exit(&port->fp_mutex); 6131 fp_iodone(cmd); 6132 return; 6133 } 6134 6135 ASSERT(acc == (la_els_logi_t *)pkt->pkt_resp); 6136 6137 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&pwwn, 6138 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t), 6139 DDI_DEV_AUTOINCR); 6140 6141 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&nwwn, 6142 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t), 6143 DDI_DEV_AUTOINCR); 6144 6145 ASSERT(fctl_is_wwn_zero(&pwwn) == FC_FAILURE); 6146 ASSERT(fctl_is_wwn_zero(&nwwn) == FC_FAILURE); 6147 6148 if ((pd = pkt->pkt_pd) == NULL) { 6149 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 6150 if (pd == NULL) { 6151 FP_TRACE(FP_NHEAD2(1, 0), "fp_plogi_intr: fp %x pd %x", 6152 port->fp_port_id.port_id, d_id); 6153 pd = fctl_create_remote_port(port, &nwwn, &pwwn, d_id, 6154 PD_PLOGI_INITIATOR, KM_NOSLEEP); 6155 if (pd == NULL) { 6156 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6157 "couldn't create port device handles" 6158 " d_id=%x", d_id); 6159 fp_iodone(cmd); 6160 return; 6161 } 6162 } else { 6163 fc_remote_port_t *tmp_pd; 6164 6165 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 6166 if (tmp_pd != NULL) { 6167 fp_iodone(cmd); 6168 return; 6169 } 6170 6171 mutex_enter(&port->fp_mutex); 6172 mutex_enter(&pd->pd_mutex); 6173 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN) || 6174 (pd->pd_aux_flags & PD_LOGGED_OUT)) { 6175 cmd->cmd_flags |= FP_CMD_PLOGI_RETAIN; 6176 } 6177 6178 if (pd->pd_type == PORT_DEVICE_OLD) { 6179 if (pd->pd_port_id.port_id != d_id)
{ 6180 fctl_delist_did_table(port, pd); 6181 pd->pd_type = PORT_DEVICE_CHANGED; 6182 pd->pd_port_id.port_id = d_id; 6183 } else { 6184 pd->pd_type = PORT_DEVICE_NOCHANGE; 6185 } 6186 } 6187 6188 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) { 6189 char ww_name[17]; 6190 6191 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6192 6193 mutex_exit(&pd->pd_mutex); 6194 mutex_exit(&port->fp_mutex); 6195 FP_TRACE(FP_NHEAD2(9, 0), 6196 "Possible Duplicate name or address" 6197 " identifiers in the PLOGI response" 6198 " D_ID=%x, PWWN=%s: Please check the" 6199 " configuration", d_id, ww_name); 6200 fp_iodone(cmd); 6201 return; 6202 } 6203 fctl_enlist_did_table(port, pd); 6204 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6205 mutex_exit(&pd->pd_mutex); 6206 mutex_exit(&port->fp_mutex); 6207 } 6208 } else { 6209 fc_remote_port_t *tmp_pd, *new_wwn_pd; 6210 6211 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 6212 new_wwn_pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 6213 6214 mutex_enter(&port->fp_mutex); 6215 mutex_enter(&pd->pd_mutex); 6216 if (fctl_wwn_cmp(&pd->pd_port_name, &pwwn) == 0) { 6217 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: d_id=%x," 6218 " pd_state=%x pd_type=%x", d_id, pd->pd_state, 6219 pd->pd_type); 6220 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN && 6221 pd->pd_type == PORT_DEVICE_OLD) || 6222 (pd->pd_aux_flags & PD_LOGGED_OUT)) { 6223 pd->pd_type = PORT_DEVICE_NOCHANGE; 6224 } else if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 6225 pd->pd_type = PORT_DEVICE_NEW; 6226 } 6227 } else { 6228 char old_name[17]; 6229 char new_name[17]; 6230 6231 fc_wwn_to_str(&pd->pd_port_name, old_name); 6232 fc_wwn_to_str(&pwwn, new_name); 6233 6234 FP_TRACE(FP_NHEAD1(9, 0), 6235 "fp_plogi_intr: PWWN of a device with D_ID=%x " 6236 "changed. New PWWN = %s, OLD PWWN = %s ; tmp_pd:%p " 6237 "pd:%p new_wwn_pd:%p, cmd_ulp_pkt:%p, bailout:0x%x", 6238 d_id, new_name, old_name, tmp_pd, pd, new_wwn_pd, 6239 cmd->cmd_ulp_pkt, bailout); 6240 6241 FP_TRACE(FP_NHEAD2(9, 0), 6242 "PWWN of a device with D_ID=%x changed." 6243 " New PWWN = %s, OLD PWWN = %s", d_id, 6244 new_name, old_name); 6245 6246 if (cmd->cmd_ulp_pkt && !bailout) { 6247 fc_remote_node_t *rnodep; 6248 fc_portmap_t *changelist; 6249 fc_portmap_t *listptr; 6250 int len = 1; 6251 /* # entries in changelist */ 6252 6253 fctl_delist_pwwn_table(port, pd); 6254 6255 /* 6256 * Lets now check if there already is a pd with 6257 * this new WWN in the table. If so, we'll mark 6258 * it as invalid 6259 */ 6260 6261 if (new_wwn_pd) { 6262 /* 6263 * There is another pd with in the pwwn 6264 * table with the same WWN that we got 6265 * in the PLOGI payload. We have to get 6266 * it out of the pwwn table, update the 6267 * pd's state (fp_fillout_old_map does 6268 * this for us) and add it to the 6269 * changelist that goes up to ULPs. 6270 * 6271 * len is length of changelist and so 6272 * increment it. 6273 */ 6274 len++; 6275 6276 if (tmp_pd != pd) { 6277 /* 6278 * Odd case where pwwn and did 6279 * tables are out of sync but 6280 * we will handle that too. See 6281 * more comments below. 6282 * 6283 * One more device that ULPs 6284 * should know about and so len 6285 * gets incremented again. 6286 */ 6287 len++; 6288 } 6289 6290 listptr = changelist = kmem_zalloc(len * 6291 sizeof (*changelist), KM_SLEEP); 6292 6293 mutex_enter(&new_wwn_pd->pd_mutex); 6294 rnodep = new_wwn_pd->pd_remote_nodep; 6295 mutex_exit(&new_wwn_pd->pd_mutex); 6296 6297 /* 6298 * Hold the fd_mutex since 6299 * fctl_copy_portmap_held expects it. 
6300 * Preserve lock hierarchy by grabbing 6301 * fd_mutex before pd_mutex 6302 */ 6303 if (rnodep) { 6304 mutex_enter(&rnodep->fd_mutex); 6305 } 6306 mutex_enter(&new_wwn_pd->pd_mutex); 6307 fp_fillout_old_map_held(listptr++, 6308 new_wwn_pd, 0); 6309 mutex_exit(&new_wwn_pd->pd_mutex); 6310 if (rnodep) { 6311 mutex_exit(&rnodep->fd_mutex); 6312 } 6313 6314 /* 6315 * Safety check : 6316 * Lets ensure that the pwwn and did 6317 * tables are in sync. Ideally, we 6318 * should not find that these two pd's 6319 * are different. 6320 */ 6321 if (tmp_pd != pd) { 6322 mutex_enter(&tmp_pd->pd_mutex); 6323 rnodep = 6324 tmp_pd->pd_remote_nodep; 6325 mutex_exit(&tmp_pd->pd_mutex); 6326 6327 /* As above grab fd_mutex */ 6328 if (rnodep) { 6329 mutex_enter(&rnodep-> 6330 fd_mutex); 6331 } 6332 mutex_enter(&tmp_pd->pd_mutex); 6333 6334 fp_fillout_old_map_held( 6335 listptr++, tmp_pd, 0); 6336 6337 mutex_exit(&tmp_pd->pd_mutex); 6338 if (rnodep) { 6339 mutex_exit(&rnodep-> 6340 fd_mutex); 6341 } 6342 6343 /* 6344 * Now add "pd" (not tmp_pd) 6345 * to fp_did_table to sync it up 6346 * with fp_pwwn_table 6347 * 6348 * pd->pd_mutex is already held 6349 * at this point 6350 */ 6351 fctl_enlist_did_table(port, pd); 6352 } 6353 } else { 6354 listptr = changelist = kmem_zalloc( 6355 sizeof (*changelist), KM_SLEEP); 6356 } 6357 6358 ASSERT(changelist != NULL); 6359 6360 fp_fillout_changed_map(listptr, pd, &d_id, 6361 &pwwn); 6362 fctl_enlist_pwwn_table(port, pd); 6363 6364 mutex_exit(&pd->pd_mutex); 6365 mutex_exit(&port->fp_mutex); 6366 6367 fp_iodone(cmd); 6368 6369 (void) fp_ulp_devc_cb(port, changelist, len, 6370 len, KM_NOSLEEP, 0); 6371 6372 return; 6373 } 6374 } 6375 6376 if (pd->pd_porttype.port_type == FC_NS_PORT_NL) { 6377 nl_port = 1; 6378 } 6379 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) { 6380 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6381 } 6382 6383 mutex_exit(&pd->pd_mutex); 6384 mutex_exit(&port->fp_mutex); 6385 6386 if (tmp_pd == NULL) { 6387 mutex_enter(&port->fp_mutex); 6388 mutex_enter(&pd->pd_mutex); 6389 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) { 6390 char ww_name[17]; 6391 6392 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6393 mutex_exit(&pd->pd_mutex); 6394 mutex_exit(&port->fp_mutex); 6395 FP_TRACE(FP_NHEAD2(9, 0), 6396 "Possible Duplicate name or address" 6397 " identifiers in the PLOGI response" 6398 " D_ID=%x, PWWN=%s: Please check the" 6399 " configuration", d_id, ww_name); 6400 fp_iodone(cmd); 6401 return; 6402 } 6403 fctl_enlist_did_table(port, pd); 6404 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6405 mutex_exit(&pd->pd_mutex); 6406 mutex_exit(&port->fp_mutex); 6407 } 6408 } 6409 fp_register_login(&pkt->pkt_resp_acc, pd, acc, 6410 FC_TRAN_CLASS(pkt->pkt_tran_flags)); 6411 6412 if (cmd->cmd_ulp_pkt) { 6413 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6414 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6415 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6416 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) { 6417 if (pd != NULL) { 6418 FP_TRACE(FP_NHEAD1(9, 0), 6419 "fp_plogi_intr;" 6420 "ulp_pkt's pd is NULL, get a pd %p", 6421 pd); 6422 mutex_enter(&pd->pd_mutex); 6423 pd->pd_ref_count++; 6424 mutex_exit(&pd->pd_mutex); 6425 } 6426 cmd->cmd_ulp_pkt->pkt_pd = pd; 6427 } 6428 bcopy((caddr_t)&pkt->pkt_resp_fhdr, 6429 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr, 6430 sizeof (fc_frame_hdr_t)); 6431 bcopy((caddr_t)pkt->pkt_resp, 6432 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp, 6433 sizeof (la_els_logi_t)); 6434 } 6435 6436 mutex_enter(&port->fp_mutex); 6437 if (port->fp_topology == FC_TOP_PRIVATE_LOOP || nl_port) { 6438 
mutex_enter(&pd->pd_mutex); 6439 6440 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6441 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6442 cmd->cmd_retry_count = fp_retry_count; 6443 6444 /* 6445 * If the fc_remote_port_t pointer is not set in the given 6446 * fc_packet_t, then this fc_remote_port_t must have just 6447 * been created. Save the pointer and also increment the 6448 * fc_remote_port_t reference count. 6449 */ 6450 if (pkt->pkt_pd == NULL) { 6451 pkt->pkt_pd = pd; 6452 pd->pd_ref_count++; /* It's in use! */ 6453 } 6454 6455 fp_adisc_init(cmd, cmd->cmd_job); 6456 6457 pkt->pkt_cmdlen = sizeof (la_els_adisc_t); 6458 pkt->pkt_rsplen = sizeof (la_els_adisc_t); 6459 6460 mutex_exit(&pd->pd_mutex); 6461 mutex_exit(&port->fp_mutex); 6462 6463 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 6464 return; 6465 } 6466 } else { 6467 mutex_exit(&port->fp_mutex); 6468 } 6469 6470 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) { 6471 mutex_enter(&port->fp_mutex); 6472 mutex_enter(&pd->pd_mutex); 6473 6474 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6475 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6476 cmd->cmd_retry_count = fp_retry_count; 6477 6478 fp_logo_init(pd, cmd, cmd->cmd_job); 6479 6480 pkt->pkt_cmdlen = sizeof (la_els_logo_t); 6481 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN; 6482 6483 mutex_exit(&pd->pd_mutex); 6484 mutex_exit(&port->fp_mutex); 6485 6486 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 6487 return; 6488 } 6489 6490 } 6491 fp_iodone(cmd); 6492 } 6493 6494 6495 /* 6496 * Handle solicited ADISC response 6497 */ 6498 static void 6499 fp_adisc_intr(fc_packet_t *pkt) 6500 { 6501 int rval; 6502 int bailout; 6503 fp_cmd_t *cmd, *logi_cmd; 6504 fc_local_port_t *port; 6505 fc_remote_port_t *pd; 6506 la_els_adisc_t *acc; 6507 ls_code_t resp; 6508 fc_hardaddr_t ha; 6509 fc_portmap_t *changelist; 6510 int initiator, adiscfail = 0; 6511 6512 pd = pkt->pkt_pd; 6513 cmd = pkt->pkt_ulp_private; 6514 port = cmd->cmd_port; 6515 6516 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter); 6517 6518 ASSERT(pd != NULL && port != NULL && cmd != NULL); 6519 6520 mutex_enter(&port->fp_mutex); 6521 port->fp_out_fpcmds--; 6522 bailout = ((port->fp_statec_busy || 6523 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) && 6524 cmd->cmd_ulp_pkt) ? 1 : 0; 6525 mutex_exit(&port->fp_mutex); 6526 6527 if (bailout) { 6528 fp_iodone(cmd); 6529 return; 6530 } 6531 6532 if (pkt->pkt_state == FC_PKT_SUCCESS && pkt->pkt_resp_resid == 0) { 6533 acc = (la_els_adisc_t *)pkt->pkt_resp; 6534 6535 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, 6536 (uint8_t *)acc, sizeof (resp), DDI_DEV_AUTOINCR); 6537 6538 if (resp.ls_code == LA_ELS_ACC) { 6539 int is_private; 6540 6541 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&ha, 6542 (uint8_t *)&acc->hard_addr, sizeof (ha), 6543 DDI_DEV_AUTOINCR); 6544 6545 mutex_enter(&port->fp_mutex); 6546 6547 is_private = 6548 (port->fp_topology == FC_TOP_PRIVATE_LOOP) ? 
1 : 0; 6549 6550 mutex_enter(&pd->pd_mutex); 6551 if ((pd->pd_aux_flags & PD_IN_DID_QUEUE) == 0) { 6552 fctl_enlist_did_table(port, pd); 6553 } 6554 mutex_exit(&pd->pd_mutex); 6555 6556 mutex_exit(&port->fp_mutex); 6557 6558 mutex_enter(&pd->pd_mutex); 6559 if (pd->pd_type != PORT_DEVICE_NEW) { 6560 if (is_private && (pd->pd_hard_addr.hard_addr != 6561 ha.hard_addr)) { 6562 pd->pd_type = PORT_DEVICE_CHANGED; 6563 } else { 6564 pd->pd_type = PORT_DEVICE_NOCHANGE; 6565 } 6566 } 6567 6568 if (is_private && (ha.hard_addr && 6569 pd->pd_port_id.port_id != ha.hard_addr)) { 6570 char ww_name[17]; 6571 6572 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6573 6574 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6575 "NL_Port Identifier %x doesn't match" 6576 " with Hard Address %x, Will use Port" 6577 " WWN %s", pd->pd_port_id.port_id, 6578 ha.hard_addr, ww_name); 6579 6580 pd->pd_hard_addr.hard_addr = 0; 6581 } else { 6582 pd->pd_hard_addr.hard_addr = ha.hard_addr; 6583 } 6584 mutex_exit(&pd->pd_mutex); 6585 } else { 6586 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6587 return; 6588 } 6589 } 6590 } else { 6591 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6592 return; 6593 } 6594 6595 mutex_enter(&port->fp_mutex); 6596 if (port->fp_statec_busy <= 1) { 6597 mutex_exit(&port->fp_mutex); 6598 if (pkt->pkt_state == FC_PKT_LS_RJT && 6599 pkt->pkt_reason == FC_REASON_CMD_UNABLE) { 6600 uchar_t class; 6601 int cmd_flag; 6602 uint32_t src_id; 6603 6604 class = fp_get_nextclass(port, 6605 FC_TRAN_CLASS_INVALID); 6606 if (class == FC_TRAN_CLASS_INVALID) { 6607 fp_iodone(cmd); 6608 return; 6609 } 6610 6611 FP_TRACE(FP_NHEAD1(1, 0), "ADISC re-login; " 6612 "fp_state=0x%x, pkt_state=0x%x, " 6613 "reason=0x%x, class=0x%x", 6614 port->fp_state, pkt->pkt_state, 6615 pkt->pkt_reason, class); 6616 cmd_flag = FP_CMD_PLOGI_RETAIN; 6617 6618 logi_cmd = fp_alloc_pkt(port, 6619 sizeof (la_els_logi_t), 6620 sizeof (la_els_logi_t), KM_SLEEP, pd); 6621 if (logi_cmd == NULL) { 6622 fp_iodone(cmd); 6623 return; 6624 } 6625 6626 logi_cmd->cmd_pkt.pkt_tran_flags = 6627 FC_TRAN_INTR | class; 6628 logi_cmd->cmd_pkt.pkt_tran_type = 6629 FC_PKT_EXCHANGE; 6630 logi_cmd->cmd_flags = cmd_flag; 6631 logi_cmd->cmd_retry_count = fp_retry_count; 6632 logi_cmd->cmd_ulp_pkt = NULL; 6633 6634 mutex_enter(&port->fp_mutex); 6635 src_id = port->fp_port_id.port_id; 6636 mutex_exit(&port->fp_mutex); 6637 6638 fp_xlogi_init(port, logi_cmd, src_id, 6639 pkt->pkt_cmd_fhdr.d_id, fp_plogi_intr, 6640 cmd->cmd_job, LA_ELS_PLOGI); 6641 if (pd) { 6642 mutex_enter(&pd->pd_mutex); 6643 pd->pd_flags = PD_ELS_IN_PROGRESS; 6644 mutex_exit(&pd->pd_mutex); 6645 } 6646 6647 if (fp_sendcmd(port, logi_cmd, 6648 port->fp_fca_handle) == FC_SUCCESS) { 6649 fp_free_pkt(cmd); 6650 return; 6651 } else { 6652 fp_free_pkt(logi_cmd); 6653 } 6654 } else { 6655 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt, 6656 "ADISC to %x failed, cmd_flags=%x", 6657 pkt->pkt_cmd_fhdr.d_id, cmd->cmd_flags); 6658 cmd->cmd_flags &= ~FP_CMD_PLOGI_RETAIN; 6659 adiscfail = 1; 6660 } 6661 } else { 6662 mutex_exit(&port->fp_mutex); 6663 } 6664 } 6665 6666 if (cmd->cmd_ulp_pkt) { 6667 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6668 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6669 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6670 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) { 6671 cmd->cmd_ulp_pkt->pkt_pd = pd; 6672 FP_TRACE(FP_NHEAD1(9, 0), 6673 "fp_adisc__intr;" 6674 "ulp_pkt's pd is NULL, get a pd %p", 6675 pd); 6676 6677 } 6678 bcopy((caddr_t)&pkt->pkt_resp_fhdr, 6679 
(caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr, 6680 sizeof (fc_frame_hdr_t)); 6681 bcopy((caddr_t)pkt->pkt_resp, 6682 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp, 6683 sizeof (la_els_adisc_t)); 6684 } 6685 6686 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) { 6687 FP_TRACE(FP_NHEAD1(9, 0), 6688 "fp_adisc_intr: Perform LOGO.cmd_flags=%x, " 6689 "fp_retry_count=%x, ulp_pkt=%p", 6690 cmd->cmd_flags, fp_retry_count, cmd->cmd_ulp_pkt); 6691 6692 mutex_enter(&port->fp_mutex); 6693 mutex_enter(&pd->pd_mutex); 6694 6695 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6696 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6697 cmd->cmd_retry_count = fp_retry_count; 6698 6699 fp_logo_init(pd, cmd, cmd->cmd_job); 6700 6701 pkt->pkt_cmdlen = sizeof (la_els_logo_t); 6702 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN; 6703 6704 mutex_exit(&pd->pd_mutex); 6705 mutex_exit(&port->fp_mutex); 6706 6707 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 6708 if (adiscfail) { 6709 mutex_enter(&pd->pd_mutex); 6710 initiator = 6711 ((pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0); 6712 pd->pd_state = PORT_DEVICE_VALID; 6713 pd->pd_aux_flags |= PD_LOGGED_OUT; 6714 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) { 6715 pd->pd_type = PORT_DEVICE_NEW; 6716 } else { 6717 pd->pd_type = PORT_DEVICE_NOCHANGE; 6718 } 6719 mutex_exit(&pd->pd_mutex); 6720 6721 changelist = 6722 kmem_zalloc(sizeof (*changelist), KM_SLEEP); 6723 6724 if (initiator) { 6725 fp_unregister_login(pd); 6726 fctl_copy_portmap(changelist, pd); 6727 } else { 6728 fp_fillout_old_map(changelist, pd, 0); 6729 } 6730 6731 FP_TRACE(FP_NHEAD1(9, 0), 6732 "fp_adisc_intr: Dev change notification " 6733 "to ULP port=%p, pd=%p, map_type=%x map_state=%x " 6734 "map_flags=%x initiator=%d", port, pd, 6735 changelist->map_type, changelist->map_state, 6736 changelist->map_flags, initiator); 6737 6738 (void) fp_ulp_devc_cb(port, changelist, 6739 1, 1, KM_SLEEP, 0); 6740 } 6741 if (rval == FC_SUCCESS) { 6742 return; 6743 } 6744 } 6745 fp_iodone(cmd); 6746 } 6747 6748 6749 /* 6750 * Handle solicited LOGO response 6751 */ 6752 static void 6753 fp_logo_intr(fc_packet_t *pkt) 6754 { 6755 ls_code_t resp; 6756 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port; 6757 6758 mutex_enter(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6759 ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_out_fpcmds--; 6760 mutex_exit(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6761 6762 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, 6763 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6764 6765 if (FP_IS_PKT_ERROR(pkt)) { 6766 (void) fp_common_intr(pkt, 1); 6767 return; 6768 } 6769 6770 ASSERT(resp.ls_code == LA_ELS_ACC); 6771 if (resp.ls_code != LA_ELS_ACC) { 6772 (void) fp_common_intr(pkt, 1); 6773 return; 6774 } 6775 6776 if (pkt->pkt_pd != NULL) { 6777 fp_unregister_login(pkt->pkt_pd); 6778 } 6779 6780 fp_iodone(pkt->pkt_ulp_private); 6781 } 6782 6783 6784 /* 6785 * Handle solicited RNID response 6786 */ 6787 static void 6788 fp_rnid_intr(fc_packet_t *pkt) 6789 { 6790 ls_code_t resp; 6791 job_request_t *job; 6792 fp_cmd_t *cmd; 6793 la_els_rnid_acc_t *acc; 6794 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port; 6795 6796 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, 6797 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6798 cmd = pkt->pkt_ulp_private; 6799 6800 mutex_enter(&cmd->cmd_port->fp_mutex); 6801 cmd->cmd_port->fp_out_fpcmds--; 6802 mutex_exit(&cmd->cmd_port->fp_mutex); 6803 6804 job = 
cmd->cmd_job; 6805 ASSERT(job->job_private != NULL); 6806 6807 /* If failure or LS_RJT then retry the packet, if needed */ 6808 if (pkt->pkt_state != FC_PKT_SUCCESS || resp.ls_code != LA_ELS_ACC) { 6809 (void) fp_common_intr(pkt, 1); 6810 return; 6811 } 6812 6813 /* Save node_id memory allocated in ioctl code */ 6814 acc = (la_els_rnid_acc_t *)pkt->pkt_resp; 6815 6816 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)job->job_private, 6817 (uint8_t *)acc, sizeof (la_els_rnid_acc_t), DDI_DEV_AUTOINCR); 6818 6819 /* wakeup the ioctl thread and free the pkt */ 6820 fp_iodone(cmd); 6821 } 6822 6823 6824 /* 6825 * Handle solicited RLS response 6826 */ 6827 static void 6828 fp_rls_intr(fc_packet_t *pkt) 6829 { 6830 ls_code_t resp; 6831 job_request_t *job; 6832 fp_cmd_t *cmd; 6833 la_els_rls_acc_t *acc; 6834 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port; 6835 6836 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, 6837 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6838 cmd = pkt->pkt_ulp_private; 6839 6840 mutex_enter(&cmd->cmd_port->fp_mutex); 6841 cmd->cmd_port->fp_out_fpcmds--; 6842 mutex_exit(&cmd->cmd_port->fp_mutex); 6843 6844 job = cmd->cmd_job; 6845 ASSERT(job->job_private != NULL); 6846 6847 /* If failure or LS_RJT then retry the packet, if needed */ 6848 if (FP_IS_PKT_ERROR(pkt) || resp.ls_code != LA_ELS_ACC) { 6849 (void) fp_common_intr(pkt, 1); 6850 return; 6851 } 6852 6853 /* Save link error status block in memory allocated in ioctl code */ 6854 acc = (la_els_rls_acc_t *)pkt->pkt_resp; 6855 6856 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)job->job_private, 6857 (uint8_t *)&acc->rls_link_params, sizeof (fc_rls_acc_t), 6858 DDI_DEV_AUTOINCR); 6859 6860 /* wakeup the ioctl thread and free the pkt */ 6861 fp_iodone(cmd); 6862 } 6863 6864 6865 /* 6866 * A solicited command completion interrupt (mostly for commands 6867 * that require almost no post processing such as SCR ELS) 6868 */ 6869 static void 6870 fp_intr(fc_packet_t *pkt) 6871 { 6872 mutex_enter(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6873 ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_out_fpcmds--; 6874 mutex_exit(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6875 6876 if (FP_IS_PKT_ERROR(pkt)) { 6877 (void) fp_common_intr(pkt, 1); 6878 return; 6879 } 6880 fp_iodone(pkt->pkt_ulp_private); 6881 } 6882 6883 6884 /* 6885 * Handle the underlying port's state change 6886 */ 6887 static void 6888 fp_statec_cb(opaque_t port_handle, uint32_t state) 6889 { 6890 fc_local_port_t *port = port_handle; 6891 job_request_t *job; 6892 6893 /* 6894 * If it is not possible to process the callbacks 6895 * just drop the callback on the floor; Don't bother 6896 * to do something that isn't safe at this time 6897 */ 6898 mutex_enter(&port->fp_mutex); 6899 if ((port->fp_soft_state & 6900 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 6901 (FC_PORT_STATE_MASK(port->fp_state) == FC_PORT_STATE_MASK(state))) { 6902 mutex_exit(&port->fp_mutex); 6903 return; 6904 } 6905 6906 if (port->fp_statec_busy == 0) { 6907 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB; 6908 #ifdef DEBUG 6909 } else { 6910 ASSERT(port->fp_soft_state & FP_SOFT_IN_STATEC_CB); 6911 #endif 6912 } 6913 6914 port->fp_statec_busy++; 6915 6916 /* 6917 * For now, force the trusted method of device authentication (by 6918 * PLOGI) when LIPs do not involve OFFLINE to ONLINE transition. 
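 *
 * (Added note.) In the code below this means a LIP, with or without
 * the L_bit set, is rewritten as a plain LOOP state change that keeps
 * the current speed bits, i.e.
 *
 *	state = FC_PORT_SPEED_MASK(port->fp_state) | FC_STATE_LOOP;
 *
 * and fp_port_offline() is called first, so the subsequent LOOP/ONLINE
 * processing re-authenticates remote ports with PLOGI rather than
 * trusting their previous logins.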
6919 */ 6920 if (FC_PORT_STATE_MASK(state) == FC_STATE_LIP || 6921 FC_PORT_STATE_MASK(state) == FC_STATE_LIP_LBIT_SET) { 6922 state = FC_PORT_SPEED_MASK(port->fp_state) | FC_STATE_LOOP; 6923 fp_port_offline(port, 0); 6924 } 6925 mutex_exit(&port->fp_mutex); 6926 6927 switch (FC_PORT_STATE_MASK(state)) { 6928 case FC_STATE_OFFLINE: 6929 job = fctl_alloc_job(JOB_PORT_OFFLINE, 6930 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6931 if (job == NULL) { 6932 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6933 " fp_statec_cb() couldn't submit a job " 6934 " to the thread: failing.."); 6935 mutex_enter(&port->fp_mutex); 6936 if (--port->fp_statec_busy == 0) { 6937 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6938 } 6939 mutex_exit(&port->fp_mutex); 6940 return; 6941 } 6942 mutex_enter(&port->fp_mutex); 6943 /* 6944 * Zero out this field so that we do not retain 6945 * the fabric name as its no longer valid 6946 */ 6947 bzero(&port->fp_fabric_name, sizeof (la_wwn_t)); 6948 port->fp_state = state; 6949 mutex_exit(&port->fp_mutex); 6950 6951 fctl_enque_job(port, job); 6952 break; 6953 6954 case FC_STATE_ONLINE: 6955 case FC_STATE_LOOP: 6956 mutex_enter(&port->fp_mutex); 6957 port->fp_state = state; 6958 6959 if (port->fp_offline_tid) { 6960 timeout_id_t tid; 6961 6962 tid = port->fp_offline_tid; 6963 port->fp_offline_tid = NULL; 6964 mutex_exit(&port->fp_mutex); 6965 (void) untimeout(tid); 6966 } else { 6967 mutex_exit(&port->fp_mutex); 6968 } 6969 6970 job = fctl_alloc_job(JOB_PORT_ONLINE, 6971 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6972 if (job == NULL) { 6973 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6974 "fp_statec_cb() couldn't submit a job " 6975 "to the thread: failing.."); 6976 6977 mutex_enter(&port->fp_mutex); 6978 if (--port->fp_statec_busy == 0) { 6979 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6980 } 6981 mutex_exit(&port->fp_mutex); 6982 return; 6983 } 6984 fctl_enque_job(port, job); 6985 break; 6986 6987 case FC_STATE_RESET_REQUESTED: 6988 mutex_enter(&port->fp_mutex); 6989 port->fp_state = FC_STATE_OFFLINE; 6990 port->fp_soft_state |= FP_SOFT_IN_FCA_RESET; 6991 mutex_exit(&port->fp_mutex); 6992 /* FALLTHROUGH */ 6993 6994 case FC_STATE_RESET: 6995 job = fctl_alloc_job(JOB_ULP_NOTIFY, 6996 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6997 if (job == NULL) { 6998 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6999 "fp_statec_cb() couldn't submit a job" 7000 " to the thread: failing.."); 7001 7002 mutex_enter(&port->fp_mutex); 7003 if (--port->fp_statec_busy == 0) { 7004 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 7005 } 7006 mutex_exit(&port->fp_mutex); 7007 return; 7008 } 7009 7010 /* squeeze into some field in the job structure */ 7011 job->job_ulp_listlen = FC_PORT_STATE_MASK(state); 7012 fctl_enque_job(port, job); 7013 break; 7014 7015 case FC_STATE_TARGET_PORT_RESET: 7016 (void) fp_ulp_notify(port, state, KM_NOSLEEP); 7017 /* FALLTHROUGH */ 7018 7019 case FC_STATE_NAMESERVICE: 7020 /* FALLTHROUGH */ 7021 7022 default: 7023 mutex_enter(&port->fp_mutex); 7024 if (--port->fp_statec_busy == 0) { 7025 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 7026 } 7027 mutex_exit(&port->fp_mutex); 7028 break; 7029 } 7030 } 7031 7032 7033 /* 7034 * Register with the Name Server for RSCNs 7035 */ 7036 static int 7037 fp_ns_scr(fc_local_port_t *port, job_request_t *job, uchar_t scr_func, 7038 int sleep) 7039 { 7040 uint32_t s_id; 7041 uchar_t class; 7042 fc_scr_req_t payload; 7043 fp_cmd_t *cmd; 7044 fc_packet_t *pkt; 7045 7046 mutex_enter(&port->fp_mutex); 7047 s_id = 
port->fp_port_id.port_id; 7048 class = port->fp_ns_login_class; 7049 mutex_exit(&port->fp_mutex); 7050 7051 cmd = fp_alloc_pkt(port, sizeof (fc_scr_req_t), 7052 sizeof (fc_scr_resp_t), sleep, NULL); 7053 if (cmd == NULL) { 7054 return (FC_NOMEM); 7055 } 7056 7057 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 7058 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 7059 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 7060 cmd->cmd_retry_count = fp_retry_count; 7061 cmd->cmd_ulp_pkt = NULL; 7062 7063 pkt = &cmd->cmd_pkt; 7064 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 7065 7066 fp_els_init(cmd, s_id, 0xFFFFFD, fp_intr, job); 7067 7068 payload.ls_code.ls_code = LA_ELS_SCR; 7069 payload.ls_code.mbz = 0; 7070 payload.scr_rsvd = 0; 7071 payload.scr_func = scr_func; 7072 7073 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 7074 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 7075 7076 job->job_counter = 1; 7077 7078 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 7079 fp_iodone(cmd); 7080 } 7081 7082 return (FC_SUCCESS); 7083 } 7084 7085 7086 /* 7087 * There are basically two methods to determine the total number of 7088 * devices out in the NS database; Reading the details of the two 7089 * methods described below, it shouldn't be hard to identify which 7090 * of the two methods is better. 7091 * 7092 * Method 1. 7093 * Iteratively issue GANs until all ports identifiers are walked 7094 * 7095 * Method 2. 7096 * Issue GID_PT (get port Identifiers) with Maximum residual 7097 * field in the request CT HEADER set to accommodate only the 7098 * CT HEADER in the response frame. And if FC-GS2 has been 7099 * carefully read, the NS here has a chance to FS_ACC the 7100 * request and indicate the residual size in the FS_ACC. 
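 *
 * (Added note.) With Method 2 the residual reported in the FS_ACC
 * CT HEADER counts the 4-byte port identifiers that did not fit in
 * the response, so fp_ns_get_devcount() below can derive the device
 * count directly (1 + residual in the current code) instead of
 * walking the entire database with GA_NXT.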
7101 * 7102 * Method 2 is wonderful, although it's not mandatory for the NS 7103 * to update the Maximum/Residual Field as can be seen in 4.3.1.6 7104 * (note with particular care the use of the auxiliary verb 'may') 7105 * 7106 */ 7107 static int 7108 fp_ns_get_devcount(fc_local_port_t *port, job_request_t *job, int create, 7109 int sleep) 7110 { 7111 int flags; 7112 int rval; 7113 uint32_t src_id; 7114 fctl_ns_req_t *ns_cmd; 7115 7116 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 7117 7118 mutex_enter(&port->fp_mutex); 7119 src_id = port->fp_port_id.port_id; 7120 mutex_exit(&port->fp_mutex); 7121 7122 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) { 7123 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pt_t), 7124 sizeof (ns_resp_gid_pt_t), 0, 7125 (FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF), sleep); 7126 7127 if (ns_cmd == NULL) { 7128 return (FC_NOMEM); 7129 } 7130 7131 ns_cmd->ns_cmd_code = NS_GID_PT; 7132 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.port_type 7133 = FC_NS_PORT_NX; /* All port types */ 7134 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.rsvd = 0; 7135 7136 } else { 7137 uint32_t ns_flags; 7138 7139 ns_flags = FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF; 7140 if (create) { 7141 ns_flags |= FCTL_NS_CREATE_DEVICE; 7142 } 7143 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 7144 sizeof (ns_resp_gan_t), sizeof (int), ns_flags, sleep); 7145 7146 if (ns_cmd == NULL) { 7147 return (FC_NOMEM); 7148 } 7149 ns_cmd->ns_gan_index = 0; 7150 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 7151 ns_cmd->ns_cmd_code = NS_GA_NXT; 7152 ns_cmd->ns_gan_max = 0xFFFF; 7153 7154 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = src_id; 7155 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 7156 } 7157 7158 flags = job->job_flags; 7159 job->job_flags &= ~JOB_TYPE_FP_ASYNC; 7160 job->job_counter = 1; 7161 7162 rval = fp_ns_query(port, ns_cmd, job, 1, sleep); 7163 job->job_flags = flags; 7164 7165 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) { 7166 uint16_t max_resid; 7167 7168 /* 7169 * Revert to scanning the NS if NS_GID_PT isn't 7170 * helping us figure out total number of devices. 7171 */ 7172 if (job->job_result != FC_SUCCESS || 7173 ns_cmd->ns_resp_hdr.ct_cmdrsp != FS_ACC_IU) { 7174 mutex_enter(&port->fp_mutex); 7175 port->fp_options &= ~FP_NS_SMART_COUNT; 7176 mutex_exit(&port->fp_mutex); 7177 7178 fctl_free_ns_cmd(ns_cmd); 7179 return (fp_ns_get_devcount(port, job, create, sleep)); 7180 } 7181 7182 mutex_enter(&port->fp_mutex); 7183 port->fp_total_devices = 1; 7184 max_resid = ns_cmd->ns_resp_hdr.ct_aiusize; 7185 if (max_resid) { 7186 /* 7187 * Since port identifier is 4 bytes and max_resid 7188 * is also in WORDS, max_resid simply indicates 7189 * the total number of port identifiers not 7190 * transferred 7191 */ 7192 port->fp_total_devices += max_resid; 7193 } 7194 mutex_exit(&port->fp_mutex); 7195 } 7196 mutex_enter(&port->fp_mutex); 7197 port->fp_total_devices = *((int *)ns_cmd->ns_data_buf); 7198 mutex_exit(&port->fp_mutex); 7199 fctl_free_ns_cmd(ns_cmd); 7200 7201 return (rval); 7202 } 7203 7204 /* 7205 * One heck of a function to serve userland. 
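 *
 * (Added note.) fp_fciocmd() checks the caller's open mode with
 * fp_check_perms(), serializes exclusive-open callers through the
 * FP_EXCL_BUSY flag, and then dispatches on fcio->fcio_cmd.  Most
 * commands copy their results back with fp_copyout() and
 * fp_fcio_copyout(); a failed copyout turns the return value into
 * EFAULT regardless of how the command itself fared.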
7206 */ 7207 static int 7208 fp_fciocmd(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 7209 { 7210 int rval = 0; 7211 int jcode; 7212 uint32_t ret; 7213 uchar_t open_flag; 7214 fcio_t *kfcio; 7215 job_request_t *job; 7216 boolean_t use32 = B_FALSE; 7217 7218 #ifdef _MULTI_DATAMODEL 7219 switch (ddi_model_convert_from(mode & FMODELS)) { 7220 case DDI_MODEL_ILP32: 7221 use32 = B_TRUE; 7222 break; 7223 7224 case DDI_MODEL_NONE: 7225 default: 7226 break; 7227 } 7228 #endif 7229 7230 mutex_enter(&port->fp_mutex); 7231 if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB | 7232 FP_SOFT_IN_UNSOL_CB)) { 7233 fcio->fcio_errno = FC_STATEC_BUSY; 7234 mutex_exit(&port->fp_mutex); 7235 rval = EAGAIN; 7236 if (fp_fcio_copyout(fcio, data, mode)) { 7237 rval = EFAULT; 7238 } 7239 return (rval); 7240 } 7241 open_flag = port->fp_flag; 7242 mutex_exit(&port->fp_mutex); 7243 7244 if (fp_check_perms(open_flag, fcio->fcio_cmd) != FC_SUCCESS) { 7245 fcio->fcio_errno = FC_FAILURE; 7246 rval = EACCES; 7247 if (fp_fcio_copyout(fcio, data, mode)) { 7248 rval = EFAULT; 7249 } 7250 return (rval); 7251 } 7252 7253 /* 7254 * If an exclusive open was demanded during open, don't let 7255 * either innocuous or devil threads to share the file 7256 * descriptor and fire down exclusive access commands 7257 */ 7258 mutex_enter(&port->fp_mutex); 7259 if (port->fp_flag & FP_EXCL) { 7260 if (port->fp_flag & FP_EXCL_BUSY) { 7261 mutex_exit(&port->fp_mutex); 7262 fcio->fcio_errno = FC_FAILURE; 7263 return (EBUSY); 7264 } 7265 port->fp_flag |= FP_EXCL_BUSY; 7266 } 7267 mutex_exit(&port->fp_mutex); 7268 7269 fcio->fcio_errno = FC_SUCCESS; 7270 7271 switch (fcio->fcio_cmd) { 7272 case FCIO_GET_HOST_PARAMS: { 7273 fc_port_dev_t *val; 7274 fc_port_dev32_t *val32; 7275 int index; 7276 int lilp_device_count; 7277 fc_lilpmap_t *lilp_map; 7278 uchar_t *alpa_list; 7279 7280 if (use32 == B_TRUE) { 7281 if (fcio->fcio_olen != sizeof (*val32) || 7282 fcio->fcio_xfer != FCIO_XFER_READ) { 7283 rval = EINVAL; 7284 break; 7285 } 7286 } else { 7287 if (fcio->fcio_olen != sizeof (*val) || 7288 fcio->fcio_xfer != FCIO_XFER_READ) { 7289 rval = EINVAL; 7290 break; 7291 } 7292 } 7293 7294 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7295 7296 mutex_enter(&port->fp_mutex); 7297 val->dev_did = port->fp_port_id; 7298 val->dev_hard_addr = port->fp_hard_addr; 7299 val->dev_pwwn = port->fp_service_params.nport_ww_name; 7300 val->dev_nwwn = port->fp_service_params.node_ww_name; 7301 val->dev_state = port->fp_state; 7302 7303 lilp_map = &port->fp_lilp_map; 7304 alpa_list = &lilp_map->lilp_alpalist[0]; 7305 lilp_device_count = lilp_map->lilp_length; 7306 for (index = 0; index < lilp_device_count; index++) { 7307 uint32_t d_id; 7308 7309 d_id = alpa_list[index]; 7310 if (d_id == port->fp_port_id.port_id) { 7311 break; 7312 } 7313 } 7314 val->dev_did.priv_lilp_posit = (uint8_t)(index & 0xff); 7315 7316 bcopy(port->fp_fc4_types, val->dev_type, 7317 sizeof (port->fp_fc4_types)); 7318 mutex_exit(&port->fp_mutex); 7319 7320 if (use32 == B_TRUE) { 7321 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7322 7323 val32->dev_did = val->dev_did; 7324 val32->dev_hard_addr = val->dev_hard_addr; 7325 val32->dev_pwwn = val->dev_pwwn; 7326 val32->dev_nwwn = val->dev_nwwn; 7327 val32->dev_state = val->dev_state; 7328 val32->dev_did.priv_lilp_posit = 7329 val->dev_did.priv_lilp_posit; 7330 7331 bcopy(val->dev_type, val32->dev_type, 7332 sizeof (port->fp_fc4_types)); 7333 7334 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7335 fcio->fcio_olen, mode) == 0) { 7336 if 
(fp_fcio_copyout(fcio, data, mode)) { 7337 rval = EFAULT; 7338 } 7339 } else { 7340 rval = EFAULT; 7341 } 7342 7343 kmem_free(val32, sizeof (*val32)); 7344 } else { 7345 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7346 fcio->fcio_olen, mode) == 0) { 7347 if (fp_fcio_copyout(fcio, data, mode)) { 7348 rval = EFAULT; 7349 } 7350 } else { 7351 rval = EFAULT; 7352 } 7353 } 7354 7355 /* need to free "val" here */ 7356 kmem_free(val, sizeof (*val)); 7357 break; 7358 } 7359 7360 case FCIO_GET_OTHER_ADAPTER_PORTS: { 7361 uint32_t index; 7362 char *tmpPath; 7363 fc_local_port_t *tmpPort; 7364 7365 if (fcio->fcio_olen < MAXPATHLEN || 7366 fcio->fcio_ilen != sizeof (uint32_t)) { 7367 rval = EINVAL; 7368 break; 7369 } 7370 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof (index), mode)) { 7371 rval = EFAULT; 7372 break; 7373 } 7374 7375 tmpPort = fctl_get_adapter_port_by_index(port, index); 7376 if (tmpPort == NULL) { 7377 FP_TRACE(FP_NHEAD1(9, 0), 7378 "User supplied index out of range"); 7379 fcio->fcio_errno = FC_BADPORT; 7380 rval = EFAULT; 7381 if (fp_fcio_copyout(fcio, data, mode)) { 7382 rval = EFAULT; 7383 } 7384 break; 7385 } 7386 7387 tmpPath = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7388 (void) ddi_pathname(tmpPort->fp_port_dip, tmpPath); 7389 if (fp_copyout((void *)tmpPath, (void *)fcio->fcio_obuf, 7390 MAXPATHLEN, mode) == 0) { 7391 if (fp_fcio_copyout(fcio, data, mode)) { 7392 rval = EFAULT; 7393 } 7394 } else { 7395 rval = EFAULT; 7396 } 7397 kmem_free(tmpPath, MAXPATHLEN); 7398 break; 7399 } 7400 7401 case FCIO_NPIV_GET_ADAPTER_ATTRIBUTES: 7402 case FCIO_GET_ADAPTER_ATTRIBUTES: { 7403 fc_hba_adapter_attributes_t *val; 7404 fc_hba_adapter_attributes32_t *val32; 7405 7406 if (use32 == B_TRUE) { 7407 if (fcio->fcio_olen < sizeof (*val32) || 7408 fcio->fcio_xfer != FCIO_XFER_READ) { 7409 rval = EINVAL; 7410 break; 7411 } 7412 } else { 7413 if (fcio->fcio_olen < sizeof (*val) || 7414 fcio->fcio_xfer != FCIO_XFER_READ) { 7415 rval = EINVAL; 7416 break; 7417 } 7418 } 7419 7420 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7421 val->version = FC_HBA_ADAPTER_ATTRIBUTES_VERSION; 7422 mutex_enter(&port->fp_mutex); 7423 bcopy(port->fp_hba_port_attrs.manufacturer, 7424 val->Manufacturer, 7425 sizeof (val->Manufacturer)); 7426 bcopy(port->fp_hba_port_attrs.serial_number, 7427 val->SerialNumber, 7428 sizeof (val->SerialNumber)); 7429 bcopy(port->fp_hba_port_attrs.model, 7430 val->Model, 7431 sizeof (val->Model)); 7432 bcopy(port->fp_hba_port_attrs.model_description, 7433 val->ModelDescription, 7434 sizeof (val->ModelDescription)); 7435 bcopy(port->fp_sym_node_name, val->NodeSymbolicName, 7436 port->fp_sym_node_namelen); 7437 bcopy(port->fp_hba_port_attrs.hardware_version, 7438 val->HardwareVersion, 7439 sizeof (val->HardwareVersion)); 7440 bcopy(port->fp_hba_port_attrs.option_rom_version, 7441 val->OptionROMVersion, 7442 sizeof (val->OptionROMVersion)); 7443 bcopy(port->fp_hba_port_attrs.firmware_version, 7444 val->FirmwareVersion, 7445 sizeof (val->FirmwareVersion)); 7446 val->VendorSpecificID = 7447 port->fp_hba_port_attrs.vendor_specific_id; 7448 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7449 &val->NodeWWN.raw_wwn, 7450 sizeof (val->NodeWWN.raw_wwn)); 7451 7452 7453 bcopy(port->fp_hba_port_attrs.driver_name, 7454 val->DriverName, 7455 sizeof (val->DriverName)); 7456 bcopy(port->fp_hba_port_attrs.driver_version, 7457 val->DriverVersion, 7458 sizeof (val->DriverVersion)); 7459 mutex_exit(&port->fp_mutex); 7460 7461 if (fcio->fcio_cmd == FCIO_GET_ADAPTER_ATTRIBUTES) { 7462 
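/*
 * Descriptive note (added): FCIO_GET_ADAPTER_ATTRIBUTES and
 * FCIO_NPIV_GET_ADAPTER_ATTRIBUTES share all of the attribute
 * fields filled in above; the only difference is the second
 * argument handed to fctl_count_fru_ports() here (0 for the plain
 * adapter query, 1 for the NPIV variant).
 */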
val->NumberOfPorts = fctl_count_fru_ports(port, 0); 7463 } else { 7464 val->NumberOfPorts = fctl_count_fru_ports(port, 1); 7465 } 7466 7467 if (use32 == B_TRUE) { 7468 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7469 val32->version = val->version; 7470 bcopy(val->Manufacturer, val32->Manufacturer, 7471 sizeof (val->Manufacturer)); 7472 bcopy(val->SerialNumber, val32->SerialNumber, 7473 sizeof (val->SerialNumber)); 7474 bcopy(val->Model, val32->Model, 7475 sizeof (val->Model)); 7476 bcopy(val->ModelDescription, val32->ModelDescription, 7477 sizeof (val->ModelDescription)); 7478 bcopy(val->NodeSymbolicName, val32->NodeSymbolicName, 7479 sizeof (val->NodeSymbolicName)); 7480 bcopy(val->HardwareVersion, val32->HardwareVersion, 7481 sizeof (val->HardwareVersion)); 7482 bcopy(val->OptionROMVersion, val32->OptionROMVersion, 7483 sizeof (val->OptionROMVersion)); 7484 bcopy(val->FirmwareVersion, val32->FirmwareVersion, 7485 sizeof (val->FirmwareVersion)); 7486 val32->VendorSpecificID = val->VendorSpecificID; 7487 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn, 7488 sizeof (val->NodeWWN.raw_wwn)); 7489 bcopy(val->DriverName, val32->DriverName, 7490 sizeof (val->DriverName)); 7491 bcopy(val->DriverVersion, val32->DriverVersion, 7492 sizeof (val->DriverVersion)); 7493 7494 val32->NumberOfPorts = val->NumberOfPorts; 7495 7496 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7497 fcio->fcio_olen, mode) == 0) { 7498 if (fp_fcio_copyout(fcio, data, mode)) { 7499 rval = EFAULT; 7500 } 7501 } else { 7502 rval = EFAULT; 7503 } 7504 7505 kmem_free(val32, sizeof (*val32)); 7506 } else { 7507 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7508 fcio->fcio_olen, mode) == 0) { 7509 if (fp_fcio_copyout(fcio, data, mode)) { 7510 rval = EFAULT; 7511 } 7512 } else { 7513 rval = EFAULT; 7514 } 7515 } 7516 7517 kmem_free(val, sizeof (*val)); 7518 break; 7519 } 7520 7521 case FCIO_GET_NPIV_ATTRIBUTES: { 7522 fc_hba_npiv_attributes_t *attrs; 7523 7524 attrs = kmem_zalloc(sizeof (*attrs), KM_SLEEP); 7525 mutex_enter(&port->fp_mutex); 7526 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7527 &attrs->NodeWWN.raw_wwn, 7528 sizeof (attrs->NodeWWN.raw_wwn)); 7529 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7530 &attrs->PortWWN.raw_wwn, 7531 sizeof (attrs->PortWWN.raw_wwn)); 7532 mutex_exit(&port->fp_mutex); 7533 if (fp_copyout((void *)attrs, (void *)fcio->fcio_obuf, 7534 fcio->fcio_olen, mode) == 0) { 7535 if (fp_fcio_copyout(fcio, data, mode)) { 7536 rval = EFAULT; 7537 } 7538 } else { 7539 rval = EFAULT; 7540 } 7541 kmem_free(attrs, sizeof (*attrs)); 7542 break; 7543 } 7544 7545 case FCIO_DELETE_NPIV_PORT: { 7546 fc_local_port_t *tmpport; 7547 char ww_pname[17]; 7548 la_wwn_t vwwn[1]; 7549 7550 FP_TRACE(FP_NHEAD1(1, 0), "Delete NPIV Port"); 7551 if (ddi_copyin(fcio->fcio_ibuf, 7552 &vwwn, sizeof (la_wwn_t), mode)) { 7553 rval = EFAULT; 7554 break; 7555 } 7556 7557 fc_wwn_to_str(&vwwn[0], ww_pname); 7558 FP_TRACE(FP_NHEAD1(3, 0), 7559 "Delete NPIV Port %s", ww_pname); 7560 tmpport = fc_delete_npiv_port(port, &vwwn[0]); 7561 if (tmpport == NULL) { 7562 FP_TRACE(FP_NHEAD1(3, 0), 7563 "Delete NPIV Port : no found"); 7564 rval = EFAULT; 7565 } else { 7566 fc_local_port_t *nextport = tmpport->fp_port_next; 7567 fc_local_port_t *prevport = tmpport->fp_port_prev; 7568 int portlen, portindex, ret; 7569 7570 portlen = sizeof (portindex); 7571 ret = ddi_prop_op(DDI_DEV_T_ANY, 7572 tmpport->fp_port_dip, PROP_LEN_AND_VAL_BUF, 7573 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port", 7574 (caddr_t)&portindex, 
&portlen); 7575 if (ret != DDI_SUCCESS) { 7576 rval = EFAULT; 7577 break; 7578 } 7579 if (ndi_devi_offline(tmpport->fp_port_dip, 7580 NDI_DEVI_REMOVE) != DDI_SUCCESS) { 7581 FP_TRACE(FP_NHEAD1(1, 0), 7582 "Delete NPIV Port failed"); 7583 mutex_enter(&port->fp_mutex); 7584 tmpport->fp_npiv_state = 0; 7585 mutex_exit(&port->fp_mutex); 7586 rval = EFAULT; 7587 } else { 7588 mutex_enter(&port->fp_mutex); 7589 nextport->fp_port_prev = prevport; 7590 prevport->fp_port_next = nextport; 7591 if (port == port->fp_port_next) { 7592 port->fp_port_next = 7593 port->fp_port_prev = NULL; 7594 } 7595 port->fp_npiv_portnum--; 7596 FP_TRACE(FP_NHEAD1(3, 0), 7597 "Delete NPIV Port %d", portindex); 7598 port->fp_npiv_portindex[portindex-1] = 0; 7599 mutex_exit(&port->fp_mutex); 7600 } 7601 } 7602 break; 7603 } 7604 7605 case FCIO_CREATE_NPIV_PORT: { 7606 char ww_nname[17], ww_pname[17]; 7607 la_npiv_create_entry_t entrybuf; 7608 uint32_t vportindex = 0; 7609 int npiv_ret = 0; 7610 char *portname, *fcaname; 7611 7612 portname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7613 (void) ddi_pathname(port->fp_port_dip, portname); 7614 fcaname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7615 (void) ddi_pathname(port->fp_fca_dip, fcaname); 7616 FP_TRACE(FP_NHEAD1(1, 0), 7617 "Create NPIV port %s %s %s", portname, fcaname, 7618 ddi_driver_name(port->fp_fca_dip)); 7619 kmem_free(portname, MAXPATHLEN); 7620 kmem_free(fcaname, MAXPATHLEN); 7621 if (ddi_copyin(fcio->fcio_ibuf, 7622 &entrybuf, sizeof (la_npiv_create_entry_t), mode)) { 7623 rval = EFAULT; 7624 break; 7625 } 7626 7627 fc_wwn_to_str(&entrybuf.VNodeWWN, ww_nname); 7628 fc_wwn_to_str(&entrybuf.VPortWWN, ww_pname); 7629 vportindex = entrybuf.vindex; 7630 FP_TRACE(FP_NHEAD1(3, 0), 7631 "Create NPIV Port %s %s %d", 7632 ww_nname, ww_pname, vportindex); 7633 7634 if (fc_get_npiv_port(port, &entrybuf.VPortWWN)) { 7635 rval = EFAULT; 7636 break; 7637 } 7638 npiv_ret = fctl_fca_create_npivport(port->fp_fca_dip, 7639 port->fp_port_dip, ww_nname, ww_pname, &vportindex); 7640 if (npiv_ret == NDI_SUCCESS) { 7641 mutex_enter(&port->fp_mutex); 7642 port->fp_npiv_portnum++; 7643 mutex_exit(&port->fp_mutex); 7644 if (fp_copyout((void *)&vportindex, 7645 (void *)fcio->fcio_obuf, 7646 fcio->fcio_olen, mode) == 0) { 7647 if (fp_fcio_copyout(fcio, data, mode)) { 7648 rval = EFAULT; 7649 } 7650 } else { 7651 rval = EFAULT; 7652 } 7653 } else { 7654 rval = EFAULT; 7655 } 7656 FP_TRACE(FP_NHEAD1(3, 0), 7657 "Create NPIV Port %d %d", npiv_ret, vportindex); 7658 break; 7659 } 7660 7661 case FCIO_GET_NPIV_PORT_LIST: { 7662 fc_hba_npiv_port_list_t *list; 7663 int count; 7664 7665 if ((fcio->fcio_xfer != FCIO_XFER_READ) || 7666 (fcio->fcio_olen == 0) || (fcio->fcio_obuf == 0)) { 7667 rval = EINVAL; 7668 break; 7669 } 7670 7671 list = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 7672 list->version = FC_HBA_LIST_VERSION; 7673 7674 count = (fcio->fcio_olen - 7675 (int)sizeof (fc_hba_npiv_port_list_t))/MAXPATHLEN + 1; 7676 if (port->fp_npiv_portnum > count) { 7677 list->numAdapters = port->fp_npiv_portnum; 7678 } else { 7679 /* build npiv port list */ 7680 count = fc_ulp_get_npiv_port_list(port, 7681 (char *)list->hbaPaths); 7682 if (count < 0) { 7683 rval = ENXIO; 7684 FP_TRACE(FP_NHEAD1(1, 0), 7685 "Build NPIV Port List error"); 7686 kmem_free(list, fcio->fcio_olen); 7687 break; 7688 } 7689 list->numAdapters = count; 7690 } 7691 7692 if (fp_copyout((void *)list, (void *)fcio->fcio_obuf, 7693 fcio->fcio_olen, mode) == 0) { 7694 if (fp_fcio_copyout(fcio, data, mode)) { 7695 FP_TRACE(FP_NHEAD1(1, 0), 7696 "Copy 
NPIV Port data error"); 7697 rval = EFAULT; 7698 } 7699 } else { 7700 FP_TRACE(FP_NHEAD1(1, 0), "Copy NPIV Port List error"); 7701 rval = EFAULT; 7702 } 7703 kmem_free(list, fcio->fcio_olen); 7704 break; 7705 } 7706 7707 case FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES: { 7708 fc_hba_port_npiv_attributes_t *val; 7709 7710 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7711 val->version = FC_HBA_PORT_NPIV_ATTRIBUTES_VERSION; 7712 7713 mutex_enter(&port->fp_mutex); 7714 val->npivflag = port->fp_npiv_flag; 7715 val->lastChange = port->fp_last_change; 7716 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7717 &val->PortWWN.raw_wwn, 7718 sizeof (val->PortWWN.raw_wwn)); 7719 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7720 &val->NodeWWN.raw_wwn, 7721 sizeof (val->NodeWWN.raw_wwn)); 7722 mutex_exit(&port->fp_mutex); 7723 7724 val->NumberOfNPIVPorts = fc_ulp_get_npiv_port_num(port); 7725 if (port->fp_npiv_type != FC_NPIV_PORT) { 7726 val->MaxNumberOfNPIVPorts = 7727 port->fp_fca_tran->fca_num_npivports; 7728 } else { 7729 val->MaxNumberOfNPIVPorts = 0; 7730 } 7731 7732 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7733 fcio->fcio_olen, mode) == 0) { 7734 if (fp_fcio_copyout(fcio, data, mode)) { 7735 rval = EFAULT; 7736 } 7737 } else { 7738 rval = EFAULT; 7739 } 7740 kmem_free(val, sizeof (*val)); 7741 break; 7742 } 7743 7744 case FCIO_GET_ADAPTER_PORT_ATTRIBUTES: { 7745 fc_hba_port_attributes_t *val; 7746 fc_hba_port_attributes32_t *val32; 7747 7748 if (use32 == B_TRUE) { 7749 if (fcio->fcio_olen < sizeof (*val32) || 7750 fcio->fcio_xfer != FCIO_XFER_READ) { 7751 rval = EINVAL; 7752 break; 7753 } 7754 } else { 7755 if (fcio->fcio_olen < sizeof (*val) || 7756 fcio->fcio_xfer != FCIO_XFER_READ) { 7757 rval = EINVAL; 7758 break; 7759 } 7760 } 7761 7762 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7763 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 7764 mutex_enter(&port->fp_mutex); 7765 val->lastChange = port->fp_last_change; 7766 val->fp_minor = port->fp_instance; 7767 7768 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7769 &val->PortWWN.raw_wwn, 7770 sizeof (val->PortWWN.raw_wwn)); 7771 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7772 &val->NodeWWN.raw_wwn, 7773 sizeof (val->NodeWWN.raw_wwn)); 7774 bcopy(&port->fp_fabric_name, &val->FabricName.raw_wwn, 7775 sizeof (val->FabricName.raw_wwn)); 7776 7777 val->PortFcId = port->fp_port_id.port_id; 7778 7779 switch (FC_PORT_STATE_MASK(port->fp_state)) { 7780 case FC_STATE_OFFLINE: 7781 val->PortState = FC_HBA_PORTSTATE_OFFLINE; 7782 break; 7783 case FC_STATE_ONLINE: 7784 case FC_STATE_LOOP: 7785 case FC_STATE_NAMESERVICE: 7786 val->PortState = FC_HBA_PORTSTATE_ONLINE; 7787 break; 7788 default: 7789 val->PortState = FC_HBA_PORTSTATE_UNKNOWN; 7790 break; 7791 } 7792 7793 /* Translate from LV to FC-HBA port type codes */ 7794 switch (port->fp_port_type.port_type) { 7795 case FC_NS_PORT_N: 7796 val->PortType = FC_HBA_PORTTYPE_NPORT; 7797 break; 7798 case FC_NS_PORT_NL: 7799 /* Actually means loop for us */ 7800 val->PortType = FC_HBA_PORTTYPE_LPORT; 7801 break; 7802 case FC_NS_PORT_F: 7803 val->PortType = FC_HBA_PORTTYPE_FPORT; 7804 break; 7805 case FC_NS_PORT_FL: 7806 val->PortType = FC_HBA_PORTTYPE_FLPORT; 7807 break; 7808 case FC_NS_PORT_E: 7809 val->PortType = FC_HBA_PORTTYPE_EPORT; 7810 break; 7811 default: 7812 val->PortType = FC_HBA_PORTTYPE_OTHER; 7813 break; 7814 } 7815 7816 7817 /* 7818 * If fp has decided that the topology is public loop, 7819 * we will indicate that using the appropriate 7820 * FC HBA API constant. 
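 *
 * (Added note.) This topology-based mapping can override the
 * PortType derived from fp_port_type above: public loop is reported
 * as NLPORT, point-to-point as PTP, and an unknown topology with an
 * offline bind state as UNKNOWN; any other topology leaves the
 * earlier value in place.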
7821 */ 7822 switch (port->fp_topology) { 7823 case FC_TOP_PUBLIC_LOOP: 7824 val->PortType = FC_HBA_PORTTYPE_NLPORT; 7825 break; 7826 7827 case FC_TOP_PT_PT: 7828 val->PortType = FC_HBA_PORTTYPE_PTP; 7829 break; 7830 7831 case FC_TOP_UNKNOWN: 7832 /* 7833 * This should cover the case where nothing is connected 7834 * to the port. Crystal+ is p'bly an exception here. 7835 * For Crystal+, port 0 will come up as private loop 7836 * (i.e fp_bind_state will be FC_STATE_LOOP) even when 7837 * nothing is connected to it. 7838 * Current plan is to let userland handle this. 7839 */ 7840 if (port->fp_bind_state == FC_STATE_OFFLINE) { 7841 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 7842 } 7843 break; 7844 7845 default: 7846 /* 7847 * Do Nothing. 7848 * Unused: 7849 * val->PortType = FC_HBA_PORTTYPE_GPORT; 7850 */ 7851 break; 7852 } 7853 7854 val->PortSupportedClassofService = 7855 port->fp_hba_port_attrs.supported_cos; 7856 val->PortSupportedFc4Types[0] = 0; 7857 bcopy(port->fp_fc4_types, val->PortActiveFc4Types, 7858 sizeof (val->PortActiveFc4Types)); 7859 bcopy(port->fp_sym_port_name, val->PortSymbolicName, 7860 port->fp_sym_port_namelen); 7861 val->PortSupportedSpeed = 7862 port->fp_hba_port_attrs.supported_speed; 7863 7864 switch (FC_PORT_SPEED_MASK(port->fp_state)) { 7865 case FC_STATE_1GBIT_SPEED: 7866 val->PortSpeed = FC_HBA_PORTSPEED_1GBIT; 7867 break; 7868 case FC_STATE_2GBIT_SPEED: 7869 val->PortSpeed = FC_HBA_PORTSPEED_2GBIT; 7870 break; 7871 case FC_STATE_4GBIT_SPEED: 7872 val->PortSpeed = FC_HBA_PORTSPEED_4GBIT; 7873 break; 7874 case FC_STATE_8GBIT_SPEED: 7875 val->PortSpeed = FC_HBA_PORTSPEED_8GBIT; 7876 break; 7877 case FC_STATE_10GBIT_SPEED: 7878 val->PortSpeed = FC_HBA_PORTSPEED_10GBIT; 7879 break; 7880 case FC_STATE_16GBIT_SPEED: 7881 val->PortSpeed = FC_HBA_PORTSPEED_16GBIT; 7882 break; 7883 default: 7884 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7885 break; 7886 } 7887 val->PortMaxFrameSize = port->fp_hba_port_attrs.max_frame_size; 7888 val->NumberofDiscoveredPorts = port->fp_dev_count; 7889 mutex_exit(&port->fp_mutex); 7890 7891 if (use32 == B_TRUE) { 7892 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7893 val32->version = val->version; 7894 val32->lastChange = val->lastChange; 7895 val32->fp_minor = val->fp_minor; 7896 7897 bcopy(&val->PortWWN.raw_wwn, &val32->PortWWN.raw_wwn, 7898 sizeof (val->PortWWN.raw_wwn)); 7899 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn, 7900 sizeof (val->NodeWWN.raw_wwn)); 7901 val32->PortFcId = val->PortFcId; 7902 val32->PortState = val->PortState; 7903 val32->PortType = val->PortType; 7904 7905 val32->PortSupportedClassofService = 7906 val->PortSupportedClassofService; 7907 bcopy(val->PortActiveFc4Types, 7908 val32->PortActiveFc4Types, 7909 sizeof (val->PortActiveFc4Types)); 7910 bcopy(val->PortSymbolicName, val32->PortSymbolicName, 7911 sizeof (val->PortSymbolicName)); 7912 bcopy(&val->FabricName, &val32->FabricName, 7913 sizeof (val->FabricName.raw_wwn)); 7914 val32->PortSupportedSpeed = val->PortSupportedSpeed; 7915 val32->PortSpeed = val->PortSpeed; 7916 7917 val32->PortMaxFrameSize = val->PortMaxFrameSize; 7918 val32->NumberofDiscoveredPorts = 7919 val->NumberofDiscoveredPorts; 7920 7921 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7922 fcio->fcio_olen, mode) == 0) { 7923 if (fp_fcio_copyout(fcio, data, mode)) { 7924 rval = EFAULT; 7925 } 7926 } else { 7927 rval = EFAULT; 7928 } 7929 7930 kmem_free(val32, sizeof (*val32)); 7931 } else { 7932 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7933 fcio->fcio_olen, mode) == 0) 
{ 7934 if (fp_fcio_copyout(fcio, data, mode)) { 7935 rval = EFAULT; 7936 } 7937 } else { 7938 rval = EFAULT; 7939 } 7940 } 7941 7942 kmem_free(val, sizeof (*val)); 7943 break; 7944 } 7945 7946 case FCIO_GET_DISCOVERED_PORT_ATTRIBUTES: { 7947 fc_hba_port_attributes_t *val; 7948 fc_hba_port_attributes32_t *val32; 7949 uint32_t index = 0; 7950 fc_remote_port_t *tmp_pd; 7951 7952 if (use32 == B_TRUE) { 7953 if (fcio->fcio_olen < sizeof (*val32) || 7954 fcio->fcio_xfer != FCIO_XFER_READ) { 7955 rval = EINVAL; 7956 break; 7957 } 7958 } else { 7959 if (fcio->fcio_olen < sizeof (*val) || 7960 fcio->fcio_xfer != FCIO_XFER_READ) { 7961 rval = EINVAL; 7962 break; 7963 } 7964 } 7965 7966 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof (index), mode)) { 7967 rval = EFAULT; 7968 break; 7969 } 7970 7971 if (index >= port->fp_dev_count) { 7972 FP_TRACE(FP_NHEAD1(9, 0), 7973 "User supplied index out of range"); 7974 fcio->fcio_errno = FC_OUTOFBOUNDS; 7975 rval = EINVAL; 7976 if (fp_fcio_copyout(fcio, data, mode)) { 7977 rval = EFAULT; 7978 } 7979 break; 7980 } 7981 7982 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7983 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 7984 7985 mutex_enter(&port->fp_mutex); 7986 tmp_pd = fctl_lookup_pd_by_index(port, index); 7987 7988 if (tmp_pd == NULL) { 7989 fcio->fcio_errno = FC_BADPORT; 7990 rval = EINVAL; 7991 } else { 7992 val->lastChange = port->fp_last_change; 7993 val->fp_minor = port->fp_instance; 7994 7995 mutex_enter(&tmp_pd->pd_mutex); 7996 bcopy(&tmp_pd->pd_port_name.raw_wwn, 7997 &val->PortWWN.raw_wwn, 7998 sizeof (val->PortWWN.raw_wwn)); 7999 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn, 8000 &val->NodeWWN.raw_wwn, 8001 sizeof (val->NodeWWN.raw_wwn)); 8002 val->PortFcId = tmp_pd->pd_port_id.port_id; 8003 bcopy(tmp_pd->pd_spn, val->PortSymbolicName, 8004 tmp_pd->pd_spn_len); 8005 val->PortSupportedClassofService = tmp_pd->pd_cos; 8006 /* 8007 * we will assume the sizeof these pd_fc4types and 8008 * portActiveFc4Types will remain the same. 
we could 8009 * add in a check for it, but we decided it was unneeded 8010 */ 8011 bcopy((caddr_t)tmp_pd->pd_fc4types, 8012 val->PortActiveFc4Types, 8013 sizeof (tmp_pd->pd_fc4types)); 8014 val->PortState = 8015 fp_map_remote_port_state(tmp_pd->pd_state); 8016 mutex_exit(&tmp_pd->pd_mutex); 8017 8018 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 8019 val->PortSupportedFc4Types[0] = 0; 8020 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN; 8021 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 8022 val->PortMaxFrameSize = 0; 8023 val->NumberofDiscoveredPorts = 0; 8024 8025 if (use32 == B_TRUE) { 8026 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 8027 val32->version = val->version; 8028 val32->lastChange = val->lastChange; 8029 val32->fp_minor = val->fp_minor; 8030 8031 bcopy(&val->PortWWN.raw_wwn, 8032 &val32->PortWWN.raw_wwn, 8033 sizeof (val->PortWWN.raw_wwn)); 8034 bcopy(&val->NodeWWN.raw_wwn, 8035 &val32->NodeWWN.raw_wwn, 8036 sizeof (val->NodeWWN.raw_wwn)); 8037 val32->PortFcId = val->PortFcId; 8038 bcopy(val->PortSymbolicName, 8039 val32->PortSymbolicName, 8040 sizeof (val->PortSymbolicName)); 8041 val32->PortSupportedClassofService = 8042 val->PortSupportedClassofService; 8043 bcopy(val->PortActiveFc4Types, 8044 val32->PortActiveFc4Types, 8045 sizeof (tmp_pd->pd_fc4types)); 8046 8047 val32->PortType = val->PortType; 8048 val32->PortState = val->PortState; 8049 val32->PortSupportedFc4Types[0] = 8050 val->PortSupportedFc4Types[0]; 8051 val32->PortSupportedSpeed = 8052 val->PortSupportedSpeed; 8053 val32->PortSpeed = val->PortSpeed; 8054 val32->PortMaxFrameSize = 8055 val->PortMaxFrameSize; 8056 val32->NumberofDiscoveredPorts = 8057 val->NumberofDiscoveredPorts; 8058 8059 if (fp_copyout((void *)val32, 8060 (void *)fcio->fcio_obuf, 8061 fcio->fcio_olen, mode) == 0) { 8062 if (fp_fcio_copyout(fcio, 8063 data, mode)) { 8064 rval = EFAULT; 8065 } 8066 } else { 8067 rval = EFAULT; 8068 } 8069 8070 kmem_free(val32, sizeof (*val32)); 8071 } else { 8072 if (fp_copyout((void *)val, 8073 (void *)fcio->fcio_obuf, 8074 fcio->fcio_olen, mode) == 0) { 8075 if (fp_fcio_copyout(fcio, data, mode)) { 8076 rval = EFAULT; 8077 } 8078 } else { 8079 rval = EFAULT; 8080 } 8081 } 8082 } 8083 8084 mutex_exit(&port->fp_mutex); 8085 kmem_free(val, sizeof (*val)); 8086 break; 8087 } 8088 8089 case FCIO_GET_PORT_ATTRIBUTES: { 8090 fc_hba_port_attributes_t *val; 8091 fc_hba_port_attributes32_t *val32; 8092 la_wwn_t wwn; 8093 fc_remote_port_t *tmp_pd; 8094 8095 if (use32 == B_TRUE) { 8096 if (fcio->fcio_olen < sizeof (*val32) || 8097 fcio->fcio_xfer != FCIO_XFER_READ) { 8098 rval = EINVAL; 8099 break; 8100 } 8101 } else { 8102 if (fcio->fcio_olen < sizeof (*val) || 8103 fcio->fcio_xfer != FCIO_XFER_READ) { 8104 rval = EINVAL; 8105 break; 8106 } 8107 } 8108 8109 if (ddi_copyin(fcio->fcio_ibuf, &wwn, sizeof (wwn), mode)) { 8110 rval = EFAULT; 8111 break; 8112 } 8113 8114 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 8115 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 8116 8117 mutex_enter(&port->fp_mutex); 8118 tmp_pd = fctl_lookup_pd_by_wwn(port, wwn); 8119 val->lastChange = port->fp_last_change; 8120 val->fp_minor = port->fp_instance; 8121 mutex_exit(&port->fp_mutex); 8122 8123 if (tmp_pd == NULL) { 8124 fcio->fcio_errno = FC_BADWWN; 8125 rval = EINVAL; 8126 } else { 8127 mutex_enter(&tmp_pd->pd_mutex); 8128 bcopy(&tmp_pd->pd_port_name.raw_wwn, 8129 &val->PortWWN.raw_wwn, 8130 sizeof (val->PortWWN.raw_wwn)); 8131 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn, 8132 &val->NodeWWN.raw_wwn, 8133 sizeof 
(val->NodeWWN.raw_wwn)); 8134 val->PortFcId = tmp_pd->pd_port_id.port_id; 8135 bcopy(tmp_pd->pd_spn, val->PortSymbolicName, 8136 tmp_pd->pd_spn_len); 8137 val->PortSupportedClassofService = tmp_pd->pd_cos; 8138 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 8139 val->PortState = 8140 fp_map_remote_port_state(tmp_pd->pd_state); 8141 val->PortSupportedFc4Types[0] = 0; 8142 /* 8143 * we will assume the sizeof these pd_fc4types and 8144 * portActiveFc4Types will remain the same. we could 8145 * add in a check for it, but we decided it was unneeded 8146 */ 8147 bcopy((caddr_t)tmp_pd->pd_fc4types, 8148 val->PortActiveFc4Types, 8149 sizeof (tmp_pd->pd_fc4types)); 8150 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN; 8151 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 8152 val->PortMaxFrameSize = 0; 8153 val->NumberofDiscoveredPorts = 0; 8154 mutex_exit(&tmp_pd->pd_mutex); 8155 8156 if (use32 == B_TRUE) { 8157 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 8158 val32->version = val->version; 8159 val32->lastChange = val->lastChange; 8160 val32->fp_minor = val->fp_minor; 8161 bcopy(&val->PortWWN.raw_wwn, 8162 &val32->PortWWN.raw_wwn, 8163 sizeof (val->PortWWN.raw_wwn)); 8164 bcopy(&val->NodeWWN.raw_wwn, 8165 &val32->NodeWWN.raw_wwn, 8166 sizeof (val->NodeWWN.raw_wwn)); 8167 val32->PortFcId = val->PortFcId; 8168 bcopy(val->PortSymbolicName, 8169 val32->PortSymbolicName, 8170 sizeof (val->PortSymbolicName)); 8171 val32->PortSupportedClassofService = 8172 val->PortSupportedClassofService; 8173 val32->PortType = val->PortType; 8174 val32->PortState = val->PortState; 8175 val32->PortSupportedFc4Types[0] = 8176 val->PortSupportedFc4Types[0]; 8177 bcopy(val->PortActiveFc4Types, 8178 val32->PortActiveFc4Types, 8179 sizeof (tmp_pd->pd_fc4types)); 8180 val32->PortSupportedSpeed = 8181 val->PortSupportedSpeed; 8182 val32->PortSpeed = val->PortSpeed; 8183 val32->PortMaxFrameSize = val->PortMaxFrameSize; 8184 val32->NumberofDiscoveredPorts = 8185 val->NumberofDiscoveredPorts; 8186 8187 if (fp_copyout((void *)val32, 8188 (void *)fcio->fcio_obuf, 8189 fcio->fcio_olen, mode) == 0) { 8190 if (fp_fcio_copyout(fcio, data, mode)) { 8191 rval = EFAULT; 8192 } 8193 } else { 8194 rval = EFAULT; 8195 } 8196 8197 kmem_free(val32, sizeof (*val32)); 8198 } else { 8199 if (fp_copyout((void *)val, 8200 (void *)fcio->fcio_obuf, 8201 fcio->fcio_olen, mode) == 0) { 8202 if (fp_fcio_copyout(fcio, data, mode)) { 8203 rval = EFAULT; 8204 } 8205 } else { 8206 rval = EFAULT; 8207 } 8208 } 8209 } 8210 kmem_free(val, sizeof (*val)); 8211 break; 8212 } 8213 8214 case FCIO_GET_NUM_DEVS: { 8215 int num_devices; 8216 8217 if (fcio->fcio_olen != sizeof (num_devices) || 8218 fcio->fcio_xfer != FCIO_XFER_READ) { 8219 rval = EINVAL; 8220 break; 8221 } 8222 8223 mutex_enter(&port->fp_mutex); 8224 switch (port->fp_topology) { 8225 case FC_TOP_PRIVATE_LOOP: 8226 case FC_TOP_PT_PT: 8227 num_devices = port->fp_total_devices; 8228 fcio->fcio_errno = FC_SUCCESS; 8229 break; 8230 8231 case FC_TOP_PUBLIC_LOOP: 8232 case FC_TOP_FABRIC: 8233 mutex_exit(&port->fp_mutex); 8234 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, 8235 NULL, KM_SLEEP); 8236 ASSERT(job != NULL); 8237 8238 /* 8239 * In FC-GS-2 the Name Server doesn't send out 8240 * RSCNs for any Name Server Database updates 8241 * When it is finally fixed there is no need 8242 * to probe as below and should be removed. 
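 *
 * (Added note.) Until then, every FCIO_GET_NUM_DEVS issued on a
 * FABRIC or PUBLIC_LOOP topology triggers a fresh
 * fp_ns_get_devcount() probe, so the fp_total_devices value copied
 * out reflects the Name Server's current contents rather than a
 * stale cached count.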
8243 */ 8244 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP); 8245 fctl_dealloc_job(job); 8246 8247 mutex_enter(&port->fp_mutex); 8248 num_devices = port->fp_total_devices; 8249 fcio->fcio_errno = FC_SUCCESS; 8250 break; 8251 8252 case FC_TOP_NO_NS: 8253 /* FALLTHROUGH */ 8254 case FC_TOP_UNKNOWN: 8255 /* FALLTHROUGH */ 8256 default: 8257 num_devices = 0; 8258 fcio->fcio_errno = FC_SUCCESS; 8259 break; 8260 } 8261 mutex_exit(&port->fp_mutex); 8262 8263 if (fp_copyout((void *)&num_devices, 8264 (void *)fcio->fcio_obuf, fcio->fcio_olen, 8265 mode) == 0) { 8266 if (fp_fcio_copyout(fcio, data, mode)) { 8267 rval = EFAULT; 8268 } 8269 } else { 8270 rval = EFAULT; 8271 } 8272 break; 8273 } 8274 8275 case FCIO_GET_DEV_LIST: { 8276 int num_devices; 8277 int new_count; 8278 int map_size; 8279 8280 if (fcio->fcio_xfer != FCIO_XFER_READ || 8281 fcio->fcio_alen != sizeof (new_count)) { 8282 rval = EINVAL; 8283 break; 8284 } 8285 8286 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 8287 8288 mutex_enter(&port->fp_mutex); 8289 if (num_devices < port->fp_total_devices) { 8290 fcio->fcio_errno = FC_TOOMANY; 8291 new_count = port->fp_total_devices; 8292 mutex_exit(&port->fp_mutex); 8293 8294 if (fp_copyout((void *)&new_count, 8295 (void *)fcio->fcio_abuf, 8296 sizeof (new_count), mode)) { 8297 rval = EFAULT; 8298 break; 8299 } 8300 8301 if (fp_fcio_copyout(fcio, data, mode)) { 8302 rval = EFAULT; 8303 break; 8304 } 8305 rval = EINVAL; 8306 break; 8307 } 8308 8309 if (port->fp_total_devices <= 0) { 8310 fcio->fcio_errno = FC_NO_MAP; 8311 new_count = port->fp_total_devices; 8312 mutex_exit(&port->fp_mutex); 8313 8314 if (fp_copyout((void *)&new_count, 8315 (void *)fcio->fcio_abuf, 8316 sizeof (new_count), mode)) { 8317 rval = EFAULT; 8318 break; 8319 } 8320 8321 if (fp_fcio_copyout(fcio, data, mode)) { 8322 rval = EFAULT; 8323 break; 8324 } 8325 rval = EINVAL; 8326 break; 8327 } 8328 8329 switch (port->fp_topology) { 8330 case FC_TOP_PRIVATE_LOOP: 8331 if (fp_fillout_loopmap(port, fcio, 8332 mode) != FC_SUCCESS) { 8333 rval = EFAULT; 8334 break; 8335 } 8336 if (fp_fcio_copyout(fcio, data, mode)) { 8337 rval = EFAULT; 8338 } 8339 break; 8340 8341 case FC_TOP_PT_PT: 8342 if (fp_fillout_p2pmap(port, fcio, 8343 mode) != FC_SUCCESS) { 8344 rval = EFAULT; 8345 break; 8346 } 8347 if (fp_fcio_copyout(fcio, data, mode)) { 8348 rval = EFAULT; 8349 } 8350 break; 8351 8352 case FC_TOP_PUBLIC_LOOP: 8353 case FC_TOP_FABRIC: { 8354 fctl_ns_req_t *ns_cmd; 8355 8356 map_size = 8357 sizeof (fc_port_dev_t) * port->fp_total_devices; 8358 8359 mutex_exit(&port->fp_mutex); 8360 8361 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 8362 sizeof (ns_resp_gan_t), map_size, 8363 (FCTL_NS_FILL_NS_MAP | FCTL_NS_BUF_IS_USERLAND), 8364 KM_SLEEP); 8365 ASSERT(ns_cmd != NULL); 8366 8367 ns_cmd->ns_gan_index = 0; 8368 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 8369 ns_cmd->ns_cmd_code = NS_GA_NXT; 8370 ns_cmd->ns_gan_max = map_size / sizeof (fc_port_dev_t); 8371 8372 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, 8373 NULL, KM_SLEEP); 8374 ASSERT(job != NULL); 8375 8376 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 8377 8378 if (ret != FC_SUCCESS || 8379 job->job_result != FC_SUCCESS) { 8380 fctl_free_ns_cmd(ns_cmd); 8381 8382 fcio->fcio_errno = job->job_result; 8383 new_count = 0; 8384 if (fp_copyout((void *)&new_count, 8385 (void *)fcio->fcio_abuf, 8386 sizeof (new_count), mode)) { 8387 fctl_dealloc_job(job); 8388 mutex_enter(&port->fp_mutex); 8389 rval = EFAULT; 8390 break; 8391 } 8392 8393 if (fp_fcio_copyout(fcio, data, 
mode)) { 8394 fctl_dealloc_job(job); 8395 mutex_enter(&port->fp_mutex); 8396 rval = EFAULT; 8397 break; 8398 } 8399 rval = EIO; 8400 mutex_enter(&port->fp_mutex); 8401 break; 8402 } 8403 fctl_dealloc_job(job); 8404 8405 new_count = ns_cmd->ns_gan_index; 8406 if (fp_copyout((void *)&new_count, 8407 (void *)fcio->fcio_abuf, sizeof (new_count), 8408 mode)) { 8409 rval = EFAULT; 8410 fctl_free_ns_cmd(ns_cmd); 8411 mutex_enter(&port->fp_mutex); 8412 break; 8413 } 8414 8415 if (fp_copyout((void *)ns_cmd->ns_data_buf, 8416 (void *)fcio->fcio_obuf, sizeof (fc_port_dev_t) * 8417 ns_cmd->ns_gan_index, mode)) { 8418 rval = EFAULT; 8419 fctl_free_ns_cmd(ns_cmd); 8420 mutex_enter(&port->fp_mutex); 8421 break; 8422 } 8423 fctl_free_ns_cmd(ns_cmd); 8424 8425 if (fp_fcio_copyout(fcio, data, mode)) { 8426 rval = EFAULT; 8427 } 8428 mutex_enter(&port->fp_mutex); 8429 break; 8430 } 8431 8432 case FC_TOP_NO_NS: 8433 /* FALLTHROUGH */ 8434 case FC_TOP_UNKNOWN: 8435 /* FALLTHROUGH */ 8436 default: 8437 fcio->fcio_errno = FC_NO_MAP; 8438 num_devices = port->fp_total_devices; 8439 8440 if (fp_copyout((void *)&new_count, 8441 (void *)fcio->fcio_abuf, 8442 sizeof (new_count), mode)) { 8443 rval = EFAULT; 8444 break; 8445 } 8446 8447 if (fp_fcio_copyout(fcio, data, mode)) { 8448 rval = EFAULT; 8449 break; 8450 } 8451 rval = EINVAL; 8452 break; 8453 } 8454 mutex_exit(&port->fp_mutex); 8455 break; 8456 } 8457 8458 case FCIO_GET_SYM_PNAME: { 8459 rval = ENOTSUP; 8460 break; 8461 } 8462 8463 case FCIO_GET_SYM_NNAME: { 8464 rval = ENOTSUP; 8465 break; 8466 } 8467 8468 case FCIO_SET_SYM_PNAME: { 8469 rval = ENOTSUP; 8470 break; 8471 } 8472 8473 case FCIO_SET_SYM_NNAME: { 8474 rval = ENOTSUP; 8475 break; 8476 } 8477 8478 case FCIO_GET_LOGI_PARAMS: { 8479 la_wwn_t pwwn; 8480 la_wwn_t *my_pwwn; 8481 la_els_logi_t *params; 8482 la_els_logi32_t *params32; 8483 fc_remote_node_t *node; 8484 fc_remote_port_t *pd; 8485 8486 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8487 (fcio->fcio_xfer & FCIO_XFER_READ) == 0 || 8488 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0) { 8489 rval = EINVAL; 8490 break; 8491 } 8492 8493 if (use32 == B_TRUE) { 8494 if (fcio->fcio_olen != sizeof (la_els_logi32_t)) { 8495 rval = EINVAL; 8496 break; 8497 } 8498 } else { 8499 if (fcio->fcio_olen != sizeof (la_els_logi_t)) { 8500 rval = EINVAL; 8501 break; 8502 } 8503 } 8504 8505 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8506 rval = EFAULT; 8507 break; 8508 } 8509 8510 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8511 if (pd == NULL) { 8512 mutex_enter(&port->fp_mutex); 8513 my_pwwn = &port->fp_service_params.nport_ww_name; 8514 mutex_exit(&port->fp_mutex); 8515 8516 if (fctl_wwn_cmp(&pwwn, my_pwwn) != 0) { 8517 rval = ENXIO; 8518 break; 8519 } 8520 8521 params = kmem_zalloc(sizeof (*params), KM_SLEEP); 8522 mutex_enter(&port->fp_mutex); 8523 *params = port->fp_service_params; 8524 mutex_exit(&port->fp_mutex); 8525 } else { 8526 params = kmem_zalloc(sizeof (*params), KM_SLEEP); 8527 8528 mutex_enter(&pd->pd_mutex); 8529 params->ls_code.mbz = params->ls_code.ls_code = 0; 8530 params->common_service = pd->pd_csp; 8531 params->nport_ww_name = pd->pd_port_name; 8532 params->class_1 = pd->pd_clsp1; 8533 params->class_2 = pd->pd_clsp2; 8534 params->class_3 = pd->pd_clsp3; 8535 node = pd->pd_remote_nodep; 8536 mutex_exit(&pd->pd_mutex); 8537 8538 bzero(params->reserved, sizeof (params->reserved)); 8539 8540 mutex_enter(&node->fd_mutex); 8541 bcopy(node->fd_vv, params->vendor_version, 8542 sizeof (node->fd_vv)); 8543 params->node_ww_name = 
node->fd_node_name; 8544 mutex_exit(&node->fd_mutex); 8545 8546 fctl_release_remote_port(pd); 8547 } 8548 8549 if (use32 == B_TRUE) { 8550 params32 = kmem_zalloc(sizeof (*params32), KM_SLEEP); 8551 8552 params32->ls_code.mbz = params->ls_code.mbz; 8553 params32->common_service = params->common_service; 8554 params32->nport_ww_name = params->nport_ww_name; 8555 params32->class_1 = params->class_1; 8556 params32->class_2 = params->class_2; 8557 params32->class_3 = params->class_3; 8558 bzero(params32->reserved, sizeof (params32->reserved)); 8559 bcopy(params->vendor_version, params32->vendor_version, 8560 sizeof (node->fd_vv)); 8561 params32->node_ww_name = params->node_ww_name; 8562 8563 if (ddi_copyout((void *)params32, 8564 (void *)fcio->fcio_obuf, 8565 sizeof (*params32), mode)) { 8566 rval = EFAULT; 8567 } 8568 8569 kmem_free(params32, sizeof (*params32)); 8570 } else { 8571 if (ddi_copyout((void *)params, (void *)fcio->fcio_obuf, 8572 sizeof (*params), mode)) { 8573 rval = EFAULT; 8574 } 8575 } 8576 8577 kmem_free(params, sizeof (*params)); 8578 if (fp_fcio_copyout(fcio, data, mode)) { 8579 rval = EFAULT; 8580 } 8581 break; 8582 } 8583 8584 case FCIO_DEV_LOGOUT: 8585 case FCIO_DEV_LOGIN: 8586 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8587 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8588 rval = EINVAL; 8589 8590 if (fp_fcio_copyout(fcio, data, mode)) { 8591 rval = EFAULT; 8592 } 8593 break; 8594 } 8595 8596 if (fcio->fcio_cmd == FCIO_DEV_LOGIN) { 8597 jcode = JOB_FCIO_LOGIN; 8598 } else { 8599 jcode = JOB_FCIO_LOGOUT; 8600 } 8601 8602 kfcio = kmem_zalloc(sizeof (*kfcio), KM_SLEEP); 8603 bcopy(fcio, kfcio, sizeof (*fcio)); 8604 8605 if (kfcio->fcio_ilen) { 8606 kfcio->fcio_ibuf = kmem_zalloc(kfcio->fcio_ilen, 8607 KM_SLEEP); 8608 8609 if (ddi_copyin((void *)fcio->fcio_ibuf, 8610 (void *)kfcio->fcio_ibuf, kfcio->fcio_ilen, 8611 mode)) { 8612 rval = EFAULT; 8613 8614 kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen); 8615 kmem_free(kfcio, sizeof (*kfcio)); 8616 fcio->fcio_errno = job->job_result; 8617 if (fp_fcio_copyout(fcio, data, mode)) { 8618 rval = EFAULT; 8619 } 8620 break; 8621 } 8622 } 8623 8624 job = fctl_alloc_job(jcode, 0, NULL, NULL, KM_SLEEP); 8625 job->job_private = kfcio; 8626 8627 fctl_enque_job(port, job); 8628 fctl_jobwait(job); 8629 8630 rval = job->job_result; 8631 8632 fcio->fcio_errno = kfcio->fcio_errno; 8633 if (fp_fcio_copyout(fcio, data, mode)) { 8634 rval = EFAULT; 8635 } 8636 8637 kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen); 8638 kmem_free(kfcio, sizeof (*kfcio)); 8639 fctl_dealloc_job(job); 8640 break; 8641 8642 case FCIO_GET_STATE: { 8643 la_wwn_t pwwn; 8644 uint32_t state; 8645 fc_remote_port_t *pd; 8646 fctl_ns_req_t *ns_cmd; 8647 8648 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8649 fcio->fcio_olen != sizeof (state) || 8650 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0 || 8651 (fcio->fcio_xfer & FCIO_XFER_READ) == 0) { 8652 rval = EINVAL; 8653 break; 8654 } 8655 8656 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8657 rval = EFAULT; 8658 break; 8659 } 8660 fcio->fcio_errno = 0; 8661 8662 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8663 if (pd == NULL) { 8664 mutex_enter(&port->fp_mutex); 8665 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 8666 mutex_exit(&port->fp_mutex); 8667 job = fctl_alloc_job(JOB_PLOGI_ONE, 0, 8668 NULL, NULL, KM_SLEEP); 8669 8670 job->job_counter = 1; 8671 job->job_result = FC_SUCCESS; 8672 8673 ns_cmd = fctl_alloc_ns_cmd( 8674 sizeof (ns_req_gid_pn_t), 8675 sizeof (ns_resp_gid_pn_t), 8676 sizeof (ns_resp_gid_pn_t), 8677 
FCTL_NS_BUF_IS_USERLAND, KM_SLEEP); 8678 ASSERT(ns_cmd != NULL); 8679 8680 ns_cmd->ns_cmd_code = NS_GID_PN; 8681 ((ns_req_gid_pn_t *) 8682 (ns_cmd->ns_cmd_buf))->pwwn = pwwn; 8683 8684 ret = fp_ns_query(port, ns_cmd, job, 8685 1, KM_SLEEP); 8686 8687 if (ret != FC_SUCCESS || job->job_result != 8688 FC_SUCCESS) { 8689 if (ret != FC_SUCCESS) { 8690 fcio->fcio_errno = ret; 8691 } else { 8692 fcio->fcio_errno = 8693 job->job_result; 8694 } 8695 rval = EIO; 8696 } else { 8697 state = PORT_DEVICE_INVALID; 8698 } 8699 fctl_free_ns_cmd(ns_cmd); 8700 fctl_dealloc_job(job); 8701 } else { 8702 mutex_exit(&port->fp_mutex); 8703 fcio->fcio_errno = FC_BADWWN; 8704 rval = ENXIO; 8705 } 8706 } else { 8707 mutex_enter(&pd->pd_mutex); 8708 state = pd->pd_state; 8709 mutex_exit(&pd->pd_mutex); 8710 8711 fctl_release_remote_port(pd); 8712 } 8713 8714 if (!rval) { 8715 if (ddi_copyout((void *)&state, 8716 (void *)fcio->fcio_obuf, sizeof (state), 8717 mode)) { 8718 rval = EFAULT; 8719 } 8720 } 8721 if (fp_fcio_copyout(fcio, data, mode)) { 8722 rval = EFAULT; 8723 } 8724 break; 8725 } 8726 8727 case FCIO_DEV_REMOVE: { 8728 la_wwn_t pwwn; 8729 fc_portmap_t *changelist; 8730 fc_remote_port_t *pd; 8731 8732 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8733 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8734 rval = EINVAL; 8735 break; 8736 } 8737 8738 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8739 rval = EFAULT; 8740 break; 8741 } 8742 8743 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8744 if (pd == NULL) { 8745 rval = ENXIO; 8746 fcio->fcio_errno = FC_BADWWN; 8747 if (fp_fcio_copyout(fcio, data, mode)) { 8748 rval = EFAULT; 8749 } 8750 break; 8751 } 8752 8753 mutex_enter(&pd->pd_mutex); 8754 if (pd->pd_ref_count > 1) { 8755 mutex_exit(&pd->pd_mutex); 8756 8757 rval = EBUSY; 8758 fcio->fcio_errno = FC_FAILURE; 8759 fctl_release_remote_port(pd); 8760 8761 if (fp_fcio_copyout(fcio, data, mode)) { 8762 rval = EFAULT; 8763 } 8764 break; 8765 } 8766 mutex_exit(&pd->pd_mutex); 8767 8768 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP); 8769 8770 fctl_copy_portmap(changelist, pd); 8771 changelist->map_type = PORT_DEVICE_USER_LOGOUT; 8772 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1); 8773 8774 fctl_release_remote_port(pd); 8775 break; 8776 } 8777 8778 case FCIO_GET_FCODE_REV: { 8779 caddr_t fcode_rev; 8780 fc_fca_pm_t pm; 8781 8782 if (fcio->fcio_olen < FC_FCODE_REV_SIZE || 8783 fcio->fcio_xfer != FCIO_XFER_READ) { 8784 rval = EINVAL; 8785 break; 8786 } 8787 bzero((caddr_t)&pm, sizeof (pm)); 8788 8789 fcode_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 8790 8791 pm.pm_cmd_flags = FC_FCA_PM_READ; 8792 pm.pm_cmd_code = FC_PORT_GET_FCODE_REV; 8793 pm.pm_data_len = fcio->fcio_olen; 8794 pm.pm_data_buf = fcode_rev; 8795 8796 ret = port->fp_fca_tran->fca_port_manage( 8797 port->fp_fca_handle, &pm); 8798 8799 if (ret == FC_SUCCESS) { 8800 if (ddi_copyout((void *)fcode_rev, 8801 (void *)fcio->fcio_obuf, 8802 fcio->fcio_olen, mode) == 0) { 8803 if (fp_fcio_copyout(fcio, data, mode)) { 8804 rval = EFAULT; 8805 } 8806 } else { 8807 rval = EFAULT; 8808 } 8809 } else { 8810 /* 8811 * check if buffer was not large enough to obtain 8812 * FCODE version. 
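 * (The FCA is expected to update pm.pm_data_len with the length it
 * actually needs, so a value larger than fcio_olen means the caller
 * should retry with a bigger buffer and gets ENOMEM; any other
 * failure is mapped to EIO.)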
8813 */ 8814 if (pm.pm_data_len > fcio->fcio_olen) { 8815 rval = ENOMEM; 8816 } else { 8817 rval = EIO; 8818 } 8819 fcio->fcio_errno = ret; 8820 if (fp_fcio_copyout(fcio, data, mode)) { 8821 rval = EFAULT; 8822 } 8823 } 8824 kmem_free(fcode_rev, fcio->fcio_olen); 8825 break; 8826 } 8827 8828 case FCIO_GET_FW_REV: { 8829 caddr_t fw_rev; 8830 fc_fca_pm_t pm; 8831 8832 if (fcio->fcio_olen < FC_FW_REV_SIZE || 8833 fcio->fcio_xfer != FCIO_XFER_READ) { 8834 rval = EINVAL; 8835 break; 8836 } 8837 bzero((caddr_t)&pm, sizeof (pm)); 8838 8839 fw_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 8840 8841 pm.pm_cmd_flags = FC_FCA_PM_READ; 8842 pm.pm_cmd_code = FC_PORT_GET_FW_REV; 8843 pm.pm_data_len = fcio->fcio_olen; 8844 pm.pm_data_buf = fw_rev; 8845 8846 ret = port->fp_fca_tran->fca_port_manage( 8847 port->fp_fca_handle, &pm); 8848 8849 if (ret == FC_SUCCESS) { 8850 if (ddi_copyout((void *)fw_rev, 8851 (void *)fcio->fcio_obuf, 8852 fcio->fcio_olen, mode) == 0) { 8853 if (fp_fcio_copyout(fcio, data, mode)) { 8854 rval = EFAULT; 8855 } 8856 } else { 8857 rval = EFAULT; 8858 } 8859 } else { 8860 if (fp_fcio_copyout(fcio, data, mode)) { 8861 rval = EFAULT; 8862 } 8863 rval = EIO; 8864 } 8865 kmem_free(fw_rev, fcio->fcio_olen); 8866 break; 8867 } 8868 8869 case FCIO_GET_DUMP_SIZE: { 8870 uint32_t dump_size; 8871 fc_fca_pm_t pm; 8872 8873 if (fcio->fcio_olen != sizeof (dump_size) || 8874 fcio->fcio_xfer != FCIO_XFER_READ) { 8875 rval = EINVAL; 8876 break; 8877 } 8878 bzero((caddr_t)&pm, sizeof (pm)); 8879 pm.pm_cmd_flags = FC_FCA_PM_READ; 8880 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE; 8881 pm.pm_data_len = sizeof (dump_size); 8882 pm.pm_data_buf = (caddr_t)&dump_size; 8883 8884 ret = port->fp_fca_tran->fca_port_manage( 8885 port->fp_fca_handle, &pm); 8886 8887 if (ret == FC_SUCCESS) { 8888 if (ddi_copyout((void *)&dump_size, 8889 (void *)fcio->fcio_obuf, sizeof (dump_size), 8890 mode) == 0) { 8891 if (fp_fcio_copyout(fcio, data, mode)) { 8892 rval = EFAULT; 8893 } 8894 } else { 8895 rval = EFAULT; 8896 } 8897 } else { 8898 fcio->fcio_errno = ret; 8899 rval = EIO; 8900 if (fp_fcio_copyout(fcio, data, mode)) { 8901 rval = EFAULT; 8902 } 8903 } 8904 break; 8905 } 8906 8907 case FCIO_DOWNLOAD_FW: { 8908 caddr_t firmware; 8909 fc_fca_pm_t pm; 8910 8911 if (fcio->fcio_ilen <= 0 || 8912 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8913 rval = EINVAL; 8914 break; 8915 } 8916 8917 firmware = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 8918 if (ddi_copyin(fcio->fcio_ibuf, firmware, 8919 fcio->fcio_ilen, mode)) { 8920 rval = EFAULT; 8921 kmem_free(firmware, fcio->fcio_ilen); 8922 break; 8923 } 8924 8925 bzero((caddr_t)&pm, sizeof (pm)); 8926 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 8927 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FW; 8928 pm.pm_data_len = fcio->fcio_ilen; 8929 pm.pm_data_buf = firmware; 8930 8931 ret = port->fp_fca_tran->fca_port_manage( 8932 port->fp_fca_handle, &pm); 8933 8934 kmem_free(firmware, fcio->fcio_ilen); 8935 8936 if (ret != FC_SUCCESS) { 8937 fcio->fcio_errno = ret; 8938 rval = EIO; 8939 if (fp_fcio_copyout(fcio, data, mode)) { 8940 rval = EFAULT; 8941 } 8942 } 8943 break; 8944 } 8945 8946 case FCIO_DOWNLOAD_FCODE: { 8947 caddr_t fcode; 8948 fc_fca_pm_t pm; 8949 8950 if (fcio->fcio_ilen <= 0 || 8951 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8952 rval = EINVAL; 8953 break; 8954 } 8955 8956 fcode = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 8957 if (ddi_copyin(fcio->fcio_ibuf, fcode, 8958 fcio->fcio_ilen, mode)) { 8959 rval = EFAULT; 8960 kmem_free(fcode, fcio->fcio_ilen); 8961 break; 8962 } 8963 8964 bzero((caddr_t)&pm, 
sizeof (pm)); 8965 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 8966 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FCODE; 8967 pm.pm_data_len = fcio->fcio_ilen; 8968 pm.pm_data_buf = fcode; 8969 8970 ret = port->fp_fca_tran->fca_port_manage( 8971 port->fp_fca_handle, &pm); 8972 8973 kmem_free(fcode, fcio->fcio_ilen); 8974 8975 if (ret != FC_SUCCESS) { 8976 fcio->fcio_errno = ret; 8977 rval = EIO; 8978 if (fp_fcio_copyout(fcio, data, mode)) { 8979 rval = EFAULT; 8980 } 8981 } 8982 break; 8983 } 8984 8985 case FCIO_FORCE_DUMP: 8986 ret = port->fp_fca_tran->fca_reset( 8987 port->fp_fca_handle, FC_FCA_CORE); 8988 8989 if (ret != FC_SUCCESS) { 8990 fcio->fcio_errno = ret; 8991 rval = EIO; 8992 if (fp_fcio_copyout(fcio, data, mode)) { 8993 rval = EFAULT; 8994 } 8995 } 8996 break; 8997 8998 case FCIO_GET_DUMP: { 8999 caddr_t dump; 9000 uint32_t dump_size; 9001 fc_fca_pm_t pm; 9002 9003 if (fcio->fcio_xfer != FCIO_XFER_READ) { 9004 rval = EINVAL; 9005 break; 9006 } 9007 bzero((caddr_t)&pm, sizeof (pm)); 9008 9009 pm.pm_cmd_flags = FC_FCA_PM_READ; 9010 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE; 9011 pm.pm_data_len = sizeof (dump_size); 9012 pm.pm_data_buf = (caddr_t)&dump_size; 9013 9014 ret = port->fp_fca_tran->fca_port_manage( 9015 port->fp_fca_handle, &pm); 9016 9017 if (ret != FC_SUCCESS) { 9018 fcio->fcio_errno = ret; 9019 rval = EIO; 9020 if (fp_fcio_copyout(fcio, data, mode)) { 9021 rval = EFAULT; 9022 } 9023 break; 9024 } 9025 if (fcio->fcio_olen != dump_size) { 9026 fcio->fcio_errno = FC_NOMEM; 9027 rval = EINVAL; 9028 if (fp_fcio_copyout(fcio, data, mode)) { 9029 rval = EFAULT; 9030 } 9031 break; 9032 } 9033 9034 dump = kmem_zalloc(dump_size, KM_SLEEP); 9035 9036 bzero((caddr_t)&pm, sizeof (pm)); 9037 pm.pm_cmd_flags = FC_FCA_PM_READ; 9038 pm.pm_cmd_code = FC_PORT_GET_DUMP; 9039 pm.pm_data_len = dump_size; 9040 pm.pm_data_buf = dump; 9041 9042 ret = port->fp_fca_tran->fca_port_manage( 9043 port->fp_fca_handle, &pm); 9044 9045 if (ret == FC_SUCCESS) { 9046 if (ddi_copyout((void *)dump, (void *)fcio->fcio_obuf, 9047 dump_size, mode) == 0) { 9048 if (fp_fcio_copyout(fcio, data, mode)) { 9049 rval = EFAULT; 9050 } 9051 } else { 9052 rval = EFAULT; 9053 } 9054 } else { 9055 fcio->fcio_errno = ret; 9056 rval = EIO; 9057 if (fp_fcio_copyout(fcio, data, mode)) { 9058 rval = EFAULT; 9059 } 9060 } 9061 kmem_free(dump, dump_size); 9062 break; 9063 } 9064 9065 case FCIO_GET_TOPOLOGY: { 9066 uint32_t user_topology; 9067 9068 if (fcio->fcio_xfer != FCIO_XFER_READ || 9069 fcio->fcio_olen != sizeof (user_topology)) { 9070 rval = EINVAL; 9071 break; 9072 } 9073 9074 mutex_enter(&port->fp_mutex); 9075 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 9076 user_topology = FC_TOP_UNKNOWN; 9077 } else { 9078 user_topology = port->fp_topology; 9079 } 9080 mutex_exit(&port->fp_mutex); 9081 9082 if (ddi_copyout((void *)&user_topology, 9083 (void *)fcio->fcio_obuf, sizeof (user_topology), 9084 mode)) { 9085 rval = EFAULT; 9086 } 9087 break; 9088 } 9089 9090 case FCIO_RESET_LINK: { 9091 la_wwn_t pwwn; 9092 9093 /* 9094 * Look at the output buffer field; if this field has zero 9095 * bytes then attempt to reset the local link/loop. If the 9096 * fcio_ibuf field points to a WWN, see if it's an NL_Port, 9097 * and if yes, determine the LFA and reset the remote LIP 9098 * by LINIT ELS. 
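 * The WWN copied in below is handed to the JOB_LINK_RESET job through
 * job_private, and the FP_SOFT_IN_LINK_RESET flag keeps a second
 * reset from being started while one is already in progress.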
9099 */ 9100 9101 if (fcio->fcio_xfer != FCIO_XFER_WRITE || 9102 fcio->fcio_ilen != sizeof (pwwn)) { 9103 rval = EINVAL; 9104 break; 9105 } 9106 9107 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, 9108 sizeof (pwwn), mode)) { 9109 rval = EFAULT; 9110 break; 9111 } 9112 9113 mutex_enter(&port->fp_mutex); 9114 if (port->fp_soft_state & FP_SOFT_IN_LINK_RESET) { 9115 mutex_exit(&port->fp_mutex); 9116 break; 9117 } 9118 port->fp_soft_state |= FP_SOFT_IN_LINK_RESET; 9119 mutex_exit(&port->fp_mutex); 9120 9121 job = fctl_alloc_job(JOB_LINK_RESET, 0, NULL, NULL, KM_SLEEP); 9122 if (job == NULL) { 9123 rval = ENOMEM; 9124 break; 9125 } 9126 job->job_counter = 1; 9127 job->job_private = (void *)&pwwn; 9128 9129 fctl_enque_job(port, job); 9130 fctl_jobwait(job); 9131 9132 mutex_enter(&port->fp_mutex); 9133 port->fp_soft_state &= ~FP_SOFT_IN_LINK_RESET; 9134 mutex_exit(&port->fp_mutex); 9135 9136 if (job->job_result != FC_SUCCESS) { 9137 fcio->fcio_errno = job->job_result; 9138 rval = EIO; 9139 if (fp_fcio_copyout(fcio, data, mode)) { 9140 rval = EFAULT; 9141 } 9142 } 9143 fctl_dealloc_job(job); 9144 break; 9145 } 9146 9147 case FCIO_RESET_HARD: 9148 ret = port->fp_fca_tran->fca_reset( 9149 port->fp_fca_handle, FC_FCA_RESET); 9150 if (ret != FC_SUCCESS) { 9151 fcio->fcio_errno = ret; 9152 rval = EIO; 9153 if (fp_fcio_copyout(fcio, data, mode)) { 9154 rval = EFAULT; 9155 } 9156 } 9157 break; 9158 9159 case FCIO_RESET_HARD_CORE: 9160 ret = port->fp_fca_tran->fca_reset( 9161 port->fp_fca_handle, FC_FCA_RESET_CORE); 9162 if (ret != FC_SUCCESS) { 9163 rval = EIO; 9164 fcio->fcio_errno = ret; 9165 if (fp_fcio_copyout(fcio, data, mode)) { 9166 rval = EFAULT; 9167 } 9168 } 9169 break; 9170 9171 case FCIO_DIAG: { 9172 fc_fca_pm_t pm; 9173 9174 bzero((caddr_t)&pm, sizeof (fc_fca_pm_t)); 9175 9176 /* Validate user buffer from ioctl call. */ 9177 if (((fcio->fcio_ilen > 0) && (fcio->fcio_ibuf == NULL)) || 9178 ((fcio->fcio_ilen <= 0) && (fcio->fcio_ibuf != NULL)) || 9179 ((fcio->fcio_alen > 0) && (fcio->fcio_abuf == NULL)) || 9180 ((fcio->fcio_alen <= 0) && (fcio->fcio_abuf != NULL)) || 9181 ((fcio->fcio_olen > 0) && (fcio->fcio_obuf == NULL)) || 9182 ((fcio->fcio_olen <= 0) && (fcio->fcio_obuf != NULL))) { 9183 rval = EFAULT; 9184 break; 9185 } 9186 9187 if ((pm.pm_cmd_len = fcio->fcio_ilen) > 0) { 9188 pm.pm_cmd_buf = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 9189 if (ddi_copyin(fcio->fcio_ibuf, pm.pm_cmd_buf, 9190 fcio->fcio_ilen, mode)) { 9191 rval = EFAULT; 9192 goto fp_fcio_diag_cleanup; 9193 } 9194 } 9195 9196 if ((pm.pm_data_len = fcio->fcio_alen) > 0) { 9197 pm.pm_data_buf = kmem_zalloc(fcio->fcio_alen, KM_SLEEP); 9198 if (ddi_copyin(fcio->fcio_abuf, pm.pm_data_buf, 9199 fcio->fcio_alen, mode)) { 9200 rval = EFAULT; 9201 goto fp_fcio_diag_cleanup; 9202 } 9203 } 9204 9205 if ((pm.pm_stat_len = fcio->fcio_olen) > 0) { 9206 pm.pm_stat_buf = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 9207 } 9208 9209 pm.pm_cmd_code = FC_PORT_DIAG; 9210 pm.pm_cmd_flags = fcio->fcio_cmd_flags; 9211 9212 ret = port->fp_fca_tran->fca_port_manage( 9213 port->fp_fca_handle, &pm); 9214 9215 if (ret != FC_SUCCESS) { 9216 if (ret == FC_INVALID_REQUEST) { 9217 rval = ENOTTY; 9218 } else { 9219 rval = EIO; 9220 } 9221 9222 fcio->fcio_errno = ret; 9223 if (fp_fcio_copyout(fcio, data, mode)) { 9224 rval = EFAULT; 9225 } 9226 goto fp_fcio_diag_cleanup; 9227 } 9228 9229 /* 9230 * pm_stat_len will contain the number of status bytes 9231 * an FCA driver requires to return the complete status 9232 * of the requested diag operation. 
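 * (pm_stat_len is updated by the FCA during fca_port_manage().)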
If the user buffer 9233 * is not large enough to hold the entire status, We 9234 * copy only the portion of data the fits in the buffer and 9235 * return a ENOMEM to the user application. 9236 */ 9237 if (pm.pm_stat_len > fcio->fcio_olen) { 9238 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 9239 "fp:FCIO_DIAG:status buffer too small\n"); 9240 9241 rval = ENOMEM; 9242 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf, 9243 fcio->fcio_olen, mode)) { 9244 rval = EFAULT; 9245 goto fp_fcio_diag_cleanup; 9246 } 9247 } else { 9248 /* 9249 * Copy only data pm_stat_len bytes of data 9250 */ 9251 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf, 9252 pm.pm_stat_len, mode)) { 9253 rval = EFAULT; 9254 goto fp_fcio_diag_cleanup; 9255 } 9256 } 9257 9258 if (fp_fcio_copyout(fcio, data, mode)) { 9259 rval = EFAULT; 9260 } 9261 9262 fp_fcio_diag_cleanup: 9263 if (pm.pm_cmd_buf != NULL) { 9264 kmem_free(pm.pm_cmd_buf, fcio->fcio_ilen); 9265 } 9266 if (pm.pm_data_buf != NULL) { 9267 kmem_free(pm.pm_data_buf, fcio->fcio_alen); 9268 } 9269 if (pm.pm_stat_buf != NULL) { 9270 kmem_free(pm.pm_stat_buf, fcio->fcio_olen); 9271 } 9272 9273 break; 9274 } 9275 9276 case FCIO_GET_NODE_ID: { 9277 /* validate parameters */ 9278 if (fcio->fcio_xfer != FCIO_XFER_READ || 9279 fcio->fcio_olen < sizeof (fc_rnid_t)) { 9280 rval = EINVAL; 9281 break; 9282 } 9283 9284 rval = fp_get_rnid(port, data, mode, fcio); 9285 9286 /* ioctl handling is over */ 9287 break; 9288 } 9289 9290 case FCIO_SEND_NODE_ID: { 9291 la_wwn_t pwwn; 9292 9293 /* validate parameters */ 9294 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 9295 fcio->fcio_xfer != FCIO_XFER_READ) { 9296 rval = EINVAL; 9297 break; 9298 } 9299 9300 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, 9301 sizeof (la_wwn_t), mode)) { 9302 rval = EFAULT; 9303 break; 9304 } 9305 9306 rval = fp_send_rnid(port, data, mode, fcio, &pwwn); 9307 9308 /* ioctl handling is over */ 9309 break; 9310 } 9311 9312 case FCIO_SET_NODE_ID: { 9313 if (fcio->fcio_ilen != sizeof (fc_rnid_t) || 9314 (fcio->fcio_xfer != FCIO_XFER_WRITE)) { 9315 rval = EINVAL; 9316 break; 9317 } 9318 9319 rval = fp_set_rnid(port, data, mode, fcio); 9320 break; 9321 } 9322 9323 case FCIO_LINK_STATUS: { 9324 fc_portid_t rls_req; 9325 fc_rls_acc_t *rls_acc; 9326 fc_fca_pm_t pm; 9327 uint32_t dest, src_id; 9328 fp_cmd_t *cmd; 9329 fc_remote_port_t *pd; 9330 uchar_t pd_flags; 9331 9332 /* validate parameters */ 9333 if (fcio->fcio_ilen != sizeof (fc_portid_t) || 9334 fcio->fcio_olen != sizeof (fc_rls_acc_t) || 9335 fcio->fcio_xfer != FCIO_XFER_RW) { 9336 rval = EINVAL; 9337 break; 9338 } 9339 9340 if ((fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_FPORT) && 9341 (fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_NPORT)) { 9342 rval = EINVAL; 9343 break; 9344 } 9345 9346 if (ddi_copyin((void *)fcio->fcio_ibuf, (void *)&rls_req, 9347 sizeof (fc_portid_t), mode)) { 9348 rval = EFAULT; 9349 break; 9350 } 9351 9352 9353 /* Determine the destination of the RLS frame */ 9354 if (fcio->fcio_cmd_flags == FCIO_CFLAGS_RLS_DEST_FPORT) { 9355 dest = FS_FABRIC_F_PORT; 9356 } else { 9357 dest = rls_req.port_id; 9358 } 9359 9360 mutex_enter(&port->fp_mutex); 9361 src_id = port->fp_port_id.port_id; 9362 mutex_exit(&port->fp_mutex); 9363 9364 /* If dest is zero OR same as FCA ID, then use port_manage() */ 9365 if (dest == 0 || dest == src_id) { 9366 9367 /* Allocate memory for link error status block */ 9368 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP); 9369 ASSERT(rls_acc != NULL); 9370 9371 /* Prepare the port management structure */ 9372 bzero((caddr_t)&pm, 
sizeof (pm)); 9373 9374 pm.pm_cmd_flags = FC_FCA_PM_READ; 9375 pm.pm_cmd_code = FC_PORT_RLS; 9376 pm.pm_data_len = sizeof (*rls_acc); 9377 pm.pm_data_buf = (caddr_t)rls_acc; 9378 9379 /* Get the adapter's link error status block */ 9380 ret = port->fp_fca_tran->fca_port_manage( 9381 port->fp_fca_handle, &pm); 9382 9383 if (ret == FC_SUCCESS) { 9384 /* xfer link status block to userland */ 9385 if (ddi_copyout((void *)rls_acc, 9386 (void *)fcio->fcio_obuf, 9387 sizeof (*rls_acc), mode) == 0) { 9388 if (fp_fcio_copyout(fcio, data, 9389 mode)) { 9390 rval = EFAULT; 9391 } 9392 } else { 9393 rval = EFAULT; 9394 } 9395 } else { 9396 rval = EIO; 9397 fcio->fcio_errno = ret; 9398 if (fp_fcio_copyout(fcio, data, mode)) { 9399 rval = EFAULT; 9400 } 9401 } 9402 9403 kmem_free(rls_acc, sizeof (*rls_acc)); 9404 9405 /* ioctl handling is over */ 9406 break; 9407 } 9408 9409 /* 9410 * Send RLS to the destination port. 9411 * Having RLS frame destination is as FPORT is not yet 9412 * supported and will be implemented in future, if needed. 9413 * Following call to get "pd" will fail if dest is FPORT 9414 */ 9415 pd = fctl_hold_remote_port_by_did(port, dest); 9416 if (pd == NULL) { 9417 fcio->fcio_errno = FC_BADOBJECT; 9418 rval = ENXIO; 9419 if (fp_fcio_copyout(fcio, data, mode)) { 9420 rval = EFAULT; 9421 } 9422 break; 9423 } 9424 9425 mutex_enter(&pd->pd_mutex); 9426 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 9427 mutex_exit(&pd->pd_mutex); 9428 fctl_release_remote_port(pd); 9429 9430 fcio->fcio_errno = FC_LOGINREQ; 9431 rval = EINVAL; 9432 if (fp_fcio_copyout(fcio, data, mode)) { 9433 rval = EFAULT; 9434 } 9435 break; 9436 } 9437 ASSERT(pd->pd_login_count >= 1); 9438 mutex_exit(&pd->pd_mutex); 9439 9440 /* 9441 * Allocate job structure and set job_code as DUMMY, 9442 * because we will not go through the job thread. 9443 * Instead fp_sendcmd() is called directly here. 9444 */ 9445 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC, 9446 NULL, NULL, KM_SLEEP); 9447 ASSERT(job != NULL); 9448 9449 job->job_counter = 1; 9450 9451 cmd = fp_alloc_pkt(port, sizeof (la_els_rls_t), 9452 sizeof (la_els_rls_acc_t), KM_SLEEP, pd); 9453 if (cmd == NULL) { 9454 fcio->fcio_errno = FC_NOMEM; 9455 rval = ENOMEM; 9456 9457 fctl_release_remote_port(pd); 9458 9459 fctl_dealloc_job(job); 9460 if (fp_fcio_copyout(fcio, data, mode)) { 9461 rval = EFAULT; 9462 } 9463 break; 9464 } 9465 9466 /* Allocate memory for link error status block */ 9467 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP); 9468 9469 mutex_enter(&port->fp_mutex); 9470 mutex_enter(&pd->pd_mutex); 9471 9472 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 9473 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 9474 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 9475 cmd->cmd_retry_count = 1; 9476 cmd->cmd_ulp_pkt = NULL; 9477 9478 fp_rls_init(cmd, job); 9479 9480 job->job_private = (void *)rls_acc; 9481 9482 pd_flags = pd->pd_flags; 9483 pd->pd_flags = PD_ELS_IN_PROGRESS; 9484 9485 mutex_exit(&pd->pd_mutex); 9486 mutex_exit(&port->fp_mutex); 9487 9488 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 9489 fctl_jobwait(job); 9490 9491 fcio->fcio_errno = job->job_result; 9492 if (job->job_result == FC_SUCCESS) { 9493 ASSERT(pd != NULL); 9494 /* 9495 * link error status block is now available. 
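 * (The completion path has deposited the RLS accept payload in
 * rls_acc, which is what job_private points to.)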
9496 * Copy it to userland 9497 */ 9498 ASSERT(job->job_private == (void *)rls_acc); 9499 if (ddi_copyout((void *)rls_acc, 9500 (void *)fcio->fcio_obuf, 9501 sizeof (*rls_acc), mode) == 0) { 9502 if (fp_fcio_copyout(fcio, data, 9503 mode)) { 9504 rval = EFAULT; 9505 } 9506 } else { 9507 rval = EFAULT; 9508 } 9509 } else { 9510 rval = EIO; 9511 } 9512 } else { 9513 rval = EIO; 9514 fp_free_pkt(cmd); 9515 } 9516 9517 if (rval) { 9518 mutex_enter(&port->fp_mutex); 9519 mutex_enter(&pd->pd_mutex); 9520 if (pd->pd_flags == PD_ELS_IN_PROGRESS) { 9521 pd->pd_flags = pd_flags; 9522 } 9523 mutex_exit(&pd->pd_mutex); 9524 mutex_exit(&port->fp_mutex); 9525 } 9526 9527 fctl_release_remote_port(pd); 9528 fctl_dealloc_job(job); 9529 kmem_free(rls_acc, sizeof (*rls_acc)); 9530 9531 if (fp_fcio_copyout(fcio, data, mode)) { 9532 rval = EFAULT; 9533 } 9534 break; 9535 } 9536 9537 case FCIO_NS: { 9538 fc_ns_cmd_t *ns_req; 9539 fc_ns_cmd32_t *ns_req32; 9540 fctl_ns_req_t *ns_cmd; 9541 9542 if (use32 == B_TRUE) { 9543 if (fcio->fcio_ilen != sizeof (*ns_req32)) { 9544 rval = EINVAL; 9545 break; 9546 } 9547 9548 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP); 9549 ns_req32 = kmem_zalloc(sizeof (*ns_req32), KM_SLEEP); 9550 9551 if (ddi_copyin(fcio->fcio_ibuf, ns_req32, 9552 sizeof (*ns_req32), mode)) { 9553 rval = EFAULT; 9554 kmem_free(ns_req, sizeof (*ns_req)); 9555 kmem_free(ns_req32, sizeof (*ns_req32)); 9556 break; 9557 } 9558 9559 ns_req->ns_flags = ns_req32->ns_flags; 9560 ns_req->ns_cmd = ns_req32->ns_cmd; 9561 ns_req->ns_req_len = ns_req32->ns_req_len; 9562 ns_req->ns_req_payload = ns_req32->ns_req_payload; 9563 ns_req->ns_resp_len = ns_req32->ns_resp_len; 9564 ns_req->ns_resp_payload = ns_req32->ns_resp_payload; 9565 ns_req->ns_fctl_private = ns_req32->ns_fctl_private; 9566 ns_req->ns_resp_hdr = ns_req32->ns_resp_hdr; 9567 9568 kmem_free(ns_req32, sizeof (*ns_req32)); 9569 } else { 9570 if (fcio->fcio_ilen != sizeof (*ns_req)) { 9571 rval = EINVAL; 9572 break; 9573 } 9574 9575 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP); 9576 9577 if (ddi_copyin(fcio->fcio_ibuf, ns_req, 9578 sizeof (fc_ns_cmd_t), mode)) { 9579 rval = EFAULT; 9580 kmem_free(ns_req, sizeof (*ns_req)); 9581 break; 9582 } 9583 } 9584 9585 if (ns_req->ns_req_len <= 0) { 9586 rval = EINVAL; 9587 kmem_free(ns_req, sizeof (*ns_req)); 9588 break; 9589 } 9590 9591 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, NULL, KM_SLEEP); 9592 ASSERT(job != NULL); 9593 9594 ns_cmd = fctl_alloc_ns_cmd(ns_req->ns_req_len, 9595 ns_req->ns_resp_len, ns_req->ns_resp_len, 9596 FCTL_NS_FILL_NS_MAP, KM_SLEEP); 9597 ASSERT(ns_cmd != NULL); 9598 ns_cmd->ns_cmd_code = ns_req->ns_cmd; 9599 9600 if (ns_cmd->ns_cmd_code == NS_GA_NXT) { 9601 ns_cmd->ns_gan_max = 1; 9602 ns_cmd->ns_gan_index = 0; 9603 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 9604 } 9605 9606 if (ddi_copyin(ns_req->ns_req_payload, 9607 ns_cmd->ns_cmd_buf, ns_req->ns_req_len, mode)) { 9608 rval = EFAULT; 9609 fctl_free_ns_cmd(ns_cmd); 9610 fctl_dealloc_job(job); 9611 kmem_free(ns_req, sizeof (*ns_req)); 9612 break; 9613 } 9614 9615 job->job_private = (void *)ns_cmd; 9616 fctl_enque_job(port, job); 9617 fctl_jobwait(job); 9618 rval = job->job_result; 9619 9620 if (rval == FC_SUCCESS) { 9621 if (ns_req->ns_resp_len) { 9622 if (ddi_copyout(ns_cmd->ns_data_buf, 9623 ns_req->ns_resp_payload, 9624 ns_cmd->ns_data_len, mode)) { 9625 rval = EFAULT; 9626 fctl_free_ns_cmd(ns_cmd); 9627 fctl_dealloc_job(job); 9628 kmem_free(ns_req, sizeof (*ns_req)); 9629 break; 9630 } 9631 } 9632 } else { 9633 rval = EIO; 9634 } 9635 
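		/*
		 * Save the CT response header from the completed NS
		 * command before the command and job are freed.
		 */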
ns_req->ns_resp_hdr = ns_cmd->ns_resp_hdr; 9636 fctl_free_ns_cmd(ns_cmd); 9637 fctl_dealloc_job(job); 9638 kmem_free(ns_req, sizeof (*ns_req)); 9639 9640 if (fp_fcio_copyout(fcio, data, mode)) { 9641 rval = EFAULT; 9642 } 9643 break; 9644 } 9645 9646 default: 9647 rval = ENOTTY; 9648 break; 9649 } 9650 9651 /* 9652 * If set, reset the EXCL busy bit to 9653 * receive other exclusive access commands 9654 */ 9655 mutex_enter(&port->fp_mutex); 9656 if (port->fp_flag & FP_EXCL_BUSY) { 9657 port->fp_flag &= ~FP_EXCL_BUSY; 9658 } 9659 mutex_exit(&port->fp_mutex); 9660 9661 return (rval); 9662 } 9663 9664 9665 /* 9666 * This function assumes that the response length 9667 * is same regardless of data model (LP32 or LP64) 9668 * which is true for all the ioctls currently 9669 * supported. 9670 */ 9671 static int 9672 fp_copyout(void *from, void *to, size_t len, int mode) 9673 { 9674 return (ddi_copyout(from, to, len, mode)); 9675 } 9676 9677 /* 9678 * This function does the set rnid 9679 */ 9680 static int 9681 fp_set_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 9682 { 9683 int rval = 0; 9684 fc_rnid_t *rnid; 9685 fc_fca_pm_t pm; 9686 9687 /* Allocate memory for node id block */ 9688 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP); 9689 9690 if (ddi_copyin(fcio->fcio_ibuf, rnid, sizeof (fc_rnid_t), mode)) { 9691 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", EFAULT); 9692 kmem_free(rnid, sizeof (fc_rnid_t)); 9693 return (EFAULT); 9694 } 9695 9696 /* Prepare the port management structure */ 9697 bzero((caddr_t)&pm, sizeof (pm)); 9698 9699 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 9700 pm.pm_cmd_code = FC_PORT_SET_NODE_ID; 9701 pm.pm_data_len = sizeof (*rnid); 9702 pm.pm_data_buf = (caddr_t)rnid; 9703 9704 /* Get the adapter's node data */ 9705 rval = port->fp_fca_tran->fca_port_manage( 9706 port->fp_fca_handle, &pm); 9707 9708 if (rval != FC_SUCCESS) { 9709 fcio->fcio_errno = rval; 9710 rval = EIO; 9711 if (fp_fcio_copyout(fcio, data, mode)) { 9712 rval = EFAULT; 9713 } 9714 } else { 9715 mutex_enter(&port->fp_mutex); 9716 /* copy to the port structure */ 9717 bcopy(rnid, &port->fp_rnid_params, 9718 sizeof (port->fp_rnid_params)); 9719 mutex_exit(&port->fp_mutex); 9720 } 9721 9722 kmem_free(rnid, sizeof (fc_rnid_t)); 9723 9724 if (rval != FC_SUCCESS) { 9725 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", rval); 9726 } 9727 9728 return (rval); 9729 } 9730 9731 /* 9732 * This function does the local pwwn get rnid 9733 */ 9734 static int 9735 fp_get_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 9736 { 9737 fc_rnid_t *rnid; 9738 fc_fca_pm_t pm; 9739 int rval = 0; 9740 uint32_t ret; 9741 9742 /* Allocate memory for rnid data block */ 9743 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP); 9744 9745 mutex_enter(&port->fp_mutex); 9746 if (port->fp_rnid_init == 1) { 9747 bcopy(&port->fp_rnid_params, rnid, sizeof (fc_rnid_t)); 9748 mutex_exit(&port->fp_mutex); 9749 /* xfer node info to userland */ 9750 if (ddi_copyout((void *)rnid, (void *)fcio->fcio_obuf, 9751 sizeof (*rnid), mode) == 0) { 9752 if (fp_fcio_copyout(fcio, data, mode)) { 9753 rval = EFAULT; 9754 } 9755 } else { 9756 rval = EFAULT; 9757 } 9758 9759 kmem_free(rnid, sizeof (fc_rnid_t)); 9760 9761 if (rval != FC_SUCCESS) { 9762 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d", 9763 rval); 9764 } 9765 9766 return (rval); 9767 } 9768 mutex_exit(&port->fp_mutex); 9769 9770 /* Prepare the port management structure */ 9771 bzero((caddr_t)&pm, sizeof (pm)); 9772 9773 pm.pm_cmd_flags = FC_FCA_PM_READ; 
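	/*
	 * FC_PORT_GET_NODE_ID fetches the node identification data from
	 * the FCA; on success it is cached in fp_rnid_params below so
	 * later calls are satisfied from the fast path above.
	 */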
9774 pm.pm_cmd_code = FC_PORT_GET_NODE_ID; 9775 pm.pm_data_len = sizeof (fc_rnid_t); 9776 pm.pm_data_buf = (caddr_t)rnid; 9777 9778 /* Get the adapter's node data */ 9779 ret = port->fp_fca_tran->fca_port_manage( 9780 port->fp_fca_handle, 9781 &pm); 9782 9783 if (ret == FC_SUCCESS) { 9784 /* initialize in the port_info */ 9785 mutex_enter(&port->fp_mutex); 9786 port->fp_rnid_init = 1; 9787 bcopy(rnid, &port->fp_rnid_params, sizeof (*rnid)); 9788 mutex_exit(&port->fp_mutex); 9789 9790 /* xfer node info to userland */ 9791 if (ddi_copyout((void *)rnid, 9792 (void *)fcio->fcio_obuf, 9793 sizeof (*rnid), mode) == 0) { 9794 if (fp_fcio_copyout(fcio, data, 9795 mode)) { 9796 rval = EFAULT; 9797 } 9798 } else { 9799 rval = EFAULT; 9800 } 9801 } else { 9802 rval = EIO; 9803 fcio->fcio_errno = ret; 9804 if (fp_fcio_copyout(fcio, data, mode)) { 9805 rval = EFAULT; 9806 } 9807 } 9808 9809 kmem_free(rnid, sizeof (fc_rnid_t)); 9810 9811 if (rval != FC_SUCCESS) { 9812 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d", rval); 9813 } 9814 9815 return (rval); 9816 } 9817 9818 static int 9819 fp_send_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio, 9820 la_wwn_t *pwwn) 9821 { 9822 int rval = 0; 9823 fc_remote_port_t *pd; 9824 fp_cmd_t *cmd; 9825 job_request_t *job; 9826 la_els_rnid_acc_t *rnid_acc; 9827 9828 pd = fctl_get_remote_port_by_pwwn(port, pwwn); 9829 if (pd == NULL) { 9830 /* 9831 * We can safely assume that the destination port 9832 * is logged in. Either the user land will explicitly 9833 * login before issuing RNID ioctl or the device would 9834 * have been configured, meaning already logged in. 9835 */ 9836 9837 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", ENXIO); 9838 9839 return (ENXIO); 9840 } 9841 /* 9842 * Allocate job structure and set job_code as DUMMY, 9843 * because we will not go thorugh the job thread. 9844 * Instead fp_sendcmd() is called directly here. 9845 */ 9846 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC, 9847 NULL, NULL, KM_SLEEP); 9848 9849 ASSERT(job != NULL); 9850 9851 job->job_counter = 1; 9852 9853 cmd = fp_alloc_pkt(port, sizeof (la_els_rnid_t), 9854 sizeof (la_els_rnid_acc_t), KM_SLEEP, pd); 9855 if (cmd == NULL) { 9856 fcio->fcio_errno = FC_NOMEM; 9857 rval = ENOMEM; 9858 9859 fctl_dealloc_job(job); 9860 if (fp_fcio_copyout(fcio, data, mode)) { 9861 rval = EFAULT; 9862 } 9863 9864 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval); 9865 9866 return (rval); 9867 } 9868 9869 /* Allocate memory for node id accept block */ 9870 rnid_acc = kmem_zalloc(sizeof (la_els_rnid_acc_t), KM_SLEEP); 9871 9872 mutex_enter(&port->fp_mutex); 9873 mutex_enter(&pd->pd_mutex); 9874 9875 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 9876 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 9877 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 9878 cmd->cmd_retry_count = 1; 9879 cmd->cmd_ulp_pkt = NULL; 9880 9881 fp_rnid_init(cmd, fcio->fcio_cmd_flags, job); 9882 9883 job->job_private = (void *)rnid_acc; 9884 9885 pd->pd_flags = PD_ELS_IN_PROGRESS; 9886 9887 mutex_exit(&pd->pd_mutex); 9888 mutex_exit(&port->fp_mutex); 9889 9890 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 9891 fctl_jobwait(job); 9892 fcio->fcio_errno = job->job_result; 9893 if (job->job_result == FC_SUCCESS) { 9894 int rnid_cnt; 9895 ASSERT(pd != NULL); 9896 /* 9897 * node id block is now available. 
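 * (rnid_acc, pointed to by job_private, now holds the RNID accept;
 * rnid_cnt below sizes the copyout from its common and specific
 * length fields.)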
9898 * Copy it to userland 9899 */ 9900 ASSERT(job->job_private == (void *)rnid_acc); 9901 9902 /* get the response length */ 9903 rnid_cnt = sizeof (ls_code_t) + sizeof (fc_rnid_hdr_t) + 9904 rnid_acc->hdr.cmn_len + 9905 rnid_acc->hdr.specific_len; 9906 9907 if (fcio->fcio_olen < rnid_cnt) { 9908 rval = EINVAL; 9909 } else if (ddi_copyout((void *)rnid_acc, 9910 (void *)fcio->fcio_obuf, 9911 rnid_cnt, mode) == 0) { 9912 if (fp_fcio_copyout(fcio, data, 9913 mode)) { 9914 rval = EFAULT; 9915 } 9916 } else { 9917 rval = EFAULT; 9918 } 9919 } else { 9920 rval = EIO; 9921 } 9922 } else { 9923 rval = EIO; 9924 if (pd) { 9925 mutex_enter(&pd->pd_mutex); 9926 pd->pd_flags = PD_IDLE; 9927 mutex_exit(&pd->pd_mutex); 9928 } 9929 fp_free_pkt(cmd); 9930 } 9931 9932 fctl_dealloc_job(job); 9933 kmem_free(rnid_acc, sizeof (la_els_rnid_acc_t)); 9934 9935 if (fp_fcio_copyout(fcio, data, mode)) { 9936 rval = EFAULT; 9937 } 9938 9939 if (rval != FC_SUCCESS) { 9940 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval); 9941 } 9942 9943 return (rval); 9944 } 9945 9946 /* 9947 * Copy out to userland 9948 */ 9949 static int 9950 fp_fcio_copyout(fcio_t *fcio, intptr_t data, int mode) 9951 { 9952 int rval; 9953 9954 #ifdef _MULTI_DATAMODEL 9955 switch (ddi_model_convert_from(mode & FMODELS)) { 9956 case DDI_MODEL_ILP32: { 9957 struct fcio32 fcio32; 9958 9959 fcio32.fcio_xfer = fcio->fcio_xfer; 9960 fcio32.fcio_cmd = fcio->fcio_cmd; 9961 fcio32.fcio_flags = fcio->fcio_flags; 9962 fcio32.fcio_cmd_flags = fcio->fcio_cmd_flags; 9963 fcio32.fcio_ilen = fcio->fcio_ilen; 9964 fcio32.fcio_ibuf = 9965 (caddr32_t)(uintptr_t)fcio->fcio_ibuf; 9966 fcio32.fcio_olen = fcio->fcio_olen; 9967 fcio32.fcio_obuf = 9968 (caddr32_t)(uintptr_t)fcio->fcio_obuf; 9969 fcio32.fcio_alen = fcio->fcio_alen; 9970 fcio32.fcio_abuf = 9971 (caddr32_t)(uintptr_t)fcio->fcio_abuf; 9972 fcio32.fcio_errno = fcio->fcio_errno; 9973 9974 rval = ddi_copyout((void *)&fcio32, (void *)data, 9975 sizeof (struct fcio32), mode); 9976 break; 9977 } 9978 case DDI_MODEL_NONE: 9979 rval = ddi_copyout((void *)fcio, (void *)data, 9980 sizeof (fcio_t), mode); 9981 break; 9982 } 9983 #else 9984 rval = ddi_copyout((void *)fcio, (void *)data, sizeof (fcio_t), mode); 9985 #endif 9986 9987 return (rval); 9988 } 9989 9990 9991 static void 9992 fp_p2p_online(fc_local_port_t *port, job_request_t *job) 9993 { 9994 uint32_t listlen; 9995 fc_portmap_t *changelist; 9996 9997 ASSERT(MUTEX_HELD(&port->fp_mutex)); 9998 ASSERT(port->fp_topology == FC_TOP_PT_PT); 9999 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 10000 10001 listlen = 0; 10002 changelist = NULL; 10003 10004 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10005 if (port->fp_statec_busy > 1) { 10006 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 10007 } 10008 } 10009 mutex_exit(&port->fp_mutex); 10010 10011 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10012 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 10013 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 10014 listlen, listlen, KM_SLEEP); 10015 10016 mutex_enter(&port->fp_mutex); 10017 } else { 10018 ASSERT(changelist == NULL && listlen == 0); 10019 mutex_enter(&port->fp_mutex); 10020 if (--port->fp_statec_busy == 0) { 10021 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 10022 } 10023 } 10024 } 10025 10026 static int 10027 fp_fillout_p2pmap(fc_local_port_t *port, fcio_t *fcio, int mode) 10028 { 10029 int rval; 10030 int count; 10031 int index; 10032 int num_devices; 10033 fc_remote_node_t *node; 10034 fc_port_dev_t 
*devlist; 10035 struct pwwn_hash *head; 10036 fc_remote_port_t *pd; 10037 10038 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10039 10040 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 10041 10042 devlist = kmem_zalloc(sizeof (fc_port_dev_t) * num_devices, KM_SLEEP); 10043 10044 for (count = index = 0; index < pwwn_table_size; index++) { 10045 head = &port->fp_pwwn_table[index]; 10046 pd = head->pwwn_head; 10047 while (pd != NULL) { 10048 mutex_enter(&pd->pd_mutex); 10049 if (pd->pd_state == PORT_DEVICE_INVALID) { 10050 mutex_exit(&pd->pd_mutex); 10051 pd = pd->pd_wwn_hnext; 10052 continue; 10053 } 10054 10055 devlist[count].dev_state = pd->pd_state; 10056 devlist[count].dev_hard_addr = pd->pd_hard_addr; 10057 devlist[count].dev_did = pd->pd_port_id; 10058 devlist[count].dev_did.priv_lilp_posit = 10059 (uint8_t)(index & 0xff); 10060 bcopy((caddr_t)pd->pd_fc4types, 10061 (caddr_t)devlist[count].dev_type, 10062 sizeof (pd->pd_fc4types)); 10063 10064 bcopy((caddr_t)&pd->pd_port_name, 10065 (caddr_t)&devlist[count].dev_pwwn, 10066 sizeof (la_wwn_t)); 10067 10068 node = pd->pd_remote_nodep; 10069 mutex_exit(&pd->pd_mutex); 10070 10071 if (node) { 10072 mutex_enter(&node->fd_mutex); 10073 bcopy((caddr_t)&node->fd_node_name, 10074 (caddr_t)&devlist[count].dev_nwwn, 10075 sizeof (la_wwn_t)); 10076 mutex_exit(&node->fd_mutex); 10077 } 10078 count++; 10079 if (count >= num_devices) { 10080 goto found; 10081 } 10082 } 10083 } 10084 found: 10085 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf, 10086 sizeof (count), mode)) { 10087 rval = FC_FAILURE; 10088 } else if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf, 10089 sizeof (fc_port_dev_t) * num_devices, mode)) { 10090 rval = FC_FAILURE; 10091 } else { 10092 rval = FC_SUCCESS; 10093 } 10094 10095 kmem_free(devlist, sizeof (fc_port_dev_t) * num_devices); 10096 10097 return (rval); 10098 } 10099 10100 10101 /* 10102 * Handle Fabric ONLINE 10103 */ 10104 static void 10105 fp_fabric_online(fc_local_port_t *port, job_request_t *job) 10106 { 10107 int index; 10108 int rval; 10109 int dbg_count; 10110 int count = 0; 10111 char ww_name[17]; 10112 uint32_t d_id; 10113 uint32_t listlen; 10114 fctl_ns_req_t *ns_cmd; 10115 struct pwwn_hash *head; 10116 fc_remote_port_t *pd; 10117 fc_remote_port_t *npd; 10118 fc_portmap_t *changelist; 10119 10120 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10121 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology)); 10122 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 10123 10124 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 10125 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 10126 0, KM_SLEEP); 10127 10128 ASSERT(ns_cmd != NULL); 10129 10130 ns_cmd->ns_cmd_code = NS_GID_PN; 10131 10132 /* 10133 * Check if orphans are showing up now 10134 */ 10135 if (port->fp_orphan_count) { 10136 fc_orphan_t *orp; 10137 fc_orphan_t *norp = NULL; 10138 fc_orphan_t *prev = NULL; 10139 10140 for (orp = port->fp_orphan_list; orp; orp = norp) { 10141 norp = orp->orp_next; 10142 mutex_exit(&port->fp_mutex); 10143 orp->orp_nscan++; 10144 10145 job->job_counter = 1; 10146 job->job_result = FC_SUCCESS; 10147 10148 ((ns_req_gid_pn_t *) 10149 (ns_cmd->ns_cmd_buf))->pwwn = orp->orp_pwwn; 10150 ((ns_resp_gid_pn_t *) 10151 ns_cmd->ns_data_buf)->pid.port_id = 0; 10152 ((ns_resp_gid_pn_t *) 10153 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0; 10154 10155 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 10156 if (rval == FC_SUCCESS) { 10157 d_id = 10158 BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 10159 pd = fp_create_remote_port_by_ns(port, 
10160 d_id, KM_SLEEP); 10161 10162 if (pd != NULL) { 10163 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 10164 10165 fp_printf(port, CE_WARN, FP_LOG_ONLY, 10166 0, NULL, "N_x Port with D_ID=%x," 10167 " PWWN=%s reappeared in fabric", 10168 d_id, ww_name); 10169 10170 mutex_enter(&port->fp_mutex); 10171 if (prev) { 10172 prev->orp_next = orp->orp_next; 10173 } else { 10174 ASSERT(orp == 10175 port->fp_orphan_list); 10176 port->fp_orphan_list = 10177 orp->orp_next; 10178 } 10179 port->fp_orphan_count--; 10180 mutex_exit(&port->fp_mutex); 10181 kmem_free(orp, sizeof (*orp)); 10182 count++; 10183 10184 mutex_enter(&pd->pd_mutex); 10185 pd->pd_flags = PD_ELS_MARK; 10186 10187 mutex_exit(&pd->pd_mutex); 10188 } else { 10189 prev = orp; 10190 } 10191 } else { 10192 if (orp->orp_nscan == FC_ORPHAN_SCAN_LIMIT) { 10193 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 10194 10195 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, 10196 NULL, 10197 " Port WWN %s removed from orphan" 10198 " list after %d scans", ww_name, 10199 orp->orp_nscan); 10200 10201 mutex_enter(&port->fp_mutex); 10202 if (prev) { 10203 prev->orp_next = orp->orp_next; 10204 } else { 10205 ASSERT(orp == 10206 port->fp_orphan_list); 10207 port->fp_orphan_list = 10208 orp->orp_next; 10209 } 10210 port->fp_orphan_count--; 10211 mutex_exit(&port->fp_mutex); 10212 10213 kmem_free(orp, sizeof (*orp)); 10214 } else { 10215 prev = orp; 10216 } 10217 } 10218 mutex_enter(&port->fp_mutex); 10219 } 10220 } 10221 10222 /* 10223 * Walk the Port WWN hash table, reestablish LOGIN 10224 * if a LOGIN is already performed on a particular 10225 * device; Any failure to LOGIN should mark the 10226 * port device OLD. 10227 */ 10228 for (index = 0; index < pwwn_table_size; index++) { 10229 head = &port->fp_pwwn_table[index]; 10230 npd = head->pwwn_head; 10231 10232 while ((pd = npd) != NULL) { 10233 la_wwn_t *pwwn; 10234 10235 npd = pd->pd_wwn_hnext; 10236 10237 /* 10238 * Don't count in the port devices that are new 10239 * unless the total number of devices visible 10240 * through this port is less than FP_MAX_DEVICES 10241 */ 10242 mutex_enter(&pd->pd_mutex); 10243 if (port->fp_dev_count >= FP_MAX_DEVICES || 10244 (port->fp_options & FP_TARGET_MODE)) { 10245 if (pd->pd_type == PORT_DEVICE_NEW || 10246 pd->pd_flags == PD_ELS_MARK || 10247 pd->pd_recepient != PD_PLOGI_INITIATOR) { 10248 mutex_exit(&pd->pd_mutex); 10249 continue; 10250 } 10251 } else { 10252 if (pd->pd_flags == PD_ELS_MARK || 10253 pd->pd_recepient != PD_PLOGI_INITIATOR) { 10254 mutex_exit(&pd->pd_mutex); 10255 continue; 10256 } 10257 pd->pd_type = PORT_DEVICE_OLD; 10258 } 10259 count++; 10260 10261 /* 10262 * Consult with the name server about D_ID changes 10263 */ 10264 job->job_counter = 1; 10265 job->job_result = FC_SUCCESS; 10266 10267 ((ns_req_gid_pn_t *) 10268 (ns_cmd->ns_cmd_buf))->pwwn = pd->pd_port_name; 10269 ((ns_resp_gid_pn_t *) 10270 ns_cmd->ns_data_buf)->pid.port_id = 0; 10271 10272 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)-> 10273 pid.priv_lilp_posit = 0; 10274 10275 pwwn = &pd->pd_port_name; 10276 pd->pd_flags = PD_ELS_MARK; 10277 10278 mutex_exit(&pd->pd_mutex); 10279 mutex_exit(&port->fp_mutex); 10280 10281 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 10282 if (rval != FC_SUCCESS) { 10283 fc_wwn_to_str(pwwn, ww_name); 10284 10285 mutex_enter(&pd->pd_mutex); 10286 d_id = pd->pd_port_id.port_id; 10287 pd->pd_type = PORT_DEVICE_DELETE; 10288 mutex_exit(&pd->pd_mutex); 10289 10290 FP_TRACE(FP_NHEAD1(3, 0), 10291 "fp_fabric_online: PD " 10292 "disappeared; d_id=%x, PWWN=%s", 10293 d_id, 
ww_name); 10294 10295 FP_TRACE(FP_NHEAD2(9, 0), 10296 "N_x Port with D_ID=%x, PWWN=%s" 10297 " disappeared from fabric", d_id, 10298 ww_name); 10299 10300 mutex_enter(&port->fp_mutex); 10301 continue; 10302 } 10303 10304 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 10305 10306 mutex_enter(&port->fp_mutex); 10307 mutex_enter(&pd->pd_mutex); 10308 if (d_id != pd->pd_port_id.port_id) { 10309 fctl_delist_did_table(port, pd); 10310 fc_wwn_to_str(pwwn, ww_name); 10311 10312 FP_TRACE(FP_NHEAD2(9, 0), 10313 "D_ID of a device with PWWN %s changed." 10314 " New D_ID = %x, OLD D_ID = %x", ww_name, 10315 d_id, pd->pd_port_id.port_id); 10316 10317 pd->pd_port_id.port_id = BE_32(d_id); 10318 pd->pd_type = PORT_DEVICE_CHANGED; 10319 fctl_enlist_did_table(port, pd); 10320 } 10321 mutex_exit(&pd->pd_mutex); 10322 10323 } 10324 } 10325 10326 if (ns_cmd) { 10327 fctl_free_ns_cmd(ns_cmd); 10328 } 10329 10330 listlen = 0; 10331 changelist = NULL; 10332 if (count) { 10333 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) { 10334 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET; 10335 mutex_exit(&port->fp_mutex); 10336 delay(drv_usectohz(FLA_RR_TOV * 1000 * 1000)); 10337 mutex_enter(&port->fp_mutex); 10338 } 10339 10340 dbg_count = 0; 10341 10342 job->job_counter = count; 10343 10344 for (index = 0; index < pwwn_table_size; index++) { 10345 head = &port->fp_pwwn_table[index]; 10346 npd = head->pwwn_head; 10347 10348 while ((pd = npd) != NULL) { 10349 npd = pd->pd_wwn_hnext; 10350 10351 mutex_enter(&pd->pd_mutex); 10352 if (pd->pd_flags != PD_ELS_MARK) { 10353 mutex_exit(&pd->pd_mutex); 10354 continue; 10355 } 10356 10357 dbg_count++; 10358 10359 /* 10360 * If it is already marked deletion, nothing 10361 * else to do. 10362 */ 10363 if (pd->pd_type == PORT_DEVICE_DELETE) { 10364 pd->pd_type = PORT_DEVICE_OLD; 10365 10366 mutex_exit(&pd->pd_mutex); 10367 mutex_exit(&port->fp_mutex); 10368 fp_jobdone(job); 10369 mutex_enter(&port->fp_mutex); 10370 10371 continue; 10372 } 10373 10374 /* 10375 * If it is freshly discovered out of 10376 * the orphan list, nothing else to do 10377 */ 10378 if (pd->pd_type == PORT_DEVICE_NEW) { 10379 pd->pd_flags = PD_IDLE; 10380 10381 mutex_exit(&pd->pd_mutex); 10382 mutex_exit(&port->fp_mutex); 10383 fp_jobdone(job); 10384 mutex_enter(&port->fp_mutex); 10385 10386 continue; 10387 } 10388 10389 pd->pd_flags = PD_IDLE; 10390 d_id = pd->pd_port_id.port_id; 10391 10392 /* 10393 * Explicitly mark all devices OLD; successful 10394 * PLOGI should reset this to either NO_CHANGE 10395 * or CHANGED. 
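 * The PLOGI itself is issued below via fp_port_login() with
 * FP_CMD_PLOGI_RETAIN; job_counter was set to 'count' above and
 * fp_jobwait() waits for all of the logins to complete.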
10396 */ 10397 if (pd->pd_type != PORT_DEVICE_CHANGED) { 10398 pd->pd_type = PORT_DEVICE_OLD; 10399 } 10400 10401 mutex_exit(&pd->pd_mutex); 10402 mutex_exit(&port->fp_mutex); 10403 10404 rval = fp_port_login(port, d_id, job, 10405 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL); 10406 10407 if (rval != FC_SUCCESS) { 10408 fp_jobdone(job); 10409 } 10410 mutex_enter(&port->fp_mutex); 10411 } 10412 } 10413 mutex_exit(&port->fp_mutex); 10414 10415 ASSERT(dbg_count == count); 10416 fp_jobwait(job); 10417 10418 mutex_enter(&port->fp_mutex); 10419 10420 ASSERT(port->fp_statec_busy > 0); 10421 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10422 if (port->fp_statec_busy > 1) { 10423 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 10424 } 10425 } 10426 mutex_exit(&port->fp_mutex); 10427 } else { 10428 ASSERT(port->fp_statec_busy > 0); 10429 if (port->fp_statec_busy > 1) { 10430 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 10431 } 10432 mutex_exit(&port->fp_mutex); 10433 } 10434 10435 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10436 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 10437 10438 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 10439 listlen, listlen, KM_SLEEP); 10440 10441 mutex_enter(&port->fp_mutex); 10442 } else { 10443 ASSERT(changelist == NULL && listlen == 0); 10444 mutex_enter(&port->fp_mutex); 10445 if (--port->fp_statec_busy == 0) { 10446 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 10447 } 10448 } 10449 } 10450 10451 10452 /* 10453 * Fill out device list for userland ioctl in private loop 10454 */ 10455 static int 10456 fp_fillout_loopmap(fc_local_port_t *port, fcio_t *fcio, int mode) 10457 { 10458 int rval; 10459 int count; 10460 int index; 10461 int num_devices; 10462 fc_remote_node_t *node; 10463 fc_port_dev_t *devlist; 10464 int lilp_device_count; 10465 fc_lilpmap_t *lilp_map; 10466 uchar_t *alpa_list; 10467 10468 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10469 10470 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 10471 if (port->fp_total_devices > port->fp_dev_count && 10472 num_devices >= port->fp_total_devices) { 10473 job_request_t *job; 10474 10475 mutex_exit(&port->fp_mutex); 10476 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, NULL, KM_SLEEP); 10477 job->job_counter = 1; 10478 10479 mutex_enter(&port->fp_mutex); 10480 fp_get_loopmap(port, job); 10481 mutex_exit(&port->fp_mutex); 10482 10483 fp_jobwait(job); 10484 fctl_dealloc_job(job); 10485 } else { 10486 mutex_exit(&port->fp_mutex); 10487 } 10488 devlist = kmem_zalloc(sizeof (*devlist) * num_devices, KM_SLEEP); 10489 10490 mutex_enter(&port->fp_mutex); 10491 10492 /* 10493 * Applications are accustomed to getting the device list in 10494 * LILP map order. The HBA firmware usually returns the device 10495 * map in the LILP map order and diagnostic applications would 10496 * prefer to receive in the device list in that order too 10497 */ 10498 lilp_map = &port->fp_lilp_map; 10499 alpa_list = &lilp_map->lilp_alpalist[0]; 10500 10501 /* 10502 * the length field corresponds to the offset in the LILP frame 10503 * which begins with 1. The thing to note here is that the 10504 * lilp_device_count is 1 more than fp->fp_total_devices since 10505 * the host adapter's alpa also shows up in the lilp map. We 10506 * don't however return details of the host adapter since 10507 * fctl_get_remote_port_by_did fails for the host adapter's ALPA 10508 * and applications are required to issue the FCIO_GET_HOST_PARAMS 10509 * ioctl to obtain details about the host adapter port. 
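 * The count returned through fcio_abuf therefore covers only the
 * remote devices that were filled into fcio_obuf; the host adapter
 * itself is never included.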
10510 */ 10511 lilp_device_count = lilp_map->lilp_length; 10512 10513 for (count = index = 0; index < lilp_device_count && 10514 count < num_devices; index++) { 10515 uint32_t d_id; 10516 fc_remote_port_t *pd; 10517 10518 d_id = alpa_list[index]; 10519 10520 mutex_exit(&port->fp_mutex); 10521 pd = fctl_get_remote_port_by_did(port, d_id); 10522 mutex_enter(&port->fp_mutex); 10523 10524 if (pd != NULL) { 10525 mutex_enter(&pd->pd_mutex); 10526 10527 if (pd->pd_state == PORT_DEVICE_INVALID) { 10528 mutex_exit(&pd->pd_mutex); 10529 continue; 10530 } 10531 10532 devlist[count].dev_state = pd->pd_state; 10533 devlist[count].dev_hard_addr = pd->pd_hard_addr; 10534 devlist[count].dev_did = pd->pd_port_id; 10535 devlist[count].dev_did.priv_lilp_posit = 10536 (uint8_t)(index & 0xff); 10537 bcopy((caddr_t)pd->pd_fc4types, 10538 (caddr_t)devlist[count].dev_type, 10539 sizeof (pd->pd_fc4types)); 10540 10541 bcopy((caddr_t)&pd->pd_port_name, 10542 (caddr_t)&devlist[count].dev_pwwn, 10543 sizeof (la_wwn_t)); 10544 10545 node = pd->pd_remote_nodep; 10546 mutex_exit(&pd->pd_mutex); 10547 10548 if (node) { 10549 mutex_enter(&node->fd_mutex); 10550 bcopy((caddr_t)&node->fd_node_name, 10551 (caddr_t)&devlist[count].dev_nwwn, 10552 sizeof (la_wwn_t)); 10553 mutex_exit(&node->fd_mutex); 10554 } 10555 count++; 10556 } 10557 } 10558 10559 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf, 10560 sizeof (count), mode)) { 10561 rval = FC_FAILURE; 10562 } 10563 10564 if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf, 10565 sizeof (fc_port_dev_t) * num_devices, mode)) { 10566 rval = FC_FAILURE; 10567 } else { 10568 rval = FC_SUCCESS; 10569 } 10570 10571 kmem_free(devlist, sizeof (*devlist) * num_devices); 10572 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10573 10574 return (rval); 10575 } 10576 10577 10578 /* 10579 * Completion function for responses to unsolicited commands 10580 */ 10581 static void 10582 fp_unsol_intr(fc_packet_t *pkt) 10583 { 10584 fp_cmd_t *cmd; 10585 fc_local_port_t *port; 10586 10587 cmd = pkt->pkt_ulp_private; 10588 port = cmd->cmd_port; 10589 10590 mutex_enter(&port->fp_mutex); 10591 port->fp_out_fpcmds--; 10592 mutex_exit(&port->fp_mutex); 10593 10594 if (pkt->pkt_state != FC_PKT_SUCCESS) { 10595 fp_printf(port, CE_WARN, FP_LOG_ONLY, 0, pkt, 10596 "couldn't post response to unsolicited request;" 10597 " ox_id=%x rx_id=%x", pkt->pkt_cmd_fhdr.ox_id, 10598 pkt->pkt_resp_fhdr.rx_id); 10599 } 10600 10601 if (cmd == port->fp_els_resp_pkt) { 10602 mutex_enter(&port->fp_mutex); 10603 port->fp_els_resp_pkt_busy = 0; 10604 mutex_exit(&port->fp_mutex); 10605 return; 10606 } 10607 10608 fp_free_pkt(cmd); 10609 } 10610 10611 10612 /* 10613 * solicited LINIT ELS completion function 10614 */ 10615 static void 10616 fp_linit_intr(fc_packet_t *pkt) 10617 { 10618 fp_cmd_t *cmd; 10619 job_request_t *job; 10620 fc_linit_resp_t acc; 10621 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port; 10622 10623 cmd = (fp_cmd_t *)pkt->pkt_ulp_private; 10624 10625 mutex_enter(&cmd->cmd_port->fp_mutex); 10626 cmd->cmd_port->fp_out_fpcmds--; 10627 mutex_exit(&cmd->cmd_port->fp_mutex); 10628 10629 if (FP_IS_PKT_ERROR(pkt)) { 10630 (void) fp_common_intr(pkt, 1); 10631 return; 10632 } 10633 10634 job = cmd->cmd_job; 10635 10636 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&acc, 10637 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR); 10638 if (acc.status != FC_LINIT_SUCCESS) { 10639 job->job_result = FC_FAILURE; 10640 } else { 10641 job->job_result = FC_SUCCESS; 10642 } 10643 10644 fp_iodone(cmd); 
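	/*
	 * fp_iodone() completes the command; the job_result set above is
	 * what the waiter (e.g. the FCIO_RESET_LINK path) sees once
	 * fctl_jobwait() returns.
	 */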
10645 } 10646 10647 10648 /* 10649 * Decode the unsolicited request; For FC-4 Device and Link data frames 10650 * notify the registered ULP of this FC-4 type right here. For Unsolicited 10651 * ELS requests, submit a request to the job_handler thread to work on it. 10652 * The intent is to act quickly on the FC-4 unsolicited link and data frames 10653 * and save much of the interrupt time processing of unsolicited ELS requests 10654 * and hand it off to the job_handler thread. 10655 */ 10656 static void 10657 fp_unsol_cb(opaque_t port_handle, fc_unsol_buf_t *buf, uint32_t type) 10658 { 10659 uchar_t r_ctl; 10660 uchar_t ls_code; 10661 uint32_t s_id; 10662 uint32_t rscn_count = FC_INVALID_RSCN_COUNT; 10663 uint32_t cb_arg; 10664 fp_cmd_t *cmd; 10665 fc_local_port_t *port; 10666 job_request_t *job; 10667 fc_remote_port_t *pd; 10668 10669 port = port_handle; 10670 10671 FP_TRACE(FP_NHEAD1(1, 0), "fp_unsol_cb: s_id=%x," 10672 " d_id=%x, type=%x, r_ctl=%x, f_ctl=%x" 10673 " seq_id=%x, df_ctl=%x, seq_cnt=%x, ox_id=%x, rx_id=%x" 10674 " ro=%x, buffer[0]:%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 10675 buf->ub_frame.type, buf->ub_frame.r_ctl, buf->ub_frame.f_ctl, 10676 buf->ub_frame.seq_id, buf->ub_frame.df_ctl, buf->ub_frame.seq_cnt, 10677 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro, 10678 buf->ub_buffer[0]); 10679 10680 if (type & 0x80000000) { 10681 /* 10682 * Huh ? Nothing much can be done without 10683 * a valid buffer. So just exit. 10684 */ 10685 return; 10686 } 10687 /* 10688 * If the unsolicited interrupts arrive while it isn't 10689 * safe to handle unsolicited callbacks; Drop them, yes, 10690 * drop them on the floor 10691 */ 10692 mutex_enter(&port->fp_mutex); 10693 port->fp_active_ubs++; 10694 if ((port->fp_soft_state & 10695 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 10696 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 10697 10698 FP_TRACE(FP_NHEAD1(3, 0), "fp_unsol_cb: port state is " 10699 "not ONLINE. 
s_id=%x, d_id=%x, type=%x, " 10700 "seq_id=%x, ox_id=%x, rx_id=%x" 10701 "ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 10702 buf->ub_frame.type, buf->ub_frame.seq_id, 10703 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 10704 10705 ASSERT(port->fp_active_ubs > 0); 10706 if (--(port->fp_active_ubs) == 0) { 10707 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10708 } 10709 10710 mutex_exit(&port->fp_mutex); 10711 10712 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10713 1, &buf->ub_token); 10714 10715 return; 10716 } 10717 10718 r_ctl = buf->ub_frame.r_ctl; 10719 s_id = buf->ub_frame.s_id; 10720 if (port->fp_active_ubs == 1) { 10721 port->fp_soft_state |= FP_SOFT_IN_UNSOL_CB; 10722 } 10723 10724 if (r_ctl == R_CTL_ELS_REQ && buf->ub_buffer[0] == LA_ELS_LOGO && 10725 port->fp_statec_busy) { 10726 mutex_exit(&port->fp_mutex); 10727 pd = fctl_get_remote_port_by_did(port, s_id); 10728 if (pd) { 10729 mutex_enter(&pd->pd_mutex); 10730 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 10731 FP_TRACE(FP_NHEAD1(3, 0), 10732 "LOGO for LOGGED IN D_ID %x", 10733 buf->ub_frame.s_id); 10734 pd->pd_state = PORT_DEVICE_VALID; 10735 } 10736 mutex_exit(&pd->pd_mutex); 10737 } 10738 10739 mutex_enter(&port->fp_mutex); 10740 ASSERT(port->fp_active_ubs > 0); 10741 if (--(port->fp_active_ubs) == 0) { 10742 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10743 } 10744 mutex_exit(&port->fp_mutex); 10745 10746 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10747 1, &buf->ub_token); 10748 10749 FP_TRACE(FP_NHEAD1(3, 0), 10750 "fp_unsol_cb() bailing out LOGO for D_ID %x", 10751 buf->ub_frame.s_id); 10752 return; 10753 } 10754 10755 if (port->fp_els_resp_pkt_busy == 0) { 10756 if (r_ctl == R_CTL_ELS_REQ) { 10757 ls_code = buf->ub_buffer[0]; 10758 10759 switch (ls_code) { 10760 case LA_ELS_PLOGI: 10761 case LA_ELS_FLOGI: 10762 port->fp_els_resp_pkt_busy = 1; 10763 mutex_exit(&port->fp_mutex); 10764 fp_i_handle_unsol_els(port, buf); 10765 10766 mutex_enter(&port->fp_mutex); 10767 ASSERT(port->fp_active_ubs > 0); 10768 if (--(port->fp_active_ubs) == 0) { 10769 port->fp_soft_state &= 10770 ~FP_SOFT_IN_UNSOL_CB; 10771 } 10772 mutex_exit(&port->fp_mutex); 10773 port->fp_fca_tran->fca_ub_release( 10774 port->fp_fca_handle, 1, &buf->ub_token); 10775 10776 return; 10777 case LA_ELS_RSCN: 10778 if (++(port)->fp_rscn_count == 10779 FC_INVALID_RSCN_COUNT) { 10780 ++(port)->fp_rscn_count; 10781 } 10782 rscn_count = port->fp_rscn_count; 10783 break; 10784 10785 default: 10786 break; 10787 } 10788 } 10789 } else if ((r_ctl == R_CTL_ELS_REQ) && 10790 (buf->ub_buffer[0] == LA_ELS_RSCN)) { 10791 if (++port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 10792 ++port->fp_rscn_count; 10793 } 10794 rscn_count = port->fp_rscn_count; 10795 } 10796 10797 mutex_exit(&port->fp_mutex); 10798 10799 switch (r_ctl & R_CTL_ROUTING) { 10800 case R_CTL_DEVICE_DATA: 10801 /* 10802 * If the unsolicited buffer is a CT IU, 10803 * have the job_handler thread work on it. 10804 */ 10805 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) { 10806 break; 10807 } 10808 /* FALLTHROUGH */ 10809 10810 case R_CTL_FC4_SVC: { 10811 int sendup = 0; 10812 10813 /* 10814 * If a LOGIN isn't performed before this request 10815 * shut the door on this port with a reply that a 10816 * LOGIN is required. We make an exception however 10817 * for IP broadcast packets and pass them through 10818 * to the IP ULP(s) to handle broadcast requests. 
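 * (Broadcast frames are recognized below by the well-known
 * destination IDs 0xFFFFFF and 0x0 on FC_TYPE_IS8802_SNAP frames.)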
10819 * This is not a problem for private loop devices 10820 * but for fabric topologies we don't log into the 10821 * remote ports during port initialization and 10822 * the ULPs need to log into requesting ports on 10823 * demand. 10824 */ 10825 pd = fctl_get_remote_port_by_did(port, s_id); 10826 if (pd) { 10827 mutex_enter(&pd->pd_mutex); 10828 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 10829 sendup++; 10830 } 10831 mutex_exit(&pd->pd_mutex); 10832 } else if ((pd == NULL) && 10833 (buf->ub_frame.type == FC_TYPE_IS8802_SNAP) && 10834 (buf->ub_frame.d_id == 0xffffff || 10835 buf->ub_frame.d_id == 0x00)) { 10836 /* brodacst IP frame - so sendup via job thread */ 10837 break; 10838 } 10839 10840 /* 10841 * Send all FC4 services via job thread too 10842 */ 10843 if ((r_ctl & R_CTL_ROUTING) == R_CTL_FC4_SVC) { 10844 break; 10845 } 10846 10847 if (sendup || !FC_IS_REAL_DEVICE(s_id)) { 10848 fctl_ulp_unsol_cb(port, buf, buf->ub_frame.type); 10849 return; 10850 } 10851 10852 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10853 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 10854 0, KM_NOSLEEP, pd); 10855 if (cmd != NULL) { 10856 fp_els_rjt_init(port, cmd, buf, 10857 FC_ACTION_NON_RETRYABLE, 10858 FC_REASON_LOGIN_REQUIRED, NULL); 10859 10860 if (fp_sendcmd(port, cmd, 10861 port->fp_fca_handle) != FC_SUCCESS) { 10862 fp_free_pkt(cmd); 10863 } 10864 } 10865 } 10866 10867 mutex_enter(&port->fp_mutex); 10868 ASSERT(port->fp_active_ubs > 0); 10869 if (--(port->fp_active_ubs) == 0) { 10870 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10871 } 10872 mutex_exit(&port->fp_mutex); 10873 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10874 1, &buf->ub_token); 10875 10876 return; 10877 } 10878 10879 default: 10880 break; 10881 } 10882 10883 /* 10884 * Submit a Request to the job_handler thread to work 10885 * on the unsolicited request. The potential side effect 10886 * of this is that the unsolicited buffer takes a little 10887 * longer to get released but we save interrupt time in 10888 * the bargain. 10889 */ 10890 cb_arg = (rscn_count == FC_INVALID_RSCN_COUNT) ? NULL : rscn_count; 10891 10892 /* 10893 * One way that the rscn_count will get used is described below : 10894 * 10895 * 1. fp_unsol_cb() gets an RSCN and updates fp_rscn_count. 10896 * 2. Before mutex is released, a copy of it is stored in rscn_count. 10897 * 3. The count is passed to job thread as JOB_UNSOL_REQUEST (below) 10898 * by overloading the job_cb_arg to pass the rscn_count 10899 * 4. When one of the routines processing the RSCN picks it up (ex: 10900 * fp_validate_rscn_page()), it passes this count in the map 10901 * structure (as part of the map_rscn_info structure member) to the 10902 * ULPs. 10903 * 5. When ULPs make calls back to the transport (example interfaces for 10904 * this are fc_ulp_transport(), fc_ulp_login(), fc_issue_els()), they 10905 * can now pass back this count as part of the fc_packet's 10906 * pkt_ulp_rscn_count member. fcp does this currently. 10907 * 6. When transport gets a call to transport a command on the wire, it 10908 * will check to see if there is a valid pkt_ulp_rsvd1 field in the 10909 * fc_packet. If there is, it will match that info with the current 10910 * rscn_count on that instance of the port. If they don't match up 10911 * then there was a newer RSCN. The ULP gets back an error code which 10912 * informs it about it - FC_DEVICE_BUSY_NEW_RSCN. 10913 * 7. At this point the ULP is free to make up its own mind as to how to 10914 * handle this. 
Currently, fcp will reset its retry counters and keep 10915 * retrying the operation it was doing in anticipation of getting a 10916 * new state change call back for the new RSCN. 10917 */ 10918 job = fctl_alloc_job(JOB_UNSOL_REQUEST, 0, NULL, 10919 (opaque_t)(uintptr_t)cb_arg, KM_NOSLEEP); 10920 if (job == NULL) { 10921 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, "fp_unsol_cb() " 10922 "couldn't submit a job to the thread, failing.."); 10923 10924 mutex_enter(&port->fp_mutex); 10925 10926 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 10927 --port->fp_rscn_count; 10928 } 10929 10930 ASSERT(port->fp_active_ubs > 0); 10931 if (--(port->fp_active_ubs) == 0) { 10932 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10933 } 10934 10935 mutex_exit(&port->fp_mutex); 10936 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10937 1, &buf->ub_token); 10938 10939 return; 10940 } 10941 job->job_private = (void *)buf; 10942 fctl_enque_job(port, job); 10943 } 10944 10945 10946 /* 10947 * Handle unsolicited requests 10948 */ 10949 static void 10950 fp_handle_unsol_buf(fc_local_port_t *port, fc_unsol_buf_t *buf, 10951 job_request_t *job) 10952 { 10953 uchar_t r_ctl; 10954 uchar_t ls_code; 10955 uint32_t s_id; 10956 fp_cmd_t *cmd; 10957 fc_remote_port_t *pd; 10958 fp_unsol_spec_t *ub_spec; 10959 10960 r_ctl = buf->ub_frame.r_ctl; 10961 s_id = buf->ub_frame.s_id; 10962 10963 switch (r_ctl & R_CTL_ROUTING) { 10964 case R_CTL_EXTENDED_SVC: 10965 if (r_ctl != R_CTL_ELS_REQ) { 10966 break; 10967 } 10968 10969 ls_code = buf->ub_buffer[0]; 10970 switch (ls_code) { 10971 case LA_ELS_LOGO: 10972 case LA_ELS_ADISC: 10973 case LA_ELS_PRLO: 10974 pd = fctl_get_remote_port_by_did(port, s_id); 10975 if (pd == NULL) { 10976 if (!FC_IS_REAL_DEVICE(s_id)) { 10977 break; 10978 } 10979 if (!FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10980 break; 10981 } 10982 if ((cmd = fp_alloc_pkt(port, 10983 sizeof (la_els_rjt_t), 0, KM_SLEEP, 10984 NULL)) == NULL) { 10985 /* 10986 * Can this actually fail when 10987 * given KM_SLEEP? (Could be used 10988 * this way in a number of places.) 10989 */ 10990 break; 10991 } 10992 10993 fp_els_rjt_init(port, cmd, buf, 10994 FC_ACTION_NON_RETRYABLE, 10995 FC_REASON_INVALID_LINK_CTRL, job); 10996 10997 if (fp_sendcmd(port, cmd, 10998 port->fp_fca_handle) != FC_SUCCESS) { 10999 fp_free_pkt(cmd); 11000 } 11001 11002 break; 11003 } 11004 if (ls_code == LA_ELS_LOGO) { 11005 fp_handle_unsol_logo(port, buf, pd, job); 11006 } else if (ls_code == LA_ELS_ADISC) { 11007 fp_handle_unsol_adisc(port, buf, pd, job); 11008 } else { 11009 fp_handle_unsol_prlo(port, buf, pd, job); 11010 } 11011 break; 11012 11013 case LA_ELS_PLOGI: 11014 fp_handle_unsol_plogi(port, buf, job, KM_SLEEP); 11015 break; 11016 11017 case LA_ELS_FLOGI: 11018 fp_handle_unsol_flogi(port, buf, job, KM_SLEEP); 11019 break; 11020 11021 case LA_ELS_RSCN: 11022 fp_handle_unsol_rscn(port, buf, job, KM_SLEEP); 11023 break; 11024 11025 default: 11026 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP); 11027 ub_spec->port = port; 11028 ub_spec->buf = buf; 11029 11030 (void) taskq_dispatch(port->fp_taskq, 11031 fp_ulp_unsol_cb, ub_spec, KM_SLEEP); 11032 return; 11033 } 11034 break; 11035 11036 case R_CTL_BASIC_SVC: 11037 /* 11038 * The unsolicited basic link services could be ABTS 11039 * and RMC (Or even a NOP). Just BA_RJT them until 11040 * such time there arises a need to handle them more 11041 * carefully. 
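 *
 * For reference, the reply built by fp_ba_rjt_init() below amounts to:
 *
 *	payload.reason_code = FC_REASON_CMD_UNSUPPORTED;
 *	payload.explanation = FC_EXPLN_NONE;
 *
 * and it is only attempted for Class 1 and 2 buffers (see the test
 * that follows); no reply is sent otherwise.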
11042 */ 11043 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11044 cmd = fp_alloc_pkt(port, sizeof (la_ba_rjt_t), 11045 0, KM_SLEEP, NULL); 11046 if (cmd != NULL) { 11047 fp_ba_rjt_init(port, cmd, buf, job); 11048 if (fp_sendcmd(port, cmd, 11049 port->fp_fca_handle) != FC_SUCCESS) { 11050 fp_free_pkt(cmd); 11051 } 11052 } 11053 } 11054 break; 11055 11056 case R_CTL_DEVICE_DATA: 11057 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) { 11058 /* 11059 * Mostly this is of type FC_TYPE_FC_SERVICES. 11060 * As we don't like any Unsolicited FC services 11061 * requests, we would do well to RJT them as 11062 * well. 11063 */ 11064 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11065 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11066 0, KM_SLEEP, NULL); 11067 if (cmd != NULL) { 11068 fp_els_rjt_init(port, cmd, buf, 11069 FC_ACTION_NON_RETRYABLE, 11070 FC_REASON_INVALID_LINK_CTRL, job); 11071 11072 if (fp_sendcmd(port, cmd, 11073 port->fp_fca_handle) != 11074 FC_SUCCESS) { 11075 fp_free_pkt(cmd); 11076 } 11077 } 11078 } 11079 break; 11080 } 11081 /* FALLTHROUGH */ 11082 11083 case R_CTL_FC4_SVC: 11084 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP); 11085 ub_spec->port = port; 11086 ub_spec->buf = buf; 11087 11088 (void) taskq_dispatch(port->fp_taskq, 11089 fp_ulp_unsol_cb, ub_spec, KM_SLEEP); 11090 return; 11091 11092 case R_CTL_LINK_CTL: 11093 /* 11094 * Turn deaf ear on unsolicited link control frames. 11095 * Typical unsolicited link control Frame is an LCR 11096 * (to reset End to End credit to the default login 11097 * value and abort current sequences for all classes) 11098 * An intelligent microcode/firmware should handle 11099 * this transparently at its level and not pass all 11100 * the way up here. 11101 * 11102 * Possible responses to LCR are R_RDY, F_RJT, P_RJT 11103 * or F_BSY. P_RJT is chosen to be the most appropriate 11104 * at this time. 11105 */ 11106 /* FALLTHROUGH */ 11107 11108 default: 11109 /* 11110 * Just reject everything else as an invalid request. 11111 */ 11112 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11113 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11114 0, KM_SLEEP, NULL); 11115 if (cmd != NULL) { 11116 fp_els_rjt_init(port, cmd, buf, 11117 FC_ACTION_NON_RETRYABLE, 11118 FC_REASON_INVALID_LINK_CTRL, job); 11119 11120 if (fp_sendcmd(port, cmd, 11121 port->fp_fca_handle) != FC_SUCCESS) { 11122 fp_free_pkt(cmd); 11123 } 11124 } 11125 } 11126 break; 11127 } 11128 11129 mutex_enter(&port->fp_mutex); 11130 ASSERT(port->fp_active_ubs > 0); 11131 if (--(port->fp_active_ubs) == 0) { 11132 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 11133 } 11134 mutex_exit(&port->fp_mutex); 11135 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 11136 1, &buf->ub_token); 11137 } 11138 11139 11140 /* 11141 * Prepare a BA_RJT and send it over. 
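 *
 * The reply reuses the addressing of the unsolicited frame:
 * fp_unsol_resp_init() swaps the S_ID and D_ID and copies the
 * OX_ID/RX_ID pair from the received frame, so the BA_RJT goes back
 * on the offending exchange with reason FC_REASON_CMD_UNSUPPORTED.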
11142 */ 11143 static void 11144 fp_ba_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11145 job_request_t *job) 11146 { 11147 fc_packet_t *pkt; 11148 la_ba_rjt_t payload; 11149 11150 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11151 11152 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11153 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11154 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11155 cmd->cmd_retry_count = 1; 11156 cmd->cmd_ulp_pkt = NULL; 11157 11158 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11159 cmd->cmd_job = job; 11160 11161 pkt = &cmd->cmd_pkt; 11162 11163 fp_unsol_resp_init(pkt, buf, R_CTL_LS_BA_RJT, FC_TYPE_BASIC_LS); 11164 11165 payload.reserved = 0; 11166 payload.reason_code = FC_REASON_CMD_UNSUPPORTED; 11167 payload.explanation = FC_EXPLN_NONE; 11168 payload.vendor = 0; 11169 11170 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 11171 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11172 } 11173 11174 11175 /* 11176 * Prepare an LS_RJT and send it over 11177 */ 11178 static void 11179 fp_els_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11180 uchar_t action, uchar_t reason, job_request_t *job) 11181 { 11182 fc_packet_t *pkt; 11183 la_els_rjt_t payload; 11184 11185 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11186 11187 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11188 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11189 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11190 cmd->cmd_retry_count = 1; 11191 cmd->cmd_ulp_pkt = NULL; 11192 11193 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11194 cmd->cmd_job = job; 11195 11196 pkt = &cmd->cmd_pkt; 11197 11198 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 11199 11200 payload.ls_code.ls_code = LA_ELS_RJT; 11201 payload.ls_code.mbz = 0; 11202 payload.action = action; 11203 payload.reason = reason; 11204 payload.reserved = 0; 11205 payload.vu = 0; 11206 11207 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 11208 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11209 } 11210 11211 /* 11212 * Function: fp_prlo_acc_init 11213 * 11214 * Description: Initializes an Link Service Accept for a PRLO. 11215 * 11216 * Arguments: *port Local port through which the PRLO was 11217 * received. 11218 * cmd Command that will carry the accept. 11219 * *buf Unsolicited buffer containing the PRLO 11220 * request. 11221 * job Job request. 11222 * sleep Allocation mode. 11223 * 11224 * Return Value: *cmd Command containing the response. 11225 * 11226 * Context: Depends on the parameter sleep. 11227 */ 11228 fp_cmd_t * 11229 fp_prlo_acc_init(fc_local_port_t *port, fc_remote_port_t *pd, 11230 fc_unsol_buf_t *buf, job_request_t *job, int sleep) 11231 { 11232 fp_cmd_t *cmd; 11233 fc_packet_t *pkt; 11234 la_els_prlo_t *req; 11235 size_t len; 11236 uint16_t flags; 11237 11238 req = (la_els_prlo_t *)buf->ub_buffer; 11239 len = (size_t)ntohs(req->payload_length); 11240 11241 /* 11242 * The payload of the accept to a PRLO has to be the exact match of 11243 * the payload of the request (at the exception of the code). 11244 */ 11245 cmd = fp_alloc_pkt(port, (int)len, 0, sleep, pd); 11246 11247 if (cmd) { 11248 /* 11249 * The fp command was successfully allocated. 
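 *
 * In effect the accept is the request echoed back with only the
 * command code and response code rewritten, roughly:
 *
 *	req->ls_code = LA_ELS_ACC;
 *	flags = ntohs(req->flags) & ~SP_RESP_CODE_MASK;
 *	req->flags = htons(flags | SP_RESP_CODE_REQ_EXECUTED);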
11250 */ 11251 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11252 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11253 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11254 cmd->cmd_retry_count = 1; 11255 cmd->cmd_ulp_pkt = NULL; 11256 11257 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11258 cmd->cmd_job = job; 11259 11260 pkt = &cmd->cmd_pkt; 11261 11262 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, 11263 FC_TYPE_EXTENDED_LS); 11264 11265 /* The code is overwritten for the copy. */ 11266 req->ls_code = LA_ELS_ACC; 11267 /* Response code is set. */ 11268 flags = ntohs(req->flags); 11269 flags &= ~SP_RESP_CODE_MASK; 11270 flags |= SP_RESP_CODE_REQ_EXECUTED; 11271 req->flags = htons(flags); 11272 11273 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)req, 11274 (uint8_t *)pkt->pkt_cmd, len, DDI_DEV_AUTOINCR); 11275 } 11276 return (cmd); 11277 } 11278 11279 /* 11280 * Prepare an ACC response to an ELS request 11281 */ 11282 static void 11283 fp_els_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11284 job_request_t *job) 11285 { 11286 fc_packet_t *pkt; 11287 ls_code_t payload; 11288 11289 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11290 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11291 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11292 cmd->cmd_retry_count = 1; 11293 cmd->cmd_ulp_pkt = NULL; 11294 11295 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11296 cmd->cmd_job = job; 11297 11298 pkt = &cmd->cmd_pkt; 11299 11300 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 11301 11302 payload.ls_code = LA_ELS_ACC; 11303 payload.mbz = 0; 11304 11305 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 11306 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11307 } 11308 11309 /* 11310 * Unsolicited PRLO handler 11311 * 11312 * A Process Logout should be handled by the ULP that established it. However, 11313 * some devices send a PRLO to trigger a PLOGI followed by a PRLI. This happens 11314 * when a device implicitly logs out an initiator (for whatever reason) and 11315 * tries to get that initiator to restablish the connection (PLOGI and PRLI). 11316 * The logical thing to do for the device would be to send a LOGO in response 11317 * to any FC4 frame sent by the initiator. Some devices choose, however, to send 11318 * a PRLO instead. 11319 * 11320 * From a Fibre Channel standpoint a PRLO calls for a PRLI. There's no reason to 11321 * think that the Port Login has been lost. If we follow the Fibre Channel 11322 * protocol to the letter a PRLI should be sent after accepting the PRLO. If 11323 * the Port Login has also been lost, the remote port will reject the PRLI 11324 * indicating that we must PLOGI first. The initiator will then turn around and 11325 * send a PLOGI. The way Leadville is layered and the way the ULP interface 11326 * is defined doesn't allow this scenario to be followed easily. If FCP were to 11327 * handle the PRLO and attempt the PRLI, the reject indicating that a PLOGI is 11328 * needed would be received by FCP. FCP would have, then, to tell the transport 11329 * (fp) to PLOGI. The problem is, the transport would still think the Port 11330 * Login is valid and there is no way for FCP to tell the transport: "PLOGI even 11331 * if you think it's not necessary". To work around that difficulty, the PRLO 11332 * is treated by the transport as a LOGO. The downside to it is a Port Login 11333 * may be disrupted (if a PLOGI wasn't actually needed) and another ULP (that 11334 * has nothing to do with the PRLO) may be impacted. 
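 * (The impact would be that the unrelated ULP sees the remote port
 * reported back as OLD and has to re-establish its own process login
 * once the device is rediscovered.)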
However, this is a 11335 * scenario very unlikely to happen. As of today the only ULP in Leadville 11336 * using PRLI/PRLOs is FCP. For a PRLO to disrupt another ULP (that would be 11337 * FCIP), a SCSI target would have to be running FCP and FCIP (which is very 11338 * unlikely). 11339 */ 11340 static void 11341 fp_handle_unsol_prlo(fc_local_port_t *port, fc_unsol_buf_t *buf, 11342 fc_remote_port_t *pd, job_request_t *job) 11343 { 11344 int busy; 11345 int rval; 11346 int retain; 11347 fp_cmd_t *cmd; 11348 fc_portmap_t *listptr; 11349 boolean_t tolerance; 11350 la_els_prlo_t *req; 11351 11352 req = (la_els_prlo_t *)buf->ub_buffer; 11353 11354 if ((ntohs(req->payload_length) != 11355 (sizeof (service_parameter_page_t) + sizeof (ls_code_t))) || 11356 (req->page_length != sizeof (service_parameter_page_t))) { 11357 /* 11358 * We are being very restrictive. Only on page per 11359 * payload. If it is not the case we reject the ELS although 11360 * we should reply indicating we handle only single page 11361 * per PRLO. 11362 */ 11363 goto fp_reject_prlo; 11364 } 11365 11366 if (ntohs(req->payload_length) > buf->ub_bufsize) { 11367 /* 11368 * This is in case the payload advertizes a size bigger than 11369 * what it really is. 11370 */ 11371 goto fp_reject_prlo; 11372 } 11373 11374 mutex_enter(&port->fp_mutex); 11375 busy = port->fp_statec_busy; 11376 mutex_exit(&port->fp_mutex); 11377 11378 mutex_enter(&pd->pd_mutex); 11379 tolerance = fctl_tc_increment(&pd->pd_logo_tc); 11380 if (!busy) { 11381 if (pd->pd_state != PORT_DEVICE_LOGGED_IN || 11382 pd->pd_state == PORT_DEVICE_INVALID || 11383 pd->pd_flags == PD_ELS_IN_PROGRESS || 11384 pd->pd_type == PORT_DEVICE_OLD) { 11385 busy++; 11386 } 11387 } 11388 11389 if (busy) { 11390 mutex_exit(&pd->pd_mutex); 11391 11392 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x," 11393 "pd=%p - busy", 11394 pd->pd_port_id.port_id, pd); 11395 11396 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11397 goto fp_reject_prlo; 11398 } 11399 } else { 11400 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 11401 11402 if (tolerance) { 11403 fctl_tc_reset(&pd->pd_logo_tc); 11404 retain = 0; 11405 pd->pd_state = PORT_DEVICE_INVALID; 11406 } 11407 11408 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p," 11409 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd, 11410 tolerance, retain); 11411 11412 pd->pd_aux_flags |= PD_LOGGED_OUT; 11413 mutex_exit(&pd->pd_mutex); 11414 11415 cmd = fp_prlo_acc_init(port, pd, buf, job, KM_SLEEP); 11416 if (cmd == NULL) { 11417 return; 11418 } 11419 11420 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 11421 if (rval != FC_SUCCESS) { 11422 fp_free_pkt(cmd); 11423 return; 11424 } 11425 11426 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP); 11427 11428 if (retain) { 11429 fp_unregister_login(pd); 11430 fctl_copy_portmap(listptr, pd); 11431 } else { 11432 uint32_t d_id; 11433 char ww_name[17]; 11434 11435 mutex_enter(&pd->pd_mutex); 11436 d_id = pd->pd_port_id.port_id; 11437 fc_wwn_to_str(&pd->pd_port_name, ww_name); 11438 mutex_exit(&pd->pd_mutex); 11439 11440 FP_TRACE(FP_NHEAD2(9, 0), 11441 "N_x Port with D_ID=%x, PWWN=%s logged out" 11442 " %d times in %d us; Giving up", d_id, ww_name, 11443 FC_LOGO_TOLERANCE_LIMIT, 11444 FC_LOGO_TOLERANCE_TIME_LIMIT); 11445 11446 fp_fillout_old_map(listptr, pd, 0); 11447 listptr->map_type = PORT_DEVICE_OLD; 11448 } 11449 11450 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0); 11451 return; 11452 } 11453 11454 fp_reject_prlo: 11455 11456 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 0, KM_SLEEP, pd); 11457 if (cmd != NULL) { 11458 fp_els_rjt_init(port, cmd, buf, FC_ACTION_NON_RETRYABLE, 11459 FC_REASON_INVALID_LINK_CTRL, job); 11460 11461 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 11462 fp_free_pkt(cmd); 11463 } 11464 } 11465 } 11466 11467 /* 11468 * Unsolicited LOGO handler 11469 */ 11470 static void 11471 fp_handle_unsol_logo(fc_local_port_t *port, fc_unsol_buf_t *buf, 11472 fc_remote_port_t *pd, job_request_t *job) 11473 { 11474 int busy; 11475 int rval; 11476 int retain; 11477 fp_cmd_t *cmd; 11478 fc_portmap_t *listptr; 11479 boolean_t tolerance; 11480 11481 mutex_enter(&port->fp_mutex); 11482 busy = port->fp_statec_busy; 11483 mutex_exit(&port->fp_mutex); 11484 11485 mutex_enter(&pd->pd_mutex); 11486 tolerance = fctl_tc_increment(&pd->pd_logo_tc); 11487 if (!busy) { 11488 if (pd->pd_state != PORT_DEVICE_LOGGED_IN || 11489 pd->pd_state == PORT_DEVICE_INVALID || 11490 pd->pd_flags == PD_ELS_IN_PROGRESS || 11491 pd->pd_type == PORT_DEVICE_OLD) { 11492 busy++; 11493 } 11494 } 11495 11496 if (busy) { 11497 mutex_exit(&pd->pd_mutex); 11498 11499 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x," 11500 "pd=%p - busy", 11501 pd->pd_port_id.port_id, pd); 11502 11503 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11504 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11505 0, KM_SLEEP, pd); 11506 if (cmd != NULL) { 11507 fp_els_rjt_init(port, cmd, buf, 11508 FC_ACTION_NON_RETRYABLE, 11509 FC_REASON_INVALID_LINK_CTRL, job); 11510 11511 if (fp_sendcmd(port, cmd, 11512 port->fp_fca_handle) != FC_SUCCESS) { 11513 fp_free_pkt(cmd); 11514 } 11515 } 11516 } 11517 } else { 11518 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 11519 11520 if (tolerance) { 11521 fctl_tc_reset(&pd->pd_logo_tc); 11522 retain = 0; 11523 pd->pd_state = PORT_DEVICE_INVALID; 11524 } 11525 11526 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p," 11527 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd, 11528 tolerance, retain); 11529 11530 pd->pd_aux_flags |= PD_LOGGED_OUT; 11531 mutex_exit(&pd->pd_mutex); 11532 11533 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0, 11534 KM_SLEEP, pd); 11535 if (cmd == NULL) { 11536 return; 11537 } 11538 11539 fp_els_acc_init(port, cmd, buf, job); 11540 11541 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 11542 if (rval != FC_SUCCESS) { 11543 fp_free_pkt(cmd); 11544 return; 11545 } 11546 11547 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP); 11548 11549 if (retain) { 11550 job_request_t *job; 11551 fctl_ns_req_t *ns_cmd; 11552 11553 /* 11554 * when get LOGO, first try to get PID from nameserver 11555 * if failed, then we do not need 11556 * send PLOGI to that remote port 11557 */ 11558 job = fctl_alloc_job( 11559 JOB_NS_CMD, 0, NULL, (opaque_t)port, KM_SLEEP); 11560 11561 if (job != NULL) { 11562 ns_cmd = fctl_alloc_ns_cmd( 11563 sizeof (ns_req_gid_pn_t), 11564 sizeof (ns_resp_gid_pn_t), 11565 sizeof (ns_resp_gid_pn_t), 11566 0, KM_SLEEP); 11567 if (ns_cmd != NULL) { 11568 int ret; 11569 job->job_result = FC_SUCCESS; 11570 ns_cmd->ns_cmd_code = NS_GID_PN; 11571 ((ns_req_gid_pn_t *) 11572 (ns_cmd->ns_cmd_buf))->pwwn = 11573 pd->pd_port_name; 11574 ret = fp_ns_query( 11575 port, ns_cmd, job, 1, KM_SLEEP); 11576 if ((ret != FC_SUCCESS) || 11577 (job->job_result != FC_SUCCESS)) { 11578 fctl_free_ns_cmd(ns_cmd); 11579 fctl_dealloc_job(job); 11580 FP_TRACE(FP_NHEAD2(9, 0), 11581 "NS query failed,", 11582 " delete pd"); 11583 goto delete_pd; 11584 } 11585 fctl_free_ns_cmd(ns_cmd); 11586 } 11587 fctl_dealloc_job(job); 11588 } 11589 fp_unregister_login(pd); 11590 fctl_copy_portmap(listptr, pd); 11591 } else { 11592 uint32_t d_id; 11593 char ww_name[17]; 11594 11595 delete_pd: 11596 mutex_enter(&pd->pd_mutex); 11597 d_id = pd->pd_port_id.port_id; 11598 fc_wwn_to_str(&pd->pd_port_name, ww_name); 11599 mutex_exit(&pd->pd_mutex); 11600 11601 FP_TRACE(FP_NHEAD2(9, 0), 11602 "N_x Port with D_ID=%x, PWWN=%s logged out" 11603 " %d times in %d us; Giving up", d_id, ww_name, 11604 FC_LOGO_TOLERANCE_LIMIT, 11605 FC_LOGO_TOLERANCE_TIME_LIMIT); 11606 11607 fp_fillout_old_map(listptr, pd, 0); 11608 listptr->map_type = PORT_DEVICE_OLD; 11609 } 11610 11611 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0); 11612 } 11613 } 11614 11615 11616 /* 11617 * Perform general purpose preparation of a response to an unsolicited request 11618 */ 11619 static void 11620 fp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf, 11621 uchar_t r_ctl, uchar_t type) 11622 { 11623 pkt->pkt_cmd_fhdr.r_ctl = r_ctl; 11624 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id; 11625 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id; 11626 pkt->pkt_cmd_fhdr.type = type; 11627 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT; 11628 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id; 11629 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl; 11630 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt; 11631 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id; 11632 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id; 11633 pkt->pkt_cmd_fhdr.ro = 0; 11634 pkt->pkt_cmd_fhdr.rsvd = 0; 11635 pkt->pkt_comp = fp_unsol_intr; 11636 pkt->pkt_timeout = FP_ELS_TIMEOUT; 11637 pkt->pkt_ub_resp_token = (opaque_t)buf; 11638 } 11639 11640 /* 11641 * 
Immediate handling of unsolicited FLOGI and PLOGI requests. In the 11642 * early development days of public loop soc+ firmware, numerous problems 11643 * were encountered (the details are undocumented and history now) which 11644 * led to the birth of this function. 11645 * 11646 * If a pre-allocated unsolicited response packet is free, send out an 11647 * immediate response, otherwise submit the request to the port thread 11648 * to do the deferred processing. 11649 */ 11650 static void 11651 fp_i_handle_unsol_els(fc_local_port_t *port, fc_unsol_buf_t *buf) 11652 { 11653 int sent; 11654 int f_port; 11655 int do_acc; 11656 fp_cmd_t *cmd; 11657 la_els_logi_t *payload; 11658 fc_remote_port_t *pd; 11659 char dww_name[17]; 11660 11661 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11662 11663 cmd = port->fp_els_resp_pkt; 11664 11665 mutex_enter(&port->fp_mutex); 11666 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 11667 mutex_exit(&port->fp_mutex); 11668 11669 switch (buf->ub_buffer[0]) { 11670 case LA_ELS_PLOGI: { 11671 int small; 11672 11673 payload = (la_els_logi_t *)buf->ub_buffer; 11674 11675 f_port = FP_IS_F_PORT(payload-> 11676 common_service.cmn_features) ? 1 : 0; 11677 11678 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name, 11679 &payload->nport_ww_name); 11680 pd = fctl_get_remote_port_by_pwwn(port, 11681 &payload->nport_ww_name); 11682 if (pd) { 11683 mutex_enter(&pd->pd_mutex); 11684 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0; 11685 /* 11686 * Most likely this means a cross login is in 11687 * progress or a device about to be yanked out. 11688 * Only accept the plogi if my wwn is smaller. 11689 */ 11690 if (pd->pd_type == PORT_DEVICE_OLD) { 11691 sent = 1; 11692 } 11693 /* 11694 * Stop plogi request (if any) 11695 * attempt from local side to speedup 11696 * the discovery progress. 11697 * Mark the pd as PD_PLOGI_RECEPIENT. 11698 */ 11699 if (f_port == 0 && small < 0) { 11700 pd->pd_recepient = PD_PLOGI_RECEPIENT; 11701 } 11702 fc_wwn_to_str(&pd->pd_port_name, dww_name); 11703 11704 mutex_exit(&pd->pd_mutex); 11705 11706 FP_TRACE(FP_NHEAD1(3, 0), "fp_i_handle_unsol_els: " 11707 "Unsol PLOGI received. PD still exists in the " 11708 "PWWN list. pd=%p PWWN=%s, sent=%x", 11709 pd, dww_name, sent); 11710 11711 if (f_port == 0 && small < 0) { 11712 FP_TRACE(FP_NHEAD1(3, 0), 11713 "fp_i_handle_unsol_els: Mark the pd" 11714 " as plogi recipient, pd=%p, PWWN=%s" 11715 ", sent=%x", 11716 pd, dww_name, sent); 11717 } 11718 } else { 11719 sent = 0; 11720 } 11721 11722 /* 11723 * To avoid Login collisions, accept only if my WWN 11724 * is smaller than the requester (A curious side note 11725 * would be that this rule may not satisfy the PLOGIs 11726 * initiated by the switch from not-so-well known 11727 * ports such as 0xFFFC41) 11728 */ 11729 if ((f_port == 0 && small < 0) || 11730 (((small > 0 && do_acc) || 11731 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) { 11732 if (fp_is_class_supported(port->fp_cos, 11733 buf->ub_class) == FC_FAILURE) { 11734 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11735 cmd->cmd_pkt.pkt_cmdlen = 11736 sizeof (la_els_rjt_t); 11737 cmd->cmd_pkt.pkt_rsplen = 0; 11738 fp_els_rjt_init(port, cmd, buf, 11739 FC_ACTION_NON_RETRYABLE, 11740 FC_REASON_CLASS_NOT_SUPP, NULL); 11741 FP_TRACE(FP_NHEAD1(3, 0), 11742 "fp_i_handle_unsol_els: " 11743 "Unsupported class. 
" 11744 "Rejecting PLOGI"); 11745 11746 } else { 11747 mutex_enter(&port->fp_mutex); 11748 port->fp_els_resp_pkt_busy = 0; 11749 mutex_exit(&port->fp_mutex); 11750 return; 11751 } 11752 } else { 11753 cmd->cmd_pkt.pkt_cmdlen = 11754 sizeof (la_els_logi_t); 11755 cmd->cmd_pkt.pkt_rsplen = 0; 11756 11757 /* 11758 * If fp_port_id is zero and topology is 11759 * Point-to-Point, get the local port id from 11760 * the d_id in the PLOGI request. 11761 * If the outgoing FLOGI hasn't been accepted, 11762 * the topology will be unknown here. But it's 11763 * still safe to save the d_id to fp_port_id, 11764 * just because it will be overwritten later 11765 * if the topology is not Point-to-Point. 11766 */ 11767 mutex_enter(&port->fp_mutex); 11768 if ((port->fp_port_id.port_id == 0) && 11769 (port->fp_topology == FC_TOP_PT_PT || 11770 port->fp_topology == FC_TOP_UNKNOWN)) { 11771 port->fp_port_id.port_id = 11772 buf->ub_frame.d_id; 11773 } 11774 mutex_exit(&port->fp_mutex); 11775 11776 /* 11777 * Sometime later, we should validate 11778 * the service parameters instead of 11779 * just accepting it. 11780 */ 11781 fp_login_acc_init(port, cmd, buf, NULL, 11782 KM_NOSLEEP); 11783 FP_TRACE(FP_NHEAD1(3, 0), 11784 "fp_i_handle_unsol_els: Accepting PLOGI," 11785 " f_port=%d, small=%d, do_acc=%d," 11786 " sent=%d.", f_port, small, do_acc, 11787 sent); 11788 } 11789 } else { 11790 if (FP_IS_CLASS_1_OR_2(buf->ub_class) || 11791 port->fp_options & FP_SEND_RJT) { 11792 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11793 cmd->cmd_pkt.pkt_rsplen = 0; 11794 fp_els_rjt_init(port, cmd, buf, 11795 FC_ACTION_NON_RETRYABLE, 11796 FC_REASON_LOGICAL_BSY, NULL); 11797 FP_TRACE(FP_NHEAD1(3, 0), 11798 "fp_i_handle_unsol_els: " 11799 "Rejecting PLOGI with Logical Busy." 11800 "Possible Login collision."); 11801 } else { 11802 mutex_enter(&port->fp_mutex); 11803 port->fp_els_resp_pkt_busy = 0; 11804 mutex_exit(&port->fp_mutex); 11805 return; 11806 } 11807 } 11808 break; 11809 } 11810 11811 case LA_ELS_FLOGI: 11812 if (fp_is_class_supported(port->fp_cos, 11813 buf->ub_class) == FC_FAILURE) { 11814 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11815 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11816 cmd->cmd_pkt.pkt_rsplen = 0; 11817 fp_els_rjt_init(port, cmd, buf, 11818 FC_ACTION_NON_RETRYABLE, 11819 FC_REASON_CLASS_NOT_SUPP, NULL); 11820 FP_TRACE(FP_NHEAD1(3, 0), 11821 "fp_i_handle_unsol_els: " 11822 "Unsupported Class. Rejecting FLOGI."); 11823 } else { 11824 mutex_enter(&port->fp_mutex); 11825 port->fp_els_resp_pkt_busy = 0; 11826 mutex_exit(&port->fp_mutex); 11827 return; 11828 } 11829 } else { 11830 mutex_enter(&port->fp_mutex); 11831 if (FC_PORT_STATE_MASK(port->fp_state) != 11832 FC_STATE_ONLINE || (port->fp_port_id.port_id && 11833 buf->ub_frame.s_id == port->fp_port_id.port_id)) { 11834 mutex_exit(&port->fp_mutex); 11835 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11836 cmd->cmd_pkt.pkt_cmdlen = 11837 sizeof (la_els_rjt_t); 11838 cmd->cmd_pkt.pkt_rsplen = 0; 11839 fp_els_rjt_init(port, cmd, buf, 11840 FC_ACTION_NON_RETRYABLE, 11841 FC_REASON_INVALID_LINK_CTRL, 11842 NULL); 11843 FP_TRACE(FP_NHEAD1(3, 0), 11844 "fp_i_handle_unsol_els: " 11845 "Invalid Link Ctrl. 
" 11846 "Rejecting FLOGI."); 11847 } else { 11848 mutex_enter(&port->fp_mutex); 11849 port->fp_els_resp_pkt_busy = 0; 11850 mutex_exit(&port->fp_mutex); 11851 return; 11852 } 11853 } else { 11854 mutex_exit(&port->fp_mutex); 11855 cmd->cmd_pkt.pkt_cmdlen = 11856 sizeof (la_els_logi_t); 11857 cmd->cmd_pkt.pkt_rsplen = 0; 11858 /* 11859 * Let's not aggressively validate the N_Port's 11860 * service parameters until PLOGI. Suffice it 11861 * to give a hint that we are an N_Port and we 11862 * are game to some serious stuff here. 11863 */ 11864 fp_login_acc_init(port, cmd, buf, 11865 NULL, KM_NOSLEEP); 11866 FP_TRACE(FP_NHEAD1(3, 0), 11867 "fp_i_handle_unsol_els: " 11868 "Accepting FLOGI."); 11869 } 11870 } 11871 break; 11872 11873 default: 11874 return; 11875 } 11876 11877 if ((fp_sendcmd(port, cmd, port->fp_fca_handle)) != FC_SUCCESS) { 11878 mutex_enter(&port->fp_mutex); 11879 port->fp_els_resp_pkt_busy = 0; 11880 mutex_exit(&port->fp_mutex); 11881 } 11882 } 11883 11884 11885 /* 11886 * Handle unsolicited PLOGI request 11887 */ 11888 static void 11889 fp_handle_unsol_plogi(fc_local_port_t *port, fc_unsol_buf_t *buf, 11890 job_request_t *job, int sleep) 11891 { 11892 int sent; 11893 int small; 11894 int f_port; 11895 int do_acc; 11896 fp_cmd_t *cmd; 11897 la_wwn_t *swwn; 11898 la_wwn_t *dwwn; 11899 la_els_logi_t *payload; 11900 fc_remote_port_t *pd; 11901 char dww_name[17]; 11902 11903 payload = (la_els_logi_t *)buf->ub_buffer; 11904 f_port = FP_IS_F_PORT(payload->common_service.cmn_features) ? 1 : 0; 11905 11906 mutex_enter(&port->fp_mutex); 11907 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 11908 mutex_exit(&port->fp_mutex); 11909 11910 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: s_id=%x, d_id=%x," 11911 "type=%x, f_ctl=%x" 11912 " seq_id=%x, ox_id=%x, rx_id=%x" 11913 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 11914 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id, 11915 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 11916 11917 swwn = &port->fp_service_params.nport_ww_name; 11918 dwwn = &payload->nport_ww_name; 11919 small = fctl_wwn_cmp(swwn, dwwn); 11920 pd = fctl_get_remote_port_by_pwwn(port, dwwn); 11921 if (pd) { 11922 mutex_enter(&pd->pd_mutex); 11923 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0; 11924 /* 11925 * Most likely this means a cross login is in 11926 * progress or a device about to be yanked out. 11927 * Only accept the plogi if my wwn is smaller. 11928 */ 11929 11930 if (pd->pd_type == PORT_DEVICE_OLD) { 11931 sent = 1; 11932 } 11933 /* 11934 * Stop plogi request (if any) 11935 * attempt from local side to speedup 11936 * the discovery progress. 11937 * Mark the pd as PD_PLOGI_RECEPIENT. 11938 */ 11939 if (f_port == 0 && small < 0) { 11940 pd->pd_recepient = PD_PLOGI_RECEPIENT; 11941 } 11942 fc_wwn_to_str(&pd->pd_port_name, dww_name); 11943 11944 mutex_exit(&pd->pd_mutex); 11945 11946 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: Unsol PLOGI" 11947 " received. PD still exists in the PWWN list. pd=%p " 11948 "PWWN=%s, sent=%x", pd, dww_name, sent); 11949 11950 if (f_port == 0 && small < 0) { 11951 FP_TRACE(FP_NHEAD1(3, 0), 11952 "fp_handle_unsol_plogi: Mark the pd" 11953 " as plogi recipient, pd=%p, PWWN=%s" 11954 ", sent=%x", 11955 pd, dww_name, sent); 11956 } 11957 } else { 11958 sent = 0; 11959 } 11960 11961 /* 11962 * Avoid Login collisions by accepting only if my WWN is smaller. 
11963 * 11964 * A side note: There is no need to start a PLOGI from this end in 11965 * this context if login isn't going to be accepted for the 11966 * above reason as either a LIP (in private loop), RSCN (in 11967 * fabric topology), or an FLOGI (in point to point - Huh ? 11968 * check FC-PH) would normally drive the PLOGI from this end. 11969 * At this point of time there is no need for an inbound PLOGI 11970 * to kick an outbound PLOGI when it is going to be rejected 11971 * for the reason of WWN being smaller. However it isn't hard 11972 * to do that either (when such a need arises, start a timer 11973 * for a duration that extends beyond a normal device discovery 11974 * time and check if an outbound PLOGI did go before that, if 11975 * none fire one) 11976 * 11977 * Unfortunately, as it turned out, during booting, it is possible 11978 * to miss another initiator in the same loop as port driver 11979 * instances are serially attached. While preserving the above 11980 * comments for belly laughs, please kick an outbound PLOGI in 11981 * a non-switch environment (which is a pt pt between N_Ports or 11982 * a private loop) 11983 * 11984 * While preserving the above comments for amusement, send an 11985 * ACC if the PLOGI is going to be rejected for WWN being smaller 11986 * when no discovery is in progress at this end. Turn around 11987 * and make the port device as the PLOGI initiator, so that 11988 * during subsequent link/loop initialization, this end drives 11989 * the PLOGI (In fact both ends do in this particular case, but 11990 * only one wins) 11991 * 11992 * Make sure the PLOGIs initiated by the switch from not-so-well-known 11993 * ports (such as 0xFFFC41) are accepted too. 11994 */ 11995 if ((f_port == 0 && small < 0) || (((small > 0 && do_acc) || 11996 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) { 11997 if (fp_is_class_supported(port->fp_cos, 11998 buf->ub_class) == FC_FAILURE) { 11999 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 12000 cmd = fp_alloc_pkt(port, 12001 sizeof (la_els_logi_t), 0, sleep, pd); 12002 if (cmd == NULL) { 12003 return; 12004 } 12005 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 12006 cmd->cmd_pkt.pkt_rsplen = 0; 12007 fp_els_rjt_init(port, cmd, buf, 12008 FC_ACTION_NON_RETRYABLE, 12009 FC_REASON_CLASS_NOT_SUPP, job); 12010 FP_TRACE(FP_NHEAD1(3, 0), 12011 "fp_handle_unsol_plogi: " 12012 "Unsupported class. rejecting PLOGI"); 12013 } 12014 } else { 12015 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 12016 0, sleep, pd); 12017 if (cmd == NULL) { 12018 return; 12019 } 12020 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_logi_t); 12021 cmd->cmd_pkt.pkt_rsplen = 0; 12022 12023 /* 12024 * Sometime later, we should validate the service 12025 * parameters instead of just accepting it. 12026 */ 12027 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP); 12028 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: " 12029 "Accepting PLOGI, f_port=%d, small=%d, " 12030 "do_acc=%d, sent=%d.", f_port, small, do_acc, 12031 sent); 12032 12033 /* 12034 * If fp_port_id is zero and topology is 12035 * Point-to-Point, get the local port id from 12036 * the d_id in the PLOGI request. 12037 * If the outgoing FLOGI hasn't been accepted, 12038 * the topology will be unknown here. But it's 12039 * still safe to save the d_id to fp_port_id, 12040 * just because it will be overwritten later 12041 * if the topology is not Point-to-Point. 
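 *
 * (Concretely, the test that follows only adopts the D_ID while
 * fp_port_id is still zero and the topology is FC_TOP_PT_PT or
 * FC_TOP_UNKNOWN, so an address that was already assigned is never
 * clobbered.)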
12042 */ 12043 mutex_enter(&port->fp_mutex); 12044 if ((port->fp_port_id.port_id == 0) && 12045 (port->fp_topology == FC_TOP_PT_PT || 12046 port->fp_topology == FC_TOP_UNKNOWN)) { 12047 port->fp_port_id.port_id = 12048 buf->ub_frame.d_id; 12049 } 12050 mutex_exit(&port->fp_mutex); 12051 } 12052 } else { 12053 if (FP_IS_CLASS_1_OR_2(buf->ub_class) || 12054 port->fp_options & FP_SEND_RJT) { 12055 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 12056 0, sleep, pd); 12057 if (cmd == NULL) { 12058 return; 12059 } 12060 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 12061 cmd->cmd_pkt.pkt_rsplen = 0; 12062 /* 12063 * Send out Logical busy to indicate 12064 * the detection of PLOGI collision 12065 */ 12066 fp_els_rjt_init(port, cmd, buf, 12067 FC_ACTION_NON_RETRYABLE, 12068 FC_REASON_LOGICAL_BSY, job); 12069 12070 fc_wwn_to_str(dwwn, dww_name); 12071 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: " 12072 "Rejecting Unsol PLOGI with Logical Busy." 12073 "possible PLOGI collision. PWWN=%s, sent=%x", 12074 dww_name, sent); 12075 } else { 12076 return; 12077 } 12078 } 12079 12080 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 12081 fp_free_pkt(cmd); 12082 } 12083 } 12084 12085 12086 /* 12087 * Handle mischievous turning over of our own FLOGI requests back to 12088 * us by the SOC+ microcode. In other words, look at the class of such 12089 * bone headed requests, if 1 or 2, bluntly P_RJT them, if 3 drop them 12090 * on the floor 12091 */ 12092 static void 12093 fp_handle_unsol_flogi(fc_local_port_t *port, fc_unsol_buf_t *buf, 12094 job_request_t *job, int sleep) 12095 { 12096 uint32_t state; 12097 uint32_t s_id; 12098 fp_cmd_t *cmd; 12099 12100 if (fp_is_class_supported(port->fp_cos, buf->ub_class) == FC_FAILURE) { 12101 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 12102 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 12103 0, sleep, NULL); 12104 if (cmd == NULL) { 12105 return; 12106 } 12107 fp_els_rjt_init(port, cmd, buf, 12108 FC_ACTION_NON_RETRYABLE, 12109 FC_REASON_CLASS_NOT_SUPP, job); 12110 } else { 12111 return; 12112 } 12113 } else { 12114 12115 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi:" 12116 " s_id=%x, d_id=%x, type=%x, f_ctl=%x" 12117 " seq_id=%x, ox_id=%x, rx_id=%x, ro=%x", 12118 buf->ub_frame.s_id, buf->ub_frame.d_id, 12119 buf->ub_frame.type, buf->ub_frame.f_ctl, 12120 buf->ub_frame.seq_id, buf->ub_frame.ox_id, 12121 buf->ub_frame.rx_id, buf->ub_frame.ro); 12122 12123 mutex_enter(&port->fp_mutex); 12124 state = FC_PORT_STATE_MASK(port->fp_state); 12125 s_id = port->fp_port_id.port_id; 12126 mutex_exit(&port->fp_mutex); 12127 12128 if (state != FC_STATE_ONLINE || 12129 (s_id && buf->ub_frame.s_id == s_id)) { 12130 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 12131 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 12132 0, sleep, NULL); 12133 if (cmd == NULL) { 12134 return; 12135 } 12136 fp_els_rjt_init(port, cmd, buf, 12137 FC_ACTION_NON_RETRYABLE, 12138 FC_REASON_INVALID_LINK_CTRL, job); 12139 FP_TRACE(FP_NHEAD1(3, 0), 12140 "fp_handle_unsol_flogi: " 12141 "Rejecting PLOGI. Invalid Link CTRL"); 12142 } else { 12143 return; 12144 } 12145 } else { 12146 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 12147 0, sleep, NULL); 12148 if (cmd == NULL) { 12149 return; 12150 } 12151 /* 12152 * Let's not aggressively validate the N_Port's 12153 * service parameters until PLOGI. Suffice it 12154 * to give a hint that we are an N_Port and we 12155 * are game to some serious stuff here. 
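 *
 * The ACC sent below is therefore just this port's own service
 * parameter block with the command code overwritten (see
 * fp_login_acc_init()), roughly:
 *
 *	payload = port->fp_service_params;
 *	payload.ls_code.ls_code = LA_ELS_ACC;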
12156 */ 12157 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP); 12158 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi: " 12159 "Accepting PLOGI"); 12160 } 12161 } 12162 12163 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 12164 fp_free_pkt(cmd); 12165 } 12166 } 12167 12168 12169 /* 12170 * Perform PLOGI accept 12171 */ 12172 static void 12173 fp_login_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 12174 job_request_t *job, int sleep) 12175 { 12176 fc_packet_t *pkt; 12177 fc_portmap_t *listptr; 12178 la_els_logi_t payload; 12179 12180 ASSERT(buf != NULL); 12181 12182 /* 12183 * If we are sending ACC to PLOGI and we haven't already 12184 * create port and node device handles, let's create them 12185 * here. 12186 */ 12187 if (buf->ub_buffer[0] == LA_ELS_PLOGI && 12188 FC_IS_REAL_DEVICE(buf->ub_frame.s_id)) { 12189 int small; 12190 int do_acc; 12191 fc_remote_port_t *pd; 12192 la_els_logi_t *req; 12193 12194 req = (la_els_logi_t *)buf->ub_buffer; 12195 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name, 12196 &req->nport_ww_name); 12197 12198 mutex_enter(&port->fp_mutex); 12199 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 12200 mutex_exit(&port->fp_mutex); 12201 12202 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_acc_init fp %x, pd %x", 12203 port->fp_port_id.port_id, buf->ub_frame.s_id); 12204 pd = fctl_create_remote_port(port, &req->node_ww_name, 12205 &req->nport_ww_name, buf->ub_frame.s_id, 12206 PD_PLOGI_RECEPIENT, sleep); 12207 if (pd == NULL) { 12208 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: " 12209 "Couldn't create port device for d_id:0x%x", 12210 buf->ub_frame.s_id); 12211 12212 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 12213 "couldn't create port device d_id=%x", 12214 buf->ub_frame.s_id); 12215 } else { 12216 /* 12217 * usoc currently returns PLOGIs inline and 12218 * the maximum buffer size is 60 bytes or so. 
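 * A complete la_els_logi_t is considerably larger than that, which is
 * why fp_register_login() is only called further down when ub_bufsize
 * can hold the whole payload.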
12219 * So attempt not to look beyond what is in 12220 * the unsolicited buffer 12221 * 12222 * JNI also traverses this path sometimes 12223 */ 12224 if (buf->ub_bufsize >= sizeof (la_els_logi_t)) { 12225 fp_register_login(NULL, pd, req, buf->ub_class); 12226 } else { 12227 mutex_enter(&pd->pd_mutex); 12228 if (pd->pd_login_count == 0) { 12229 pd->pd_login_count++; 12230 } 12231 pd->pd_state = PORT_DEVICE_LOGGED_IN; 12232 pd->pd_login_class = buf->ub_class; 12233 mutex_exit(&pd->pd_mutex); 12234 } 12235 12236 listptr = kmem_zalloc(sizeof (fc_portmap_t), sleep); 12237 if (listptr != NULL) { 12238 fctl_copy_portmap(listptr, pd); 12239 (void) fp_ulp_devc_cb(port, listptr, 12240 1, 1, sleep, 0); 12241 } 12242 12243 if (small > 0 && do_acc) { 12244 mutex_enter(&pd->pd_mutex); 12245 pd->pd_recepient = PD_PLOGI_INITIATOR; 12246 mutex_exit(&pd->pd_mutex); 12247 } 12248 } 12249 } 12250 12251 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 12252 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 12253 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 12254 cmd->cmd_retry_count = 1; 12255 cmd->cmd_ulp_pkt = NULL; 12256 12257 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 12258 cmd->cmd_job = job; 12259 12260 pkt = &cmd->cmd_pkt; 12261 12262 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 12263 12264 payload = port->fp_service_params; 12265 payload.ls_code.ls_code = LA_ELS_ACC; 12266 12267 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 12268 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 12269 12270 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: ELS:0x%x d_id:0x%x " 12271 "bufsize:0x%x sizeof (la_els_logi):0x%x " 12272 "port's wwn:0x%01x%03x%04x%08x requestor's wwn:0x%01x%03x%04x%08x " 12273 "statec_busy:0x%x", buf->ub_buffer[0], buf->ub_frame.s_id, 12274 buf->ub_bufsize, sizeof (la_els_logi_t), 12275 port->fp_service_params.nport_ww_name.w.naa_id, 12276 port->fp_service_params.nport_ww_name.w.nport_id, 12277 port->fp_service_params.nport_ww_name.w.wwn_hi, 12278 port->fp_service_params.nport_ww_name.w.wwn_lo, 12279 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.naa_id, 12280 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.nport_id, 12281 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_hi, 12282 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_lo, 12283 port->fp_statec_busy); 12284 } 12285 12286 12287 #define RSCN_EVENT_NAME_LEN 256 12288 12289 /* 12290 * Handle RSCNs 12291 */ 12292 static void 12293 fp_handle_unsol_rscn(fc_local_port_t *port, fc_unsol_buf_t *buf, 12294 job_request_t *job, int sleep) 12295 { 12296 uint32_t mask; 12297 fp_cmd_t *cmd; 12298 uint32_t count; 12299 int listindex; 12300 int16_t len; 12301 fc_rscn_t *payload; 12302 fc_portmap_t *listptr; 12303 fctl_ns_req_t *ns_cmd; 12304 fc_affected_id_t *page; 12305 caddr_t nvname; 12306 nvlist_t *attr_list = NULL; 12307 12308 mutex_enter(&port->fp_mutex); 12309 if (!FC_IS_TOP_SWITCH(port->fp_topology)) { 12310 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12311 --port->fp_rscn_count; 12312 } 12313 mutex_exit(&port->fp_mutex); 12314 return; 12315 } 12316 mutex_exit(&port->fp_mutex); 12317 12318 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0, sleep, NULL); 12319 if (cmd != NULL) { 12320 fp_els_acc_init(port, cmd, buf, job); 12321 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 12322 fp_free_pkt(cmd); 12323 } 12324 } 12325 12326 payload = (fc_rscn_t *)buf->ub_buffer; 12327 ASSERT(payload->rscn_code == LA_ELS_RSCN); 12328 ASSERT(payload->rscn_len == 
FP_PORT_IDENTIFIER_LEN); 12329 12330 len = payload->rscn_payload_len - FP_PORT_IDENTIFIER_LEN; 12331 12332 if (len <= 0) { 12333 mutex_enter(&port->fp_mutex); 12334 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12335 --port->fp_rscn_count; 12336 } 12337 mutex_exit(&port->fp_mutex); 12338 12339 return; 12340 } 12341 12342 ASSERT((len & 0x3) == 0); /* Must be power of 4 */ 12343 count = (len >> 2) << 1; /* number of pages multiplied by 2 */ 12344 12345 listptr = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep); 12346 page = (fc_affected_id_t *)(buf->ub_buffer + sizeof (fc_rscn_t)); 12347 12348 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12349 12350 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpn_id_t), 12351 sizeof (ns_resp_gpn_id_t), sizeof (ns_resp_gpn_id_t), 12352 0, sleep); 12353 if (ns_cmd == NULL) { 12354 kmem_free(listptr, sizeof (fc_portmap_t) * count); 12355 12356 mutex_enter(&port->fp_mutex); 12357 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12358 --port->fp_rscn_count; 12359 } 12360 mutex_exit(&port->fp_mutex); 12361 12362 return; 12363 } 12364 12365 ns_cmd->ns_cmd_code = NS_GPN_ID; 12366 12367 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_rscn: s_id=%x, d_id=%x," 12368 "type=%x, f_ctl=%x seq_id=%x, ox_id=%x, rx_id=%x" 12369 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 12370 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id, 12371 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 12372 12373 /* Only proceed if we can allocate nvname and the nvlist */ 12374 if ((nvname = kmem_zalloc(RSCN_EVENT_NAME_LEN, KM_NOSLEEP)) != NULL && 12375 nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, 12376 KM_NOSLEEP) == DDI_SUCCESS) { 12377 if (!(attr_list && nvlist_add_uint32(attr_list, "instance", 12378 port->fp_instance) == DDI_SUCCESS && 12379 nvlist_add_byte_array(attr_list, "port-wwn", 12380 port->fp_service_params.nport_ww_name.raw_wwn, 12381 sizeof (la_wwn_t)) == DDI_SUCCESS)) { 12382 nvlist_free(attr_list); 12383 attr_list = NULL; 12384 } 12385 } 12386 12387 for (listindex = 0; len; len -= FP_PORT_IDENTIFIER_LEN, page++) { 12388 /* Add affected page to the event payload */ 12389 if (attr_list != NULL) { 12390 (void) snprintf(nvname, RSCN_EVENT_NAME_LEN, 12391 "affected_page_%d", listindex); 12392 if (attr_list && nvlist_add_uint32(attr_list, nvname, 12393 ntohl(*(uint32_t *)page)) != DDI_SUCCESS) { 12394 /* We don't send a partial event, so dump it */ 12395 nvlist_free(attr_list); 12396 attr_list = NULL; 12397 } 12398 } 12399 /* 12400 * Query the NS to get the Port WWN for this 12401 * affected D_ID. 12402 */ 12403 mask = 0; 12404 switch (page->aff_format & FC_RSCN_ADDRESS_MASK) { 12405 case FC_RSCN_PORT_ADDRESS: 12406 fp_validate_rscn_page(port, page, job, ns_cmd, 12407 listptr, &listindex, sleep); 12408 12409 if (listindex == 0) { 12410 /* 12411 * We essentially did not process this RSCN. 
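 * (fp_validate_rscn_page() presumably advances listindex only when the
 * affected N_Port could be validated, so a listindex of zero here means
 * nothing was added to the change list.)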
So, 12412 * ULPs are not going to be called and so we 12413 * decrement the rscn_count 12414 */ 12415 mutex_enter(&port->fp_mutex); 12416 if (--port->fp_rscn_count == 12417 FC_INVALID_RSCN_COUNT) { 12418 --port->fp_rscn_count; 12419 } 12420 mutex_exit(&port->fp_mutex); 12421 } 12422 break; 12423 12424 case FC_RSCN_AREA_ADDRESS: 12425 mask = 0xFFFF00; 12426 /* FALLTHROUGH */ 12427 12428 case FC_RSCN_DOMAIN_ADDRESS: 12429 if (!mask) { 12430 mask = 0xFF0000; 12431 } 12432 fp_validate_area_domain(port, page->aff_d_id, mask, 12433 job, sleep); 12434 break; 12435 12436 case FC_RSCN_FABRIC_ADDRESS: 12437 /* 12438 * We need to discover all the devices on this 12439 * port. 12440 */ 12441 fp_validate_area_domain(port, 0, 0, job, sleep); 12442 break; 12443 12444 default: 12445 break; 12446 } 12447 } 12448 if (attr_list != NULL) { 12449 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, 12450 EC_SUNFC, ESC_SUNFC_PORT_RSCN, attr_list, 12451 NULL, DDI_SLEEP); 12452 nvlist_free(attr_list); 12453 } else { 12454 FP_TRACE(FP_NHEAD1(9, 0), 12455 "RSCN handled, but event not sent to userland"); 12456 } 12457 if (nvname != NULL) { 12458 kmem_free(nvname, RSCN_EVENT_NAME_LEN); 12459 } 12460 12461 if (ns_cmd) { 12462 fctl_free_ns_cmd(ns_cmd); 12463 } 12464 12465 if (listindex) { 12466 #ifdef DEBUG 12467 page = (fc_affected_id_t *)(buf->ub_buffer + 12468 sizeof (fc_rscn_t)); 12469 12470 if (listptr->map_did.port_id != page->aff_d_id) { 12471 FP_TRACE(FP_NHEAD1(9, 0), 12472 "PORT RSCN: processed=%x, reporting=%x", 12473 listptr->map_did.port_id, page->aff_d_id); 12474 } 12475 #endif 12476 12477 (void) fp_ulp_devc_cb(port, listptr, listindex, count, 12478 sleep, 0); 12479 } else { 12480 kmem_free(listptr, sizeof (fc_portmap_t) * count); 12481 } 12482 } 12483 12484 12485 /* 12486 * Fill out old map for ULPs with fp_mutex, fd_mutex and pd_mutex held 12487 */ 12488 static void 12489 fp_fillout_old_map_held(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag) 12490 { 12491 int is_switch; 12492 int initiator; 12493 fc_local_port_t *port; 12494 12495 port = pd->pd_port; 12496 12497 /* This function has the following bunch of assumptions */ 12498 ASSERT(port != NULL); 12499 ASSERT(MUTEX_HELD(&port->fp_mutex)); 12500 ASSERT(MUTEX_HELD(&pd->pd_remote_nodep->fd_mutex)); 12501 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 12502 12503 pd->pd_state = PORT_DEVICE_INVALID; 12504 pd->pd_type = PORT_DEVICE_OLD; 12505 initiator = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0; 12506 is_switch = FC_IS_TOP_SWITCH(port->fp_topology); 12507 12508 fctl_delist_did_table(port, pd); 12509 fctl_delist_pwwn_table(port, pd); 12510 12511 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map_held: port=%p, d_id=%x" 12512 " removed the PD=%p from DID and PWWN tables", 12513 port, pd->pd_port_id.port_id, pd); 12514 12515 if ((!flag) && port && initiator && is_switch) { 12516 (void) fctl_add_orphan_held(port, pd); 12517 } 12518 fctl_copy_portmap_held(map, pd); 12519 map->map_pd = pd; 12520 } 12521 12522 /* 12523 * Fill out old map for ULPs 12524 */ 12525 static void 12526 fp_fillout_old_map(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag) 12527 { 12528 int is_switch; 12529 int initiator; 12530 fc_local_port_t *port; 12531 12532 mutex_enter(&pd->pd_mutex); 12533 port = pd->pd_port; 12534 mutex_exit(&pd->pd_mutex); 12535 12536 mutex_enter(&port->fp_mutex); 12537 mutex_enter(&pd->pd_mutex); 12538 12539 pd->pd_state = PORT_DEVICE_INVALID; 12540 pd->pd_type = PORT_DEVICE_OLD; 12541 initiator = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 12542 is_switch = FC_IS_TOP_SWITCH(port->fp_topology); 12543 12544 fctl_delist_did_table(port, pd); 12545 fctl_delist_pwwn_table(port, pd); 12546 12547 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map: port=%p, d_id=%x" 12548 " removed the PD=%p from DID and PWWN tables", 12549 port, pd->pd_port_id.port_id, pd); 12550 12551 mutex_exit(&pd->pd_mutex); 12552 mutex_exit(&port->fp_mutex); 12553 12554 ASSERT(port != NULL); 12555 if ((!flag) && port && initiator && is_switch) { 12556 (void) fctl_add_orphan(port, pd, KM_NOSLEEP); 12557 } 12558 fctl_copy_portmap(map, pd); 12559 map->map_pd = pd; 12560 } 12561 12562 12563 /* 12564 * Fillout Changed Map for ULPs 12565 */ 12566 static void 12567 fp_fillout_changed_map(fc_portmap_t *map, fc_remote_port_t *pd, 12568 uint32_t *new_did, la_wwn_t *new_pwwn) 12569 { 12570 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 12571 12572 pd->pd_type = PORT_DEVICE_CHANGED; 12573 if (new_did) { 12574 pd->pd_port_id.port_id = *new_did; 12575 } 12576 if (new_pwwn) { 12577 pd->pd_port_name = *new_pwwn; 12578 } 12579 mutex_exit(&pd->pd_mutex); 12580 12581 fctl_copy_portmap(map, pd); 12582 12583 mutex_enter(&pd->pd_mutex); 12584 pd->pd_type = PORT_DEVICE_NOCHANGE; 12585 } 12586 12587 12588 /* 12589 * Fillout New Name Server map 12590 */ 12591 static void 12592 fp_fillout_new_nsmap(fc_local_port_t *port, ddi_acc_handle_t *handle, 12593 fc_portmap_t *port_map, ns_resp_gan_t *gan_resp, uint32_t d_id) 12594 { 12595 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12596 12597 if (handle) { 12598 FC_GET_RSP(port, *handle, (uint8_t *)&port_map->map_pwwn, 12599 (uint8_t *)&gan_resp->gan_pwwn, sizeof (gan_resp->gan_pwwn), 12600 DDI_DEV_AUTOINCR); 12601 FC_GET_RSP(port, *handle, (uint8_t *)&port_map->map_nwwn, 12602 (uint8_t *)&gan_resp->gan_nwwn, sizeof (gan_resp->gan_nwwn), 12603 DDI_DEV_AUTOINCR); 12604 FC_GET_RSP(port, *handle, (uint8_t *)port_map->map_fc4_types, 12605 (uint8_t *)gan_resp->gan_fc4types, 12606 sizeof (gan_resp->gan_fc4types), DDI_DEV_AUTOINCR); 12607 } else { 12608 bcopy(&gan_resp->gan_pwwn, &port_map->map_pwwn, 12609 sizeof (gan_resp->gan_pwwn)); 12610 bcopy(&gan_resp->gan_nwwn, &port_map->map_nwwn, 12611 sizeof (gan_resp->gan_nwwn)); 12612 bcopy(gan_resp->gan_fc4types, port_map->map_fc4_types, 12613 sizeof (gan_resp->gan_fc4types)); 12614 } 12615 port_map->map_did.port_id = d_id; 12616 port_map->map_did.priv_lilp_posit = 0; 12617 port_map->map_hard_addr.hard_addr = 0; 12618 port_map->map_hard_addr.rsvd = 0; 12619 port_map->map_state = PORT_DEVICE_INVALID; 12620 port_map->map_type = PORT_DEVICE_NEW; 12621 port_map->map_flags = 0; 12622 port_map->map_pd = NULL; 12623 12624 (void) fctl_remove_if_orphan(port, &port_map->map_pwwn); 12625 12626 ASSERT(port != NULL); 12627 } 12628 12629 12630 /* 12631 * Perform LINIT ELS 12632 */ 12633 static int 12634 fp_remote_lip(fc_local_port_t *port, la_wwn_t *pwwn, int sleep, 12635 job_request_t *job) 12636 { 12637 int rval; 12638 uint32_t d_id; 12639 uint32_t s_id; 12640 uint32_t lfa; 12641 uchar_t class; 12642 uint32_t ret; 12643 fp_cmd_t *cmd; 12644 fc_porttype_t ptype; 12645 fc_packet_t *pkt; 12646 fc_linit_req_t payload; 12647 fc_remote_port_t *pd; 12648 12649 rval = 0; 12650 12651 ASSERT(job != NULL); 12652 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12653 12654 pd = fctl_get_remote_port_by_pwwn(port, pwwn); 12655 if (pd == NULL) { 12656 fctl_ns_req_t *ns_cmd; 12657 12658 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 12659 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 12660 0, sleep); 12661 12662 if (ns_cmd == NULL) { 
12663 return (FC_NOMEM); 12664 } 12665 job->job_result = FC_SUCCESS; 12666 ns_cmd->ns_cmd_code = NS_GID_PN; 12667 ((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = *pwwn; 12668 12669 ret = fp_ns_query(port, ns_cmd, job, 1, sleep); 12670 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) { 12671 fctl_free_ns_cmd(ns_cmd); 12672 return (FC_FAILURE); 12673 } 12674 bcopy(ns_cmd->ns_data_buf, (caddr_t)&d_id, sizeof (d_id)); 12675 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 12676 12677 fctl_free_ns_cmd(ns_cmd); 12678 lfa = d_id & 0xFFFF00; 12679 12680 /* 12681 * Given this D_ID, get the port type to see if 12682 * we can do LINIT on the LFA 12683 */ 12684 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpt_id_t), 12685 sizeof (ns_resp_gpt_id_t), sizeof (ns_resp_gpt_id_t), 12686 0, sleep); 12687 12688 if (ns_cmd == NULL) { 12689 return (FC_NOMEM); 12690 } 12691 12692 job->job_result = FC_SUCCESS; 12693 ns_cmd->ns_cmd_code = NS_GPT_ID; 12694 12695 ((ns_req_gpt_id_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id; 12696 ((ns_req_gpt_id_t *) 12697 (ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 12698 12699 ret = fp_ns_query(port, ns_cmd, job, 1, sleep); 12700 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) { 12701 fctl_free_ns_cmd(ns_cmd); 12702 return (FC_FAILURE); 12703 } 12704 bcopy(ns_cmd->ns_data_buf, (caddr_t)&ptype, sizeof (ptype)); 12705 12706 fctl_free_ns_cmd(ns_cmd); 12707 12708 switch (ptype.port_type) { 12709 case FC_NS_PORT_NL: 12710 case FC_NS_PORT_F_NL: 12711 case FC_NS_PORT_FL: 12712 break; 12713 12714 default: 12715 return (FC_FAILURE); 12716 } 12717 } else { 12718 mutex_enter(&pd->pd_mutex); 12719 ptype = pd->pd_porttype; 12720 12721 switch (pd->pd_porttype.port_type) { 12722 case FC_NS_PORT_NL: 12723 case FC_NS_PORT_F_NL: 12724 case FC_NS_PORT_FL: 12725 lfa = pd->pd_port_id.port_id & 0xFFFF00; 12726 break; 12727 12728 default: 12729 mutex_exit(&pd->pd_mutex); 12730 return (FC_FAILURE); 12731 } 12732 mutex_exit(&pd->pd_mutex); 12733 } 12734 12735 mutex_enter(&port->fp_mutex); 12736 s_id = port->fp_port_id.port_id; 12737 class = port->fp_ns_login_class; 12738 mutex_exit(&port->fp_mutex); 12739 12740 cmd = fp_alloc_pkt(port, sizeof (fc_linit_req_t), 12741 sizeof (fc_linit_resp_t), sleep, pd); 12742 if (cmd == NULL) { 12743 return (FC_NOMEM); 12744 } 12745 12746 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 12747 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 12748 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 12749 cmd->cmd_retry_count = fp_retry_count; 12750 cmd->cmd_ulp_pkt = NULL; 12751 12752 pkt = &cmd->cmd_pkt; 12753 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 12754 12755 fp_els_init(cmd, s_id, lfa, fp_linit_intr, job); 12756 12757 /* 12758 * How does LIP work by the way ? 12759 * If the L_Port receives three consecutive identical ordered 12760 * sets whose first two characters (fully decoded) are equal to 12761 * the values shown in Table 3 of FC-AL-2 then the L_Port shall 12762 * recognize a Loop Initialization Primitive sequence. 
The 12763 * character 3 determines the type of lip: 12764 * LIP(F7) Normal LIP 12765 * LIP(F8) Loop Failure LIP 12766 * 12767 * The possible combination for the 3rd and 4th bytes are: 12768 * F7, F7 Normal Lip - No valid AL_PA 12769 * F8, F8 Loop Failure - No valid AL_PA 12770 * F7, AL_PS Normal Lip - Valid source AL_PA 12771 * F8, AL_PS Loop Failure - Valid source AL_PA 12772 * AL_PD AL_PS Loop reset of AL_PD originated by AL_PS 12773 * And Normal Lip for all other loop members 12774 * 0xFF AL_PS Vendor specific reset of all loop members 12775 * 12776 * Now, it may not always be that we, at the source, may have an 12777 * AL_PS (AL_PA of source) for 4th character slot, so we decide 12778 * to do (Normal Lip, No Valid AL_PA), that means, in the LINIT 12779 * payload we are going to set: 12780 * lip_b3 = 0xF7; Normal LIP 12781 * lip_b4 = 0xF7; No valid source AL_PA 12782 */ 12783 payload.ls_code.ls_code = LA_ELS_LINIT; 12784 payload.ls_code.mbz = 0; 12785 payload.rsvd = 0; 12786 payload.func = 0; /* Let Fabric determine the best way */ 12787 payload.lip_b3 = 0xF7; /* Normal LIP */ 12788 payload.lip_b4 = 0xF7; /* No valid source AL_PA */ 12789 12790 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 12791 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 12792 12793 job->job_counter = 1; 12794 12795 ret = fp_sendcmd(port, cmd, port->fp_fca_handle); 12796 if (ret == FC_SUCCESS) { 12797 fp_jobwait(job); 12798 rval = job->job_result; 12799 } else { 12800 rval = FC_FAILURE; 12801 fp_free_pkt(cmd); 12802 } 12803 12804 return (rval); 12805 } 12806 12807 12808 /* 12809 * Fill out the device handles with GAN response 12810 */ 12811 static void 12812 fp_stuff_device_with_gan(ddi_acc_handle_t *handle, fc_remote_port_t *pd, 12813 ns_resp_gan_t *gan_resp) 12814 { 12815 fc_remote_node_t *node; 12816 fc_porttype_t type; 12817 fc_local_port_t *port; 12818 12819 ASSERT(pd != NULL); 12820 ASSERT(handle != NULL); 12821 12822 port = pd->pd_port; 12823 12824 FP_TRACE(FP_NHEAD1(1, 0), "GAN PD stuffing; pd=%p," 12825 " port_id=%x, sym_len=%d fc4-type=%x", 12826 pd, gan_resp->gan_type_id.rsvd, 12827 gan_resp->gan_spnlen, gan_resp->gan_fc4types[0]); 12828 12829 mutex_enter(&pd->pd_mutex); 12830 12831 FC_GET_RSP(port, *handle, (uint8_t *)&type, 12832 (uint8_t *)&gan_resp->gan_type_id, sizeof (type), DDI_DEV_AUTOINCR); 12833 12834 pd->pd_porttype.port_type = type.port_type; 12835 pd->pd_porttype.rsvd = 0; 12836 12837 pd->pd_spn_len = gan_resp->gan_spnlen; 12838 if (pd->pd_spn_len) { 12839 FC_GET_RSP(port, *handle, (uint8_t *)pd->pd_spn, 12840 (uint8_t *)gan_resp->gan_spname, pd->pd_spn_len, 12841 DDI_DEV_AUTOINCR); 12842 } 12843 12844 FC_GET_RSP(port, *handle, (uint8_t *)pd->pd_ip_addr, 12845 (uint8_t *)gan_resp->gan_ip, sizeof (pd->pd_ip_addr), 12846 DDI_DEV_AUTOINCR); 12847 FC_GET_RSP(port, *handle, (uint8_t *)&pd->pd_cos, 12848 (uint8_t *)&gan_resp->gan_cos, sizeof (pd->pd_cos), 12849 DDI_DEV_AUTOINCR); 12850 FC_GET_RSP(port, *handle, (uint8_t *)pd->pd_fc4types, 12851 (uint8_t *)gan_resp->gan_fc4types, sizeof (pd->pd_fc4types), 12852 DDI_DEV_AUTOINCR); 12853 12854 node = pd->pd_remote_nodep; 12855 mutex_exit(&pd->pd_mutex); 12856 12857 mutex_enter(&node->fd_mutex); 12858 12859 FC_GET_RSP(port, *handle, (uint8_t *)node->fd_ipa, 12860 (uint8_t *)gan_resp->gan_ipa, sizeof (node->fd_ipa), 12861 DDI_DEV_AUTOINCR); 12862 12863 node->fd_snn_len = gan_resp->gan_snnlen; 12864 if (node->fd_snn_len) { 12865 FC_GET_RSP(port, *handle, (uint8_t *)node->fd_snn, 12866 (uint8_t *)gan_resp->gan_snname, 
node->fd_snn_len, 12867 DDI_DEV_AUTOINCR); 12868 } 12869 12870 mutex_exit(&node->fd_mutex); 12871 } 12872 12873 12874 /* 12875 * Handles all NS Queries (also means that this function 12876 * doesn't handle NS object registration) 12877 */ 12878 static int 12879 fp_ns_query(fc_local_port_t *port, fctl_ns_req_t *ns_cmd, job_request_t *job, 12880 int polled, int sleep) 12881 { 12882 int rval; 12883 fp_cmd_t *cmd; 12884 12885 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12886 12887 if (ns_cmd->ns_cmd_code == NS_GA_NXT) { 12888 FP_TRACE(FP_NHEAD1(1, 0), "fp_ns_query GA_NXT fp %x pd %x", 12889 port->fp_port_id.port_id, ns_cmd->ns_gan_sid); 12890 } 12891 12892 if (ns_cmd->ns_cmd_size == 0) { 12893 return (FC_FAILURE); 12894 } 12895 12896 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 12897 ns_cmd->ns_cmd_size, sizeof (fc_ct_header_t) + 12898 ns_cmd->ns_resp_size, sleep, NULL); 12899 if (cmd == NULL) { 12900 return (FC_NOMEM); 12901 } 12902 12903 fp_ct_init(port, cmd, ns_cmd, ns_cmd->ns_cmd_code, ns_cmd->ns_cmd_buf, 12904 ns_cmd->ns_cmd_size, ns_cmd->ns_resp_size, job); 12905 12906 if (polled) { 12907 job->job_counter = 1; 12908 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12909 } 12910 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 12911 if (rval != FC_SUCCESS) { 12912 job->job_result = rval; 12913 fp_iodone(cmd); 12914 if (polled == 0) { 12915 /* 12916 * Return FC_SUCCESS to indicate that 12917 * fp_iodone is performed already. 12918 */ 12919 rval = FC_SUCCESS; 12920 } 12921 } 12922 12923 if (polled) { 12924 fp_jobwait(job); 12925 rval = job->job_result; 12926 } 12927 12928 return (rval); 12929 } 12930 12931 12932 /* 12933 * Initialize Common Transport request 12934 */ 12935 static void 12936 fp_ct_init(fc_local_port_t *port, fp_cmd_t *cmd, fctl_ns_req_t *ns_cmd, 12937 uint16_t cmd_code, caddr_t cmd_buf, uint16_t cmd_len, 12938 uint16_t resp_len, job_request_t *job) 12939 { 12940 uint32_t s_id; 12941 uchar_t class; 12942 fc_packet_t *pkt; 12943 fc_ct_header_t ct; 12944 12945 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12946 12947 mutex_enter(&port->fp_mutex); 12948 s_id = port->fp_port_id.port_id; 12949 class = port->fp_ns_login_class; 12950 mutex_exit(&port->fp_mutex); 12951 12952 cmd->cmd_job = job; 12953 cmd->cmd_private = ns_cmd; 12954 pkt = &cmd->cmd_pkt; 12955 12956 ct.ct_rev = CT_REV; 12957 ct.ct_inid = 0; 12958 ct.ct_fcstype = FCSTYPE_DIRECTORY; 12959 ct.ct_fcssubtype = FCSSUB_DS_NAME_SERVER; 12960 ct.ct_options = 0; 12961 ct.ct_reserved1 = 0; 12962 ct.ct_cmdrsp = cmd_code; 12963 ct.ct_aiusize = resp_len >> 2; 12964 ct.ct_reserved2 = 0; 12965 ct.ct_reason = 0; 12966 ct.ct_expln = 0; 12967 ct.ct_vendor = 0; 12968 12969 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&ct, 12970 (uint8_t *)pkt->pkt_cmd, sizeof (ct), DDI_DEV_AUTOINCR); 12971 12972 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_UNSOL_CONTROL; 12973 pkt->pkt_cmd_fhdr.d_id = 0xFFFFFC; 12974 pkt->pkt_cmd_fhdr.s_id = s_id; 12975 pkt->pkt_cmd_fhdr.type = FC_TYPE_FC_SERVICES; 12976 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE | 12977 F_CTL_FIRST_SEQ | F_CTL_END_SEQ; 12978 pkt->pkt_cmd_fhdr.seq_id = 0; 12979 pkt->pkt_cmd_fhdr.df_ctl = 0; 12980 pkt->pkt_cmd_fhdr.seq_cnt = 0; 12981 pkt->pkt_cmd_fhdr.ox_id = 0xffff; 12982 pkt->pkt_cmd_fhdr.rx_id = 0xffff; 12983 pkt->pkt_cmd_fhdr.ro = 0; 12984 pkt->pkt_cmd_fhdr.rsvd = 0; 12985 12986 pkt->pkt_comp = fp_ns_intr; 12987 pkt->pkt_ulp_private = (opaque_t)cmd; 12988 pkt->pkt_timeout = FP_NS_TIMEOUT; 12989 12990 if (cmd_buf) { 12991 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)cmd_buf, 12992 (uint8_t 
*)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 12993 cmd_len, DDI_DEV_AUTOINCR); 12994 } 12995 12996 cmd->cmd_transport = port->fp_fca_tran->fca_transport; 12997 12998 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 12999 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 13000 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE; 13001 cmd->cmd_retry_count = fp_retry_count; 13002 cmd->cmd_ulp_pkt = NULL; 13003 } 13004 13005 13006 /* 13007 * Name Server request interrupt routine 13008 */ 13009 static void 13010 fp_ns_intr(fc_packet_t *pkt) 13011 { 13012 fp_cmd_t *cmd; 13013 fc_local_port_t *port; 13014 fc_ct_header_t resp_hdr; 13015 fc_ct_header_t cmd_hdr; 13016 fctl_ns_req_t *ns_cmd; 13017 13018 cmd = pkt->pkt_ulp_private; 13019 port = cmd->cmd_port; 13020 13021 mutex_enter(&port->fp_mutex); 13022 port->fp_out_fpcmds--; 13023 mutex_exit(&port->fp_mutex); 13024 13025 FC_GET_RSP(port, pkt->pkt_cmd_acc, (uint8_t *)&cmd_hdr, 13026 (uint8_t *)pkt->pkt_cmd, sizeof (cmd_hdr), DDI_DEV_AUTOINCR); 13027 ns_cmd = (fctl_ns_req_t *) 13028 (((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private); 13029 if (!FP_IS_PKT_ERROR(pkt)) { 13030 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp_hdr, 13031 (uint8_t *)pkt->pkt_resp, sizeof (resp_hdr), 13032 DDI_DEV_AUTOINCR); 13033 13034 /* 13035 * On x86 architectures, make sure the resp_hdr is big endian. 13036 * This macro is a NOP on sparc architectures mainly because 13037 * we don't want to end up wasting time since the end result 13038 * is going to be the same. 13039 */ 13040 MAKE_BE_32(&resp_hdr); 13041 13042 if (ns_cmd) { 13043 /* 13044 * Always copy out the response CT_HDR 13045 */ 13046 bcopy(&resp_hdr, &ns_cmd->ns_resp_hdr, 13047 sizeof (resp_hdr)); 13048 } 13049 13050 if (resp_hdr.ct_cmdrsp == FS_RJT_IU) { 13051 pkt->pkt_state = FC_PKT_FS_RJT; 13052 pkt->pkt_reason = resp_hdr.ct_reason; 13053 pkt->pkt_expln = resp_hdr.ct_expln; 13054 } 13055 } 13056 13057 if (FP_IS_PKT_ERROR(pkt)) { 13058 if (ns_cmd) { 13059 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) { 13060 ASSERT(ns_cmd->ns_pd != NULL); 13061 13062 /* Mark it OLD if not already done */ 13063 mutex_enter(&ns_cmd->ns_pd->pd_mutex); 13064 ns_cmd->ns_pd->pd_type = PORT_DEVICE_OLD; 13065 mutex_exit(&ns_cmd->ns_pd->pd_mutex); 13066 } 13067 13068 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) { 13069 fctl_free_ns_cmd(ns_cmd); 13070 ((fp_cmd_t *) 13071 (pkt->pkt_ulp_private))->cmd_private = NULL; 13072 } 13073 13074 } 13075 13076 FP_TRACE(FP_NHEAD2(1, 0), "%x NS failure pkt state=%x " 13077 "reason=%x, expln=%x, NSCMD=%04X, NSRSP=%04X", 13078 port->fp_port_id.port_id, pkt->pkt_state, 13079 pkt->pkt_reason, pkt->pkt_expln, 13080 cmd_hdr.ct_cmdrsp, resp_hdr.ct_cmdrsp); 13081 13082 (void) fp_common_intr(pkt, 1); 13083 13084 return; 13085 } 13086 13087 if (resp_hdr.ct_cmdrsp != FS_ACC_IU) { 13088 uint32_t d_id; 13089 fc_local_port_t *port; 13090 fp_cmd_t *cmd; 13091 13092 d_id = pkt->pkt_cmd_fhdr.d_id; 13093 cmd = pkt->pkt_ulp_private; 13094 port = cmd->cmd_port; 13095 FP_TRACE(FP_NHEAD2(9, 0), 13096 "Bogus NS response received for D_ID=%x", d_id); 13097 } 13098 13099 if (cmd_hdr.ct_cmdrsp == NS_GA_NXT) { 13100 fp_gan_handler(pkt, ns_cmd); 13101 return; 13102 } 13103 13104 if (cmd_hdr.ct_cmdrsp >= NS_GPN_ID && 13105 cmd_hdr.ct_cmdrsp <= NS_GID_PT) { 13106 if (ns_cmd) { 13107 if ((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0) { 13108 fp_ns_query_handler(pkt, ns_cmd); 13109 return; 13110 } 13111 } 13112 } 13113 13114 fp_iodone(pkt->pkt_ulp_private); 13115 } 13116 13117 13118 /* 13119 * Process NS_GAN response 13120 */ 13121 
static void 13122 fp_gan_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd) 13123 { 13124 int my_did; 13125 fc_portid_t d_id; 13126 fp_cmd_t *cmd; 13127 fc_local_port_t *port; 13128 fc_remote_port_t *pd; 13129 ns_req_gan_t gan_req; 13130 ns_resp_gan_t *gan_resp; 13131 13132 ASSERT(ns_cmd != NULL); 13133 13134 cmd = pkt->pkt_ulp_private; 13135 port = cmd->cmd_port; 13136 13137 gan_resp = (ns_resp_gan_t *)(pkt->pkt_resp + sizeof (fc_ct_header_t)); 13138 13139 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&d_id, 13140 (uint8_t *)&gan_resp->gan_type_id, sizeof (d_id), DDI_DEV_AUTOINCR); 13141 13142 *(uint32_t *)&d_id = BE_32(*(uint32_t *)&d_id); 13143 13144 /* 13145 * In this case the priv_lilp_posit field actually 13146 * represents the relative position on a private loop. 13147 * So zero it while dealing with Port Identifiers. 13148 */ 13149 d_id.priv_lilp_posit = 0; 13150 pd = fctl_get_remote_port_by_did(port, d_id.port_id); 13151 if (ns_cmd->ns_gan_sid == d_id.port_id) { 13152 /* 13153 * We've come a full circle; time to get out. 13154 */ 13155 fp_iodone(cmd); 13156 return; 13157 } 13158 13159 if (ns_cmd->ns_gan_sid == FCTL_GAN_START_ID) { 13160 ns_cmd->ns_gan_sid = d_id.port_id; 13161 } 13162 13163 mutex_enter(&port->fp_mutex); 13164 my_did = (d_id.port_id == port->fp_port_id.port_id) ? 1 : 0; 13165 mutex_exit(&port->fp_mutex); 13166 13167 FP_TRACE(FP_NHEAD1(1, 0), "GAN response; port=%p, fp %x pd %x", port, 13168 port->fp_port_id.port_id, d_id.port_id); 13169 if (my_did == 0) { 13170 la_wwn_t pwwn; 13171 la_wwn_t nwwn; 13172 13173 FP_TRACE(FP_NHEAD1(1, 0), "GAN response details; " 13174 "port=%p, d_id=%x, type_id=%x, " 13175 "pwwn=%x %x %x %x %x %x %x %x, " 13176 "nwwn=%x %x %x %x %x %x %x %x", 13177 port, d_id.port_id, gan_resp->gan_type_id, 13178 13179 gan_resp->gan_pwwn.raw_wwn[0], 13180 gan_resp->gan_pwwn.raw_wwn[1], 13181 gan_resp->gan_pwwn.raw_wwn[2], 13182 gan_resp->gan_pwwn.raw_wwn[3], 13183 gan_resp->gan_pwwn.raw_wwn[4], 13184 gan_resp->gan_pwwn.raw_wwn[5], 13185 gan_resp->gan_pwwn.raw_wwn[6], 13186 gan_resp->gan_pwwn.raw_wwn[7], 13187 13188 gan_resp->gan_nwwn.raw_wwn[0], 13189 gan_resp->gan_nwwn.raw_wwn[1], 13190 gan_resp->gan_nwwn.raw_wwn[2], 13191 gan_resp->gan_nwwn.raw_wwn[3], 13192 gan_resp->gan_nwwn.raw_wwn[4], 13193 gan_resp->gan_nwwn.raw_wwn[5], 13194 gan_resp->gan_nwwn.raw_wwn[6], 13195 gan_resp->gan_nwwn.raw_wwn[7]); 13196 13197 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&nwwn, 13198 (uint8_t *)&gan_resp->gan_nwwn, sizeof (nwwn), 13199 DDI_DEV_AUTOINCR); 13200 13201 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&pwwn, 13202 (uint8_t *)&gan_resp->gan_pwwn, sizeof (pwwn), 13203 DDI_DEV_AUTOINCR); 13204 13205 if (ns_cmd->ns_flags & FCTL_NS_CREATE_DEVICE && pd == NULL) { 13206 FP_TRACE(FP_NHEAD1(1, 0), "fp %x gan_handler create " 13207 "pd %x", port->fp_port_id.port_id, d_id.port_id); 13208 pd = fctl_create_remote_port(port, &nwwn, &pwwn, 13209 d_id.port_id, PD_PLOGI_INITIATOR, KM_NOSLEEP); 13210 } 13211 if (pd != NULL) { 13212 fp_stuff_device_with_gan(&pkt->pkt_resp_acc, 13213 pd, gan_resp); 13214 } 13215 13216 if (ns_cmd->ns_flags & FCTL_NS_GET_DEV_COUNT) { 13217 *((int *)ns_cmd->ns_data_buf) += 1; 13218 } 13219 13220 if (ns_cmd->ns_flags & FCTL_NS_FILL_NS_MAP) { 13221 ASSERT((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0); 13222 13223 if (ns_cmd->ns_flags & FCTL_NS_BUF_IS_USERLAND) { 13224 fc_port_dev_t *userbuf; 13225 13226 userbuf = ((fc_port_dev_t *) 13227 ns_cmd->ns_data_buf) + 13228 ns_cmd->ns_gan_index++; 13229 13230 userbuf->dev_did = d_id; 13231
13232 FC_GET_RSP(port, pkt->pkt_resp_acc, 13233 (uint8_t *)userbuf->dev_type, 13234 (uint8_t *)gan_resp->gan_fc4types, 13235 sizeof (userbuf->dev_type), 13236 DDI_DEV_AUTOINCR); 13237 13238 userbuf->dev_nwwn = nwwn; 13239 userbuf->dev_pwwn = pwwn; 13240 13241 if (pd != NULL) { 13242 mutex_enter(&pd->pd_mutex); 13243 userbuf->dev_state = pd->pd_state; 13244 userbuf->dev_hard_addr = 13245 pd->pd_hard_addr; 13246 mutex_exit(&pd->pd_mutex); 13247 } else { 13248 userbuf->dev_state = 13249 PORT_DEVICE_INVALID; 13250 } 13251 } else if (ns_cmd->ns_flags & 13252 FCTL_NS_BUF_IS_FC_PORTMAP) { 13253 fc_portmap_t *map; 13254 13255 map = ((fc_portmap_t *) 13256 ns_cmd->ns_data_buf) + 13257 ns_cmd->ns_gan_index++; 13258 13259 /* 13260 * First fill it like any new map 13261 * and update the port device info 13262 * below. 13263 */ 13264 fp_fillout_new_nsmap(port, &pkt->pkt_resp_acc, 13265 map, gan_resp, d_id.port_id); 13266 if (pd != NULL) { 13267 fctl_copy_portmap(map, pd); 13268 } else { 13269 map->map_state = PORT_DEVICE_INVALID; 13270 map->map_type = PORT_DEVICE_NOCHANGE; 13271 } 13272 } else { 13273 caddr_t dst_ptr; 13274 13275 dst_ptr = ns_cmd->ns_data_buf + 13276 (NS_GAN_RESP_LEN) * ns_cmd->ns_gan_index++; 13277 13278 FC_GET_RSP(port, pkt->pkt_resp_acc, 13279 (uint8_t *)dst_ptr, (uint8_t *)gan_resp, 13280 NS_GAN_RESP_LEN, DDI_DEV_AUTOINCR); 13281 } 13282 } else { 13283 ns_cmd->ns_gan_index++; 13284 } 13285 if (ns_cmd->ns_gan_index >= ns_cmd->ns_gan_max) { 13286 fp_iodone(cmd); 13287 return; 13288 } 13289 } 13290 13291 gan_req.pid = d_id; 13292 13293 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&gan_req, 13294 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 13295 sizeof (gan_req), DDI_DEV_AUTOINCR); 13296 13297 if (cmd->cmd_transport(port->fp_fca_handle, pkt) != FC_SUCCESS) { 13298 pkt->pkt_state = FC_PKT_TRAN_ERROR; 13299 fp_iodone(cmd); 13300 } else { 13301 mutex_enter(&port->fp_mutex); 13302 port->fp_out_fpcmds++; 13303 mutex_exit(&port->fp_mutex); 13304 } 13305 } 13306 13307 13308 /* 13309 * Handle NS Query interrupt 13310 */ 13311 static void 13312 fp_ns_query_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd) 13313 { 13314 fp_cmd_t *cmd; 13315 fc_local_port_t *port; 13316 caddr_t src_ptr; 13317 uint32_t xfer_len; 13318 13319 cmd = pkt->pkt_ulp_private; 13320 port = cmd->cmd_port; 13321 13322 xfer_len = ns_cmd->ns_resp_size; 13323 13324 FP_TRACE(FP_NHEAD1(1, 0), "NS Query response, cmd_code=%x, xfer_len=%x", 13325 ns_cmd->ns_cmd_code, xfer_len); 13326 13327 if (ns_cmd->ns_cmd_code == NS_GPN_ID) { 13328 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t); 13329 13330 FP_TRACE(FP_NHEAD1(6, 0), "GPN_ID results; %x %x %x %x %x", 13331 src_ptr[0], src_ptr[1], src_ptr[2], src_ptr[3], src_ptr[4]); 13332 } 13333 13334 if (xfer_len <= ns_cmd->ns_data_len) { 13335 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t); 13336 FC_GET_RSP(port, pkt->pkt_resp_acc, 13337 (uint8_t *)ns_cmd->ns_data_buf, 13338 (uint8_t *)src_ptr, xfer_len, DDI_DEV_AUTOINCR); 13339 } 13340 13341 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) { 13342 ASSERT(ns_cmd->ns_pd != NULL); 13343 13344 mutex_enter(&ns_cmd->ns_pd->pd_mutex); 13345 if (ns_cmd->ns_pd->pd_type == PORT_DEVICE_OLD) { 13346 ns_cmd->ns_pd->pd_type = PORT_DEVICE_NOCHANGE; 13347 } 13348 mutex_exit(&ns_cmd->ns_pd->pd_mutex); 13349 } 13350 13351 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) { 13352 fctl_free_ns_cmd(ns_cmd); 13353 ((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private = NULL; 13354 } 13355 fp_iodone(cmd); 13356 } 13357 13358 13359 /* 13360 * 
Handle unsolicited ADISC ELS request 13361 */ 13362 static void 13363 fp_handle_unsol_adisc(fc_local_port_t *port, fc_unsol_buf_t *buf, 13364 fc_remote_port_t *pd, job_request_t *job) 13365 { 13366 int rval; 13367 fp_cmd_t *cmd; 13368 13369 FP_TRACE(FP_NHEAD1(5, 0), "ADISC; port=%p, D_ID=%x state=%x, pd=%p", 13370 port, pd->pd_port_id.port_id, pd->pd_state, pd); 13371 mutex_enter(&pd->pd_mutex); 13372 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 13373 mutex_exit(&pd->pd_mutex); 13374 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 13375 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 13376 0, KM_SLEEP, pd); 13377 if (cmd != NULL) { 13378 fp_els_rjt_init(port, cmd, buf, 13379 FC_ACTION_NON_RETRYABLE, 13380 FC_REASON_INVALID_LINK_CTRL, job); 13381 13382 if (fp_sendcmd(port, cmd, 13383 port->fp_fca_handle) != FC_SUCCESS) { 13384 fp_free_pkt(cmd); 13385 } 13386 } 13387 } 13388 } else { 13389 mutex_exit(&pd->pd_mutex); 13390 /* 13391 * We may not have a hard address, but we 13392 * should still respond. Section 21.19.2 13393 * of FC-PH-2 essentially says that if an 13394 * NL_Port doesn't have a hard address, or if a port 13395 * does not have FC-AL capability, it shall report 13396 * zeroes in this field. 13397 */ 13398 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t), 13399 0, KM_SLEEP, pd); 13400 if (cmd == NULL) { 13401 return; 13402 } 13403 fp_adisc_acc_init(port, cmd, buf, job); 13404 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 13405 if (rval != FC_SUCCESS) { 13406 fp_free_pkt(cmd); 13407 } 13408 } 13409 } 13410 13411 13412 /* 13413 * Initialize ADISC response. 13414 */ 13415 static void 13416 fp_adisc_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 13417 job_request_t *job) 13418 { 13419 fc_packet_t *pkt; 13420 la_els_adisc_t payload; 13421 13422 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 13423 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 13424 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 13425 cmd->cmd_retry_count = 1; 13426 cmd->cmd_ulp_pkt = NULL; 13427 13428 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 13429 cmd->cmd_job = job; 13430 13431 pkt = &cmd->cmd_pkt; 13432 13433 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 13434 13435 payload.ls_code.ls_code = LA_ELS_ACC; 13436 payload.ls_code.mbz = 0; 13437 13438 mutex_enter(&port->fp_mutex); 13439 payload.nport_id = port->fp_port_id; 13440 payload.hard_addr = port->fp_hard_addr; 13441 mutex_exit(&port->fp_mutex); 13442 13443 payload.port_wwn = port->fp_service_params.nport_ww_name; 13444 payload.node_wwn = port->fp_service_params.node_ww_name; 13445 13446 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 13447 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 13448 } 13449 13450 13451 /* 13452 * Hold and install the requested ULP drivers 13453 */ 13454 static void 13455 fp_load_ulp_modules(dev_info_t *dip, fc_local_port_t *port) 13456 { 13457 int len; 13458 int count; 13459 int data_len; 13460 major_t ulp_major; 13461 caddr_t ulp_name; 13462 caddr_t data_ptr; 13463 caddr_t data_buf; 13464 13465 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13466 13467 data_buf = NULL; 13468 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, 13469 DDI_PROP_DONTPASS, "load-ulp-list", 13470 (caddr_t)&data_buf, &data_len) != DDI_PROP_SUCCESS) { 13471 return; 13472 } 13473 13474 len = strlen(data_buf); 13475 port->fp_ulp_nload = fctl_atoi(data_buf, 10); 13476 13477 data_ptr = data_buf + len + 1; 13478 for (count = 0; count < port->fp_ulp_nload; count++) { 13479 len = strlen(data_ptr) + 1;
13480 ulp_name = kmem_zalloc(len, KM_SLEEP); 13481 bcopy(data_ptr, ulp_name, len); 13482 13483 ulp_major = ddi_name_to_major(ulp_name); 13484 13485 if (ulp_major != (major_t)-1) { 13486 if (modload("drv", ulp_name) < 0) { 13487 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 13488 0, NULL, "failed to load %s", 13489 ulp_name); 13490 } 13491 } else { 13492 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 13493 "%s isn't a valid driver", ulp_name); 13494 } 13495 13496 kmem_free(ulp_name, len); 13497 data_ptr += len; /* Skip to next field */ 13498 } 13499 13500 /* 13501 * Free the memory allocated by DDI 13502 */ 13503 if (data_buf != NULL) { 13504 kmem_free(data_buf, data_len); 13505 } 13506 } 13507 13508 13509 /* 13510 * Perform LOGO operation 13511 */ 13512 static int 13513 fp_logout(fc_local_port_t *port, fc_remote_port_t *pd, job_request_t *job) 13514 { 13515 int rval; 13516 fp_cmd_t *cmd; 13517 13518 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13519 ASSERT(!MUTEX_HELD(&pd->pd_mutex)); 13520 13521 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t), 13522 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd); 13523 13524 mutex_enter(&port->fp_mutex); 13525 mutex_enter(&pd->pd_mutex); 13526 13527 ASSERT(pd->pd_state == PORT_DEVICE_LOGGED_IN); 13528 ASSERT(pd->pd_login_count == 1); 13529 13530 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 13531 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 13532 cmd->cmd_flags = 0; 13533 cmd->cmd_retry_count = 1; 13534 cmd->cmd_ulp_pkt = NULL; 13535 13536 fp_logo_init(pd, cmd, job); 13537 13538 mutex_exit(&pd->pd_mutex); 13539 mutex_exit(&port->fp_mutex); 13540 13541 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 13542 if (rval != FC_SUCCESS) { 13543 fp_iodone(cmd); 13544 } 13545 13546 return (rval); 13547 } 13548 13549 13550 /* 13551 * Perform Port attach callbacks to registered ULPs 13552 */ 13553 static void 13554 fp_attach_ulps(fc_local_port_t *port, fc_attach_cmd_t cmd) 13555 { 13556 fp_soft_attach_t *att; 13557 13558 att = kmem_zalloc(sizeof (*att), KM_SLEEP); 13559 att->att_cmd = cmd; 13560 att->att_port = port; 13561 13562 /* 13563 * We need to remember whether or not fctl_busy_port 13564 * succeeded so we know whether or not to call 13565 * fctl_idle_port when the task is complete. 13566 */ 13567 13568 if (fctl_busy_port(port) == 0) { 13569 att->att_need_pm_idle = B_TRUE; 13570 } else { 13571 att->att_need_pm_idle = B_FALSE; 13572 } 13573 13574 (void) taskq_dispatch(port->fp_taskq, fp_ulp_port_attach, 13575 att, KM_SLEEP); 13576 } 13577 13578 13579 /* 13580 * Forward state change notifications on to interested ULPs. 13581 * Spawns a call to fctl_ulp_statec_cb() in a taskq thread to do all the 13582 * real work. 
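 *
 * Note that the change list built below carries only the new port state
 * and topology; clist_len, clist_size and clist_map are left zero/NULL,
 * so the ULPs receive a pure state change with no per-device map entries.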
13583 */ 13584 static int 13585 fp_ulp_notify(fc_local_port_t *port, uint32_t statec, int sleep) 13586 { 13587 fc_port_clist_t *clist; 13588 13589 clist = kmem_zalloc(sizeof (*clist), sleep); 13590 if (clist == NULL) { 13591 return (FC_NOMEM); 13592 } 13593 13594 clist->clist_state = statec; 13595 13596 mutex_enter(&port->fp_mutex); 13597 clist->clist_flags = port->fp_topology; 13598 mutex_exit(&port->fp_mutex); 13599 13600 clist->clist_port = (opaque_t)port; 13601 clist->clist_len = 0; 13602 clist->clist_size = 0; 13603 clist->clist_map = NULL; 13604 13605 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, 13606 clist, KM_SLEEP); 13607 13608 return (FC_SUCCESS); 13609 } 13610 13611 13612 /* 13613 * Get name server map 13614 */ 13615 static int 13616 fp_ns_getmap(fc_local_port_t *port, job_request_t *job, fc_portmap_t **map, 13617 uint32_t *len, uint32_t sid) 13618 { 13619 int ret; 13620 fctl_ns_req_t *ns_cmd; 13621 13622 /* 13623 * Don't let the allocator do anything for the response; 13624 * we already have a buffer ready to fill out. 13625 */ 13626 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 13627 sizeof (ns_resp_gan_t), 0, (FCTL_NS_FILL_NS_MAP | 13628 FCTL_NS_BUF_IS_FC_PORTMAP), KM_SLEEP); 13629 13630 ns_cmd->ns_data_len = sizeof (**map) * (*len); 13631 ns_cmd->ns_data_buf = (caddr_t)*map; 13632 13633 ASSERT(ns_cmd != NULL); 13634 13635 ns_cmd->ns_gan_index = 0; 13636 ns_cmd->ns_gan_sid = sid; 13637 ns_cmd->ns_cmd_code = NS_GA_NXT; 13638 ns_cmd->ns_gan_max = *len; 13639 13640 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 13641 13642 if (ns_cmd->ns_gan_index != *len) { 13643 *len = ns_cmd->ns_gan_index; 13644 } 13645 ns_cmd->ns_data_len = 0; 13646 ns_cmd->ns_data_buf = NULL; 13647 fctl_free_ns_cmd(ns_cmd); 13648 13649 return (ret); 13650 } 13651 13652 13653 /* 13654 * Create a remote port in Fabric topology by using NS services 13655 */ 13656 static fc_remote_port_t * 13657 fp_create_remote_port_by_ns(fc_local_port_t *port, uint32_t d_id, int sleep) 13658 { 13659 int rval; 13660 job_request_t *job; 13661 fctl_ns_req_t *ns_cmd; 13662 fc_remote_port_t *pd; 13663 13664 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13665 13666 FP_TRACE(FP_NHEAD1(1, 0), "PD creation begin; port=%p, d_id=%x", 13667 port, d_id); 13668 13669 #ifdef DEBUG 13670 mutex_enter(&port->fp_mutex); 13671 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology)); 13672 mutex_exit(&port->fp_mutex); 13673 #endif 13674 13675 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, (opaque_t)port, sleep); 13676 if (job == NULL) { 13677 return (NULL); 13678 } 13679 13680 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 13681 sizeof (ns_resp_gan_t), 0, (FCTL_NS_CREATE_DEVICE | 13682 FCTL_NS_NO_DATA_BUF), sleep); 13683 if (ns_cmd == NULL) { 13684 return (NULL); 13685 } 13686 13687 job->job_result = FC_SUCCESS; 13688 ns_cmd->ns_gan_max = 1; 13689 ns_cmd->ns_cmd_code = NS_GA_NXT; 13690 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 13691 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id - 1; 13692 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 13693 13694 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 13695 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 13696 fctl_free_ns_cmd(ns_cmd); 13697 13698 if (rval != FC_SUCCESS || job->job_result != FC_SUCCESS) { 13699 fctl_dealloc_job(job); 13700 return (NULL); 13701 } 13702 fctl_dealloc_job(job); 13703 13704 pd = fctl_get_remote_port_by_did(port, d_id); 13705 13706 FP_TRACE(FP_NHEAD1(1, 0), "PD creation end; port=%p, d_id=%x, pd=%p", 13707 port, d_id, pd); 13708 13709 return (pd);
13710 } 13711 13712 13713 /* 13714 * Check the permissions on an ioctl command. If it requires an 13715 * EXCLUSIVE open to have been performed, return FAILURE to shut the door on it. If 13716 * the ioctl command isn't in the list built, shut the door on that too. 13717 * 13718 * Certain ioctls perform hardware accesses in FCA drivers, and we need 13719 * to make sure that users open the port for exclusive access while 13720 * performing those operations. 13721 * 13722 * This can prevent a casual user from inflicting damage on the port by 13723 * sending these ioctls from multiple processes/threads (there is no good 13724 * reason why one would need to do that) without actually realizing how 13725 * expensive such commands could turn out to be. 13726 * 13727 * It is also important to note that, even with exclusive access, 13728 * multiple threads can share the same file descriptor and fire down 13729 * commands in parallel. To prevent that, the driver needs to make sure 13730 * that such commands aren't in progress already. This is taken care of 13731 * in the FP_EXCL_BUSY bit of fp_flag. 13732 */ 13733 static int 13734 fp_check_perms(uchar_t open_flag, uint16_t ioctl_cmd) 13735 { 13736 int ret = FC_FAILURE; 13737 int count; 13738 13739 for (count = 0; 13740 count < sizeof (fp_perm_list) / sizeof (fp_perm_list[0]); 13741 count++) { 13742 if (fp_perm_list[count].fp_ioctl_cmd == ioctl_cmd) { 13743 if (fp_perm_list[count].fp_open_flag & open_flag) { 13744 ret = FC_SUCCESS; 13745 } 13746 break; 13747 } 13748 } 13749 13750 return (ret); 13751 } 13752 13753 13754 /* 13755 * Bind Port driver's unsolicited, state change callbacks 13756 */ 13757 static int 13758 fp_bind_callbacks(fc_local_port_t *port) 13759 { 13760 fc_fca_bind_info_t bind_info = {0}; 13761 fc_fca_port_info_t *port_info; 13762 int rval = DDI_SUCCESS; 13763 uint16_t class; 13764 int node_namelen, port_namelen; 13765 char *nname = NULL, *pname = NULL; 13766 13767 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13768 13769 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip, 13770 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 13771 "node-name", &nname) != DDI_PROP_SUCCESS) { 13772 FP_TRACE(FP_NHEAD1(1, 0), 13773 "fp_bind_callback failed to get node-name"); 13774 } 13775 if (nname) { 13776 fc_str_to_wwn(nname, &(bind_info.port_nwwn)); 13777 } 13778 13779 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip, 13780 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 13781 "port-name", &pname) != DDI_PROP_SUCCESS) { 13782 FP_TRACE(FP_NHEAD1(1, 0), 13783 "fp_bind_callback failed to get port-name"); 13784 } 13785 if (pname) { 13786 fc_str_to_wwn(pname, &(bind_info.port_pwwn)); 13787 } 13788 13789 if (port->fp_npiv_type == FC_NPIV_PORT) { 13790 bind_info.port_npiv = 1; 13791 } 13792 13793 /* 13794 * fca_bind_port returns the FCA driver's handle for the local 13795 * port instance. If the port number isn't supported it returns NULL. 13796 * It also sets up callbacks in the FCA for various 13797 * things like state changes, ELS, etc.
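 *
 * As a rough sketch of the contract assumed here: the FCA fills in
 * port_info (login parameters, hard address, port attributes and the
 * initial port state), and the opaque handle it returns is stashed in
 * fp_fca_handle and passed back on every later fca_transport() or
 * fca_els_send() call made on behalf of this port.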
13798 */ 13799 bind_info.port_statec_cb = fp_statec_cb; 13800 bind_info.port_unsol_cb = fp_unsol_cb; 13801 bind_info.port_num = port->fp_port_num; 13802 bind_info.port_handle = (opaque_t)port; 13803 13804 port_info = kmem_zalloc(sizeof (*port_info), KM_SLEEP); 13805 13806 /* 13807 * Hold the port driver mutex as the callbacks are bound until the 13808 * service parameters are properly filled in (in order to be able to 13809 * properly respond to unsolicited ELS requests) 13810 */ 13811 mutex_enter(&port->fp_mutex); 13812 13813 port->fp_fca_handle = port->fp_fca_tran->fca_bind_port( 13814 port->fp_fca_dip, port_info, &bind_info); 13815 13816 if (port->fp_fca_handle == NULL) { 13817 rval = DDI_FAILURE; 13818 goto exit; 13819 } 13820 13821 /* 13822 * Only fcoei will set this bit 13823 */ 13824 if (port_info->pi_port_state & FC_STATE_FCA_IS_NODMA) { 13825 port->fp_soft_state |= FP_SOFT_FCA_IS_NODMA; 13826 port_info->pi_port_state &= ~(FC_STATE_FCA_IS_NODMA); 13827 } 13828 13829 port->fp_bind_state = port->fp_state = port_info->pi_port_state; 13830 port->fp_service_params = port_info->pi_login_params; 13831 port->fp_hard_addr = port_info->pi_hard_addr; 13832 13833 /* Copy from the FCA structure to the FP structure */ 13834 port->fp_hba_port_attrs = port_info->pi_attrs; 13835 13836 if (port_info->pi_rnid_params.status == FC_SUCCESS) { 13837 port->fp_rnid_init = 1; 13838 bcopy(&port_info->pi_rnid_params.params, 13839 &port->fp_rnid_params, 13840 sizeof (port->fp_rnid_params)); 13841 } else { 13842 port->fp_rnid_init = 0; 13843 } 13844 13845 node_namelen = strlen((char *)&port_info->pi_attrs.sym_node_name); 13846 if (node_namelen) { 13847 bcopy(&port_info->pi_attrs.sym_node_name, 13848 &port->fp_sym_node_name, 13849 node_namelen); 13850 port->fp_sym_node_namelen = node_namelen; 13851 } 13852 port_namelen = strlen((char *)&port_info->pi_attrs.sym_port_name); 13853 if (port_namelen) { 13854 bcopy(&port_info->pi_attrs.sym_port_name, 13855 &port->fp_sym_port_name, 13856 port_namelen); 13857 port->fp_sym_port_namelen = port_namelen; 13858 } 13859 13860 /* zero out the normally unused fields right away */ 13861 port->fp_service_params.ls_code.mbz = 0; 13862 port->fp_service_params.ls_code.ls_code = 0; 13863 bzero(&port->fp_service_params.reserved, 13864 sizeof (port->fp_service_params.reserved)); 13865 13866 class = port_info->pi_login_params.class_1.class_opt; 13867 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS1 : 0; 13868 13869 class = port_info->pi_login_params.class_2.class_opt; 13870 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS2 : 0; 13871 13872 class = port_info->pi_login_params.class_3.class_opt; 13873 port->fp_cos |= (class & 0x8000) ? 
FC_NS_CLASS3 : 0; 13874 13875 exit: 13876 if (nname) { 13877 ddi_prop_free(nname); 13878 } 13879 if (pname) { 13880 ddi_prop_free(pname); 13881 } 13882 mutex_exit(&port->fp_mutex); 13883 kmem_free(port_info, sizeof (*port_info)); 13884 13885 return (rval); 13886 } 13887 13888 13889 /* 13890 * Retrieve FCA capabilities 13891 */ 13892 static void 13893 fp_retrieve_caps(fc_local_port_t *port) 13894 { 13895 int rval; 13896 int ub_count; 13897 fc_fcp_dma_t fcp_dma; 13898 fc_reset_action_t action; 13899 fc_dma_behavior_t dma_behavior; 13900 13901 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13902 13903 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13904 FC_CAP_UNSOL_BUF, &ub_count); 13905 13906 switch (rval) { 13907 case FC_CAP_FOUND: 13908 case FC_CAP_SETTABLE: 13909 switch (ub_count) { 13910 case 0: 13911 break; 13912 13913 case -1: 13914 ub_count = fp_unsol_buf_count; 13915 break; 13916 13917 default: 13918 /* 1/4th of total buffers is my share */ 13919 ub_count = 13920 (ub_count / port->fp_fca_tran->fca_numports) >> 2; 13921 break; 13922 } 13923 break; 13924 13925 default: 13926 ub_count = 0; 13927 break; 13928 } 13929 13930 mutex_enter(&port->fp_mutex); 13931 port->fp_ub_count = ub_count; 13932 mutex_exit(&port->fp_mutex); 13933 13934 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13935 FC_CAP_POST_RESET_BEHAVIOR, &action); 13936 13937 switch (rval) { 13938 case FC_CAP_FOUND: 13939 case FC_CAP_SETTABLE: 13940 switch (action) { 13941 case FC_RESET_RETURN_NONE: 13942 case FC_RESET_RETURN_ALL: 13943 case FC_RESET_RETURN_OUTSTANDING: 13944 break; 13945 13946 default: 13947 action = FC_RESET_RETURN_NONE; 13948 break; 13949 } 13950 break; 13951 13952 default: 13953 action = FC_RESET_RETURN_NONE; 13954 break; 13955 } 13956 mutex_enter(&port->fp_mutex); 13957 port->fp_reset_action = action; 13958 mutex_exit(&port->fp_mutex); 13959 13960 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13961 FC_CAP_NOSTREAM_ON_UNALIGN_BUF, &dma_behavior); 13962 13963 switch (rval) { 13964 case FC_CAP_FOUND: 13965 switch (dma_behavior) { 13966 case FC_ALLOW_STREAMING: 13967 /* FALLTHROUGH */ 13968 case FC_NO_STREAMING: 13969 break; 13970 13971 default: 13972 /* 13973 * If capability was found and the value 13974 * was incorrect assume the worst 13975 */ 13976 dma_behavior = FC_NO_STREAMING; 13977 break; 13978 } 13979 break; 13980 13981 default: 13982 /* 13983 * If capability was not defined - allow streaming; existing 13984 * FCAs should not be affected. 13985 */ 13986 dma_behavior = FC_ALLOW_STREAMING; 13987 break; 13988 } 13989 mutex_enter(&port->fp_mutex); 13990 port->fp_dma_behavior = dma_behavior; 13991 mutex_exit(&port->fp_mutex); 13992 13993 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13994 FC_CAP_FCP_DMA, &fcp_dma); 13995 13996 if (rval != FC_CAP_FOUND || (fcp_dma != FC_NO_DVMA_SPACE && 13997 fcp_dma != FC_DVMA_SPACE)) { 13998 fcp_dma = FC_DVMA_SPACE; 13999 } 14000 14001 mutex_enter(&port->fp_mutex); 14002 port->fp_fcp_dma = fcp_dma; 14003 mutex_exit(&port->fp_mutex); 14004 } 14005 14006 14007 /* 14008 * Handle Domain, Area changes in the Fabric. 
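 *
 * As a concrete (assumed) example of the (id, mask) pair: an RSCN that
 * affects a whole area would typically arrive here as id = 0x0A0B00 with
 * mask = 0xFFFF00, while a domain-format RSCN would use mask = 0xFF0000;
 * every remote port whose (pd_port_id & mask) == id is then revalidated
 * against the name server below.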
14009 */ 14010 static void 14011 fp_validate_area_domain(fc_local_port_t *port, uint32_t id, uint32_t mask, 14012 job_request_t *job, int sleep) 14013 { 14014 #ifdef DEBUG 14015 uint32_t dcnt; 14016 #endif 14017 int rval; 14018 int send; 14019 int index; 14020 int listindex; 14021 int login; 14022 int job_flags; 14023 char ww_name[17]; 14024 uint32_t d_id; 14025 uint32_t count; 14026 fctl_ns_req_t *ns_cmd; 14027 fc_portmap_t *list; 14028 fc_orphan_t *orp; 14029 fc_orphan_t *norp; 14030 fc_orphan_t *prev; 14031 fc_remote_port_t *pd; 14032 fc_remote_port_t *npd; 14033 struct pwwn_hash *head; 14034 14035 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 14036 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 14037 0, sleep); 14038 if (ns_cmd == NULL) { 14039 mutex_enter(&port->fp_mutex); 14040 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 14041 --port->fp_rscn_count; 14042 } 14043 mutex_exit(&port->fp_mutex); 14044 14045 return; 14046 } 14047 ns_cmd->ns_cmd_code = NS_GID_PN; 14048 14049 /* 14050 * We need to get a new count of devices from the 14051 * name server, which will also create any new devices 14052 * as needed. 14053 */ 14054 14055 (void) fp_ns_get_devcount(port, job, 1, sleep); 14056 14057 FP_TRACE(FP_NHEAD1(3, 0), 14058 "fp_validate_area_domain: get_devcount found %d devices", 14059 port->fp_total_devices); 14060 14061 mutex_enter(&port->fp_mutex); 14062 14063 for (count = index = 0; index < pwwn_table_size; index++) { 14064 head = &port->fp_pwwn_table[index]; 14065 pd = head->pwwn_head; 14066 while (pd != NULL) { 14067 mutex_enter(&pd->pd_mutex); 14068 if (pd->pd_flags != PD_ELS_IN_PROGRESS) { 14069 if ((pd->pd_port_id.port_id & mask) == id && 14070 pd->pd_recepient == PD_PLOGI_INITIATOR) { 14071 count++; 14072 pd->pd_type = PORT_DEVICE_OLD; 14073 pd->pd_flags = PD_ELS_MARK; 14074 } 14075 } 14076 mutex_exit(&pd->pd_mutex); 14077 pd = pd->pd_wwn_hnext; 14078 } 14079 } 14080 14081 #ifdef DEBUG 14082 dcnt = count; 14083 #endif /* DEBUG */ 14084 14085 /* 14086 * Since port->fp_orphan_count is declared an 'int' it is 14087 * theoretically possible that the count could go negative. 14088 * 14089 * This would be bad and if that happens we really do want 14090 * to know. 14091 */ 14092 14093 ASSERT(port->fp_orphan_count >= 0); 14094 14095 count += port->fp_orphan_count; 14096 14097 /* 14098 * We add the port->fp_total_devices value to the count 14099 * in the case where our port is newly attached. This is 14100 * because we haven't done any discovery and we don't have 14101 * any orphans in the port's orphan list. If we do not do 14102 * this addition to count then we won't alloc enough kmem 14103 * to do discovery with. 
14104 */ 14105 14106 if (count == 0) { 14107 count += port->fp_total_devices; 14108 FP_TRACE(FP_NHEAD1(3, 0), "fp_validate_area_domain: " 14109 "0x%x orphans found, using 0x%x", 14110 port->fp_orphan_count, count); 14111 } 14112 14113 mutex_exit(&port->fp_mutex); 14114 14115 /* 14116 * Allocate the change list 14117 */ 14118 14119 list = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep); 14120 if (list == NULL) { 14121 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 14122 " Not enough memory to service RSCNs" 14123 " for %d ports, continuing...", count); 14124 14125 fctl_free_ns_cmd(ns_cmd); 14126 14127 mutex_enter(&port->fp_mutex); 14128 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 14129 --port->fp_rscn_count; 14130 } 14131 mutex_exit(&port->fp_mutex); 14132 14133 return; 14134 } 14135 14136 /* 14137 * Attempt to validate or invalidate the devices that were 14138 * already in the pwwn hash table. 14139 */ 14140 14141 mutex_enter(&port->fp_mutex); 14142 for (listindex = 0, index = 0; index < pwwn_table_size; index++) { 14143 head = &port->fp_pwwn_table[index]; 14144 npd = head->pwwn_head; 14145 14146 while ((pd = npd) != NULL) { 14147 npd = pd->pd_wwn_hnext; 14148 14149 mutex_enter(&pd->pd_mutex); 14150 if ((pd->pd_port_id.port_id & mask) == id && 14151 pd->pd_flags == PD_ELS_MARK) { 14152 la_wwn_t *pwwn; 14153 14154 job->job_result = FC_SUCCESS; 14155 14156 ((ns_req_gid_pn_t *) 14157 (ns_cmd->ns_cmd_buf))->pwwn = 14158 pd->pd_port_name; 14159 14160 pwwn = &pd->pd_port_name; 14161 d_id = pd->pd_port_id.port_id; 14162 14163 mutex_exit(&pd->pd_mutex); 14164 mutex_exit(&port->fp_mutex); 14165 14166 rval = fp_ns_query(port, ns_cmd, job, 1, 14167 sleep); 14168 if (rval != FC_SUCCESS) { 14169 fc_wwn_to_str(pwwn, ww_name); 14170 14171 FP_TRACE(FP_NHEAD1(3, 0), 14172 "AREA RSCN: PD disappeared; " 14173 "d_id=%x, PWWN=%s", d_id, ww_name); 14174 14175 FP_TRACE(FP_NHEAD2(9, 0), 14176 "N_x Port with D_ID=%x," 14177 " PWWN=%s disappeared from fabric", 14178 d_id, ww_name); 14179 14180 fp_fillout_old_map(list + listindex++, 14181 pd, 1); 14182 } else { 14183 fctl_copy_portmap(list + listindex++, 14184 pd); 14185 14186 mutex_enter(&pd->pd_mutex); 14187 pd->pd_flags = PD_ELS_IN_PROGRESS; 14188 mutex_exit(&pd->pd_mutex); 14189 } 14190 14191 mutex_enter(&port->fp_mutex); 14192 } else { 14193 mutex_exit(&pd->pd_mutex); 14194 } 14195 } 14196 } 14197 14198 mutex_exit(&port->fp_mutex); 14199 14200 ASSERT(listindex == dcnt); 14201 14202 job->job_counter = listindex; 14203 job_flags = job->job_flags; 14204 job->job_flags |= JOB_TYPE_FP_ASYNC; 14205 14206 /* 14207 * Login (if we were the initiator) or validate devices in the 14208 * port map. 14209 */ 14210 14211 for (index = 0; index < listindex; index++) { 14212 pd = list[index].map_pd; 14213 14214 mutex_enter(&pd->pd_mutex); 14215 ASSERT((pd->pd_port_id.port_id & mask) == id); 14216 14217 if (pd->pd_flags != PD_ELS_IN_PROGRESS) { 14218 ASSERT(pd->pd_type == PORT_DEVICE_OLD); 14219 mutex_exit(&pd->pd_mutex); 14220 fp_jobdone(job); 14221 continue; 14222 } 14223 14224 login = (pd->pd_state == PORT_DEVICE_LOGGED_IN) ? 1 : 0; 14225 send = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 14226 d_id = pd->pd_port_id.port_id; 14227 mutex_exit(&pd->pd_mutex); 14228 14229 if ((d_id & mask) == id && send) { 14230 if (login) { 14231 FP_TRACE(FP_NHEAD1(6, 0), 14232 "RSCN and PLOGI request;" 14233 " pd=%p, job=%p d_id=%x, index=%d", pd, 14234 job, d_id, index); 14235 14236 rval = fp_port_login(port, d_id, job, 14237 FP_CMD_PLOGI_RETAIN, sleep, pd, NULL); 14238 if (rval != FC_SUCCESS) { 14239 mutex_enter(&pd->pd_mutex); 14240 pd->pd_flags = PD_IDLE; 14241 mutex_exit(&pd->pd_mutex); 14242 14243 job->job_result = rval; 14244 fp_jobdone(job); 14245 } 14246 FP_TRACE(FP_NHEAD1(1, 0), 14247 "PLOGI succeeded:no skip(1) for " 14248 "D_ID %x", d_id); 14249 list[index].map_flags |= 14250 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14251 } else { 14252 FP_TRACE(FP_NHEAD1(6, 0), "RSCN and NS request;" 14253 " pd=%p, job=%p d_id=%x, index=%d", pd, 14254 job, d_id, index); 14255 14256 rval = fp_ns_validate_device(port, pd, job, 14257 0, sleep); 14258 if (rval != FC_SUCCESS) { 14259 fp_jobdone(job); 14260 } 14261 mutex_enter(&pd->pd_mutex); 14262 pd->pd_flags = PD_IDLE; 14263 mutex_exit(&pd->pd_mutex); 14264 } 14265 } else { 14266 FP_TRACE(FP_NHEAD1(6, 0), 14267 "RSCN and NO request sent; pd=%p," 14268 " d_id=%x, index=%d", pd, d_id, index); 14269 14270 mutex_enter(&pd->pd_mutex); 14271 pd->pd_flags = PD_IDLE; 14272 mutex_exit(&pd->pd_mutex); 14273 14274 fp_jobdone(job); 14275 } 14276 } 14277 14278 if (listindex) { 14279 fctl_jobwait(job); 14280 } 14281 job->job_flags = job_flags; 14282 14283 /* 14284 * Orphan list validation. 14285 */ 14286 mutex_enter(&port->fp_mutex); 14287 for (prev = NULL, orp = port->fp_orphan_list; port->fp_orphan_count && 14288 orp != NULL; orp = norp) { 14289 norp = orp->orp_next; 14290 mutex_exit(&port->fp_mutex); 14291 14292 job->job_counter = 1; 14293 job->job_result = FC_SUCCESS; 14294 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 14295 14296 ((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = orp->orp_pwwn; 14297 14298 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0; 14299 ((ns_resp_gid_pn_t *) 14300 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0; 14301 14302 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 14303 if (rval == FC_SUCCESS) { 14304 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 14305 pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP); 14306 if (pd != NULL) { 14307 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 14308 14309 FP_TRACE(FP_NHEAD1(6, 0), 14310 "RSCN and ORPHAN list " 14311 "success; d_id=%x, PWWN=%s", d_id, ww_name); 14312 14313 FP_TRACE(FP_NHEAD2(6, 0), 14314 "N_x Port with D_ID=%x, PWWN=%s reappeared" 14315 " in fabric", d_id, ww_name); 14316 14317 mutex_enter(&port->fp_mutex); 14318 if (prev) { 14319 prev->orp_next = orp->orp_next; 14320 } else { 14321 ASSERT(orp == port->fp_orphan_list); 14322 port->fp_orphan_list = orp->orp_next; 14323 } 14324 port->fp_orphan_count--; 14325 mutex_exit(&port->fp_mutex); 14326 14327 kmem_free(orp, sizeof (*orp)); 14328 fctl_copy_portmap(list + listindex++, pd); 14329 } else { 14330 prev = orp; 14331 } 14332 } else { 14333 prev = orp; 14334 } 14335 mutex_enter(&port->fp_mutex); 14336 } 14337 mutex_exit(&port->fp_mutex); 14338 14339 /* 14340 * One more pass through the list to delist old devices from 14341 * the d_id and pwwn tables and possibly add to the orphan list. 
14342 */ 14343 14344 for (index = 0; index < listindex; index++) { 14345 pd = list[index].map_pd; 14346 ASSERT(pd != NULL); 14347 14348 /* 14349 * Update PLOGI results; For NS validation 14350 * of orphan list, it is redundant 14351 * 14352 * Take care to preserve PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY if 14353 * appropriate as fctl_copy_portmap() will clear map_flags. 14354 */ 14355 if (list[index].map_flags & 14356 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) { 14357 fctl_copy_portmap(list + index, pd); 14358 list[index].map_flags |= 14359 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14360 } else { 14361 fctl_copy_portmap(list + index, pd); 14362 } 14363 14364 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN " 14365 "results; pd=%p, d_id=%x pwwn=%x %x %x %x %x %x %x %x", 14366 pd, pd->pd_port_id.port_id, 14367 pd->pd_port_name.raw_wwn[0], 14368 pd->pd_port_name.raw_wwn[1], 14369 pd->pd_port_name.raw_wwn[2], 14370 pd->pd_port_name.raw_wwn[3], 14371 pd->pd_port_name.raw_wwn[4], 14372 pd->pd_port_name.raw_wwn[5], 14373 pd->pd_port_name.raw_wwn[6], 14374 pd->pd_port_name.raw_wwn[7]); 14375 14376 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN " 14377 "results continued, pd=%p type=%x, flags=%x, state=%x", 14378 pd, pd->pd_type, pd->pd_flags, pd->pd_state); 14379 14380 mutex_enter(&pd->pd_mutex); 14381 if (pd->pd_type == PORT_DEVICE_OLD) { 14382 int initiator; 14383 14384 pd->pd_flags = PD_IDLE; 14385 initiator = (pd->pd_recepient == 14386 PD_PLOGI_INITIATOR) ? 1 : 0; 14387 14388 mutex_exit(&pd->pd_mutex); 14389 14390 mutex_enter(&port->fp_mutex); 14391 mutex_enter(&pd->pd_mutex); 14392 14393 pd->pd_state = PORT_DEVICE_INVALID; 14394 fctl_delist_did_table(port, pd); 14395 fctl_delist_pwwn_table(port, pd); 14396 14397 mutex_exit(&pd->pd_mutex); 14398 mutex_exit(&port->fp_mutex); 14399 14400 if (initiator) { 14401 (void) fctl_add_orphan(port, pd, sleep); 14402 } 14403 list[index].map_pd = pd; 14404 } else { 14405 ASSERT(pd->pd_flags == PD_IDLE); 14406 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 14407 /* 14408 * Reset LOGO tolerance to zero 14409 */ 14410 fctl_tc_reset(&pd->pd_logo_tc); 14411 } 14412 mutex_exit(&pd->pd_mutex); 14413 } 14414 } 14415 14416 if (ns_cmd) { 14417 fctl_free_ns_cmd(ns_cmd); 14418 } 14419 if (listindex) { 14420 (void) fp_ulp_devc_cb(port, list, listindex, count, 14421 sleep, 0); 14422 } else { 14423 kmem_free(list, sizeof (*list) * count); 14424 14425 mutex_enter(&port->fp_mutex); 14426 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 14427 --port->fp_rscn_count; 14428 } 14429 mutex_exit(&port->fp_mutex); 14430 } 14431 } 14432 14433 14434 /* 14435 * Work hard to make sense out of an RSCN page. 
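 *
 * The page is sorted into one of a few cases below, keyed on whether the
 * affected D_ID and the PWWN returned by GPN_ID map to known port devices:
 * the D_ID no longer resolves at all (the device left the fabric), both map
 * to the same PD (no change; revalidate or PLOGI again), neither is known
 * (a brand new device, possibly a returning orphan), only the PWWN is known
 * (the device moved to a new D_ID), only the D_ID is known (the D_ID now
 * has a new PWWN), or the two disagree (the 'weird' mismatch case at the
 * end).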
14436 */ 14437 static void 14438 fp_validate_rscn_page(fc_local_port_t *port, fc_affected_id_t *page, 14439 job_request_t *job, fctl_ns_req_t *ns_cmd, fc_portmap_t *listptr, 14440 int *listindex, int sleep) 14441 { 14442 int rval; 14443 char ww_name[17]; 14444 la_wwn_t *pwwn; 14445 fc_remote_port_t *pwwn_pd; 14446 fc_remote_port_t *did_pd; 14447 14448 did_pd = fctl_get_remote_port_by_did(port, page->aff_d_id); 14449 14450 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; " 14451 "port=%p, d_id=%x, pd=%p, rscn_count:0x%x", port, page->aff_d_id, 14452 did_pd, (uint32_t)(uintptr_t)job->job_cb_arg); 14453 14454 if (did_pd != NULL) { 14455 mutex_enter(&did_pd->pd_mutex); 14456 if (did_pd->pd_flags != PD_IDLE) { 14457 mutex_exit(&did_pd->pd_mutex); 14458 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page: " 14459 "PD is BUSY; port=%p, d_id=%x, pd=%p", 14460 port, page->aff_d_id, did_pd); 14461 return; 14462 } 14463 did_pd->pd_flags = PD_ELS_IN_PROGRESS; 14464 mutex_exit(&did_pd->pd_mutex); 14465 } 14466 14467 job->job_counter = 1; 14468 14469 pwwn = &((ns_resp_gpn_id_t *)ns_cmd->ns_data_buf)->pwwn; 14470 14471 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.port_id = page->aff_d_id; 14472 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.priv_lilp_posit = 0; 14473 14474 bzero(ns_cmd->ns_data_buf, sizeof (la_wwn_t)); 14475 rval = fp_ns_query(port, ns_cmd, job, 1, sleep); 14476 14477 FP_TRACE(FP_NHEAD1(1, 0), "NS Query Response for D_ID page; rev=%x," 14478 " in_id=%x, cmdrsp=%x, reason=%x, expln=%x", 14479 ns_cmd->ns_resp_hdr.ct_rev, ns_cmd->ns_resp_hdr.ct_inid, 14480 ns_cmd->ns_resp_hdr.ct_cmdrsp, ns_cmd->ns_resp_hdr.ct_reason, 14481 ns_cmd->ns_resp_hdr.ct_expln); 14482 14483 job->job_counter = 1; 14484 14485 if (rval != FC_SUCCESS || fctl_is_wwn_zero(pwwn) == FC_SUCCESS) { 14486 /* 14487 * What this means is that the D_ID 14488 * disappeared from the Fabric. 14489 */ 14490 if (did_pd == NULL) { 14491 FP_TRACE(FP_NHEAD1(1, 0), "RSCN with D_ID page;" 14492 " NULL PD disappeared, rval=%x", rval); 14493 return; 14494 } 14495 14496 fc_wwn_to_str(&did_pd->pd_port_name, ww_name); 14497 14498 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14499 (uint32_t)(uintptr_t)job->job_cb_arg; 14500 14501 fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0); 14502 14503 FP_TRACE(FP_NHEAD1(3, 0), "RSCN: PD disappeared; " 14504 "d_id=%x, PWWN=%s", page->aff_d_id, ww_name); 14505 14506 FP_TRACE(FP_NHEAD2(9, 0), 14507 "GPN_ID for D_ID=%x failed", page->aff_d_id); 14508 14509 FP_TRACE(FP_NHEAD2(9, 0), 14510 "N_x Port with D_ID=%x, PWWN=%s disappeared from" 14511 " fabric", page->aff_d_id, ww_name); 14512 14513 mutex_enter(&did_pd->pd_mutex); 14514 did_pd->pd_flags = PD_IDLE; 14515 mutex_exit(&did_pd->pd_mutex); 14516 14517 FP_TRACE(FP_NHEAD1(3, 0), "RSCN with D_ID (%x) page; " 14518 "PD disappeared, pd=%p", page->aff_d_id, did_pd); 14519 14520 return; 14521 } 14522 14523 pwwn_pd = fctl_get_remote_port_by_pwwn(port, pwwn); 14524 14525 if (did_pd != NULL && pwwn_pd != NULL && did_pd == pwwn_pd) { 14526 /* 14527 * There is no change. Do PLOGI again and add it to 14528 * ULP portmap baggage and return. Note: When RSCNs 14529 * arrive with per page states, the need for PLOGI 14530 * can be determined correctly. 
14531 */ 14532 mutex_enter(&pwwn_pd->pd_mutex); 14533 pwwn_pd->pd_type = PORT_DEVICE_NOCHANGE; 14534 mutex_exit(&pwwn_pd->pd_mutex); 14535 14536 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14537 (uint32_t)(uintptr_t)job->job_cb_arg; 14538 14539 fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd); 14540 14541 mutex_enter(&pwwn_pd->pd_mutex); 14542 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14543 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14544 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name); 14545 mutex_exit(&pwwn_pd->pd_mutex); 14546 14547 rval = fp_port_login(port, page->aff_d_id, job, 14548 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL); 14549 if (rval == FC_SUCCESS) { 14550 fp_jobwait(job); 14551 rval = job->job_result; 14552 14553 /* 14554 * Reset LOGO tolerance to zero 14555 * Also we are the PLOGI initiator now. 14556 */ 14557 mutex_enter(&pwwn_pd->pd_mutex); 14558 fctl_tc_reset(&pwwn_pd->pd_logo_tc); 14559 pwwn_pd->pd_recepient = PD_PLOGI_INITIATOR; 14560 mutex_exit(&pwwn_pd->pd_mutex); 14561 } 14562 14563 if (rval == FC_SUCCESS) { 14564 struct fc_portmap *map = 14565 listptr + *listindex - 1; 14566 14567 FP_TRACE(FP_NHEAD1(1, 0), 14568 "PLOGI succeeded: no skip(2)" 14569 " for D_ID %x", page->aff_d_id); 14570 map->map_flags |= 14571 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14572 } else { 14573 FP_TRACE(FP_NHEAD2(9, rval), 14574 "PLOGI to D_ID=%x failed", page->aff_d_id); 14575 14576 FP_TRACE(FP_NHEAD2(9, 0), 14577 "N_x Port with D_ID=%x, PWWN=%s" 14578 " disappeared from fabric", 14579 page->aff_d_id, ww_name); 14580 14581 fp_fillout_old_map(listptr + 14582 *listindex - 1, pwwn_pd, 0); 14583 } 14584 } else { 14585 mutex_exit(&pwwn_pd->pd_mutex); 14586 } 14587 14588 mutex_enter(&did_pd->pd_mutex); 14589 did_pd->pd_flags = PD_IDLE; 14590 mutex_exit(&did_pd->pd_mutex); 14591 14592 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; " 14593 "Case ONE, rval=%x, result=%x pd=%p", page->aff_d_id, rval, 14594 job->job_result, pwwn_pd); 14595 14596 return; 14597 } 14598 14599 if (did_pd == NULL && pwwn_pd == NULL) { 14600 14601 fc_orphan_t *orp = NULL; 14602 fc_orphan_t *norp = NULL; 14603 fc_orphan_t *prev = NULL; 14604 14605 /* 14606 * Hunt down the orphan list before giving up. 
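 * If the PWWN is found on the orphan list the entry is unlinked here;
 * it is freed further below once the port device has been re-created
 * through the name server.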
14607 */ 14608 14609 mutex_enter(&port->fp_mutex); 14610 if (port->fp_orphan_count) { 14611 14612 for (orp = port->fp_orphan_list; orp; orp = norp) { 14613 norp = orp->orp_next; 14614 14615 if (fctl_wwn_cmp(&orp->orp_pwwn, pwwn) != 0) { 14616 prev = orp; 14617 continue; 14618 } 14619 14620 if (prev) { 14621 prev->orp_next = orp->orp_next; 14622 } else { 14623 ASSERT(orp == 14624 port->fp_orphan_list); 14625 port->fp_orphan_list = 14626 orp->orp_next; 14627 } 14628 port->fp_orphan_count--; 14629 break; 14630 } 14631 } 14632 14633 mutex_exit(&port->fp_mutex); 14634 pwwn_pd = fp_create_remote_port_by_ns(port, 14635 page->aff_d_id, sleep); 14636 14637 if (pwwn_pd != NULL) { 14638 14639 if (orp) { 14640 fc_wwn_to_str(&orp->orp_pwwn, 14641 ww_name); 14642 14643 FP_TRACE(FP_NHEAD2(9, 0), 14644 "N_x Port with D_ID=%x," 14645 " PWWN=%s reappeared in fabric", 14646 page->aff_d_id, ww_name); 14647 14648 kmem_free(orp, sizeof (*orp)); 14649 } 14650 14651 (listptr + *listindex)-> 14652 map_rscn_info.ulp_rscn_count = 14653 (uint32_t)(uintptr_t)job->job_cb_arg; 14654 14655 fctl_copy_portmap(listptr + 14656 (*listindex)++, pwwn_pd); 14657 } 14658 14659 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; " 14660 "Case TWO", page->aff_d_id); 14661 14662 return; 14663 } 14664 14665 if (pwwn_pd != NULL && did_pd == NULL) { 14666 uint32_t old_d_id; 14667 uint32_t d_id = page->aff_d_id; 14668 14669 /* 14670 * What this means is there is a new D_ID for this 14671 * Port WWN. Take out the port device off D_ID 14672 * list and put it back with a new D_ID. Perform 14673 * PLOGI if already logged in. 14674 */ 14675 mutex_enter(&port->fp_mutex); 14676 mutex_enter(&pwwn_pd->pd_mutex); 14677 14678 old_d_id = pwwn_pd->pd_port_id.port_id; 14679 14680 fctl_delist_did_table(port, pwwn_pd); 14681 14682 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14683 (uint32_t)(uintptr_t)job->job_cb_arg; 14684 14685 fp_fillout_changed_map(listptr + (*listindex)++, pwwn_pd, 14686 &d_id, NULL); 14687 fctl_enlist_did_table(port, pwwn_pd); 14688 14689 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page;" 14690 " Case THREE, pd=%p," 14691 " state=%x", pwwn_pd, pwwn_pd->pd_state); 14692 14693 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14694 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14695 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name); 14696 14697 mutex_exit(&pwwn_pd->pd_mutex); 14698 mutex_exit(&port->fp_mutex); 14699 14700 FP_TRACE(FP_NHEAD2(9, 0), 14701 "N_x Port with D_ID=%x, PWWN=%s has a new" 14702 " D_ID=%x now", old_d_id, ww_name, d_id); 14703 14704 rval = fp_port_login(port, page->aff_d_id, job, 14705 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL); 14706 if (rval == FC_SUCCESS) { 14707 fp_jobwait(job); 14708 rval = job->job_result; 14709 } 14710 14711 if (rval != FC_SUCCESS) { 14712 fp_fillout_old_map(listptr + 14713 *listindex - 1, pwwn_pd, 0); 14714 } 14715 } else { 14716 mutex_exit(&pwwn_pd->pd_mutex); 14717 mutex_exit(&port->fp_mutex); 14718 } 14719 14720 return; 14721 } 14722 14723 if (pwwn_pd == NULL && did_pd != NULL) { 14724 fc_portmap_t *ptr; 14725 uint32_t len = 1; 14726 char old_ww_name[17]; 14727 14728 mutex_enter(&did_pd->pd_mutex); 14729 fc_wwn_to_str(&did_pd->pd_port_name, old_ww_name); 14730 mutex_exit(&did_pd->pd_mutex); 14731 14732 fc_wwn_to_str(pwwn, ww_name); 14733 14734 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14735 (uint32_t)(uintptr_t)job->job_cb_arg; 14736 14737 /* 14738 * What this means is that there is a new Port WWN for 14739 * this D_ID; Mark the Port device as old and provide 
14740 * the new PWWN and D_ID combination as new. 14741 */ 14742 fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0); 14743 14744 FP_TRACE(FP_NHEAD2(9, 0), 14745 "N_x Port with D_ID=%x, PWWN=%s has a new PWWN=%s now", 14746 page->aff_d_id, old_ww_name, ww_name); 14747 14748 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14749 (uint32_t)(uintptr_t)job->job_cb_arg; 14750 14751 ptr = listptr + (*listindex)++; 14752 14753 job->job_counter = 1; 14754 14755 if (fp_ns_getmap(port, job, &ptr, &len, 14756 page->aff_d_id - 1) != FC_SUCCESS) { 14757 (*listindex)--; 14758 } 14759 14760 mutex_enter(&did_pd->pd_mutex); 14761 did_pd->pd_flags = PD_IDLE; 14762 mutex_exit(&did_pd->pd_mutex); 14763 14764 return; 14765 } 14766 14767 /* 14768 * An unusual case: both the Port WWN and the D_ID are known, but they 14769 * do not belong to the same port device. Take the port device handle 14770 * off the Port WWN list, fix it up with the new Port WWN and put it 14771 * back; in the meantime mark the port device corresponding to the old 14772 * Port WWN as OLD. 14773 */ 14774 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; Case WEIRD, pwwn_pd=%p," 14775 " did_pd=%p", pwwn_pd, did_pd); 14776 14777 mutex_enter(&port->fp_mutex); 14778 mutex_enter(&pwwn_pd->pd_mutex); 14779 14780 pwwn_pd->pd_type = PORT_DEVICE_OLD; 14781 pwwn_pd->pd_state = PORT_DEVICE_INVALID; 14782 fctl_delist_did_table(port, pwwn_pd); 14783 fctl_delist_pwwn_table(port, pwwn_pd); 14784 14785 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued," 14786 " pwwn-d_id=%x pwwn-wwn=%x %x %x %x %x %x %x %x", 14787 pwwn_pd->pd_port_id.port_id, 14788 14789 pwwn_pd->pd_port_name.raw_wwn[0], 14790 pwwn_pd->pd_port_name.raw_wwn[1], 14791 pwwn_pd->pd_port_name.raw_wwn[2], 14792 pwwn_pd->pd_port_name.raw_wwn[3], 14793 pwwn_pd->pd_port_name.raw_wwn[4], 14794 pwwn_pd->pd_port_name.raw_wwn[5], 14795 pwwn_pd->pd_port_name.raw_wwn[6], 14796 pwwn_pd->pd_port_name.raw_wwn[7]); 14797 14798 mutex_exit(&pwwn_pd->pd_mutex); 14799 mutex_exit(&port->fp_mutex); 14800 14801 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14802 (uint32_t)(uintptr_t)job->job_cb_arg; 14803 14804 fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd); 14805 14806 mutex_enter(&port->fp_mutex); 14807 mutex_enter(&did_pd->pd_mutex); 14808 14809 fctl_delist_pwwn_table(port, did_pd); 14810 14811 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14812 (uint32_t)(uintptr_t)job->job_cb_arg; 14813 14814 fp_fillout_changed_map(listptr + (*listindex)++, did_pd, NULL, pwwn); 14815 fctl_enlist_pwwn_table(port, did_pd); 14816 14817 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued," 14818 " d_id=%x, state=%x, did-wwn=%x %x %x %x %x %x %x %x", 14819 did_pd->pd_port_id.port_id, did_pd->pd_state, 14820 14821 did_pd->pd_port_name.raw_wwn[0], 14822 did_pd->pd_port_name.raw_wwn[1], 14823 did_pd->pd_port_name.raw_wwn[2], 14824 did_pd->pd_port_name.raw_wwn[3], 14825 did_pd->pd_port_name.raw_wwn[4], 14826 did_pd->pd_port_name.raw_wwn[5], 14827 did_pd->pd_port_name.raw_wwn[6], 14828 did_pd->pd_port_name.raw_wwn[7]); 14829 14830 if ((did_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14831 (did_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14832 mutex_exit(&did_pd->pd_mutex); 14833 mutex_exit(&port->fp_mutex); 14834 14835 rval = fp_port_login(port, page->aff_d_id, job, 14836 FP_CMD_PLOGI_RETAIN, sleep, did_pd, NULL); 14837 if (rval == FC_SUCCESS) { 14838 fp_jobwait(job); 14839 if (job->job_result != FC_SUCCESS) { 14840 fp_fillout_old_map(listptr + 14841 *listindex - 1, did_pd, 0); 14842 } 14843 }
else { 14844 fp_fillout_old_map(listptr + *listindex - 1, did_pd, 0); 14845 } 14846 } else { 14847 mutex_exit(&did_pd->pd_mutex); 14848 mutex_exit(&port->fp_mutex); 14849 } 14850 14851 mutex_enter(&did_pd->pd_mutex); 14852 did_pd->pd_flags = PD_IDLE; 14853 mutex_exit(&did_pd->pd_mutex); 14854 } 14855 14856 14857 /* 14858 * Check with NS for the presence of this port WWN 14859 */ 14860 static int 14861 fp_ns_validate_device(fc_local_port_t *port, fc_remote_port_t *pd, 14862 job_request_t *job, int polled, int sleep) 14863 { 14864 la_wwn_t pwwn; 14865 uint32_t flags; 14866 fctl_ns_req_t *ns_cmd; 14867 14868 flags = FCTL_NS_VALIDATE_PD | ((polled) ? 0: FCTL_NS_ASYNC_REQUEST); 14869 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 14870 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 14871 flags, sleep); 14872 if (ns_cmd == NULL) { 14873 return (FC_NOMEM); 14874 } 14875 14876 mutex_enter(&pd->pd_mutex); 14877 pwwn = pd->pd_port_name; 14878 mutex_exit(&pd->pd_mutex); 14879 14880 ns_cmd->ns_cmd_code = NS_GID_PN; 14881 ns_cmd->ns_pd = pd; 14882 ((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = pwwn; 14883 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0; 14884 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0; 14885 14886 return (fp_ns_query(port, ns_cmd, job, polled, sleep)); 14887 } 14888 14889 14890 /* 14891 * Sanity check the LILP map returned by FCA 14892 */ 14893 static int 14894 fp_validate_lilp_map(fc_lilpmap_t *lilp_map) 14895 { 14896 int count; 14897 14898 if (lilp_map->lilp_length == 0) { 14899 return (FC_FAILURE); 14900 } 14901 14902 for (count = 0; count < lilp_map->lilp_length; count++) { 14903 if (fp_is_valid_alpa(lilp_map->lilp_alpalist[count]) != 14904 FC_SUCCESS) { 14905 return (FC_FAILURE); 14906 } 14907 } 14908 14909 return (FC_SUCCESS); 14910 } 14911 14912 14913 /* 14914 * Sanity check if the AL_PA is a valid address 14915 */ 14916 static int 14917 fp_is_valid_alpa(uchar_t al_pa) 14918 { 14919 int count; 14920 14921 for (count = 0; count < sizeof (fp_valid_alpas); count++) { 14922 if (al_pa == fp_valid_alpas[count] || al_pa == 0) { 14923 return (FC_SUCCESS); 14924 } 14925 } 14926 14927 return (FC_FAILURE); 14928 } 14929 14930 14931 /* 14932 * Post unsolicited callbacks to ULPs 14933 */ 14934 static void 14935 fp_ulp_unsol_cb(void *arg) 14936 { 14937 fp_unsol_spec_t *ub_spec = (fp_unsol_spec_t *)arg; 14938 14939 fctl_ulp_unsol_cb(ub_spec->port, ub_spec->buf, 14940 ub_spec->buf->ub_frame.type); 14941 kmem_free(ub_spec, sizeof (*ub_spec)); 14942 } 14943 14944 14945 /* 14946 * Perform message reporting in a consistent manner. Unless there is 14947 * a strong reason NOT to use this function (which is very very rare) 14948 * all message reporting should go through this. 14949 */ 14950 static void 14951 fp_printf(fc_local_port_t *port, int level, fp_mesg_dest_t dest, int fc_errno, 14952 fc_packet_t *pkt, const char *fmt, ...) 
14953 { 14954 caddr_t buf; 14955 va_list ap; 14956 14957 switch (level) { 14958 case CE_NOTE: 14959 if ((port->fp_verbose & FP_WARNING_MESSAGES) == 0) { 14960 return; 14961 } 14962 break; 14963 14964 case CE_WARN: 14965 if ((port->fp_verbose & FP_FATAL_MESSAGES) == 0) { 14966 return; 14967 } 14968 break; 14969 } 14970 14971 buf = kmem_zalloc(256, KM_NOSLEEP); 14972 if (buf == NULL) { 14973 return; 14974 } 14975 14976 (void) sprintf(buf, "fp(%d): ", port->fp_instance); 14977 14978 va_start(ap, fmt); 14979 (void) vsprintf(buf + strlen(buf), fmt, ap); 14980 va_end(ap); 14981 14982 if (fc_errno) { 14983 char *errmsg; 14984 14985 (void) fc_ulp_error(fc_errno, &errmsg); 14986 (void) sprintf(buf + strlen(buf), " FC Error=%s", errmsg); 14987 } else { 14988 if (pkt) { 14989 caddr_t state, reason, action, expln; 14990 14991 (void) fc_ulp_pkt_error(pkt, &state, &reason, 14992 &action, &expln); 14993 14994 (void) sprintf(buf + strlen(buf), 14995 " state=%s, reason=%s", state, reason); 14996 14997 if (pkt->pkt_resp_resid) { 14998 (void) sprintf(buf + strlen(buf), 14999 " resp resid=%x\n", pkt->pkt_resp_resid); 15000 } 15001 } 15002 } 15003 15004 switch (dest) { 15005 case FP_CONSOLE_ONLY: 15006 cmn_err(level, "^%s", buf); 15007 break; 15008 15009 case FP_LOG_ONLY: 15010 cmn_err(level, "!%s", buf); 15011 break; 15012 15013 default: 15014 cmn_err(level, "%s", buf); 15015 break; 15016 } 15017 15018 kmem_free(buf, 256); 15019 } 15020 15021 static int 15022 fp_fcio_login(fc_local_port_t *port, fcio_t *fcio, job_request_t *job) 15023 { 15024 int ret; 15025 uint32_t d_id; 15026 la_wwn_t pwwn; 15027 fc_remote_port_t *pd = NULL; 15028 fc_remote_port_t *held_pd = NULL; 15029 fctl_ns_req_t *ns_cmd; 15030 fc_portmap_t *changelist; 15031 15032 bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn)); 15033 15034 mutex_enter(&port->fp_mutex); 15035 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 15036 mutex_exit(&port->fp_mutex); 15037 job->job_counter = 1; 15038 15039 job->job_result = FC_SUCCESS; 15040 15041 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 15042 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 15043 FCTL_NS_BUF_IS_USERLAND, KM_SLEEP); 15044 15045 ASSERT(ns_cmd != NULL); 15046 15047 ns_cmd->ns_cmd_code = NS_GID_PN; 15048 ((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = pwwn; 15049 15050 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 15051 15052 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) { 15053 if (ret != FC_SUCCESS) { 15054 fcio->fcio_errno = ret; 15055 } else { 15056 fcio->fcio_errno = job->job_result; 15057 } 15058 fctl_free_ns_cmd(ns_cmd); 15059 return (EIO); 15060 } 15061 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 15062 fctl_free_ns_cmd(ns_cmd); 15063 } else { 15064 mutex_exit(&port->fp_mutex); 15065 15066 held_pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 15067 if (held_pd == NULL) { 15068 fcio->fcio_errno = FC_BADWWN; 15069 return (EIO); 15070 } 15071 pd = held_pd; 15072 15073 mutex_enter(&pd->pd_mutex); 15074 d_id = pd->pd_port_id.port_id; 15075 mutex_exit(&pd->pd_mutex); 15076 } 15077 15078 job->job_counter = 1; 15079 15080 pd = fctl_get_remote_port_by_did(port, d_id); 15081 15082 if (pd) { 15083 mutex_enter(&pd->pd_mutex); 15084 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 15085 pd->pd_login_count++; 15086 mutex_exit(&pd->pd_mutex); 15087 15088 fcio->fcio_errno = FC_SUCCESS; 15089 if (held_pd) { 15090 fctl_release_remote_port(held_pd); 15091 } 15092 15093 return (0); 15094 } 15095 mutex_exit(&pd->pd_mutex); 15096 } else { 15097 mutex_enter(&port->fp_mutex); 15098 if 
(FC_IS_TOP_SWITCH(port->fp_topology)) { 15099 mutex_exit(&port->fp_mutex); 15100 pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP); 15101 if (pd == NULL) { 15102 fcio->fcio_errno = FC_FAILURE; 15103 if (held_pd) { 15104 fctl_release_remote_port(held_pd); 15105 } 15106 return (EIO); 15107 } 15108 } else { 15109 mutex_exit(&port->fp_mutex); 15110 } 15111 } 15112 15113 job->job_flags &= ~JOB_TYPE_FP_ASYNC; 15114 job->job_counter = 1; 15115 15116 ret = fp_port_login(port, d_id, job, FP_CMD_PLOGI_RETAIN, 15117 KM_SLEEP, pd, NULL); 15118 15119 if (ret != FC_SUCCESS) { 15120 fcio->fcio_errno = ret; 15121 if (held_pd) { 15122 fctl_release_remote_port(held_pd); 15123 } 15124 return (EIO); 15125 } 15126 fp_jobwait(job); 15127 15128 fcio->fcio_errno = job->job_result; 15129 15130 if (held_pd) { 15131 fctl_release_remote_port(held_pd); 15132 } 15133 15134 if (job->job_result != FC_SUCCESS) { 15135 return (EIO); 15136 } 15137 15138 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 15139 if (pd == NULL) { 15140 fcio->fcio_errno = FC_BADDEV; 15141 return (ENODEV); 15142 } 15143 15144 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP); 15145 15146 fctl_copy_portmap(changelist, pd); 15147 changelist->map_type = PORT_DEVICE_USER_LOGIN; 15148 15149 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1); 15150 15151 mutex_enter(&pd->pd_mutex); 15152 pd->pd_type = PORT_DEVICE_NOCHANGE; 15153 mutex_exit(&pd->pd_mutex); 15154 15155 fctl_release_remote_port(pd); 15156 15157 return (0); 15158 } 15159 15160 15161 static int 15162 fp_fcio_logout(fc_local_port_t *port, fcio_t *fcio, job_request_t *job) 15163 { 15164 la_wwn_t pwwn; 15165 fp_cmd_t *cmd; 15166 fc_portmap_t *changelist; 15167 fc_remote_port_t *pd; 15168 15169 bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn)); 15170 15171 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 15172 if (pd == NULL) { 15173 fcio->fcio_errno = FC_BADWWN; 15174 return (ENXIO); 15175 } 15176 15177 mutex_enter(&pd->pd_mutex); 15178 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 15179 fcio->fcio_errno = FC_LOGINREQ; 15180 mutex_exit(&pd->pd_mutex); 15181 15182 fctl_release_remote_port(pd); 15183 15184 return (EINVAL); 15185 } 15186 15187 ASSERT(pd->pd_login_count >= 1); 15188 15189 if (pd->pd_flags == PD_ELS_IN_PROGRESS) { 15190 fcio->fcio_errno = FC_FAILURE; 15191 mutex_exit(&pd->pd_mutex); 15192 15193 fctl_release_remote_port(pd); 15194 15195 return (EBUSY); 15196 } 15197 15198 if (pd->pd_login_count > 1) { 15199 pd->pd_login_count--; 15200 fcio->fcio_errno = FC_SUCCESS; 15201 mutex_exit(&pd->pd_mutex); 15202 15203 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP); 15204 15205 fctl_copy_portmap(changelist, pd); 15206 changelist->map_type = PORT_DEVICE_USER_LOGOUT; 15207 15208 fctl_release_remote_port(pd); 15209 15210 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1); 15211 15212 return (0); 15213 } 15214 15215 pd->pd_flags = PD_ELS_IN_PROGRESS; 15216 mutex_exit(&pd->pd_mutex); 15217 15218 job->job_counter = 1; 15219 15220 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t), 15221 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd); 15222 if (cmd == NULL) { 15223 fcio->fcio_errno = FC_NOMEM; 15224 fctl_release_remote_port(pd); 15225 15226 mutex_enter(&pd->pd_mutex); 15227 pd->pd_flags = PD_IDLE; 15228 mutex_exit(&pd->pd_mutex); 15229 15230 return (ENOMEM); 15231 } 15232 15233 mutex_enter(&port->fp_mutex); 15234 mutex_enter(&pd->pd_mutex); 15235 15236 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 15237 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 15238 
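	/*
	 * Finish building the LOGO command: a single retry, no ULP packet
	 * attached; fp_logo_init() below fills in the LOGO ELS payload
	 * while the port and pd mutexes are still held.
	 */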
cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE; 15239 cmd->cmd_retry_count = 1; 15240 cmd->cmd_ulp_pkt = NULL; 15241 15242 fp_logo_init(pd, cmd, job); 15243 15244 mutex_exit(&pd->pd_mutex); 15245 mutex_exit(&port->fp_mutex); 15246 15247 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 15248 mutex_enter(&pd->pd_mutex); 15249 pd->pd_flags = PD_IDLE; 15250 mutex_exit(&pd->pd_mutex); 15251 15252 fp_free_pkt(cmd); 15253 fctl_release_remote_port(pd); 15254 15255 return (EIO); 15256 } 15257 15258 fp_jobwait(job); 15259 15260 fcio->fcio_errno = job->job_result; 15261 if (job->job_result != FC_SUCCESS) { 15262 mutex_enter(&pd->pd_mutex); 15263 pd->pd_flags = PD_IDLE; 15264 mutex_exit(&pd->pd_mutex); 15265 15266 fctl_release_remote_port(pd); 15267 15268 return (EIO); 15269 } 15270 15271 ASSERT(pd != NULL); 15272 15273 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP); 15274 15275 fctl_copy_portmap(changelist, pd); 15276 changelist->map_type = PORT_DEVICE_USER_LOGOUT; 15277 changelist->map_state = PORT_DEVICE_INVALID; 15278 15279 mutex_enter(&port->fp_mutex); 15280 mutex_enter(&pd->pd_mutex); 15281 15282 fctl_delist_did_table(port, pd); 15283 fctl_delist_pwwn_table(port, pd); 15284 pd->pd_flags = PD_IDLE; 15285 15286 mutex_exit(&pd->pd_mutex); 15287 mutex_exit(&port->fp_mutex); 15288 15289 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1); 15290 15291 fctl_release_remote_port(pd); 15292 15293 return (0); 15294 } 15295 15296 15297 15298 /* 15299 * Send a syslog event for adapter port level events. 15300 */ 15301 static void 15302 fp_log_port_event(fc_local_port_t *port, char *subclass) 15303 { 15304 nvlist_t *attr_list; 15305 15306 if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, 15307 KM_SLEEP) != DDI_SUCCESS) { 15308 goto alloc_failed; 15309 } 15310 15311 if (nvlist_add_uint32(attr_list, "instance", 15312 port->fp_instance) != DDI_SUCCESS) { 15313 goto error; 15314 } 15315 15316 if (nvlist_add_byte_array(attr_list, "port-wwn", 15317 port->fp_service_params.nport_ww_name.raw_wwn, 15318 sizeof (la_wwn_t)) != DDI_SUCCESS) { 15319 goto error; 15320 } 15321 15322 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC, 15323 subclass, attr_list, NULL, DDI_SLEEP); 15324 15325 nvlist_free(attr_list); 15326 return; 15327 15328 error: 15329 nvlist_free(attr_list); 15330 alloc_failed: 15331 FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass); 15332 } 15333 15334 15335 static void 15336 fp_log_target_event(fc_local_port_t *port, char *subclass, la_wwn_t tgt_pwwn, 15337 uint32_t port_id) 15338 { 15339 nvlist_t *attr_list; 15340 15341 if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, 15342 KM_SLEEP) != DDI_SUCCESS) { 15343 goto alloc_failed; 15344 } 15345 15346 if (nvlist_add_uint32(attr_list, "instance", 15347 port->fp_instance) != DDI_SUCCESS) { 15348 goto error; 15349 } 15350 15351 if (nvlist_add_byte_array(attr_list, "port-wwn", 15352 port->fp_service_params.nport_ww_name.raw_wwn, 15353 sizeof (la_wwn_t)) != DDI_SUCCESS) { 15354 goto error; 15355 } 15356 15357 if (nvlist_add_byte_array(attr_list, "target-port-wwn", 15358 tgt_pwwn.raw_wwn, sizeof (la_wwn_t)) != DDI_SUCCESS) { 15359 goto error; 15360 } 15361 15362 if (nvlist_add_uint32(attr_list, "target-port-id", 15363 port_id) != DDI_SUCCESS) { 15364 goto error; 15365 } 15366 15367 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC, 15368 subclass, attr_list, NULL, DDI_SLEEP); 15369 15370 nvlist_free(attr_list); 15371 return; 15372 15373 error: 15374 nvlist_free(attr_list); 15375 
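	/* FALLTHROUGH: log the failure below */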
alloc_failed: 15376 FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass); 15377 } 15378 15379 static uint32_t 15380 fp_map_remote_port_state(uint32_t rm_state) 15381 { 15382 switch (rm_state) { 15383 case PORT_DEVICE_LOGGED_IN: 15384 return (FC_HBA_PORTSTATE_ONLINE); 15385 case PORT_DEVICE_VALID: 15386 case PORT_DEVICE_INVALID: 15387 default: 15388 return (FC_HBA_PORTSTATE_UNKNOWN); 15389 } 15390 }
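/*
 * Illustrative sketch (not part of the original source): fp_fcio_login()
 * and fp_fcio_logout() above service user-initiated login/logout requests
 * that reach the driver through the fp ioctl path, with the target Port
 * WWN passed in fcio_ibuf.  Assuming the fcio_t/FCIO_CMD interface from
 * <sys/fibre-channel/fcio.h> and an fp devctl node path (both should be
 * verified against the installed headers before use), a userland caller
 * might look roughly like this:
 *
 *	#include <sys/fibre-channel/fcio.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	do_dev_login(const char *fp_devctl_path, uchar_t pwwn[8])
 *	{
 *		fcio_t	fcio;
 *		int	fd, rval;
 *
 *		if ((fd = open(fp_devctl_path, O_RDWR)) < 0)
 *			return (-1);
 *
 *		(void) memset(&fcio, 0, sizeof (fcio));
 *		fcio.fcio_cmd = FCIO_DEV_LOGIN;
 *		fcio.fcio_xfer = FCIO_XFER_WRITE;
 *		fcio.fcio_ilen = sizeof (la_wwn_t);
 *		fcio.fcio_ibuf = (caddr_t)pwwn;
 *
 *		rval = ioctl(fd, FCIO_CMD, &fcio);
 *		(void) close(fd);
 *		return (rval);
 *	}
 */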