/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * pmcs: attach/detach entry points and module linkage for the PMC-Sierra
 * PM8001-family SAS/SATA HBA driver.  Contains _init/_fini/_info, the HBA
 * and iport attach/detach paths, and the module-wide tunables they consume.
 */
#include <sys/scsi/adapters/pmcs/pmcs.h>

#define	PMCS_DRIVER_VERSION	"pmcs HBA device driver"

/* Version string published via the SM-HBA PMCS_DRV_VERSION property */
static char *pmcs_driver_rev = PMCS_DRIVER_VERSION;

/*
 * Non-DDI Compliant stuff
 */
extern char hw_serial[];

/*
 * Global driver data
 */
/* Soft-state anchors for HBA (pmcs_hw_t) and iport (pmcs_iport_t) nodes */
void *pmcs_softc_state = NULL;
void *pmcs_iport_softstate = NULL;

/*
 * Tracing and Logging info
 *
 * One trace buffer is shared by all pmcs instances; it is allocated lazily
 * by the first HBA attach (under pmcs_trace_lock) and freed in _fini.
 */
pmcs_tbuf_t *pmcs_tbuf = NULL;
uint32_t pmcs_tbuf_num_elems = 0;
pmcs_tbuf_t *pmcs_tbuf_ptr;
uint32_t pmcs_tbuf_idx = 0;
boolean_t pmcs_tbuf_wrap = B_FALSE;
kmutex_t pmcs_trace_lock;

/*
 * If pmcs_force_syslog value is non-zero, all messages put in the trace log
 * will also be sent to system log.
 */
int pmcs_force_syslog = 0;
int pmcs_console = 0;

/*
 * External References
 */
extern int ncpus_online;

/*
 * Local static data
 *
 * Compile-time defaults for the driver.conf-tunable properties read in
 * pmcs_attach() (e.g. "pmcs-fwlog", "pmcs-physpeed", "pmcs-phymode").
 */
static int fwlog_level = 3;
static int physpeed = PHY_LINK_ALL;
static int phymode = PHY_LM_AUTO;
static int block_mask = 0;
static int phymap_stable_usec = 3 * MICROSEC;
static int iportmap_stable_usec = 2 * MICROSEC;
static int iportmap_csync_usec = 20 * MICROSEC;

#ifdef DEBUG
static int debug_mask = 1;
#else
static int debug_mask = 0;
#endif

#ifdef DISABLE_MSIX
static int disable_msix = 1;
#else
static int disable_msix = 0;
#endif

#ifdef DISABLE_MSI
static int disable_msi = 1;
#else
static int disable_msi = 0;
#endif

/*
 * DEBUG: testing: allow detach with an active port:
 *
 * # echo 'detach_driver_unconfig/W 10' | mdb -kw
 * # echo 'scsi_hba_bus_unconfig_remove/W 1' | mdb -kw
 * # echo 'pmcs`detach_with_active_port/W 1' | mdb -kw
 * # modunload -i <pmcs_driver_index>
 */
static int detach_with_active_port = 0;

static uint16_t maxqdepth = 0xfffe;

/*
 * Local prototypes
 */
static int pmcs_attach(dev_info_t *, ddi_attach_cmd_t);
static int pmcs_detach(dev_info_t *, ddi_detach_cmd_t);
static int pmcs_unattach(pmcs_hw_t *);
static int pmcs_iport_unattach(pmcs_iport_t *);
static int pmcs_add_more_chunks(pmcs_hw_t *, unsigned long);
static void pmcs_watchdog(void *);
static int pmcs_setup_intr(pmcs_hw_t *);
static int pmcs_teardown_intr(pmcs_hw_t *);

static uint_t pmcs_nonio_ix(caddr_t, caddr_t);
static uint_t pmcs_general_ix(caddr_t, caddr_t);
static uint_t pmcs_event_ix(caddr_t, caddr_t);
static uint_t pmcs_iodone_ix(caddr_t, caddr_t);
static uint_t pmcs_fatal_ix(caddr_t, caddr_t);
static uint_t pmcs_all_intr(caddr_t, caddr_t);
static int pmcs_quiesce(dev_info_t *dip);
static boolean_t pmcs_fabricate_wwid(pmcs_hw_t *);

static void pmcs_create_all_phy_stats(pmcs_iport_t *);
int pmcs_update_phy_stats(kstat_t *, int);

static void pmcs_fm_fini(pmcs_hw_t *pwp);
static void pmcs_fm_init(pmcs_hw_t *pwp);
static int pmcs_fm_error_cb(dev_info_t *dip,
    ddi_fm_error_t *err, const void *impl_data);

/*
 * Local configuration data
 */
static struct dev_ops pmcs_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	ddi_no_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	pmcs_attach,		/* attach */
	pmcs_detach,		/* detach */
	nodev,			/* reset */
	NULL,			/* driver operations */
	NULL,			/* bus operations */
	ddi_power,		/* power management */
	pmcs_quiesce		/* quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,
	PMCS_DRIVER_VERSION,
	&pmcs_ops,	/* driver ops */
};
static struct modlinkage modlinkage = {
	MODREV_1, { &modldrv, NULL }
};

/* Default DMA attributes shared by the queue/scratch/fwlog/regdump areas */
const ddi_dma_attr_t pmcs_dattr = {
	DMA_ATTR_V0,			/* dma_attr version */
	0x0000000000000000ull,		/* dma_attr_addr_lo */
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi */
	0x00000000FFFFFFFFull,		/* dma_attr_count_max */
	0x0000000000000001ull,		/* dma_attr_align */
	0x00000078,			/* dma_attr_burstsizes */
	0x00000001,			/* dma_attr_minxfer */
	0x00000000FFFFFFFFull,		/* dma_attr_maxxfer */
	0x00000000FFFFFFFFull,		/* dma_attr_seg */
	1,				/* dma_attr_sgllen */
	512,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};

/* Little-endian, strictly-ordered register access attributes */
static ddi_device_acc_attr_t rattr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};


/*
 * Attach/Detach functions
 */

/*
 * Module load entry point: initialize HBA and iport soft-state anchors,
 * register with the SCSA framework, install the module, and set up the
 * global trace lock.  Returns 0 on success or the failing step's errno.
 */
int
_init(void)
{
	int ret;

	ret = ddi_soft_state_init(&pmcs_softc_state, sizeof (pmcs_hw_t), 1);
	if (ret != 0) {
		cmn_err(CE_WARN, "?soft state init failed for pmcs");
		return (ret);
	}

	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
		cmn_err(CE_WARN, "?scsi_hba_init failed for pmcs");
		ddi_soft_state_fini(&pmcs_softc_state);
		return (ret);
	}

	/*
	 * Allocate soft state for iports
	 */
	ret = ddi_soft_state_init(&pmcs_iport_softstate,
	    sizeof (pmcs_iport_t), 2);
	if (ret != 0) {
		cmn_err(CE_WARN, "?iport soft state init failed for pmcs");
		/*
		 * NOTE(review): this path does not call scsi_hba_fini()
		 * even though scsi_hba_init() succeeded above — appears to
		 * leak the SCSA registration on this (unlikely) failure;
		 * confirm against scsi_hba_init(9F) pairing rules.
		 */
		ddi_soft_state_fini(&pmcs_softc_state);
		return (ret);
	}

	ret = mod_install(&modlinkage);
	if (ret != 0) {
		cmn_err(CE_WARN, "?mod_install failed for pmcs (%d)", ret);
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&pmcs_iport_softstate);
		ddi_soft_state_fini(&pmcs_softc_state);
		return (ret);
	}

	/* Initialize the global trace lock */
	mutex_init(&pmcs_trace_lock, NULL, MUTEX_DRIVER, NULL);

	return (0);
}

/*
 * Module unload entry point: remove the module (fails if any instance is
 * still attached), then tear down SCSA registration, the shared trace
 * buffer, the trace lock, and both soft-state anchors.
 */
int
_fini(void)
{
	int ret;
	if ((ret = mod_remove(&modlinkage)) != 0) {
		return (ret);
	}
	scsi_hba_fini(&modlinkage);

	/* Free pmcs log buffer and destroy the global lock */
	if (pmcs_tbuf) {
		kmem_free(pmcs_tbuf,
		    pmcs_tbuf_num_elems * sizeof (pmcs_tbuf_t));
		pmcs_tbuf = NULL;
	}
	mutex_destroy(&pmcs_trace_lock);

	ddi_soft_state_fini(&pmcs_iport_softstate);
	ddi_soft_state_fini(&pmcs_softc_state);
	return (0);
}

/* Standard _info(9E): report module information via the linkage. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Attach an iport (SCSAv3 virtual port) child node.  Allocates and wires up
 * the pmcs_iport_t: locks/CVs, unit-address string, phy list (configured if
 * the UA is already active in the phymap), per-target soft state, target
 * map, DDI properties, phy kstats, and finally links the iport onto the
 * parent HBA's iports list.  On any failure, unwinds in reverse order via
 * the iport_attach_fail* labels and returns DDI_FAILURE.
 */
static int
pmcs_iport_attach(dev_info_t *dip)
{
	pmcs_iport_t *iport;
	pmcs_hw_t *pwp;
	scsi_hba_tran_t *tran;
	void *ua_priv = NULL;
	char *iport_ua;
	char *init_port;
	int hba_inst;
	int inst;

	/* Parent of an iport node is the HBA node */
	hba_inst = ddi_get_instance(ddi_get_parent(dip));
	inst = ddi_get_instance(dip);

	pwp = ddi_get_soft_state(pmcs_softc_state, hba_inst);
	if (pwp == NULL) {
		cmn_err(CE_WARN, "%s: No HBA softstate for instance %d",
		    __func__, inst);
		return (DDI_FAILURE);
	}

	/* Refuse new iports while the HBA is tearing down or dead */
	if ((pwp->state == STATE_UNPROBING) || (pwp->state == STATE_DEAD)) {
		return (DDI_FAILURE);
	}

	if ((iport_ua = scsi_hba_iport_unit_address(dip)) == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: invoked with NULL unit address, inst (%d)",
		    __func__, inst);
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(pmcs_iport_softstate, inst) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "Failed to alloc soft state for iport %d", inst);
		return (DDI_FAILURE);
	}

	iport = ddi_get_soft_state(pmcs_iport_softstate, inst);
	if (iport == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "cannot get iport soft state");
		goto iport_attach_fail1;
	}

	/* Locks/CVs at interrupt priority so they can be taken from intrs */
	mutex_init(&iport->lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));
	cv_init(&iport->refcnt_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&iport->smp_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&iport->refcnt_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));
	mutex_init(&iport->smp_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));

	/* Set some data on the iport handle */
	iport->dip = dip;
	iport->pwp = pwp;

	/* Dup the UA into the iport handle */
	iport->ua = strdup(iport_ua);

	tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
	tran->tran_hba_private = iport;

	list_create(&iport->phys, sizeof (pmcs_phy_t),
	    offsetof(pmcs_phy_t, list_node));

	/*
	 * If our unit address is active in the phymap, configure our
	 * iport's phylist.
	 */
	mutex_enter(&iport->lock);
	ua_priv = sas_phymap_lookup_uapriv(pwp->hss_phymap, iport->ua);
	if (ua_priv) {
		/* Non-NULL private data indicates the unit address is active */
		iport->ua_state = UA_ACTIVE;
		if (pmcs_iport_configure_phys(iport) != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
			    "%s: failed to "
			    "configure phys on iport handle (0x%p), "
			    " unit address [%s]", __func__,
			    (void *)iport, iport_ua);
			mutex_exit(&iport->lock);
			goto iport_attach_fail2;
		}
	} else {
		iport->ua_state = UA_INACTIVE;
	}
	mutex_exit(&iport->lock);

	/* Allocate string-based soft state pool for targets */
	iport->tgt_sstate = NULL;
	if (ddi_soft_state_bystr_init(&iport->tgt_sstate,
	    sizeof (pmcs_xscsi_t), PMCS_TGT_SSTATE_SZ) != 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "cannot get iport tgt soft state");
		goto iport_attach_fail2;
	}

	/* Create this iport's target map */
	if (pmcs_iport_tgtmap_create(iport) == B_FALSE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "Failed to create tgtmap on iport %d", inst);
		goto iport_attach_fail3;
	}

	/* Set up the 'initiator-port' DDI property on this iport */
	init_port = kmem_zalloc(PMCS_MAX_UA_SIZE, KM_SLEEP);
	if (pwp->separate_ports) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: separate ports not supported", __func__);
	} else {
		/* Set initiator-port value to the HBA's base WWN */
		(void) scsi_wwn_to_wwnstr(pwp->sas_wwns[0], 1,
		    init_port);
	}

	mutex_enter(&iport->lock);
	pmcs_smhba_add_iport_prop(iport, DATA_TYPE_STRING,
	    SCSI_ADDR_PROP_INITIATOR_PORT, init_port);
	kmem_free(init_port, PMCS_MAX_UA_SIZE);

	/* Set up a 'num-phys' DDI property for the iport node */
	pmcs_smhba_add_iport_prop(iport, DATA_TYPE_INT32, PMCS_NUM_PHYS,
	    &iport->nphy);
	mutex_exit(&iport->lock);

	/* Create kstats for each of the phys in this port */
	pmcs_create_all_phy_stats(iport);

	/*
	 * Insert this iport handle into our list and set
	 * iports_attached on the HBA node.
	 */
	rw_enter(&pwp->iports_lock, RW_WRITER);
	ASSERT(!list_link_active(&iport->list_node));
	list_insert_tail(&pwp->iports, iport);
	pwp->iports_attached = 1;
	pwp->num_iports++;
	rw_exit(&pwp->iports_lock);

	pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL,
	    "iport%d attached", inst);
	ddi_report_dev(dip);
	return (DDI_SUCCESS);

	/* teardown and fail */
iport_attach_fail3:
	ddi_soft_state_bystr_fini(&iport->tgt_sstate);
iport_attach_fail2:
	list_destroy(&iport->phys);
	strfree(iport->ua);
	mutex_destroy(&iport->refcnt_lock);
	mutex_destroy(&iport->smp_lock);
	cv_destroy(&iport->refcnt_cv);
	cv_destroy(&iport->smp_cv);
	mutex_destroy(&iport->lock);
iport_attach_fail1:
	ddi_soft_state_free(pmcs_iport_softstate, inst);
	return (DDI_FAILURE);
}

/*
 * attach(9E) entry point.  Handles DDI_RESUME/DDI_PM_RESUME for the HBA
 * node, dispatches iport nodes to pmcs_iport_attach(), and otherwise
 * performs the full HBA bring-up: soft state, driver.conf properties,
 * trace/firmware-log buffers, FMA, PCI config and register mappings, chip
 * revision check, DMA areas, caches and threads, interrupts, MPI startup,
 * acceptance tests, SCSA attachment, iportmap/phymap creation, phy startup,
 * and SM-HBA/FMA properties.  Any failure after soft-state allocation
 * funnels through the 'failure' label, which calls pmcs_unattach().
 */
static int
pmcs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	scsi_hba_tran_t *tran;
	char chiprev, *fwsupport, hw_rev[24], fw_rev[24];
	off_t set3size;
	int inst, i;
	int sm_hba = 1;
	int protocol = 0;
	int num_phys = 0;
	pmcs_hw_t *pwp;
	pmcs_phy_t *phyp;
	uint32_t num_threads;
	char buf[64];
	char *fwl_file;

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_PM_RESUME:
	case DDI_RESUME:
		tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
		if (!tran) {
			return (DDI_FAILURE);
		}
		/* No DDI_?_RESUME on iport nodes */
		if (scsi_hba_iport_unit_address(dip) != NULL) {
			return (DDI_SUCCESS);
		}
		pwp = TRAN2PMC(tran);
		if (pwp == NULL) {
			return (DDI_FAILURE);
		}

		mutex_enter(&pwp->lock);
		pwp->suspended = 0;
		if (pwp->tq) {
			ddi_taskq_resume(pwp->tq);
		}
		mutex_exit(&pwp->lock);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	/*
	 * If this is an iport node, invoke iport attach.
	 */
	if (scsi_hba_iport_unit_address(dip) != NULL) {
		return (pmcs_iport_attach(dip));
	}

	/*
	 * From here on is attach for the HBA node
	 */

#ifdef	DEBUG
	/*
	 * Check to see if this unit is to be disabled.  We can't disable
	 * on a per-iport node.  It's either the entire HBA or nothing.
	 */
	(void) snprintf(buf, sizeof (buf),
	    "disable-instance-%d", ddi_get_instance(dip));
	if (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, buf, 0)) {
		cmn_err(CE_NOTE, "pmcs%d: disabled by configuration",
		    ddi_get_instance(dip));
		return (DDI_FAILURE);
	}
#endif

	/*
	 * Allocate softstate
	 */
	inst = ddi_get_instance(dip);
	if (ddi_soft_state_zalloc(pmcs_softc_state, inst) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "pmcs%d: Failed to alloc soft state", inst);
		return (DDI_FAILURE);
	}

	pwp = ddi_get_soft_state(pmcs_softc_state, inst);
	if (pwp == NULL) {
		cmn_err(CE_WARN, "pmcs%d: cannot get soft state", inst);
		ddi_soft_state_free(pmcs_softc_state, inst);
		return (DDI_FAILURE);
	}
	pwp->dip = dip;
	STAILQ_INIT(&pwp->dq);
	STAILQ_INIT(&pwp->cq);
	STAILQ_INIT(&pwp->wf);
	STAILQ_INIT(&pwp->pf);

	/*
	 * Create the list for iports and init its lock.
	 */
	list_create(&pwp->iports, sizeof (pmcs_iport_t),
	    offsetof(pmcs_iport_t, list_node));
	rw_init(&pwp->iports_lock, NULL, RW_DRIVER, NULL);

	pwp->state = STATE_PROBING;

	/*
	 * Get driver.conf properties
	 */
	pwp->debug_mask = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-debug-mask",
	    debug_mask);
	pwp->phyid_block_mask = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-phyid-block-mask",
	    block_mask);
	pwp->physpeed = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-physpeed", physpeed);
	pwp->phymode = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-phymode", phymode);
	pwp->fwlog = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-fwlog", fwlog_level);
	if (pwp->fwlog > PMCS_FWLOG_MAX) {
		pwp->fwlog = PMCS_FWLOG_MAX;
	}
	/*
	 * NOTE(review): snprintf returns the would-be length excluding the
	 * NUL, so truncation occurs when the return is >= MAXPATHLEN; the
	 * '> MAXPATHLEN' tests below miss the exactly-MAXPATHLEN case —
	 * confirm intent.
	 */
	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, "pmcs-fwlogfile",
	    &fwl_file) == DDI_SUCCESS)) {
		if (snprintf(pwp->fwlogfile_aap1, MAXPATHLEN, "%s%d-aap1.0",
		    fwl_file, ddi_get_instance(dip)) > MAXPATHLEN) {
			pwp->fwlogfile_aap1[0] = '\0';
			pwp->fwlogfile_iop[0] = '\0';
		} else if (snprintf(pwp->fwlogfile_iop, MAXPATHLEN,
		    "%s%d-iop.0", fwl_file,
		    ddi_get_instance(dip)) > MAXPATHLEN) {
			pwp->fwlogfile_aap1[0] = '\0';
			pwp->fwlogfile_iop[0] = '\0';
		}
		ddi_prop_free(fwl_file);
	} else {
		pwp->fwlogfile_aap1[0] = '\0';
		pwp->fwlogfile_iop[0] = '\0';
	}

	pwp->open_retry_interval = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-open-retry-interval",
	    OPEN_RETRY_INTERVAL_DEF);
	if (pwp->open_retry_interval > OPEN_RETRY_INTERVAL_MAX) {
		pwp->open_retry_interval = OPEN_RETRY_INTERVAL_MAX;
	}

	/* First instance to attach allocates the shared trace buffer */
	mutex_enter(&pmcs_trace_lock);
	if (pmcs_tbuf == NULL) {
		/* Allocate trace buffer */
		pmcs_tbuf_num_elems = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-tbuf-num-elems",
		    PMCS_TBUF_NUM_ELEMS_DEF);
		if ((pmcs_tbuf_num_elems == DDI_PROP_NOT_FOUND) ||
		    (pmcs_tbuf_num_elems == 0)) {
			pmcs_tbuf_num_elems = PMCS_TBUF_NUM_ELEMS_DEF;
		}

		pmcs_tbuf = kmem_zalloc(pmcs_tbuf_num_elems *
		    sizeof (pmcs_tbuf_t), KM_SLEEP);
		pmcs_tbuf_ptr = pmcs_tbuf;
		pmcs_tbuf_idx = 0;
	}
	mutex_exit(&pmcs_trace_lock);

	if (pwp->fwlog && strlen(pwp->fwlogfile_aap1) > 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: firmware event log files: %s, %s", __func__,
		    pwp->fwlogfile_aap1, pwp->fwlogfile_iop);
		pwp->fwlog_file = 1;
	} else {
		if (pwp->fwlog == 0) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: No firmware event log will be written "
			    "(event log disabled)", __func__);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: No firmware event log will be written "
			    "(no filename configured - too long?)", __func__);
		}
		pwp->fwlog_file = 0;
	}

	disable_msix = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-disable-msix",
	    disable_msix);
	disable_msi = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-disable-msi",
	    disable_msi);
	maxqdepth = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-maxqdepth", maxqdepth);
	pwp->fw_force_update = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-fw-force-update", 0);
	if (pwp->fw_force_update == 0) {
		pwp->fw_disable_update = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
		    "pmcs-fw-disable-update", 0);
	}
	pwp->ioq_depth = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "pmcs-num-io-qentries",
	    PMCS_NQENTRY);

	/*
	 * Initialize FMA
	 */
	pwp->dev_acc_attr = pwp->reg_acc_attr = rattr;
	pwp->iqp_dma_attr = pwp->oqp_dma_attr =
	    pwp->regdump_dma_attr = pwp->cip_dma_attr =
	    pwp->fwlog_dma_attr = pmcs_dattr;
	pwp->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, pwp->dip,
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "fm-capable",
	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
	pmcs_fm_init(pwp);

	/*
	 * Map registers
	 */
	if (pci_config_setup(dip, &pwp->pci_acc_handle)) {
		pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL,
		    "pci config setup failed");
		ddi_soft_state_free(pmcs_softc_state, inst);
		return (DDI_FAILURE);
	}

	/*
	 * Get the size of register set 3.
	 */
	if (ddi_dev_regsize(dip, PMCS_REGSET_3, &set3size) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "unable to get size of register set %d", PMCS_REGSET_3);
		pci_config_teardown(&pwp->pci_acc_handle);
		ddi_soft_state_free(pmcs_softc_state, inst);
		return (DDI_FAILURE);
	}

	/*
	 * Map registers
	 */
	pwp->reg_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;

	if (ddi_regs_map_setup(dip, PMCS_REGSET_0, (caddr_t *)&pwp->msg_regs,
	    0, 0, &pwp->reg_acc_attr, &pwp->msg_acc_handle)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "failed to map Message Unit registers");
		pci_config_teardown(&pwp->pci_acc_handle);
		ddi_soft_state_free(pmcs_softc_state, inst);
		return (DDI_FAILURE);
	}

	if (ddi_regs_map_setup(dip, PMCS_REGSET_1, (caddr_t *)&pwp->top_regs,
	    0, 0, &pwp->reg_acc_attr, &pwp->top_acc_handle)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "failed to map TOP registers");
		ddi_regs_map_free(&pwp->msg_acc_handle);
		pci_config_teardown(&pwp->pci_acc_handle);
		ddi_soft_state_free(pmcs_softc_state, inst);
		return (DDI_FAILURE);
	}

	if (ddi_regs_map_setup(dip, PMCS_REGSET_2, (caddr_t *)&pwp->gsm_regs,
	    0, 0, &pwp->reg_acc_attr, &pwp->gsm_acc_handle)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "failed to map GSM registers");
		ddi_regs_map_free(&pwp->top_acc_handle);
		ddi_regs_map_free(&pwp->msg_acc_handle);
		pci_config_teardown(&pwp->pci_acc_handle);
		ddi_soft_state_free(pmcs_softc_state, inst);
		return (DDI_FAILURE);
	}

	if (ddi_regs_map_setup(dip, PMCS_REGSET_3, (caddr_t *)&pwp->mpi_regs,
	    0, 0, &pwp->reg_acc_attr, &pwp->mpi_acc_handle)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "failed to map MPI registers");
		ddi_regs_map_free(&pwp->top_acc_handle);
		ddi_regs_map_free(&pwp->gsm_acc_handle);
		ddi_regs_map_free(&pwp->msg_acc_handle);
		pci_config_teardown(&pwp->pci_acc_handle);
		ddi_soft_state_free(pmcs_softc_state, inst);
		return (DDI_FAILURE);
	}
	pwp->mpibar =
	    (((5U << 2) + 0x10) << PMCS_MSGU_MPI_BAR_SHIFT) | set3size;

	/*
	 * Make sure we can support this card.
	 */
	pwp->chiprev = pmcs_rd_topunit(pwp, PMCS_DEVICE_REVISION);

	switch (pwp->chiprev) {
	case PMCS_PM8001_REV_A:
	case PMCS_PM8001_REV_B:
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
		    "Rev A/B Card no longer supported");
		goto failure;
	case PMCS_PM8001_REV_C:
		break;
	default:
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
		    "Unknown chip revision (%d)", pwp->chiprev);
		goto failure;
	}

	/*
	 * Allocate DMA addressable area for Inbound and Outbound Queue indices
	 * that the chip needs to access plus a space for scratch usage
	 */
	pwp->cip_dma_attr.dma_attr_align = sizeof (uint32_t);
	if (pmcs_dma_setup(pwp, &pwp->cip_dma_attr, &pwp->cip_acchdls,
	    &pwp->cip_handles, ptob(1), (caddr_t *)&pwp->cip,
	    &pwp->ciaddr) == B_FALSE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "Failed to setup DMA for index/scratch");
		goto failure;
	}

	bzero(pwp->cip, ptob(1));
	pwp->scratch = &pwp->cip[PMCS_INDICES_SIZE];
	pwp->scratch_dma = pwp->ciaddr + PMCS_INDICES_SIZE;

	/*
	 * Allocate DMA S/G list chunks
	 */
	(void) pmcs_add_more_chunks(pwp, ptob(1) * PMCS_MIN_CHUNK_PAGES);

	/*
	 * Allocate a DMA addressable area for the firmware log (if needed)
	 */
	if (pwp->fwlog) {
		/*
		 * Align to event log header and entry size
		 */
		pwp->fwlog_dma_attr.dma_attr_align = 32;
		if (pmcs_dma_setup(pwp, &pwp->fwlog_dma_attr,
		    &pwp->fwlog_acchdl,
		    &pwp->fwlog_hndl, PMCS_FWLOG_SIZE,
		    (caddr_t *)&pwp->fwlogp,
		    &pwp->fwaddr) == B_FALSE) {
			/* Non-fatal: just disable the firmware log */
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "Failed to setup DMA for fwlog area");
			pwp->fwlog = 0;
		} else {
			bzero(pwp->fwlogp, PMCS_FWLOG_SIZE);
			pwp->fwlogp_aap1 = (pmcs_fw_event_hdr_t *)pwp->fwlogp;
			pwp->fwlogp_iop = (pmcs_fw_event_hdr_t *)((void *)
			    ((caddr_t)pwp->fwlogp + (PMCS_FWLOG_SIZE / 2)));
		}
	}

	if (pwp->flash_chunk_addr == NULL) {
		pwp->regdump_dma_attr.dma_attr_align = PMCS_FLASH_CHUNK_SIZE;
		if (pmcs_dma_setup(pwp, &pwp->regdump_dma_attr,
		    &pwp->regdump_acchdl,
		    &pwp->regdump_hndl, PMCS_FLASH_CHUNK_SIZE,
		    (caddr_t *)&pwp->flash_chunkp, &pwp->flash_chunk_addr) ==
		    B_FALSE) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "Failed to setup DMA for register dump area");
			goto failure;
		}
		bzero(pwp->flash_chunkp, PMCS_FLASH_CHUNK_SIZE);
	}

	/*
	 * More bits of local initialization...
	 */
	pwp->tq = ddi_taskq_create(dip, "_tq", 4, TASKQ_DEFAULTPRI, 0);
	if (pwp->tq == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "unable to create worker taskq");
		goto failure;
	}

	/*
	 * Cache of structures for dealing with I/O completion callbacks.
	 */
	(void) snprintf(buf, sizeof (buf), "pmcs_iocomp_cb_cache%d", inst);
	pwp->iocomp_cb_cache = kmem_cache_create(buf,
	    sizeof (pmcs_iocomp_cb_t), 16, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * Cache of PHY structures
	 */
	(void) snprintf(buf, sizeof (buf), "pmcs_phy_cache%d", inst);
	pwp->phy_cache = kmem_cache_create(buf, sizeof (pmcs_phy_t), 8,
	    pmcs_phy_constructor, pmcs_phy_destructor, NULL, (void *)pwp,
	    NULL, 0);

	/*
	 * Allocate space for the I/O completion threads
	 */
	num_threads = ncpus_online;
	if (num_threads > PMCS_MAX_CQ_THREADS) {
		num_threads = PMCS_MAX_CQ_THREADS;
	}

	pwp->cq_info.cq_threads = num_threads;
	pwp->cq_info.cq_thr_info = kmem_zalloc(
	    sizeof (pmcs_cq_thr_info_t) * pwp->cq_info.cq_threads, KM_SLEEP);
	pwp->cq_info.cq_next_disp_thr = 0;
	pwp->cq_info.cq_stop = B_FALSE;

	/*
	 * Set the quantum value in clock ticks for the I/O interrupt
	 * coalescing timer.
	 */
	pwp->io_intr_coal.quantum = drv_usectohz(PMCS_QUANTUM_TIME_USECS);

	/*
	 * We have a delicate dance here. We need to set up
	 * interrupts so we know how to set up some OQC
	 * tables. However, while we're setting up table
	 * access, we may need to flash new firmware and
	 * reset the card, which will take some finessing.
	 */

	/*
	 * Set up interrupts here.
	 */
	switch (pmcs_setup_intr(pwp)) {
	case 0:
		break;
	case EIO:
		pwp->stuck = 1;
		/* FALLTHROUGH */
	default:
		goto failure;
	}

	/*
	 * Set these up now because they are used to initialize the OQC tables.
	 *
	 * If we have MSI or MSI-X interrupts set up and we have enough
	 * vectors for each OQ, the Outbound Queue vectors can all be the
	 * same as the appropriate interrupt routine will have been called
	 * and the doorbell register automatically cleared.
	 * This keeps us from having to check the Outbound Doorbell register
	 * when the routines for these interrupts are called.
	 *
	 * If we have Legacy INT-X interrupts set up or we didn't have enough
	 * MSI/MSI-X vectors to uniquely identify each OQ, we point these
	 * vectors to the bits we would like to have set in the Outbound
	 * Doorbell register because pmcs_all_intr will read the doorbell
	 * register to find out why we have an interrupt and write the
	 * corresponding 'clear' bit for that interrupt.
	 */

	switch (pwp->intr_cnt) {
	case 1:
		/*
		 * Only one vector, so we must check all OQs for MSI.  For
		 * INT-X, there's only one vector anyway, so we can just
		 * use the outbound queue bits to keep from having to
		 * check each queue for each interrupt.
		 */
		if (pwp->int_type == PMCS_INT_FIXED) {
			pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
			pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL;
			pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_EVENTS;
		} else {
			pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
			pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_IODONE;
			pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_IODONE;
		}
		break;
	case 2:
		/* With 2, we can at least isolate IODONE */
		pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
		pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL;
		pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_GENERAL;
		break;
	case 4:
		/* With 4 vectors, everybody gets one */
		pwp->oqvec[PMCS_OQ_IODONE] = PMCS_OQ_IODONE;
		pwp->oqvec[PMCS_OQ_GENERAL] = PMCS_OQ_GENERAL;
		pwp->oqvec[PMCS_OQ_EVENTS] = PMCS_OQ_EVENTS;
		break;
	}

	/*
	 * Do the first part of setup
	 */
	if (pmcs_setup(pwp)) {
		goto failure;
	}
	pmcs_report_fwversion(pwp);

	/*
	 * Now do some additional allocations based upon information
	 * gathered during MPI setup.
	 */
	pwp->root_phys = kmem_zalloc(pwp->nphy * sizeof (pmcs_phy_t), KM_SLEEP);
	ASSERT(pwp->nphy < SAS2_PHYNUM_MAX);
	phyp = pwp->root_phys;
	for (i = 0; i < pwp->nphy; i++) {
		/* Root phys are chained through their sibling pointers */
		if (i < pwp->nphy-1) {
			phyp->sibling = (phyp + 1);
		}
		mutex_init(&phyp->phy_lock, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(pwp->intr_pri));
		phyp->phynum = i & SAS2_PHYNUM_MASK;
		pmcs_phy_name(pwp, phyp, phyp->path, sizeof (phyp->path));
		phyp->pwp = pwp;
		phyp->device_id = PMCS_INVALID_DEVICE_ID;
		phyp->portid = PMCS_PHY_INVALID_PORT_ID;
		phyp++;
	}

	/* Work structures: all start out on the free list (pwp->wf) */
	pwp->work = kmem_zalloc(pwp->max_cmd * sizeof (pmcwork_t), KM_SLEEP);
	for (i = 0; i < pwp->max_cmd; i++) {
		pmcwork_t *pwrk = &pwp->work[i];
		mutex_init(&pwrk->lock, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(pwp->intr_pri));
		cv_init(&pwrk->sleep_cv, NULL, CV_DRIVER, NULL);
		STAILQ_INSERT_TAIL(&pwp->wf, pwrk, next);

	}
	pwp->targets = (pmcs_xscsi_t **)
	    kmem_zalloc(pwp->max_dev * sizeof (pmcs_xscsi_t *), KM_SLEEP);

	pwp->iqpt = (pmcs_iqp_trace_t *)
	    kmem_zalloc(sizeof (pmcs_iqp_trace_t), KM_SLEEP);
	pwp->iqpt->head = kmem_zalloc(PMCS_IQP_TRACE_BUFFER_SIZE, KM_SLEEP);
	pwp->iqpt->curpos = pwp->iqpt->head;
	pwp->iqpt->size_left = PMCS_IQP_TRACE_BUFFER_SIZE;

	/*
	 * Start MPI communication.
	 */
	if (pmcs_start_mpi(pwp)) {
		/* One retry after a soft reset before giving up */
		if (pmcs_soft_reset(pwp, B_FALSE)) {
			goto failure;
		}
		pwp->last_reset_reason = PMCS_LAST_RST_ATTACH;
	}

	/*
	 * Do some initial acceptance tests.
	 * This tests interrupts and queues.
	 */
	if (pmcs_echo_test(pwp)) {
		goto failure;
	}

	/* Read VPD - if it exists */
	if (pmcs_get_nvmd(pwp, PMCS_NVMD_VPD, PMCIN_NVMD_VPD, 0, NULL, 0)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Unable to read VPD: "
		    "attempting to fabricate", __func__);
		/*
		 * When we release, this must goto failure and the call
		 * to pmcs_fabricate_wwid is removed.
		 */
		/* goto failure; */
		if (!pmcs_fabricate_wwid(pwp)) {
			goto failure;
		}
	}

	/*
	 * We're now officially running
	 */
	pwp->state = STATE_RUNNING;

	/*
	 * Check firmware versions and load new firmware
	 * if needed and reset.
	 */
	if (pmcs_firmware_update(pwp)) {
		pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL,
		    "%s: Firmware update failed", __func__);
		goto failure;
	}

	/*
	 * Create completion threads.
	 */
	for (i = 0; i < pwp->cq_info.cq_threads; i++) {
		pwp->cq_info.cq_thr_info[i].cq_pwp = pwp;
		pwp->cq_info.cq_thr_info[i].cq_thread =
		    thread_create(NULL, 0, pmcs_scsa_cq_run,
		    &pwp->cq_info.cq_thr_info[i], 0, &p0, TS_RUN, minclsyspri);
	}

	/*
	 * Create one thread to deal with the updating of the interrupt
	 * coalescing timer.
	 */
	pwp->ict_thread = thread_create(NULL, 0, pmcs_check_intr_coal,
	    pwp, 0, &p0, TS_RUN, minclsyspri);

	/*
	 * Kick off the watchdog
	 */
	pwp->wdhandle = timeout(pmcs_watchdog, pwp,
	    drv_usectohz(PMCS_WATCH_INTERVAL));
	/*
	 * Do the SCSI attachment code (before starting phys)
	 */
	if (pmcs_scsa_init(pwp, &pmcs_dattr)) {
		goto failure;
	}
	pwp->hba_attached = 1;

	/* Check all acc & dma handles allocated in attach */
	if (pmcs_check_acc_dma_handle(pwp)) {
		ddi_fm_service_impact(pwp->dip, DDI_SERVICE_LOST);
		goto failure;
	}

	/*
	 * Create the iportmap for this HBA instance
	 */
	if (scsi_hba_iportmap_create(dip, iportmap_csync_usec,
	    iportmap_stable_usec, &pwp->hss_iportmap) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: pmcs%d iportmap_create failed", __func__, inst);
		goto failure;
	}
	ASSERT(pwp->hss_iportmap);

	/*
	 * Create the phymap for this HBA instance
	 */
	if (sas_phymap_create(dip, phymap_stable_usec, PHYMAP_MODE_SIMPLE, NULL,
	    pwp, pmcs_phymap_activate, pmcs_phymap_deactivate,
	    &pwp->hss_phymap) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: pmcs%d phymap_create failed", __func__, inst);
		goto failure;
	}
	ASSERT(pwp->hss_phymap);

	/*
	 * Start the PHYs.
	 */
	if (pmcs_start_phys(pwp)) {
		goto failure;
	}

	/*
	 * From this point on, we can't fail.
	 */
	ddi_report_dev(dip);

	/* SM-HBA */
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_SMHBA_SUPPORTED,
	    &sm_hba);

	/* SM-HBA */
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_DRV_VERSION,
	    pmcs_driver_rev);

	/* SM-HBA */
	chiprev = 'A' + pwp->chiprev;
	/*
	 * NOTE(review): &chiprev points at a single char that is not
	 * NUL-terminated; "%s" reads until a NUL is found.  The size
	 * limit of 2 bounds the write, not the read — confirm this
	 * cannot read past chiprev on this stack layout.
	 */
	(void) snprintf(hw_rev, 2, "%s", &chiprev);
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_HWARE_VERSION,
	    hw_rev);

	/* SM-HBA */
	switch (PMCS_FW_TYPE(pwp)) {
	case PMCS_FW_TYPE_RELEASED:
		fwsupport = "Released";
		break;
	case PMCS_FW_TYPE_DEVELOPMENT:
		fwsupport = "Development";
		break;
	case PMCS_FW_TYPE_ALPHA:
		fwsupport = "Alpha";
		break;
	case PMCS_FW_TYPE_BETA:
		fwsupport = "Beta";
		break;
	default:
		fwsupport = "Special";
		break;
	}
	(void) snprintf(fw_rev, sizeof (fw_rev), "%x.%x.%x %s",
	    PMCS_FW_MAJOR(pwp), PMCS_FW_MINOR(pwp), PMCS_FW_MICRO(pwp),
	    fwsupport);
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_STRING, PMCS_FWARE_VERSION,
	    fw_rev);

	/* SM-HBA */
	num_phys = pwp->nphy;
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_NUM_PHYS_HBA,
	    &num_phys);

	/* SM-HBA */
	protocol = SAS_SSP_SUPPORT | SAS_SATA_SUPPORT | SAS_SMP_SUPPORT;
	pmcs_smhba_add_hba_prop(pwp, DATA_TYPE_INT32, PMCS_SUPPORTED_PROTOCOL,
	    &protocol);

	/* Receptacle properties (FMA) */
	pwp->recept_labels[0] = PMCS_RECEPT_LABEL_0;
	pwp->recept_pm[0] = PMCS_RECEPT_PM_0;
	pwp->recept_labels[1] = PMCS_RECEPT_LABEL_1;
	pwp->recept_pm[1] = PMCS_RECEPT_PM_1;
	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
	    SCSI_HBA_PROP_RECEPTACLE_LABEL, &pwp->recept_labels[0],
	    PMCS_NUM_RECEPTACLES) != DDI_PROP_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: failed to create %s property", __func__,
		    "receptacle-label");
	}
	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
	    SCSI_HBA_PROP_RECEPTACLE_PM, &pwp->recept_pm[0],
	    PMCS_NUM_RECEPTACLES) != DDI_PROP_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: failed to create %s property", __func__,
		    "receptacle-pm");
	}

	return (DDI_SUCCESS);

failure:
	/* pmcs_unattach() tears down everything set up above */
	if (pmcs_unattach(pwp)) {
		pwp->stuck = 1;
	}
	return (DDI_FAILURE);
}

/*
 * detach(9E) entry point.  Resolves the soft state for either an iport or
 * HBA node, then handles DDI_DETACH (delegating to pmcs_iport_unattach()
 * or pmcs_unattach()) and DDI_SUSPEND/DDI_PM_SUSPEND (HBA node only:
 * suspend the worker taskq and mark the instance suspended).
 */
int
pmcs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int inst = ddi_get_instance(dip);
	pmcs_iport_t *iport = NULL;
	pmcs_hw_t *pwp = NULL;
	scsi_hba_tran_t *tran;

	if (scsi_hba_iport_unit_address(dip) != NULL) {
		/* iport node */
		iport = ddi_get_soft_state(pmcs_iport_softstate, inst);
		ASSERT(iport);
		if (iport == NULL) {
			return (DDI_FAILURE);
		}
		pwp = iport->pwp;
	} else {
		/* hba node */
		pwp = (pmcs_hw_t *)ddi_get_soft_state(pmcs_softc_state, inst);
		ASSERT(pwp);
		if (pwp == NULL) {
			return (DDI_FAILURE);
		}
	}
	switch (cmd) {
	case DDI_DETACH:
		if (iport) {
			/* iport detach */
			if (pmcs_iport_unattach(iport)) {
				return (DDI_FAILURE);
			}
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "iport%d detached", inst);
			return (DDI_SUCCESS);
		} else {
			/* HBA detach */
			if (pmcs_unattach(pwp)) {
				return (DDI_FAILURE);
			}
			return (DDI_SUCCESS);
		}

	case DDI_SUSPEND:
	case DDI_PM_SUSPEND:
		/* No DDI_SUSPEND on iport nodes */
		if (iport) {
			return (DDI_SUCCESS);
		}

		if (pwp->stuck) {
			return (DDI_FAILURE);
		}
		tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
		if (!tran) {
			return (DDI_FAILURE);
		}

		pwp = TRAN2PMC(tran);
		if (pwp == NULL) {
			return (DDI_FAILURE);
		}
		mutex_enter(&pwp->lock);
		if (pwp->tq) {
			ddi_taskq_suspend(pwp->tq);
		}
		pwp->suspended = 1;
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "PMC8X6G suspending");
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

/*
 * Tear down an iport on DDI_DETACH.  Fails if the iport still has
 * configured targets, or if its unit address is still active in the phymap
 * (unless the detach_with_active_port debug override is set).  Otherwise
 * removes the iport from the HBA's list and proceeds with teardown.
 */
static int
pmcs_iport_unattach(pmcs_iport_t *iport)
{
	pmcs_hw_t *pwp = iport->pwp;

	/*
	 * First, check if there are still any configured targets on this
	 * iport.  If so, we fail detach.
	 */
	if (pmcs_iport_has_targets(pwp, iport)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL,
		    "iport%d detach failure: iport has targets (luns)",
		    ddi_get_instance(iport->dip));
		return (DDI_FAILURE);
	}

	/*
	 * Remove this iport from our list if it is inactive in the phymap.
	 */
	rw_enter(&pwp->iports_lock, RW_WRITER);
	mutex_enter(&iport->lock);

	if ((iport->ua_state == UA_ACTIVE) &&
	    (detach_with_active_port == 0)) {
		mutex_exit(&iport->lock);
		rw_exit(&pwp->iports_lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_IPORT, NULL, NULL,
		    "iport%d detach failure: "
		    "iport unit address active in phymap",
		    ddi_get_instance(iport->dip));
		return (DDI_FAILURE);
	}

	/* If it's our only iport, clear iports_attached */
	ASSERT(pwp->num_iports >= 1);
	if (--pwp->num_iports == 0) {
		pwp->iports_attached = 0;
	}

	ASSERT(list_link_active(&iport->list_node));
	list_remove(&pwp->iports, iport);
	rw_exit(&pwp->iports_lock);

	/*
	 * We have removed the iport handle from the HBA's iports list,
	 * there will be no new references to it.
Two things must be 1264 * guarded against here. First, we could have PHY up events, 1265 * adding themselves to the iport->phys list and grabbing ref's 1266 * on our iport handle. Second, we could have existing references 1267 * to this iport handle from a point in time prior to the list 1268 * removal above. 1269 * 1270 * So first, destroy the phys list. Remove any phys that have snuck 1271 * in after the phymap deactivate, dropping the refcnt accordingly. 1272 * If these PHYs are still up if and when the phymap reactivates 1273 * (i.e. when this iport reattaches), we'll populate the list with 1274 * them and bump the refcnt back up. 1275 */ 1276 pmcs_remove_phy_from_iport(iport, NULL); 1277 ASSERT(list_is_empty(&iport->phys)); 1278 list_destroy(&iport->phys); 1279 mutex_exit(&iport->lock); 1280 1281 /* 1282 * Second, wait for any other references to this iport to be 1283 * dropped, then continue teardown. 1284 */ 1285 mutex_enter(&iport->refcnt_lock); 1286 while (iport->refcnt != 0) { 1287 cv_wait(&iport->refcnt_cv, &iport->refcnt_lock); 1288 } 1289 mutex_exit(&iport->refcnt_lock); 1290 1291 1292 /* Destroy the iport target map */ 1293 if (pmcs_iport_tgtmap_destroy(iport) == B_FALSE) { 1294 return (DDI_FAILURE); 1295 } 1296 1297 /* Free the tgt soft state */ 1298 if (iport->tgt_sstate != NULL) { 1299 ddi_soft_state_bystr_fini(&iport->tgt_sstate); 1300 } 1301 1302 /* Free our unit address string */ 1303 strfree(iport->ua); 1304 1305 /* Finish teardown and free the softstate */ 1306 mutex_destroy(&iport->refcnt_lock); 1307 mutex_destroy(&iport->smp_lock); 1308 ASSERT(iport->refcnt == 0); 1309 cv_destroy(&iport->refcnt_cv); 1310 cv_destroy(&iport->smp_cv); 1311 mutex_destroy(&iport->lock); 1312 ddi_soft_state_free(pmcs_iport_softstate, ddi_get_instance(iport->dip)); 1313 1314 return (DDI_SUCCESS); 1315 } 1316 1317 static int 1318 pmcs_unattach(pmcs_hw_t *pwp) 1319 { 1320 int i; 1321 enum pwpstate curstate; 1322 pmcs_cq_thr_info_t *cqti; 1323 1324 /* 1325 * Tear down 
the interrupt infrastructure. 1326 */ 1327 if (pmcs_teardown_intr(pwp)) { 1328 pwp->stuck = 1; 1329 } 1330 pwp->intr_cnt = 0; 1331 1332 /* 1333 * Grab a lock, if initted, to set state. 1334 */ 1335 if (pwp->locks_initted) { 1336 mutex_enter(&pwp->lock); 1337 if (pwp->state != STATE_DEAD) { 1338 pwp->state = STATE_UNPROBING; 1339 } 1340 curstate = pwp->state; 1341 mutex_exit(&pwp->lock); 1342 1343 /* 1344 * Stop the I/O completion threads. 1345 */ 1346 mutex_enter(&pwp->cq_lock); 1347 pwp->cq_info.cq_stop = B_TRUE; 1348 for (i = 0; i < pwp->cq_info.cq_threads; i++) { 1349 if (pwp->cq_info.cq_thr_info[i].cq_thread) { 1350 cqti = &pwp->cq_info.cq_thr_info[i]; 1351 mutex_enter(&cqti->cq_thr_lock); 1352 cv_signal(&cqti->cq_cv); 1353 mutex_exit(&cqti->cq_thr_lock); 1354 mutex_exit(&pwp->cq_lock); 1355 thread_join(cqti->cq_thread->t_did); 1356 mutex_enter(&pwp->cq_lock); 1357 } 1358 } 1359 mutex_exit(&pwp->cq_lock); 1360 kmem_free(pwp->cq_info.cq_thr_info, 1361 sizeof (pmcs_cq_thr_info_t) * pwp->cq_info.cq_threads); 1362 1363 /* 1364 * Stop the interrupt coalescing timer thread 1365 */ 1366 if (pwp->ict_thread) { 1367 mutex_enter(&pwp->ict_lock); 1368 pwp->io_intr_coal.stop_thread = B_TRUE; 1369 cv_signal(&pwp->ict_cv); 1370 mutex_exit(&pwp->ict_lock); 1371 thread_join(pwp->ict_thread->t_did); 1372 } 1373 } else { 1374 if (pwp->state != STATE_DEAD) { 1375 pwp->state = STATE_UNPROBING; 1376 } 1377 curstate = pwp->state; 1378 } 1379 1380 /* 1381 * Make sure that any pending watchdog won't 1382 * be called from this point on out. 1383 */ 1384 (void) untimeout(pwp->wdhandle); 1385 /* 1386 * After the above action, the watchdog 1387 * timer that starts up the worker task 1388 * may trigger but will exit immediately 1389 * on triggering. 1390 * 1391 * Now that this is done, we can destroy 1392 * the task queue, which will wait if we're 1393 * running something on it. 
1394 */ 1395 if (pwp->tq) { 1396 ddi_taskq_destroy(pwp->tq); 1397 pwp->tq = NULL; 1398 } 1399 1400 pmcs_fm_fini(pwp); 1401 1402 if (pwp->hba_attached) { 1403 (void) scsi_hba_detach(pwp->dip); 1404 pwp->hba_attached = 0; 1405 } 1406 1407 /* 1408 * If the chip hasn't been marked dead, shut it down now 1409 * to bring it back to a known state without attempting 1410 * a soft reset. 1411 */ 1412 if (curstate != STATE_DEAD && pwp->locks_initted) { 1413 /* 1414 * De-register all registered devices 1415 */ 1416 pmcs_deregister_devices(pwp, pwp->root_phys); 1417 1418 /* 1419 * Stop all the phys. 1420 */ 1421 pmcs_stop_phys(pwp); 1422 1423 /* 1424 * Shut Down Message Passing 1425 */ 1426 (void) pmcs_stop_mpi(pwp); 1427 1428 /* 1429 * Reset chip 1430 */ 1431 (void) pmcs_soft_reset(pwp, B_FALSE); 1432 pwp->last_reset_reason = PMCS_LAST_RST_DETACH; 1433 } 1434 1435 /* 1436 * Turn off interrupts on the chip 1437 */ 1438 if (pwp->mpi_acc_handle) { 1439 pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, 0xffffffff); 1440 } 1441 1442 if (pwp->hss_phymap != NULL) { 1443 /* Destroy the phymap */ 1444 sas_phymap_destroy(pwp->hss_phymap); 1445 } 1446 1447 if (pwp->hss_iportmap != NULL) { 1448 /* Destroy the iportmap */ 1449 scsi_hba_iportmap_destroy(pwp->hss_iportmap); 1450 } 1451 1452 /* Destroy the iports lock and list */ 1453 rw_destroy(&pwp->iports_lock); 1454 ASSERT(list_is_empty(&pwp->iports)); 1455 list_destroy(&pwp->iports); 1456 1457 /* 1458 * Free DMA handles and associated consistent memory 1459 */ 1460 if (pwp->regdump_hndl) { 1461 if (ddi_dma_unbind_handle(pwp->regdump_hndl) != DDI_SUCCESS) { 1462 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1463 "Condition check failed " 1464 "at %s():%d", __func__, __LINE__); 1465 } 1466 ddi_dma_free_handle(&pwp->regdump_hndl); 1467 ddi_dma_mem_free(&pwp->regdump_acchdl); 1468 pwp->regdump_hndl = 0; 1469 } 1470 if (pwp->fwlog_hndl) { 1471 if (ddi_dma_unbind_handle(pwp->fwlog_hndl) != DDI_SUCCESS) { 1472 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 
1473 "Condition check failed " 1474 "at %s():%d", __func__, __LINE__); 1475 } 1476 ddi_dma_free_handle(&pwp->fwlog_hndl); 1477 ddi_dma_mem_free(&pwp->fwlog_acchdl); 1478 pwp->fwlog_hndl = 0; 1479 } 1480 if (pwp->cip_handles) { 1481 if (ddi_dma_unbind_handle(pwp->cip_handles) != DDI_SUCCESS) { 1482 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1483 "Condition check failed " 1484 "at %s():%d", __func__, __LINE__); 1485 } 1486 ddi_dma_free_handle(&pwp->cip_handles); 1487 ddi_dma_mem_free(&pwp->cip_acchdls); 1488 pwp->cip_handles = 0; 1489 } 1490 for (i = 0; i < PMCS_NOQ; i++) { 1491 if (pwp->oqp_handles[i]) { 1492 if (ddi_dma_unbind_handle(pwp->oqp_handles[i]) != 1493 DDI_SUCCESS) { 1494 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1495 "Condition check failed at %s():%d", 1496 __func__, __LINE__); 1497 } 1498 ddi_dma_free_handle(&pwp->oqp_handles[i]); 1499 ddi_dma_mem_free(&pwp->oqp_acchdls[i]); 1500 pwp->oqp_handles[i] = 0; 1501 } 1502 } 1503 for (i = 0; i < PMCS_NIQ; i++) { 1504 if (pwp->iqp_handles[i]) { 1505 if (ddi_dma_unbind_handle(pwp->iqp_handles[i]) != 1506 DDI_SUCCESS) { 1507 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1508 "Condition check failed at %s():%d", 1509 __func__, __LINE__); 1510 } 1511 ddi_dma_free_handle(&pwp->iqp_handles[i]); 1512 ddi_dma_mem_free(&pwp->iqp_acchdls[i]); 1513 pwp->iqp_handles[i] = 0; 1514 } 1515 } 1516 1517 pmcs_free_dma_chunklist(pwp); 1518 1519 /* 1520 * Unmap registers and destroy access handles 1521 */ 1522 if (pwp->mpi_acc_handle) { 1523 ddi_regs_map_free(&pwp->mpi_acc_handle); 1524 pwp->mpi_acc_handle = 0; 1525 } 1526 if (pwp->top_acc_handle) { 1527 ddi_regs_map_free(&pwp->top_acc_handle); 1528 pwp->top_acc_handle = 0; 1529 } 1530 if (pwp->gsm_acc_handle) { 1531 ddi_regs_map_free(&pwp->gsm_acc_handle); 1532 pwp->gsm_acc_handle = 0; 1533 } 1534 if (pwp->msg_acc_handle) { 1535 ddi_regs_map_free(&pwp->msg_acc_handle); 1536 pwp->msg_acc_handle = 0; 1537 } 1538 if (pwp->pci_acc_handle) { 1539 pci_config_teardown(&pwp->pci_acc_handle); 
1540 pwp->pci_acc_handle = 0; 1541 } 1542 1543 /* 1544 * Do memory allocation cleanup. 1545 */ 1546 while (pwp->dma_freelist) { 1547 pmcs_dmachunk_t *this = pwp->dma_freelist; 1548 pwp->dma_freelist = this->nxt; 1549 kmem_free(this, sizeof (pmcs_dmachunk_t)); 1550 } 1551 1552 /* 1553 * Free pools 1554 */ 1555 if (pwp->iocomp_cb_cache) { 1556 kmem_cache_destroy(pwp->iocomp_cb_cache); 1557 } 1558 1559 /* 1560 * Free all PHYs (at level > 0), then free the cache 1561 */ 1562 pmcs_free_all_phys(pwp, pwp->root_phys); 1563 if (pwp->phy_cache) { 1564 kmem_cache_destroy(pwp->phy_cache); 1565 } 1566 1567 /* 1568 * Free root PHYs 1569 */ 1570 if (pwp->root_phys) { 1571 pmcs_phy_t *phyp = pwp->root_phys; 1572 for (i = 0; i < pwp->nphy; i++) { 1573 mutex_destroy(&phyp->phy_lock); 1574 phyp = phyp->sibling; 1575 } 1576 kmem_free(pwp->root_phys, pwp->nphy * sizeof (pmcs_phy_t)); 1577 pwp->root_phys = NULL; 1578 pwp->nphy = 0; 1579 } 1580 1581 /* Free the targets list */ 1582 if (pwp->targets) { 1583 kmem_free(pwp->targets, 1584 sizeof (pmcs_xscsi_t *) * pwp->max_dev); 1585 } 1586 1587 /* 1588 * Free work structures 1589 */ 1590 1591 if (pwp->work && pwp->max_cmd) { 1592 for (i = 0; i < pwp->max_cmd; i++) { 1593 pmcwork_t *pwrk = &pwp->work[i]; 1594 mutex_destroy(&pwrk->lock); 1595 cv_destroy(&pwrk->sleep_cv); 1596 } 1597 kmem_free(pwp->work, sizeof (pmcwork_t) * pwp->max_cmd); 1598 pwp->work = NULL; 1599 pwp->max_cmd = 0; 1600 } 1601 1602 /* 1603 * Do last property and SCSA cleanup 1604 */ 1605 if (pwp->smp_tran) { 1606 smp_hba_tran_free(pwp->smp_tran); 1607 pwp->smp_tran = NULL; 1608 } 1609 if (pwp->tran) { 1610 scsi_hba_tran_free(pwp->tran); 1611 pwp->tran = NULL; 1612 } 1613 if (pwp->reset_notify_listf) { 1614 scsi_hba_reset_notify_tear_down(pwp->reset_notify_listf); 1615 pwp->reset_notify_listf = NULL; 1616 } 1617 ddi_prop_remove_all(pwp->dip); 1618 if (pwp->stuck) { 1619 return (-1); 1620 } 1621 1622 /* Free register dump area if allocated */ 1623 if (pwp->regdumpp) { 1624 
kmem_free(pwp->regdumpp, PMCS_REG_DUMP_SIZE); 1625 pwp->regdumpp = NULL; 1626 } 1627 if (pwp->iqpt && pwp->iqpt->head) { 1628 kmem_free(pwp->iqpt->head, PMCS_IQP_TRACE_BUFFER_SIZE); 1629 pwp->iqpt->head = pwp->iqpt->curpos = NULL; 1630 } 1631 if (pwp->iqpt) { 1632 kmem_free(pwp->iqpt, sizeof (pmcs_iqp_trace_t)); 1633 pwp->iqpt = NULL; 1634 } 1635 1636 /* Destroy pwp's lock */ 1637 if (pwp->locks_initted) { 1638 mutex_destroy(&pwp->lock); 1639 mutex_destroy(&pwp->dma_lock); 1640 mutex_destroy(&pwp->axil_lock); 1641 mutex_destroy(&pwp->cq_lock); 1642 mutex_destroy(&pwp->config_lock); 1643 mutex_destroy(&pwp->ict_lock); 1644 mutex_destroy(&pwp->wfree_lock); 1645 mutex_destroy(&pwp->pfree_lock); 1646 mutex_destroy(&pwp->dead_phylist_lock); 1647 #ifdef DEBUG 1648 mutex_destroy(&pwp->dbglock); 1649 #endif 1650 cv_destroy(&pwp->config_cv); 1651 cv_destroy(&pwp->ict_cv); 1652 cv_destroy(&pwp->drain_cv); 1653 pwp->locks_initted = 0; 1654 } 1655 1656 ddi_soft_state_free(pmcs_softc_state, ddi_get_instance(pwp->dip)); 1657 return (0); 1658 } 1659 1660 /* 1661 * quiesce (9E) entry point 1662 * 1663 * This function is called when the system is single-threaded at high PIL 1664 * with preemption disabled. Therefore, the function must not block/wait/sleep. 1665 * 1666 * Returns DDI_SUCCESS or DDI_FAILURE. 
1667 * 1668 */ 1669 static int 1670 pmcs_quiesce(dev_info_t *dip) 1671 { 1672 pmcs_hw_t *pwp; 1673 scsi_hba_tran_t *tran; 1674 1675 if ((tran = ddi_get_driver_private(dip)) == NULL) 1676 return (DDI_SUCCESS); 1677 1678 /* No quiesce necessary on a per-iport basis */ 1679 if (scsi_hba_iport_unit_address(dip) != NULL) { 1680 return (DDI_SUCCESS); 1681 } 1682 1683 if ((pwp = TRAN2PMC(tran)) == NULL) 1684 return (DDI_SUCCESS); 1685 1686 /* Stop MPI & Reset chip (no need to re-initialize) */ 1687 (void) pmcs_stop_mpi(pwp); 1688 (void) pmcs_soft_reset(pwp, B_TRUE); 1689 pwp->last_reset_reason = PMCS_LAST_RST_QUIESCE; 1690 1691 return (DDI_SUCCESS); 1692 } 1693 1694 /* 1695 * Called with xp->statlock and PHY lock and scratch acquired. 1696 */ 1697 static int 1698 pmcs_add_sata_device(pmcs_hw_t *pwp, pmcs_xscsi_t *xp) 1699 { 1700 ata_identify_t *ati; 1701 int result, i; 1702 pmcs_phy_t *pptr; 1703 uint16_t *a; 1704 union { 1705 uint8_t nsa[8]; 1706 uint16_t nsb[4]; 1707 } u; 1708 1709 /* 1710 * Safe defaults - use only if this target is brand new (i.e. doesn't 1711 * already have these settings configured) 1712 */ 1713 if (xp->capacity == 0) { 1714 xp->capacity = (uint64_t)-1; 1715 xp->ca = 1; 1716 xp->qdepth = 1; 1717 xp->pio = 1; 1718 } 1719 1720 pptr = xp->phy; 1721 1722 /* 1723 * We only try and issue an IDENTIFY for first level 1724 * (direct attached) devices. We don't try and 1725 * set other quirks here (this will happen later, 1726 * if the device is fully configured) 1727 */ 1728 if (pptr->level) { 1729 return (0); 1730 } 1731 1732 mutex_exit(&xp->statlock); 1733 result = pmcs_sata_identify(pwp, pptr); 1734 mutex_enter(&xp->statlock); 1735 1736 if (result) { 1737 return (result); 1738 } 1739 ati = pwp->scratch; 1740 a = &ati->word108; 1741 for (i = 0; i < 4; i++) { 1742 u.nsb[i] = ddi_swap16(*a++); 1743 } 1744 1745 /* 1746 * Check the returned data for being a valid (NAA=5) WWN. 
1747 * If so, use that and override the SAS address we were 1748 * given at Link Up time. 1749 */ 1750 if ((u.nsa[0] >> 4) == 5) { 1751 (void) memcpy(pptr->sas_address, u.nsa, 8); 1752 } 1753 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 1754 "%s: %s has SAS ADDRESS " SAS_ADDR_FMT, 1755 __func__, pptr->path, SAS_ADDR_PRT(pptr->sas_address)); 1756 return (0); 1757 } 1758 1759 /* 1760 * Called with PHY lock and target statlock held and scratch acquired 1761 */ 1762 static boolean_t 1763 pmcs_add_new_device(pmcs_hw_t *pwp, pmcs_xscsi_t *target) 1764 { 1765 ASSERT(target != NULL); 1766 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, target, "%s: target = 0x%p", 1767 __func__, (void *) target); 1768 1769 switch (target->phy->dtype) { 1770 case SATA: 1771 if (pmcs_add_sata_device(pwp, target) != 0) { 1772 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, target->phy, 1773 target, "%s: add_sata_device failed for tgt 0x%p", 1774 __func__, (void *) target); 1775 return (B_FALSE); 1776 } 1777 break; 1778 case SAS: 1779 target->qdepth = maxqdepth; 1780 break; 1781 case EXPANDER: 1782 target->qdepth = 1; 1783 break; 1784 } 1785 1786 target->new = 0; 1787 target->assigned = 1; 1788 target->dev_state = PMCS_DEVICE_STATE_OPERATIONAL; 1789 target->dtype = target->phy->dtype; 1790 1791 /* 1792 * Set the PHY's config stop time to 0. This is one of the final 1793 * stops along the config path, so we're indicating that we 1794 * successfully configured the PHY. 
1795 */ 1796 target->phy->config_stop = 0; 1797 1798 return (B_TRUE); 1799 } 1800 1801 void 1802 pmcs_worker(void *arg) 1803 { 1804 pmcs_hw_t *pwp = arg; 1805 ulong_t work_flags; 1806 1807 DTRACE_PROBE2(pmcs__worker, ulong_t, pwp->work_flags, boolean_t, 1808 pwp->config_changed); 1809 1810 if (pwp->state != STATE_RUNNING) { 1811 return; 1812 } 1813 1814 work_flags = atomic_swap_ulong(&pwp->work_flags, 0); 1815 1816 if (work_flags & PMCS_WORK_FLAG_DUMP_REGS) { 1817 mutex_enter(&pwp->lock); 1818 pmcs_register_dump_int(pwp); 1819 mutex_exit(&pwp->lock); 1820 } 1821 1822 if (work_flags & PMCS_WORK_FLAG_SAS_HW_ACK) { 1823 pmcs_ack_events(pwp); 1824 } 1825 1826 if (work_flags & PMCS_WORK_FLAG_SPINUP_RELEASE) { 1827 mutex_enter(&pwp->lock); 1828 pmcs_spinup_release(pwp, NULL); 1829 mutex_exit(&pwp->lock); 1830 } 1831 1832 if (work_flags & PMCS_WORK_FLAG_SSP_EVT_RECOVERY) { 1833 pmcs_ssp_event_recovery(pwp); 1834 } 1835 1836 if (work_flags & PMCS_WORK_FLAG_DS_ERR_RECOVERY) { 1837 pmcs_dev_state_recovery(pwp, NULL); 1838 } 1839 1840 if (work_flags & PMCS_WORK_FLAG_DEREGISTER_DEV) { 1841 pmcs_deregister_device_work(pwp, NULL); 1842 } 1843 1844 if (work_flags & PMCS_WORK_FLAG_DISCOVER) { 1845 pmcs_discover(pwp); 1846 } 1847 1848 if (work_flags & PMCS_WORK_FLAG_ABORT_HANDLE) { 1849 if (pmcs_abort_handler(pwp)) { 1850 SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE); 1851 } 1852 } 1853 1854 if (work_flags & PMCS_WORK_FLAG_SATA_RUN) { 1855 pmcs_sata_work(pwp); 1856 } 1857 1858 if (work_flags & PMCS_WORK_FLAG_RUN_QUEUES) { 1859 pmcs_scsa_wq_run(pwp); 1860 mutex_enter(&pwp->lock); 1861 PMCS_CQ_RUN(pwp); 1862 mutex_exit(&pwp->lock); 1863 } 1864 1865 if (work_flags & PMCS_WORK_FLAG_ADD_DMA_CHUNKS) { 1866 if (pmcs_add_more_chunks(pwp, 1867 ptob(1) * PMCS_ADDTL_CHUNK_PAGES)) { 1868 SCHEDULE_WORK(pwp, PMCS_WORK_ADD_DMA_CHUNKS); 1869 } else { 1870 SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES); 1871 } 1872 } 1873 } 1874 1875 static int 1876 pmcs_add_more_chunks(pmcs_hw_t *pwp, unsigned long nsize) 
1877 { 1878 pmcs_dmachunk_t *dc; 1879 unsigned long dl; 1880 pmcs_chunk_t *pchunk = NULL; 1881 1882 pwp->cip_dma_attr.dma_attr_align = sizeof (uint32_t); 1883 1884 pchunk = kmem_zalloc(sizeof (pmcs_chunk_t), KM_SLEEP); 1885 if (pchunk == NULL) { 1886 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1887 "Not enough memory for DMA chunks"); 1888 return (-1); 1889 } 1890 1891 if (pmcs_dma_setup(pwp, &pwp->cip_dma_attr, &pchunk->acc_handle, 1892 &pchunk->dma_handle, nsize, (caddr_t *)&pchunk->addrp, 1893 &pchunk->dma_addr) == B_FALSE) { 1894 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1895 "Failed to setup DMA for chunks"); 1896 kmem_free(pchunk, sizeof (pmcs_chunk_t)); 1897 return (-1); 1898 } 1899 1900 if ((pmcs_check_acc_handle(pchunk->acc_handle) != DDI_SUCCESS) || 1901 (pmcs_check_dma_handle(pchunk->dma_handle) != DDI_SUCCESS)) { 1902 ddi_fm_service_impact(pwp->dip, DDI_SERVICE_UNAFFECTED); 1903 return (-1); 1904 } 1905 1906 bzero(pchunk->addrp, nsize); 1907 dc = NULL; 1908 for (dl = 0; dl < (nsize / PMCS_SGL_CHUNKSZ); dl++) { 1909 pmcs_dmachunk_t *tmp; 1910 tmp = kmem_alloc(sizeof (pmcs_dmachunk_t), KM_SLEEP); 1911 tmp->nxt = dc; 1912 dc = tmp; 1913 } 1914 mutex_enter(&pwp->dma_lock); 1915 pmcs_idma_chunks(pwp, dc, pchunk, nsize); 1916 pwp->nchunks++; 1917 mutex_exit(&pwp->dma_lock); 1918 return (0); 1919 } 1920 1921 static void 1922 pmcs_check_forward_progress(pmcs_hw_t *pwp) 1923 { 1924 pmcwork_t *wrkp; 1925 uint32_t *iqp; 1926 uint32_t cur_iqci; 1927 uint32_t cur_work_idx; 1928 uint32_t cur_msgu_tick; 1929 uint32_t cur_iop_tick; 1930 int i; 1931 1932 mutex_enter(&pwp->lock); 1933 1934 if (pwp->state == STATE_IN_RESET) { 1935 mutex_exit(&pwp->lock); 1936 return; 1937 } 1938 1939 /* 1940 * Ensure that inbound work is getting picked up. First, check to 1941 * see if new work has been posted. If it has, ensure that the 1942 * work is moving forward by checking the consumer index and the 1943 * last_htag for the work being processed against what we saw last 1944 * time. 
Note: we use the work structure's 'last_htag' because at 1945 * any given moment it could be freed back, thus clearing 'htag' 1946 * and setting 'last_htag' (see pmcs_pwork). 1947 */ 1948 for (i = 0; i < PMCS_NIQ; i++) { 1949 cur_iqci = pmcs_rd_iqci(pwp, i); 1950 iqp = &pwp->iqp[i][cur_iqci * (PMCS_QENTRY_SIZE >> 2)]; 1951 cur_work_idx = PMCS_TAG_INDEX(LE_32(*(iqp+1))); 1952 wrkp = &pwp->work[cur_work_idx]; 1953 if (cur_iqci == pwp->shadow_iqpi[i]) { 1954 pwp->last_iqci[i] = cur_iqci; 1955 pwp->last_htag[i] = wrkp->last_htag; 1956 continue; 1957 } 1958 if ((cur_iqci == pwp->last_iqci[i]) && 1959 (wrkp->last_htag == pwp->last_htag[i])) { 1960 pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL, 1961 "Inbound Queue stall detected, issuing reset"); 1962 goto hot_reset; 1963 } 1964 pwp->last_iqci[i] = cur_iqci; 1965 pwp->last_htag[i] = wrkp->last_htag; 1966 } 1967 1968 /* 1969 * Check heartbeat on both the MSGU and IOP. It is unlikely that 1970 * we'd ever fail here, as the inbound queue monitoring code above 1971 * would detect a stall due to either of these elements being 1972 * stalled, but we might as well keep an eye on them. 1973 */ 1974 cur_msgu_tick = pmcs_rd_gst_tbl(pwp, PMCS_GST_MSGU_TICK); 1975 if (cur_msgu_tick == pwp->last_msgu_tick) { 1976 pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL, 1977 "Stall detected on MSGU, issuing reset"); 1978 goto hot_reset; 1979 } 1980 pwp->last_msgu_tick = cur_msgu_tick; 1981 1982 cur_iop_tick = pmcs_rd_gst_tbl(pwp, PMCS_GST_IOP_TICK); 1983 if (cur_iop_tick == pwp->last_iop_tick) { 1984 pmcs_prt(pwp, PMCS_PRT_WARN, NULL, NULL, 1985 "Stall detected on IOP, issuing reset"); 1986 goto hot_reset; 1987 } 1988 pwp->last_iop_tick = cur_iop_tick; 1989 1990 mutex_exit(&pwp->lock); 1991 return; 1992 1993 hot_reset: 1994 pwp->state = STATE_DEAD; 1995 /* 1996 * We've detected a stall. Attempt to recover service via hot 1997 * reset. In case of failure, pmcs_hot_reset() will handle the 1998 * failure and issue any required FM notifications. 
1999 * See pmcs_subr.c for more details. 2000 */ 2001 if (pmcs_hot_reset(pwp)) { 2002 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 2003 "%s: hot reset failure", __func__); 2004 } else { 2005 pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, 2006 "%s: hot reset complete", __func__); 2007 pwp->last_reset_reason = PMCS_LAST_RST_STALL; 2008 } 2009 mutex_exit(&pwp->lock); 2010 } 2011 2012 static void 2013 pmcs_check_commands(pmcs_hw_t *pwp) 2014 { 2015 pmcs_cmd_t *sp; 2016 size_t amt; 2017 char path[32]; 2018 pmcwork_t *pwrk; 2019 pmcs_xscsi_t *target; 2020 pmcs_phy_t *phyp; 2021 int rval; 2022 2023 for (pwrk = pwp->work; pwrk < &pwp->work[pwp->max_cmd]; pwrk++) { 2024 mutex_enter(&pwrk->lock); 2025 2026 /* 2027 * If the command isn't active, we can't be timing it still. 2028 * Active means the tag is not free and the state is "on chip". 2029 */ 2030 if (!PMCS_COMMAND_ACTIVE(pwrk)) { 2031 mutex_exit(&pwrk->lock); 2032 continue; 2033 } 2034 2035 /* 2036 * No timer active for this command. 2037 */ 2038 if (pwrk->timer == 0) { 2039 mutex_exit(&pwrk->lock); 2040 continue; 2041 } 2042 2043 /* 2044 * Knock off bits for the time interval. 2045 */ 2046 if (pwrk->timer >= US2WT(PMCS_WATCH_INTERVAL)) { 2047 pwrk->timer -= US2WT(PMCS_WATCH_INTERVAL); 2048 } else { 2049 pwrk->timer = 0; 2050 } 2051 if (pwrk->timer > 0) { 2052 mutex_exit(&pwrk->lock); 2053 continue; 2054 } 2055 2056 /* 2057 * The command has now officially timed out. 2058 * Get the path for it. If it doesn't have 2059 * a phy pointer any more, it's really dead 2060 * and can just be put back on the free list. 2061 * There should *not* be any commands associated 2062 * with it any more. 
2063 */ 2064 if (pwrk->phy == NULL) { 2065 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2066 "dead command with gone phy being recycled"); 2067 ASSERT(pwrk->xp == NULL); 2068 pmcs_pwork(pwp, pwrk); 2069 continue; 2070 } 2071 amt = sizeof (path); 2072 amt = min(sizeof (pwrk->phy->path), amt); 2073 (void) memcpy(path, pwrk->phy->path, amt); 2074 2075 /* 2076 * If this is a non-SCSA command, stop here. Eventually 2077 * we might do something with non-SCSA commands here- 2078 * but so far their timeout mechanisms are handled in 2079 * the WAIT_FOR macro. 2080 */ 2081 if (pwrk->xp == NULL) { 2082 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 2083 "%s: non-SCSA cmd tag 0x%x timed out", 2084 path, pwrk->htag); 2085 mutex_exit(&pwrk->lock); 2086 continue; 2087 } 2088 2089 sp = pwrk->arg; 2090 ASSERT(sp != NULL); 2091 2092 /* 2093 * Mark it as timed out. 2094 */ 2095 CMD2PKT(sp)->pkt_reason = CMD_TIMEOUT; 2096 CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT; 2097 #ifdef DEBUG 2098 pmcs_prt(pwp, PMCS_PRT_DEBUG, pwrk->phy, pwrk->xp, 2099 "%s: SCSA cmd tag 0x%x timed out (state %x) onwire=%d", 2100 path, pwrk->htag, pwrk->state, pwrk->onwire); 2101 #else 2102 pmcs_prt(pwp, PMCS_PRT_DEBUG, pwrk->phy, pwrk->xp, 2103 "%s: SCSA cmd tag 0x%x timed out (state %x)", 2104 path, pwrk->htag, pwrk->state); 2105 #endif 2106 /* 2107 * Mark the work structure as timed out. 2108 */ 2109 pwrk->state = PMCS_WORK_STATE_TIMED_OUT; 2110 phyp = pwrk->phy; 2111 target = pwrk->xp; 2112 ASSERT(target != NULL); 2113 mutex_exit(&pwrk->lock); 2114 2115 pmcs_lock_phy(phyp); 2116 mutex_enter(&target->statlock); 2117 2118 /* 2119 * No point attempting recovery if the device is gone 2120 */ 2121 if (target->dev_gone) { 2122 mutex_exit(&target->statlock); 2123 pmcs_unlock_phy(phyp); 2124 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target, 2125 "%s: tgt(0x%p) is gone. 
Returning CMD_DEV_GONE " 2126 "for htag 0x%08x", __func__, 2127 (void *)target, pwrk->htag); 2128 mutex_enter(&pwrk->lock); 2129 if (!PMCS_COMMAND_DONE(pwrk)) { 2130 /* Complete this command here */ 2131 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target, 2132 "%s: Completing cmd (htag 0x%08x) " 2133 "anyway", __func__, pwrk->htag); 2134 pwrk->dead = 1; 2135 CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE; 2136 CMD2PKT(sp)->pkt_state = STATE_GOT_BUS; 2137 pmcs_complete_work_impl(pwp, pwrk, NULL, 0); 2138 } else { 2139 mutex_exit(&pwrk->lock); 2140 } 2141 continue; 2142 } 2143 2144 mutex_exit(&target->statlock); 2145 rval = pmcs_abort(pwp, phyp, pwrk->htag, 0, 1); 2146 if (rval) { 2147 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target, 2148 "%s: Bad status (%d) on abort of HTAG 0x%08x", 2149 __func__, rval, pwrk->htag); 2150 pmcs_unlock_phy(phyp); 2151 mutex_enter(&pwrk->lock); 2152 if (!PMCS_COMMAND_DONE(pwrk)) { 2153 /* Complete this command here */ 2154 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, target, 2155 "%s: Completing cmd (htag 0x%08x) " 2156 "anyway", __func__, pwrk->htag); 2157 if (target->dev_gone) { 2158 pwrk->dead = 1; 2159 CMD2PKT(sp)->pkt_reason = CMD_DEV_GONE; 2160 CMD2PKT(sp)->pkt_state = STATE_GOT_BUS; 2161 } 2162 pmcs_complete_work_impl(pwp, pwrk, NULL, 0); 2163 } else { 2164 mutex_exit(&pwrk->lock); 2165 } 2166 pmcs_lock_phy(phyp); 2167 /* 2168 * No need to reschedule ABORT if we get any other 2169 * status 2170 */ 2171 if (rval == ENOMEM) { 2172 phyp->abort_sent = 0; 2173 phyp->abort_pending = 1; 2174 SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE); 2175 } 2176 } 2177 pmcs_unlock_phy(phyp); 2178 } 2179 /* 2180 * Run any completions that may have been queued up. 
 */
	PMCS_CQ_RUN(pwp);
}

/*
 * Periodic watchdog timer callback.
 *
 * Checks chip forward progress every PMCS_FWD_PROG_TRIGGER ticks, restarts
 * discovery when a pending re-enumeration timer has expired, dispatches the
 * worker thread if any work flags are set, and re-arms itself.  If the HBA
 * has left STATE_RUNNING, the timer is NOT re-armed and the routine returns
 * without checking commands or dead PHYs.
 */
static void
pmcs_watchdog(void *arg)
{
	pmcs_hw_t *pwp = arg;

	DTRACE_PROBE2(pmcs__watchdog, ulong_t, pwp->work_flags, boolean_t,
	    pwp->config_changed);

	/*
	 * Check forward progress on the chip
	 */
	if (++pwp->watchdog_count == PMCS_FWD_PROG_TRIGGER) {
		pwp->watchdog_count = 0;
		pmcs_check_forward_progress(pwp);
	}

	/*
	 * Check to see if we need to kick discovery off again
	 */
	mutex_enter(&pwp->config_lock);
	if (pwp->config_restart &&
	    (ddi_get_lbolt() >= pwp->config_restart_time)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: Timer expired for re-enumeration: Start discovery",
		    __func__);
		pwp->config_restart = B_FALSE;
		SCHEDULE_WORK(pwp, PMCS_WORK_DISCOVER);
	}
	mutex_exit(&pwp->config_lock);

	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		return;
	}

	/*
	 * Any work bit set at all means the worker taskq needs a kick;
	 * the CAS against (0, 0) is just an atomic read of work_flags.
	 */
	if (atomic_cas_ulong(&pwp->work_flags, 0, 0) != 0) {
		if (ddi_taskq_dispatch(pwp->tq, pmcs_worker, pwp,
		    DDI_NOSLEEP) != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "Could not dispatch to worker thread");
		}
	}
	/* Re-arm while still holding pwp->lock. */
	pwp->wdhandle = timeout(pmcs_watchdog, pwp,
	    drv_usectohz(PMCS_WATCH_INTERVAL));

	mutex_exit(&pwp->lock);

	pmcs_check_commands(pwp);
	pmcs_handle_dead_phys(pwp);
}

/*
 * Remove the first "icnt" interrupt handlers registered in ih_table.
 * Returns 0 on success; stops and returns -1 on the first failure.
 */
static int
pmcs_remove_ihandlers(pmcs_hw_t *pwp, int icnt)
{
	int i, r, rslt = 0;
	for (i = 0; i < icnt; i++) {
		r = ddi_intr_remove_handler(pwp->ih_table[i]);
		if (r == DDI_SUCCESS) {
			continue;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: unable to remove interrupt handler %d", __func__, i);
		rslt = -1;
		break;
	}
	return (rslt);
}

/*
 * Disable interrupts: as a block if the hardware supports block
 * operations, otherwise one vector at a time up to "icnt".
 * Returns 0 on success, -1 on the first failure.
 *
 * NOTE(review): the block-disable path uses pwp->intr_cnt rather than
 * the icnt argument — confirm callers always pass icnt == intr_cnt when
 * DDI_INTR_FLAG_BLOCK is set.
 */
static int
pmcs_disable_intrs(pmcs_hw_t *pwp, int icnt)
{
	if (pwp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		int r = ddi_intr_block_disable(&pwp->ih_table[0],
		    pwp->intr_cnt);
		if (r != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "unable to disable interrupt block");
			return (-1);
		}
	} else {
		int i;
		for (i = 0; i < icnt; i++) {
			if (ddi_intr_disable(pwp->ih_table[i]) == DDI_SUCCESS) {
				continue;
			}
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "unable to disable interrupt %d", i);
			return (-1);
		}
	}
	return (0);
}

/*
 * Free the first "icnt" allocated interrupts, then release ih_table
 * itself.  Returns 0 on success, -1 on the first failure (in which case
 * ih_table is NOT freed).
 *
 * NOTE(review): ih_table is freed but not set to NULL here (only the
 * size is zeroed) — verify no later path dereferences the stale pointer.
 */
static int
pmcs_free_intrs(pmcs_hw_t *pwp, int icnt)
{
	int i;
	for (i = 0; i < icnt; i++) {
		if (ddi_intr_free(pwp->ih_table[i]) == DDI_SUCCESS) {
			continue;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "unable to free interrupt %d", i);
		return (-1);
	}
	kmem_free(pwp->ih_table, pwp->ih_table_size);
	pwp->ih_table_size = 0;
	return (0);
}

/*
 * Try to set up interrupts of type "type" with a minimum number of interrupts
 * of "min".
 *
 * On success: pwp->int_type, intr_cnt, ih_table and ih_table_size are set.
 * On failure: returns with int_type still PMCS_INT_NONE (and, if the
 * allocation itself failed, ih_table freed and intr_cnt zeroed) so the
 * caller can fall back to the next interrupt type.
 */
static void
pmcs_setup_intr_impl(pmcs_hw_t *pwp, int type, int min)
{
	int rval, avail, count, actual, max;

	rval = ddi_intr_get_nintrs(pwp->dip, type, &count);
	if ((rval != DDI_SUCCESS) || (count < min)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: get_nintrs failed; type: %d rc: %d count: %d min: %d",
		    __func__, type, rval, count, min);
		return;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
	    "%s: nintrs = %d for type: %d", __func__, count, type);

	rval = ddi_intr_get_navail(pwp->dip, type, &avail);
	if ((rval != DDI_SUCCESS) || (avail < min)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: get_navail failed; type: %d rc: %d avail: %d min: %d",
		    __func__, type, rval, avail, min);
		return;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
	    "%s: navail = %d for type: %d", __func__, avail, type);

	/* Size the handle table for everything available, alloc up to max. */
	pwp->ih_table_size = avail * sizeof (ddi_intr_handle_t);
	pwp->ih_table = kmem_alloc(pwp->ih_table_size, KM_SLEEP);

	switch (type) {
	case DDI_INTR_TYPE_MSIX:
		pwp->int_type = PMCS_INT_MSIX;
		max = PMCS_MAX_MSIX;
		break;
	case DDI_INTR_TYPE_MSI:
		pwp->int_type = PMCS_INT_MSI;
		max = PMCS_MAX_MSI;
		break;
	case DDI_INTR_TYPE_FIXED:
	default:
		pwp->int_type = PMCS_INT_FIXED;
		max = PMCS_MAX_FIXED;
		break;
	}

	rval = ddi_intr_alloc(pwp->dip, pwp->ih_table, type, 0, max, &actual,
	    DDI_INTR_ALLOC_NORMAL);
	if (rval != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: ddi_intr_alloc failed; type: %d rc: %d",
		    __func__, type, rval);
		kmem_free(pwp->ih_table, pwp->ih_table_size);
		pwp->ih_table = NULL;
		pwp->ih_table_size = 0;
		pwp->intr_cnt = 0;
		pwp->int_type = PMCS_INT_NONE;
		return;
	}

	pwp->intr_cnt = actual;
}

/*
 * Set up interrupts.
 * We return one of three values:
 *
 * 0 - success
 * EAGAIN - failure to set up interrupts
 * EIO - "" + we're now stuck partly enabled
 *
 * If EIO is returned, we can't unload the driver.
 *
 * NOTE(review): one error path below (block-enable failure) returns
 * EFAULT, which is not in the contract above — confirm callers treat
 * it like EAGAIN.
 */
static int
pmcs_setup_intr(pmcs_hw_t *pwp)
{
	int i, r, itypes, oqv_count;
	ddi_intr_handler_t **iv_table;
	size_t iv_table_size;
	uint_t pri;

	if (ddi_intr_get_supported_types(pwp->dip, &itypes) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "cannot get interrupt types");
		return (EAGAIN);
	}

	/* Honor the compile-time MSI/MSI-X kill switches. */
	if (disable_msix) {
		itypes &= ~DDI_INTR_TYPE_MSIX;
	}
	if (disable_msi) {
		itypes &= ~DDI_INTR_TYPE_MSI;
	}

	/*
	 * We won't know what firmware we're running until we call pmcs_setup,
	 * and we can't call pmcs_setup until we establish interrupts.
	 */

	pwp->int_type = PMCS_INT_NONE;

	/*
	 * We want PMCS_MAX_MSIX vectors for MSI-X.  Anything less would be
	 * uncivilized.
	 */
	if (itypes & DDI_INTR_TYPE_MSIX) {
		pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_MSIX, PMCS_MAX_MSIX);
		if (pwp->int_type == PMCS_INT_MSIX) {
			itypes = 0;
		}
	}

	if (itypes & DDI_INTR_TYPE_MSI) {
		pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_MSI, 1);
		if (pwp->int_type == PMCS_INT_MSI) {
			itypes = 0;
		}
	}

	if (itypes & DDI_INTR_TYPE_FIXED) {
		pmcs_setup_intr_impl(pwp, DDI_INTR_TYPE_FIXED, 1);
		if (pwp->int_type == PMCS_INT_FIXED) {
			itypes = 0;
		}
	}

	if (pwp->intr_cnt == 0) {
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
		    "No interrupts available");
		return (EAGAIN);
	}

	iv_table_size = sizeof (ddi_intr_handler_t *) * pwp->intr_cnt;
	iv_table = kmem_alloc(iv_table_size, KM_SLEEP);

	/*
	 * Get iblock cookie and add handlers.
	 * Handler assignment depends on how many vectors we got:
	 * 1 = everything muxed through pmcs_all_intr, 2 = iodone/non-io
	 * split, 4 = fully split per outbound queue.
	 */
	switch (pwp->intr_cnt) {
	case 1:
		iv_table[0] = pmcs_all_intr;
		break;
	case 2:
		iv_table[0] = pmcs_iodone_ix;
		iv_table[1] = pmcs_nonio_ix;
		break;
	case 4:
		iv_table[PMCS_MSIX_GENERAL] = pmcs_general_ix;
		iv_table[PMCS_MSIX_IODONE] = pmcs_iodone_ix;
		iv_table[PMCS_MSIX_EVENTS] = pmcs_event_ix;
		iv_table[PMCS_MSIX_FATAL] = pmcs_fatal_ix;
		break;
	default:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: intr_cnt = %d - unexpected", __func__, pwp->intr_cnt);
		kmem_free(iv_table, iv_table_size);
		return (EAGAIN);
	}

	for (i = 0; i < pwp->intr_cnt; i++) {
		r = ddi_intr_add_handler(pwp->ih_table[i], iv_table[i],
		    (caddr_t)pwp, NULL);
		if (r != DDI_SUCCESS) {
			kmem_free(iv_table, iv_table_size);
			if (pmcs_remove_ihandlers(pwp, i)) {
				return (EIO);
			}
			/*
			 * NOTE(review): only the first i allocated
			 * interrupts are freed here; handles
			 * i..intr_cnt-1 appear to leak — verify.
			 */
			if (pmcs_free_intrs(pwp, i)) {
				return (EIO);
			}
			pwp->intr_cnt = 0;
			return (EAGAIN);
		}
	}

	kmem_free(iv_table, iv_table_size);

	if (ddi_intr_get_cap(pwp->ih_table[0], &pwp->intr_cap) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "unable to get int capabilities");
		if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		pwp->intr_cnt = 0;
		return (EAGAIN);
	}

	if (pwp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		r = ddi_intr_block_enable(&pwp->ih_table[0], pwp->intr_cnt);
		if (r != DDI_SUCCESS) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "intr blk enable failed");
			if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			pwp->intr_cnt = 0;
			/* NOTE(review): EFAULT here, not EAGAIN. */
			return (EFAULT);
		}
	} else {
		for (i = 0; i < pwp->intr_cnt; i++) {
			r = ddi_intr_enable(pwp->ih_table[i]);
			if (r == DDI_SUCCESS) {
				continue;
			}
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "unable to enable interrupt %d", i);
			if (pmcs_disable_intrs(pwp, i)) {
				return (EIO);
			}
			if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
				return (EIO);
			}
			pwp->intr_cnt = 0;
			return (EAGAIN);
		}
	}

	/*
	 * Set up locks.
	 */
	if (ddi_intr_get_pri(pwp->ih_table[0], &pri) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "unable to get interrupt priority");
		if (pmcs_disable_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		pwp->intr_cnt = 0;
		return (EAGAIN);
	}

	/* All driver mutexes are initialized at interrupt priority. */
	pwp->locks_initted = 1;
	pwp->intr_pri = pri;
	mutex_init(&pwp->lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->dma_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->axil_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->cq_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->ict_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->config_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->wfree_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->pfree_lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
	mutex_init(&pwp->dead_phylist_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pri));
#ifdef DEBUG
	mutex_init(&pwp->dbglock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));
#endif
	cv_init(&pwp->ict_cv, NULL, CV_DRIVER, NULL);
	cv_init(&pwp->drain_cv, NULL, CV_DRIVER, NULL);
	cv_init(&pwp->config_cv, NULL, CV_DRIVER, NULL);
	for (i = 0; i < PMCS_NIQ; i++) {
		mutex_init(&pwp->iqp_lock[i], NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(pwp->intr_pri));
	}
	for (i = 0; i < pwp->cq_info.cq_threads; i++) {
		mutex_init(&pwp->cq_info.cq_thr_info[i].cq_thr_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(pwp->intr_pri));
		cv_init(&pwp->cq_info.cq_thr_info[i].cq_cv, NULL,
		    CV_DRIVER, NULL);
	}

	pmcs_prt(pwp, PMCS_PRT_INFO, NULL, NULL, "%d %s interrup%s configured",
	    pwp->intr_cnt, (pwp->int_type == PMCS_INT_MSIX)? "MSI-X" :
	    ((pwp->int_type == PMCS_INT_MSI)? "MSI" : "INT-X"),
	    pwp->intr_cnt == 1? "t" : "ts");


	/*
	 * Enable Interrupts
	 * Note: "pri" is reused below as scratch for building the outbound
	 * doorbell mask; the IRQ priority was already saved in intr_pri.
	 */
	if (pwp->intr_cnt > PMCS_NOQ) {
		oqv_count = pwp->intr_cnt;
	} else {
		oqv_count = PMCS_NOQ;
	}
	for (pri = 0xffffffff, i = 0; i < oqv_count; i++) {
		pri ^= (1 << i);
	}

	mutex_enter(&pwp->lock);
	pwp->intr_mask = pri;
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_MASK, pwp->intr_mask);
	pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, 0xffffffff);
	mutex_exit(&pwp->lock);

	return (0);
}

/*
 * Tear down all interrupts: disable, remove handlers, free.  Returns 0
 * on success (or if none were set up), EIO if any step fails — in which
 * case intr_cnt is left non-zero and the driver cannot unload.
 */
static int
pmcs_teardown_intr(pmcs_hw_t *pwp)
{
	if (pwp->intr_cnt) {
		if (pmcs_disable_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_remove_ihandlers(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		if (pmcs_free_intrs(pwp, pwp->intr_cnt)) {
			return (EIO);
		}
		pwp->intr_cnt = 0;
	}
	return (0);
}

/* Dedicated vector: general (non-I/O) outbound queue. */
static uint_t
pmcs_general_ix(caddr_t arg1, caddr_t arg2)
{
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
	_NOTE(ARGUNUSED(arg2));
	pmcs_general_intr(pwp);
	return (DDI_INTR_CLAIMED);
}

/* Dedicated vector: event outbound queue. */
static uint_t
pmcs_event_ix(caddr_t arg1, caddr_t arg2)
{
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
	_NOTE(ARGUNUSED(arg2));
	pmcs_event_intr(pwp);
	return (DDI_INTR_CLAIMED);
}

/* Dedicated vector: I/O completion outbound queue. */
static uint_t
pmcs_iodone_ix(caddr_t arg1, caddr_t arg2)
{
	_NOTE(ARGUNUSED(arg2));
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);

	/*
	 * It's possible that if we just turned interrupt coalescing off
	 * (and thus, re-enabled auto clear for interrupts on the I/O outbound
	 * queue) that there was an interrupt already pending.  We use
	 * io_intr_coal.int_cleared to ensure that we still drop in here and
	 * clear the appropriate interrupt bit one last time.
	 */
	mutex_enter(&pwp->ict_lock);
	if (pwp->io_intr_coal.timer_on ||
	    (pwp->io_intr_coal.int_cleared == B_FALSE)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_IODONE));
		pwp->io_intr_coal.int_cleared = B_TRUE;
	}
	mutex_exit(&pwp->ict_lock);

	pmcs_iodone_intr(pwp);

	return (DDI_INTR_CLAIMED);
}

/* Dedicated vector: fatal interrupt. */
static uint_t
pmcs_fatal_ix(caddr_t arg1, caddr_t arg2)
{
	pmcs_hw_t *pwp = (pmcs_hw_t *)((void *)arg1);
	_NOTE(ARGUNUSED(arg2));
	pmcs_fatal_handler(pwp);
	return (DDI_INTR_CLAIMED);
}

/*
 * Two-vector mode: everything that is not an I/O completion.  The
 * general-queue doorbell bit covers both general and event processing
 * here (both service routines are called).
 */
static uint_t
pmcs_nonio_ix(caddr_t arg1, caddr_t arg2)
{
	_NOTE(ARGUNUSED(arg2));
	pmcs_hw_t *pwp = (void *)arg1;
	uint32_t obdb = pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB);

	/*
	 * Check for Fatal Interrupts
	 */
	if (obdb & (1 << PMCS_FATAL_INTERRUPT)) {
		pmcs_fatal_handler(pwp);
		return (DDI_INTR_CLAIMED);
	}

	if (obdb & (1 << PMCS_OQ_GENERAL)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_GENERAL));
		pmcs_general_intr(pwp);
		pmcs_event_intr(pwp);
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * Single-vector (muxed) handler: services every outbound queue whose
 * doorbell bit is set, clearing each bit as it goes.  Claims the
 * interrupt if anything was serviced, or unconditionally for MSI
 * (which is never shared).
 */
static uint_t
pmcs_all_intr(caddr_t arg1, caddr_t arg2)
{
	_NOTE(ARGUNUSED(arg2));
	pmcs_hw_t *pwp = (void *) arg1;
	uint32_t obdb;
	int handled = 0;

	obdb = pmcs_rd_msgunit(pwp, PMCS_MSGU_OBDB);

	/*
	 * Check for Fatal Interrupts
	 */
	if (obdb & (1 << PMCS_FATAL_INTERRUPT)) {
		pmcs_fatal_handler(pwp);
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * Check for Outbound Queue service needed
	 */
	if (obdb & (1 << PMCS_OQ_IODONE)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_IODONE));
		obdb ^= (1 << PMCS_OQ_IODONE);
		handled++;
		pmcs_iodone_intr(pwp);
	}
	if (obdb & (1 << PMCS_OQ_GENERAL)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_GENERAL));
		obdb ^= (1 << PMCS_OQ_GENERAL);
		handled++;
		pmcs_general_intr(pwp);
	}
	if (obdb & (1 << PMCS_OQ_EVENTS)) {
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR,
		    (1 << PMCS_OQ_EVENTS));
		obdb ^= (1 << PMCS_OQ_EVENTS);
		handled++;
		pmcs_event_intr(pwp);
	}
	/* Any leftover bits are unexpected; clear them anyway. */
	if (obdb) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "interrupt bits not handled (0x%x)", obdb);
		pmcs_wr_msgunit(pwp, PMCS_MSGU_OBDB_CLEAR, obdb);
		handled++;
	}
	if (pwp->int_type == PMCS_INT_MSI) {
		handled++;
	}
	return (handled? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
}

/*
 * Fatal interrupt handler: marks the HBA dead and attempts a hot reset.
 * pmcs_hot_reset() handles reset failure and any required FM
 * notifications (see pmcs_subr.c).
 */
void
pmcs_fatal_handler(pmcs_hw_t *pwp)
{
	pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL, "Fatal Interrupt caught");

	mutex_enter(&pwp->lock);
	pwp->state = STATE_DEAD;

	/*
	 * Attempt a hot reset.  In case of failure, pmcs_hot_reset() will
	 * handle the failure and issue any required FM notifications.
	 * See pmcs_subr.c for more details.
	 */
	if (pmcs_hot_reset(pwp)) {
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
		    "%s: hot reset failure", __func__);
	} else {
		pmcs_prt(pwp, PMCS_PRT_ERR, NULL, NULL,
		    "%s: hot reset complete", __func__);
		pwp->last_reset_reason = PMCS_LAST_RST_FATAL_ERROR;
	}
	mutex_exit(&pwp->lock);
}

/*
 * Called with PHY lock and target statlock held and scratch acquired.
2778 */ 2779 boolean_t 2780 pmcs_assign_device(pmcs_hw_t *pwp, pmcs_xscsi_t *tgt) 2781 { 2782 pmcs_phy_t *pptr = tgt->phy; 2783 2784 switch (pptr->dtype) { 2785 case SAS: 2786 case EXPANDER: 2787 break; 2788 case SATA: 2789 tgt->ca = 1; 2790 break; 2791 default: 2792 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt, 2793 "%s: Target %p has PHY %p with invalid dtype", 2794 __func__, (void *)tgt, (void *)pptr); 2795 return (B_FALSE); 2796 } 2797 2798 tgt->new = 1; 2799 tgt->dev_gone = 0; 2800 tgt->recover_wait = 0; 2801 2802 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt, 2803 "%s: config %s vtgt %u for " SAS_ADDR_FMT, __func__, 2804 pptr->path, tgt->target_num, SAS_ADDR_PRT(pptr->sas_address)); 2805 2806 if (pmcs_add_new_device(pwp, tgt) != B_TRUE) { 2807 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, tgt, 2808 "%s: Failed for vtgt %u / WWN " SAS_ADDR_FMT, __func__, 2809 tgt->target_num, SAS_ADDR_PRT(pptr->sas_address)); 2810 mutex_destroy(&tgt->statlock); 2811 mutex_destroy(&tgt->wqlock); 2812 mutex_destroy(&tgt->aqlock); 2813 return (B_FALSE); 2814 } 2815 2816 return (B_TRUE); 2817 } 2818 2819 /* 2820 * Called with softstate lock held 2821 */ 2822 void 2823 pmcs_remove_device(pmcs_hw_t *pwp, pmcs_phy_t *pptr) 2824 { 2825 pmcs_xscsi_t *xp; 2826 unsigned int vtgt; 2827 2828 ASSERT(mutex_owned(&pwp->lock)); 2829 2830 for (vtgt = 0; vtgt < pwp->max_dev; vtgt++) { 2831 xp = pwp->targets[vtgt]; 2832 if (xp == NULL) { 2833 continue; 2834 } 2835 2836 mutex_enter(&xp->statlock); 2837 if (xp->phy == pptr) { 2838 if (xp->new) { 2839 xp->new = 0; 2840 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, xp, 2841 "cancel config of vtgt %u", vtgt); 2842 } else { 2843 pmcs_clear_xp(pwp, xp); 2844 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, xp, 2845 "Removed tgt 0x%p vtgt %u", 2846 (void *)xp, vtgt); 2847 } 2848 mutex_exit(&xp->statlock); 2849 break; 2850 } 2851 mutex_exit(&xp->statlock); 2852 } 2853 } 2854 2855 void 2856 pmcs_prt_impl(pmcs_hw_t *pwp, pmcs_prt_level_t level, 2857 pmcs_phy_t 
*phyp, pmcs_xscsi_t *target, const char *fmt, ...) 2858 { 2859 va_list ap; 2860 int written = 0; 2861 char *ptr; 2862 uint32_t elem_size = PMCS_TBUF_ELEM_SIZE - 1; 2863 boolean_t system_log; 2864 int system_log_level; 2865 hrtime_t hrtimestamp; 2866 2867 switch (level) { 2868 case PMCS_PRT_DEBUG_DEVEL: 2869 case PMCS_PRT_DEBUG_DEV_STATE: 2870 case PMCS_PRT_DEBUG_PHY_LOCKING: 2871 case PMCS_PRT_DEBUG_SCSI_STATUS: 2872 case PMCS_PRT_DEBUG_UNDERFLOW: 2873 case PMCS_PRT_DEBUG_CONFIG: 2874 case PMCS_PRT_DEBUG_IPORT: 2875 case PMCS_PRT_DEBUG_MAP: 2876 case PMCS_PRT_DEBUG3: 2877 case PMCS_PRT_DEBUG2: 2878 case PMCS_PRT_DEBUG1: 2879 case PMCS_PRT_DEBUG: 2880 system_log = B_FALSE; 2881 break; 2882 case PMCS_PRT_INFO: 2883 system_log = B_TRUE; 2884 system_log_level = CE_CONT; 2885 break; 2886 case PMCS_PRT_WARN: 2887 system_log = B_TRUE; 2888 system_log_level = CE_NOTE; 2889 break; 2890 case PMCS_PRT_ERR: 2891 system_log = B_TRUE; 2892 system_log_level = CE_WARN; 2893 break; 2894 default: 2895 return; 2896 } 2897 2898 mutex_enter(&pmcs_trace_lock); 2899 hrtimestamp = gethrtime(); 2900 gethrestime(&pmcs_tbuf_ptr->timestamp); 2901 2902 if (pwp->fw_timestamp != 0) { 2903 /* Calculate the approximate firmware time stamp... 
*/ 2904 pmcs_tbuf_ptr->fw_timestamp = pwp->fw_timestamp + 2905 ((hrtimestamp - pwp->hrtimestamp) / PMCS_FWLOG_TIMER_DIV); 2906 } else { 2907 pmcs_tbuf_ptr->fw_timestamp = 0; 2908 } 2909 2910 ptr = pmcs_tbuf_ptr->buf; 2911 2912 /* 2913 * Store the pertinent PHY and target information if there is any 2914 */ 2915 if (target == NULL) { 2916 pmcs_tbuf_ptr->target_num = PMCS_INVALID_TARGET_NUM; 2917 pmcs_tbuf_ptr->target_ua[0] = '\0'; 2918 } else { 2919 pmcs_tbuf_ptr->target_num = target->target_num; 2920 (void) strncpy(pmcs_tbuf_ptr->target_ua, target->ua, 2921 PMCS_TBUF_UA_MAX_SIZE); 2922 } 2923 2924 if (phyp == NULL) { 2925 (void) memset(pmcs_tbuf_ptr->phy_sas_address, 0, 8); 2926 pmcs_tbuf_ptr->phy_path[0] = '\0'; 2927 pmcs_tbuf_ptr->phy_dtype = NOTHING; 2928 } else { 2929 (void) memcpy(pmcs_tbuf_ptr->phy_sas_address, 2930 phyp->sas_address, 8); 2931 (void) strncpy(pmcs_tbuf_ptr->phy_path, phyp->path, 32); 2932 pmcs_tbuf_ptr->phy_dtype = phyp->dtype; 2933 } 2934 2935 written += snprintf(ptr, elem_size, "pmcs%d:%d: ", 2936 ddi_get_instance(pwp->dip), level); 2937 ptr += strlen(ptr); 2938 va_start(ap, fmt); 2939 written += vsnprintf(ptr, elem_size - written, fmt, ap); 2940 va_end(ap); 2941 if (written > elem_size - 1) { 2942 /* Indicate truncation */ 2943 pmcs_tbuf_ptr->buf[elem_size - 1] = '+'; 2944 } 2945 if (++pmcs_tbuf_idx == pmcs_tbuf_num_elems) { 2946 pmcs_tbuf_ptr = pmcs_tbuf; 2947 pmcs_tbuf_wrap = B_TRUE; 2948 pmcs_tbuf_idx = 0; 2949 } else { 2950 ++pmcs_tbuf_ptr; 2951 } 2952 mutex_exit(&pmcs_trace_lock); 2953 2954 /* 2955 * When pmcs_force_syslog in non-zero, everything goes also 2956 * to syslog, at CE_CONT level. 2957 */ 2958 if (pmcs_force_syslog) { 2959 system_log = B_TRUE; 2960 system_log_level = CE_CONT; 2961 } 2962 2963 /* 2964 * Anything that comes in with PMCS_PRT_INFO, WARN, or ERR also 2965 * goes to syslog. 
2966 */ 2967 if (system_log) { 2968 char local[196]; 2969 2970 switch (system_log_level) { 2971 case CE_CONT: 2972 (void) snprintf(local, sizeof (local), "%sINFO: ", 2973 pmcs_console ? "" : "?"); 2974 break; 2975 case CE_NOTE: 2976 case CE_WARN: 2977 local[0] = 0; 2978 break; 2979 default: 2980 return; 2981 } 2982 2983 ptr = local; 2984 ptr += strlen(local); 2985 (void) snprintf(ptr, (sizeof (local)) - 2986 ((size_t)ptr - (size_t)local), "pmcs%d: ", 2987 ddi_get_instance(pwp->dip)); 2988 ptr += strlen(ptr); 2989 va_start(ap, fmt); 2990 (void) vsnprintf(ptr, 2991 (sizeof (local)) - ((size_t)ptr - (size_t)local), fmt, ap); 2992 va_end(ap); 2993 if (level == CE_CONT) { 2994 (void) strlcat(local, "\n", sizeof (local)); 2995 } 2996 cmn_err(system_log_level, local); 2997 } 2998 2999 } 3000 3001 /* 3002 * pmcs_acquire_scratch 3003 * 3004 * If "wait" is true, the caller will wait until it can acquire the scratch. 3005 * This implies the caller needs to be in a context where spinning for an 3006 * indeterminate amount of time is acceptable. 3007 */ 3008 int 3009 pmcs_acquire_scratch(pmcs_hw_t *pwp, boolean_t wait) 3010 { 3011 int rval; 3012 3013 if (!wait) { 3014 return (atomic_swap_8(&pwp->scratch_locked, 1)); 3015 } 3016 3017 /* 3018 * Caller will wait for scratch. 
3019 */ 3020 while ((rval = atomic_swap_8(&pwp->scratch_locked, 1)) != 0) { 3021 drv_usecwait(100); 3022 } 3023 3024 return (rval); 3025 } 3026 3027 void 3028 pmcs_release_scratch(pmcs_hw_t *pwp) 3029 { 3030 pwp->scratch_locked = 0; 3031 } 3032 3033 /* Called with iport_lock and phy lock held */ 3034 void 3035 pmcs_create_one_phy_stats(pmcs_iport_t *iport, pmcs_phy_t *phyp) 3036 { 3037 sas_phy_stats_t *ps; 3038 pmcs_hw_t *pwp; 3039 int ndata; 3040 char ks_name[KSTAT_STRLEN]; 3041 3042 ASSERT(mutex_owned(&iport->lock)); 3043 pwp = iport->pwp; 3044 ASSERT(pwp != NULL); 3045 ASSERT(mutex_owned(&phyp->phy_lock)); 3046 3047 if (phyp->phy_stats != NULL) { 3048 /* 3049 * Delete existing kstats with name containing 3050 * old iport instance# and allow creation of 3051 * new kstats with new iport instance# in the name. 3052 */ 3053 kstat_delete(phyp->phy_stats); 3054 } 3055 3056 ndata = (sizeof (sas_phy_stats_t)/sizeof (kstat_named_t)); 3057 3058 (void) snprintf(ks_name, sizeof (ks_name), 3059 "%s.%llx.%d.%d", ddi_driver_name(iport->dip), 3060 (longlong_t)pwp->sas_wwns[0], 3061 ddi_get_instance(iport->dip), phyp->phynum); 3062 3063 phyp->phy_stats = kstat_create("pmcs", 3064 ddi_get_instance(iport->dip), ks_name, KSTAT_SAS_PHY_CLASS, 3065 KSTAT_TYPE_NAMED, ndata, 0); 3066 3067 if (phyp->phy_stats == NULL) { 3068 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL, 3069 "%s: Failed to create %s kstats for PHY(0x%p) at %s", 3070 __func__, ks_name, (void *)phyp, phyp->path); 3071 return; 3072 } 3073 3074 ps = (sas_phy_stats_t *)phyp->phy_stats->ks_data; 3075 3076 kstat_named_init(&ps->seconds_since_last_reset, 3077 "SecondsSinceLastReset", KSTAT_DATA_ULONGLONG); 3078 kstat_named_init(&ps->tx_frames, 3079 "TxFrames", KSTAT_DATA_ULONGLONG); 3080 kstat_named_init(&ps->rx_frames, 3081 "RxFrames", KSTAT_DATA_ULONGLONG); 3082 kstat_named_init(&ps->tx_words, 3083 "TxWords", KSTAT_DATA_ULONGLONG); 3084 kstat_named_init(&ps->rx_words, 3085 "RxWords", KSTAT_DATA_ULONGLONG); 3086 
kstat_named_init(&ps->invalid_dword_count, 3087 "InvalidDwordCount", KSTAT_DATA_ULONGLONG); 3088 kstat_named_init(&ps->running_disparity_error_count, 3089 "RunningDisparityErrorCount", KSTAT_DATA_ULONGLONG); 3090 kstat_named_init(&ps->loss_of_dword_sync_count, 3091 "LossofDwordSyncCount", KSTAT_DATA_ULONGLONG); 3092 kstat_named_init(&ps->phy_reset_problem_count, 3093 "PhyResetProblemCount", KSTAT_DATA_ULONGLONG); 3094 3095 phyp->phy_stats->ks_private = phyp; 3096 phyp->phy_stats->ks_update = pmcs_update_phy_stats; 3097 kstat_install(phyp->phy_stats); 3098 } 3099 3100 static void 3101 pmcs_create_all_phy_stats(pmcs_iport_t *iport) 3102 { 3103 pmcs_hw_t *pwp; 3104 pmcs_phy_t *phyp; 3105 3106 ASSERT(iport != NULL); 3107 pwp = iport->pwp; 3108 ASSERT(pwp != NULL); 3109 3110 mutex_enter(&iport->lock); 3111 3112 for (phyp = list_head(&iport->phys); 3113 phyp != NULL; 3114 phyp = list_next(&iport->phys, phyp)) { 3115 3116 mutex_enter(&phyp->phy_lock); 3117 pmcs_create_one_phy_stats(iport, phyp); 3118 mutex_exit(&phyp->phy_lock); 3119 } 3120 3121 mutex_exit(&iport->lock); 3122 } 3123 3124 int 3125 pmcs_update_phy_stats(kstat_t *ks, int rw) 3126 { 3127 int val, ret = DDI_FAILURE; 3128 pmcs_phy_t *pptr = (pmcs_phy_t *)ks->ks_private; 3129 pmcs_hw_t *pwp = pptr->pwp; 3130 sas_phy_stats_t *ps = ks->ks_data; 3131 3132 _NOTE(ARGUNUSED(rw)); 3133 ASSERT((pptr != NULL) && (pwp != NULL)); 3134 3135 /* 3136 * We just want to lock against other invocations of kstat; 3137 * we don't need to pmcs_lock_phy() for this. 
3138 */ 3139 mutex_enter(&pptr->phy_lock); 3140 3141 /* Get Stats from Chip */ 3142 val = pmcs_get_diag_report(pwp, PMCS_INVALID_DWORD_CNT, pptr->phynum); 3143 if (val == DDI_FAILURE) 3144 goto fail; 3145 ps->invalid_dword_count.value.ull = (unsigned long long)val; 3146 3147 val = pmcs_get_diag_report(pwp, PMCS_DISPARITY_ERR_CNT, pptr->phynum); 3148 if (val == DDI_FAILURE) 3149 goto fail; 3150 ps->running_disparity_error_count.value.ull = (unsigned long long)val; 3151 3152 val = pmcs_get_diag_report(pwp, PMCS_LOST_DWORD_SYNC_CNT, pptr->phynum); 3153 if (val == DDI_FAILURE) 3154 goto fail; 3155 ps->loss_of_dword_sync_count.value.ull = (unsigned long long)val; 3156 3157 val = pmcs_get_diag_report(pwp, PMCS_RESET_FAILED_CNT, pptr->phynum); 3158 if (val == DDI_FAILURE) 3159 goto fail; 3160 ps->phy_reset_problem_count.value.ull = (unsigned long long)val; 3161 3162 ret = DDI_SUCCESS; 3163 fail: 3164 mutex_exit(&pptr->phy_lock); 3165 return (ret); 3166 } 3167 3168 /*ARGSUSED*/ 3169 static int 3170 pmcs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 3171 { 3172 /* 3173 * as the driver can always deal with an error in any dma or 3174 * access handle, we can just return the fme_status value. 3175 */ 3176 pci_ereport_post(dip, err, NULL); 3177 return (err->fme_status); 3178 } 3179 3180 static void 3181 pmcs_fm_init(pmcs_hw_t *pwp) 3182 { 3183 ddi_iblock_cookie_t fm_ibc; 3184 3185 /* Only register with IO Fault Services if we have some capability */ 3186 if (pwp->fm_capabilities) { 3187 /* Adjust access and dma attributes for FMA */ 3188 pwp->reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 3189 pwp->iqp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 3190 pwp->oqp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 3191 pwp->cip_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 3192 pwp->fwlog_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 3193 3194 /* 3195 * Register capabilities with IO Fault Services. 
3196 */ 3197 ddi_fm_init(pwp->dip, &pwp->fm_capabilities, &fm_ibc); 3198 3199 /* 3200 * Initialize pci ereport capabilities if ereport 3201 * capable (should always be.) 3202 */ 3203 if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities) || 3204 DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) { 3205 pci_ereport_setup(pwp->dip); 3206 } 3207 3208 /* 3209 * Register error callback if error callback capable. 3210 */ 3211 if (DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) { 3212 ddi_fm_handler_register(pwp->dip, 3213 pmcs_fm_error_cb, (void *) pwp); 3214 } 3215 } 3216 } 3217 3218 static void 3219 pmcs_fm_fini(pmcs_hw_t *pwp) 3220 { 3221 /* Only unregister FMA capabilities if registered */ 3222 if (pwp->fm_capabilities) { 3223 /* 3224 * Un-register error callback if error callback capable. 3225 */ 3226 if (DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) { 3227 ddi_fm_handler_unregister(pwp->dip); 3228 } 3229 3230 /* 3231 * Release any resources allocated by pci_ereport_setup() 3232 */ 3233 if (DDI_FM_EREPORT_CAP(pwp->fm_capabilities) || 3234 DDI_FM_ERRCB_CAP(pwp->fm_capabilities)) { 3235 pci_ereport_teardown(pwp->dip); 3236 } 3237 3238 /* Unregister from IO Fault Services */ 3239 ddi_fm_fini(pwp->dip); 3240 3241 /* Adjust access and dma attributes for FMA */ 3242 pwp->reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 3243 pwp->iqp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 3244 pwp->oqp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 3245 pwp->cip_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 3246 pwp->fwlog_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 3247 } 3248 } 3249 3250 static boolean_t 3251 pmcs_fabricate_wwid(pmcs_hw_t *pwp) 3252 { 3253 char *cp, c; 3254 uint64_t adr; 3255 int i; 3256 3257 cp = &c; 3258 (void) ddi_strtoul(hw_serial, &cp, 10, (unsigned long *)&adr); 3259 3260 if (adr == 0) { 3261 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 3262 "%s: No serial number available to fabricate WWN", 3263 __func__); 3264 3265 adr = (uint64_t)gethrtime(); 3266 } 3267 3268 adr <<= 8; 3269 adr |= 
((uint64_t)ddi_get_instance(pwp->dip) << 52); 3270 adr |= (5ULL << 60); 3271 3272 for (i = 0; i < PMCS_MAX_PORTS; i++) { 3273 pwp->sas_wwns[i] = adr + i; 3274 } 3275 3276 return (B_TRUE); 3277 }