539 nulldev, /* identify */
540 nulldev, /* probe */
541 mptsas_attach, /* attach */
542 mptsas_detach, /* detach */
543 #ifdef __sparc
544 mptsas_reset,
545 #else
546 nodev, /* reset */
547 #endif /* __sparc */
548 &mptsas_cb_ops, /* driver operations */
549 NULL, /* bus operations */
550 mptsas_power, /* power management */
551 #ifdef __sparc
552 ddi_quiesce_not_needed
553 #else
554 mptsas_quiesce /* quiesce */
555 #endif /* __sparc */
556 };
557
558
559 #define MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24" /* name/version reported via modinfo(1M) */
560
/*
 * Loadable-module description: tells the kernel module framework that this
 * module is a device driver and which dev_ops vector implements it.
 */
561 static struct modldrv modldrv = {
562 &mod_driverops, /* Type of module. This one is a driver */
563 MPTSAS_MOD_STRING, /* Name of the module. */
564 &mptsas_ops, /* driver ops */
565 };
566
/*
 * Module linkage passed to mod_install()/mod_remove(); the list of module
 * descriptions (only modldrv here) is NULL-terminated.
 */
567 static struct modlinkage modlinkage = {
568 MODREV_1, &modldrv, NULL
569 };
570 #define TARGET_PROP "target" /* devinfo property: target number */
571 #define LUN_PROP "lun" /* devinfo property: LUN (32-bit form) */
572 #define LUN64_PROP "lun64" /* devinfo property: LUN (64-bit form) */
573 #define SAS_PROP "sas-mpt" /* marks a node as an mpt SAS device */
574 #define MDI_GUID "wwn" /* mpxio/mdi GUID property; presumably the device WWN */
575 #define NDI_GUID "guid" /* ndi GUID property name */
576 #define MPTSAS_DEV_GONE "mptsas_dev_gone" /* property flagging a removed device */
577
578 /*
579 * Local static data
1318 thread_create(NULL, 0, mptsas_doneq_thread,
1319 &mpt->m_doneq_thread_id[j].arg,
1320 0, &p0, TS_RUN, minclsyspri);
1321 mpt->m_doneq_thread_id[j].donetail =
1322 &mpt->m_doneq_thread_id[j].doneq;
1323 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1324 }
1325 mutex_exit(&mpt->m_doneq_mutex);
1326 doneq_thread_create++;
1327 }
1328
1329 /*
1330 * Disable hardware interrupt since we're not ready to
1331 * handle it yet.
1332 */
1333 MPTSAS_DISABLE_INTR(mpt);
1334 if (mptsas_register_intrs(mpt) == FALSE)
1335 goto fail;
1336 intr_added++;
1337
1338 /* Initialize mutex used in interrupt handler */
1339 mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1340 DDI_INTR_PRI(mpt->m_intr_pri));
1341 mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1342 mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1343 DDI_INTR_PRI(mpt->m_intr_pri));
1344 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1345 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1346 NULL, MUTEX_DRIVER,
1347 DDI_INTR_PRI(mpt->m_intr_pri));
1348 }
1349
1350 cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1351 cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1352 cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1353 cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1354 cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1355 cv_init(&mpt->m_extreq_sense_refcount_cv, NULL, CV_DRIVER, NULL);
1356 mutex_init_done++;
1357
1612 cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1613 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1614 }
1615 kmem_free(mpt->m_doneq_thread_id,
1616 sizeof (mptsas_doneq_thread_list_t)
1617 * doneq_thread_num);
1618 mutex_exit(&mpt->m_doneq_mutex);
1619 cv_destroy(&mpt->m_doneq_thread_cv);
1620 mutex_destroy(&mpt->m_doneq_mutex);
1621 }
1622 if (event_taskq_create) {
1623 ddi_taskq_destroy(mpt->m_event_taskq);
1624 }
1625 if (dr_taskq_create) {
1626 ddi_taskq_destroy(mpt->m_dr_taskq);
1627 }
1628 if (mutex_init_done) {
1629 mutex_destroy(&mpt->m_tx_waitq_mutex);
1630 mutex_destroy(&mpt->m_passthru_mutex);
1631 mutex_destroy(&mpt->m_mutex);
1632 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1633 mutex_destroy(
1634 &mpt->m_phy_info[i].smhba_info.phy_mutex);
1635 }
1636 cv_destroy(&mpt->m_cv);
1637 cv_destroy(&mpt->m_passthru_cv);
1638 cv_destroy(&mpt->m_fw_cv);
1639 cv_destroy(&mpt->m_config_cv);
1640 cv_destroy(&mpt->m_fw_diag_cv);
1641 cv_destroy(&mpt->m_extreq_sense_refcount_cv);
1642 }
1643
1644 if (map_setup) {
1645 mptsas_cfg_fini(mpt);
1646 }
1647 if (config_setup) {
1648 mptsas_config_space_fini(mpt);
1649 }
1650 mptsas_free_handshake_msg(mpt);
1651 mptsas_hba_fini(mpt);
2031 mutex_exit(&mpt->m_mutex);
2032
2033 /* deallocate everything that was allocated in mptsas_attach */
2034 mptsas_cache_destroy(mpt);
2035
2036 mptsas_hba_fini(mpt);
2037 mptsas_cfg_fini(mpt);
2038
2039 /* Lower the power informing PM Framework */
2040 if (mpt->m_options & MPTSAS_OPT_PM) {
2041 if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
2042 mptsas_log(mpt, CE_WARN,
2043 "!mptsas%d: Lower power request failed "
2044 "during detach, ignoring.",
2045 mpt->m_instance);
2046 }
2047
2048 mutex_destroy(&mpt->m_tx_waitq_mutex);
2049 mutex_destroy(&mpt->m_passthru_mutex);
2050 mutex_destroy(&mpt->m_mutex);
2051 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
2052 mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
2053 }
2054 cv_destroy(&mpt->m_cv);
2055 cv_destroy(&mpt->m_passthru_cv);
2056 cv_destroy(&mpt->m_fw_cv);
2057 cv_destroy(&mpt->m_config_cv);
2058 cv_destroy(&mpt->m_fw_diag_cv);
2059 cv_destroy(&mpt->m_extreq_sense_refcount_cv);
2060
2061 mptsas_smp_teardown(mpt);
2062 mptsas_hba_teardown(mpt);
2063
2064 mptsas_config_space_fini(mpt);
2065
2066 mptsas_free_handshake_msg(mpt);
2067
2068 mptsas_fm_fini(mpt);
2069 ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
2070 ddi_prop_remove_all(dip);
2392 case PM_LEVEL_D0:
2393 NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
2394 MPTSAS_POWER_ON(mpt);
2395 /*
2396 * Wait up to 30 seconds for IOC to come out of reset.
2397 */
2398 while (((ioc_status = ddi_get32(mpt->m_datap,
2399 &mpt->m_reg->Doorbell)) &
2400 MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
2401 if (polls++ > 3000) {
2402 break;
2403 }
2404 delay(drv_usectohz(10000));
2405 }
2406 /*
2407 * If IOC is not in operational state, try to hard reset it.
2408 */
2409 if ((ioc_status & MPI2_IOC_STATE_MASK) !=
2410 MPI2_IOC_STATE_OPERATIONAL) {
2411 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
2412 if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
2413 mptsas_log(mpt, CE_WARN,
2414 "mptsas_power: hard reset failed");
2415 mutex_exit(&mpt->m_mutex);
2416 return (DDI_FAILURE);
2417 }
2418 }
2419 mpt->m_power_level = PM_LEVEL_D0;
2420 break;
2421 case PM_LEVEL_D3:
2422 NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
2423 MPTSAS_POWER_OFF(mpt);
2424 break;
2425 default:
2426 mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
2427 mpt->m_instance, level);
2428 rval = DDI_FAILURE;
2429 break;
2430 }
2431 mutex_exit(&mpt->m_mutex);
2432 return (rval);
3409 }
3410 }
3411
3412 /*
3413 * reset the throttle if we were draining
3414 */
3415 if ((ptgt->m_t_ncmds == 0) &&
3416 (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
3417 NDBG23(("reset throttle"));
3418 ASSERT(ptgt->m_reset_delay == 0);
3419 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
3420 }
3421
3422 /*
3423 * If HBA is being reset, the DevHandles are being re-initialized,
3424 * which means that they could be invalid even if the target is still
3425 * attached. Check if being reset and if DevHandle is being
3426 * re-initialized. If this is the case, return BUSY so the I/O can be
3427 * retried later.
3428 */
3429 if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
3430 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
3431 if (cmd->cmd_flags & CFLAG_TXQ) {
3432 mptsas_doneq_add(mpt, cmd);
3433 mptsas_doneq_empty(mpt);
3434 return (rval);
3435 } else {
3436 return (TRAN_BUSY);
3437 }
3438 }
3439
3440 /*
3441 * If device handle has already been invalidated, just
3442 * fail the command. In theory, command from scsi_vhci
3443 * client is impossible send down command with invalid
3444 * devhdl since devhdl is set after path offline, target
3445 * driver is not suppose to select a offlined path.
3446 */
3447 if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
3448 NDBG3(("rejecting command, it might because invalid devhdl "
3449 "request."));
3450 mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
3451 if (cmd->cmd_flags & CFLAG_TXQ) {
3452 mptsas_doneq_add(mpt, cmd);
3453 mptsas_doneq_empty(mpt);
3454 return (rval);
3455 } else {
3456 return (TRAN_FATAL_ERROR);
3457 }
3458 }
3674 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
3675 cmd->cmd_pkt = (struct scsi_pkt *)pkt;
3676 cmd->cmd_cdblen = (uchar_t)cmdlen;
3677 cmd->cmd_scblen = statuslen;
3678 cmd->cmd_rqslen = SENSE_LENGTH;
3679 cmd->cmd_tgt_addr = ptgt;
3680
3681 if ((cmdlen > sizeof (cmd->cmd_cdb)) ||
3682 (tgtlen > PKT_PRIV_LEN) ||
3683 (statuslen > EXTCMDS_STATUS_SIZE)) {
3684 int failure;
3685
3686 /*
3687 * We are going to allocate external packet space which
3688 * might include the sense data buffer for DMA so we
3689 * need to increase the reference counter here. In a
3690 * case the HBA is in reset we just simply free the
3691 * allocated packet and bail out.
3692 */
3693 mutex_enter(&mpt->m_mutex);
3694 if (mpt->m_in_reset) {
3695 mutex_exit(&mpt->m_mutex);
3696
3697 cmd->cmd_flags = CFLAG_FREE;
3698 kmem_cache_free(mpt->m_kmem_cache, cmd);
3699 return (NULL);
3700 }
3701 mpt->m_extreq_sense_refcount++;
3702 ASSERT(mpt->m_extreq_sense_refcount > 0);
3703 mutex_exit(&mpt->m_mutex);
3704
3705 /*
3706 * if extern alloc fails, all will be
3707 * deallocated, including cmd
3708 */
3709 failure = mptsas_pkt_alloc_extern(mpt, cmd,
3710 cmdlen, tgtlen, statuslen, kf);
3711
3712 if (failure != 0 || cmd->cmd_extrqslen == 0) {
3713 /*
3714 * If the external packet space allocation
3715 * failed, or we didn't allocated the sense
3716 * data buffer for DMA we need to decrease the
3717 * reference counter.
3718 */
3719 mutex_enter(&mpt->m_mutex);
3720 ASSERT(mpt->m_extreq_sense_refcount > 0);
5355
5356 /*
5357 * Record the event if its type is enabled in
5358 * this mpt instance by ioctl.
5359 */
5360 mptsas_record_event(args);
5361
5362 /*
5363 * Handle time critical events
5364 * NOT_RESPONDING/ADDED only now
5365 */
5366 if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
5367 /*
5368 * Would not return main process,
5369 * just let taskq resolve ack action
5370 * and ack would be sent in taskq thread
5371 */
5372 NDBG20(("send mptsas_handle_event_sync success"));
5373 }
5374
5375 if (mpt->m_in_reset) {
5376 NDBG20(("dropping event received during reset"));
5377 return;
5378 }
5379
5380 if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
5381 (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
5382 mptsas_log(mpt, CE_WARN, "No memory available"
5383 "for dispatch taskq");
5384 /*
5385 * Return the reply frame to the free queue.
5386 */
5387 ddi_put32(mpt->m_acc_free_queue_hdl,
5388 &((uint32_t *)(void *)
5389 mpt->m_free_queue)[mpt->m_free_index], reply_addr);
5390 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
5391 DDI_DMA_SYNC_FORDEV);
5392 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
5393 mpt->m_free_index = 0;
5394 }
5395
5396 ddi_put32(mpt->m_datap,
5397 &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
5398 }
6325 }
6326 (void) sprintf(phy_mask_name, "%x", phymask);
6327 }
6328 parent = scsi_hba_iport_find(mpt->m_dip,
6329 phy_mask_name);
6330 if (parent == NULL) {
6331 mptsas_log(mpt, CE_WARN, "Failed to find an "
6332 "iport, should not happen!");
6333 goto out;
6334 }
6335
6336 }
6337 ASSERT(parent);
6338 handle_topo_change:
6339
6340 mutex_enter(&mpt->m_mutex);
6341 /*
6342 * If HBA is being reset, don't perform operations depending
6343 * on the IOC. We must free the topo list, however.
6344 */
6345 if (!mpt->m_in_reset)
6346 mptsas_handle_topo_change(topo_node, parent);
6347 else
6348 NDBG20(("skipping topo change received during reset"));
6349 save_node = topo_node;
6350 topo_node = topo_node->next;
6351 ASSERT(save_node);
6352 kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
6353 mutex_exit(&mpt->m_mutex);
6354
6355 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6356 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
6357 (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
6358 /*
6359 * If direct attached device associated, make sure
6360 * reset the parent before start the next one. But
6361 * all devices associated with expander shares the
6362 * parent. Also, reset parent if this is for RAID.
6363 */
6364 parent = NULL;
6365 }
6366 }
6367 out:
6368 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
7597 */
7598 static void
7599 mptsas_handle_event(void *args)
7600 {
7601 m_replyh_arg_t *replyh_arg;
7602 pMpi2EventNotificationReply_t eventreply;
7603 uint32_t event, iocloginfo, rfm;
7604 uint32_t status;
7605 uint8_t port;
7606 mptsas_t *mpt;
7607 uint_t iocstatus;
7608
7609 replyh_arg = (m_replyh_arg_t *)args;
7610 rfm = replyh_arg->rfm;
7611 mpt = replyh_arg->mpt;
7612
7613 mutex_enter(&mpt->m_mutex);
7614 /*
7615 * If HBA is being reset, drop incoming event.
7616 */
7617 if (mpt->m_in_reset) {
7618 NDBG20(("dropping event received prior to reset"));
7619 mutex_exit(&mpt->m_mutex);
7620 return;
7621 }
7622
7623 eventreply = (pMpi2EventNotificationReply_t)
7624 (mpt->m_reply_frame + (rfm -
7625 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
7626 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7627
7628 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7629 &eventreply->IOCStatus)) {
7630 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7631 mptsas_log(mpt, CE_WARN,
7632 "!mptsas_handle_event: IOCStatus=0x%x, "
7633 "IOCLogInfo=0x%x", iocstatus,
7634 ddi_get32(mpt->m_acc_reply_frame_hdl,
7635 &eventreply->IOCLogInfo));
7636 } else {
7637 mptsas_log(mpt, CE_WARN,
7638 "mptsas_handle_event: IOCStatus=0x%x, "
7639 "IOCLogInfo=0x%x", iocstatus,
7640 ddi_get32(mpt->m_acc_reply_frame_hdl,
7641 &eventreply->IOCLogInfo));
9905 /* Skip device if not powered on */
9906 if (mpt->m_options & MPTSAS_OPT_PM) {
9907 if (mpt->m_power_level == PM_LEVEL_D0) {
9908 (void) pm_busy_component(mpt->m_dip, 0);
9909 mpt->m_busy = 1;
9910 } else {
9911 mutex_exit(&mpt->m_mutex);
9912 continue;
9913 }
9914 }
9915
9916 /*
9917 * Check if controller is in a FAULT state. If so, reset it.
9918 */
9919 doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
9920 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
9921 doorbell &= MPI2_DOORBELL_DATA_MASK;
9922 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
9923 "code: %04x", doorbell);
9924 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
9925 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
9926 mptsas_log(mpt, CE_WARN, "Reset failed"
9927 "after fault was detected");
9928 }
9929 }
9930
9931 /*
9932 * For now, always call mptsas_watchsubr.
9933 */
9934 mptsas_watchsubr(mpt);
9935
9936 if (mpt->m_options & MPTSAS_OPT_PM) {
9937 mpt->m_busy = 0;
9938 (void) pm_idle_component(mpt->m_dip, 0);
9939 }
9940
9941 mutex_exit(&mpt->m_mutex);
9942 }
9943 rw_exit(&mptsas_global_rwlock);
9944
9945 mutex_enter(&mptsas_global_mutex);
11162 mptsas_return_to_pool(mpt, cmd);
11163 if (pt_flags & MPTSAS_DATA_ALLOCATED) {
11164 if (mptsas_check_dma_handle(data_dma_state.handle) !=
11165 DDI_SUCCESS) {
11166 ddi_fm_service_impact(mpt->m_dip,
11167 DDI_SERVICE_UNAFFECTED);
11168 status = EFAULT;
11169 }
11170 mptsas_dma_free(&data_dma_state);
11171 }
11172 if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
11173 if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
11174 DDI_SUCCESS) {
11175 ddi_fm_service_impact(mpt->m_dip,
11176 DDI_SERVICE_UNAFFECTED);
11177 status = EFAULT;
11178 }
11179 mptsas_dma_free(&dataout_dma_state);
11180 }
11181 if (pt_flags & MPTSAS_CMD_TIMEOUT) {
11182 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
11183 mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
11184 }
11185 }
11186 if (request_msg)
11187 kmem_free(request_msg, request_size);
11188 NDBG27(("mptsas_do_passthru: Done status 0x%x", status));
11189
11190 return (status);
11191 }
11192
11193 static int
11194 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
11195 {
11196 /*
11197 * If timeout is 0, set timeout to default of 60 seconds.
11198 */
11199 if (data->Timeout == 0) {
11200 data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
11201 }
11202
11203 if (((data->DataSize == 0) &&
12668 break;
12669 case MPTIOCTL_UPDATE_FLASH:
12670 if (ddi_copyin((void *)data, &flashdata,
12671 sizeof (struct mptsas_update_flash), mode)) {
12672 status = EFAULT;
12673 break;
12674 }
12675
12676 mutex_enter(&mpt->m_mutex);
12677 if (mptsas_update_flash(mpt,
12678 (caddr_t)(long)flashdata.PtrBuffer,
12679 flashdata.ImageSize, flashdata.ImageType, mode)) {
12680 status = EFAULT;
12681 }
12682
12683 /*
12684 * Reset the chip to start using the new
12685 * firmware. Reset if failed also.
12686 */
12687 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
12688 if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
12689 status = EFAULT;
12690 }
12691 mutex_exit(&mpt->m_mutex);
12692 break;
12693 case MPTIOCTL_PASS_THRU:
12694 /*
12695 * The user has requested to pass through a command to
12696 * be executed by the MPT firmware. Call our routine
12697 * which does this. Only allow one passthru IOCTL at
12698 * one time. Other threads will block on
12699 * m_passthru_mutex, which is of adaptive variant.
12700 */
12701 if (ddi_copyin((void *)data, &passthru_data,
12702 sizeof (mptsas_pass_thru_t), mode)) {
12703 status = EFAULT;
12704 break;
12705 }
12706 mutex_enter(&mpt->m_passthru_mutex);
12707 mutex_enter(&mpt->m_mutex);
12708 status = mptsas_pass_thru(mpt, &passthru_data, mode);
12740 status = EFAULT;
12741 }
12742 break;
12743 case MPTIOCTL_GET_PCI_INFO:
12744 /*
12745 * The user has requested to read pci info. Call
12746 * our routine which does this.
12747 */
12748 bzero(&pci_info, sizeof (mptsas_pci_info_t));
12749 mutex_enter(&mpt->m_mutex);
12750 mptsas_read_pci_info(mpt, &pci_info);
12751 mutex_exit(&mpt->m_mutex);
12752 if (ddi_copyout((void *)(&pci_info), (void *)data,
12753 sizeof (mptsas_pci_info_t), mode) != 0) {
12754 status = EFAULT;
12755 }
12756 break;
12757 case MPTIOCTL_RESET_ADAPTER:
12758 mutex_enter(&mpt->m_mutex);
12759 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
12760 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
12761 mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
12762 "failed");
12763 status = EFAULT;
12764 }
12765 mutex_exit(&mpt->m_mutex);
12766 break;
12767 case MPTIOCTL_DIAG_ACTION:
12768 /*
12769 * The user has done a diag buffer action. Call our
12770 * routine which does this. Only allow one diag action
12771 * at one time.
12772 */
12773 mutex_enter(&mpt->m_mutex);
12774 if (mpt->m_diag_action_in_progress) {
12775 mutex_exit(&mpt->m_mutex);
12776 return (EBUSY);
12777 }
12778 mpt->m_diag_action_in_progress = 1;
12779 status = mptsas_diag_action(mpt,
12780 (mptsas_diag_action_t *)data, mode);
12807 break;
12808 case MPTIOCTL_REG_ACCESS:
12809 /*
12810 * The user has requested register access. Call our
12811 * routine which does this.
12812 */
12813 status = mptsas_reg_access(mpt,
12814 (mptsas_reg_access_t *)data, mode);
12815 break;
12816 default:
12817 status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
12818 rval);
12819 break;
12820 }
12821
12822 out:
12823 return (status);
12824 }
12825
12826 int
12827 mptsas_restart_ioc(mptsas_t *mpt)
12828 {
12829 int rval = DDI_SUCCESS;
12830 mptsas_target_t *ptgt = NULL;
12831
12832 ASSERT(mutex_owned(&mpt->m_mutex));
12833
12834 /*
12835 * Set a flag telling I/O path that we're processing a reset. This is
12836 * needed because after the reset is complete, the hash table still
12837 * needs to be rebuilt. If I/Os are started before the hash table is
12838 * rebuilt, I/O errors will occur. This flag allows I/Os to be marked
12839 * so that they can be retried.
12840 */
12841 mpt->m_in_reset = TRUE;
12842
12843 /*
12844 * Wait until all the allocated sense data buffers for DMA are freed.
12845 */
12846 while (mpt->m_extreq_sense_refcount > 0)
12847 cv_wait(&mpt->m_extreq_sense_refcount_cv, &mpt->m_mutex);
12848
12849 /*
12850 * Set all throttles to HOLD
12851 */
12852 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
12853 ptgt = refhash_next(mpt->m_targets, ptgt)) {
12854 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
12855 }
12856
12857 /*
12858 * Disable interrupts
12859 */
12860 MPTSAS_DISABLE_INTR(mpt);
12861
12886
12887 /*
12888 * Reset the throttles
12889 */
12890 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
12891 ptgt = refhash_next(mpt->m_targets, ptgt)) {
12892 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
12893 }
12894
12895 mptsas_doneq_empty(mpt);
12896 mptsas_restart_hba(mpt);
12897
12898 if (rval != DDI_SUCCESS) {
12899 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
12900 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
12901 }
12902
12903 /*
12904 * Clear the reset flag so that I/Os can continue.
12905 */
12906 mpt->m_in_reset = FALSE;
12907
12908 return (rval);
12909 }
12910
12911 static int
12912 mptsas_init_chip(mptsas_t *mpt, int first_time)
12913 {
12914 ddi_dma_cookie_t cookie;
12915 uint32_t i;
12916 int rval;
12917
12918 /*
12919 * Check to see if the firmware image is valid
12920 */
12921 if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
12922 MPI2_DIAG_FLASH_BAD_SIG) {
12923 mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
12924 goto fail;
12925 }
12926
|
539 nulldev, /* identify */
540 nulldev, /* probe */
541 mptsas_attach, /* attach */
542 mptsas_detach, /* detach */
543 #ifdef __sparc
544 mptsas_reset,
545 #else
546 nodev, /* reset */
547 #endif /* __sparc */
548 &mptsas_cb_ops, /* driver operations */
549 NULL, /* bus operations */
550 mptsas_power, /* power management */
551 #ifdef __sparc
552 ddi_quiesce_not_needed
553 #else
554 mptsas_quiesce /* quiesce */
555 #endif /* __sparc */
556 };
557
558
/*
 * Name/version string reported via modinfo(1M).  The stray trailing "X"
 * was a typo: it must match the driver's actual version, "00.00.00.24",
 * as used by the companion copy of this definition.
 */
#define MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"
560
/*
 * Loadable-module description: tells the kernel module framework that this
 * module is a device driver and which dev_ops vector implements it.
 */
561 static struct modldrv modldrv = {
562 &mod_driverops, /* Type of module. This one is a driver */
563 MPTSAS_MOD_STRING, /* Name of the module. */
564 &mptsas_ops, /* driver ops */
565 };
566
/*
 * Module linkage passed to mod_install()/mod_remove(); the list of module
 * descriptions (only modldrv here) is NULL-terminated.
 */
567 static struct modlinkage modlinkage = {
568 MODREV_1, &modldrv, NULL
569 };
570 #define TARGET_PROP "target" /* devinfo property: target number */
571 #define LUN_PROP "lun" /* devinfo property: LUN (32-bit form) */
572 #define LUN64_PROP "lun64" /* devinfo property: LUN (64-bit form) */
573 #define SAS_PROP "sas-mpt" /* marks a node as an mpt SAS device */
574 #define MDI_GUID "wwn" /* mpxio/mdi GUID property; presumably the device WWN */
575 #define NDI_GUID "guid" /* ndi GUID property name */
576 #define MPTSAS_DEV_GONE "mptsas_dev_gone" /* property flagging a removed device */
577
578 /*
579 * Local static data
1318 thread_create(NULL, 0, mptsas_doneq_thread,
1319 &mpt->m_doneq_thread_id[j].arg,
1320 0, &p0, TS_RUN, minclsyspri);
1321 mpt->m_doneq_thread_id[j].donetail =
1322 &mpt->m_doneq_thread_id[j].doneq;
1323 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1324 }
1325 mutex_exit(&mpt->m_doneq_mutex);
1326 doneq_thread_create++;
1327 }
1328
1329 /*
1330 * Disable hardware interrupt since we're not ready to
1331 * handle it yet.
1332 */
1333 MPTSAS_DISABLE_INTR(mpt);
1334 if (mptsas_register_intrs(mpt) == FALSE)
1335 goto fail;
1336 intr_added++;
1337
1338 /*
1339 * The mutex to protect task management during reset
1340 */
1341 mutex_init(&mpt->m_taskmgmt_mutex, NULL, MUTEX_SPIN,
1342 DDI_INTR_PRI(mpt->m_intr_pri));
1343
1344 /* Initialize mutex used in interrupt handler */
1345 mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1346 DDI_INTR_PRI(mpt->m_intr_pri));
1347 mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1348 mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1349 DDI_INTR_PRI(mpt->m_intr_pri));
1350 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1351 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1352 NULL, MUTEX_DRIVER,
1353 DDI_INTR_PRI(mpt->m_intr_pri));
1354 }
1355
1356 cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1357 cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1358 cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1359 cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1360 cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1361 cv_init(&mpt->m_extreq_sense_refcount_cv, NULL, CV_DRIVER, NULL);
1362 mutex_init_done++;
1363
1618 cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1619 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1620 }
1621 kmem_free(mpt->m_doneq_thread_id,
1622 sizeof (mptsas_doneq_thread_list_t)
1623 * doneq_thread_num);
1624 mutex_exit(&mpt->m_doneq_mutex);
1625 cv_destroy(&mpt->m_doneq_thread_cv);
1626 mutex_destroy(&mpt->m_doneq_mutex);
1627 }
1628 if (event_taskq_create) {
1629 ddi_taskq_destroy(mpt->m_event_taskq);
1630 }
1631 if (dr_taskq_create) {
1632 ddi_taskq_destroy(mpt->m_dr_taskq);
1633 }
1634 if (mutex_init_done) {
1635 mutex_destroy(&mpt->m_tx_waitq_mutex);
1636 mutex_destroy(&mpt->m_passthru_mutex);
1637 mutex_destroy(&mpt->m_mutex);
1638 mutex_destroy(&mpt->m_taskmgmt_mutex);
1639 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1640 mutex_destroy(
1641 &mpt->m_phy_info[i].smhba_info.phy_mutex);
1642 }
1643 cv_destroy(&mpt->m_cv);
1644 cv_destroy(&mpt->m_passthru_cv);
1645 cv_destroy(&mpt->m_fw_cv);
1646 cv_destroy(&mpt->m_config_cv);
1647 cv_destroy(&mpt->m_fw_diag_cv);
1648 cv_destroy(&mpt->m_extreq_sense_refcount_cv);
1649 }
1650
1651 if (map_setup) {
1652 mptsas_cfg_fini(mpt);
1653 }
1654 if (config_setup) {
1655 mptsas_config_space_fini(mpt);
1656 }
1657 mptsas_free_handshake_msg(mpt);
1658 mptsas_hba_fini(mpt);
2038 mutex_exit(&mpt->m_mutex);
2039
2040 /* deallocate everything that was allocated in mptsas_attach */
2041 mptsas_cache_destroy(mpt);
2042
2043 mptsas_hba_fini(mpt);
2044 mptsas_cfg_fini(mpt);
2045
2046 /* Lower the power informing PM Framework */
2047 if (mpt->m_options & MPTSAS_OPT_PM) {
2048 if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
2049 mptsas_log(mpt, CE_WARN,
2050 "!mptsas%d: Lower power request failed "
2051 "during detach, ignoring.",
2052 mpt->m_instance);
2053 }
2054
2055 mutex_destroy(&mpt->m_tx_waitq_mutex);
2056 mutex_destroy(&mpt->m_passthru_mutex);
2057 mutex_destroy(&mpt->m_mutex);
2058 mutex_destroy(&mpt->m_taskmgmt_mutex);
2059 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
2060 mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
2061 }
2062 cv_destroy(&mpt->m_cv);
2063 cv_destroy(&mpt->m_passthru_cv);
2064 cv_destroy(&mpt->m_fw_cv);
2065 cv_destroy(&mpt->m_config_cv);
2066 cv_destroy(&mpt->m_fw_diag_cv);
2067 cv_destroy(&mpt->m_extreq_sense_refcount_cv);
2068
2069 mptsas_smp_teardown(mpt);
2070 mptsas_hba_teardown(mpt);
2071
2072 mptsas_config_space_fini(mpt);
2073
2074 mptsas_free_handshake_msg(mpt);
2075
2076 mptsas_fm_fini(mpt);
2077 ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
2078 ddi_prop_remove_all(dip);
2400 case PM_LEVEL_D0:
2401 NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
2402 MPTSAS_POWER_ON(mpt);
2403 /*
2404 * Wait up to 30 seconds for IOC to come out of reset.
2405 */
2406 while (((ioc_status = ddi_get32(mpt->m_datap,
2407 &mpt->m_reg->Doorbell)) &
2408 MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
2409 if (polls++ > 3000) {
2410 break;
2411 }
2412 delay(drv_usectohz(10000));
2413 }
2414 /*
2415 * If IOC is not in operational state, try to hard reset it.
2416 */
2417 if ((ioc_status & MPI2_IOC_STATE_MASK) !=
2418 MPI2_IOC_STATE_OPERATIONAL) {
2419 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
2420 if (mptsas_reset_handler(mpt) == DDI_FAILURE) {
2421 mptsas_log(mpt, CE_WARN,
2422 "mptsas_power: hard reset failed");
2423 mutex_exit(&mpt->m_mutex);
2424 return (DDI_FAILURE);
2425 }
2426 }
2427 mpt->m_power_level = PM_LEVEL_D0;
2428 break;
2429 case PM_LEVEL_D3:
2430 NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
2431 MPTSAS_POWER_OFF(mpt);
2432 break;
2433 default:
2434 mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
2435 mpt->m_instance, level);
2436 rval = DDI_FAILURE;
2437 break;
2438 }
2439 mutex_exit(&mpt->m_mutex);
2440 return (rval);
3417 }
3418 }
3419
3420 /*
3421 * reset the throttle if we were draining
3422 */
3423 if ((ptgt->m_t_ncmds == 0) &&
3424 (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
3425 NDBG23(("reset throttle"));
3426 ASSERT(ptgt->m_reset_delay == 0);
3427 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
3428 }
3429
3430 /*
3431 * If HBA is being reset, the DevHandles are being re-initialized,
3432 * which means that they could be invalid even if the target is still
3433 * attached. Check if being reset and if DevHandle is being
3434 * re-initialized. If this is the case, return BUSY so the I/O can be
3435 * retried later.
3436 */
3437 mutex_enter(&mpt->m_taskmgmt_mutex);
3438 if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
3439 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
3440 if (cmd->cmd_flags & CFLAG_TXQ) {
3441 mptsas_doneq_add(mpt, cmd);
3442 mptsas_doneq_empty(mpt);
3443 mutex_exit(&mpt->m_taskmgmt_mutex);
3444 return (rval);
3445 } else {
3446 mutex_exit(&mpt->m_taskmgmt_mutex);
3447 return (TRAN_BUSY);
3448 }
3449 }
3450 mutex_exit(&mpt->m_taskmgmt_mutex);
3451
3452 /*
3453 * If device handle has already been invalidated, just
3454 * fail the command. In theory, command from scsi_vhci
3455 * client is impossible send down command with invalid
3456 * devhdl since devhdl is set after path offline, target
3457 * driver is not suppose to select a offlined path.
3458 */
3459 if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
3460 NDBG3(("rejecting command, it might because invalid devhdl "
3461 "request."));
3462 mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
3463 if (cmd->cmd_flags & CFLAG_TXQ) {
3464 mptsas_doneq_add(mpt, cmd);
3465 mptsas_doneq_empty(mpt);
3466 return (rval);
3467 } else {
3468 return (TRAN_FATAL_ERROR);
3469 }
3470 }
3686 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
3687 cmd->cmd_pkt = (struct scsi_pkt *)pkt;
3688 cmd->cmd_cdblen = (uchar_t)cmdlen;
3689 cmd->cmd_scblen = statuslen;
3690 cmd->cmd_rqslen = SENSE_LENGTH;
3691 cmd->cmd_tgt_addr = ptgt;
3692
3693 if ((cmdlen > sizeof (cmd->cmd_cdb)) ||
3694 (tgtlen > PKT_PRIV_LEN) ||
3695 (statuslen > EXTCMDS_STATUS_SIZE)) {
3696 int failure;
3697
3698 /*
3699 * We are going to allocate external packet space which
3700 * might include the sense data buffer for DMA so we
3701 * need to increase the reference counter here. In a
3702 * case the HBA is in reset we just simply free the
3703 * allocated packet and bail out.
3704 */
3705 mutex_enter(&mpt->m_mutex);
3706 mutex_enter(&mpt->m_taskmgmt_mutex);
3707 if (mpt->m_in_reset == TRUE) {
3708 mutex_exit(&mpt->m_taskmgmt_mutex);
3709 mutex_exit(&mpt->m_mutex);
3710
3711 cmd->cmd_flags = CFLAG_FREE;
3712 kmem_cache_free(mpt->m_kmem_cache, cmd);
3713 return (NULL);
3714 }
3715 mutex_exit(&mpt->m_taskmgmt_mutex);
3716 mpt->m_extreq_sense_refcount++;
3717 ASSERT(mpt->m_extreq_sense_refcount > 0);
3718 mutex_exit(&mpt->m_mutex);
3719
3720 /*
3721 * if extern alloc fails, all will be
3722 * deallocated, including cmd
3723 */
3724 failure = mptsas_pkt_alloc_extern(mpt, cmd,
3725 cmdlen, tgtlen, statuslen, kf);
3726
3727 if (failure != 0 || cmd->cmd_extrqslen == 0) {
3728 /*
3729 * If the external packet space allocation
3730 * failed, or we didn't allocated the sense
3731 * data buffer for DMA we need to decrease the
3732 * reference counter.
3733 */
3734 mutex_enter(&mpt->m_mutex);
3735 ASSERT(mpt->m_extreq_sense_refcount > 0);
5370
5371 /*
5372 * Record the event if its type is enabled in
5373 * this mpt instance by ioctl.
5374 */
5375 mptsas_record_event(args);
5376
5377 /*
5378 * Handle time critical events
5379 * NOT_RESPONDING/ADDED only now
5380 */
5381 if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
5382 /*
5383 * Would not return main process,
5384 * just let taskq resolve ack action
5385 * and ack would be sent in taskq thread
5386 */
5387 NDBG20(("send mptsas_handle_event_sync success"));
5388 }
5389
5390 mutex_enter(&mpt->m_taskmgmt_mutex);
5391 if (mpt->m_in_reset == TRUE) {
5392 NDBG20(("dropping event received during reset"));
5393 mutex_exit(&mpt->m_taskmgmt_mutex);
5394 return;
5395 }
5396 mutex_exit(&mpt->m_taskmgmt_mutex);
5397
5398 if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
5399 (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
5400 mptsas_log(mpt, CE_WARN, "No memory available"
5401 "for dispatch taskq");
5402 /*
5403 * Return the reply frame to the free queue.
5404 */
5405 ddi_put32(mpt->m_acc_free_queue_hdl,
5406 &((uint32_t *)(void *)
5407 mpt->m_free_queue)[mpt->m_free_index], reply_addr);
5408 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
5409 DDI_DMA_SYNC_FORDEV);
5410 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
5411 mpt->m_free_index = 0;
5412 }
5413
5414 ddi_put32(mpt->m_datap,
5415 &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
5416 }
6343 }
6344 (void) sprintf(phy_mask_name, "%x", phymask);
6345 }
6346 parent = scsi_hba_iport_find(mpt->m_dip,
6347 phy_mask_name);
6348 if (parent == NULL) {
6349 mptsas_log(mpt, CE_WARN, "Failed to find an "
6350 "iport, should not happen!");
6351 goto out;
6352 }
6353
6354 }
6355 ASSERT(parent);
6356 handle_topo_change:
6357
6358 mutex_enter(&mpt->m_mutex);
6359 /*
6360 * If HBA is being reset, don't perform operations depending
6361 * on the IOC. We must free the topo list, however.
6362 */
6363
6364 mutex_enter(&mpt->m_taskmgmt_mutex);
6365 if (mpt->m_in_reset == FALSE)
6366 mptsas_handle_topo_change(topo_node, parent);
6367 mutex_exit(&mpt->m_taskmgmt_mutex);
6368
6369 save_node = topo_node;
6370 topo_node = topo_node->next;
6371 ASSERT(save_node);
6372 kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
6373 mutex_exit(&mpt->m_mutex);
6374
6375 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6376 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
6377 (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
6378 /*
6379 * If direct attached device associated, make sure
6380 * reset the parent before start the next one. But
6381 * all devices associated with expander shares the
6382 * parent. Also, reset parent if this is for RAID.
6383 */
6384 parent = NULL;
6385 }
6386 }
6387 out:
6388 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
7617 */
7618 static void
7619 mptsas_handle_event(void *args)
7620 {
7621 m_replyh_arg_t *replyh_arg;
7622 pMpi2EventNotificationReply_t eventreply;
7623 uint32_t event, iocloginfo, rfm;
7624 uint32_t status;
7625 uint8_t port;
7626 mptsas_t *mpt;
7627 uint_t iocstatus;
7628
7629 replyh_arg = (m_replyh_arg_t *)args;
7630 rfm = replyh_arg->rfm;
7631 mpt = replyh_arg->mpt;
7632
7633 mutex_enter(&mpt->m_mutex);
7634 /*
7635 * If HBA is being reset, drop incoming event.
7636 */
7637 mutex_enter(&mpt->m_taskmgmt_mutex);
7638 if (mpt->m_in_reset == TRUE) {
7639 NDBG20(("dropping event received prior to reset"));
7640 mutex_exit(&mpt->m_taskmgmt_mutex);
7641 mutex_exit(&mpt->m_mutex);
7642 return;
7643 }
7644 mutex_exit(&mpt->m_taskmgmt_mutex);
7645
7646 eventreply = (pMpi2EventNotificationReply_t)
7647 (mpt->m_reply_frame + (rfm -
7648 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
7649 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7650
7651 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7652 &eventreply->IOCStatus)) {
7653 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7654 mptsas_log(mpt, CE_WARN,
7655 "!mptsas_handle_event: IOCStatus=0x%x, "
7656 "IOCLogInfo=0x%x", iocstatus,
7657 ddi_get32(mpt->m_acc_reply_frame_hdl,
7658 &eventreply->IOCLogInfo));
7659 } else {
7660 mptsas_log(mpt, CE_WARN,
7661 "mptsas_handle_event: IOCStatus=0x%x, "
7662 "IOCLogInfo=0x%x", iocstatus,
7663 ddi_get32(mpt->m_acc_reply_frame_hdl,
7664 &eventreply->IOCLogInfo));
9928 /* Skip device if not powered on */
9929 if (mpt->m_options & MPTSAS_OPT_PM) {
9930 if (mpt->m_power_level == PM_LEVEL_D0) {
9931 (void) pm_busy_component(mpt->m_dip, 0);
9932 mpt->m_busy = 1;
9933 } else {
9934 mutex_exit(&mpt->m_mutex);
9935 continue;
9936 }
9937 }
9938
9939 /*
9940 * Check if controller is in a FAULT state. If so, reset it.
9941 */
9942 doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
9943 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
9944 doorbell &= MPI2_DOORBELL_DATA_MASK;
9945 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
9946 "code: %04x", doorbell);
9947 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
9948 if ((mptsas_reset_handler(mpt)) == DDI_FAILURE) {
9949 mptsas_log(mpt, CE_WARN, "Reset failed"
9950 "after fault was detected");
9951 }
9952 }
9953
9954 /*
9955 * For now, always call mptsas_watchsubr.
9956 */
9957 mptsas_watchsubr(mpt);
9958
9959 if (mpt->m_options & MPTSAS_OPT_PM) {
9960 mpt->m_busy = 0;
9961 (void) pm_idle_component(mpt->m_dip, 0);
9962 }
9963
9964 mutex_exit(&mpt->m_mutex);
9965 }
9966 rw_exit(&mptsas_global_rwlock);
9967
9968 mutex_enter(&mptsas_global_mutex);
11185 mptsas_return_to_pool(mpt, cmd);
11186 if (pt_flags & MPTSAS_DATA_ALLOCATED) {
11187 if (mptsas_check_dma_handle(data_dma_state.handle) !=
11188 DDI_SUCCESS) {
11189 ddi_fm_service_impact(mpt->m_dip,
11190 DDI_SERVICE_UNAFFECTED);
11191 status = EFAULT;
11192 }
11193 mptsas_dma_free(&data_dma_state);
11194 }
11195 if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
11196 if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
11197 DDI_SUCCESS) {
11198 ddi_fm_service_impact(mpt->m_dip,
11199 DDI_SERVICE_UNAFFECTED);
11200 status = EFAULT;
11201 }
11202 mptsas_dma_free(&dataout_dma_state);
11203 }
11204 if (pt_flags & MPTSAS_CMD_TIMEOUT) {
11205 if ((mptsas_reset_handler(mpt)) == DDI_FAILURE) {
11206 mptsas_log(mpt, CE_WARN, "mptsas_reset_handler failed");
11207 }
11208 }
11209 if (request_msg)
11210 kmem_free(request_msg, request_size);
11211 NDBG27(("mptsas_do_passthru: Done status 0x%x", status));
11212
11213 return (status);
11214 }
11215
11216 static int
11217 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
11218 {
11219 /*
11220 * If timeout is 0, set timeout to default of 60 seconds.
11221 */
11222 if (data->Timeout == 0) {
11223 data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
11224 }
11225
11226 if (((data->DataSize == 0) &&
12691 break;
12692 case MPTIOCTL_UPDATE_FLASH:
12693 if (ddi_copyin((void *)data, &flashdata,
12694 sizeof (struct mptsas_update_flash), mode)) {
12695 status = EFAULT;
12696 break;
12697 }
12698
12699 mutex_enter(&mpt->m_mutex);
12700 if (mptsas_update_flash(mpt,
12701 (caddr_t)(long)flashdata.PtrBuffer,
12702 flashdata.ImageSize, flashdata.ImageType, mode)) {
12703 status = EFAULT;
12704 }
12705
12706 /*
12707 * Reset the chip to start using the new
12708 * firmware. Reset if failed also.
12709 */
12710 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
12711 if (mptsas_reset_handler(mpt) == DDI_FAILURE) {
12712 status = EFAULT;
12713 }
12714 mutex_exit(&mpt->m_mutex);
12715 break;
12716 case MPTIOCTL_PASS_THRU:
12717 /*
12718 * The user has requested to pass through a command to
12719 * be executed by the MPT firmware. Call our routine
12720 * which does this. Only allow one passthru IOCTL at
12721 * one time. Other threads will block on
12722 * m_passthru_mutex, which is of adaptive variant.
12723 */
12724 if (ddi_copyin((void *)data, &passthru_data,
12725 sizeof (mptsas_pass_thru_t), mode)) {
12726 status = EFAULT;
12727 break;
12728 }
12729 mutex_enter(&mpt->m_passthru_mutex);
12730 mutex_enter(&mpt->m_mutex);
12731 status = mptsas_pass_thru(mpt, &passthru_data, mode);
12763 status = EFAULT;
12764 }
12765 break;
12766 case MPTIOCTL_GET_PCI_INFO:
12767 /*
12768 * The user has requested to read pci info. Call
12769 * our routine which does this.
12770 */
12771 bzero(&pci_info, sizeof (mptsas_pci_info_t));
12772 mutex_enter(&mpt->m_mutex);
12773 mptsas_read_pci_info(mpt, &pci_info);
12774 mutex_exit(&mpt->m_mutex);
12775 if (ddi_copyout((void *)(&pci_info), (void *)data,
12776 sizeof (mptsas_pci_info_t), mode) != 0) {
12777 status = EFAULT;
12778 }
12779 break;
12780 case MPTIOCTL_RESET_ADAPTER:
12781 mutex_enter(&mpt->m_mutex);
12782 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
12783 if ((mptsas_reset_handler(mpt)) == DDI_FAILURE) {
12784 mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
12785 "failed");
12786 status = EFAULT;
12787 }
12788 mutex_exit(&mpt->m_mutex);
12789 break;
12790 case MPTIOCTL_DIAG_ACTION:
12791 /*
12792 * The user has done a diag buffer action. Call our
12793 * routine which does this. Only allow one diag action
12794 * at one time.
12795 */
12796 mutex_enter(&mpt->m_mutex);
12797 if (mpt->m_diag_action_in_progress) {
12798 mutex_exit(&mpt->m_mutex);
12799 return (EBUSY);
12800 }
12801 mpt->m_diag_action_in_progress = 1;
12802 status = mptsas_diag_action(mpt,
12803 (mptsas_diag_action_t *)data, mode);
12830 break;
12831 case MPTIOCTL_REG_ACCESS:
12832 /*
12833 * The user has requested register access. Call our
12834 * routine which does this.
12835 */
12836 status = mptsas_reg_access(mpt,
12837 (mptsas_reg_access_t *)data, mode);
12838 break;
12839 default:
12840 status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
12841 rval);
12842 break;
12843 }
12844
12845 out:
12846 return (status);
12847 }
12848
/*
 * Quiesce I/O, reset the IOC, and resume I/O.
 *
 * Caller must hold mpt->m_mutex (asserted below).  Returns DDI_SUCCESS if
 * the reset completed, DDI_FAILURE if a reset was already in progress (or,
 * presumably, if the reset step itself failed and set rval accordingly —
 * the reset/reinit step is not visible in this excerpt; confirm against
 * the full source).
 */
12849 int
12850 mptsas_reset_handler(mptsas_t *mpt)
12851 {
12852 int rval = DDI_SUCCESS;
12853 mptsas_target_t *ptgt = NULL;
12854
12855 ASSERT(mutex_owned(&mpt->m_mutex));
12856
12857 /*
12858 * Set a flag telling task management we are processing a reset. This
12859 * is needed because after the reset is complete, the hash table still
12860 * needs to be rebuilt. If I/Os are started before the hash table is
12861 * rebuilt, I/O errors will occur. This flag allows I/Os to be marked
12862 * so that they can be retried.
12863 */
12864 mutex_enter(&mpt->m_taskmgmt_mutex);
/*
 * If another thread already owns the reset, back off; only one reset
 * may run at a time.
 */
12865 if (mpt->m_in_reset == TRUE) {
12866 mutex_exit(&mpt->m_taskmgmt_mutex);
12867 return (DDI_FAILURE);
12868 }
12869 mpt->m_in_reset = TRUE;
12870 mutex_exit(&mpt->m_taskmgmt_mutex);
12871
12872 /*
12873 * Wait until all the allocated sense data buffers for DMA are freed.
12874 * (cv_wait drops and reacquires m_mutex; the matching cv_broadcast is
12875 * issued when m_extreq_sense_refcount reaches zero elsewhere.)
12876 */
12875 while (mpt->m_extreq_sense_refcount > 0)
12876 cv_wait(&mpt->m_extreq_sense_refcount_cv, &mpt->m_mutex);
12877
12878 /*
12879 * Set all throttles to HOLD so no new commands are started against
12880 * any target while the IOC is being reset.
12881 */
12881 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
12882 ptgt = refhash_next(mpt->m_targets, ptgt)) {
12883 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
12884 }
12885
12886 /*
12887 * Disable interrupts while the controller is torn down.
12888 */
12889 MPTSAS_DISABLE_INTR(mpt);
12890
/*
 * NOTE(review): original lines 12891-12914 are elided from this excerpt.
 * The actual IOC reset and reinitialization (which is what can change
 * rval from its DDI_SUCCESS initializer) is expected to occur here —
 * confirm against the complete source before modifying this function.
 */
12915
12916 /*
12917 * Reset the throttles back to MAX so queued/retried I/O can flow
12918 * again once the HBA is restarted.
12919 */
12919 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
12920 ptgt = refhash_next(mpt->m_targets, ptgt)) {
12921 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
12922 }
12923
/* Drain the completion queue, then restart I/O submission on the HBA. */
12924 mptsas_doneq_empty(mpt);
12925 mptsas_restart_hba(mpt);
12926
/* Report to the FMA framework if the reset did not succeed. */
12927 if (rval != DDI_SUCCESS) {
12928 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
12929 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
12930 }
12931
12932 /*
12933 * Clear the reset flag so that I/Os can continue.
12934 */
12935 mutex_enter(&mpt->m_taskmgmt_mutex);
12936 mpt->m_in_reset = FALSE;
12937 mutex_exit(&mpt->m_taskmgmt_mutex);
12938
12939 return (rval);
12940 }
12941
12942 static int
12943 mptsas_init_chip(mptsas_t *mpt, int first_time)
12944 {
12945 ddi_dma_cookie_t cookie;
12946 uint32_t i;
12947 int rval;
12948
12949 /*
12950 * Check to see if the firmware image is valid
12951 */
12952 if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
12953 MPI2_DIAG_FLASH_BAD_SIG) {
12954 mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
12955 goto fail;
12956 }
12957
|