XXXX don't fail device detach when it's physically removed
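The gist of the change: once the devinfo node is marked gone (the device was physically removed), sd_unit_detach no longer fails just because commands are still counted as outstanding; the busy checks apply only while the device is still present. Below is a minimal userland sketch of that decision, not the driver code: the names sd_busy_t and may_detach are made up for illustration, while the real check reads DEVI(devi)->devi_gone and fields of struct sd_lun under sd_detach_mutex.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the relevant sd_lun fields; illustration only. */
typedef struct {
	int	detach_count;		/* another detach already in progress */
	int	opens_in_progress;	/* an open is racing with the detach */
	int	ncmds_in_driver;	/* commands accepted by the driver */
	int	ncmds_in_transport;	/* commands handed to the transport */
	bool	state_rwait;		/* waiting for resources */
} sd_busy_t;

/*
 * Old behavior: any outstanding activity fails the detach.
 * New behavior: when the device is physically gone, only the
 * detach/open races still block; outstanding commands do not.
 */
static bool
may_detach(const sd_busy_t *b, bool devi_gone)
{
	if (b->detach_count != 0 || b->opens_in_progress != 0)
		return (false);
	if (!devi_gone && (b->ncmds_in_driver != 0 ||
	    b->ncmds_in_transport != 0 || b->state_rwait))
		return (false);
	return (true);
}

int
main(void)
{
	sd_busy_t busy = { .ncmds_in_driver = 2 };

	/* Device still present and busy: detach is refused, as before. */
	printf("present, busy: %s\n",
	    may_detach(&busy, false) ? "detach" : "fail");

	/* Same counters, but the device was pulled: detach proceeds. */
	printf("gone, busy:    %s\n",
	    may_detach(&busy, true) ? "detach" : "fail");

	return (0);
}

The later devigone checks in the function body (skipping the reservation release, for example) follow the same idea: do not send commands to hardware that is no longer there.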

------- old sd_unit_detach() -------
8534  *    Function: sd_unit_detach
8535  *
8536  * Description: Performs DDI_DETACH processing for sddetach().
8537  *
8538  * Return Code: DDI_SUCCESS
8539  *              DDI_FAILURE
8540  *
8541  *     Context: Kernel thread context
8542  */
8543 
8544 static int
8545 sd_unit_detach(dev_info_t *devi)
8546 {
8547         struct scsi_device      *devp;
8548         struct sd_lun           *un;
8549         int                     i;
8550         int                     tgt;
8551         dev_t                   dev;
8552         dev_info_t              *pdip = ddi_get_parent(devi);
8553         int                     instance = ddi_get_instance(devi);
8554 
8555         mutex_enter(&sd_detach_mutex);
8556 
8557         /*
8558          * Fail the detach for any of the following:
8559          *  - Unable to get the sd_lun struct for the instance
8560          *  - A layered driver has an outstanding open on the instance
8561          *  - Another thread is already detaching this instance
8562          *  - Another thread is currently performing an open
8563          */
8564         devp = ddi_get_driver_private(devi);
8565         if ((devp == NULL) ||
8566             ((un = (struct sd_lun *)devp->sd_private) == NULL) ||
8567             (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) ||
8568             (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) {
8569                 mutex_exit(&sd_detach_mutex);
8570                 return (DDI_FAILURE);
8571         }
8572 
8573         SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un);
8574 
8575         /*
8576          * Mark this instance as currently in a detach, to inhibit any
8577          * opens from a layered driver.
8578          */
8579         un->un_detach_count++;
8580         mutex_exit(&sd_detach_mutex);
8581 
8582         tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
8583             SCSI_ADDR_PROP_TARGET, -1);
8584 
8585         dev = sd_make_device(SD_DEVINFO(un));
8586 
8587 #ifndef lint
8588         _NOTE(COMPETING_THREADS_NOW);
8589 #endif
8590 
8591         mutex_enter(SD_MUTEX(un));
8592 
8593         /*
8594          * Fail the detach if there are any outstanding layered
8595          * opens on this device.
8596          */
8597         for (i = 0; i < NDKMAP; i++) {
8598                 if (un->un_ocmap.lyropen[i] != 0) {
8599                         goto err_notclosed;
8600                 }
8601         }
8602 
8603         /*
8604          * Verify there are NO outstanding commands issued to this device.
8605          * ie, un_ncmds_in_transport == 0.
8606          * It's possible to have outstanding commands through the physio
8607          * code path, even though everything's closed.
8608          */
8609         if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) ||
8610             (un->un_direct_priority_timeid != NULL) ||
8611             (un->un_state == SD_STATE_RWAIT)) {
8612                 mutex_exit(SD_MUTEX(un));
8613                 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8614                     "sd_dr_detach: Detach failure due to outstanding cmds\n");
8615                 goto err_stillbusy;
8616         }
8617 
8618         /*
8619          * If we have the device reserved, release the reservation.
8620          */
8621         if ((un->un_resvd_status & SD_RESERVE) &&
8622             !(un->un_resvd_status & SD_LOST_RESERVE)) {
8623                 mutex_exit(SD_MUTEX(un));
8624                 /*
8625                  * Note: sd_reserve_release sends a command to the device
8626                  * via the sd_ioctlcmd() path, and can sleep.
8627                  */
8628                 if (sd_reserve_release(dev, SD_RELEASE) != 0) {
8629                         SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8630                             "sd_dr_detach: Cannot release reservation \n");
8631                 }
8632         } else {
8633                 mutex_exit(SD_MUTEX(un));
8634         }
8635 
8636         /*
8637          * Untimeout any reserve recover, throttle reset, restart unit
8638          * and delayed broadcast timeout threads. Protect the timeout pointer
8639          * from getting nulled by their callback functions.
8640          */
8641         mutex_enter(SD_MUTEX(un));
8642         if (un->un_resvd_timeid != NULL) {
8643                 timeout_id_t temp_id = un->un_resvd_timeid;
8644                 un->un_resvd_timeid = NULL;
8645                 mutex_exit(SD_MUTEX(un));
8646                 (void) untimeout(temp_id);
8647                 mutex_enter(SD_MUTEX(un));
8648         }
8649 
8650         if (un->un_reset_throttle_timeid != NULL) {


8667                 timeout_id_t temp_id = un->un_rmw_msg_timeid;
8668                 un->un_rmw_msg_timeid = NULL;
8669                 mutex_exit(SD_MUTEX(un));
8670                 (void) untimeout(temp_id);
8671                 mutex_enter(SD_MUTEX(un));
8672         }
8673 
8674         if (un->un_dcvb_timeid != NULL) {
8675                 timeout_id_t temp_id = un->un_dcvb_timeid;
8676                 un->un_dcvb_timeid = NULL;
8677                 mutex_exit(SD_MUTEX(un));
8678                 (void) untimeout(temp_id);
8679         } else {
8680                 mutex_exit(SD_MUTEX(un));
8681         }
8682 
8683         /* Remove any pending reservation reclaim requests for this device */
8684         sd_rmv_resv_reclaim_req(dev);
8685 
8686         mutex_enter(SD_MUTEX(un));
8687 
8688         /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */
8689         if (un->un_direct_priority_timeid != NULL) {
8690                 timeout_id_t temp_id = un->un_direct_priority_timeid;
8691                 un->un_direct_priority_timeid = NULL;
8692                 mutex_exit(SD_MUTEX(un));
8693                 (void) untimeout(temp_id);
8694                 mutex_enter(SD_MUTEX(un));
8695         }
8696 
8697         /* Cancel any active multi-host disk watch thread requests */
8698         if (un->un_mhd_token != NULL) {
8699                 mutex_exit(SD_MUTEX(un));
8700                  _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token));
8701                 if (scsi_watch_request_terminate(un->un_mhd_token,
8702                     SCSI_WATCH_TERMINATE_NOWAIT)) {
8703                         SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8704                             "sd_dr_detach: Cannot cancel mhd watch request\n");
8705                         /*
8706                          * Note: We are returning here after having removed
8707                          * some driver timeouts above. This is consistent with
8708                          * the legacy implementation but perhaps the watch
8709                          * terminate call should be made with the wait flag set.
8710                          */
8711                         goto err_stillbusy;
8712                 }
8713                 mutex_enter(SD_MUTEX(un));
8714                 un->un_mhd_token = NULL;
8715         }
8716 
8717         if (un->un_swr_token != NULL) {
8718                 mutex_exit(SD_MUTEX(un));
8719                 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token));
8720                 if (scsi_watch_request_terminate(un->un_swr_token,
8721                     SCSI_WATCH_TERMINATE_NOWAIT)) {
8722                         SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8723                             "sd_dr_detach: Cannot cancel swr watch request\n");
8724                         /*
8725                          * Note: We are returning here after having removed
8726                          * some driver timeouts above. This is consistent with
8727                          * the legacy implementation but perhaps the watch
8728                          * terminate call should be made with the wait flag set.
8729                          */
8730                         goto err_stillbusy;
8731                 }
8732                 mutex_enter(SD_MUTEX(un));
8733                 un->un_swr_token = NULL;
8734         }
8735 
8736         mutex_exit(SD_MUTEX(un));
8737 
8738         /*
8739          * Clear any scsi_reset_notifies. We clear the reset notifies
8740          * if we have not registered one.
8741          * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX!
8742          */
8743         (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
8744             sd_mhd_reset_notify_cb, (caddr_t)un);
8745 
8746         /*
8747          * protect the timeout pointers from getting nulled by
8748          * their callback functions during the cancellation process.
8749          * In such a scenario untimeout can be invoked with a null value.
8750          */
8751         _NOTE(NO_COMPETING_THREADS_NOW);
8752 
8753         mutex_enter(&un->un_pm_mutex);
8754         if (un->un_pm_idle_timeid != NULL) {
8755                 timeout_id_t temp_id = un->un_pm_idle_timeid;
8756                 un->un_pm_idle_timeid = NULL;
8757                 mutex_exit(&un->un_pm_mutex);
8758 
8759                 /*
8760                  * Timeout is active; cancel it.
8761                  * Note that it'll never be active on a device
8762                  * that does not support PM therefore we don't


8774          */
8775         if (un->un_pm_timeid != NULL) {
8776                 timeout_id_t temp_id = un->un_pm_timeid;
8777                 un->un_pm_timeid = NULL;
8778                 mutex_exit(&un->un_pm_mutex);
8779                 /*
8780                  * Timeout is active; cancel it.
8781                  * Note that it'll never be active on a device
8782                  * that does not support PM therefore we don't
8783                  * have to check before calling pm_idle_component.
8784                  */
8785                 (void) untimeout(temp_id);
8786                 (void) pm_idle_component(SD_DEVINFO(un), 0);
8787 
8788         } else {
8789                 mutex_exit(&un->un_pm_mutex);
8790                 if ((un->un_f_pm_is_enabled == TRUE) &&
8791                     (pm_lower_power(SD_DEVINFO(un), 0, SD_PM_STATE_STOPPED(un))
8792                     != DDI_SUCCESS)) {
8793                         SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8794                     "sd_dr_detach: Lower power request failed, ignoring.\n");
8795                         /*
8796                          * Fix for bug: 4297749, item # 13
8797                          * The above test now includes a check to see if PM is
8798                          * supported by this device before call
8799                          * pm_lower_power().
8800                          * Note, the following is not dead code. The call to
8801                          * pm_lower_power above will generate a call back into
8802                          * our sdpower routine which might result in a timeout
8803                          * handler getting activated. Therefore the following
8804                          * code is valid and necessary.
8805                          */
8806                         mutex_enter(&un->un_pm_mutex);
8807                         if (un->un_pm_timeid != NULL) {
8808                                 timeout_id_t temp_id = un->un_pm_timeid;
8809                                 un->un_pm_timeid = NULL;
8810                                 mutex_exit(&un->un_pm_mutex);
8811                                 (void) untimeout(temp_id);
8812                                 (void) pm_idle_component(SD_DEVINFO(un), 0);
8813                         } else {
8814                                 mutex_exit(&un->un_pm_mutex);
8815                         }
8816                 }


8839         }
8840 
8841         if (un->un_f_is_fibre == FALSE) {
8842                 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
8843         }
8844 
8845         /*
8846          * Remove any event callbacks, fibre only
8847          */
8848         if (un->un_f_is_fibre == TRUE) {
8849                 if ((un->un_insert_event != NULL) &&
8850                     (ddi_remove_event_handler(un->un_insert_cb_id) !=
8851                     DDI_SUCCESS)) {
8852                         /*
8853                          * Note: We are returning here after having done
8854                          * substantial cleanup above. This is consistent
8855                          * with the legacy implementation but this may not
8856                          * be the right thing to do.
8857                          */
8858                         SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8859                             "sd_dr_detach: Cannot cancel insert event\n");
8860                         goto err_remove_event;
8861                 }
8862                 un->un_insert_event = NULL;
8863 
8864                 if ((un->un_remove_event != NULL) &&
8865                     (ddi_remove_event_handler(un->un_remove_cb_id) !=
8866                     DDI_SUCCESS)) {
8867                         /*
8868                          * Note: We are returning here after having done
8869                          * substantial cleanup above. This is consistent
8870                          * with the legacy implementation but this may not
8871                          * be the right thing to do.
8872                          */
8873                         SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8874                             "sd_dr_detach: Cannot cancel remove event\n");
8875                         goto err_remove_event;
8876                 }
8877                 un->un_remove_event = NULL;
8878         }
8879 
8880         /* Do not free the softstate if the callback routine is active */
8881         sd_sync_with_callback(un);
8882 
8883         cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
8884         cmlb_free_handle(&un->un_cmlbhandle);
8885 
8886         /*
8887          * Hold the detach mutex here, to make sure that no other threads ever
8888          * can access a (partially) freed soft state structure.
8889          */
8890         mutex_enter(&sd_detach_mutex);
8891 
8892         /*
8893          * Clean up the soft state struct.
8894          * Cleanup is done in reverse order of allocs/inits.


8997          * check here because we've already checked during attach. No device
8998          * that is not parallel SCSI is in the chain.
8999          */
9000         if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) {
9001                 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH);
9002         }
9003 
9004         return (DDI_SUCCESS);
9005 
9006 err_notclosed:
9007         mutex_exit(SD_MUTEX(un));
9008 
9009 err_stillbusy:
9010         _NOTE(NO_COMPETING_THREADS_NOW);
9011 
9012 err_remove_event:
9013         mutex_enter(&sd_detach_mutex);
9014         un->un_detach_count--;
9015         mutex_exit(&sd_detach_mutex);
9016 
9017         SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n");
9018         return (DDI_FAILURE);
9019 }
9020 
9021 
9022 /*
9023  *    Function: sd_create_errstats
9024  *
9025  * Description: This routine instantiates the device error stats.
9026  *
9027  *              Note: During attach the stats are instantiated first so they are
9028  *              available for attach-time routines that utilize the driver
9029  *              iopath to send commands to the device. The stats are initialized
9030  *              separately so data obtained during some attach-time routines is
9031  *              available. (4362483)
9032  *
9033  *   Arguments: un - driver soft state (unit) structure
9034  *              instance - driver instance
9035  *
9036  *     Context: Kernel thread context
9037  */

------- new sd_unit_detach() -------

8534  *    Function: sd_unit_detach
8535  *
8536  * Description: Performs DDI_DETACH processing for sddetach().
8537  *
8538  * Return Code: DDI_SUCCESS
8539  *              DDI_FAILURE
8540  *
8541  *     Context: Kernel thread context
8542  */
8543 
8544 static int
8545 sd_unit_detach(dev_info_t *devi)
8546 {
8547         struct scsi_device      *devp;
8548         struct sd_lun           *un;
8549         int                     i;
8550         int                     tgt;
8551         dev_t                   dev;
8552         dev_info_t              *pdip = ddi_get_parent(devi);
8553         int                     instance = ddi_get_instance(devi);
8554         int                     devigone = DEVI(devi)->devi_gone;
8555 
8556         mutex_enter(&sd_detach_mutex);
8557 
8558         /*
8559          * Fail the detach for any of the following:
8560          * - Unable to get the sd_lun struct for the instance
8561          * - Another thread is already detaching this instance
8562          * - Another thread is currently performing an open
8563          *
8564          * Additionally, if the "device gone" flag is not set:
8565          * - There are outstanding commands in driver
8566          * - There are outstanding commands in transport
8567          */
8568         devp = ddi_get_driver_private(devi);
8569         if (devp == NULL || (un = (struct sd_lun *)devp->sd_private) == NULL ||
8570             un->un_detach_count != 0 || un->un_opens_in_progress != 0 ||
8571             (!devigone && (un->un_ncmds_in_driver != 0 ||
8572             un->un_ncmds_in_transport != 0 ||
8573             un->un_state == SD_STATE_RWAIT))) {
8574                 mutex_exit(&sd_detach_mutex);
8575                 return (DDI_FAILURE);
8576         }
8577 
8578         SD_TRACE(SD_LOG_ATTACH_DETACH, un, "%s: entry 0x%p\n", __func__, un);
8579 
8580         /*
8581          * Mark this instance as currently in a detach, to inhibit any
8582          * opens from a layered driver.
8583          */
8584         un->un_detach_count++;
8585         mutex_exit(&sd_detach_mutex);
8586 
8587         tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
8588             SCSI_ADDR_PROP_TARGET, -1);
8589 
8590         dev = sd_make_device(SD_DEVINFO(un));
8591 
8592         mutex_enter(SD_MUTEX(un));
8593 
8594         /*
8595          * Fail the detach if there are any outstanding layered
8596          * opens on this device.
8597          */
8598         for (i = 0; i < NDKMAP; i++) {
8599                 if (un->un_ocmap.lyropen[i] != 0) {
8600                         goto err_notclosed;
8601                 }
8602         }
8603 
8604         /*
8605          * If we have the device reserved, release the reservation.
8606          */
8607         if (!devigone &&
8608             (un->un_resvd_status & SD_RESERVE) &&
8609             !(un->un_resvd_status & SD_LOST_RESERVE)) {
8610                 mutex_exit(SD_MUTEX(un));
8611                 /*
8612                  * Note: sd_reserve_release sends a command to the device
8613                  * via the sd_ioctlcmd() path, and can sleep.
8614                  */
8615                 if (sd_reserve_release(dev, SD_RELEASE) != 0) {
8616                         SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8617                             "%s: cannot release reservation\n", __func__);
8618                 }
8619         } else {
8620                 mutex_exit(SD_MUTEX(un));
8621         }
8622 
8623         /*
8624          * Untimeout any reserve recover, throttle reset, restart unit
8625          * and delayed broadcast timeout threads. Protect the timeout pointer
8626          * from getting nulled by their callback functions.
8627          */
8628         mutex_enter(SD_MUTEX(un));
8629         if (un->un_resvd_timeid != NULL) {
8630                 timeout_id_t temp_id = un->un_resvd_timeid;
8631                 un->un_resvd_timeid = NULL;
8632                 mutex_exit(SD_MUTEX(un));
8633                 (void) untimeout(temp_id);
8634                 mutex_enter(SD_MUTEX(un));
8635         }
8636 
8637         if (un->un_reset_throttle_timeid != NULL) {


8654                 timeout_id_t temp_id = un->un_rmw_msg_timeid;
8655                 un->un_rmw_msg_timeid = NULL;
8656                 mutex_exit(SD_MUTEX(un));
8657                 (void) untimeout(temp_id);
8658                 mutex_enter(SD_MUTEX(un));
8659         }
8660 
8661         if (un->un_dcvb_timeid != NULL) {
8662                 timeout_id_t temp_id = un->un_dcvb_timeid;
8663                 un->un_dcvb_timeid = NULL;
8664                 mutex_exit(SD_MUTEX(un));
8665                 (void) untimeout(temp_id);
8666         } else {
8667                 mutex_exit(SD_MUTEX(un));
8668         }
8669 
8670         /* Remove any pending reservation reclaim requests for this device */
8671         sd_rmv_resv_reclaim_req(dev);
8672 
8673         mutex_enter(SD_MUTEX(un));
8674         if (un->un_retry_timeid != NULL) {
8675                 timeout_id_t temp_id = un->un_retry_timeid;
8676                 un->un_retry_timeid = NULL;
8677                 mutex_exit(SD_MUTEX(un));
8678                 (void) untimeout(temp_id);
8679                 mutex_enter(SD_MUTEX(un));
8680         }
8681 
8682         /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */
8683         if (un->un_direct_priority_timeid != NULL) {
8684                 timeout_id_t temp_id = un->un_direct_priority_timeid;
8685                 un->un_direct_priority_timeid = NULL;
8686                 mutex_exit(SD_MUTEX(un));
8687                 (void) untimeout(temp_id);
8688                 mutex_enter(SD_MUTEX(un));
8689         }
8690 
8691         /* Cancel any active multi-host disk watch thread requests */
8692         if (un->un_mhd_token != NULL) {
8693                 mutex_exit(SD_MUTEX(un));
8694                  _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token));
8695                 if (scsi_watch_request_terminate(un->un_mhd_token,
8696                     SCSI_WATCH_TERMINATE_NOWAIT)) {
8697                         SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8698                             "%s: cannot cancel mhd watch request\n", __func__);
8699                         /*
8700                          * Note: We are returning here after having removed
8701                          * some driver timeouts above. This is consistent with
8702                          * the legacy implementation but perhaps the watch
8703                          * terminate call should be made with the wait flag set.
8704                          */
8705                         goto err_stillbusy;
8706                 }
8707                 mutex_enter(SD_MUTEX(un));
8708                 un->un_mhd_token = NULL;
8709         }
8710 
8711         if (un->un_swr_token != NULL) {
8712                 mutex_exit(SD_MUTEX(un));
8713                 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token));
8714                 if (scsi_watch_request_terminate(un->un_swr_token,
8715                     SCSI_WATCH_TERMINATE_NOWAIT)) {
8716                         SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8717                             "%s: cannot cancel swr watch request\n", __func__);
8718                         /*
8719                          * Note: We are returning here after having removed
8720                          * some driver timeouts above. This is consistent with
8721                          * the legacy implementation but perhaps the watch
8722                          * terminate call should be made with the wait flag set.
8723                          */
8724                         goto err_stillbusy;
8725                 }
8726                 mutex_enter(SD_MUTEX(un));
8727                 un->un_swr_token = NULL;
8728         }
8729 
8730         /*
8731          * Clear any scsi_reset_notifies. We clear the reset notifies
8732          * if we have not registered one.
8733          * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX!
8734          */
8735         mutex_exit(SD_MUTEX(un));
8736         (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
8737             sd_mhd_reset_notify_cb, (caddr_t)un);
8738 
8739         /*
8740          * protect the timeout pointers from getting nulled by
8741          * their callback functions during the cancellation process.
8742          * In such a scenario untimeout can be invoked with a null value.
8743          */
8744         _NOTE(NO_COMPETING_THREADS_NOW);
8745 
8746         mutex_enter(&un->un_pm_mutex);
8747         if (un->un_pm_idle_timeid != NULL) {
8748                 timeout_id_t temp_id = un->un_pm_idle_timeid;
8749                 un->un_pm_idle_timeid = NULL;
8750                 mutex_exit(&un->un_pm_mutex);
8751 
8752                 /*
8753                  * Timeout is active; cancel it.
8754                  * Note that it'll never be active on a device
8755                  * that does not support PM therefore we don't


8767          */
8768         if (un->un_pm_timeid != NULL) {
8769                 timeout_id_t temp_id = un->un_pm_timeid;
8770                 un->un_pm_timeid = NULL;
8771                 mutex_exit(&un->un_pm_mutex);
8772                 /*
8773                  * Timeout is active; cancel it.
8774                  * Note that it'll never be active on a device
8775                  * that does not support PM therefore we don't
8776                  * have to check before calling pm_idle_component.
8777                  */
8778                 (void) untimeout(temp_id);
8779                 (void) pm_idle_component(SD_DEVINFO(un), 0);
8780 
8781         } else {
8782                 mutex_exit(&un->un_pm_mutex);
8783                 if ((un->un_f_pm_is_enabled == TRUE) &&
8784                     (pm_lower_power(SD_DEVINFO(un), 0, SD_PM_STATE_STOPPED(un))
8785                     != DDI_SUCCESS)) {
8786                         SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8787                             "%s: lower power request failed, ignoring\n",
8788                             __func__);
8789                         /*
8790                          * The above test now includes a check to see if PM is
8791                          * supported by this device before call
8792                          * pm_lower_power().
8793                          * Note, the following is not dead code. The call to
8794                          * pm_lower_power above will generate a call back into
8795                          * our sdpower routine which might result in a timeout
8796                          * handler getting activated. Therefore the following
8797                          * code is valid and necessary.
8798                          */
8799                         mutex_enter(&un->un_pm_mutex);
8800                         if (un->un_pm_timeid != NULL) {
8801                                 timeout_id_t temp_id = un->un_pm_timeid;
8802                                 un->un_pm_timeid = NULL;
8803                                 mutex_exit(&un->un_pm_mutex);
8804                                 (void) untimeout(temp_id);
8805                                 (void) pm_idle_component(SD_DEVINFO(un), 0);
8806                         } else {
8807                                 mutex_exit(&un->un_pm_mutex);
8808                         }
8809                 }


8832         }
8833 
8834         if (un->un_f_is_fibre == FALSE) {
8835                 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
8836         }
8837 
8838         /*
8839          * Remove any event callbacks, fibre only
8840          */
8841         if (un->un_f_is_fibre == TRUE) {
8842                 if ((un->un_insert_event != NULL) &&
8843                     (ddi_remove_event_handler(un->un_insert_cb_id) !=
8844                     DDI_SUCCESS)) {
8845                         /*
8846                          * Note: We are returning here after having done
8847                          * substantial cleanup above. This is consistent
8848                          * with the legacy implementation but this may not
8849                          * be the right thing to do.
8850                          */
8851                         SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8852                             "%s: cannot cancel insert event\n", __func__);
8853                         goto err_remove_event;
8854                 }
8855                 un->un_insert_event = NULL;
8856 
8857                 if ((un->un_remove_event != NULL) &&
8858                     (ddi_remove_event_handler(un->un_remove_cb_id) !=
8859                     DDI_SUCCESS)) {
8860                         /*
8861                          * Note: We are returning here after having done
8862                          * substantial cleanup above. This is consistent
8863                          * with the legacy implementation but this may not
8864                          * be the right thing to do.
8865                          */
8866                         SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8867                             "%s: cannot cancel remove event\n", __func__);
8868                         goto err_remove_event;
8869                 }
8870                 un->un_remove_event = NULL;
8871         }
8872 
8873         /* Do not free the softstate if the callback routine is active */
8874         sd_sync_with_callback(un);
8875 
8876         cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
8877         cmlb_free_handle(&un->un_cmlbhandle);
8878 
8879         /*
8880          * Hold the detach mutex here, to make sure that no other threads ever
8881          * can access a (partially) freed soft state structure.
8882          */
8883         mutex_enter(&sd_detach_mutex);
8884 
8885         /*
8886          * Clean up the soft state struct.
8887          * Cleanup is done in reverse order of allocs/inits.


8990          * check here because we've already checked during attach. No device
8991          * that is not parallel SCSI is in the chain.
8992          */
8993         if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) {
8994                 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH);
8995         }
8996 
8997         return (DDI_SUCCESS);
8998 
8999 err_notclosed:
9000         mutex_exit(SD_MUTEX(un));
9001 
9002 err_stillbusy:
9003         _NOTE(NO_COMPETING_THREADS_NOW);
9004 
9005 err_remove_event:
9006         mutex_enter(&sd_detach_mutex);
9007         un->un_detach_count--;
9008         mutex_exit(&sd_detach_mutex);
9009 
9010         SD_TRACE(SD_LOG_ATTACH_DETACH, un, "%s: exit failure\n", __func__);
9011         return (DDI_FAILURE);
9012 }
9013 
9014 
9015 /*
9016  *    Function: sd_create_errstats
9017  *
9018  * Description: This routine instantiates the device error stats.
9019  *
9020  *              Note: During attach the stats are instantiated first so they are
9021  *              available for attach-time routines that utilize the driver
9022  *              iopath to send commands to the device. The stats are initialized
9023  *              separately so data obtained during some attach-time routines is
9024  *              available. (4362483)
9025  *
9026  *   Arguments: un - driver soft state (unit) structure
9027  *              instance - driver instance
9028  *
9029  *     Context: Kernel thread context
9030  */