First pass at 4310
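
This first pass adds a dedicated reset taskq: mptsas_attach() now creates mpt->m_reset_taskq next to the event and dr taskqs (1276-1283), the attach failure path destroys it (1609-1611), the suspend path drains it (1744), the detach path destroys it (1920), and a new wrapper, mptsas_handle_restart_ioc() (12758), gives mptsas_restart_ioc() a taskq-compatible signature. The unchanged file appears to be listed first, followed by the patched version. Hedged sketches of the dispatch site and of the I/O-path check implied by the m_in_reset comment (neither is part of this excerpt) follow the new wrapper and the end of the listing.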


1046 /*
1047  * Notes:
1048  *      Set up all device state and allocate data structures,
1049  *      mutexes, condition variables, etc. for device operation.
1050  *      Add interrupts needed.
1051  *      Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
1052  */
1053 static int
1054 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1055 {
1056         mptsas_t                *mpt = NULL;
1057         int                     instance, i, j;
1058         int                     doneq_thread_num;
1059         char                    intr_added = 0;
1060         char                    map_setup = 0;
1061         char                    config_setup = 0;
1062         char                    hba_attach_setup = 0;
1063         char                    smp_attach_setup = 0;
1064         char                    mutex_init_done = 0;
1065         char                    event_taskq_create = 0;
1066         char                    dr_taskq_create = 0;
1067         char                    doneq_thread_create = 0;
1068         char                    added_watchdog = 0;
1069         scsi_hba_tran_t         *hba_tran;
1070         uint_t                  mem_bar = MEM_SPACE;
1071         int                     rval = DDI_FAILURE;
1072 
1073         /* CONSTCOND */
1074         ASSERT(NO_COMPETING_THREADS);
1075 
1076         if (scsi_hba_iport_unit_address(dip)) {
1077                 return (mptsas_iport_attach(dip, cmd));
1078         }
1079 
1080         switch (cmd) {
1081         case DDI_ATTACH:
1082                 break;
1083 
1084         case DDI_RESUME:
1085                 if ((hba_tran = ddi_get_driver_private(dip)) == NULL)


1252          */
1253         if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
1254             1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1255                 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
1256                 goto fail;
1257         }
1258         event_taskq_create++;
1259 
1260         /*
1261          * A taskq is created for dealing with dr events
1262          */
1263         if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1264             "mptsas_dr_taskq",
1265             1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1266                 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1267                     "failed");
1268                 goto fail;
1269         }
1270         dr_taskq_create++;
1271 
1272         mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1273             0, "mptsas_doneq_thread_threshold_prop", 10);
1274         mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1275             0, "mptsas_doneq_length_threshold_prop", 8);
1276         mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1277             0, "mptsas_doneq_thread_n_prop", 8);
1278 
1279         if (mpt->m_doneq_thread_n) {
1280                 cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1281                 mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1282 
1283                 mutex_enter(&mpt->m_doneq_mutex);
1284                 mpt->m_doneq_thread_id =
1285                     kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1286                     * mpt->m_doneq_thread_n, KM_SLEEP);
1287 
1288                 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1289                         cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1290                             CV_DRIVER, NULL);
1291                         mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,


1576                                 cv_wait(&mpt->m_doneq_thread_cv,
1577                                     &mpt->m_doneq_mutex);
1578                         }
1579                         for (j = 0; j < doneq_thread_num; j++) {
1580                                 cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1581                                 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1582                         }
1583                         kmem_free(mpt->m_doneq_thread_id,
1584                             sizeof (mptsas_doneq_thread_list_t)
1585                             * doneq_thread_num);
1586                         mutex_exit(&mpt->m_doneq_mutex);
1587                         cv_destroy(&mpt->m_doneq_thread_cv);
1588                         mutex_destroy(&mpt->m_doneq_mutex);
1589                 }
1590                 if (event_taskq_create) {
1591                         ddi_taskq_destroy(mpt->m_event_taskq);
1592                 }
1593                 if (dr_taskq_create) {
1594                         ddi_taskq_destroy(mpt->m_dr_taskq);
1595                 }
1596                 if (mutex_init_done) {
1597                         mutex_destroy(&mpt->m_tx_waitq_mutex);
1598                         mutex_destroy(&mpt->m_passthru_mutex);
1599                         mutex_destroy(&mpt->m_mutex);
1600                         for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1601                                 mutex_destroy(
1602                                     &mpt->m_phy_info[i].smhba_info.phy_mutex);
1603                         }
1604                         cv_destroy(&mpt->m_cv);
1605                         cv_destroy(&mpt->m_passthru_cv);
1606                         cv_destroy(&mpt->m_fw_cv);
1607                         cv_destroy(&mpt->m_config_cv);
1608                         cv_destroy(&mpt->m_fw_diag_cv);
1609                 }
1610 
1611                 if (map_setup) {
1612                         mptsas_cfg_fini(mpt);
1613                 }
1614                 if (config_setup) {
1615                         mptsas_config_space_fini(mpt);


1708 
1709         /*
1710          * If this mpt is not in full power (PM_LEVEL_D0), just return.
1711          */
1712         if ((mpt->m_options & MPTSAS_OPT_PM) &&
1713             (mpt->m_power_level != PM_LEVEL_D0)) {
1714                 mutex_exit(&mpt->m_mutex);
1715                 return (DDI_SUCCESS);
1716         }
1717 
1718         /* Disable HBA interrupts in hardware */
1719         MPTSAS_DISABLE_INTR(mpt);
1720         /*
1721          * Send RAID action system shutdown to sync IR
1722          */
1723         mptsas_raid_action_system_shutdown(mpt);
1724 
1725         mutex_exit(&mpt->m_mutex);
1726 
1727         /* drain the taskqs */
1728         ddi_taskq_wait(mpt->m_event_taskq);
1729         ddi_taskq_wait(mpt->m_dr_taskq);
1730 
1731         return (DDI_SUCCESS);
1732 }
1733 
1734 #ifdef  __sparc
1735 /*ARGSUSED*/
1736 static int
1737 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
1738 {
1739         mptsas_t        *mpt;
1740         scsi_hba_tran_t *tran;
1741 
1742         /*
1743          * If this call is for iport, just return.
1744          */
1745         if (scsi_hba_iport_unit_address(devi))
1746                 return (DDI_SUCCESS);
1747 


1883                                     "mptsas%d: Raise power request failed.",
1884                                     mpt->m_instance);
1885                                 (void) pm_idle_component(dip, 0);
1886                                 return (DDI_FAILURE);
1887                         }
1888                 }
1889         }
1890 
1891         /*
1892          * Send RAID action system shutdown to sync IR.  After that, send a
1893          * Message Unit Reset.  Since the DMA resources are freed afterwards,
1894          * putting the IOC into the READY state avoids HBA-initiated DMA.
1895          */
1896         mutex_enter(&mpt->m_mutex);
1897         MPTSAS_DISABLE_INTR(mpt);
1898         mptsas_raid_action_system_shutdown(mpt);
1899         mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1900         (void) mptsas_ioc_reset(mpt, FALSE);
1901         mutex_exit(&mpt->m_mutex);
1902         mptsas_rem_intrs(mpt);
1903         ddi_taskq_destroy(mpt->m_event_taskq);
1904         ddi_taskq_destroy(mpt->m_dr_taskq);
1905 
1906         if (mpt->m_doneq_thread_n) {
1907                 mutex_enter(&mpt->m_doneq_mutex);
1908                 doneq_thread_num = mpt->m_doneq_thread_n;
1909                 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
1910                         mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
1911                         mpt->m_doneq_thread_id[i].flag &=
1912                             (~MPTSAS_DONEQ_THREAD_ACTIVE);
1913                         cv_signal(&mpt->m_doneq_thread_id[i].cv);
1914                         mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
1915                 }
1916                 while (mpt->m_doneq_thread_n) {
1917                         cv_wait(&mpt->m_doneq_thread_cv,
1918                             &mpt->m_doneq_mutex);
1919                 }
1920                 for (i = 0;  i < doneq_thread_num; i++) {
1921                         cv_destroy(&mpt->m_doneq_thread_id[i].cv);
1922                         mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);


12720                             (mptsas_event_report_t *)data, mode, rval);
12721                         break;
12722                 case MPTIOCTL_REG_ACCESS:
12723                         /*
12724                          * The user has requested register access.  Call our
12725                          * routine which does this.
12726                          */
12727                         status = mptsas_reg_access(mpt,
12728                             (mptsas_reg_access_t *)data, mode);
12729                         break;
12730                 default:
12731                         status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
12732                             rval);
12733                         break;
12734         }
12735 
12736 out:
12737         return (status);
12738 }
12739 
12740 int
12741 mptsas_restart_ioc(mptsas_t *mpt)
12742 {
12743         int             rval = DDI_SUCCESS;
12744         mptsas_target_t *ptgt = NULL;
12745 
12746         ASSERT(mutex_owned(&mpt->m_mutex));
12747 
12748         /*
12749          * Set a flag telling I/O path that we're processing a reset.  This is
12750          * needed because after the reset is complete, the hash table still
12751          * needs to be rebuilt.  If I/Os are started before the hash table is
12752          * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
12753          * so that they can be retried.
12754          */
12755         mpt->m_in_reset = TRUE;
12756 
12757         /*
12758          * Set all throttles to HOLD
12759          */




1046 /*
1047  * Notes:
1048  *      Set up all device state and allocate data structures,
1049  *      mutexes, condition variables, etc. for device operation.
1050  *      Add interrupts needed.
1051  *      Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
1052  */
1053 static int
1054 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1055 {
1056         mptsas_t                *mpt = NULL;
1057         int                     instance, i, j;
1058         int                     doneq_thread_num;
1059         char                    intr_added = 0;
1060         char                    map_setup = 0;
1061         char                    config_setup = 0;
1062         char                    hba_attach_setup = 0;
1063         char                    smp_attach_setup = 0;
1064         char                    mutex_init_done = 0;
1065         char                    event_taskq_create = 0;
1066         char                    reset_taskq_create = 0;
1067         char                    dr_taskq_create = 0;
1068         char                    doneq_thread_create = 0;
1069         char                    added_watchdog = 0;
1070         scsi_hba_tran_t         *hba_tran;
1071         uint_t                  mem_bar = MEM_SPACE;
1072         int                     rval = DDI_FAILURE;
1073 
1074         /* CONSTCOND */
1075         ASSERT(NO_COMPETING_THREADS);
1076 
1077         if (scsi_hba_iport_unit_address(dip)) {
1078                 return (mptsas_iport_attach(dip, cmd));
1079         }
1080 
1081         switch (cmd) {
1082         case DDI_ATTACH:
1083                 break;
1084 
1085         case DDI_RESUME:
1086                 if ((hba_tran = ddi_get_driver_private(dip)) == NULL)


1253          */
1254         if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
1255             1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1256                 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
1257                 goto fail;
1258         }
1259         event_taskq_create++;
1260 
1261         /*
1262          * A taskq is created for dealing with dr events
1263          */
1264         if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1265             "mptsas_dr_taskq",
1266             1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1267                 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1268                     "failed");
1269                 goto fail;
1270         }
1271         dr_taskq_create++;
1272 
1273         /*
1274          * A taskq is created for dealing with reset events
1275          */
1276         if ((mpt->m_reset_taskq = ddi_taskq_create(dip,
1277             "mptsas_reset_taskq",
1278             1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1279                 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for reset "
1280                     "failed");
1281                 goto fail;
1282         }
1283         reset_taskq_create++;
1284 
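The new taskq gets the same lifecycle handling as the event and dr taskqs above: it is destroyed in the attach failure path (1609-1611), drained in the suspend path (1744), and destroyed on detach (1920). The excerpt does not show where work is dispatched onto it; see the sketch after the new wrapper at 12758 below.
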
1285         mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1286             0, "mptsas_doneq_thread_threshold_prop", 10);
1287         mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1288             0, "mptsas_doneq_length_threshold_prop", 8);
1289         mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1290             0, "mptsas_doneq_thread_n_prop", 8);
1291 
1292         if (mpt->m_doneq_thread_n) {
1293                 cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1294                 mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1295 
1296                 mutex_enter(&mpt->m_doneq_mutex);
1297                 mpt->m_doneq_thread_id =
1298                     kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1299                     * mpt->m_doneq_thread_n, KM_SLEEP);
1300 
1301                 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1302                         cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1303                             CV_DRIVER, NULL);
1304                         mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,


1589                                 cv_wait(&mpt->m_doneq_thread_cv,
1590                                     &mpt->m_doneq_mutex);
1591                         }
1592                         for (j = 0; j < doneq_thread_num; j++) {
1593                                 cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1594                                 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1595                         }
1596                         kmem_free(mpt->m_doneq_thread_id,
1597                             sizeof (mptsas_doneq_thread_list_t)
1598                             * doneq_thread_num);
1599                         mutex_exit(&mpt->m_doneq_mutex);
1600                         cv_destroy(&mpt->m_doneq_thread_cv);
1601                         mutex_destroy(&mpt->m_doneq_mutex);
1602                 }
1603                 if (event_taskq_create) {
1604                         ddi_taskq_destroy(mpt->m_event_taskq);
1605                 }
1606                 if (dr_taskq_create) {
1607                         ddi_taskq_destroy(mpt->m_dr_taskq);
1608                 }
1609                 if (reset_taskq_create) {
1610                         ddi_taskq_destroy(mpt->m_reset_taskq);
1611                 }
1612                 if (mutex_init_done) {
1613                         mutex_destroy(&mpt->m_tx_waitq_mutex);
1614                         mutex_destroy(&mpt->m_passthru_mutex);
1615                         mutex_destroy(&mpt->m_mutex);
1616                         for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1617                                 mutex_destroy(
1618                                     &mpt->m_phy_info[i].smhba_info.phy_mutex);
1619                         }
1620                         cv_destroy(&mpt->m_cv);
1621                         cv_destroy(&mpt->m_passthru_cv);
1622                         cv_destroy(&mpt->m_fw_cv);
1623                         cv_destroy(&mpt->m_config_cv);
1624                         cv_destroy(&mpt->m_fw_diag_cv);
1625                 }
1626 
1627                 if (map_setup) {
1628                         mptsas_cfg_fini(mpt);
1629                 }
1630                 if (config_setup) {
1631                         mptsas_config_space_fini(mpt);


1724 
1725         /*
1726          * If this mpt is not in full power (PM_LEVEL_D0), just return.
1727          */
1728         if ((mpt->m_options & MPTSAS_OPT_PM) &&
1729             (mpt->m_power_level != PM_LEVEL_D0)) {
1730                 mutex_exit(&mpt->m_mutex);
1731                 return (DDI_SUCCESS);
1732         }
1733 
1734         /* Disable HBA interrupts in hardware */
1735         MPTSAS_DISABLE_INTR(mpt);
1736         /*
1737          * Send RAID action system shutdown to sync IR
1738          */
1739         mptsas_raid_action_system_shutdown(mpt);
1740 
1741         mutex_exit(&mpt->m_mutex);
1742 
1743         /* drain the taskqs */
1744         ddi_taskq_wait(mpt->m_reset_taskq);
1745         ddi_taskq_wait(mpt->m_event_taskq);
1746         ddi_taskq_wait(mpt->m_dr_taskq);
1747 
1748         return (DDI_SUCCESS);
1749 }
1750 
1751 #ifdef  __sparc
1752 /*ARGSUSED*/
1753 static int
1754 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
1755 {
1756         mptsas_t        *mpt;
1757         scsi_hba_tran_t *tran;
1758 
1759         /*
1760          * If this call is for iport, just return.
1761          */
1762         if (scsi_hba_iport_unit_address(devi))
1763                 return (DDI_SUCCESS);
1764 


1900                                     "mptsas%d: Raise power request failed.",
1901                                     mpt->m_instance);
1902                                 (void) pm_idle_component(dip, 0);
1903                                 return (DDI_FAILURE);
1904                         }
1905                 }
1906         }
1907 
1908         /*
1909          * Send RAID action system shutdown to sync IR.  After that, send a
1910          * Message Unit Reset.  Since the DMA resources are freed afterwards,
1911          * putting the IOC into the READY state avoids HBA-initiated DMA.
1912          */
1913         mutex_enter(&mpt->m_mutex);
1914         MPTSAS_DISABLE_INTR(mpt);
1915         mptsas_raid_action_system_shutdown(mpt);
1916         mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1917         (void) mptsas_ioc_reset(mpt, FALSE);
1918         mutex_exit(&mpt->m_mutex);
1919         mptsas_rem_intrs(mpt);
1920         ddi_taskq_destroy(mpt->m_reset_taskq);
1921         ddi_taskq_destroy(mpt->m_event_taskq);
1922         ddi_taskq_destroy(mpt->m_dr_taskq);
1923 
1924         if (mpt->m_doneq_thread_n) {
1925                 mutex_enter(&mpt->m_doneq_mutex);
1926                 doneq_thread_num = mpt->m_doneq_thread_n;
1927                 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
1928                         mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
1929                         mpt->m_doneq_thread_id[i].flag &=
1930                             (~MPTSAS_DONEQ_THREAD_ACTIVE);
1931                         cv_signal(&mpt->m_doneq_thread_id[i].cv);
1932                         mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
1933                 }
1934                 while (mpt->m_doneq_thread_n) {
1935                         cv_wait(&mpt->m_doneq_thread_cv,
1936                             &mpt->m_doneq_mutex);
1937                 }
1938                 for (i = 0;  i < doneq_thread_num; i++) {
1939                         cv_destroy(&mpt->m_doneq_thread_id[i].cv);
1940                         mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);


12738                             (mptsas_event_report_t *)data, mode, rval);
12739                         break;
12740                 case MPTIOCTL_REG_ACCESS:
12741                         /*
12742                          * The user has requested register access.  Call our
12743                          * routine which does this.
12744                          */
12745                         status = mptsas_reg_access(mpt,
12746                             (mptsas_reg_access_t *)data, mode);
12747                         break;
12748                 default:
12749                         status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
12750                             rval);
12751                         break;
12752         }
12753 
12754 out:
12755         return (status);
12756 }
12757 
12758 /* Wrapper giving mptsas_restart_ioc() a taskq-compatible signature */
12759 void
12760 mptsas_handle_restart_ioc(void *mpt) {
12761         (void) mptsas_restart_ioc((mptsas_t *)mpt);
12762 }
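
mptsas_restart_ioc() asserts that m_mutex is held (12770), so the wrapper presumably needs to take m_mutex around the call (or the assertion needs to change); as posted, the wrapper calls it without the lock. The call site that hands work to the new taskq is also not part of this excerpt; the snippet below is a minimal sketch of what it might look like, an assumption rather than part of the patch.

        /*
         * Hypothetical dispatch site: queue an asynchronous IOC restart
         * on the dedicated reset taskq instead of calling
         * mptsas_restart_ioc() in line.  Assumes mptsas_handle_restart_ioc()
         * takes m_mutex around the mptsas_restart_ioc() call.
         */
        if (ddi_taskq_dispatch(mpt->m_reset_taskq,
            mptsas_handle_restart_ioc, (void *)mpt,
            DDI_NOSLEEP) != DDI_SUCCESS) {
                mptsas_log(mpt, CE_WARN, "reset taskq dispatch failed");
        }
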
12763 
12764 int
12765 mptsas_restart_ioc(mptsas_t *mpt)
12766 {
12767         int             rval = DDI_SUCCESS;
12768         mptsas_target_t *ptgt = NULL;
12769 
12770         ASSERT(mutex_owned(&mpt->m_mutex));
12771 
12772         /*
12773          * Set a flag telling I/O path that we're processing a reset.  This is
12774          * needed because after the reset is complete, the hash table still
12775          * needs to be rebuilt.  If I/Os are started before the hash table is
12776          * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
12777          * so that they can be retried.
12778          */
12779         mpt->m_in_reset = TRUE;
12780 
12781         /*
12782          * Set all throttles to HOLD
12783          */
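
The m_in_reset flag set at 12779 is consumed in the I/O start path, which is not shown in this excerpt. Below is a minimal sketch of the kind of check the comment at 12772-12778 describes; the function name is hypothetical, and the usual SCSA return conventions and driver headers are assumed.

/*
 * Hypothetical gate at the head of the I/O start path (sketch only):
 * while a reset is being processed, bounce the command back so the
 * target driver retries it once the hash table has been rebuilt.
 */
static int
mptsas_start_gate_sketch(mptsas_t *mpt, struct scsi_pkt *pkt)
{
        if (mpt->m_in_reset) {
                pkt->pkt_reason = CMD_RESET;
                pkt->pkt_statistics |= STAT_BUS_RESET;
                return (TRAN_BUSY);
        }
        return (TRAN_ACCEPT);
}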