XXXX Nexenta fixes for mpt_sas(7d)


  50  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
  51  * DAMAGE.
  52  */
  53 
  54 /*
  55  * mptsas - This is a driver based on LSI Logic's MPT2.0 interface.
  56  *
  57  */
  58 
  59 #if defined(lint) || defined(DEBUG)
  60 #define MPTSAS_DEBUG
  61 #endif
  62 
  63 /*
  64  * standard header files.
  65  */
  66 #include <sys/note.h>
  67 #include <sys/scsi/scsi.h>
  68 #include <sys/pci.h>
  69 #include <sys/file.h>
  70 #include <sys/cpuvar.h>
  71 #include <sys/policy.h>
  72 #include <sys/sysevent.h>
  73 #include <sys/sysevent/eventdefs.h>
  74 #include <sys/sysevent/dr.h>
  75 #include <sys/sata/sata_defs.h>
  76 #include <sys/scsi/generic/sas.h>
  77 #include <sys/scsi/impl/scsi_sas.h>
  78 
  79 #pragma pack(1)
  80 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
  81 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
  82 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
  83 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
  84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
  85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
  86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
  87 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
  88 #pragma pack()
  89 
  90 /*
  91  * private header files.
  92  *
  93  */
  94 #include <sys/scsi/impl/scsi_reset_notify.h>
  95 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
  96 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
  97 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
  98 
  99 #include <sys/raidioctl.h>
 100 
 101 #include <sys/fs/dv_node.h>       /* devfs_clean */
 102 
 103 /*
 104  * FMA header files
 105  */
 106 #include <sys/ddifm.h>
 107 #include <sys/fm/protocol.h>
 108 #include <sys/fm/util.h>
 109 #include <sys/fm/io/ddi.h>
 110 
 111 /*
  112  * Note to anyone modifying the mptsas driver: since snv_145, when
  113  * CR 6910752 (mpt_sas driver performance can be improved) was
  114  * integrated, the per-instance mutex m_mutex is no longer held in the
  115  * key I/O code path, including mptsas_scsi_start(), mptsas_intr() and
  116  * all of the functions they call, so do not take it for granted that
  117  * all operations are correctly synchronized. Before modifying the key
  118  * code path, or even other code paths such as DR, watchsubr, ioctl and
  119  * passthrough, make sure the elements being modified have no
  120  * relationship to the elements used in the fast path
  121  * (mptsas_handle_io_fastpath()) in the ISR and the functions it calls;
  122  * otherwise the newly introduced mutexes must be used to protect them.
  123  * For details on doing this correctly, see the comments in mptsas_intr().
 124  */
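
A minimal sketch of the rule described above, not part of the driver itself: any element that the fast path reads must be updated under m_intr_mutex (or one of the other newly introduced mutexes) rather than relying on m_mutex alone. The field m_fastpath_example and the function below are hypothetical.

#include <sys/types.h>
#include <sys/ksynch.h>

/*
 * Hypothetical illustration: m_fastpath_example stands in for any
 * field that mptsas_handle_io_fastpath() might read from the ISR.
 * Because the ISR no longer takes m_mutex, writers must use
 * m_intr_mutex.
 */
static void
example_update_fastpath_state(mptsas_t *mpt, uint32_t value)
{
        mutex_enter(&mpt->m_intr_mutex);        /* not m_mutex */
        mpt->m_fastpath_example = value;        /* hypothetical field */
        mutex_exit(&mpt->m_intr_mutex);
}
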
 125 
 126 /*
 127  * autoconfiguration data and routines.
 128  */
 129 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
 130 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
 131 static int mptsas_power(dev_info_t *dip, int component, int level);
 132 
 133 /*
 134  * cb_ops function
 135  */
 136 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
 137         cred_t *credp, int *rval);
 138 #ifdef __sparc
 139 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
 140 #else  /* __sparc */
 141 static int mptsas_quiesce(dev_info_t *devi);
 142 #endif  /* __sparc */
 143 
 144 /*
  145  * Resource initialization for hardware
 146  */


 199  * SMP functions
 200  */
 201 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
 202 
 203 /*
 204  * internal function prototypes.
 205  */
 206 static void mptsas_list_add(mptsas_t *mpt);
 207 static void mptsas_list_del(mptsas_t *mpt);
 208 
 209 static int mptsas_quiesce_bus(mptsas_t *mpt);
 210 static int mptsas_unquiesce_bus(mptsas_t *mpt);
 211 
 212 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
 213 static void mptsas_free_handshake_msg(mptsas_t *mpt);
 214 
 215 static void mptsas_ncmds_checkdrain(void *arg);
 216 
 217 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
 218 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);


 219 
 220 static int mptsas_do_detach(dev_info_t *dev);
 221 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
 222 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
 223     struct scsi_pkt *pkt);
 224 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
 225 
 226 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
 227 static void mptsas_handle_event(void *args);
 228 static int mptsas_handle_event_sync(void *args);
 229 static void mptsas_handle_dr(void *args);
 230 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
 231     dev_info_t *pdip);
 232 
 233 static void mptsas_restart_cmd(void *);
 234 
 235 static void mptsas_flush_hba(mptsas_t *mpt);
 236 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
 237         uint8_t tasktype);
 238 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
 239     uchar_t reason, uint_t stat);
 240 
 241 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
 242 static void mptsas_process_intr(mptsas_t *mpt,
 243     pMpi2ReplyDescriptorsUnion_t reply_desc_union);
 244 static int mptsas_handle_io_fastpath(mptsas_t *mpt, uint16_t SMID);
 245 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
 246     pMpi2ReplyDescriptorsUnion_t reply_desc);
 247 static void mptsas_handle_address_reply(mptsas_t *mpt,
 248     pMpi2ReplyDescriptorsUnion_t reply_desc);
 249 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
 250 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
 251     uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
 252 
 253 static void mptsas_watch(void *arg);
 254 static void mptsas_watchsubr(mptsas_t *mpt);
 255 static void mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl);

 256 
 257 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
 258 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
 259     uint8_t *data, uint32_t request_size, uint32_t reply_size,
 260     uint32_t data_size, uint32_t direction, uint8_t *dataout,
 261     uint32_t dataout_size, short timeout, int mode);
 262 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
 263 
 264 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
 265     uint32_t unique_id);
 266 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
 267 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
 268     mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
 269 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
 270     mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
 271     uint32_t diag_type);
 272 static int mptsas_diag_register(mptsas_t *mpt,
 273     mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
 274 static int mptsas_diag_unregister(mptsas_t *mpt,
 275     mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);


 291 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
 292 
 293 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
 294 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
 295 
 296 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
 297     int kmflags);
 298 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
 299 
 300 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
 301     mptsas_cmd_t *cmd);
 302 static void mptsas_check_task_mgt(mptsas_t *mpt,
 303     pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
 304 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
 305     mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
 306     int *resid);
 307 
 308 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
 309 static void mptsas_free_active_slots(mptsas_t *mpt);
 310 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
 311 static int mptsas_start_cmd0(mptsas_t *mpt, mptsas_cmd_t *cmd);
 312 
 313 static void mptsas_restart_hba(mptsas_t *mpt);

 314 
 315 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
 316 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
 317 static inline void mptsas_doneq_add0(mptsas_t *mpt, mptsas_cmd_t *cmd);
 318 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
 319 
 320 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
 321 static void mptsas_doneq_empty(mptsas_t *mpt);
 322 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
 323 
 324 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
 325 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);


 326 

 327 static void mptsas_start_watch_reset_delay();
 328 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
 329 static void mptsas_watch_reset_delay(void *arg);
 330 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
 331 
 332 static int mptsas_outstanding_cmds_n(mptsas_t *mpt);
 333 /*
 334  * helper functions
 335  */
 336 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
 337 
 338 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
 339 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
 340 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
 341     int lun);
 342 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
 343     int lun);
 344 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
 345 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
 346 
 347 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
 348     int *lun);
 349 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
 350 
 351 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt, int phymask,
 352     uint8_t phy);
 353 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask,
 354     uint64_t wwid);
 355 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask,
 356     uint64_t wwid);
 357 
 358 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
 359     uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
 360 
 361 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
 362     uint16_t *handle, mptsas_target_t **pptgt);
 363 static void mptsas_update_phymask(mptsas_t *mpt);
 364 static inline void mptsas_remove_cmd0(mptsas_t *mpt, mptsas_cmd_t *cmd);
 365 
 366 static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
 367     uint32_t *status, uint8_t cmd);
 368 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
 369     mptsas_phymask_t *phymask);
 370 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
 371     mptsas_phymask_t phymask);
 372 static int mptsas_set_led_status(mptsas_t *mpt, mptsas_target_t *ptgt,
 373     uint32_t slotstatus);
 374 
 375 
 376 /*
 377  * Enumeration / DR functions
 378  */
 379 static void mptsas_config_all(dev_info_t *pdip);
 380 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
 381     dev_info_t **lundip);
 382 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
 383     dev_info_t **lundip);
 384 
 385 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
 386 static int mptsas_offline_target(dev_info_t *pdip, char *name);
 387 
 388 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
 389     dev_info_t **dip);
 390 
 391 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
 392 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
 393     dev_info_t **dip, mptsas_target_t *ptgt);


 414 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
 415     int mode, int *rval);
 416 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
 417     int mode, int *rval);
 418 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
 419     int mode, int *rval);
 420 static void mptsas_record_event(void *args);
 421 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
 422     int mode);
 423 
 424 static void mptsas_hash_init(mptsas_hash_table_t *hashtab);
 425 static void mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen);
 426 static void mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data);
 427 static void * mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
 428     mptsas_phymask_t key2);
 429 static void * mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
 430     mptsas_phymask_t key2);
 431 static void * mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos);
 432 
 433 mptsas_target_t *mptsas_tgt_alloc(mptsas_hash_table_t *, uint16_t, uint64_t,
 434     uint32_t, mptsas_phymask_t, uint8_t, mptsas_t *);
 435 static mptsas_smp_t *mptsas_smp_alloc(mptsas_hash_table_t *hashtab,
 436     mptsas_smp_t *data);
 437 static void mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
 438     mptsas_phymask_t phymask);
 439 static void mptsas_tgt_free(mptsas_hash_table_t *, uint64_t, mptsas_phymask_t);
 440 static void * mptsas_search_by_devhdl(mptsas_hash_table_t *, uint16_t);
 441 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
 442     dev_info_t **smp_dip);
 443 
 444 /*
 445  * Power management functions
 446  */
 447 static int mptsas_get_pci_cap(mptsas_t *mpt);
 448 static int mptsas_init_pm(mptsas_t *mpt);
 449 
 450 /*
 451  * MPT MSI tunable:
 452  *
 453  * By default MSI is enabled on all supported platforms.
 454  */


 464  * FMA Prototypes
 465  */
 466 static void mptsas_fm_init(mptsas_t *mpt);
 467 static void mptsas_fm_fini(mptsas_t *mpt);
 468 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
 469 
 470 extern pri_t minclsyspri, maxclsyspri;
 471 
 472 /*
 473  * This device is created by the SCSI pseudo nexus driver (SCSI vHCI).  It is
 474  * under this device that the paths to a physical device are created when
 475  * MPxIO is used.
 476  */
 477 extern dev_info_t       *scsi_vhci_dip;
 478 
 479 /*
 480  * Tunable timeout value for Inquiry VPD page 0x83
 481  * By default the value is 30 seconds.
 482  */
 483 int mptsas_inq83_retry_timeout = 30;
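
For reference, an integer tunable like this can typically be overridden from /etc/system and takes effect at the next boot. The value below is illustrative, and the module name mpt_sas is an assumption based on the driver name in the title:

* /etc/system fragment (illustrative)
set mpt_sas:mptsas_inq83_retry_timeout = 60
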








 484 
 485 /*
 486  * This is used to allocate memory for message frame storage, not for
 487  * data I/O DMA. All message frames must be stored in the first 4G of
 488  * physical memory.
 489  */
 490 ddi_dma_attr_t mptsas_dma_attrs = {
 491         DMA_ATTR_V0,    /* attribute layout version             */
 492         0x0ull,         /* address low - should be 0 (longlong) */
 493         0xffffffffull,  /* address high - 32-bit max range      */
 494         0x00ffffffull,  /* count max - max DMA object size      */
 495         4,              /* allocation alignment requirements    */
 496         0x78,           /* burstsizes - binary encoded values   */
 497         1,              /* minxfer - gran. of DMA engine        */
  498         0x00ffffffull,  /* maxxfer - max transfer size          */
 499         0xffffffffull,  /* max segment size (DMA boundary)      */
 500         MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length      */
 501         512,            /* granularity - device transfer size   */
 502         0               /* flags, set to 0                      */
 503 };
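
As context, and only as a hedged sketch rather than code from this file, an attribute structure like mptsas_dma_attrs is consumed by ddi_dma_alloc_handle(9F) when a message-frame DMA handle is allocated, so the resulting handle inherits the 32-bit address limit declared above:

#include <sys/ddi.h>
#include <sys/sunddi.h>

/* Sketch: allocate a DMA handle constrained by mptsas_dma_attrs. */
static int
example_alloc_msg_dma_handle(mptsas_t *mpt, ddi_dma_handle_t *hdlp)
{
        return (ddi_dma_alloc_handle(mpt->m_dip, &mptsas_dma_attrs,
            DDI_DMA_SLEEP, NULL, hdlp));
}
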


1180                         mpt->m_doneq_thread_id[j].flag |=
1181                             MPTSAS_DONEQ_THREAD_ACTIVE;
1182                         mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1183                         mpt->m_doneq_thread_id[j].arg.t = j;
1184                         mpt->m_doneq_thread_id[j].threadp =
1185                             thread_create(NULL, 0, mptsas_doneq_thread,
1186                             &mpt->m_doneq_thread_id[j].arg,
1187                             0, &p0, TS_RUN, minclsyspri);
1188                         mpt->m_doneq_thread_id[j].donetail =
1189                             &mpt->m_doneq_thread_id[j].doneq;
1190                         mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1191                 }
1192                 mutex_exit(&mpt->m_doneq_mutex);
1193                 doneq_thread_create++;
1194         }
1195 
1196         /* Initialize mutex used in interrupt handler */
1197         mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1198             DDI_INTR_PRI(mpt->m_intr_pri));
1199         mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1200         mutex_init(&mpt->m_intr_mutex, NULL, MUTEX_DRIVER,
1201             DDI_INTR_PRI(mpt->m_intr_pri));
1202         for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1203                 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1204                     NULL, MUTEX_DRIVER,
1205                     DDI_INTR_PRI(mpt->m_intr_pri));
1206         }
1207 
1208         cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1209         cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1210         cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1211         cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1212         cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1213         mutex_init_done++;
1214 
1215         /*
1216          * Disable hardware interrupt since we're not ready to
1217          * handle it yet.
1218          */
1219         MPTSAS_DISABLE_INTR(mpt);
1220         if (mptsas_register_intrs(mpt) == FALSE)


1283                 goto fail;
1284         smp_attach_setup++;
1285 
1286         if (mptsas_cache_create(mpt) == FALSE)
1287                 goto fail;
1288 
1289         mpt->m_scsi_reset_delay      = ddi_prop_get_int(DDI_DEV_T_ANY,
1290             dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
1291         if (mpt->m_scsi_reset_delay == 0) {
1292                 mptsas_log(mpt, CE_NOTE,
1293                     "scsi_reset_delay of 0 is not recommended,"
1294                     " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1295                 mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1296         }
1297 
1298         /*
1299          * Initialize the wait and done FIFO queue
1300          */
1301         mpt->m_donetail = &mpt->m_doneq;
1302         mpt->m_waitqtail = &mpt->m_waitq;


1303 
1304         /*
1305          * Initialize the IOC cmd queue
1306          */
1307         mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1308         mpt->m_dev_handle = 0xFFFF;
1309 
1310         MPTSAS_ENABLE_INTR(mpt);
1311 
1312         /*
1313          * enable event notification
1314          */
1315         mutex_enter(&mpt->m_mutex);
1316         if (mptsas_ioc_enable_event_notification(mpt)) {
1317                 mutex_exit(&mpt->m_mutex);
1318                 goto fail;
1319         }
1320         mutex_exit(&mpt->m_mutex);
1321 
1322         /*


1442                                     &mpt->m_doneq_mutex);
1443                         }
1444                         for (j = 0; j < doneq_thread_num; j++) {
1445                                 cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1446                                 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1447                         }
1448                         kmem_free(mpt->m_doneq_thread_id,
1449                             sizeof (mptsas_doneq_thread_list_t)
1450                             * doneq_thread_num);
1451                         mutex_exit(&mpt->m_doneq_mutex);
1452                         cv_destroy(&mpt->m_doneq_thread_cv);
1453                         mutex_destroy(&mpt->m_doneq_mutex);
1454                 }
1455                 if (event_taskq_create) {
1456                         ddi_taskq_destroy(mpt->m_event_taskq);
1457                 }
1458                 if (dr_taskq_create) {
1459                         ddi_taskq_destroy(mpt->m_dr_taskq);
1460                 }
1461                 if (mutex_init_done) {
1462                         mutex_destroy(&mpt->m_intr_mutex);
1463                         mutex_destroy(&mpt->m_passthru_mutex);
1464                         mutex_destroy(&mpt->m_mutex);
1465                         for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1466                                 mutex_destroy(
1467                                     &mpt->m_phy_info[i].smhba_info.phy_mutex);
1468                         }
1469                         cv_destroy(&mpt->m_cv);
1470                         cv_destroy(&mpt->m_passthru_cv);
1471                         cv_destroy(&mpt->m_fw_cv);
1472                         cv_destroy(&mpt->m_config_cv);
1473                         cv_destroy(&mpt->m_fw_diag_cv);
1474                 }
1475 
1476                 if (map_setup) {
1477                         mptsas_cfg_fini(mpt);
1478                 }
1479                 if (config_setup) {
1480                         mptsas_config_space_fini(mpt);
1481                 }
1482                 mptsas_free_handshake_msg(mpt);


1860         mptsas_hash_uninit(&mpt->m_active->m_tgttbl, sizeof (mptsas_target_t));
1861         mptsas_hash_uninit(&mpt->m_active->m_smptbl, sizeof (mptsas_smp_t));
1862         mptsas_free_active_slots(mpt);
1863         mutex_exit(&mpt->m_mutex);
1864 
1865         /* deallocate everything that was allocated in mptsas_attach */
1866         mptsas_cache_destroy(mpt);
1867 
1868         mptsas_hba_fini(mpt);
1869         mptsas_cfg_fini(mpt);
1870 
1871         /* Lower the power, informing the PM framework */
1872         if (mpt->m_options & MPTSAS_OPT_PM) {
1873                 if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
1874                         mptsas_log(mpt, CE_WARN,
1875                             "!mptsas%d: Lower power request failed "
1876                             "during detach, ignoring.",
1877                             mpt->m_instance);
1878         }
1879 
1880         mutex_destroy(&mpt->m_intr_mutex);
1881         mutex_destroy(&mpt->m_passthru_mutex);
1882         mutex_destroy(&mpt->m_mutex);
1883         for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1884                 mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
1885         }
1886         cv_destroy(&mpt->m_cv);
1887         cv_destroy(&mpt->m_passthru_cv);
1888         cv_destroy(&mpt->m_fw_cv);
1889         cv_destroy(&mpt->m_config_cv);
1890         cv_destroy(&mpt->m_fw_diag_cv);
1891 
1892 
1893         mptsas_smp_teardown(mpt);
1894         mptsas_hba_teardown(mpt);
1895 
1896         mptsas_config_space_fini(mpt);
1897 
1898         mptsas_free_handshake_msg(mpt);
1899 
1900         mptsas_fm_fini(mpt);


2224                     &mpt->m_reg->Doorbell)) &
2225                     MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
2226                         if (polls++ > 3000) {
2227                                 break;
2228                         }
2229                         delay(drv_usectohz(10000));
2230                 }
2231                 /*
2232                  * If IOC is not in operational state, try to hard reset it.
2233                  */
2234                 if ((ioc_status & MPI2_IOC_STATE_MASK) !=
2235                     MPI2_IOC_STATE_OPERATIONAL) {
2236                         mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
2237                         if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
2238                                 mptsas_log(mpt, CE_WARN,
2239                                     "mptsas_power: hard reset failed");
2240                                 mutex_exit(&mpt->m_mutex);
2241                                 return (DDI_FAILURE);
2242                         }
2243                 }
2244                 mutex_enter(&mpt->m_intr_mutex);
2245                 mpt->m_power_level = PM_LEVEL_D0;
2246                 mutex_exit(&mpt->m_intr_mutex);
2247                 break;
2248         case PM_LEVEL_D3:
2249                 NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
2250                 MPTSAS_POWER_OFF(mpt);
2251                 break;
2252         default:
2253                 mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
2254                     mpt->m_instance, level);
2255                 rval = DDI_FAILURE;
2256                 break;
2257         }
2258         mutex_exit(&mpt->m_mutex);
2259         return (rval);
2260 }
2261 
2262 /*
2263  * Initialize configuration space and figure out which
2264  * chip and chip revision the mpt driver is using.
2265  */
2266 static int


2617 
2618         /*
2619          * Store the reply descriptor post queue memory address.  This chip
2620          * uses this address to write to the reply descriptor post queue.  The
2621          * second address is the address mpt uses to manage the queue.
2622          */
2623         mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
2624         mpt->m_post_queue = memp;
2625 
2626         /*
2627          * Clear the reply post queue memory.
2628          */
2629         bzero(mpt->m_post_queue, mem_size);
2630 
2631         return (DDI_SUCCESS);
2632 }
2633 
2634 static void
2635 mptsas_alloc_reply_args(mptsas_t *mpt)
2636 {
2637         if (mpt->m_replyh_args != NULL) {
2638                 kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
2639                     * mpt->m_max_replies);
2640                 mpt->m_replyh_args = NULL;
2641         }
2642         mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
2643             mpt->m_max_replies, KM_SLEEP);

2644 }
2645 
2646 static int
2647 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2648 {
2649         mptsas_cache_frames_t   *frames = NULL;
2650         if (cmd->cmd_extra_frames == NULL) {
2651                 frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2652                 if (frames == NULL) {
2653                         return (DDI_FAILURE);
2654                 }
2655                 cmd->cmd_extra_frames = frames;
2656         }
2657         return (DDI_SUCCESS);
2658 }
2659 
2660 static void
2661 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2662 {
2663         if (cmd->cmd_extra_frames) {


2982         if (rval != TRAN_ACCEPT) {
2983                 return (rval);
2984         }
2985 
2986         /*
2987          * Send the command to target/lun, however your HBA requires it.
2988          * If busy, return TRAN_BUSY; if there's some other formatting error
2989          * in the packet, return TRAN_BADPKT; otherwise, fall through to the
2990          * return of TRAN_ACCEPT.
2991          *
2992          * Remember that access to shared resources, including the mptsas_t
2993          * data structure and the HBA hardware registers, must be protected
2994          * with mutexes, here and everywhere.
2995          *
2996          * Also remember that at interrupt time, you'll get an argument
2997          * to the interrupt handler which is a pointer to your mptsas_t
2998          * structure; you'll have to remember which commands are outstanding
2999          * and which scsi_pkt is the currently-running command so the
3000          * interrupt handler can refer to the pkt to set completion
3001          * status, call the target driver back through pkt_comp, etc.




















3002          */
3003 
3004         mutex_enter(&ptgt->m_tgt_intr_mutex);














3005         if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3006                 if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3007                         /*








































3008                          * commands should be allowed to retry by
3009                          * returning TRAN_BUSY to stall the I/Os
3010                          * which come from scsi_vhci, since the device/
3011                          * path is in an unstable state now.
3012                          */
3013                         mutex_exit(&ptgt->m_tgt_intr_mutex);
3014                         return (TRAN_BUSY);
3015                 } else {
3016                         /*
3017                          * The device is offline, just fail the
3018                          * command by returning TRAN_FATAL_ERROR.
3019                          */
3020                         mutex_exit(&ptgt->m_tgt_intr_mutex);
3021                         return (TRAN_FATAL_ERROR);
3022                 }
3023         }
3024         mutex_exit(&ptgt->m_tgt_intr_mutex);
3025         rval = mptsas_accept_pkt(mpt, cmd);


3026 
3027         return (rval);
3028 }
3029 



3030 static int








































3031 mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3032 {
3033         int             rval = TRAN_ACCEPT;
3034         mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3035 
3036         NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));
3037 


3038         if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
3039                 rval = mptsas_prepare_pkt(cmd);
3040                 if (rval != TRAN_ACCEPT) {
3041                         cmd->cmd_flags &= ~CFLAG_TRANFLAG;
3042                         return (rval);
3043                 }
3044         }
3045 
3046         /*
3047          * reset the throttle if we were draining
3048          */
3049         mutex_enter(&ptgt->m_tgt_intr_mutex);
3050         if ((ptgt->m_t_ncmds == 0) &&
3051             (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
3052                 NDBG23(("reset throttle"));
3053                 ASSERT(ptgt->m_reset_delay == 0);
3054                 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
3055         }
3056 
3057          * If the device handle has already been invalidated, just
3058          * fail the command. In theory, a command from a scsi_vhci
3059          * client should never arrive with an invalid devhdl, since
3060          * the devhdl is invalidated only after the path is offlined,
3061          * and the target driver should not select an offlined path.
3062          * driver is not suppose to select a offlined path.
3063          */
3064         if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
3065                 NDBG20(("rejecting command, possibly because of an "
3066                     "invalid devhdl."));
3067                 mutex_exit(&ptgt->m_tgt_intr_mutex);
3068                 mutex_enter(&mpt->m_mutex);
3069                 /*
3070                  * If HBA is being reset, the DevHandles are being
3071                  * re-initialized, which means that they could be invalid
3072                  * even if the target is still attached. Check if being reset
3073                  * and if DevHandle is being re-initialized. If this is the
3074                  * case, return BUSY so the I/O can be retried later.
3075                  */
3076                 if (mpt->m_in_reset) {
3077                         mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
3078                             STAT_BUS_RESET);
3079                         if (cmd->cmd_flags & CFLAG_TXQ) {
3080                                 mptsas_doneq_add(mpt, cmd);
3081                                 mptsas_doneq_empty(mpt);
3082                                 mutex_exit(&mpt->m_mutex);
3083                                 return (rval);
3084                         } else {
3085                                 mutex_exit(&mpt->m_mutex);
3086                                 return (TRAN_BUSY);
3087                         }
3088                 }











3089                 mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
3090                 if (cmd->cmd_flags & CFLAG_TXQ) {
3091                         mptsas_doneq_add(mpt, cmd);
3092                         mptsas_doneq_empty(mpt);
3093                         mutex_exit(&mpt->m_mutex);
3094                         return (rval);
3095                 } else {
3096                         mutex_exit(&mpt->m_mutex);
3097                         return (TRAN_FATAL_ERROR);
3098                 }
3099         }
3100         mutex_exit(&ptgt->m_tgt_intr_mutex);
3101         /*
3102          * The first case is the normal case.  mpt gets a command from the
3103          * target driver and starts it.
3104          * Since SMID 0 and the TM slot are both reserved, the actual
3105          * maximum number of commands is m_max_requests - 2.
3106          */
3107         mutex_enter(&ptgt->m_tgt_intr_mutex);
3108         if ((ptgt->m_t_throttle > HOLD_THROTTLE) &&
3109             (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
3110             (ptgt->m_reset_delay == 0) &&
3111             (ptgt->m_t_nwait == 0) &&
3112             ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
3113                 mutex_exit(&ptgt->m_tgt_intr_mutex);
3114                 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
3115                         (void) mptsas_start_cmd0(mpt, cmd);
3116                 } else {
3117                         mutex_enter(&mpt->m_mutex);
3118                         mptsas_waitq_add(mpt, cmd);
3119                         mutex_exit(&mpt->m_mutex);
3120                 }
3121         } else {
3122                 /*
3123                  * Add this pkt to the work queue
3124                  */
3125                 mutex_exit(&ptgt->m_tgt_intr_mutex);
3126                 mutex_enter(&mpt->m_mutex);
3127                 mptsas_waitq_add(mpt, cmd);
3128 
3129                 if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
3130                         (void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);
3131 
3132                         /*
3133                          * Only flush the doneq if this is not a TM
3134                          * cmd.  For TM cmds the flushing of the
3135                          * doneq will be done in those routines.
3136                          */
3137                         if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
3138                                 mptsas_doneq_empty(mpt);
3139                         }
3140                 }
3141                 mutex_exit(&mpt->m_mutex);
3142         }
3143         return (rval);
3144 }
3145 
3146 int
3147 mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
3148 {
3149         mptsas_slots_t  *slots;
3150         int             slot;
3151         mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3152         mptsas_slot_free_e_t    *pe;
3153         int             qn, qn_first;
3154 

3155         slots = mpt->m_active;
3156 
3157         /*
3158          * Account for reserved TM request slot and reserved SMID of 0.
3159          */
3160         ASSERT(slots->m_n_slots == (mpt->m_max_requests - 2));
3161 
3162         qn = qn_first = CPU->cpu_seqid & (mpt->m_slot_freeq_pair_n - 1);
3163 
3164 qpair_retry:
3165         ASSERT(qn < mpt->m_slot_freeq_pair_n);
3166         mutex_enter(&mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_mutex);
3167         pe = list_head(&mpt->m_slot_freeq_pairp[qn].m_slot_allocq.
3168             s.m_fq_list);
3169         if (!pe) { /* switch the allocq and releq */
3170                 mutex_enter(&mpt->m_slot_freeq_pairp[qn].m_slot_releq.
3171                     s.m_fq_mutex);
3172                 if (mpt->m_slot_freeq_pairp[qn].m_slot_releq.s.m_fq_n) {
3173                         mpt->m_slot_freeq_pairp[qn].
3174                             m_slot_allocq.s.m_fq_n =
3175                             mpt->m_slot_freeq_pairp[qn].
3176                             m_slot_releq.s.m_fq_n;
3177                         mpt->m_slot_freeq_pairp[qn].
3178                             m_slot_allocq.s.m_fq_list.list_head.list_next =
3179                             mpt->m_slot_freeq_pairp[qn].
3180                             m_slot_releq.s.m_fq_list.list_head.list_next;
3181                         mpt->m_slot_freeq_pairp[qn].
3182                             m_slot_allocq.s.m_fq_list.list_head.list_prev =
3183                             mpt->m_slot_freeq_pairp[qn].
3184                             m_slot_releq.s.m_fq_list.list_head.list_prev;
3185                         mpt->m_slot_freeq_pairp[qn].
3186                             m_slot_releq.s.m_fq_list.list_head.list_prev->
3187                             list_next =
3188                             &mpt->m_slot_freeq_pairp[qn].
3189                             m_slot_allocq.s.m_fq_list.list_head;
3190                         mpt->m_slot_freeq_pairp[qn].
3191                             m_slot_releq.s.m_fq_list.list_head.list_next->
3192                             list_prev =
3193                             &mpt->m_slot_freeq_pairp[qn].
3194                             m_slot_allocq.s.m_fq_list.list_head;
3195 
3196                         mpt->m_slot_freeq_pairp[qn].
3197                             m_slot_releq.s.m_fq_list.list_head.list_next =
3198                             mpt->m_slot_freeq_pairp[qn].
3199                             m_slot_releq.s.m_fq_list.list_head.list_prev =
3200                             &mpt->m_slot_freeq_pairp[qn].
3201                             m_slot_releq.s.m_fq_list.list_head;
3202                         mpt->m_slot_freeq_pairp[qn].
3203                             m_slot_releq.s.m_fq_n = 0;
3204                 } else {
3205                         mutex_exit(&mpt->m_slot_freeq_pairp[qn].
3206                             m_slot_releq.s.m_fq_mutex);
3207                         mutex_exit(&mpt->m_slot_freeq_pairp[qn].
3208                             m_slot_allocq.s.m_fq_mutex);
3209                         qn = (qn + 1) & (mpt->m_slot_freeq_pair_n - 1);
3210                         if (qn == qn_first)
3211                                 return (FALSE);
3212                         else
3213                                 goto qpair_retry;
3214                 }
3215                 mutex_exit(&mpt->m_slot_freeq_pairp[qn].
3216                     m_slot_releq.s.m_fq_mutex);
3217                 pe = list_head(&mpt->m_slot_freeq_pairp[qn].
3218                     m_slot_allocq.s.m_fq_list);
3219                 ASSERT(pe);
3220         }
3221         list_remove(&mpt->m_slot_freeq_pairp[qn].
3222             m_slot_allocq.s.m_fq_list, pe);
3223         slot = pe->slot;
3224         /*
3225          * Make sure the SMID is neither the reserved value of 0
3226          * nor the reserved TM request slot.
3227          */
3228         ASSERT((slot > 0) && (slot <= slots->m_n_slots) &&
3229             mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_n > 0);
3230         cmd->cmd_slot = slot;
3231         mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_n--;
3232         ASSERT(mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_n >= 0);
3233 
3234         mutex_exit(&mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_mutex);
3235         /*
3236          * Only increment the per-target ncmds if this command has a
3237          * target associated with it (i.e. it is not an internal command
3238          * such as an event acknowledgment).
3239          */
3240         if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
3241                 mutex_enter(&ptgt->m_tgt_intr_mutex);
3242                 ptgt->m_t_ncmds++;
3243                 mutex_exit(&ptgt->m_tgt_intr_mutex);
3244         }
3245         cmd->cmd_active_timeout = cmd->cmd_pkt->pkt_time;
3246 
3247         /*
3248          * If the initial timeout is less than or equal to one tick, bump
3249          * the timeout by a tick so that the command doesn't time out
3250          * before its allotted time.
3251          */
3252         if (cmd->cmd_active_timeout <= mptsas_scsi_watchdog_tick) {
3253                 cmd->cmd_active_timeout += mptsas_scsi_watchdog_tick;
3254         }
3255         return (TRUE);



















3256 }
3257 
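
The list-head surgery in mptsas_save_cmd() above implements a two-list free-slot scheme: slots are taken from an alloc queue, completions return slots to a release queue, and when the alloc queue runs dry the two lists are swapped under both locks. A simplified, hypothetical sketch of the same idea using the list(9F) helpers follows; none of the example_* names are driver code, and the real driver additionally retries the next queue pair (the qpair_retry label) when both lists of a pair are empty.

#include <sys/list.h>
#include <sys/ksynch.h>

typedef struct example_slot {
        list_node_t     es_node;
        int             es_slot;
} example_slot_t;

typedef struct example_freeq_pair {
        kmutex_t        efp_alloc_lock;
        list_t          efp_allocq;     /* slots handed out to new cmds */
        kmutex_t        efp_rele_lock;
        list_t          efp_releq;      /* slots returned at completion */
} example_freeq_pair_t;

/* Take one free slot, refilling the alloc queue from the release queue. */
static example_slot_t *
example_take_slot(example_freeq_pair_t *qp)
{
        example_slot_t  *pe;

        mutex_enter(&qp->efp_alloc_lock);
        if ((pe = list_head(&qp->efp_allocq)) == NULL) {
                mutex_enter(&qp->efp_rele_lock);
                list_move_tail(&qp->efp_allocq, &qp->efp_releq);
                mutex_exit(&qp->efp_rele_lock);
                pe = list_head(&qp->efp_allocq);
        }
        if (pe != NULL)
                list_remove(&qp->efp_allocq, pe);
        mutex_exit(&qp->efp_alloc_lock);
        return (pe);
}
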
3258 /*
3259  * prepare the pkt:
3260  * the pkt may have been resubmitted or just reused so
3261  * initialize some fields and do some checks.
3262  */
3263 static int
3264 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3265 {
3266         struct scsi_pkt *pkt = CMD2PKT(cmd);
3267 
3268         NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3269 
3270         /*
3271          * Reinitialize some fields that need it; the packet may
3272          * have been resubmitted
3273          */
3274         pkt->pkt_reason = CMD_CMPLT;
3275         pkt->pkt_state = 0;


3303 
3304         return (TRAN_ACCEPT);
3305 }
3306 
3307 /*
3308  * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
3309  *
3310  * One of three possibilities:
3311  *      - allocate scsi_pkt
3312  *      - allocate scsi_pkt and DMA resources
3313  *      - allocate DMA resources to an already-allocated pkt
3314  */
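
As a caller-side illustration of the three possibilities listed above, a hypothetical target-driver fragment would reach this routine through scsi_init_pkt(9F) and the HBA's tran_init_pkt vector; devp, bp and the CDB/status lengths below are assumptions for the sketch, not values taken from this driver.

#include <sys/scsi/scsi.h>

/* Hypothetical target-driver helper showing the three call shapes. */
static struct scsi_pkt *
example_target_init_pkt(struct scsi_device *devp, struct scsi_pkt *pkt,
    struct buf *bp)
{
        if (pkt == NULL && bp == NULL) {
                /* 1. allocate a scsi_pkt only */
                return (scsi_init_pkt(&devp->sd_address, NULL, NULL,
                    CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 0,
                    SLEEP_FUNC, NULL));
        } else if (pkt == NULL) {
                /* 2. allocate a scsi_pkt and DMA resources for bp */
                return (scsi_init_pkt(&devp->sd_address, NULL, bp,
                    CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 0,
                    SLEEP_FUNC, NULL));
        } else {
                /* 3. add DMA resources to the already-allocated pkt */
                return (scsi_init_pkt(&devp->sd_address, pkt, bp,
                    CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 0,
                    SLEEP_FUNC, NULL));
        }
}
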
3315 static struct scsi_pkt *
3316 mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
3317     struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
3318     int (*callback)(), caddr_t arg)
3319 {
3320         mptsas_cmd_t            *cmd, *new_cmd;
3321         mptsas_t                *mpt = ADDR2MPT(ap);
3322         int                     failure = 1;
3323 #ifndef __sparc
3324         uint_t                  oldcookiec;
3325 #endif  /* __sparc */
3326         mptsas_target_t         *ptgt = NULL;
3327         int                     rval;
3328         mptsas_tgt_private_t    *tgt_private;
3329         int                     kf;
3330 
3331         kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
3332 
3333         tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
3334             tran_tgt_private;
3335         ASSERT(tgt_private != NULL);
3336         if (tgt_private == NULL) {
3337                 return (NULL);
3338         }
3339         ptgt = tgt_private->t_private;
3340         ASSERT(ptgt != NULL);
3341         if (ptgt == NULL)
3342                 return (NULL);
3343         ap->a_target = ptgt->m_devhdl;
3344         ap->a_lun = tgt_private->t_lun;
3345 
3346         ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
3347 #ifdef MPTSAS_TEST_EXTRN_ALLOC
3348         statuslen *= 100; tgtlen *= 4;
3349 #endif
3350         NDBG3(("mptsas_scsi_init_pkt:\n"
3351             "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
3352             ap->a_target, (void *)pkt, (void *)bp,
3353             cmdlen, statuslen, tgtlen, flags));
3354 
3355         /*
3356          * Allocate the new packet.
3357          */
3358         if (pkt == NULL) {
3359                 ddi_dma_handle_t        save_dma_handle;
3360                 ddi_dma_handle_t        save_arq_dma_handle;
3361                 struct buf              *save_arq_bp;
3362                 ddi_dma_cookie_t        save_arqcookie;
3363 #ifdef  __sparc
3364                 mptti_t                 *save_sg;
3365 #endif  /* __sparc */
3366 
3367                 cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);
3368 
3369                 if (cmd) {
3370                         save_dma_handle = cmd->cmd_dmahandle;
3371                         save_arq_dma_handle = cmd->cmd_arqhandle;
3372                         save_arq_bp = cmd->cmd_arq_buf;
3373                         save_arqcookie = cmd->cmd_arqcookie;
3374 #ifdef  __sparc
3375                         save_sg = cmd->cmd_sg;
3376 #endif  /* __sparc */
3377                         bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
3378                         cmd->cmd_dmahandle = save_dma_handle;
3379                         cmd->cmd_arqhandle = save_arq_dma_handle;
3380                         cmd->cmd_arq_buf = save_arq_bp;
3381                         cmd->cmd_arqcookie = save_arqcookie;
3382 #ifdef  __sparc
3383                         cmd->cmd_sg = save_sg;
3384 #endif  /* __sparc */
3385                         pkt = (void *)((uchar_t *)cmd +
3386                             sizeof (struct mptsas_cmd));
3387                         pkt->pkt_ha_private = (opaque_t)cmd;
3388                         pkt->pkt_address = *ap;
3389                         pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
3390                         pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
3391                         pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
3392                         cmd->cmd_pkt = (struct scsi_pkt *)pkt;
3393                         cmd->cmd_cdblen = (uchar_t)cmdlen;
3394                         cmd->cmd_scblen = statuslen;
3395                         cmd->cmd_rqslen = SENSE_LENGTH;
3396                         cmd->cmd_tgt_addr = ptgt;
3397                         failure = 0;
3398                 }
3399 
3400                 if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
3401                     (tgtlen > PKT_PRIV_LEN) ||
3402                     (statuslen > EXTCMDS_STATUS_SIZE)) {
3403                         if (failure == 0) {
3404                                 /*


3407                                  */
3408                                 failure = mptsas_pkt_alloc_extern(mpt, cmd,
3409                                     cmdlen, tgtlen, statuslen, kf);
3410                         }
3411                         if (failure) {
3412                                 /*
3413                                  * if extern allocation fails, it will
3414                                  * deallocate the new pkt as well
3415                                  */
3416                                 return (NULL);
3417                         }
3418                 }
3419                 new_cmd = cmd;
3420 
3421         } else {
3422                 cmd = PKT2CMD(pkt);
3423                 new_cmd = NULL;
3424         }
3425 
3426 
3427 #ifndef __sparc
3428         /* grab cmd->cmd_cookiec here as oldcookiec */
3429 
3430         oldcookiec = cmd->cmd_cookiec;
3431 #endif  /* __sparc */
3432 
3433         /*
3434          * If the dma was broken up into PARTIAL transfers cmd_nwin will be
3435          * greater than 0 and we'll need to grab the next dma window
3436          */
3437         /*
3438          * SLM-not doing extra command frame right now; may add later
3439          */
3440 
3441         if (cmd->cmd_nwin > 0) {
3442 
3443                 /*
3444                  * Make sure we haven't gone past the total number
3445                  * of windows
3446                  */
3447                 if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
3448                         return (NULL);
3449                 }
3450                 if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
3451                     &cmd->cmd_dma_offset, &cmd->cmd_dma_len,


3561                                 bioerror(bp, ENOMEM);
3562                                 if (new_cmd) {
3563                                         mptsas_scsi_destroy_pkt(ap, pkt);
3564                                 }
3565                                 return ((struct scsi_pkt *)NULL);
3566                         }
3567                 }
3568 
3569                 /*
3570                  * Always use scatter-gather transfer
3571                  * Use the loop below to store physical addresses of
3572                  * DMA segments, from the DMA cookies, into your HBA's
3573                  * scatter-gather list.
3574                  * We need to ensure we have enough kmem alloc'd
3575                  * for the sg entries since we are no longer using an
3576                  * array inside mptsas_cmd_t.
3577                  *
3578                  * We check cmd->cmd_cookiec against oldcookiec so
3579                  * the scatter-gather list is correctly allocated
3580                  */
3581 #ifndef __sparc
3582                 if (oldcookiec != cmd->cmd_cookiec) {
3583                         if (cmd->cmd_sg != (mptti_t *)NULL) {
3584                                 kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
3585                                     oldcookiec);
3586                                 cmd->cmd_sg = NULL;
3587                         }
3588                 }
3589 
3590                 if (cmd->cmd_sg == (mptti_t *)NULL) {
3591                         cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
3592                             cmd->cmd_cookiec), kf);
3593 
3594                         if (cmd->cmd_sg == (mptti_t *)NULL) {
3595                                 mptsas_log(mpt, CE_WARN,
3596                                     "unable to kmem_alloc enough memory "
3597                                     "for scatter/gather list");
3598                                 /*
3599                                  * If we hit an ENOMEM condition, behave the
3600                                  * same way as the rest of this routine.
3601                                  */
3602 
3603                                 bioerror(bp, ENOMEM);
3604                                 if (new_cmd) {
3605                                         mptsas_scsi_destroy_pkt(ap, pkt);
3606                                 }
3607                                 return ((struct scsi_pkt *)NULL);
3608                         }
3609                 }
3610 #endif  /* __sparc */
3611                 dmap = cmd->cmd_sg;
3612 
3613                 ASSERT(cmd->cmd_cookie.dmac_size != 0);
3614 
3615                 /*
3616                  * store the first segment into the S/G list
3617                  */
3618                 dmap->count = cmd->cmd_cookie.dmac_size;
3619                 dmap->addr.address64.Low = (uint32_t)
3620                     (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3621                 dmap->addr.address64.High = (uint32_t)
3622                     (cmd->cmd_cookie.dmac_laddress >> 32);
3623 
3624                 /*
3625                  * dmacount counts the size of the dma for this window
3626                  * (if partial dma is being used).  totaldmacount
3627                  * keeps track of the total amount of dma we have
3628                  * transferred for all the windows (needed to calculate
3629                  * the resid value below).
3630                  */


3670 /*
3671  * tran_destroy_pkt(9E) - scsi_pkt(9S) deallocation
3672  *
3673  * Notes:
3674  *      - also frees DMA resources if allocated
3675  *      - implicit DMA synchronization
3676  */
3677 static void
3678 mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
3679 {
3680         mptsas_cmd_t    *cmd = PKT2CMD(pkt);
3681         mptsas_t        *mpt = ADDR2MPT(ap);
3682 
3683         NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
3684             ap->a_target, (void *)pkt));
3685 
3686         if (cmd->cmd_flags & CFLAG_DMAVALID) {
3687                 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
3688                 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3689         }
3690 #ifndef __sparc
3691         if (cmd->cmd_sg) {
3692                 kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
3693                 cmd->cmd_sg = NULL;
3694         }
3695 #endif  /* __sparc */
3696         mptsas_free_extra_sgl_frame(mpt, cmd);
3697 
3698         if ((cmd->cmd_flags &
3699             (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
3700             CFLAG_SCBEXTERN)) == 0) {
3701                 cmd->cmd_flags = CFLAG_FREE;
3702                 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
3703         } else {
3704                 mptsas_pkt_destroy_extern(mpt, cmd);
3705         }
3706 }
3707 
3708 /*
3709  * kmem cache constructor and destructor:
3710  * When constructing, we bzero the cmd and allocate the dma handle.
3711  * When destructing, we just free the dma handle.
3712  */
3713 static int
3714 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
3715 {


3753         if ((ddi_dma_alloc_handle(mpt->m_dip, &arq_dma_attr, callback,
3754             NULL, &cmd->cmd_arqhandle)) != DDI_SUCCESS) {
3755                 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3756                 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3757                 cmd->cmd_dmahandle = NULL;
3758                 cmd->cmd_arqhandle = NULL;
3759                 return (-1);
3760         }
3761 
3762         if (ddi_dma_buf_bind_handle(cmd->cmd_arqhandle,
3763             cmd->cmd_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
3764             callback, NULL, &cmd->cmd_arqcookie, &cookiec) != DDI_SUCCESS) {
3765                 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3766                 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3767                 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3768                 cmd->cmd_dmahandle = NULL;
3769                 cmd->cmd_arqhandle = NULL;
3770                 cmd->cmd_arq_buf = NULL;
3771                 return (-1);
3772         }
3773         /*
3774          * On sparc, the SGL length is 1 in most cases, so we pre-allocate
3775          * it in the cache. On x86, the maximum would be 256, and
3776          * pre-allocating the maximum would waste a lot of memory,
3777          * especially when many cmds are put onto the waitq.
3778          */
3779 #ifdef  __sparc
3780         cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
3781             MPTSAS_MAX_CMD_SEGS), KM_SLEEP);
3782 #endif  /* __sparc */
3783 
3784         return (0);
3785 }
3786 
3787 static void
3788 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
3789 {
3790 #ifndef __lock_lint
3791         _NOTE(ARGUNUSED(cdrarg))
3792 #endif
3793         mptsas_cmd_t    *cmd = buf;
3794 
3795         NDBG4(("mptsas_kmem_cache_destructor"));
3796 
3797         if (cmd->cmd_arqhandle) {
3798                 (void) ddi_dma_unbind_handle(cmd->cmd_arqhandle);
3799                 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3800                 cmd->cmd_arqhandle = NULL;
3801         }
3802         if (cmd->cmd_arq_buf) {
3803                 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3804                 cmd->cmd_arq_buf = NULL;
3805         }
3806         if (cmd->cmd_dmahandle) {
3807                 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3808                 cmd->cmd_dmahandle = NULL;
3809         }
3810 #ifdef  __sparc
3811         if (cmd->cmd_sg) {
3812                 kmem_free(cmd->cmd_sg, sizeof (mptti_t)* MPTSAS_MAX_CMD_SEGS);
3813                 cmd->cmd_sg = NULL;
3814         }
3815 #endif  /* __sparc */
3816 }
3817 
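
For context, the constructor/destructor pair above is registered when the cmd cache is created in mptsas_cache_create(), which is not shown in this view. A hedged sketch of that registration using kmem_cache_create(9F) follows; the cache name and alignment are illustrative, and the buffer size simply mirrors the bzero() in mptsas_scsi_init_pkt() above.

#include <sys/kmem.h>

/* Sketch only: the real values live in mptsas_cache_create(). */
static kmem_cache_t *
example_create_cmd_cache(mptsas_t *mpt)
{
        return (kmem_cache_create("example_mptsas_cmd_cache",
            sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
            mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
            NULL, (void *)mpt, NULL, 0));
}
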
3818 static int
3819 mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
3820 {
3821         mptsas_cache_frames_t   *p = buf;
3822         mptsas_t                *mpt = cdrarg;
3823         ddi_dma_attr_t          frame_dma_attr;
3824         size_t                  mem_size, alloc_len;
3825         ddi_dma_cookie_t        cookie;
3826         uint_t                  ncookie;
3827         int (*callback)(caddr_t) = (kmflags == KM_SLEEP)
3828             ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
3829 
3830         frame_dma_attr = mpt->m_msg_dma_attr;
3831         frame_dma_attr.dma_attr_align = 0x10;
3832         frame_dma_attr.dma_attr_sgllen = 1;
3833 
3834         if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
3835             &p->m_dma_hdl) != DDI_SUCCESS) {


4457 
4458                 /*
4459                  * Sync DMA with the chain buffers that were just created
4460                  */
4461                 (void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
4462         }
4463 }
4464 
4465 /*
4466  * Interrupt handling
4467  * Utility routine.  Poll for status of a command sent to HBA
4468  * without interrupts (a FLAG_NOINTR command).
4469  */
4470 int
4471 mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
4472 {
4473         int     rval = TRUE;
4474 
4475         NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));
4476 
4477         /*
4478          * To avoid using m_mutex in the ISR (a separate mutex,
4479          * m_intr_mutex, was introduced) while keeping the same locking
4480          * logic, m_intr_mutex is used to protect getting and setting
4481          * the ReplyDescriptorIndex.
4482          *
4483          * Since m_intr_mutex is released while the polled cmd is being
4484          * processed, the poll flag is set early here to make sure the
4485          * polled cmd is handled in this thread/context. A side effect is
4486          * that other cmds completing between the flag being set and
4487          * cleared are also handled in this thread rather than in the ISR.
4488          * Since polled cmds are uncommon, the performance degradation in
4489          * this case is not a big issue.
4490          */
4491         mutex_enter(&mpt->m_intr_mutex);
4492         mpt->m_polled_intr = 1;
4493         mutex_exit(&mpt->m_intr_mutex);
4494 
4495         if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
4496                 mptsas_restart_hba(mpt);
4497         }
4498 
4499         /*
4500          * Wait, using drv_usecwait(), long enough for the command to
4501          * reasonably return from the target if the target isn't
4502          * "dead".  A polled command may well be sent from scsi_poll, and
4503          * there are retries built in to scsi_poll if the transport
4504          * accepted the packet (TRAN_ACCEPT).  scsi_poll waits 1 second
4505          * and retries the transport up to scsi_poll_busycnt times
4506          * (currently 60) if
4507          * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
4508          * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
4509          *
4510          * limit the waiting to avoid a hang in the event that the
4511          * cmd never gets started but we are still receiving interrupts
4512          */
4513         while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
4514                 if (mptsas_wait_intr(mpt, polltime) == FALSE) {
4515                         NDBG5(("mptsas_poll: command incomplete"));
4516                         rval = FALSE;
4517                         break;
4518                 }
4519         }
4520 
4521         mutex_enter(&mpt->m_intr_mutex);
4522         mpt->m_polled_intr = 0;
4523         mutex_exit(&mpt->m_intr_mutex);
4524 
4525         if (rval == FALSE) {
4526 
4527                 /*
4528                  * This isn't supposed to happen; the HBA must be wedged.
4529                  * Mark this cmd as a timeout.
4530                  */
4531                 mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
4532                     (STAT_TIMEOUT|STAT_ABORTED));
4533 
4534                 if (poll_cmd->cmd_queued == FALSE) {
4535 
4536                         NDBG5(("mptsas_poll: not on waitq"));
4537 
4538                         poll_cmd->cmd_pkt->pkt_state |=
4539                             (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
4540                 } else {
4541 
4542                         /* find and remove it from the waitq */
4543                         NDBG5(("mptsas_poll: delete from waitq"));
4544                         mptsas_waitq_delete(mpt, poll_cmd);
4545                 }
4546 
4547         }
4548         mptsas_fma_check(mpt, poll_cmd);
4549         NDBG5(("mptsas_poll: done"));
4550         return (rval);
4551 }
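/*
 * A self-contained, user-level sketch of the polled-command hand-off used in
 * mptsas_poll() above: the poller sets a shared flag under the interrupt-path
 * mutex so completions are consumed in its own context, and the interrupt
 * handler leaves the reply queue alone while the flag is set.  The ex_* names,
 * pthreads and the bounded busy-wait are assumptions for illustration; the
 * actual reply consumption done by mptsas_wait_intr() is not modeled here.
 */
#ifdef MPTSAS_EXAMPLE_SKETCH
#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t  ex_intr_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool             ex_polled_intr;         /* models m_polled_intr */
static volatile bool    ex_cmd_finished;        /* models CFLAG_FINISHED */

/* Poller: claim the reply path, wait (bounded) for completion, release. */
static bool
ex_poll(int poll_ms)
{
        int elapsed;

        pthread_mutex_lock(&ex_intr_mutex);
        ex_polled_intr = true;          /* completions now handled here */
        pthread_mutex_unlock(&ex_intr_mutex);

        for (elapsed = 0; !ex_cmd_finished && elapsed < poll_ms; elapsed++)
                usleep(1000);           /* models drv_usecwait(1000) */

        pthread_mutex_lock(&ex_intr_mutex);
        ex_polled_intr = false;
        pthread_mutex_unlock(&ex_intr_mutex);

        return (ex_cmd_finished);       /* false => timed out */
}

/* Interrupt handler: back off while a poller owns the reply path. */
static bool
ex_isr(void)
{
        bool claimed;

        pthread_mutex_lock(&ex_intr_mutex);
        claimed = !ex_polled_intr;
        /* if claimed, normal reply processing would happen here */
        pthread_mutex_unlock(&ex_intr_mutex);
        return (claimed);
}
#endif  /* MPTSAS_EXAMPLE_SKETCH */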
4552 
4553 /*
4554  * Used for polling cmds and TM function
4555  */
4556 static int
4557 mptsas_wait_intr(mptsas_t *mpt, int polltime)
4558 {
4559         int                             cnt;
4560         pMpi2ReplyDescriptorsUnion_t    reply_desc_union;
4561         Mpi2ReplyDescriptorsUnion_t     reply_desc_union_v;
4562         uint32_t                        int_mask;
4563         uint8_t reply_type;
4564 
4565         NDBG5(("mptsas_wait_intr"));
4566 

4567 
4568         /*
4569          * Get the current interrupt mask and disable interrupts.  When
4570          * re-enabling ints, set mask to saved value.
4571          */
4572         int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
4573         MPTSAS_DISABLE_INTR(mpt);
4574 
4575         /*
4576          * Keep polling for at least (polltime * 1000) microseconds
4577          */
4578         for (cnt = 0; cnt < polltime; cnt++) {
4579                 mutex_enter(&mpt->m_intr_mutex);
4580                 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
4581                     DDI_DMA_SYNC_FORCPU);
4582 
4583                 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
4584                     MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
4585 
4586                 if (ddi_get32(mpt->m_acc_post_queue_hdl,
4587                     &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
4588                     ddi_get32(mpt->m_acc_post_queue_hdl,
4589                     &reply_desc_union->Words.High) == 0xFFFFFFFF) {
4590                         mutex_exit(&mpt->m_intr_mutex);
4591                         drv_usecwait(1000);
4592                         continue;
4593                 }
4594 
4595                 reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
4596                     &reply_desc_union->Default.ReplyFlags);
4597                 reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
4598                 reply_desc_union_v.Default.ReplyFlags = reply_type;
4599                 if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
4600                         reply_desc_union_v.SCSIIOSuccess.SMID =
4601                             ddi_get16(mpt->m_acc_post_queue_hdl,
4602                             &reply_desc_union->SCSIIOSuccess.SMID);
4603                 } else if (reply_type ==
4604                     MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
4605                         reply_desc_union_v.AddressReply.ReplyFrameAddress =
4606                             ddi_get32(mpt->m_acc_post_queue_hdl,
4607                             &reply_desc_union->AddressReply.ReplyFrameAddress);
4608                         reply_desc_union_v.AddressReply.SMID =
4609                             ddi_get16(mpt->m_acc_post_queue_hdl,
4610                             &reply_desc_union->AddressReply.SMID);
4611                 }
4612                 /*
4613                  * Clear the reply descriptor for re-use and increment
4614                  * index.
4615                  */
4616                 ddi_put64(mpt->m_acc_post_queue_hdl,
4617                     &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
4618                     0xFFFFFFFFFFFFFFFF);
4619                 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
4620                     DDI_DMA_SYNC_FORDEV);
4621 
4622                 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
4623                         mpt->m_post_index = 0;
4624                 }
4625 
4626                 /*
4627                  * Update the global reply index
4628                  */
4629                 ddi_put32(mpt->m_datap,
4630                     &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
4631                 mutex_exit(&mpt->m_intr_mutex);
4632 
4633                 /*
4634                  * The reply is valid, process it according to its
4635                  * type.
4636                  */
4637                 mptsas_process_intr(mpt, &reply_desc_union_v);
4638 
4639 
4640                 /*
4641                  * Re-enable interrupts and quit.
4642                  */
4643                 ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
4644                     int_mask);
4645                 return (TRUE);
4646 
4647         }
4648 
4649         /*
4650          * Polling timed out; re-enable interrupts and quit.
4651          */

4652         ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
4653         return (FALSE);
4654 }
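/*
 * A minimal user-level sketch of the reply-post ring discipline seen in
 * mptsas_wait_intr() above: unused descriptors carry an all-ones sentinel,
 * the consumer copies a valid descriptor out, resets the entry to the
 * sentinel and advances its index with wraparound before publishing it.
 * The single 64-bit sentinel compare stands in for the two 32-bit
 * ddi_get32() checks, and the ex_* names are assumptions.
 */
#ifdef MPTSAS_EXAMPLE_SKETCH
#include <stdint.h>
#include <stdbool.h>

#define EX_RING_DEPTH   8
#define EX_RING_EMPTY   0xFFFFFFFFFFFFFFFFULL   /* "unused" sentinel */

typedef struct ex_reply_ring {
        uint64_t        descq[EX_RING_DEPTH];   /* models m_post_queue */
        unsigned        host_index;             /* models m_post_index */
} ex_reply_ring_t;

static void
ex_ring_init(ex_reply_ring_t *r)
{
        unsigned i;

        for (i = 0; i < EX_RING_DEPTH; i++)
                r->descq[i] = EX_RING_EMPTY;
        r->host_index = 0;
}

/*
 * Consume one descriptor if present: check for the sentinel, copy the
 * descriptor out, reset the entry to the sentinel for reuse, and advance
 * (with wrap) the host index that would be written back to the chip.
 */
static bool
ex_ring_consume(ex_reply_ring_t *r, uint64_t *descp)
{
        uint64_t d = r->descq[r->host_index];

        if (d == EX_RING_EMPTY)
                return (false);                 /* nothing posted yet */

        *descp = d;
        r->descq[r->host_index] = EX_RING_EMPTY;
        if (++r->host_index == EX_RING_DEPTH)
                r->host_index = 0;
        /* here the driver would write host_index to ReplyPostHostIndex */
        return (true);
}
#endif  /* MPTSAS_EXAMPLE_SKETCH */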
4655 
4656 /*
4657  * For the fastpath, m_intr_mutex must be held from beginning to end, so we
4658  * only treat cmds that never need to release m_intr_mutex (even briefly) as
4659  * candidates for fast processing.  Otherwise we don't handle them here and
4660  * just return; the ISR then handles those cmds later with m_mutex held and
4661  * m_intr_mutex not held.
4662  */
4663 static int
4664 mptsas_handle_io_fastpath(mptsas_t *mpt,
4665     uint16_t SMID)
4666 {
4667         mptsas_slots_t                          *slots = mpt->m_active;
4668         mptsas_cmd_t                            *cmd = NULL;
4669         struct scsi_pkt                         *pkt;
4670 
4671         /*
4672          * This is a success reply so just complete the IO.  First, do a sanity
4673          * check on the SMID.  The final slot is used for TM requests, which
4674          * would not come into this reply handler.
4675          */
4676         if ((SMID == 0) || (SMID > slots->m_n_slots)) {
4677                 mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
4678                     SMID);
4679                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4680                 return (TRUE);
4681         }
4682 
4683         cmd = slots->m_slot[SMID];
4684 
4685         /*
4686          * print warning and return if the slot is empty
4687          */
4688         if (cmd == NULL) {
4689                 mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
4690                     "in slot %d", SMID);
4691                 return (TRUE);
4692         }
4693 
4694         pkt = CMD2PKT(cmd);
4695         pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
4696             STATE_GOT_STATUS);
4697         if (cmd->cmd_flags & CFLAG_DMAVALID) {
4698                 pkt->pkt_state |= STATE_XFERRED_DATA;
4699         }
4700         pkt->pkt_resid = 0;
4701 
4702         /*
4703          * If the cmd is an IOC cmd or a passthrough, we don't process it in
4704          * the fastpath; it will be handled later by mptsas_process_intr()
4705          * with m_mutex held.
4706          */
4707         if (cmd->cmd_flags & (CFLAG_PASSTHRU | CFLAG_CMDIOC)) {
4708                 return (FALSE);
4709         } else {
4710                 mptsas_remove_cmd0(mpt, cmd);
4711         }
4712 
4713         if (cmd->cmd_flags & CFLAG_RETRY) {
4714                 /*
4715                  * The target returned QFULL or busy; do not add this
4716                  * pkt to the doneq since the hba will retry
4717                  * this cmd.
4718                  *
4719                  * The pkt has already been resubmitted in
4720                  * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
4721                  * Remove this cmd_flag here.
4722                  */
4723                 cmd->cmd_flags &= ~CFLAG_RETRY;
4724         } else {
4725                 mptsas_doneq_add0(mpt, cmd);
4726         }
4727 
4728         /*
4729          * In the fastpath the cmd can only be a context reply, so checking
4730          * the post queue of the reply descriptor and the dmahandle of the
4731          * cmd is enough.  There is no sense data in this case, so there is
4732          * no need to check the dma handle where sense data dma info is
4733          * saved, the dma handle of the reply frame, or the dma handle of
4734          * the reply free queue.  As for the dma handle of the request
4735          * queue, check FMA here since the request must have already been
4736          * sent/DMAed correctly; checking it in mptsas_scsi_start() would
4737          * not be correct since the dma may not have started at that time.
4738          */
4739         if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
4740             DDI_SUCCESS) ||
4741             (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
4742             DDI_SUCCESS)) {
4743                 ddi_fm_service_impact(mpt->m_dip,
4744                     DDI_SERVICE_UNAFFECTED);
4745                 pkt->pkt_reason = CMD_TRAN_ERR;
4746                 pkt->pkt_statistics = 0;
4747         }
4748         if (cmd->cmd_dmahandle &&
4749             (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
4750                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4751                 pkt->pkt_reason = CMD_TRAN_ERR;
4752                 pkt->pkt_statistics = 0;
4753         }
4754         if ((cmd->cmd_extra_frames &&
4755             ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
4756             DDI_SUCCESS) ||
4757             (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
4758             DDI_SUCCESS)))) {
4759                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4760                 pkt->pkt_reason = CMD_TRAN_ERR;
4761                 pkt->pkt_statistics = 0;
4762         }
4763 
4764         return (TRUE);
4765 }
4766 
4767 static void
4768 mptsas_handle_scsi_io_success(mptsas_t *mpt,
4769     pMpi2ReplyDescriptorsUnion_t reply_desc)
4770 {
4771         pMpi2SCSIIOSuccessReplyDescriptor_t     scsi_io_success;
4772         uint16_t                                SMID;
4773         mptsas_slots_t                          *slots = mpt->m_active;
4774         mptsas_cmd_t                            *cmd = NULL;
4775         struct scsi_pkt                         *pkt;
4776 


4777         scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
4778         SMID = scsi_io_success->SMID;
4779 
4780         /*
4781          * This is a success reply so just complete the IO.  First, do a sanity
4782          * check on the SMID.  The final slot is used for TM requests, which
4783          * would not come into this reply handler.
4784          */
4785         if ((SMID == 0) || (SMID > slots->m_n_slots)) {
4786                 mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
4787                     SMID);
4788                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4789                 return;
4790         }
4791 
4792         cmd = slots->m_slot[SMID];
4793 
4794         /*
4795          * print warning and return if the slot is empty
4796          */
4797         if (cmd == NULL) {
4798                 mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "


4833 }
4834 
4835 static void
4836 mptsas_handle_address_reply(mptsas_t *mpt,
4837     pMpi2ReplyDescriptorsUnion_t reply_desc)
4838 {
4839         pMpi2AddressReplyDescriptor_t   address_reply;
4840         pMPI2DefaultReply_t             reply;
4841         mptsas_fw_diagnostic_buffer_t   *pBuffer;
4842         uint32_t                        reply_addr;
4843         uint16_t                        SMID, iocstatus;
4844         mptsas_slots_t                  *slots = mpt->m_active;
4845         mptsas_cmd_t                    *cmd = NULL;
4846         uint8_t                         function, buffer_type;
4847         m_replyh_arg_t                  *args;
4848         int                             reply_frame_no;
4849 
4850         ASSERT(mutex_owned(&mpt->m_mutex));
4851 
4852         address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;



4853 
4854         reply_addr = address_reply->ReplyFrameAddress;
4855         SMID = address_reply->SMID;
4856         /*
4857          * If reply frame is not in the proper range we should ignore this
4858          * message and exit the interrupt handler.
4859          */
4860         if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
4861             (reply_addr >= (mpt->m_reply_frame_dma_addr +
4862             (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
4863             ((reply_addr - mpt->m_reply_frame_dma_addr) %
4864             mpt->m_reply_frame_size != 0)) {
4865                 mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
4866                     "address 0x%x\n", reply_addr);
4867                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4868                 return;
4869         }
4870 
4871         (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
4872             DDI_DMA_SYNC_FORCPU);
4873         reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
4874             mpt->m_reply_frame_dma_addr));
4875         function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);


4941                 args->rfm = reply_addr;
4942 
4943                 /*
4944                  * Record the event if its type is enabled in
4945                  * this mpt instance by ioctl.
4946                  */
4947                 mptsas_record_event(args);
4948 
4949                 /*
4950                  * Handle time critical events
4951                  * NOT_RESPONDING/ADDED only now
4952                  */
4953                 if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
4954                         /*
4955                          * Do not block the main code path here;
4956                          * let the taskq resolve the ack action,
4957                          * and the ack is sent from the taskq thread.
4958                          */
4959                         NDBG20(("send mptsas_handle_event_sync success"));
4960                 }






4961                 if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
4962                     (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
4963                         mptsas_log(mpt, CE_WARN, "No memory available "
4964                             "to dispatch taskq");
4965                         /*
4966                          * Return the reply frame to the free queue.
4967                          */
4968                         ddi_put32(mpt->m_acc_free_queue_hdl,
4969                             &((uint32_t *)(void *)
4970                             mpt->m_free_queue)[mpt->m_free_index], reply_addr);
4971                         (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
4972                             DDI_DMA_SYNC_FORDEV);
4973                         if (++mpt->m_free_index == mpt->m_free_queue_depth) {
4974                                 mpt->m_free_index = 0;
4975                         }
4976 
4977                         ddi_put32(mpt->m_datap,
4978                             &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
4979                 }
4980                 return;


5095             scsi_status, ioc_status, scsi_state));
5096 
5097         pkt = CMD2PKT(cmd);
5098         *(pkt->pkt_scbp) = scsi_status;
5099 
5100         if (loginfo == 0x31170000) {
5101                 /*
5102                  * If loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
5103                  * (0x31170000) is returned, the device missing delay is in
5104                  * progress and the command needs to be retried later.
5105                  */
5106                 *(pkt->pkt_scbp) = STATUS_BUSY;
5107                 return;
5108         }
5109 
5110         if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
5111             ((ioc_status & MPI2_IOCSTATUS_MASK) ==
5112             MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
5113                 pkt->pkt_reason = CMD_INCOMPLETE;
5114                 pkt->pkt_state |= STATE_GOT_BUS;
5115                 mutex_enter(&ptgt->m_tgt_intr_mutex);
5116                 if (ptgt->m_reset_delay == 0) {
5117                         mptsas_set_throttle(mpt, ptgt,
5118                             DRAIN_THROTTLE);
5119                 }
5120                 mutex_exit(&ptgt->m_tgt_intr_mutex);
5121                 return;
5122         }
5123 
5124         if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5125                 responsedata &= 0x000000FF;
5126                 if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
5127                         mptsas_log(mpt, CE_NOTE, "TLR is not supported\n");
5128                         pkt->pkt_reason = CMD_TLR_OFF;
5129                         return;
5130                 }
5131         }
5132 
5133 
5134         switch (scsi_status) {
5135         case MPI2_SCSI_STATUS_CHECK_CONDITION:
5136                 pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5137                 arqstat = (void*)(pkt->pkt_scbp);
5138                 arqstat->sts_rqpkt_status = *((struct scsi_status *)
5139                     (pkt->pkt_scbp));
5140                 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |


5200                         topo_node->un.phymask = ptgt->m_phymask;
5201                         topo_node->devhdl = ptgt->m_devhdl;
5202                         topo_node->object = (void *)ptgt;
5203                         topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;
5204 
5205                         if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
5206                             mptsas_handle_dr,
5207                             (void *)topo_node,
5208                             DDI_NOSLEEP)) != DDI_SUCCESS) {
5209                                 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
5210                                     "for handling SAS dynamic reconfiguration "
5211                                     "failed.\n");
5212                         }
5213                 }
5214                 break;
5215         case MPI2_SCSI_STATUS_GOOD:
5216                 switch (ioc_status & MPI2_IOCSTATUS_MASK) {
5217                 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5218                         pkt->pkt_reason = CMD_DEV_GONE;
5219                         pkt->pkt_state |= STATE_GOT_BUS;
5220                         mutex_enter(&ptgt->m_tgt_intr_mutex);
5221                         if (ptgt->m_reset_delay == 0) {
5222                                 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5223                         }
5224                         mutex_exit(&ptgt->m_tgt_intr_mutex);
5225                         NDBG31(("lost disk for target%d, command:%x",
5226                             Tgt(cmd), pkt->pkt_cdbp[0]));
5227                         break;
5228                 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5229                         NDBG31(("data overrun: xferred=%d", xferred));
5230                         NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5231                         pkt->pkt_reason = CMD_DATA_OVR;
5232                         pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5233                             | STATE_SENT_CMD | STATE_GOT_STATUS
5234                             | STATE_XFERRED_DATA);
5235                         pkt->pkt_resid = 0;
5236                         break;
5237                 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5238                 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5239                         NDBG31(("data underrun: xferred=%d", xferred));
5240                         NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5241                         pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5242                             | STATE_SENT_CMD | STATE_GOT_STATUS);
5243                         pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5244                         if (pkt->pkt_resid != cmd->cmd_dmacount) {


5251                         break;
5252                 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5253                 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5254                         mptsas_set_pkt_reason(mpt,
5255                             cmd, CMD_RESET, STAT_DEV_RESET);
5256                         break;
5257                 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5258                 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5259                         pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
5260                         mptsas_set_pkt_reason(mpt,
5261                             cmd, CMD_TERMINATED, STAT_TERMINATED);
5262                         break;
5263                 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5264                 case MPI2_IOCSTATUS_BUSY:
5265                         /*
5266                          * set throttles to drain
5267                          */
5268                         ptgt = (mptsas_target_t *)mptsas_hash_traverse(
5269                             &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
5270                         while (ptgt != NULL) {
5271                                 mutex_enter(&ptgt->m_tgt_intr_mutex);
5272                                 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5273                                 mutex_exit(&ptgt->m_tgt_intr_mutex);
5274 
5275                                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
5276                                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
5277                         }
5278 
5279                         /*
5280                          * retry command
5281                          */
5282                         cmd->cmd_flags |= CFLAG_RETRY;
5283                         cmd->cmd_pkt_flags |= FLAG_HEAD;
5284 
5285                         mutex_exit(&mpt->m_mutex);
5286                         (void) mptsas_accept_pkt(mpt, cmd);
5287                         mutex_enter(&mpt->m_mutex);
5288                         break;
5289                 default:
5290                         mptsas_log(mpt, CE_WARN,
5291                             "unknown ioc_status = %x\n", ioc_status);
5292                         mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
5293                             "count = %x, scsi_status = %x", scsi_state,
5294                             xferred, scsi_status);
5295                         break;
5296                 }
5297                 break;
5298         case MPI2_SCSI_STATUS_TASK_SET_FULL:
5299                 mptsas_handle_qfull(mpt, cmd);
5300                 break;
5301         case MPI2_SCSI_STATUS_BUSY:
5302                 NDBG31(("scsi_status busy received"));
5303                 break;
5304         case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5305                 NDBG31(("scsi_status reservation conflict received"));
5306                 break;
5307         default:


5382                         cv_wait(&item->cv, &item->mutex);
5383                 }
5384                 pkt = NULL;
5385                 if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
5386                         cmd->cmd_flags |= CFLAG_COMPLETED;
5387                         pkt = CMD2PKT(cmd);
5388                 }
5389                 mutex_exit(&item->mutex);
5390                 if (pkt) {
5391                         mptsas_pkt_comp(pkt, cmd);
5392                 }
5393                 mutex_enter(&item->mutex);
5394         }
5395         mutex_exit(&item->mutex);
5396         mutex_enter(&mpt->m_doneq_mutex);
5397         mpt->m_doneq_thread_n--;
5398         cv_broadcast(&mpt->m_doneq_thread_cv);
5399         mutex_exit(&mpt->m_doneq_mutex);
5400 }
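/*
 * A self-contained pthread sketch of the helper-thread pattern used by the
 * done-queue thread above: completions are queued by the interrupt path, a
 * worker sleeps on a condition variable, and the completion callback is
 * invoked with the queue lock dropped (mirroring how the thread above drops
 * item->mutex around mptsas_pkt_comp()).  The ex_* names are illustrative.
 */
#ifdef MPTSAS_EXAMPLE_SKETCH
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct ex_done_item {
        struct ex_done_item     *next;
        void                    (*complete)(struct ex_done_item *);
} ex_done_item_t;

typedef struct ex_doneq {
        pthread_mutex_t mutex;
        pthread_cond_t  cv;
        ex_done_item_t  *head;
        bool            stop;
} ex_doneq_t;

/* Producer (ISR side): queue a completion and wake one helper. */
static void
ex_doneq_add(ex_doneq_t *q, ex_done_item_t *item)
{
        pthread_mutex_lock(&q->mutex);
        item->next = q->head;
        q->head = item;
        pthread_cond_signal(&q->cv);
        pthread_mutex_unlock(&q->mutex);
}

/* Helper thread: pull one item at a time, run the callback unlocked. */
static void *
ex_doneq_thread(void *arg)
{
        ex_doneq_t *q = arg;

        pthread_mutex_lock(&q->mutex);
        while (!q->stop) {
                ex_done_item_t *item = q->head;

                if (item == NULL) {
                        pthread_cond_wait(&q->cv, &q->mutex);
                        continue;
                }
                q->head = item->next;
                pthread_mutex_unlock(&q->mutex);
                item->complete(item);           /* callback without the lock */
                pthread_mutex_lock(&q->mutex);
        }
        pthread_mutex_unlock(&q->mutex);
        return (NULL);
}
#endif  /* MPTSAS_EXAMPLE_SKETCH */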
5401 

5402 /*
5403  * mpt interrupt handler.
5404  */
5405 static uint_t
5406 mptsas_intr(caddr_t arg1, caddr_t arg2)
5407 {
5408         mptsas_t                        *mpt = (void *)arg1;
5409         pMpi2ReplyDescriptorsUnion_t    reply_desc_union;
5410         uchar_t                         did_reply = FALSE;
5411         int                             i = 0, j;
5412         uint8_t                         reply_type;
5413         uint16_t                        SMID;
5414 
5415         NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));
5416 
5417         /*
5418          * 1.
5419          * To avoid using m_mutex in the ISR ("ISR" here refers not only
5420          * to mptsas_intr but to all functions called recursively from
5421          * it), separate mutexes are introduced to protect the elements
5422          * touched in the ISR.  Three types of mutexes are involved:
5423          *      a) the per-instance mutex m_intr_mutex.
5424          *      b) the per-target mutex m_tgt_intr_mutex.
5425          *      c) the mutexes that protect the free slots.
5426          *
5427          * a) the per-instance mutex m_intr_mutex:
5428          * used to protect m_options, m_power, m_waitq, etc. that are
5429          * checked/modified in the ISR; to protect the getting and setting
5430          * of the reply descriptor index; and to protect m_slots[].
5431          *
5432          * b) the per-target mutex m_tgt_intr_mutex:
5433          * used to protect the per-target elements that the ISR touches.
5434          * Contention on this per-target mutex is comparable to that on
5435          * the per-target mutex in the sd(7d) driver.
5436          *
5437          * c) the mutexes that protect the free slots:
5438          * these mutexes are introduced to minimize the mutex contention
5439          * between the IO request threads, where free slots are allocated
5440          * for sending cmds, and the ISR, where slots holding outstanding
5441          * cmds are returned to the free pool.
5442          * The idea is as follows (see the sketch after mptsas_intr()):
5443          * 1) Partition all of the free slots into NCPU groups. For example,
5444          * in a system with 15 slots and 4 CPUs, slots s1,s5,s9,s13 are
5445          * marked as belonging to CPU1, s2,s6,s10,s14 to CPU2, s3,s7,s11,s15
5446          * to CPU3, and s4,s8,s12 to CPU4.
5447          * 2) In each group, an alloc/release queue pair is created, and
5448          * both the allocq and the releaseq have a dedicated mutex.
5449          * 3) At init, all of the slots in a CPU group are inserted into
5450          * the allocq of its CPU's pair.
5451          * 4) When doing IO,
5452          * mptsas_scsi_start()
5453          * {
5454          *      cpuid = the cpu NO of the cpu where this thread is running on
5455          * retry:
5456          *      mutex_enter(&allocq[cpuid]);
5457          *      if (get free slot = success) {
5458          *              remove the slot from the allocq
5459          *              mutex_exit(&allocq[cpuid]);
5460          *              return(success);
5461          *      } else { // exchange allocq and releaseq and try again
5462          *              mutex_enter(&releq[cpuid]);
5463          *              exchange the allocq and releaseq of this pair;
5464          *              mutex_exit(&releq[cpuid]);
5465          *              if (try to get free slot again = success) {
5466          *                      remove the slot from the allocq
5467          *                      mutex_exit(&allocq[cpuid]);
5468          *                      return(success);
5469          *              } else {
5470          *                      mutex_exit(&allocq[cpuid]);
5471          *                      if (all CPU groups tried)
5472          *                              return(failure);
5473          *                      cpuid = (cpuid + 1) % NCPU;
5474          *                      goto retry;
5475          *              }
5476          *      }
5477          * }
5478          * ISR()
5479          * {
5480          *              cpuid = the CPU group id where the slot sending the
5481          *              cmd belongs;
5482          *              mutex_enter(&releq[cpuid]);
5483          *              insert the freed slot into the releaseq
5484          *              mutex_exit(&releq[cpuid]);
5485          * }
5486          * This way, mutex contention only occurs when a queue pair is
5487          * being exchanged.
5488          *
5489          * For mutex m_intr_mutex and m_tgt_intr_mutex, there are 2 scenarios:
5490          *
5491          * a) If the elements are only checked but not modified in the ISR,
5492          * then only the places where those elements are modified (outside
5493          * of the ISR) need to be protected by the newly introduced mutex.
5494          * For example, if data A is only read/checked in the ISR, we do
5495          * the following:
5496          * In ISR:
5497          * {
5498          *      mutex_enter(&new_mutex);
5499          *      read(A);
5500          *      mutex_exit(&new_mutex);
5501          *      //the new_mutex here is either the m_tgt_intr_mutex or
5502          *      //the m_intr_mutex.
5503          * }
5504          * In non-ISR
5505          * {
5506          *      mutex_enter(&m_mutex); //the stock driver already did this
5507          *      mutex_enter(&new_mutex);
5508          *      write(A);
5509          *      mutex_exit(&new_mutex);
5510          *      mutex_exit(&m_mutex); //the stock driver already did this
5511          *
5512          *      read(A);
5513          *      // read(A) in non-ISR is not required to be protected by new
5514          *      // mutex since 'A' has already been protected by m_mutex
5515          *      // outside of the ISR
5516          * }
5517          *
5518          * Those fields in mptsas_target_t/ptgt which are only read in the
5519          * ISR fall into this category.  They, together with the fields
5520          * that are never read in the ISR, do not need to be protected by
5521          * m_tgt_intr_mutex.
5522          * Checking of m_waitq also falls into this category, so everywhere
5523          * outside of the ISR where m_waitq is modified, such as in
5524          * mptsas_waitq_add(), mptsas_waitq_delete(), mptsas_waitq_rm(),
5525          * m_intr_mutex should be used.
5526          *
5527          * b) If the elements are modified in the ISR, then each place
5528          * where those elements are referenced (outside of the ISR) needs
5529          * to be protected by the newly introduced mutex.  Of course, if
5530          * those elements only appear in a non-key code path, that is,
5531          * they don't affect performance, then m_mutex can still be used
5532          * as before.  For example, if data B is modified in a key code
5533          * path in the ISR and data C in a non-key code path, we do:
5534          * In ISR:
5535          * {
5536          *      mutex_enter(&new_mutex);
5537          *      write(B);
5538          *      mutex_exit(&new_mutex);
5539          *      if (seldom happen) {
5540          *              mutex_enter(&m_mutex);
5541          *              write(C);
5542          *              mutex_exit(&m_mutex);
5543          *      }
5544          *      //the new_mutex here is either the m_tgt_intr_mutex or
5545          *      //the m_intr_mutex.
5546          * }
5547          * In non-ISR
5548          * {
5549          *      mutex_enter(&new_mutex);
5550          *      write(B);
5551          *      mutex_exit(&new_mutex);
5552          *
5553          *      mutex_enter(&new_mutex);
5554          *      read(B);
5555          *      mutex_exit(&new_mutex);
5556          *      // both write(B) and read(B) in non-ISR code are required
5557          *      // to be protected by the new mutex outside of the ISR
5558          *
5559          *      mutex_enter(&m_mutex); //the stock driver already did this
5560          *      read(C);
5561          *      write(C);
5562          *      mutex_exit(&m_mutex); //the stock driver already did this
5563          *      // both write(C) and read(C) in non-ISR code are already
5564          *      // protected by m_mutex outside of the ISR
5565          * }
5566          *
5567          * For example, ptgt->m_t_ncmds falls into category 'B', and the
5568          * elements touched in address reply, restart_hba, passthrough,
5569          * and IOC handling fall into category 'C'.
5570          *
5571          * In any case where these mutexes are nested, acquire them in
5572          * the following order:
5573          *      m_mutex -> m_intr_mutex -> m_tgt_intr_mutex
5574          *      m_intr_mutex -> m_tgt_intr_mutex
5575          *      m_mutex -> m_intr_mutex
5576          *      m_mutex -> m_tgt_intr_mutex
5577          *
5578          * 2.
5579          * Make sure that, at any time, getting the ReplyDescriptor via
5580          * m_post_index and writing m_post_index to the ReplyDescriptorIndex
5581          * register are atomic.  Since m_mutex is not used for this purpose
5582          * in the ISR, the new mutex m_intr_mutex must play this role, so
5583          * mptsas_poll(), which also performs this getting/setting, must use
5584          * m_intr_mutex.
5585          * Note that since the context reply in the ISR/process_intr is the
5586          * only code path that affects performance, a fast path is introduced
5587          * to handle only read/write IOs that get a context reply.  Other
5588          * IOs, such as passthrough and IOC cmds with a context reply and all
5589          * address replies, are handled by the as-is process_intr().  To keep
5590          * its semantics, make sure no new mutex is held before entering it.
5591          */
5592 
5593         mutex_enter(&mpt->m_intr_mutex);
5594 
5595         /*
5596          * If interrupts are shared by two channels then check whether this
5597          * interrupt is genuinely for this channel by making sure first the
5598          * chip is in high power state.
5599          */
5600         if ((mpt->m_options & MPTSAS_OPT_PM) &&
5601             (mpt->m_power_level != PM_LEVEL_D0)) {
5602                 mutex_exit(&mpt->m_intr_mutex);
5603                 return (DDI_INTR_UNCLAIMED);
5604         }
5605 
5606         /*
5607          * If we are polling, this interrupt must have come from a device
5608          * sharing the interrupt line, because IOC interrupts are disabled
5609          * during polling and the polling routine will handle any replies.
5610          * In that case, return with the interrupt unclaimed.
5611          */
5612         if (mpt->m_polled_intr) {
5613                 mutex_exit(&mpt->m_intr_mutex);
5614                 mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
5615                 return (DDI_INTR_UNCLAIMED);
5616         }
5617 
5618         /*
5619          * Check the interrupt status register for pending replies.
5620          */
5621         if ((INTPENDING(mpt)) != 0) {
5622                 /*
5623                  * read fifo until empty.
5624                  */
5625 #ifndef __lock_lint
5626                 _NOTE(CONSTCOND)
5627 #endif
5628                 while (TRUE) {
5629                         (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5630                             DDI_DMA_SYNC_FORCPU);
5631                         reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5632                             MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5633 
5634                         if (ddi_get32(mpt->m_acc_post_queue_hdl,
5635                             &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5636                             ddi_get32(mpt->m_acc_post_queue_hdl,
5637                             &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5638                                 break;
5639                         }
5640 
5641                         /*
5642                          * The reply is valid, process it according to its
5643                          * type.  Also, set a flag for updating the reply index
5644                          * after they've all been processed.
5645                          */
5646                         did_reply = TRUE;
5647 
5648                         reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
5649                             &reply_desc_union->Default.ReplyFlags);
5650                         reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
5651                         mpt->m_reply[i].Default.ReplyFlags = reply_type;
5652                         if (reply_type ==
5653                             MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
5654                                 SMID = ddi_get16(mpt->m_acc_post_queue_hdl,
5655                                     &reply_desc_union->SCSIIOSuccess.SMID);
5656                                 if (mptsas_handle_io_fastpath(mpt, SMID) !=
5657                                     TRUE) {
5658                                         mpt->m_reply[i].SCSIIOSuccess.SMID =
5659                                             SMID;
5660                                         i++;
5661                                 }
5662                         } else if (reply_type ==
5663                             MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
5664                                 mpt->m_reply[i].AddressReply.ReplyFrameAddress =
5665                                     ddi_get32(mpt->m_acc_post_queue_hdl,
5666                                     &reply_desc_union->AddressReply.
5667                                     ReplyFrameAddress);
5668                                 mpt->m_reply[i].AddressReply.SMID =
5669                                     ddi_get16(mpt->m_acc_post_queue_hdl,
5670                                     &reply_desc_union->AddressReply.SMID);
5671                                 i++;
5672                         }
5673                         /*
5674                          * Clear the reply descriptor for re-use and increment
5675                          * index.
5676                          */
5677                         ddi_put64(mpt->m_acc_post_queue_hdl,
5678                             &((uint64_t *)(void *)mpt->m_post_queue)
5679                             [mpt->m_post_index], 0xFFFFFFFFFFFFFFFF);
5680                         (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5681                             DDI_DMA_SYNC_FORDEV);
5682 
5683                         /*
5684                          * Increment post index and roll over if needed.
5685                          */
5686                         if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5687                                 mpt->m_post_index = 0;
5688                         }
5689                         if (i >= MPI_ADDRESS_COALSCE_MAX)
5690                                 break;
5691                 }
5692 
5693                 /*
5694                  * Update the global reply index if at least one reply was
5695                  * processed.
5696                  */
5697                 if (did_reply) {
5698                         ddi_put32(mpt->m_datap,
5699                             &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
5700 
5701                         /*
5702                          * For FMA, checking only the PIO access handle is
5703                          * required and sufficient here.  Where the fastpath
5704                          * is not hit, mptsas_fma_check() checks all of the
5705                          * handle types; that is not necessary here and is
5706                          * sometimes not correct, since an FMA check should
5707                          * only be done after the PIO and/or dma happens.
5708                          */
5709                         if ((mptsas_check_acc_handle(mpt->m_datap) !=
5710                             DDI_SUCCESS)) {
5711                                 ddi_fm_service_impact(mpt->m_dip,
5712                                     DDI_SERVICE_UNAFFECTED);
5713                         }
5714 
5715                 }
5716         } else {
5717                 mutex_exit(&mpt->m_intr_mutex);
5718                 return (DDI_INTR_UNCLAIMED);
5719         }
5720         NDBG1(("mptsas_intr complete"));
5721         mutex_exit(&mpt->m_intr_mutex);
5722 
5723         /*
5724          * Since most cmds (read and write IOs that complete successfully)
5725          * have already been processed in the fast path, where m_mutex is
5726          * not held, handling the address replies and the other context
5727          * replies such as passthrough and IOC cmds here with m_mutex held
5728          * should not be a big performance issue.
5729          * If holding m_mutex to process these cmds were still an obvious
5730          * issue, we could process them in a taskq.
5731          */
5732         for (j = 0; j < i; j++) {
5733                 mutex_enter(&mpt->m_mutex);
5734                 mptsas_process_intr(mpt, &mpt->m_reply[j]);
5735                 mutex_exit(&mpt->m_mutex);
5736         }
5737 
5738         /*
5739          * If no helper threads are created, process the doneq in ISR. If
5740          * helpers are created, use the doneq length as a metric to measure the
5741          * load on the interrupt CPU. If it is long enough, which indicates the
5742          * load is heavy, then we deliver the IO completions to the helpers.
5743          * This measurement has some limitations, although it is simple and
5744          * straightforward and works well for most of the cases at present.
5745          */
5746         if (!mpt->m_doneq_thread_n) {

5747                 mptsas_doneq_empty(mpt);
5748         } else {
5749                 int helper = 1;
5750                 mutex_enter(&mpt->m_intr_mutex);
5751                 if (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)
5752                         helper = 0;
5753                 mutex_exit(&mpt->m_intr_mutex);
5754                 if (helper) {
5755                         mptsas_deliver_doneq_thread(mpt);
5756                 } else {
5757                         mptsas_doneq_empty(mpt);
5758                 }
5759         }
5760 
5761         /*
5762          * If there are queued cmds, start them now.
5763          */
5764         mutex_enter(&mpt->m_intr_mutex);
5765         if (mpt->m_waitq != NULL) {
5766                 mutex_exit(&mpt->m_intr_mutex);
5767                 mutex_enter(&mpt->m_mutex);
5768                 mptsas_restart_hba(mpt);
5769                 mutex_exit(&mpt->m_mutex);
5770                 return (DDI_INTR_CLAIMED);
5771         }
5772         mutex_exit(&mpt->m_intr_mutex);
5773         return (DDI_INTR_CLAIMED);
5774 }
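/*
 * A self-contained, user-level sketch of the per-CPU alloc/release
 * slot-queue scheme described in the big comment at the top of
 * mptsas_intr(): slots are dealt into NCPU groups, each group keeps an
 * allocq and a releq with separate locks, the issue path swaps the two
 * queues when its allocq runs dry, and the completion path only ever takes
 * the releq lock of one group.  pthreads and the ex_* names are assumptions
 * for illustration; this is not the driver's actual slot code.
 */
#ifdef MPTSAS_EXAMPLE_SKETCH
#include <pthread.h>
#include <stddef.h>

#define EX_NGROUPS      4               /* stands in for NCPU */

typedef struct ex_slot {
        struct ex_slot  *next;
        int             group;          /* which CPU group owns this slot */
        int             smid;
} ex_slot_t;

typedef struct ex_group {
        pthread_mutex_t alloc_lock;
        pthread_mutex_t rele_lock;
        ex_slot_t       *allocq;        /* slots ready to hand out */
        ex_slot_t       *releq;         /* slots returned by the "ISR" */
} ex_group_t;

static ex_group_t ex_groups[EX_NGROUPS];

static void
ex_slots_init(ex_slot_t *slots, int nslots)
{
        int i;

        for (i = 0; i < EX_NGROUPS; i++) {
                pthread_mutex_init(&ex_groups[i].alloc_lock, NULL);
                pthread_mutex_init(&ex_groups[i].rele_lock, NULL);
                ex_groups[i].allocq = NULL;
                ex_groups[i].releq = NULL;
        }
        /* deal the slots round-robin into the groups' allocqs */
        for (i = 0; i < nslots; i++) {
                slots[i].group = i % EX_NGROUPS;
                slots[i].smid = i + 1;
                slots[i].next = ex_groups[slots[i].group].allocq;
                ex_groups[slots[i].group].allocq = &slots[i];
        }
}

/* IO-issue side: models the retry/exchange loop from the comment. */
static ex_slot_t *
ex_slot_alloc(int cpuid)
{
        int tries;

        for (tries = 0; tries < EX_NGROUPS; tries++) {
                ex_group_t *g = &ex_groups[(cpuid + tries) % EX_NGROUPS];
                ex_slot_t *s;

                pthread_mutex_lock(&g->alloc_lock);
                if (g->allocq == NULL) {
                        /* exchange allocq and releq, then try again */
                        pthread_mutex_lock(&g->rele_lock);
                        g->allocq = g->releq;
                        g->releq = NULL;
                        pthread_mutex_unlock(&g->rele_lock);
                }
                s = g->allocq;
                if (s != NULL)
                        g->allocq = s->next;
                pthread_mutex_unlock(&g->alloc_lock);
                if (s != NULL)
                        return (s);
        }
        return (NULL);                  /* all groups empty */
}

/* Completion ("ISR") side: only one group's releq lock is taken. */
static void
ex_slot_free(ex_slot_t *s)
{
        ex_group_t *g = &ex_groups[s->group];

        pthread_mutex_lock(&g->rele_lock);
        s->next = g->releq;
        g->releq = s;
        pthread_mutex_unlock(&g->rele_lock);
}
#endif  /* MPTSAS_EXAMPLE_SKETCH */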
5775 
5776 /*
5777  * In the ISR, successfully completed read and write IOs are processed in a
5778  * fast path. This function is only used to handle non-fastpath IO, including
5779  * all address replies and the context replies for IOC cmds, passthrough,
5780  * etc.
5781  * This function is also used to process polled cmds.
5782  */
5783 static void
5784 mptsas_process_intr(mptsas_t *mpt,
5785     pMpi2ReplyDescriptorsUnion_t reply_desc_union)
5786 {
5787         uint8_t reply_type;
5788 


5789         /*
5790          * The reply is valid; process it according to its
5791          * type.  The reply post index itself is updated by
5792          * the callers, not here.
5793          */
5794         reply_type = reply_desc_union->Default.ReplyFlags;


5795         if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
5796                 mptsas_handle_scsi_io_success(mpt, reply_desc_union);
5797         } else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
5798                 mptsas_handle_address_reply(mpt, reply_desc_union);
5799         } else {
5800                 mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
5801                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5802         }










5803 }
5804 
5805 /*
5806  * handle qfull condition
5807  */
5808 static void
5809 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
5810 {
5811         mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
5812 
5813         if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
5814             (ptgt->m_qfull_retries == 0)) {
5815                 /*
5816                  * We have exhausted the retries on QFULL, or,
5817                  * the target driver has indicated that it
5818                  * wants to handle QFULL itself by setting
5819                  * qfull-retries capability to 0. In either case
5820                  * we want the target driver's QFULL handling
5821                  * to kick in. We do this by having pkt_reason
5822                  * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
5823                  */
5824                 mutex_enter(&ptgt->m_tgt_intr_mutex);
5825                 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5826                 mutex_exit(&ptgt->m_tgt_intr_mutex);
5827         } else {
5828                 mutex_enter(&ptgt->m_tgt_intr_mutex);
5829                 if (ptgt->m_reset_delay == 0) {
5830                         ptgt->m_t_throttle =
5831                             max((ptgt->m_t_ncmds - 2), 0);
5832                 }
5833                 mutex_exit(&ptgt->m_tgt_intr_mutex);
5834 
5835                 cmd->cmd_pkt_flags |= FLAG_HEAD;
5836                 cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
5837                 cmd->cmd_flags |= CFLAG_RETRY;
5838 
5839                 mutex_exit(&mpt->m_mutex);
5840                 (void) mptsas_accept_pkt(mpt, cmd);
5841                 mutex_enter(&mpt->m_mutex);
5842 
5843                 /*
5844                  * When the target gives queue full status with no commands
5845                  * outstanding (m_t_ncmds == 0), the throttle is set to 0
5846                  * (HOLD_THROTTLE), and the queue full handling starts
5847                  * (see psarc/1994/313); if there are commands outstanding,
5848                  * the throttle is set to (m_t_ncmds - 2).
5849                  */
5850                 mutex_enter(&ptgt->m_tgt_intr_mutex);
5851                 if (ptgt->m_t_throttle == HOLD_THROTTLE) {
5852                         /*
5853                          * By setting throttle to QFULL_THROTTLE, we
5854                          * avoid submitting new commands and in
5855                          * mptsas_restart_cmd find out slots which need
5856                          * their throttles to be cleared.
5857                          */
5858                         mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
5859                         if (mpt->m_restart_cmd_timeid == 0) {
5860                                 mpt->m_restart_cmd_timeid =
5861                                     timeout(mptsas_restart_cmd, mpt,
5862                                     ptgt->m_qfull_retry_interval);
5863                         }
5864                 }
5865                 mutex_exit(&ptgt->m_tgt_intr_mutex);
5866         }
5867 }
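/*
 * A simplified sketch of the QFULL throttle arithmetic used above: when
 * retries are exhausted (or disabled) the decision is pushed up to the
 * target driver, otherwise the throttle is dropped to max(ncmds - 2, 0) and
 * the command is retried at the head of the queue.  Reset-delay handling
 * and the timed restart are omitted, and the ex_* names are illustrative.
 */
#ifdef MPTSAS_EXAMPLE_SKETCH
#include <stdbool.h>

#define EX_HOLD_THROTTLE        0       /* stands in for HOLD_THROTTLE */

typedef struct ex_tgt {
        int     t_ncmds;                /* commands currently outstanding */
        int     t_throttle;             /* max commands we will submit */
        int     qfull_retries;          /* 0 => target driver handles QFULL */
        int     qfull_retry_count;      /* retries used so far for this cmd */
} ex_tgt_t;

/*
 * Returns true if the command should be retried by the HBA driver, false
 * if QFULL handling should be pushed up to the target driver.
 */
static bool
ex_handle_qfull(ex_tgt_t *t)
{
        if (t->qfull_retries == 0 ||
            ++t->qfull_retry_count > t->qfull_retries)
                return (false);         /* let the target driver handle it */

        /* throttle down to (ncmds - 2), never below 0 (HOLD) */
        t->t_throttle = (t->t_ncmds > 2) ? t->t_ncmds - 2 : EX_HOLD_THROTTLE;

        if (t->t_throttle == EX_HOLD_THROTTLE) {
                /*
                 * Little or nothing is outstanding, so no completion will
                 * come along to restart the queue; a timed restart would
                 * be scheduled here instead.
                 */
        }
        return (true);                  /* retry the command at the head */
}
#endif  /* MPTSAS_EXAMPLE_SKETCH */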
5868 
5869 mptsas_phymask_t
5870 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
5871 {
5872         mptsas_phymask_t        phy_mask = 0;
5873         uint8_t                 i = 0;
5874 
5875         NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
5876 
5877         ASSERT(mutex_owned(&mpt->m_mutex));
5878 
5879         /*
5880          * If physport is 0xFF, this is a RAID volume.  Use phymask of 0.
5881          */
5882         if (physport == 0xFF) {
5883                 return (0);
5884         }
5885 


6143                                         mutex_exit(&mpt->m_mutex);
6144 
6145                                         parent = NULL;
6146                                         continue;
6147                                 }
6148                                 (void) sprintf(phy_mask_name, "%x", phymask);
6149                         }
6150                         parent = scsi_hba_iport_find(mpt->m_dip,
6151                             phy_mask_name);
6152                         if (parent == NULL) {
6153                                 mptsas_log(mpt, CE_WARN, "Failed to find an "
6154                                     "iport, should not happen!");
6155                                 goto out;
6156                         }
6157 
6158                 }
6159                 ASSERT(parent);
6160 handle_topo_change:
6161 
6162                 mutex_enter(&mpt->m_mutex);
6163 




6164                 mptsas_handle_topo_change(topo_node, parent);


6165                 save_node = topo_node;
6166                 topo_node = topo_node->next;
6167                 ASSERT(save_node);
6168                 kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
6169                 mutex_exit(&mpt->m_mutex);
6170 
6171                 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6172                     (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
6173                     (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
6174                         /*
6175                          * If a direct-attached device is associated, make
6176                          * sure to reset the parent before starting the next
6177                          * one; all devices associated with an expander
6178                          * share the parent.  Also reset the parent for RAID.
6179                          */
6180                         parent = NULL;
6181                 }
6182         }
6183 out:
6184         kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);


6434                             MPTSAS_NUM_PHYS, 0) !=
6435                             DDI_PROP_SUCCESS) {
6436                                 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6437                                     MPTSAS_NUM_PHYS);
6438                                 mptsas_log(mpt, CE_WARN, "mptsas num phys "
6439                                     "prop update failed");
6440                                 break;
6441                         }
6442                         if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6443                             MPTSAS_VIRTUAL_PORT, 1) !=
6444                             DDI_PROP_SUCCESS) {
6445                                 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6446                                     MPTSAS_VIRTUAL_PORT);
6447                                 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6448                                     "prop update failed");
6449                                 break;
6450                         }
6451                 }
6452 
6453                 mutex_enter(&mpt->m_mutex);
6454                 if (mptsas_set_led_status(mpt, ptgt, 0) != DDI_SUCCESS) {
6455                         NDBG14(("mptsas: clear LED for tgt %x failed",
6456                             ptgt->m_slot_num));
6457                 }
6458                 if (rval == DDI_SUCCESS) {
6459                         mptsas_tgt_free(&mpt->m_active->m_tgttbl,
6460                             ptgt->m_sas_wwn, ptgt->m_phymask);
6461                         ptgt = NULL;
6462                 } else {
6463                         /*
6464                          * Clear the DR_INTRANSITION flag to allow I/O down
6465                          * to the pHCI driver since failover has finished.
6466                          * Invalidate the devhdl.
6467                          */
6468                         mutex_enter(&ptgt->m_tgt_intr_mutex);
6469                         ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
6470                         ptgt->m_tgt_unconfigured = 0;

6471                         ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
6472                         mutex_exit(&ptgt->m_tgt_intr_mutex);
6473                 }
6474 
6475                 /*
6476                  * Send SAS IO Unit Control to free the dev handle
6477                  */
6478                 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6479                     (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
6480                         rval = mptsas_free_devhdl(mpt, devhdl);
6481 
6482                         NDBG20(("mptsas%d handle_topo_change to remove "
6483                             "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6484                             rval));
6485                 }
6486 
6487                 break;
6488         }
6489         case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
6490         {
6491                 devhdl = topo_node->devhdl;
6492                 /*


6998                                         topo_node->mpt = mpt;
6999                                         topo_node->un.phymask = 0;
7000                                         topo_node->event =
7001                                             MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
7002                                         topo_node->devhdl = dev_handle;
7003                                         topo_node->flags = flags;
7004                                         topo_node->object = NULL;
7005                                         if (topo_head == NULL) {
7006                                                 topo_head = topo_tail =
7007                                                     topo_node;
7008                                         } else {
7009                                                 topo_tail->next = topo_node;
7010                                                 topo_tail = topo_node;
7011                                         }
7012                                         break;
7013                                 }
7014 
7015                                 /*
7016                                  * Update the DR flag immediately to avoid
7017                                  * I/O failures before failover finishes.
7018                                  * Note the mutex protection: grab the
7019                                  * per-target mutex while setting m_dr_flag,
7020                                  * because m_mutex is not held all the time
7021                                  * in mptsas_scsi_start().

7022                                  */
7023                                 mutex_enter(&ptgt->m_tgt_intr_mutex);
7024                                 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7025                                 mutex_exit(&ptgt->m_tgt_intr_mutex);
7026 
7027                                 topo_node = kmem_zalloc(
7028                                     sizeof (mptsas_topo_change_list_t),
7029                                     KM_SLEEP);
7030                                 topo_node->mpt = mpt;
7031                                 topo_node->un.phymask = ptgt->m_phymask;
7032                                 topo_node->event =
7033                                     MPTSAS_DR_EVENT_OFFLINE_TARGET;
7034                                 topo_node->devhdl = dev_handle;
7035                                 topo_node->flags = flags;
7036                                 topo_node->object = NULL;
7037                                 if (topo_head == NULL) {
7038                                         topo_head = topo_tail = topo_node;
7039                                 } else {
7040                                         topo_tail->next = topo_node;
7041                                         topo_tail = topo_node;
7042                                 }
7043                                 break;
7044                         }
7045                         case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:


7238                                 break;
7239                         }
7240                         case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
7241                         case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
7242                         {
7243                                 NDBG20(("mptsas %d volume deleted\n",
7244                                     mpt->m_instance));
7245                                 ptgt = mptsas_search_by_devhdl(tgttbl,
7246                                     volhandle);
7247                                 if (ptgt == NULL)
7248                                         break;
7249 
7250                                 /*
7251                                  * Clear any flags related to volume
7252                                  */
7253                                 (void) mptsas_delete_volume(mpt, volhandle);
7254 
7255                                 /*
7256                                  * Update the DR flag immediately to avoid I/O failure
7257                                  */
7258                                 mutex_enter(&ptgt->m_tgt_intr_mutex);
7259                                 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7260                                 mutex_exit(&ptgt->m_tgt_intr_mutex);
7261 
7262                                 topo_node = kmem_zalloc(
7263                                     sizeof (mptsas_topo_change_list_t),
7264                                     KM_SLEEP);
7265                                 topo_node->mpt = mpt;
7266                                 topo_node->un.phymask = ptgt->m_phymask;
7267                                 topo_node->event =
7268                                     MPTSAS_DR_EVENT_OFFLINE_TARGET;
7269                                 topo_node->devhdl = volhandle;
7270                                 topo_node->flags =
7271                                     MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7272                                 topo_node->object = (void *)ptgt;
7273                                 if (topo_head == NULL) {
7274                                         topo_head = topo_tail = topo_node;
7275                                 } else {
7276                                         topo_tail->next = topo_node;
7277                                         topo_tail = topo_node;
7278                                 }
7279                                 break;
7280                         }
7281                         case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
7282                         case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7283                         {
7284                                 ptgt = mptsas_search_by_devhdl(tgttbl,
7285                                     diskhandle);
7286                                 if (ptgt == NULL)
7287                                         break;
7288 
7289                                 /*
7290                                  * Update the DR flag immediately to avoid I/O failure
7291                                  */
7292                                 mutex_enter(&ptgt->m_tgt_intr_mutex);
7293                                 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7294                                 mutex_exit(&ptgt->m_tgt_intr_mutex);
7295 
7296                                 topo_node = kmem_zalloc(
7297                                     sizeof (mptsas_topo_change_list_t),
7298                                     KM_SLEEP);
7299                                 topo_node->mpt = mpt;
7300                                 topo_node->un.phymask = ptgt->m_phymask;
7301                                 topo_node->event =
7302                                     MPTSAS_DR_EVENT_OFFLINE_TARGET;
7303                                 topo_node->devhdl = diskhandle;
7304                                 topo_node->flags =
7305                                     MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7306                                 topo_node->object = (void *)ptgt;
7307                                 if (topo_head == NULL) {
7308                                         topo_head = topo_tail = topo_node;
7309                                 } else {
7310                                         topo_tail->next = topo_node;
7311                                         topo_tail = topo_node;
7312                                 }
7313                                 break;
7314                         }


7369 
7370 /*
7371  * handle events from ioc
7372  */
7373 static void
7374 mptsas_handle_event(void *args)
7375 {
7376         m_replyh_arg_t                  *replyh_arg;
7377         pMpi2EventNotificationReply_t   eventreply;
7378         uint32_t                        event, iocloginfo, rfm;
7379         uint32_t                        status;
7380         uint8_t                         port;
7381         mptsas_t                        *mpt;
7382         uint_t                          iocstatus;
7383 
7384         replyh_arg = (m_replyh_arg_t *)args;
7385         rfm = replyh_arg->rfm;
7386         mpt = replyh_arg->mpt;
7387 
7388         mutex_enter(&mpt->m_mutex);








7389 
7390         eventreply = (pMpi2EventNotificationReply_t)
7391             (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
7392         event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7393 
7394         if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7395             &eventreply->IOCStatus)) {
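                     /*
                      * The leading '!' keeps the LOG_INFO_AVAILABLE message
                      * off the console (logged only); other IOC statuses
                      * also warn on the console.
                      */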
7396                 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7397                         mptsas_log(mpt, CE_WARN,
7398                             "!mptsas_handle_event: IOCStatus=0x%x, "
7399                             "IOCLogInfo=0x%x", iocstatus,
7400                             ddi_get32(mpt->m_acc_reply_frame_hdl,
7401                             &eventreply->IOCLogInfo));
7402                 } else {
7403                         mptsas_log(mpt, CE_WARN,
7404                             "mptsas_handle_event: IOCStatus=0x%x, "
7405                             "IOCLogInfo=0x%x", iocstatus,
7406                             ddi_get32(mpt->m_acc_reply_frame_hdl,
7407                             &eventreply->IOCLogInfo));
7408                 }


7929             mpt->m_free_index);
7930         mutex_exit(&mpt->m_mutex);
7931 }
7932 
7933 /*
7934  * invoked from timeout() to restart qfull cmds with throttle == 0
7935  */
7936 static void
7937 mptsas_restart_cmd(void *arg)
7938 {
7939         mptsas_t        *mpt = arg;
7940         mptsas_target_t *ptgt = NULL;
7941 
7942         mutex_enter(&mpt->m_mutex);
7943 
7944         mpt->m_restart_cmd_timeid = 0;
7945 
7946         ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
7947             MPTSAS_HASH_FIRST);
7948         while (ptgt != NULL) {
7949                 mutex_enter(&ptgt->m_tgt_intr_mutex);
7950                 if (ptgt->m_reset_delay == 0) {
7951                         if (ptgt->m_t_throttle == QFULL_THROTTLE) {
7952                                 mptsas_set_throttle(mpt, ptgt,
7953                                     MAX_THROTTLE);
7954                         }
7955                 }
7956                 mutex_exit(&ptgt->m_tgt_intr_mutex);
7957 
7958                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
7959                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
7960         }
7961         mptsas_restart_hba(mpt);
7962         mutex_exit(&mpt->m_mutex);
7963 }
7964 
7965 /*
7966  * mptsas_remove_cmd0 is similar to mptsas_remove_cmd except that it is called
7967  * with m_intr_mutex already held.
7968  */
7969 void
7970 mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
7971 {
7972         ASSERT(mutex_owned(&mpt->m_mutex));
7973 
7974         /*
7975          * With the new fine-grained locking scheme, an outstanding cmd is
7976          * only linked into m_active right before the DMA is triggered
7977          * (MPTSAS_START_CMD); that is, mptsas_save_cmd() no longer links
7978          * the outstanding cmd.  So when mptsas_remove_cmd() is called,
7979          * mptsas_save_cmd() must have been called, but the cmd may not yet
7980          * be linked.  mptsas_remove_cmd0(), however, requires the cmd to
7981          * be linked.  To keep the same semantics, link the cmd onto the
7982          * outstanding cmd list here.
7983          */
7984         mpt->m_active->m_slot[cmd->cmd_slot] = cmd;
7985 
7986         mutex_enter(&mpt->m_intr_mutex);
7987         mptsas_remove_cmd0(mpt, cmd);
7988         mutex_exit(&mpt->m_intr_mutex);
7989 }
7990 
7991 static inline void
7992 mptsas_remove_cmd0(mptsas_t *mpt, mptsas_cmd_t *cmd)
7993 {
7994         int             slot;
7995         mptsas_slots_t  *slots = mpt->m_active;
7996         int             t;
7997         mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
7998         mptsas_slot_free_e_t    *pe;
7999 
8000         ASSERT(cmd != NULL);
8001         ASSERT(cmd->cmd_queued == FALSE);
8002 
8003         /*
8004          * Task Management cmds are removed in their own routines.  Also,
8005          * we don't want to modify timeout based on TM cmds.
8006          */
8007         if (cmd->cmd_flags & CFLAG_TM_CMD) {
8008                 return;
8009         }
8010 
8011         t = Tgt(cmd);
8012         slot = cmd->cmd_slot;
8013         pe = mpt->m_slot_free_ae + slot - 1;
8014         ASSERT(cmd == slots->m_slot[slot]);
8015         ASSERT((slot > 0) && slot < (mpt->m_max_requests - 1));
8016 
8017         /*
8018          * Remove the cmd and return its slot element to the release queue.
8019          */
8020         mutex_enter(&mpt->m_slot_freeq_pairp[pe->cpuid].
8021             m_slot_releq.s.m_fq_mutex);
8022         NDBG31(("mptsas_remove_cmd0: removing cmd=0x%p", (void *)cmd));
8023         slots->m_slot[slot] = NULL;
8024         ASSERT(pe->slot == slot);
8025         list_insert_tail(&mpt->m_slot_freeq_pairp[pe->cpuid].
8026             m_slot_releq.s.m_fq_list, pe);
8027         mpt->m_slot_freeq_pairp[pe->cpuid].m_slot_releq.s.m_fq_n++;
8028         ASSERT(mpt->m_slot_freeq_pairp[pe->cpuid].
8029             m_slot_releq.s.m_fq_n <= mpt->m_max_requests - 2);
8030         mutex_exit(&mpt->m_slot_freeq_pairp[pe->cpuid].
8031             m_slot_releq.s.m_fq_mutex);
8032 
8033         /*
8034          * only decrement per target ncmds if command
8035          * has a target associated with it.
8036          */
8037         if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
8038                 mutex_enter(&ptgt->m_tgt_intr_mutex);
8039                 ptgt->m_t_ncmds--;
8040                 /*
8041                  * reset throttle if we just ran an untagged command
8042                  * to a tagged target
8043                  */
8044                 if ((ptgt->m_t_ncmds == 0) &&
8045                     ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
8046                         mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
8047                 }
8048                 mutex_exit(&ptgt->m_tgt_intr_mutex);
8049         }
8050 


8051         /*
8052          * This is all we need to do for ioc commands.
8053          * IOC cmds are never handled in the ISR fastpath, so
8054          * mptsas_return_to_pool() is always called with m_mutex
8055          * held.
8056          */
8057         if (cmd->cmd_flags & CFLAG_CMDIOC) {
8058                 ASSERT(mutex_owned(&mpt->m_mutex));
8059                 mptsas_return_to_pool(mpt, cmd);
8060                 return;
8061         }
8062 
8063         /*
8064          * Figure out what to set tag Q timeout for...
8065          *
8066          * Optimize: If we have duplicates of the same timeout
8067          * we're using, then we'll use it again until we run
8068          * out of duplicates.  This should be the normal case
8069          * for block and raw I/O.
8070          * If no duplicates, we have to scan through the tag queue and
8071          * find the longest timeout value and use it.  This is
8072          * going to take a while...
8073          * Add 1 to m_n_slots to account for TM request.
8074          */
8075         mutex_enter(&ptgt->m_tgt_intr_mutex);
8076         if (cmd->cmd_pkt->pkt_time == ptgt->m_timebase) {
8077                 if (--(ptgt->m_dups) == 0) {
8078                         if (ptgt->m_t_ncmds) {
8079                                 mptsas_cmd_t *ssp;
8080                                 uint_t n = 0;
8081                                 ushort_t nslots = (slots->m_n_slots + 1);
8082                                 ushort_t i;
8083                                 /*
8084                                  * This crude check assumes we don't do
8085                                  * this too often which seems reasonable
8086                                  * for block and raw I/O.
8087                                  */
8088                                 for (i = 0; i < nslots; i++) {
8089                                         ssp = slots->m_slot[i];
8090                                         if (ssp && (Tgt(ssp) == t) &&
8091                                             (ssp->cmd_pkt->pkt_time > n)) {
8092                                                 n = ssp->cmd_pkt->pkt_time;
8093                                                 ptgt->m_dups = 1;
8094                                         } else if (ssp && (Tgt(ssp) == t) &&
8095                                             (ssp->cmd_pkt->pkt_time == n)) {
8096                                                 ptgt->m_dups++;
8097                                         }
8098                                 }
8099                                 ptgt->m_timebase = n;
8100                         } else {
8101                                 ptgt->m_dups = 0;
8102                                 ptgt->m_timebase = 0;
8103                         }
8104                 }
8105         }
8106         ptgt->m_timeout = ptgt->m_timebase;
8107 
8108         ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
8109         mutex_exit(&ptgt->m_tgt_intr_mutex);
8110 }
8111 
8112 /*

8113  * start a fresh request from the top of the device queue.





8114  */
8115 static void
8116 mptsas_restart_hba(mptsas_t *mpt)
8117 {
















8118         mptsas_cmd_t    *cmd, *next_cmd;
8119         mptsas_target_t *ptgt = NULL;
8120 
8121         NDBG1(("mptsas_restart_hba: mpt=0x%p", (void *)mpt));
8122 
8123         ASSERT(mutex_owned(&mpt->m_mutex));
8124 
8125         /*
8126          * If there is a reset delay, don't start any cmds.  Otherwise, start
8127          * as many cmds as possible.
8128          * Since SMID 0 and the TM slot are both reserved, the actual maximum
8129          * number of commands is m_max_requests - 2.
8130          */
8131         cmd = mpt->m_waitq;
8132 
8133         while (cmd != NULL) {
8134                 next_cmd = cmd->cmd_linkp;
8135                 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
8136                         if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8137                                 /*
8138                                  * A passthru command that got a slot
8139                                  * needs CFLAG_PREPARED set.
8140                                  */
8141                                 cmd->cmd_flags |= CFLAG_PREPARED;


8156                                 mptsas_start_config_page_access(mpt, cmd);
8157                         }
8158                         cmd = next_cmd;
8159                         continue;
8160                 }
8161                 if (cmd->cmd_flags & CFLAG_FW_DIAG) {
8162                         if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8163                                 /*
8164                                  * Send the FW Diag request and delete it from
8165                                  * the waitq.
8166                                  */
8167                                 cmd->cmd_flags |= CFLAG_PREPARED;
8168                                 mptsas_waitq_delete(mpt, cmd);
8169                                 mptsas_start_diag(mpt, cmd);
8170                         }
8171                         cmd = next_cmd;
8172                         continue;
8173                 }
8174 
8175                 ptgt = cmd->cmd_tgt_addr;
8176                 if (ptgt) {
8177                         mutex_enter(&mpt->m_intr_mutex);
8178                         mutex_enter(&ptgt->m_tgt_intr_mutex);
8179                         if ((ptgt->m_t_throttle == DRAIN_THROTTLE) &&
8180                             (ptgt->m_t_ncmds == 0)) {
8181                                 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
8182                         }
8183                         if ((ptgt->m_reset_delay == 0) &&
8184                             (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
8185                                 mutex_exit(&ptgt->m_tgt_intr_mutex);
8186                                 mutex_exit(&mpt->m_intr_mutex);
8187                                 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8188                                         mptsas_waitq_delete(mpt, cmd);
8189                                         (void) mptsas_start_cmd(mpt, cmd);
8190                                 }
8191                                 goto out;
8192                         }
8193                         mutex_exit(&ptgt->m_tgt_intr_mutex);
8194                         mutex_exit(&mpt->m_intr_mutex);
8195                 }
8196 out:
8197                 cmd = next_cmd;
8198         }
8199 }
8200 
8201 /*
8202  * mpt tag type lookup
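      * (indexed by (cmd_pkt_flags & FLAG_TAGMASK) >> 12)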


8203  */
8204 static char mptsas_tag_lookup[] =
8205         {0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
8206 
8207 /*
8208  * mptsas_start_cmd0 is similar to mptsas_start_cmd, except that it is called
8209  * without any mutex held, while mptsas_start_cmd is called with m_mutex
8210  * held.
8211  *
8212  * The relevant fields in ptgt must be protected by m_tgt_intr_mutex in both
8213  * functions.
8214  *
8215  * Before a cmd is linked into the slot table for monitoring as an outstanding
8216  * cmd, it is accessed only as a slab object, so the slab framework ensures
8217  * exclusive access and no other mutex is required.  Linking for monitoring
8218  * and triggering the DMA must be done exclusively.
8219  */
8220 static int
8221 mptsas_start_cmd0(mptsas_t *mpt, mptsas_cmd_t *cmd)
8222 {
8223         struct scsi_pkt         *pkt = CMD2PKT(cmd);
8224         uint32_t                control = 0;
8225         int                     n;
8226         caddr_t                 mem;
8227         pMpi2SCSIIORequest_t    io_request;
8228         ddi_dma_handle_t        dma_hdl = mpt->m_dma_req_frame_hdl;
8229         ddi_acc_handle_t        acc_hdl = mpt->m_acc_req_frame_hdl;
8230         mptsas_target_t         *ptgt = cmd->cmd_tgt_addr;
8231         uint16_t                SMID, io_flags = 0;
8232         uint32_t                request_desc_low, request_desc_high;
8233 
8234         NDBG1(("mptsas_start_cmd0: cmd=0x%p", (void *)cmd));

8235 
8236         /*
8237          * The SMID is simply the command's slot number.  0 is an invalid
8238          * SMID, so slot numbering starts at 1.


8239          */
8240         SMID = cmd->cmd_slot;
8241 
8242         /*
8243          * It is possible for back-to-back device resets to
8244          * happen before the reset delay has expired.  That's
8245          * ok, just let the device reset go out on the bus.
8246          */
8247         if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
8248                 ASSERT(ptgt->m_reset_delay == 0);
8249         }
8250 
8251         /*
8252          * if a non-tagged cmd is submitted to an active tagged target
8253          * then drain before submitting this cmd; SCSI-2 allows RQSENSE
8254          * to be untagged
8255          */
8256         mutex_enter(&ptgt->m_tgt_intr_mutex);
8257         if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
8258             (ptgt->m_t_ncmds > 1) &&
8259             ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
8260             (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
8261                 if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
8262                         NDBG23(("target=%d, untagged cmd, start draining\n",
8263                             ptgt->m_devhdl));
8264 
8265                         if (ptgt->m_reset_delay == 0) {
8266                                 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
8267                         }
8268                         mutex_exit(&ptgt->m_tgt_intr_mutex);
8269 
8270                         mutex_enter(&mpt->m_mutex);
8271                         mptsas_remove_cmd(mpt, cmd);
8272                         cmd->cmd_pkt_flags |= FLAG_HEAD;
8273                         mptsas_waitq_add(mpt, cmd);
8274                         mutex_exit(&mpt->m_mutex);
8275                         return (DDI_FAILURE);
8276                 }
8277                 mutex_exit(&ptgt->m_tgt_intr_mutex);
8278                 return (DDI_FAILURE);
8279         }
8280         mutex_exit(&ptgt->m_tgt_intr_mutex);
8281 
8282         /*
8283          * Set correct tag bits.
8284          */
8285         if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
8286                 switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
8287                     FLAG_TAGMASK) >> 12)]) {
8288                 case MSG_SIMPLE_QTAG:
8289                         control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
8290                         break;
8291                 case MSG_HEAD_QTAG:
8292                         control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
8293                         break;
8294                 case MSG_ORDERED_QTAG:
8295                         control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
8296                         break;
8297                 default:
8298                         mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
8299                         break;
8300                 }
8301         } else {
8302                 if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
8303                         ptgt->m_t_throttle = 1;
8304                 }
8305                 control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;





8306         }

8307 
8308         if (cmd->cmd_pkt_flags & FLAG_TLR) {
8309                 control |= MPI2_SCSIIO_CONTROL_TLR_ON;
8310         }
8311 
8312         mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
8313         io_request = (pMpi2SCSIIORequest_t)mem;
8314 
8315         bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
8316         ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
8317             (MPI2_SCSI_IO_REQUEST, SGL) / 4);
8318         mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
8319             MPI2_FUNCTION_SCSI_IO_REQUEST);
8320 
8321         (void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
8322             io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);
8323 
8324         io_flags = cmd->cmd_cdblen;
8325         ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
8326         /*
8327          * setup the Scatter/Gather DMA list for this request
8328          */
8329         if (cmd->cmd_cookiec > 0) {
8330                 mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
8331         } else {
8332                 ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
8333                     ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
8334                     MPI2_SGE_FLAGS_END_OF_BUFFER |
8335                     MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
8336                     MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
8337         }
8338 
8339         /*
8340          * save ARQ information
8341          */
8342         ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen);
8343         if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
8344             (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
8345                 ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
8346                     cmd->cmd_ext_arqcookie.dmac_address);
8347         } else {
8348                 ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
8349                     cmd->cmd_arqcookie.dmac_address);
8350         }
8351 
8352         ddi_put32(acc_hdl, &io_request->Control, control);
8353 
8354         NDBG31(("starting message=0x%p, with cmd=0x%p",
8355             (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd));
8356 
8357         (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
8358 
8359         /*
8360          * Build request descriptor and write it to the request desc post reg.
8361          */
8362         request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
8363         request_desc_high = ptgt->m_devhdl << 16;
8364 
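             /*
              * Link the cmd into the slot table and trigger the DMA under
              * m_mutex; these two steps must be done exclusively (see the
              * block comment above mptsas_start_cmd0()).
              */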
8365         mutex_enter(&mpt->m_mutex);
8366         mpt->m_active->m_slot[cmd->cmd_slot] = cmd;
8367         MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
8368         mutex_exit(&mpt->m_mutex);
8369 
8370         /*
8371          * Start timeout.
8372          */
8373         mutex_enter(&ptgt->m_tgt_intr_mutex);
8374 #ifdef MPTSAS_TEST
8375         /*
8376          * Temporarily set timebase = 0;  needed for
8377          * timeout torture test.
8378          */
8379         if (mptsas_test_timeouts) {
8380                 ptgt->m_timebase = 0;
8381         }
8382 #endif
8383         n = pkt->pkt_time - ptgt->m_timebase;
8384 
8385         if (n == 0) {
8386                 (ptgt->m_dups)++;
8387                 ptgt->m_timeout = ptgt->m_timebase;
8388         } else if (n > 0) {
8389                 ptgt->m_timeout =
8390                     ptgt->m_timebase = pkt->pkt_time;
8391                 ptgt->m_dups = 1;
8392         } else if (n < 0) {
8393                 ptgt->m_timeout = ptgt->m_timebase;
8394         }
8395 #ifdef MPTSAS_TEST
8396         /*
8397          * Set back to a number higher than
8398          * mptsas_scsi_watchdog_tick
8399          * so timeouts will happen in mptsas_watchsubr
8400          */
8401         if (mptsas_test_timeouts) {
8402                 ptgt->m_timebase = 60;
8403         }
8404 #endif
8405         mutex_exit(&ptgt->m_tgt_intr_mutex);
8406 
8407         if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
8408             (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
8409                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8410                 return (DDI_FAILURE);
8411         }
8412         return (DDI_SUCCESS);
8413 }
8414 
8415 static int
8416 mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
8417 {
8418         struct scsi_pkt         *pkt = CMD2PKT(cmd);
8419         uint32_t                control = 0;
8420         int                     n;
8421         caddr_t                 mem;
8422         pMpi2SCSIIORequest_t    io_request;
8423         ddi_dma_handle_t        dma_hdl = mpt->m_dma_req_frame_hdl;
8424         ddi_acc_handle_t        acc_hdl = mpt->m_acc_req_frame_hdl;
8425         mptsas_target_t         *ptgt = cmd->cmd_tgt_addr;
8426         uint16_t                SMID, io_flags = 0;
8427         uint32_t                request_desc_low, request_desc_high;
8428 
8429         NDBG1(("mptsas_start_cmd: cmd=0x%p", (void *)cmd));
8430 
8431         /*
8432          * The SMID is simply the command's slot number.  0 is an invalid
8433          * SMID, so slot numbering starts at 1.
8434          */
8435         SMID = cmd->cmd_slot;
8436 
8437         /*
8438          * It is possible for back-to-back device resets to
8439          * happen before the reset delay has expired.  That's
8440          * ok, just let the device reset go out on the bus.
8441          */
8442         if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
8443                 ASSERT(ptgt->m_reset_delay == 0);
8444         }
8445 
8446         /*
8447          * if a non-tagged cmd is submitted to an active tagged target
8448          * then drain before submitting this cmd; SCSI-2 allows RQSENSE
8449          * to be untagged
8450          */
8451         mutex_enter(&ptgt->m_tgt_intr_mutex);
8452         if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
8453             (ptgt->m_t_ncmds > 1) &&
8454             ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
8455             (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
8456                 if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
8457                         NDBG23(("target=%d, untagged cmd, start draining\n",
8458                             ptgt->m_devhdl));
8459 
8460                         if (ptgt->m_reset_delay == 0) {
8461                                 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
8462                         }
8463                         mutex_exit(&ptgt->m_tgt_intr_mutex);
8464 
8465                         mptsas_remove_cmd(mpt, cmd);
8466                         cmd->cmd_pkt_flags |= FLAG_HEAD;
8467                         mptsas_waitq_add(mpt, cmd);
8468                         return (DDI_FAILURE);
8469                 }
8470                 mutex_exit(&ptgt->m_tgt_intr_mutex);
8471                 return (DDI_FAILURE);
8472         }
8473         mutex_exit(&ptgt->m_tgt_intr_mutex);
8474 
8475         /*
8476          * Set correct tag bits.
8477          */
8478         if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
8479                 switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
8480                     FLAG_TAGMASK) >> 12)]) {
8481                 case MSG_SIMPLE_QTAG:
8482                         control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
8483                         break;
8484                 case MSG_HEAD_QTAG:
8485                         control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
8486                         break;
8487                 case MSG_ORDERED_QTAG:
8488                         control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
8489                         break;
8490                 default:
8491                         mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
8492                         break;
8493                 }


8537             (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
8538                 ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
8539                     cmd->cmd_ext_arqcookie.dmac_address);
8540         } else {
8541                 ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
8542                     cmd->cmd_arqcookie.dmac_address);
8543         }
8544 
8545         ddi_put32(acc_hdl, &io_request->Control, control);
8546 
8547         NDBG31(("starting message=0x%p, with cmd=0x%p",
8548             (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd));
8549 
8550         (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
8551 
8552         /*
8553          * Build request descriptor and write it to the request desc post reg.
8554          */
8555         request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
8556         request_desc_high = ptgt->m_devhdl << 16;
8557 
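             /*
              * m_mutex is already held here, so the cmd can be linked and
              * the DMA triggered directly.
              */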
8558         mpt->m_active->m_slot[cmd->cmd_slot] = cmd;
8559         MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
8560 
8561         /*
8562          * Start timeout.
8563          */
8564         mutex_enter(&ptgt->m_tgt_intr_mutex);
8565 #ifdef MPTSAS_TEST
8566         /*
8567          * Temporarily set timebase = 0;  needed for
8568          * timeout torture test.
8569          */
8570         if (mptsas_test_timeouts) {
8571                 ptgt->m_timebase = 0;
8572         }
8573 #endif
8574         n = pkt->pkt_time - ptgt->m_timebase;
8575 
8576         if (n == 0) {
8577                 (ptgt->m_dups)++;
8578                 ptgt->m_timeout = ptgt->m_timebase;
8579         } else if (n > 0) {
8580                 ptgt->m_timeout =
8581                     ptgt->m_timebase = pkt->pkt_time;
8582                 ptgt->m_dups = 1;
8583         } else if (n < 0) {
8584                 ptgt->m_timeout = ptgt->m_timebase;
8585         }
8586 #ifdef MPTSAS_TEST
8587         /*
8588          * Set back to a number higher than
8589          * mptsas_scsi_watchdog_tick
8590          * so timeouts will happen in mptsas_watchsubr
8591          */
8592         if (mptsas_test_timeouts) {
8593                 ptgt->m_timebase = 60;
8594         }
8595 #endif
8596         mutex_exit(&ptgt->m_tgt_intr_mutex);
8597 
8598         if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
8599             (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
8600                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8601                 return (DDI_FAILURE);
8602         }
8603         return (DDI_SUCCESS);
8604 }
8605 
8606 /*
8607  * Select a helper thread to handle current doneq
8608  */
8609 static void
8610 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8611 {
8612         uint64_t                        t, i;
8613         uint32_t                        min = 0xffffffff;
8614         mptsas_doneq_thread_list_t      *item;
8615 
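             /*
              * Pick the first helper thread whose queue length is below the
              * threshold; otherwise fall back to the least-loaded thread.
              */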
8616         for (i = 0; i < mpt->m_doneq_thread_n; i++) {


8623 
8624                 mutex_enter(&item->mutex);
8625                 if (item->len < mpt->m_doneq_thread_threshold) {
8626                         t = i;
8627                         mutex_exit(&item->mutex);
8628                         break;
8629                 }
8630                 if (item->len < min) {
8631                         min = item->len;
8632                         t = i;
8633                 }
8634                 mutex_exit(&item->mutex);
8635         }
8636         mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8637         mptsas_doneq_mv(mpt, t);
8638         cv_signal(&mpt->m_doneq_thread_id[t].cv);
8639         mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8640 }
8641 
8642 /*
8643  * move the current global doneq to the doneq of thread[t]
8644  */
8645 static void
8646 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8647 {
8648         mptsas_cmd_t                    *cmd;
8649         mptsas_doneq_thread_list_t      *item = &mpt->m_doneq_thread_id[t];
8650 
8651         ASSERT(mutex_owned(&item->mutex));
8652         mutex_enter(&mpt->m_intr_mutex);
8653         while ((cmd = mpt->m_doneq) != NULL) {
8654                 if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8655                         mpt->m_donetail = &mpt->m_doneq;
8656                 }
8657                 cmd->cmd_linkp = NULL;
8658                 *item->donetail = cmd;
8659                 item->donetail = &cmd->cmd_linkp;
8660                 mpt->m_doneq_len--;
8661                 item->len++;
8662         }
8663         mutex_exit(&mpt->m_intr_mutex);
8664 }
8665 
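     /*
      * Check all access and DMA handles associated with this command; on
      * any fault, report the service impact and mark the pkt with
      * CMD_TRAN_ERR.
      */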
8666 void
8667 mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
8668 {
8669         struct scsi_pkt *pkt = CMD2PKT(cmd);
8670 
8671         /* Check all acc and dma handles */
8672         if ((mptsas_check_acc_handle(mpt->m_datap) !=
8673             DDI_SUCCESS) ||
8674             (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
8675             DDI_SUCCESS) ||
8676             (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
8677             DDI_SUCCESS) ||
8678             (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
8679             DDI_SUCCESS) ||
8680             (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
8681             DDI_SUCCESS) ||
8682             (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
8683             DDI_SUCCESS) ||


8718             DDI_SUCCESS)))) {
8719                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8720                 pkt->pkt_reason = CMD_TRAN_ERR;
8721                 pkt->pkt_statistics = 0;
8722         }
8723         if (cmd->cmd_arqhandle &&
8724             (mptsas_check_dma_handle(cmd->cmd_arqhandle) != DDI_SUCCESS)) {
8725                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8726                 pkt->pkt_reason = CMD_TRAN_ERR;
8727                 pkt->pkt_statistics = 0;
8728         }
8729         if (cmd->cmd_ext_arqhandle &&
8730             (mptsas_check_dma_handle(cmd->cmd_ext_arqhandle) != DDI_SUCCESS)) {
8731                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8732                 pkt->pkt_reason = CMD_TRAN_ERR;
8733                 pkt->pkt_statistics = 0;
8734         }
8735 }
8736 
8737 /*
8738  * mptsas_doneq_add0 is similar to mptsas_doneq_add except that it is called
8739  * with m_intr_mutex already held.





8740  */
8741 static inline void
8742 mptsas_doneq_add0(mptsas_t *mpt, mptsas_cmd_t *cmd)
8743 {
8744         struct scsi_pkt *pkt = CMD2PKT(cmd);
8745 
8746         NDBG31(("mptsas_doneq_add0: cmd=0x%p", (void *)cmd));
8747 
8748         ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
8749         cmd->cmd_linkp = NULL;
8750         cmd->cmd_flags |= CFLAG_FINISHED;
8751         cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
8752 


8753         /*
8754          * Only add scsi pkts that have completion routines to
8755          * the doneq; NOINTR cmds do not have callbacks.
8756          */
8757         if (pkt && (pkt->pkt_comp)) {
8758                 *mpt->m_donetail = cmd;
8759                 mpt->m_donetail = &cmd->cmd_linkp;
8760                 mpt->m_doneq_len++;
8761         }
8762 }
8763 
8764 /*
8765  * These routines manipulate the queue of commands that
8766  * are waiting for their completion routines to be called.
8767  * The queue is usually in FIFO order but on an MP system
8768  * it's possible for the completion routines to get out
8769  * of order. If that's a problem you need to add a global
8770  * mutex around the code that calls the completion routine
8771  * in the interrupt handler.
8772  */
8773 static void
8774 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8775 {
8776         ASSERT(mutex_owned(&mpt->m_mutex));
8777 
8778         mptsas_fma_check(mpt, cmd);
8779 
8780         mutex_enter(&mpt->m_intr_mutex);
8781         mptsas_doneq_add0(mpt, cmd);
8782         mutex_exit(&mpt->m_intr_mutex);
8783 }
8784 
8785 static mptsas_cmd_t *
8786 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
8787 {
8788         mptsas_cmd_t                    *cmd;
8789         mptsas_doneq_thread_list_t      *item = &mpt->m_doneq_thread_id[t];
8790 
8791         /* pop one off the done queue */
8792         if ((cmd = item->doneq) != NULL) {
8793                 /* if the queue is now empty fix the tail pointer */
8794                 NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
8795                 if ((item->doneq = cmd->cmd_linkp) == NULL) {
8796                         item->donetail = &item->doneq;
8797                 }
8798                 cmd->cmd_linkp = NULL;
8799                 item->len--;
8800         }
8801         return (cmd);
8802 }
8803 
8804 static void
8805 mptsas_doneq_empty(mptsas_t *mpt)
8806 {
8807         mutex_enter(&mpt->m_intr_mutex);
8808         if (mpt->m_doneq && !mpt->m_in_callback) {
8809                 mptsas_cmd_t    *cmd, *next;
8810                 struct scsi_pkt *pkt;
8811 
8812                 mpt->m_in_callback = 1;
8813                 cmd = mpt->m_doneq;
8814                 mpt->m_doneq = NULL;
8815                 mpt->m_donetail = &mpt->m_doneq;
8816                 mpt->m_doneq_len = 0;
8817 
8818                 mutex_exit(&mpt->m_intr_mutex);
8819 
8820                 /*
8821                  * Only when called from the ISR is m_mutex not held;
8822                  * otherwise it is always called with m_mutex held.
8823                  */
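                     /*
                      * m_mutex (when held) is dropped across the completion
                      * callbacks below, since pkt completion routines may
                      * call back into the driver.
                      */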
8824                 if ((curthread->t_flag & T_INTR_THREAD) == 0)
8825                         mutex_exit(&mpt->m_mutex);
8826                 /*
8827                  * run the completion routines of all the
8828                  * completed commands
8829                  */
8830                 while (cmd != NULL) {
8831                         next = cmd->cmd_linkp;
8832                         cmd->cmd_linkp = NULL;
8833                         /* run this command's completion routine */
8834                         cmd->cmd_flags |= CFLAG_COMPLETED;
8835                         pkt = CMD2PKT(cmd);
8836                         mptsas_pkt_comp(pkt, cmd);
8837                         cmd = next;
8838                 }
8839                 if ((curthread->t_flag & T_INTR_THREAD) == 0)
8840                         mutex_enter(&mpt->m_mutex);
8841                 mpt->m_in_callback = 0;
8842                 return;
8843         }
8844         mutex_exit(&mpt->m_intr_mutex);
8845 }
8846 
8847 /*
8848  * These routines manipulate the target's queue of pending requests
8849  */
8850 void
8851 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8852 {
8853         NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
8854         mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8855         cmd->cmd_queued = TRUE;
8856         if (ptgt)
8857                 ptgt->m_t_nwait++;
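             /*
              * FLAG_HEAD requeues (e.g. the untagged-drain case in
              * mptsas_start_cmd()) go to the front of the waitq.
              */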
8858         if (cmd->cmd_pkt_flags & FLAG_HEAD) {
8859                 mutex_enter(&mpt->m_intr_mutex);
8860                 if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
8861                         mpt->m_waitqtail = &cmd->cmd_linkp;
8862                 }
8863                 mpt->m_waitq = cmd;
8864                 mutex_exit(&mpt->m_intr_mutex);
8865         } else {
8866                 cmd->cmd_linkp = NULL;
8867                 *(mpt->m_waitqtail) = cmd;
8868                 mpt->m_waitqtail = &cmd->cmd_linkp;
8869         }
8870 }
8871 
8872 static mptsas_cmd_t *
8873 mptsas_waitq_rm(mptsas_t *mpt)
8874 {
8875         mptsas_cmd_t    *cmd;
8876         mptsas_target_t *ptgt;
8877         NDBG7(("mptsas_waitq_rm"));
8878 
8879         mutex_enter(&mpt->m_intr_mutex);
8880         MPTSAS_WAITQ_RM(mpt, cmd);
8881         mutex_exit(&mpt->m_intr_mutex);
8882 
8883         NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
8884         if (cmd) {
8885                 ptgt = cmd->cmd_tgt_addr;
8886                 if (ptgt) {
8887                         ptgt->m_t_nwait--;
8888                         ASSERT(ptgt->m_t_nwait >= 0);
8889                 }
8890         }
8891         return (cmd);
8892 }
8893 
8894 /*
8895  * remove specified cmd from the middle of the wait queue.
8896  */
8897 static void
8898 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8899 {
8900         mptsas_cmd_t    *prevp = mpt->m_waitq;
8901         mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8902 
8903         NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8904             (void *)mpt, (void *)cmd));
8905         if (ptgt) {
8906                 ptgt->m_t_nwait--;
8907                 ASSERT(ptgt->m_t_nwait >= 0);
8908         }
8909 
8910         if (prevp == cmd) {
8911                 mutex_enter(&mpt->m_intr_mutex);
8912                 if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
8913                         mpt->m_waitqtail = &mpt->m_waitq;
8914                 mutex_exit(&mpt->m_intr_mutex);
8915 
8916                 cmd->cmd_linkp = NULL;
8917                 cmd->cmd_queued = FALSE;
8918                 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8919                     (void *)mpt, (void *)cmd));
8920                 return;
8921         }
8922 
8923         while (prevp != NULL) {
8924                 if (prevp->cmd_linkp == cmd) {
8925                         if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8926                                 mpt->m_waitqtail = &prevp->cmd_linkp;
8927 
8928                         cmd->cmd_linkp = NULL;
8929                         cmd->cmd_queued = FALSE;
8930                         NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8931                             (void *)mpt, (void *)cmd));
8932                         return;
8933                 }
8934                 prevp = prevp->cmd_linkp;
8935         }
8936         cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
8937 }
8938 













8939 /*






































8940  * device and bus reset handling
8941  *
8942  * Notes:
8943  *      - RESET_ALL:    reset the controller
8944  *      - RESET_TARGET: reset the target specified in scsi_address
8945  */
8946 static int
8947 mptsas_scsi_reset(struct scsi_address *ap, int level)
8948 {
8949         mptsas_t                *mpt = ADDR2MPT(ap);
8950         int                     rval;
8951         mptsas_tgt_private_t    *tgt_private;
8952         mptsas_target_t         *ptgt = NULL;
8953 
8954         tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
8955         ptgt = tgt_private->t_private;
8956         if (ptgt == NULL) {
8957                 return (FALSE);
8958         }
8959         NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,


9082  * commands for a particular target.   For the case of abort task set, this
9083  * function clears the waitq of all commands for a particular target/lun.
9084  */
9085 static void
9086 mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
9087 {
9088         mptsas_slots_t  *slots = mpt->m_active;
9089         mptsas_cmd_t    *cmd, *next_cmd;
9090         int             slot;
9091         uchar_t         reason;
9092         uint_t          stat;
9093 
9094         NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));
9095 
9096         /*
9097          * Make sure the I/O Controller has flushed all cmds
9098          * that are associated with this target (for a target reset)
9099          * or with this target/lun (for an abort task set).
9100          * Account for TM requests, which use the last SMID.
9101          */
9102         mutex_enter(&mpt->m_intr_mutex);
9103         for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
9104                 if ((cmd = slots->m_slot[slot]) == NULL) {
9105                         continue;
9106                 }
9107                 reason = CMD_RESET;
9108                 stat = STAT_DEV_RESET;
9109                 switch (tasktype) {
9110                 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
9111                         if (Tgt(cmd) == target) {









9112                                 NDBG25(("mptsas_flush_target discovered non-"
9113                                     "NULL cmd in slot %d, tasktype 0x%x", slot,
9114                                     tasktype));
9115                                 mptsas_dump_cmd(mpt, cmd);
9116                                 mptsas_remove_cmd0(mpt, cmd);
9117                                 mptsas_set_pkt_reason(mpt, cmd, reason, stat);
9118                                 mptsas_doneq_add0(mpt, cmd);
9119                         }
9120                         break;
9121                 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
9122                         reason = CMD_ABORTED;
9123                         stat = STAT_ABORTED;
9124                         /*FALLTHROUGH*/
9125                 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
9126                         if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
9127 
9128                                 NDBG25(("mptsas_flush_target discovered non-"
9129                                     "NULL cmd in slot %d, tasktype 0x%x", slot,
9130                                     tasktype));
9131                                 mptsas_dump_cmd(mpt, cmd);
9132                                 mptsas_remove_cmd0(mpt, cmd);
9133                                 mptsas_set_pkt_reason(mpt, cmd, reason,
9134                                     stat);
9135                                 mptsas_doneq_add0(mpt, cmd);
9136                         }
9137                         break;
9138                 default:
9139                         break;
9140                 }
9141         }
9142         mutex_exit(&mpt->m_intr_mutex);
9143 
9144         /*
9145          * Flush the waitq of this target's cmds
9146          */
9147         cmd = mpt->m_waitq;
9148 
9149         reason = CMD_RESET;
9150         stat = STAT_DEV_RESET;
9151 
9152         switch (tasktype) {
9153         case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
9154                 while (cmd != NULL) {
9155                         next_cmd = cmd->cmd_linkp;
9156                         if (Tgt(cmd) == target) {
9157                                 mptsas_waitq_delete(mpt, cmd);
9158                                 mptsas_set_pkt_reason(mpt, cmd,
9159                                     reason, stat);
9160                                 mptsas_doneq_add(mpt, cmd);
9161                         }
9162                         cmd = next_cmd;
9163                 }















9164                 break;
9165         case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
9166                 reason = CMD_ABORTED;
9167                 stat =  STAT_ABORTED;
9168                 /*FALLTHROUGH*/
9169         case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
9170                 while (cmd != NULL) {
9171                         next_cmd = cmd->cmd_linkp;
9172                         if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
9173                                 mptsas_waitq_delete(mpt, cmd);
9174                                 mptsas_set_pkt_reason(mpt, cmd,
9175                                     reason, stat);
9176                                 mptsas_doneq_add(mpt, cmd);
9177                         }
9178                         cmd = next_cmd;
9179                 }















9180                 break;
9181         default:
9182                 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
9183                     tasktype);
9184                 break;
9185         }
9186 }
9187 
9188 /*
9189  * Clean up HBA state: abort all outstanding commands and all commands on
9190  * the waitq, and reset the timeout of all targets.
9191  */
9192 static void
9193 mptsas_flush_hba(mptsas_t *mpt)
9194 {
9195         mptsas_slots_t  *slots = mpt->m_active;
9196         mptsas_cmd_t    *cmd;
9197         int             slot;
9198 
9199         NDBG25(("mptsas_flush_hba"));
9200 
9201         /*
9202          * The I/O Controller should have already sent back
9203          * all commands via the scsi I/O reply frame.  Make
9204          * sure all commands have been flushed.
9205          * Account for the TM request, which uses the last SMID.
9206          */
9207         mutex_enter(&mpt->m_intr_mutex);
9208         for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
9209                 if ((cmd = slots->m_slot[slot]) == NULL) {
9210                         continue;
9211                 }
9212 
9213                 if (cmd->cmd_flags & CFLAG_CMDIOC) {
9214                         /*
9215                          * Need to make sure to tell everyone that might be
9216                          * waiting on this command that it's going to fail.  If
9217                          * we get here, this command will never timeout because
9218                          * the active command table is going to be re-allocated,
9219                          * so there will be nothing to check against a time out.
9220                          * Instead, mark the command as failed due to reset.
9221                          */
9222                         mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
9223                             STAT_BUS_RESET);
9224                         if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
9225                             (cmd->cmd_flags & CFLAG_CONFIG) ||
9226                             (cmd->cmd_flags & CFLAG_FW_DIAG)) {
9227                                 cmd->cmd_flags |= CFLAG_FINISHED;
9228                                 cv_broadcast(&mpt->m_passthru_cv);
9229                                 cv_broadcast(&mpt->m_config_cv);
9230                                 cv_broadcast(&mpt->m_fw_diag_cv);
9231                         }
9232                         continue;
9233                 }
9234 
9235                 NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
9236                     slot));
9237                 mptsas_dump_cmd(mpt, cmd);
9238 
9239                 mptsas_remove_cmd0(mpt, cmd);
9240                 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
9241                 mptsas_doneq_add0(mpt, cmd);
9242         }
9243         mutex_exit(&mpt->m_intr_mutex);
9244 
9245         /*
9246          * Flush the waitq.
9247          */
9248         while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
9249                 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
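                     /*
                      * Internal IOC cmds (passthru/config/FW diag) have
                      * waiters blocked on a cv rather than a pkt completion
                      * routine, so wake them instead of adding to the doneq.
                      */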
9250                 if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
9251                     (cmd->cmd_flags & CFLAG_CONFIG) ||
9252                     (cmd->cmd_flags & CFLAG_FW_DIAG)) {
9253                         cmd->cmd_flags |= CFLAG_FINISHED;
9254                         cv_broadcast(&mpt->m_passthru_cv);
9255                         cv_broadcast(&mpt->m_config_cv);
9256                         cv_broadcast(&mpt->m_fw_diag_cv);
9257                 } else {
9258                         mptsas_doneq_add(mpt, cmd);
9259                 }
9260         }




















9261 }
9262 
9263 /*
9264  * set pkt_reason and OR in pkt_statistics flag
9265  */
9266 static void
9267 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
9268     uint_t stat)
9269 {
9270 #ifndef __lock_lint
9271         _NOTE(ARGUNUSED(mpt))
9272 #endif
9273 
9274         NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
9275             (void *)cmd, reason, stat));
9276 
9277         if (cmd) {
9278                 if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
9279                         cmd->cmd_pkt->pkt_reason = reason;
9280                 }


9289 
9290         mutex_enter(&mptsas_global_mutex);
9291         if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
9292                 mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
9293                     drv_usectohz((clock_t)
9294                     MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
9295                 ASSERT(mptsas_reset_watch != NULL);
9296         }
9297         mutex_exit(&mptsas_global_mutex);
9298 }
9299 
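     /*
      * Hold the throttle and start the reset-delay countdown for every
      * target, then arm the reset-delay watch.
      */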
9300 static void
9301 mptsas_setup_bus_reset_delay(mptsas_t *mpt)
9302 {
9303         mptsas_target_t *ptgt = NULL;
9304 
9305         NDBG22(("mptsas_setup_bus_reset_delay"));
9306         ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
9307             MPTSAS_HASH_FIRST);
9308         while (ptgt != NULL) {
9309                 mutex_enter(&ptgt->m_tgt_intr_mutex);
9310                 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
9311                 ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
9312                 mutex_exit(&ptgt->m_tgt_intr_mutex);
9313 
9314                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9315                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9316         }
9317 
9318         mptsas_start_watch_reset_delay();
9319 }
9320 
9321 /*
9322  * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
9323  * mpt instance for active reset delays
9324  */
9325 static void
9326 mptsas_watch_reset_delay(void *arg)
9327 {
9328 #ifndef __lock_lint
9329         _NOTE(ARGUNUSED(arg))
9330 #endif
9331 
9332         mptsas_t        *mpt;


9350 
9351         if (not_done) {
9352                 mptsas_start_watch_reset_delay();
9353         }
9354 }
9355 
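     /*
      * Count down this instance's per-target reset delays by one watch tick.
      * Restart the HBA once any delay expires; return -1 if any target is
      * still delaying so the caller keeps the reset-delay watch armed.
      */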
9356 static int
9357 mptsas_watch_reset_delay_subr(mptsas_t *mpt)
9358 {
9359         int             done = 0;
9360         int             restart = 0;
9361         mptsas_target_t *ptgt = NULL;
9362 
9363         NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));
9364 
9365         ASSERT(mutex_owned(&mpt->m_mutex));
9366 
9367         ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
9368             MPTSAS_HASH_FIRST);
9369         while (ptgt != NULL) {
9370                 mutex_enter(&ptgt->m_tgt_intr_mutex);
9371                 if (ptgt->m_reset_delay != 0) {
9372                         ptgt->m_reset_delay -=
9373                             MPTSAS_WATCH_RESET_DELAY_TICK;
9374                         if (ptgt->m_reset_delay <= 0) {
9375                                 ptgt->m_reset_delay = 0;
9376                                 mptsas_set_throttle(mpt, ptgt,
9377                                     MAX_THROTTLE);
9378                                 restart++;
9379                         } else {
9380                                 done = -1;
9381                         }
9382                 }
9383                 mutex_exit(&ptgt->m_tgt_intr_mutex);
9384 
9385                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9386                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9387         }
9388 
9389         if (restart > 0) {
9390                 mptsas_restart_hba(mpt);
9391         }
9392         return (done);
9393 }
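/*
 * Reviewer's note (not from the driver source): the reset-delay logic above
 * is a simple countdown.  mptsas_setup_bus_reset_delay() holds every
 * target's throttle and loads m_reset_delay with m_scsi_reset_delay; the
 * timeout armed by mptsas_start_watch_reset_delay() then fires roughly
 * every MPTSAS_WATCH_RESET_DELAY_TICK milliseconds (the tick is converted
 * with drv_usectohz(tick * 1000)), and each firing subtracts one tick from
 * m_reset_delay.  Once the delay reaches zero the throttle is restored to
 * MAX_THROTTLE and the HBA is restarted.  A sketch of the arithmetic,
 * assuming an illustrative 3000 ms reset delay and a 50 ms tick (the real
 * values come from m_scsi_reset_delay and MPTSAS_WATCH_RESET_DELAY_TICK,
 * which are defined outside this hunk):
 *
 *	3000 / 50 = 60 timer firings before I/O to the target resumes.
 */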
9394 
9395 #ifdef MPTSAS_TEST
9396 static void
9397 mptsas_test_reset(mptsas_t *mpt, int target)
9398 {
9399         mptsas_target_t    *ptgt = NULL;
9400 
9401         if (mptsas_rtest == target) {
9402                 if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
9403                         mptsas_rtest = -1;


9459          * command lists, etc.
9460          */
9461         if (pkt != NULL) {
9462                 /* abort the specified packet */
9463                 sp = PKT2CMD(pkt);
9464 
9465                 if (sp->cmd_queued) {
9466                         NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
9467                             (void *)sp));
9468                         mptsas_waitq_delete(mpt, sp);
9469                         mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
9470                             STAT_ABORTED);
9471                         mptsas_doneq_add(mpt, sp);
9472                         rval = TRUE;
9473                         goto done;
9474                 }
9475 
9476                 /*
9477                  * Have mpt firmware abort this command
9478                  */
9479                 mutex_enter(&mpt->m_intr_mutex);
9480                 if (slots->m_slot[sp->cmd_slot] != NULL) {
9481                         mutex_exit(&mpt->m_intr_mutex);
9482                         rval = mptsas_ioc_task_management(mpt,
9483                             MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
9484                             lun, NULL, 0, 0);
9485 
9486                         /*
9487                          * The transport layer expects only TRUE and FALSE.
9488                          * Therefore, if mptsas_ioc_task_management returns
9489                          * FAILED we will return FALSE.
9490                          */
9491                         if (rval == FAILED)
9492                                 rval = FALSE;
9493                         goto done;
9494                 }
9495                 mutex_exit(&mpt->m_intr_mutex);
9496         }
9497 
9498         /*
9499          * If pkt is NULL then abort task set
9500          */
9501         rval = mptsas_ioc_task_management(mpt,
9502             MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);
9503 
9504         /*
9505          * The transport layer expects only TRUE and FALSE.
9506          * Therefore, if mptsas_ioc_task_management returns
9507          * FAILED we will return FALSE.
9508          */
9509         if (rval == FAILED)
9510                 rval = FALSE;
9511 
9512 #ifdef MPTSAS_TEST
9513         if (rval && mptsas_test_stop) {
9514                 debug_enter("mptsas_do_scsi_abort");
9515         }


9587         default:
9588                 rval = UNDEFINED;
9589                 break;
9590         }
9591 
9592         NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9593 
9594         mutex_exit(&mpt->m_mutex);
9595         return (rval);
9596 }
9597 
9598 /*
9599  * (*tran_setcap).  Set the capability named to the value given.
9600  */
9601 static int
9602 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
9603 {
9604         mptsas_t        *mpt = ADDR2MPT(ap);
9605         int             ckey;
9606         int             rval = FALSE;
9607         mptsas_target_t *ptgt;
9608 
9609         NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
9610             ap->a_target, cap, value, tgtonly));
9611 
9612         if (!tgtonly) {
9613                 return (rval);
9614         }
9615 
9616         mutex_enter(&mpt->m_mutex);
9617 
9618         if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9619                 mutex_exit(&mpt->m_mutex);
9620                 return (UNDEFINED);
9621         }
9622 
9623         switch (ckey) {
9624         case SCSI_CAP_DMA_MAX:
9625         case SCSI_CAP_MSG_OUT:
9626         case SCSI_CAP_PARITY:
9627         case SCSI_CAP_INITIATOR_ID:
9628         case SCSI_CAP_LINKED_CMDS:
9629         case SCSI_CAP_UNTAGGED_QING:
9630         case SCSI_CAP_RESET_NOTIFICATION:
9631                 /*
9632                  * None of these are settable via
9633                  * the capability interface.
9634                  */
9635                 break;
9636         case SCSI_CAP_ARQ:
9637                 /*
9638                  * We cannot turn off arq so return false if asked to
9639                  */
9640                 if (value) {
9641                         rval = TRUE;
9642                 } else {
9643                         rval = FALSE;
9644                 }
9645                 break;
9646         case SCSI_CAP_TAGGED_QING:
9647                 ptgt = ((mptsas_tgt_private_t *)
9648                     (ap->a_hba_tran->tran_tgt_private))->t_private;
9649                 mutex_enter(&ptgt->m_tgt_intr_mutex);
9650                 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
9651                 mutex_exit(&ptgt->m_tgt_intr_mutex);
9652                 rval = TRUE;
9653                 break;
9654         case SCSI_CAP_QFULL_RETRIES:
9655                 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9656                     t_private->m_qfull_retries = (uchar_t)value;
9657                 rval = TRUE;
9658                 break;
9659         case SCSI_CAP_QFULL_RETRY_INTERVAL:
9660                 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9661                     t_private->m_qfull_retry_interval =
9662                     drv_usectohz(value * 1000);
9663                 rval = TRUE;
9664                 break;
9665         default:
9666                 rval = UNDEFINED;
9667                 break;
9668         }
9669         mutex_exit(&mpt->m_mutex);
9670         return (rval);
9671 }
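/*
 * Illustrative sketch (not part of this change): a target driver reaches
 * mptsas_scsi_setcap() above through the generic capability interface.
 * Assuming the standard capability strings ("qfull-retries" and
 * "qfull-retry-interval") resolved by scsi_hba_lookup_capstr(), and a
 * hypothetical struct scsi_device pointer devp in the target driver, a
 * caller might look like:
 *
 *	(void) scsi_ifsetcap(&devp->sd_address, "qfull-retries", 10, 1);
 *	(void) scsi_ifsetcap(&devp->sd_address, "qfull-retry-interval",
 *	    100, 1);
 *
 * Per the code above, the retry-interval value is taken in milliseconds
 * and converted to ticks with drv_usectohz(value * 1000).
 */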


9675  */
9676 /*ARGSUSED*/
9677 static int
9678 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
9679 {
9680         NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
9681 
9682         if (!cap)
9683                 return (FALSE);
9684 
9685         *cidxp = scsi_hba_lookup_capstr(cap);
9686         return (TRUE);
9687 }
9688 
9689 static int
9690 mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
9691 {
9692         mptsas_slots_t  *old_active = mpt->m_active;
9693         mptsas_slots_t  *new_active;
9694         size_t          size;
9695         int             rval = -1, nslot, i;
9696         mptsas_slot_free_e_t    *pe;
9697 
9698         if (mptsas_outstanding_cmds_n(mpt)) {
9699                 NDBG9(("cannot change size of active slots array"));
9700                 return (rval);
9701         }
9702 
9703         size = MPTSAS_SLOTS_SIZE(mpt);
9704         new_active = kmem_zalloc(size, flag);
9705         if (new_active == NULL) {
9706                 NDBG1(("new active alloc failed"));
9707                 return (rval);
9708         }
9709         /*
9710          * Since SMID 0 is reserved and the TM slot is reserved, the
9711          * number of slots that can be used at any one time is
9712          * m_max_requests - 2.
9713          */
9714         new_active->m_n_slots = nslot = (mpt->m_max_requests - 2);
9715         new_active->m_size = size;
9716         new_active->m_tags = 1;
9717 
9718         if (old_active) {
9719                 new_active->m_tgttbl = old_active->m_tgttbl;
9720                 new_active->m_smptbl = old_active->m_smptbl;
9721                 new_active->m_num_raid_configs =
9722                     old_active->m_num_raid_configs;
9723                 for (i = 0; i < new_active->m_num_raid_configs; i++) {
9724                         new_active->m_raidconfig[i] =
9725                             old_active->m_raidconfig[i];
9726                 }
9727                 mptsas_free_active_slots(mpt);
9728         }
9729 
9730         if (max_ncpus & (max_ncpus - 1)) {
9731                 mpt->m_slot_freeq_pair_n = (1 << highbit(max_ncpus));
9732         } else {
9733                 mpt->m_slot_freeq_pair_n = max_ncpus;
9734         }
9735         mpt->m_slot_freeq_pairp = kmem_zalloc(
9736             mpt->m_slot_freeq_pair_n *
9737             sizeof (mptsas_slot_freeq_pair_t), KM_SLEEP);
9738         for (i = 0; i < mpt->m_slot_freeq_pair_n; i++) {
9739                 list_create(&mpt->m_slot_freeq_pairp[i].
9740                     m_slot_allocq.s.m_fq_list,
9741                     sizeof (mptsas_slot_free_e_t),
9742                     offsetof(mptsas_slot_free_e_t, node));
9743                 list_create(&mpt->m_slot_freeq_pairp[i].
9744                     m_slot_releq.s.m_fq_list,
9745                     sizeof (mptsas_slot_free_e_t),
9746                     offsetof(mptsas_slot_free_e_t, node));
9747                 mpt->m_slot_freeq_pairp[i].m_slot_allocq.s.m_fq_n = 0;
9748                 mpt->m_slot_freeq_pairp[i].m_slot_releq.s.m_fq_n = 0;
9749                 mutex_init(&mpt->m_slot_freeq_pairp[i].
9750                     m_slot_allocq.s.m_fq_mutex, NULL, MUTEX_DRIVER,
9751                     DDI_INTR_PRI(mpt->m_intr_pri));
9752                 mutex_init(&mpt->m_slot_freeq_pairp[i].
9753                     m_slot_releq.s.m_fq_mutex, NULL, MUTEX_DRIVER,
9754                     DDI_INTR_PRI(mpt->m_intr_pri));
9755         }
9756         pe = mpt->m_slot_free_ae = kmem_zalloc(nslot *
9757             sizeof (mptsas_slot_free_e_t), KM_SLEEP);
9759         /*
9760          * An array of Mpi2ReplyDescriptorsUnion_t is allocated here.
9761          * We are trying to eliminate the m_mutex from the context
9762          * reply code path in the ISR.  The read of the ReplyDescriptor
9763          * and the update/write of the ReplyIndex must be atomic
9764          * (the poll thread may update them at the same time), so we
9765          * first copy the ReplyDescriptors into this array and update
9766          * the ReplyIndex register while holding the separate
9767          * m_intr_mutex, then drop that mutex and process all of the
9768          * copied entries.  The array length is capped at 128 64-bit
9769          * descriptors (MPI_ADDRESS_COALSCE_MAX), which is assumed to
9770          * be the maximum depth of interrupt coalescing.
9771          */
9771         mpt->m_reply = kmem_zalloc(MPI_ADDRESS_COALSCE_MAX *
9772             sizeof (Mpi2ReplyDescriptorsUnion_t), KM_SLEEP);
9773         for (i = 0; i < nslot; i++, pe++) {
9774                 pe->slot = i + 1; /* SMID 0 is reserved */
9775                 pe->cpuid = i % mpt->m_slot_freeq_pair_n;
9776                 list_insert_tail(&mpt->m_slot_freeq_pairp
9777                     [i % mpt->m_slot_freeq_pair_n]
9778                     .m_slot_allocq.s.m_fq_list, pe);
9779                 mpt->m_slot_freeq_pairp[i % mpt->m_slot_freeq_pair_n]
9780                     .m_slot_allocq.s.m_fq_n++;
9781                 mpt->m_slot_freeq_pairp[i % mpt->m_slot_freeq_pair_n]
9782                     .m_slot_allocq.s.m_fq_n_init++;
9783         }
9784 
9785         mpt->m_active = new_active;
9786         rval = 0;
9787 
9788         return (rval);
9789 }
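/*
 * Reviewer's note (not from the driver source): the slot free list built
 * above is split into m_slot_freeq_pair_n pairs (max_ncpus rounded up to a
 * power of two), and the nslot entries are spread round-robin across the
 * pairs.  Each pair carries an alloc list and a release list, each with its
 * own mutex initialized at DDI_INTR_PRI(m_intr_pri), so the fast path can
 * take and return SMIDs without holding the per-instance m_mutex.
 * Presumably the completion path returns entries to m_slot_releq and the
 * two lists are swapped when the alloc list runs dry; that detail lives
 * outside this hunk.  The m_reply array sized by MPI_ADDRESS_COALSCE_MAX
 * holds the 64-bit reply descriptors copied out under m_intr_mutex, as
 * described in the comment above.
 */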
9790 
9791 static void
9792 mptsas_free_active_slots(mptsas_t *mpt)
9793 {
9794         mptsas_slots_t  *active = mpt->m_active;
9795         size_t          size;
9796         mptsas_slot_free_e_t    *pe;
9797         int     i;
9798 
9799         if (active == NULL)
9800                 return;
9801 
9802         if (mpt->m_slot_freeq_pairp) {
9803                 for (i = 0; i < mpt->m_slot_freeq_pair_n; i++) {
9804                         while ((pe = list_head(&mpt->m_slot_freeq_pairp
9805                             [i].m_slot_allocq.s.m_fq_list)) != NULL) {
9806                                 list_remove(&mpt->m_slot_freeq_pairp[i]
9807                                     .m_slot_allocq.s.m_fq_list, pe);
9808                         }
9809                         list_destroy(&mpt->m_slot_freeq_pairp
9810                             [i].m_slot_allocq.s.m_fq_list);
9811                         while ((pe = list_head(&mpt->m_slot_freeq_pairp
9812                             [i].m_slot_releq.s.m_fq_list)) != NULL) {
9813                                 list_remove(&mpt->m_slot_freeq_pairp[i]
9814                                     .m_slot_releq.s.m_fq_list, pe);
9815                         }
9816                         list_destroy(&mpt->m_slot_freeq_pairp
9817                             [i].m_slot_releq.s.m_fq_list);
9818                         mutex_destroy(&mpt->m_slot_freeq_pairp
9819                             [i].m_slot_allocq.s.m_fq_mutex);
9820                         mutex_destroy(&mpt->m_slot_freeq_pairp
9821                             [i].m_slot_releq.s.m_fq_mutex);
9822                 }
9823                 kmem_free(mpt->m_slot_freeq_pairp, mpt->m_slot_freeq_pair_n *
9824                     sizeof (mptsas_slot_freeq_pair_t));
9825         }
9826         if (mpt->m_slot_free_ae)
9827                 kmem_free(mpt->m_slot_free_ae, mpt->m_active->m_n_slots *
9828                     sizeof (mptsas_slot_free_e_t));
9829 
9830         if (mpt->m_reply)
9831                 kmem_free(mpt->m_reply, MPI_ADDRESS_COALSCE_MAX *
9832                     sizeof (Mpi2ReplyDescriptorsUnion_t));
9833 
9834         size = active->m_size;
9835         kmem_free(active, size);
9836         mpt->m_active = NULL;
9837 }
9838 
9839 /*
9840  * Error logging, printing, and debug print routines.
9841  */
9842 static char *mptsas_label = "mpt_sas";
9843 
9844 /*PRINTFLIKE3*/
9845 void
9846 mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
9847 {
9848         dev_info_t      *dev;
9849         va_list         ap;
9850 
9851         if (mpt) {
9852                 dev = mpt->m_dip;
9853         } else {


9960 
9961 static void
9962 mptsas_watchsubr(mptsas_t *mpt)
9963 {
9964         int             i;
9965         mptsas_cmd_t    *cmd;
9966         mptsas_target_t *ptgt = NULL;
9967 
9968         NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));
9969 
9970 #ifdef MPTSAS_TEST
9971         if (mptsas_enable_untagged) {
9972                 mptsas_test_untagged++;
9973         }
9974 #endif
9975 
9976         /*
9977          * Check for commands stuck in active slot
9978          * Account for TM requests, which use the last SMID.
9979          */
9980         mutex_enter(&mpt->m_intr_mutex);
9981         for (i = 0; i <= mpt->m_active->m_n_slots; i++) {
9982                 if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
9983                         if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
9984                                 cmd->cmd_active_timeout -=
9985                                     mptsas_scsi_watchdog_tick;
9986                                 if (cmd->cmd_active_timeout <= 0) {
9987                                         /*
9988                                          * There seems to be a command stuck
9989                                          * in the active slot.  Drain throttle.
9990                                          */
9991                                         ptgt = cmd->cmd_tgt_addr;
9992                                         mutex_enter(&ptgt->m_tgt_intr_mutex);
9993                                         mptsas_set_throttle(mpt, ptgt,
9994                                             DRAIN_THROTTLE);
9995                                         mutex_exit(&ptgt->m_tgt_intr_mutex);
9996                                 }
9997                         }
9998                         if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
9999                             (cmd->cmd_flags & CFLAG_CONFIG) ||
10000                             (cmd->cmd_flags & CFLAG_FW_DIAG)) {
10001                                 cmd->cmd_active_timeout -=
10002                                     mptsas_scsi_watchdog_tick;
10003                                 if (cmd->cmd_active_timeout <= 0) {
10004                                         /*
10005                                          * passthrough command timeout
10006                                          */
10007                                         cmd->cmd_flags |= (CFLAG_FINISHED |
10008                                             CFLAG_TIMEOUT);
10009                                         cv_broadcast(&mpt->m_passthru_cv);
10010                                         cv_broadcast(&mpt->m_config_cv);
10011                                         cv_broadcast(&mpt->m_fw_diag_cv);
10012                                 }
10013                         }
10014                 }
10015         }
10016         mutex_exit(&mpt->m_intr_mutex);
10017 
10018         ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
10019             MPTSAS_HASH_FIRST);
10020         while (ptgt != NULL) {
10021                 /*
10022                  * In order to avoid using m_mutex in the key code path in
10023                  * the ISR, separate mutexes were introduced to protect the
10024                  * elements the ISR touches.
10025                  */
10026                 mutex_enter(&ptgt->m_tgt_intr_mutex);
10027 
10028                 /*
10029                  * If we were draining due to a qfull condition,
10030                  * go back to full throttle.
10031                  */
10032                 if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
10033                     (ptgt->m_t_throttle > HOLD_THROTTLE) &&
10034                     (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
10035                         mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10036                         mptsas_restart_hba(mpt);
10037                 }
10038 
10039                 if ((ptgt->m_t_ncmds > 0) &&
10040                     (ptgt->m_timebase)) {
10041 
10042                         if (ptgt->m_timebase <=
10043                             mptsas_scsi_watchdog_tick) {
10044                                 ptgt->m_timebase +=
10045                                     mptsas_scsi_watchdog_tick;
10046                                 mutex_exit(&ptgt->m_tgt_intr_mutex);
10047                                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
10048                                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
10049                                 continue;
10050                         }
10051 
10052                         ptgt->m_timeout -= mptsas_scsi_watchdog_tick;
10053 
10054                         if (ptgt->m_timeout < 0) {
10055                                 mutex_exit(&ptgt->m_tgt_intr_mutex);
10056                                 mptsas_cmd_timeout(mpt, ptgt->m_devhdl);
10057                                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
10058                                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
10059                                 continue;
10060                         }
10061 
10062                         if ((ptgt->m_timeout) <=
10063                             mptsas_scsi_watchdog_tick) {
10064                                 NDBG23(("pending timeout"));
10065                                 mptsas_set_throttle(mpt, ptgt,
10066                                     DRAIN_THROTTLE);
10067                         }
10068                 }
10069                 mutex_exit(&ptgt->m_tgt_intr_mutex);
10070                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
10071                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
10072         }
10073 }
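/*
 * Reviewer's note (not from the driver source): the per-target timeout in
 * mptsas_watchsubr() is two-staged.  When m_timeout falls to within one
 * watchdog tick the throttle is set to DRAIN_THROTTLE so no new commands
 * are issued; only when m_timeout actually goes negative does
 * mptsas_cmd_timeout() below run and attempt a target reset.  Passthrough,
 * config and FW-diag commands are handled separately in the slot scan:
 * they are simply flagged CFLAG_FINISHED | CFLAG_TIMEOUT and their waiters
 * woken via the m_passthru_cv / m_config_cv / m_fw_diag_cv condition
 * variables.
 */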
10074 
10075 /*
10076  * timeout recovery
10077  */
10078 static void
10079 mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl)
10080 {
10081 
10082         NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
10083         mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
10084             "Target %d", devhdl);
10085 
10086         /*
10087          * Attempt a reset of the target to recover from the
10088          * command timeout.
10089          */
10090         NDBG29(("mptsas_cmd_timeout: device reset"));
10091         if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
10092                 mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
10093                     "recovery failed!", devhdl);
10094         }
10095 }
10096 
10097 /*
10098  * Device / Hotplug control
10099  */
10100 static int
10101 mptsas_scsi_quiesce(dev_info_t *dip)
10102 {
10103         mptsas_t        *mpt;
10104         scsi_hba_tran_t *tran;
10105 
10106         tran = ddi_get_driver_private(dip);
10107         if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10108                 return (-1);
10109 
10110         return (mptsas_quiesce_bus(mpt));
10111 }
10112 
10113 static int
10114 mptsas_scsi_unquiesce(dev_info_t *dip)
10115 {
10116         mptsas_t                *mpt;
10117         scsi_hba_tran_t *tran;
10118 
10119         tran = ddi_get_driver_private(dip);
10120         if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10121                 return (-1);
10122 
10123         return (mptsas_unquiesce_bus(mpt));
10124 }
10125 
10126 static int
10127 mptsas_quiesce_bus(mptsas_t *mpt)
10128 {
10129         mptsas_target_t *ptgt = NULL;
10130 
10131         NDBG28(("mptsas_quiesce_bus"));
10132         mutex_enter(&mpt->m_mutex);
10133 
10134         /* Set all the throttles to zero */
10135         ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
10136             MPTSAS_HASH_FIRST);
10137         while (ptgt != NULL) {
10138                 mutex_enter(&ptgt->m_tgt_intr_mutex);
10139                 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
10140                 mutex_exit(&ptgt->m_tgt_intr_mutex);
10141 
10142                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
10143                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
10144         }
10145 
10146         /* If there are any outstanding commands in the queue */
10147         mutex_enter(&mpt->m_intr_mutex);
10148         if (mptsas_outstanding_cmds_n(mpt)) {
10149                 mutex_exit(&mpt->m_intr_mutex);
10150                 mpt->m_softstate |= MPTSAS_SS_DRAINING;
10151                 mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
10152                     mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
10153                 if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
10154                         /*
10155                          * Quiesce has been interrupted
10156                          */
10157                         mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
10158                         ptgt = (mptsas_target_t *)mptsas_hash_traverse(
10159                             &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
10160                         while (ptgt != NULL) {
10161                                 mutex_enter(&ptgt->m_tgt_intr_mutex);
10162                                 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10163                                 mutex_exit(&ptgt->m_tgt_intr_mutex);
10164 
10165                                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
10166                                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
10167                         }
10168                         mptsas_restart_hba(mpt);
10169                         if (mpt->m_quiesce_timeid != 0) {
10170                                 timeout_id_t tid = mpt->m_quiesce_timeid;
10171                                 mpt->m_quiesce_timeid = 0;
10172                                 mutex_exit(&mpt->m_mutex);
10173                                 (void) untimeout(tid);
10174                                 return (-1);
10175                         }
10176                         mutex_exit(&mpt->m_mutex);
10177                         return (-1);
10178                 } else {
10179                         /* Bus has been quiesced */
10180                         ASSERT(mpt->m_quiesce_timeid == 0);
10181                         mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
10182                         mpt->m_softstate |= MPTSAS_SS_QUIESCED;
10183                         mutex_exit(&mpt->m_mutex);
10184                         return (0);
10185                 }
10186         }
10187         mutex_exit(&mpt->m_intr_mutex);
10188         /* Bus was not busy - QUIESCED */
10189         mutex_exit(&mpt->m_mutex);
10190 
10191         return (0);
10192 }
10193 
10194 static int
10195 mptsas_unquiesce_bus(mptsas_t *mpt)
10196 {
10197         mptsas_target_t *ptgt = NULL;
10198 
10199         NDBG28(("mptsas_unquiesce_bus"));
10200         mutex_enter(&mpt->m_mutex);
10201         mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
10202         ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
10203             MPTSAS_HASH_FIRST);
10204         while (ptgt != NULL) {
10205                 mutex_enter(&ptgt->m_tgt_intr_mutex);
10206                 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10207                 mutex_exit(&ptgt->m_tgt_intr_mutex);
10208 
10209                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
10210                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
10211         }
10212         mptsas_restart_hba(mpt);
10213         mutex_exit(&mpt->m_mutex);
10214         return (0);
10215 }
10216 
10217 static void
10218 mptsas_ncmds_checkdrain(void *arg)
10219 {
10220         mptsas_t        *mpt = arg;
10221         mptsas_target_t *ptgt = NULL;
10222 
10223         mutex_enter(&mpt->m_mutex);
10224         if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
10225                 mpt->m_quiesce_timeid = 0;
10226                 mutex_enter(&mpt->m_intr_mutex);
10227                 if (mptsas_outstanding_cmds_n(mpt)) {
10228                         mutex_exit(&mpt->m_intr_mutex);
10229                         /*
10230                          * The throttle may have been reset because
10231                          * of a SCSI bus reset
10232                          */
10233                         ptgt = (mptsas_target_t *)mptsas_hash_traverse(
10234                             &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
10235                         while (ptgt != NULL) {
10236                                 mutex_enter(&ptgt->m_tgt_intr_mutex);
10237                                 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
10238                                 mutex_exit(&ptgt->m_tgt_intr_mutex);
10239 
10240                                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
10241                                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
10242                         }
10243 
10244                         mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
10245                             mpt, (MPTSAS_QUIESCE_TIMEOUT *
10246                             drv_usectohz(1000000)));
10247                 } else {
10248                         mutex_exit(&mpt->m_intr_mutex);
10249                         /* Command queue has been drained */
10250                         cv_signal(&mpt->m_cv);
10251                 }
10252         }
10253         mutex_exit(&mpt->m_mutex);
10254 }
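/*
 * Reviewer's note (not from the driver source): quiesce is a cooperative
 * drain.  mptsas_quiesce_bus() holds every target's throttle, and if
 * commands are still outstanding it sets MPTSAS_SS_DRAINING, arms
 * mptsas_ncmds_checkdrain() and sleeps on m_cv with cv_wait_sig().  The
 * checkdrain callback re-holds the throttles (they may have been reset by
 * a bus reset) and re-arms itself every MPTSAS_QUIESCE_TIMEOUT seconds
 * until the outstanding count reaches zero, at which point it signals m_cv
 * and the quiesce completes.  A signal delivered to the waiting thread
 * instead restores the throttles, restarts the HBA and fails the quiesce.
 */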
10255 
10256 /*ARGSUSED*/
10257 static void
10258 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
10259 {
10260         int     i;
10261         uint8_t *cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
10262         char    buf[128];
10263 
10264         buf[0] = '\0';
10265         NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
10266             Tgt(cmd), Lun(cmd)));
10267         (void) sprintf(&buf[0], "\tcdb=[");
10268         for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
10269                 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
10270         }


10394                 /*
10395                  * Setup descriptor info.  RAID passthrough must use the
10396                  * default request descriptor which is already set, so if this
10397                  * is a SCSI IO request, change the descriptor to SCSI IO.
10398                  */
10399                 if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
10400                         desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
10401                         request_desc_high = (ddi_get16(acc_hdl,
10402                             &scsi_io_req->DevHandle) << 16);
10403                 }
10404         }
10405 
10406         /*
10407          * We must wait until this message has completed before
10408          * beginning the next one, so wait here for this one to
10409          * finish.
10410          */
10411         (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
10412         request_desc_low = (cmd->cmd_slot << 16) + desc_type;
10413         cmd->cmd_rfm = NULL;
10414         mpt->m_active->m_slot[cmd->cmd_slot] = cmd;
10415         MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
10416         if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
10417             (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
10418                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10419         }
10420 }
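/*
 * Reviewer's note (not from the driver source): the request descriptor
 * written by MPTSAS_START_CMD is built from the pieces visible above.  The
 * low 32 bits carry the SMID in bits 31:16 and the descriptor type flags in
 * the low byte (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO for SCSI IO, otherwise the
 * default type); for SCSI IO the high 32 bits carry the DevHandle shifted
 * into bits 31:16.  The ddi_dma_sync(..., DDI_DMA_SYNC_FORDEV) beforehand
 * ensures the request frame is visible to the IOC before the descriptor is
 * posted.
 */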
10421 
10422 
10423 
10424 static int
10425 mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
10426     uint8_t *data, uint32_t request_size, uint32_t reply_size,
10427     uint32_t data_size, uint32_t direction, uint8_t *dataout,
10428     uint32_t dataout_size, short timeout, int mode)
10429 {
10430         mptsas_pt_request_t             pt;
10431         mptsas_dma_alloc_state_t        data_dma_state;
10432         mptsas_dma_alloc_state_t        dataout_dma_state;
10433         caddr_t                         memp;
10434         mptsas_cmd_t                    *cmd = NULL;


10825         } else {
10826                 pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
10827                     (mpt->m_req_frame + (mpt->m_req_frame_size *
10828                     cmd->cmd_slot));
10829                 bzero(pDiag_release_msg, mpt->m_req_frame_size);
10830                 ddi_put8(mpt->m_acc_req_frame_hdl,
10831                     &pDiag_release_msg->Function, diag->function);
10832                 ddi_put8(mpt->m_acc_req_frame_hdl,
10833                     &pDiag_release_msg->BufferType,
10834                     diag->pBuffer->buffer_type);
10835         }
10836 
10837         /*
10838          * Send the message
10839          */
10840         (void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
10841             DDI_DMA_SYNC_FORDEV);
10842         request_desc_low = (cmd->cmd_slot << 16) +
10843             MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
10844         cmd->cmd_rfm = NULL;
10845         mpt->m_active->m_slot[cmd->cmd_slot] = cmd;
10846         MPTSAS_START_CMD(mpt, request_desc_low, 0);
10847         if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
10848             DDI_SUCCESS) ||
10849             (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
10850             DDI_SUCCESS)) {
10851                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10852         }
10853 }
10854 
10855 static int
10856 mptsas_post_fw_diag_buffer(mptsas_t *mpt,
10857     mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
10858 {
10859         mptsas_diag_request_t           diag;
10860         int                             status, slot_num, post_flags = 0;
10861         mptsas_cmd_t                    *cmd = NULL;
10862         struct scsi_pkt                 *pkt;
10863         pMpi2DiagBufferPostReply_t      reply;
10864         uint16_t                        iocstatus;
10865         uint32_t                        iocloginfo, transfer_length;


11948 
11949         mutex_exit(&mpt->m_mutex);
11950         return (status);
11951 }
11952 
11953 static int
11954 mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
11955     int *rval)
11956 {
11957         int                     status = 0;
11958         mptsas_t                *mpt;
11959         mptsas_update_flash_t   flashdata;
11960         mptsas_pass_thru_t      passthru_data;
11961         mptsas_adapter_data_t   adapter_data;
11962         mptsas_pci_info_t       pci_info;
11963         int                     copylen;
11964 
11965         int                     iport_flag = 0;
11966         dev_info_t              *dip = NULL;
11967         mptsas_phymask_t        phymask = 0;
11968         struct devctl_iocdata   *dcp = NULL;
11969         uint32_t                slotstatus = 0;
11970         char                    *addr = NULL;
11971         mptsas_target_t         *ptgt = NULL;
11972 
11973         *rval = MPTIOCTL_STATUS_GOOD;
11974         if (secpolicy_sys_config(credp, B_FALSE) != 0) {
11975                 return (EPERM);
11976         }
11977 
11978         mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
11979         if (mpt == NULL) {
11980                 /*
11981                  * Called from an iport node; get the soft state via the dip.
11982                  */
11983                 iport_flag = 1;
11984                 dip = mptsas_get_dip_from_dev(dev, &phymask);
11985                 if (dip == NULL) {
11986                         return (ENXIO);
11987                 }
11988                 mpt = DIP2MPT(dip);
11989         }
11990         /* Make sure power level is D0 before accessing registers */
11991         mutex_enter(&mpt->m_mutex);
11992         if (mpt->m_options & MPTSAS_OPT_PM) {
11993                 (void) pm_busy_component(mpt->m_dip, 0);
11994                 if (mpt->m_power_level != PM_LEVEL_D0) {
11995                         mutex_exit(&mpt->m_mutex);
11996                         if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
11997                             DDI_SUCCESS) {
11998                                 mptsas_log(mpt, CE_WARN,
11999                                     "mptsas%d: mptsas_ioctl: Raise power "
12000                                     "request failed.", mpt->m_instance);
12001                                 (void) pm_idle_component(mpt->m_dip, 0);
12002                                 return (ENXIO);
12003                         }
12004                 } else {
12005                         mutex_exit(&mpt->m_mutex);
12006                 }
12007         } else {
12008                 mutex_exit(&mpt->m_mutex);
12009         }
12010 
12011         if (iport_flag) {
12012                 status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
12013                 if (status != 0) {
12014                         goto out;
12015                 }
12016                 /*
12017                  * The following code controls the OK2RM LED; it does not
12018                  * affect the ioctl return status.
12019                  */
12020                 if ((cmd == DEVCTL_DEVICE_ONLINE) ||
12021                     (cmd == DEVCTL_DEVICE_OFFLINE)) {
12022                         if (ndi_dc_allochdl((void *)data, &dcp) !=
12023                             NDI_SUCCESS) {
12024                                 goto out;
12025                         }
12026                         addr = ndi_dc_getaddr(dcp);
12027                         ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
12028                         if (ptgt == NULL) {
12029                                 NDBG14(("mptsas_ioctl led control: tgt %s not "
12030                                     "found", addr));
12031                                 ndi_dc_freehdl(dcp);
12032                                 goto out;
12033                         }
12034                         mutex_enter(&mpt->m_mutex);
12035                         if (cmd == DEVCTL_DEVICE_ONLINE) {
12036                                 ptgt->m_tgt_unconfigured = 0;
12037                         } else if (cmd == DEVCTL_DEVICE_OFFLINE) {
12038                                 ptgt->m_tgt_unconfigured = 1;
12039                         }
12040                         slotstatus = 0;
12041 #ifdef MPTSAS_GET_LED
12042                         /*
12043                          * Getting the LED status does not return a valid or
12044                          * reasonable state, so ignore it and write the
12045                          * required value directly.
12046                          */
12047                         if (mptsas_get_led_status(mpt, ptgt, &slotstatus) !=
12048                             DDI_SUCCESS) {
12049                                 NDBG14(("mptsas_ioctl: get LED for tgt %s "
12050                                     "failed %x", addr, slotstatus));
12051                                 slotstatus = 0;
12052                         }
12053                         NDBG14(("mptsas_ioctl: LED status %x for %s",
12054                             slotstatus, addr));
12055 #endif
12056                         if (cmd == DEVCTL_DEVICE_OFFLINE) {
12057                                 slotstatus |=
12058                                     MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
12059                         } else {
12060                                 slotstatus &=
12061                                     ~MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
12062                         }
12063                         if (mptsas_set_led_status(mpt, ptgt, slotstatus) !=
12064                             DDI_SUCCESS) {
12065                                 NDBG14(("mptsas_ioctl: set LED for tgt %s "
12066                                     "failed %x", addr, slotstatus));
12067                         }
12068                         mutex_exit(&mpt->m_mutex);
12069                         ndi_dc_freehdl(dcp);
12070                 }
12071                 goto out;
12072         }
12073         switch (cmd) {
12074                 case MPTIOCTL_UPDATE_FLASH:
12075                         if (ddi_copyin((void *)data, &flashdata,
12076                                 sizeof (struct mptsas_update_flash), mode)) {
12077                                 status = EFAULT;
12078                                 break;
12079                         }
12080 
12081                         mutex_enter(&mpt->m_mutex);
12082                         if (mptsas_update_flash(mpt,
12083                             (caddr_t)(long)flashdata.PtrBuffer,
12084                             flashdata.ImageSize, flashdata.ImageType, mode)) {
12085                                 status = EFAULT;
12086                         }
12087 
12088                         /*
12089                          * Reset the chip to start using the new firmware,
12090                          * even if the download failed.
12091                          */
12092                         mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;


12208                          * which does this.
12209                          */
12210                         status = mptsas_event_report(mpt,
12211                             (mptsas_event_report_t *)data, mode, rval);
12212                         break;
12213                 case MPTIOCTL_REG_ACCESS:
12214                         /*
12215                          * The user has requested register access.  Call our
12216                          * routine which does this.
12217                          */
12218                         status = mptsas_reg_access(mpt,
12219                             (mptsas_reg_access_t *)data, mode);
12220                         break;
12221                 default:
12222                         status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
12223                             rval);
12224                         break;
12225         }
12226 
12227 out:
12228         if (mpt->m_options & MPTSAS_OPT_PM)
12229                 (void) pm_idle_component(mpt->m_dip, 0);
12230         return (status);
12231 }
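/*
 * Reviewer's note (not from the driver source): every path through the
 * ioctl that marked the device busy pairs with the pm_idle_component()
 * call at the "out:" label, so the power-management busy count stays
 * balanced even on the early "goto out" error paths.  ioctls arriving on
 * an iport node (mpt looked up through the dip) are forwarded to
 * scsi_hba_ioctl() first, with the OK2RM LED handling layered on top for
 * the online/offline requests.
 */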
12232 
12233 int
12234 mptsas_restart_ioc(mptsas_t *mpt)
12235 {
12236         int             rval = DDI_SUCCESS;
12237         mptsas_target_t *ptgt = NULL;
12238 
12239         ASSERT(mutex_owned(&mpt->m_mutex));
12240 
12241         /*
12242          * Set a flag telling I/O path that we're processing a reset.  This is
12243          * needed because after the reset is complete, the hash table still
12244          * needs to be rebuilt.  If I/Os are started before the hash table is
12245          * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
12246          * so that they can be retried.
12247          */
12248         mpt->m_in_reset = TRUE;
12249 
12250         /*
12251          * Set all throttles to HOLD
12252          */
12253         ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
12254             MPTSAS_HASH_FIRST);
12255         while (ptgt != NULL) {
12256                 mutex_enter(&ptgt->m_tgt_intr_mutex);
12257                 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
12258                 mutex_exit(&ptgt->m_tgt_intr_mutex);
12259 
12260                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
12261                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
12262         }
12263 
12264         /*
12265          * Disable interrupts
12266          */
12267         MPTSAS_DISABLE_INTR(mpt);
12268 
12269         /*
12270          * Abort all commands: outstanding commands, commands in waitq
12271          */
12272         mptsas_flush_hba(mpt);
12273 
12274         /*
12275          * Reinitialize the chip.
12276          */
12277         if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
12278                 rval = DDI_FAILURE;
12279         }
12280 
12281         /*
12282          * Enable interrupts again
12283          */
12284         MPTSAS_ENABLE_INTR(mpt);
12285 
12286         /*
12287          * If mptsas_init_chip was successful, update the driver data.
12288          */
12289         if (rval == DDI_SUCCESS) {
12290                 mptsas_update_driver_data(mpt);
12291         }
12292 
12293         /*
12294          * Reset the throttles
12295          */
12296         ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
12297             MPTSAS_HASH_FIRST);
12298         while (ptgt != NULL) {
12299                 mutex_enter(&ptgt->m_tgt_intr_mutex);
12300                 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
12301                 mutex_exit(&ptgt->m_tgt_intr_mutex);
12302 
12303                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
12304                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
12305         }
12306 
12307         mptsas_doneq_empty(mpt);
12308         mptsas_restart_hba(mpt);
12309 
12310         if (rval != DDI_SUCCESS) {
12311                 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
12312                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
12313         }
12314 
12315         /*
12316          * Clear the reset flag so that I/Os can continue.
12317          */
12318         mpt->m_in_reset = FALSE;
12319 
12320         return (rval);
12321 }
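/*
 * Reviewer's note (not from the driver source): the ordering in
 * mptsas_restart_ioc() matters.  m_in_reset is raised and all throttles
 * are held before interrupts are disabled and the HBA is flushed, so no
 * new I/O can slip in while the chip is reinitialized; only after
 * mptsas_init_chip() and the throttle restore are the done queue emptied
 * and the HBA restarted, and m_in_reset is cleared last so retried I/Os
 * see a consistent state.
 */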


12591                         };
12592         uint16_t        pmcsr_stat;
12593 
12594         if (mptsas_get_pci_cap(mpt) == FALSE) {
12595                 return (DDI_FAILURE);
12596         }
12597         /*
12598          * If the PCI capabilities do not support PM, there is no need
12599          * to register the pm-components.
12600          */
12601         if (!(mpt->m_options & MPTSAS_OPT_PM))
12602                 return (DDI_SUCCESS);
12603         /*
12604          * If power management is supported by this chip, create
12605          * pm-components property for the power management framework
12606          */
12607         (void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
12608         pmc[0] = pmc_name;
12609         if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
12610             "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
12611                 mutex_enter(&mpt->m_intr_mutex);
12612                 mpt->m_options &= ~MPTSAS_OPT_PM;
12613                 mutex_exit(&mpt->m_intr_mutex);
12614                 mptsas_log(mpt, CE_WARN,
12615                     "mptsas%d: pm-component property creation failed.",
12616                     mpt->m_instance);
12617                 return (DDI_FAILURE);
12618         }
12619 
12620         /*
12621          * Power on device.
12622          */
12623         (void) pm_busy_component(mpt->m_dip, 0);
12624         pmcsr_stat = pci_config_get16(mpt->m_config_handle,
12625             mpt->m_pmcsr_offset);
12626         if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
12627                 mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
12628                     mpt->m_instance);
12629                 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
12630                     PCI_PMCSR_D0);
12631         }
12632         if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
12633                 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
12634                 return (DDI_FAILURE);
12635         }
12636         mutex_enter(&mpt->m_intr_mutex);
12637         mpt->m_power_level = PM_LEVEL_D0;
12638         mutex_exit(&mpt->m_intr_mutex);
12639         /*
12640          * Set pm idle delay.
12641          */
12642         mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
12643             mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
12644 
12645         return (DDI_SUCCESS);
12646 }
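/*
 * Reviewer's note (not from the driver source): pmc[] is expected to name
 * three pm-components entries -- the NAME=mptsasN string built above plus
 * two power-level descriptions (presumably the usual "0=Off (PCI D3 State)"
 * and "3=On (PCI D0 State)" strings; their definitions sit outside this
 * hunk).  After the property is created, the device is forced to D0 via
 * the PMCSR register if necessary and the framework is informed with
 * pm_power_has_changed().
 */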
12647 
12648 static int
12649 mptsas_register_intrs(mptsas_t *mpt)
12650 {
12651         dev_info_t *dip;
12652         int intr_types;
12653 
12654         dip = mpt->m_dip;
12655 
12656         /* Get supported interrupt types */
12657         if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
12658                 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "


12707         NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
12708 
12709         /* Get number of interrupts */
12710         ret = ddi_intr_get_nintrs(dip, intr_type, &count);
12711         if ((ret != DDI_SUCCESS) || (count <= 0)) {
12712                 mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
12713                     "ret %d count %d\n", ret, count);
12714 
12715                 return (DDI_FAILURE);
12716         }
12717 
12718         /* Get number of available interrupts */
12719         ret = ddi_intr_get_navail(dip, intr_type, &avail);
12720         if ((ret != DDI_SUCCESS) || (avail == 0)) {
12721                 mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
12722                     "ret %d avail %d\n", ret, avail);
12723 
12724                 return (DDI_FAILURE);
12725         }
12726 
12727         if (avail < count) {
12728                 mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nintrs() returned %d, "
12729                     "ddi_intr_get_navail() returned %d", count, avail);
12730         }
12731 
12732         /* Mpt has only one interrupt routine */
12733         if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
12734                 count = 1;
12735         }
12736 
12737         /* Allocate an array of interrupt handles */
12738         mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
12739         mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
12740 
12741         flag = DDI_INTR_ALLOC_NORMAL;
12742 
12743         /* call ddi_intr_alloc() */
12744         ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
12745             count, &actual, flag);
12746 
12747         if ((ret != DDI_SUCCESS) || (actual == 0)) {


13059                 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target),
13060                     KM_SLEEP);
13061                 tmp_tgt->m_devhdl = *dev_handle;
13062                 tmp_tgt->m_deviceinfo = dev_info;
13063                 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
13064                 tmp_tgt->m_qfull_retry_interval =
13065                     drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
13066                 tmp_tgt->m_t_throttle = MAX_THROTTLE;
13067                 devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
13068                 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
13069                 mutex_enter(&mpt->m_mutex);
13070                 if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
13071                         sas_wwn = devicename;
13072                 } else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
13073                         sas_wwn = 0;
13074                 }
13075         }
13076 
13077         phymask = mptsas_physport_to_phymask(mpt, physport);
13078         *pptgt = mptsas_tgt_alloc(&slots->m_tgttbl, *dev_handle, sas_wwn,
13079             dev_info, phymask, phynum, mpt);
13080         if (*pptgt == NULL) {
13081                 mptsas_log(mpt, CE_WARN, "Failed to allocate target "
13082                     "structure!");
13083                 rval = DEV_INFO_FAIL_ALLOC;
13084                 return (rval);
13085         }
13086         (*pptgt)->m_enclosure = enclosure;
13087         (*pptgt)->m_slot_num = bay_num;
13088         return (DEV_INFO_SUCCESS);
13089 }
13090 
13091 uint64_t
13092 mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
13093 {
13094         uint64_t        sata_guid = 0, *pwwn = NULL;
13095         int             target = ptgt->m_devhdl;
13096         uchar_t         *inq83 = NULL;
13097         int             inq83_len = 0xFF;
13098         uchar_t         *dblk = NULL;
13099         int             inq83_retry = 3;


14513                 *pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
14514         } else {
14515                 *pip = mptsas_find_path_phy(pdip, phy);
14516         }
14517 
14518         if (*pip != NULL) {
14519                 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
14520                 ASSERT(*lun_dip != NULL);
14521                 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
14522                     (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
14523                     MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
14524                         if (strncmp(guid, old_guid, strlen(guid)) == 0) {
14525                                 /*
14526                                  * Same path back online again.
14527                                  */
14528                                 (void) ddi_prop_free(old_guid);
14529                                 if ((!MDI_PI_IS_ONLINE(*pip)) &&
14530                                     (!MDI_PI_IS_STANDBY(*pip)) &&
14531                                     (ptgt->m_tgt_unconfigured == 0)) {
14532                                         rval = mdi_pi_online(*pip, 0);
14533                                         mutex_enter(&mpt->m_mutex);
14534                                         (void) mptsas_set_led_status(mpt, ptgt,
14535                                             0);
14536                                         mutex_exit(&mpt->m_mutex);
14537                                 } else {
14538                                         rval = DDI_SUCCESS;
14539                                 }
14540                                 if (rval != DDI_SUCCESS) {
14541                                         mptsas_log(mpt, CE_WARN, "path:target: "
14542                                             "%x, lun:%x online failed!", target,
14543                                             lun);
14544                                         *pip = NULL;
14545                                         *lun_dip = NULL;
14546                                 }
14547                                 return (rval);
14548                         } else {
14549                                 /*
14550                                  * The GUID of the LUN has changed which maybe
14551                                  * The GUID of the LUN has changed, perhaps
14552                                  * because the customer mapped another volume
14553                                  * to the same LUN.  */
14554                                 mptsas_log(mpt, CE_WARN, "The GUID of the "
14555                                     "target:%x, lun:%x was changed, maybe "
14556                                     "because someone mapped another volume "


14770                                 mptsas_log(mpt, CE_WARN, "mptsas driver "
14771                                     "failed to create pm-capable "
14772                                     "property, target %d", target);
14773                                 mdi_rtn = MDI_FAILURE;
14774                                 goto virt_create_done;
14775                         }
14776                 }
14777                 /*
14778                  * Create the phy-num property
14779                  */
14780                 if (mdi_prop_update_int(*pip, "phy-num",
14781                     ptgt->m_phynum) != DDI_SUCCESS) {
14782                         mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14783                             "create phy-num property for target %d lun %d",
14784                             target, lun);
14785                         mdi_rtn = MDI_FAILURE;
14786                         goto virt_create_done;
14787                 }
14788                 NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
14789                 mdi_rtn = mdi_pi_online(*pip, 0);
14790                 if (mdi_rtn == MDI_SUCCESS) {
14791                         mutex_enter(&mpt->m_mutex);
14792                         if (mptsas_set_led_status(mpt, ptgt, 0) !=
14793                             DDI_SUCCESS) {
14794                                 NDBG14(("mptsas: clear LED for slot %x "
14795                                     "failed", ptgt->m_slot_num));
14796                         }
14797                         mutex_exit(&mpt->m_mutex);
14798                 }
14799                 if (mdi_rtn == MDI_NOT_SUPPORTED) {
14800                         mdi_rtn = MDI_FAILURE;
14801                 }
14802 virt_create_done:
14803                 if (*pip && mdi_rtn != MDI_SUCCESS) {
14804                         (void) mdi_pi_free(*pip, 0);
14805                         *pip = NULL;
14806                         *lun_dip = NULL;
14807                 }
14808         }
14809 
14810         scsi_hba_nodename_compatible_free(nodename, compatible);
14811         if (lun_addr != NULL) {
14812                 kmem_free(lun_addr, SCSI_MAXNAMELEN);
14813         }
14814         if (wwn_str != NULL) {
14815                 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
14816         }
14817         if (component != NULL) {
14818                 kmem_free(component, MAXPATHLEN);


15132                         if (ndi_prop_update_int(DDI_DEV_T_NONE,
15133                             *lun_dip, "phy-num", ptgt->m_phynum) !=
15134                             DDI_PROP_SUCCESS) {
15135                                 mptsas_log(mpt, CE_WARN, "mptsas driver "
15136                                     "failed to create phy-num property for "
15137                                     "target %d", target);
15138                                 ndi_rtn = NDI_FAILURE;
15139                                 goto phys_create_done;
15140                         }
15141                 }
15142 phys_create_done:
15143                 /*
15144                  * If props were setup ok, online the lun
15145                  */
15146                 if (ndi_rtn == NDI_SUCCESS) {
15147                         /*
15148                          * Try to online the new node
15149                          */
15150                         ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
15151                 }
15152                 if (ndi_rtn == NDI_SUCCESS) {
15153                         mutex_enter(&mpt->m_mutex);
15154                         if (mptsas_set_led_status(mpt, ptgt, 0) !=
15155                             DDI_SUCCESS) {
15156                                 NDBG14(("mptsas: clear LED for tgt %x "
15157                                     "failed", ptgt->m_slot_num));
15158                         }
15159                         mutex_exit(&mpt->m_mutex);
15160                 }
15161 
15162                 /*
15163                  * If successful, set the rtn flag; else unwind the alloc'd lun
15164                  */
15165                 if (ndi_rtn != NDI_SUCCESS) {
15166                         NDBG12(("mptsas driver unable to online "
15167                             "target %d lun %d", target, lun));
15168                         ndi_prop_remove_all(*lun_dip);
15169                         (void) ndi_devi_free(*lun_dip);
15170                         *lun_dip = NULL;
15171                 }
15172         }
15173 
15174         scsi_hba_nodename_compatible_free(nodename, compatible);
15175 
15176         if (wwn_str != NULL) {
15177                 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
15178         }
15179         if (component != NULL) {
15180                 kmem_free(component, MAXPATHLEN);


15767  * There can't be duplicate entries for the same devhdl;
15768  * for invalid entries the devhdl should be set to 0xffff.
15769  */
15770 static void *
15771 mptsas_search_by_devhdl(mptsas_hash_table_t *hashtab, uint16_t devhdl)
15772 {
15773         mptsas_hash_data_t *data;
15774 
15775         data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
15776         while (data != NULL) {
15777                 if (data->devhdl == devhdl) {
15778                         break;
15779                 }
15780                 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
15781         }
15782         return (data);
15783 }
15784 
15785 mptsas_target_t *
15786 mptsas_tgt_alloc(mptsas_hash_table_t *hashtab, uint16_t devhdl, uint64_t wwid,
15787     uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum, mptsas_t *mpt)
15788 {
15789         mptsas_target_t *tmp_tgt = NULL;
15790 
15791         tmp_tgt = mptsas_hash_search(hashtab, wwid, phymask);
15792         if (tmp_tgt != NULL) {
15793                 NDBG20(("Hash item already exists"));
15794                 tmp_tgt->m_deviceinfo = devinfo;
15795                 tmp_tgt->m_devhdl = devhdl;
15796                 return (tmp_tgt);
15797         }
15798         tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
15799         if (tmp_tgt == NULL) {
15800                 cmn_err(CE_WARN, "Fatal, tgt allocation failed");
15801                 return (NULL);
15802         }
15803         tmp_tgt->m_devhdl = devhdl;
15804         tmp_tgt->m_sas_wwn = wwid;
15805         tmp_tgt->m_deviceinfo = devinfo;
15806         tmp_tgt->m_phymask = phymask;
15807         tmp_tgt->m_phynum = phynum;
15808         /* Initialize the tgt structure */
15809         tmp_tgt->m_qfull_retries = QFULL_RETRIES;
15810         tmp_tgt->m_qfull_retry_interval =
15811             drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
15812         tmp_tgt->m_t_throttle = MAX_THROTTLE;
15813         mutex_init(&tmp_tgt->m_tgt_intr_mutex, NULL, MUTEX_DRIVER,
15814             DDI_INTR_PRI(mpt->m_intr_pri));
15815 
15816         mptsas_hash_add(hashtab, tmp_tgt);
15817 
15818         return (tmp_tgt);
15819 }
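
A minimal usage sketch of the lookup-or-create pattern mptsas_tgt_alloc() implements, not part of this change; the devhdl/wwid/devinfo/phymask/phynum values and the use of the per-instance table m_active->m_tgttbl are illustrative assumptions:

        mptsas_target_t *ptgt;

        mutex_enter(&mpt->m_mutex);
        /* Reuses an existing (wwid, phymask) entry, refreshing devhdl/devinfo */
        ptgt = mptsas_tgt_alloc(&mpt->m_active->m_tgttbl, devhdl, wwid,
            devinfo, phymask, phynum, mpt);
        mutex_exit(&mpt->m_mutex);
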
15820 
15821 static void
15822 mptsas_tgt_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
15823     mptsas_phymask_t phymask)
15824 {
15825         mptsas_target_t *tmp_tgt;
15826         tmp_tgt = mptsas_hash_rem(hashtab, wwid, phymask);
15827         if (tmp_tgt == NULL) {
15828                 cmn_err(CE_WARN, "Tgt not found, nothing to free");
15829         } else {
15830                 mutex_destroy(&tmp_tgt->m_tgt_intr_mutex);
15831                 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
15832         }
15833 }
15834 
15835 /*
15836  * Return the entry in the hash table
15837  */
15838 static mptsas_smp_t *
15839 mptsas_smp_alloc(mptsas_hash_table_t *hashtab, mptsas_smp_t *data)
15840 {
15841         uint64_t key1 = data->m_sasaddr;
15842         mptsas_phymask_t key2 = data->m_phymask;
15843         mptsas_smp_t *ret_data;
15844 
15845         ret_data = mptsas_hash_search(hashtab, key1, key2);
15846         if (ret_data != NULL) {
15847                 bcopy(data, ret_data, sizeof (mptsas_smp_t));
15848                 return (ret_data);
15849         }
15850 


16017         return (this->data);
16018 }
16019 
16020 /*
16021  * Functions for SGPIO LED support
16022  */
16023 static dev_info_t *
16024 mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
16025 {
16026         dev_info_t      *dip;
16027         int             prop;
16028         dip = e_ddi_hold_devi_by_dev(dev, 0);
16029         if (dip == NULL)
16030                 return (dip);
16031         prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
16032             "phymask", 0);
16033         *phymask = (mptsas_phymask_t)prop;
16034         ddi_release_devi(dip);
16035         return (dip);
16036 }
16037 static mptsas_target_t *
16038 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
16039 {
16040         uint8_t                 phynum;
16041         uint64_t                wwn;
16042         int                     lun;
16043         mptsas_target_t         *ptgt = NULL;
16044 
16045         if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
16046                 return (NULL);
16047         }
16048         if (addr[0] == 'w') {
16049                 ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
16050         } else {
16051                 ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
16052         }
16053         return (ptgt);
16054 }
16055 
16056 #ifdef MPTSAS_GET_LED
16057 static int
16058 mptsas_get_led_status(mptsas_t *mpt, mptsas_target_t *ptgt,
16059     uint32_t *slotstatus)
16060 {
16061         return (mptsas_send_sep(mpt, ptgt, slotstatus,
16062             MPI2_SEP_REQ_ACTION_READ_STATUS));
16063 }
16064 #endif
16065 static int
16066 mptsas_set_led_status(mptsas_t *mpt, mptsas_target_t *ptgt, uint32_t slotstatus)
16067 {
16068         NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
16069             slotstatus, ptgt->m_slot_num));
16070         return (mptsas_send_sep(mpt, ptgt, &slotstatus,
16071             MPI2_SEP_REQ_ACTION_WRITE_STATUS));
16072 }
16073 /*
16074  *  send sep request, use enclosure/slot addressing
16075  */
16076 static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
16077     uint32_t *status, uint8_t act)
16078 {
16079         Mpi2SepRequest_t        req;
16080         Mpi2SepReply_t          rep;
16081         int                     ret;
16082 
16083         ASSERT(mutex_owned(&mpt->m_mutex));
16084 
16085         bzero(&req, sizeof (req));
16086         bzero(&rep, sizeof (rep));
16087 
16088         /* Do nothing for RAID volumes */
16089         if (ptgt->m_phymask == 0) {
16090                 NDBG14(("mptsas_send_sep: Skip RAID volumes"));
16091                 return (DDI_FAILURE);
16092         }
16093 
16094         req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
16095         req.Action = act;
16096         req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
16097         req.EnclosureHandle = LE_16(ptgt->m_enclosure);
16098         req.Slot = LE_16(ptgt->m_slot_num);
16099         if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
16100                 req.SlotStatus = LE_32(*status);
16101         }
16102         ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
16103             sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
16104         if (ret != 0) {
16105                 mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
16106                     "Processor Request message error %d", ret);
16107                 return (DDI_FAILURE);
16108         }
16109         /* passthrough succeeded; check the IOC status */
16110         if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
16111                 if ((LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) ==
16112                     MPI2_IOCSTATUS_INVALID_FIELD) {
16113                         mptsas_log(mpt, CE_NOTE, "send sep act %x: Not "
16114                             "supported action, loginfo %x", act,
16115                             LE_32(rep.IOCLogInfo));
16116                         return (DDI_FAILURE);
16117                 }
16118                 mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
16119                     "status:%x", act, LE_16(rep.IOCStatus));
16120                 return (DDI_FAILURE);
16121         }
16122         if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
16123                 *status = LE_32(rep.SlotStatus);
16124         }
16125 
16126         return (DDI_SUCCESS);
16127 }
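
A hedged sketch of how a caller might drive a slot LED through the routines above, not taken from this change; MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST is assumed to be the relevant bit from the MPI2 headers, and m_mutex must be held as mptsas_send_sep() asserts:

        uint32_t slotstatus = MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;

        mutex_enter(&mpt->m_mutex);
        if (mptsas_set_led_status(mpt, ptgt, slotstatus) != DDI_SUCCESS) {
                mptsas_log(mpt, CE_NOTE, "identify LED request failed");
        }
        /* Passing 0, as the online paths above do, clears the slot status. */
        mutex_exit(&mpt->m_mutex);
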
16128 
16129 int
16130 mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
16131     ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
16132     uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
16133 {
16134         ddi_dma_cookie_t        new_cookie;
16135         size_t                  alloc_len;
16136         uint_t                  ncookie;
16137 
16138         if (cookiep == NULL)
16139                 cookiep = &new_cookie;
16140 
16141         if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
16142             NULL, dma_hdp) != DDI_SUCCESS) {
16143                 dma_hdp = NULL;
16144                 return (FALSE);
16145         }
16146 
16147         if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
16148             DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,


16157             cookiep, &ncookie) != DDI_DMA_MAPPED) {
16158                 (void) ddi_dma_mem_free(acc_hdp);
16159                 ddi_dma_free_handle(dma_hdp);
16160                 dma_hdp = NULL;
16161                 return (FALSE);
16162         }
16163 
16164         return (TRUE);
16165 }
16166 
16167 void
16168 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
16169 {
16170         if (*dma_hdp == NULL)
16171                 return;
16172 
16173         (void) ddi_dma_unbind_handle(*dma_hdp);
16174         (void) ddi_dma_mem_free(acc_hdp);
16175         ddi_dma_free_handle(dma_hdp);
16176         dma_hdp = NULL;
16177 }
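
A minimal sketch of the allocate/use/free pattern these two helpers provide, not driver code; the 1024-byte size is arbitrary and mptsas_dma_attrs is the message-frame attribute structure declared elsewhere in this file:

        ddi_dma_handle_t        dma_hdl;
        ddi_acc_handle_t        acc_hdl;
        caddr_t                 memp;
        ddi_dma_cookie_t        cookie;

        if (mptsas_dma_addr_create(mpt, mptsas_dma_attrs, &dma_hdl, &acc_hdl,
            &memp, 1024, &cookie) == FALSE) {
                return (DDI_FAILURE);
        }
        /* memp is the kernel mapping; cookie.dmac_laddress is the device address */
        mptsas_dma_addr_destroy(&dma_hdl, &acc_hdl);
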
16178 
16179 static int
16180 mptsas_outstanding_cmds_n(mptsas_t *mpt)
16181 {
16182         int n = 0, i;
16183         for (i = 0; i < mpt->m_slot_freeq_pair_n; i++) {
16184                 mutex_enter(&mpt->m_slot_freeq_pairp[i].
16185                     m_slot_allocq.s.m_fq_mutex);
16186                 mutex_enter(&mpt->m_slot_freeq_pairp[i].
16187                     m_slot_releq.s.m_fq_mutex);
16188                 n += (mpt->m_slot_freeq_pairp[i].m_slot_allocq.s.m_fq_n_init -
16189                     mpt->m_slot_freeq_pairp[i].m_slot_allocq.s.m_fq_n -
16190                     mpt->m_slot_freeq_pairp[i].m_slot_releq.s.m_fq_n);
16191                 mutex_exit(&mpt->m_slot_freeq_pairp[i].
16192                     m_slot_releq.s.m_fq_mutex);
16193                 mutex_exit(&mpt->m_slot_freeq_pairp[i].
16194                     m_slot_allocq.s.m_fq_mutex);
16195         }
16196         if (mpt->m_max_requests - 2 < n)
16197                 panic("mptsas: free slot allocq and releq crazy");
16198         return (n);
16199 }
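
An illustrative calculation (invented numbers) of the per-pair accounting mptsas_outstanding_cmds_n() performs:

/*
 * If a free-queue pair was initialized with m_fq_n_init = 64 slots, 10 slots
 * currently sit on its alloc queue and 4 completed slots are parked on its
 * release queue, then 64 - 10 - 4 = 50 commands from that pair are still
 * outstanding in the HBA.  Summing over all pairs gives n, which can never
 * legitimately exceed m_max_requests - 2 (SMID 0 and the TM slot are
 * reserved), hence the panic above.
 */
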


  50  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
  51  * DAMAGE.
  52  */
  53 
  54 /*
  55  * mptsas - This is a driver based on LSI Logic's MPT2.0 interface.
  56  *
  57  */
  58 
  59 #if defined(lint) || defined(DEBUG)
  60 #define MPTSAS_DEBUG
  61 #endif
  62 
  63 /*
  64  * standard header files.
  65  */
  66 #include <sys/note.h>
  67 #include <sys/scsi/scsi.h>
  68 #include <sys/pci.h>
  69 #include <sys/file.h>

  70 #include <sys/policy.h>
  71 #include <sys/sysevent.h>
  72 #include <sys/sysevent/eventdefs.h>
  73 #include <sys/sysevent/dr.h>
  74 #include <sys/sata/sata_defs.h>
  75 #include <sys/scsi/generic/sas.h>
  76 #include <sys/scsi/impl/scsi_sas.h>
  77 
  78 #pragma pack(1)
  79 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
  80 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
  81 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
  82 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
  83 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
  84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
  85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
  86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
  87 #pragma pack()
  88 
  89 /*
  90  * private header files.
  91  *
  92  */
  93 #include <sys/scsi/impl/scsi_reset_notify.h>
  94 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
  95 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
  96 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>

  97 #include <sys/raidioctl.h>
  98 
  99 #include <sys/fs/dv_node.h>       /* devfs_clean */
 100 
 101 /*
 102  * FMA header files
 103  */
 104 #include <sys/ddifm.h>
 105 #include <sys/fm/protocol.h>
 106 #include <sys/fm/util.h>
 107 #include <sys/fm/io/ddi.h>
 108 
 109 /*















 110  * autoconfiguration data and routines.
 111  */
 112 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
 113 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
 114 static int mptsas_power(dev_info_t *dip, int component, int level);
 115 
 116 /*
 117  * cb_ops function
 118  */
 119 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
 120         cred_t *credp, int *rval);
 121 #ifdef __sparc
 122 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
 123 #else  /* __sparc */
 124 static int mptsas_quiesce(dev_info_t *devi);
 125 #endif  /* __sparc */
 126 
 127 /*
 128  * Resource initialization for hardware
 129  */


 182  * SMP functions
 183  */
 184 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
 185 
 186 /*
 187  * internal function prototypes.
 188  */
 189 static void mptsas_list_add(mptsas_t *mpt);
 190 static void mptsas_list_del(mptsas_t *mpt);
 191 
 192 static int mptsas_quiesce_bus(mptsas_t *mpt);
 193 static int mptsas_unquiesce_bus(mptsas_t *mpt);
 194 
 195 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
 196 static void mptsas_free_handshake_msg(mptsas_t *mpt);
 197 
 198 static void mptsas_ncmds_checkdrain(void *arg);
 199 
 200 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
 201 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
 202 static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
 203 static void mptsas_accept_tx_waitq(mptsas_t *mpt);
 204 
 205 static int mptsas_do_detach(dev_info_t *dev);
 206 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
 207 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
 208     struct scsi_pkt *pkt);
 209 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
 210 
 211 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
 212 static void mptsas_handle_event(void *args);
 213 static int mptsas_handle_event_sync(void *args);
 214 static void mptsas_handle_dr(void *args);
 215 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
 216     dev_info_t *pdip);
 217 
 218 static void mptsas_restart_cmd(void *);
 219 
 220 static void mptsas_flush_hba(mptsas_t *mpt);
 221 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
 222         uint8_t tasktype);
 223 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
 224     uchar_t reason, uint_t stat);
 225 
 226 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
 227 static void mptsas_process_intr(mptsas_t *mpt,
 228     pMpi2ReplyDescriptorsUnion_t reply_desc_union);

 229 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
 230     pMpi2ReplyDescriptorsUnion_t reply_desc);
 231 static void mptsas_handle_address_reply(mptsas_t *mpt,
 232     pMpi2ReplyDescriptorsUnion_t reply_desc);
 233 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
 234 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
 235     uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
 236 
 237 static void mptsas_watch(void *arg);
 238 static void mptsas_watchsubr(mptsas_t *mpt);
 239 static void mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl);
 240 static void mptsas_kill_target(mptsas_t *mpt, mptsas_target_t *ptgt);
 241 
 242 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
 243 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
 244     uint8_t *data, uint32_t request_size, uint32_t reply_size,
 245     uint32_t data_size, uint32_t direction, uint8_t *dataout,
 246     uint32_t dataout_size, short timeout, int mode);
 247 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
 248 
 249 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
 250     uint32_t unique_id);
 251 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
 252 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
 253     mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
 254 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
 255     mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
 256     uint32_t diag_type);
 257 static int mptsas_diag_register(mptsas_t *mpt,
 258     mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
 259 static int mptsas_diag_unregister(mptsas_t *mpt,
 260     mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);


 276 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
 277 
 278 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
 279 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
 280 
 281 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
 282     int kmflags);
 283 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
 284 
 285 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
 286     mptsas_cmd_t *cmd);
 287 static void mptsas_check_task_mgt(mptsas_t *mpt,
 288     pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
 289 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
 290     mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
 291     int *resid);
 292 
 293 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
 294 static void mptsas_free_active_slots(mptsas_t *mpt);
 295 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);

 296 
 297 static void mptsas_restart_hba(mptsas_t *mpt);
 298 static void mptsas_restart_waitq(mptsas_t *mpt);
 299 
 300 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
 301 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);

 302 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
 303 
 304 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
 305 static void mptsas_doneq_empty(mptsas_t *mpt);
 306 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
 307 
 308 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
 309 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
 310 static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
 311 static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
 312 
 313 
 314 static void mptsas_start_watch_reset_delay();
 315 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
 316 static void mptsas_watch_reset_delay(void *arg);
 317 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
 318 

 319 /*
 320  * helper functions
 321  */
 322 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
 323 
 324 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
 325 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
 326 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
 327     int lun);
 328 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
 329     int lun);
 330 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
 331 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
 332 
 333 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
 334     int *lun);
 335 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
 336 
 337 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt, int phymask,
 338     uint8_t phy);
 339 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask,
 340     uint64_t wwid);
 341 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask,
 342     uint64_t wwid);
 343 
 344 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
 345     uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
 346 
 347 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
 348     uint16_t *handle, mptsas_target_t **pptgt);
 349 static void mptsas_update_phymask(mptsas_t *mpt);

 350 


 351 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
 352     mptsas_phymask_t *phymask);




 353 
 354 
 355 /*
 356  * Enumeration / DR functions
 357  */
 358 static void mptsas_config_all(dev_info_t *pdip);
 359 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
 360     dev_info_t **lundip);
 361 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
 362     dev_info_t **lundip);
 363 
 364 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
 365 static int mptsas_offline_target(dev_info_t *pdip, char *name);
 366 
 367 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
 368     dev_info_t **dip);
 369 
 370 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
 371 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
 372     dev_info_t **dip, mptsas_target_t *ptgt);


 393 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
 394     int mode, int *rval);
 395 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
 396     int mode, int *rval);
 397 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
 398     int mode, int *rval);
 399 static void mptsas_record_event(void *args);
 400 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
 401     int mode);
 402 
 403 static void mptsas_hash_init(mptsas_hash_table_t *hashtab);
 404 static void mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen);
 405 static void mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data);
 406 static void * mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
 407     mptsas_phymask_t key2);
 408 static void * mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
 409     mptsas_phymask_t key2);
 410 static void * mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos);
 411 
 412 mptsas_target_t *mptsas_tgt_alloc(mptsas_hash_table_t *, uint16_t, uint64_t,
 413     uint32_t, mptsas_phymask_t, uint8_t);
 414 static mptsas_smp_t *mptsas_smp_alloc(mptsas_hash_table_t *hashtab,
 415     mptsas_smp_t *data);
 416 static void mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
 417     mptsas_phymask_t phymask);
 418 static void mptsas_tgt_free(mptsas_hash_table_t *, uint64_t, mptsas_phymask_t);
 419 static void * mptsas_search_by_devhdl(mptsas_hash_table_t *, uint16_t);
 420 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
 421     dev_info_t **smp_dip);
 422 
 423 /*
 424  * Power management functions
 425  */
 426 static int mptsas_get_pci_cap(mptsas_t *mpt);
 427 static int mptsas_init_pm(mptsas_t *mpt);
 428 
 429 /*
 430  * MPT MSI tunable:
 431  *
 432  * By default MSI is enabled on all supported platforms.
 433  */


 443  * FMA Prototypes
 444  */
 445 static void mptsas_fm_init(mptsas_t *mpt);
 446 static void mptsas_fm_fini(mptsas_t *mpt);
 447 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
 448 
 449 extern pri_t minclsyspri, maxclsyspri;
 450 
 451 /*
 452  * This device is created by the SCSI pseudo nexus driver (SCSI vHCI).  It is
 453  * under this device that the paths to a physical device are created when
 454  * MPxIO is used.
 455  */
 456 extern dev_info_t       *scsi_vhci_dip;
 457 
 458 /*
 459  * Tunable timeout value for Inquiry VPD page 0x83
 460  * By default the value is 30 seconds.
 461  */
 462 int mptsas_inq83_retry_timeout = 30;
 463 /*
 464  * Maximum number of command timeouts (0 - 255) considered acceptable.
 465  */
 466 int mptsas_timeout_threshold = 2;
 467 /*
 468  * Timeouts exceeding threshold within this period are considered excessive.
 469  */
 470 int mptsas_timeout_interval = 30;
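
These globals can be overridden the usual way for illumos/Solaris driver tunables, via /etc/system; the values below are arbitrary examples:

* /etc/system fragment (example values only)
set mpt_sas:mptsas_inq83_retry_timeout = 60
set mpt_sas:mptsas_timeout_threshold = 4
set mpt_sas:mptsas_timeout_interval = 60
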
 471 
 472 /*
 473  * This is used to allocate memory for message frame storage, not for
 474  * data I/O DMA. All message frames must be stored in the first 4G of
 475  * physical memory.
 476  */
 477 ddi_dma_attr_t mptsas_dma_attrs = {
 478         DMA_ATTR_V0,    /* attribute layout version             */
 479         0x0ull,         /* address low - should be 0 (longlong) */
 480         0xffffffffull,  /* address high - 32-bit max range      */
 481         0x00ffffffull,  /* count max - max DMA object size      */
 482         4,              /* allocation alignment requirements    */
 483         0x78,           /* burstsizes - binary encoded values   */
 484         1,              /* minxfer - gran. of DMA engine        */
 485         0x00ffffffull,  /* maxxfer - gran. of DMA engine        */
 486         0xffffffffull,  /* max segment size (DMA boundary)      */
 487         MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length      */
 488         512,            /* granularity - device transfer size   */
 489         0               /* flags, set to 0                      */
 490 };


1167                         mpt->m_doneq_thread_id[j].flag |=
1168                             MPTSAS_DONEQ_THREAD_ACTIVE;
1169                         mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1170                         mpt->m_doneq_thread_id[j].arg.t = j;
1171                         mpt->m_doneq_thread_id[j].threadp =
1172                             thread_create(NULL, 0, mptsas_doneq_thread,
1173                             &mpt->m_doneq_thread_id[j].arg,
1174                             0, &p0, TS_RUN, minclsyspri);
1175                         mpt->m_doneq_thread_id[j].donetail =
1176                             &mpt->m_doneq_thread_id[j].doneq;
1177                         mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1178                 }
1179                 mutex_exit(&mpt->m_doneq_mutex);
1180                 doneq_thread_create++;
1181         }
1182 
1183         /* Initialize mutex used in interrupt handler */
1184         mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1185             DDI_INTR_PRI(mpt->m_intr_pri));
1186         mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1187         mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1188             DDI_INTR_PRI(mpt->m_intr_pri));
1189         for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1190                 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1191                     NULL, MUTEX_DRIVER,
1192                     DDI_INTR_PRI(mpt->m_intr_pri));
1193         }
1194 
1195         cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1196         cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1197         cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1198         cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1199         cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1200         mutex_init_done++;
1201 
1202         /*
1203          * Disable hardware interrupt since we're not ready to
1204          * handle it yet.
1205          */
1206         MPTSAS_DISABLE_INTR(mpt);
1207         if (mptsas_register_intrs(mpt) == FALSE)


1270                 goto fail;
1271         smp_attach_setup++;
1272 
1273         if (mptsas_cache_create(mpt) == FALSE)
1274                 goto fail;
1275 
1276         mpt->m_scsi_reset_delay      = ddi_prop_get_int(DDI_DEV_T_ANY,
1277             dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
1278         if (mpt->m_scsi_reset_delay == 0) {
1279                 mptsas_log(mpt, CE_NOTE,
1280                     "scsi_reset_delay of 0 is not recommended,"
1281                     " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1282                 mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1283         }
1284 
1285         /*
1286          * Initialize the wait and done FIFO queue
1287          */
1288         mpt->m_donetail = &mpt->m_doneq;
1289         mpt->m_waitqtail = &mpt->m_waitq;
1290         mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
1291         mpt->m_tx_draining = 0;
1292 
1293         /*
1294          * Initialize the ioc cmd queue
1295          */
1296         mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1297         mpt->m_dev_handle = 0xFFFF;
1298 
1299         MPTSAS_ENABLE_INTR(mpt);
1300 
1301         /*
1302          * enable event notification
1303          */
1304         mutex_enter(&mpt->m_mutex);
1305         if (mptsas_ioc_enable_event_notification(mpt)) {
1306                 mutex_exit(&mpt->m_mutex);
1307                 goto fail;
1308         }
1309         mutex_exit(&mpt->m_mutex);
1310 
1311         /*


1431                                     &mpt->m_doneq_mutex);
1432                         }
1433                         for (j = 0; j < doneq_thread_num; j++) {
1434                                 cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1435                                 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1436                         }
1437                         kmem_free(mpt->m_doneq_thread_id,
1438                             sizeof (mptsas_doneq_thread_list_t)
1439                             * doneq_thread_num);
1440                         mutex_exit(&mpt->m_doneq_mutex);
1441                         cv_destroy(&mpt->m_doneq_thread_cv);
1442                         mutex_destroy(&mpt->m_doneq_mutex);
1443                 }
1444                 if (event_taskq_create) {
1445                         ddi_taskq_destroy(mpt->m_event_taskq);
1446                 }
1447                 if (dr_taskq_create) {
1448                         ddi_taskq_destroy(mpt->m_dr_taskq);
1449                 }
1450                 if (mutex_init_done) {
1451                         mutex_destroy(&mpt->m_tx_waitq_mutex);
1452                         mutex_destroy(&mpt->m_passthru_mutex);
1453                         mutex_destroy(&mpt->m_mutex);
1454                         for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1455                                 mutex_destroy(
1456                                     &mpt->m_phy_info[i].smhba_info.phy_mutex);
1457                         }
1458                         cv_destroy(&mpt->m_cv);
1459                         cv_destroy(&mpt->m_passthru_cv);
1460                         cv_destroy(&mpt->m_fw_cv);
1461                         cv_destroy(&mpt->m_config_cv);
1462                         cv_destroy(&mpt->m_fw_diag_cv);
1463                 }
1464 
1465                 if (map_setup) {
1466                         mptsas_cfg_fini(mpt);
1467                 }
1468                 if (config_setup) {
1469                         mptsas_config_space_fini(mpt);
1470                 }
1471                 mptsas_free_handshake_msg(mpt);


1849         mptsas_hash_uninit(&mpt->m_active->m_tgttbl, sizeof (mptsas_target_t));
1850         mptsas_hash_uninit(&mpt->m_active->m_smptbl, sizeof (mptsas_smp_t));
1851         mptsas_free_active_slots(mpt);
1852         mutex_exit(&mpt->m_mutex);
1853 
1854         /* deallocate everything that was allocated in mptsas_attach */
1855         mptsas_cache_destroy(mpt);
1856 
1857         mptsas_hba_fini(mpt);
1858         mptsas_cfg_fini(mpt);
1859 
1860         /* Lower the power informing PM Framework */
1861         if (mpt->m_options & MPTSAS_OPT_PM) {
1862                 if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
1863                         mptsas_log(mpt, CE_WARN,
1864                             "!mptsas%d: Lower power request failed "
1865                             "during detach, ignoring.",
1866                             mpt->m_instance);
1867         }
1868 
1869         mutex_destroy(&mpt->m_tx_waitq_mutex);
1870         mutex_destroy(&mpt->m_passthru_mutex);
1871         mutex_destroy(&mpt->m_mutex);
1872         for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1873                 mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
1874         }
1875         cv_destroy(&mpt->m_cv);
1876         cv_destroy(&mpt->m_passthru_cv);
1877         cv_destroy(&mpt->m_fw_cv);
1878         cv_destroy(&mpt->m_config_cv);
1879         cv_destroy(&mpt->m_fw_diag_cv);
1880 
1881 
1882         mptsas_smp_teardown(mpt);
1883         mptsas_hba_teardown(mpt);
1884 
1885         mptsas_config_space_fini(mpt);
1886 
1887         mptsas_free_handshake_msg(mpt);
1888 
1889         mptsas_fm_fini(mpt);


2213                     &mpt->m_reg->Doorbell)) &
2214                     MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
2215                         if (polls++ > 3000) {
2216                                 break;
2217                         }
2218                         delay(drv_usectohz(10000));
2219                 }
2220                 /*
2221                  * If IOC is not in operational state, try to hard reset it.
2222                  */
2223                 if ((ioc_status & MPI2_IOC_STATE_MASK) !=
2224                     MPI2_IOC_STATE_OPERATIONAL) {
2225                         mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
2226                         if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
2227                                 mptsas_log(mpt, CE_WARN,
2228                                     "mptsas_power: hard reset failed");
2229                                 mutex_exit(&mpt->m_mutex);
2230                                 return (DDI_FAILURE);
2231                         }
2232                 }

2233                 mpt->m_power_level = PM_LEVEL_D0;

2234                 break;
2235         case PM_LEVEL_D3:
2236                 NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
2237                 MPTSAS_POWER_OFF(mpt);
2238                 break;
2239         default:
2240                 mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
2241                     mpt->m_instance, level);
2242                 rval = DDI_FAILURE;
2243                 break;
2244         }
2245         mutex_exit(&mpt->m_mutex);
2246         return (rval);
2247 }
2248 
2249 /*
2250  * Initialize configuration space and figure out which
2251  * chip and revision of the chip the mpt driver is using.
2252  */
2253 static int


2604 
2605         /*
2606          * Store the reply descriptor post queue memory address.  This chip
2607          * uses this address to write to the reply descriptor post queue.  The
2608          * second address is the address mpt uses to manage the queue.
2609          */
2610         mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
2611         mpt->m_post_queue = memp;
2612 
2613         /*
2614          * Clear the reply post queue memory.
2615          */
2616         bzero(mpt->m_post_queue, mem_size);
2617 
2618         return (DDI_SUCCESS);
2619 }
2620 
2621 static void
2622 mptsas_alloc_reply_args(mptsas_t *mpt)
2623 {
2624         if (mpt->m_replyh_args == NULL) {




2625                 mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
2626                     mpt->m_max_replies, KM_SLEEP);
2627         }
2628 }
2629 
2630 static int
2631 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2632 {
2633         mptsas_cache_frames_t   *frames = NULL;
2634         if (cmd->cmd_extra_frames == NULL) {
2635                 frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2636                 if (frames == NULL) {
2637                         return (DDI_FAILURE);
2638                 }
2639                 cmd->cmd_extra_frames = frames;
2640         }
2641         return (DDI_SUCCESS);
2642 }
2643 
2644 static void
2645 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2646 {
2647         if (cmd->cmd_extra_frames) {


2966         if (rval != TRAN_ACCEPT) {
2967                 return (rval);
2968         }
2969 
2970         /*
2971          * Send the command to target/lun, however your HBA requires it.
2972          * If busy, return TRAN_BUSY; if there's some other formatting error
2973          * in the packet, return TRAN_BADPKT; otherwise, fall through to the
2974          * return of TRAN_ACCEPT.
2975          *
2976          * Remember that access to shared resources, including the mptsas_t
2977          * data structure and the HBA hardware registers, must be protected
2978          * with mutexes, here and everywhere.
2979          *
2980          * Also remember that at interrupt time, you'll get an argument
2981          * to the interrupt handler which is a pointer to your mptsas_t
2982          * structure; you'll have to remember which commands are outstanding
2983          * and which scsi_pkt is the currently-running command so the
2984          * interrupt handler can refer to the pkt to set completion
2985          * status, call the target driver back through pkt_comp, etc.
2986          *
2987          * If the instance lock is held by another thread, don't spin
2988          * waiting for it. Instead, queue the cmd; the next time the
2989          * instance lock is free, all queued cmds are accepted. An extra
2990          * queue, the tx_waitq, is introduced for this purpose.
2991          *
2992          * A polled cmd is never queued; it is accepted as usual.
2993          *
2994          * Under the tx_waitq mutex, record whether a thread is draining
2995          * the tx_waitq.  An I/O requesting thread that finds the instance
2996          * mutex contended appends its cmd to the tx_waitq; if the draining
2997          * flag is clear, it instead sets the flag and then blocks on the
2998          * instance mutex to drain the queue itself. This scheme ensures
2999          * that the last cmd in a burst is processed.
3000          *
3001          * This feature is enabled only when the helper threads are
3002          * enabled, which is when the load is expected to be heavy.
3003          *
3004          * The per-instance mutex m_tx_waitq_mutex protects m_tx_waitqtail,
3005          * m_tx_waitq and m_tx_draining.
3006          */
3007 
3008         if (mpt->m_doneq_thread_n) {
3009                 if (mutex_tryenter(&mpt->m_mutex) != 0) {
3010                         rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3011                         mutex_exit(&mpt->m_mutex);
3012                 } else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
3013                         mutex_enter(&mpt->m_mutex);
3014                         rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3015                         mutex_exit(&mpt->m_mutex);
3016                 } else {
3017                         mutex_enter(&mpt->m_tx_waitq_mutex);
3018                         /*
3019                          * ptgt->m_dr_flag is protected by m_mutex or
3020                          * m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
3021                          * is acquired.
3022                          */
3023                         if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3024                                 if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3025                                         /*
3026                                          * The command should be allowed to
3027                                          * retry by returning TRAN_BUSY to
3028                                          * to stall the I/O's which come from
3029                                          * scsi_vhci since the device/path is
3030                                          * in unstable state now.
3031                                          */
3032                                         mutex_exit(&mpt->m_tx_waitq_mutex);
3033                                         return (TRAN_BUSY);
3034                                 } else {
3035                                         /*
3036                                          * The device is offline, just fail the
3037                                          * command by returning
3038                                          * TRAN_FATAL_ERROR.
3039                                          */
3040                                         mutex_exit(&mpt->m_tx_waitq_mutex);
3041                                         return (TRAN_FATAL_ERROR);
3042                                 }
3043                         }
3044                         if (mpt->m_tx_draining) {
3045                                 cmd->cmd_flags |= CFLAG_TXQ;
3046                                 *mpt->m_tx_waitqtail = cmd;
3047                                 mpt->m_tx_waitqtail = &cmd->cmd_linkp;
3048                                 mutex_exit(&mpt->m_tx_waitq_mutex);
3049                         } else { /* drain the queue */
3050                                 mpt->m_tx_draining = 1;
3051                                 mutex_exit(&mpt->m_tx_waitq_mutex);
3052                                 mutex_enter(&mpt->m_mutex);
3053                                 rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3054                                 mutex_exit(&mpt->m_mutex);
3055                         }
3056                 }
3057         } else {
3058                 mutex_enter(&mpt->m_mutex);
3059                 /*
3060                  * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3061                  * in this case, m_mutex is acquired.
3062                  */
3063                 if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3064                         if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3065                                 /*
3066                                  * Commands should be allowed to retry by
3067                                  * returning TRAN_BUSY to stall the I/Os
3068                                  * which come from scsi_vhci since the
3069                                  * device/path is in an unstable state now.
3070                                  */
3071                                 mutex_exit(&mpt->m_mutex);
3072                                 return (TRAN_BUSY);
3073                         } else {
3074                                 /*
3075                                  * The device is offline, just fail the
3076                                  * command by returning TRAN_FATAL_ERROR.
3077                                  */
3078                                 mutex_exit(&mpt->m_mutex);
3079                                 return (TRAN_FATAL_ERROR);
3080                         }
3081                 }

3082                 rval = mptsas_accept_pkt(mpt, cmd);
3083                 mutex_exit(&mpt->m_mutex);
3084         }
3085 
3086         return (rval);
3087 }
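
An informal summary of the dispatch decision mptsas_scsi_start() makes above; this is an editorial paraphrase of the code, not new logic:

/*
 * With helper threads running:
 *   m_mutex acquired via tryenter    -> drain tx_waitq, accept this pkt
 *   FLAG_NOINTR (polled cmd)         -> block on m_mutex, drain, accept
 *   another thread already draining  -> append this pkt to the tx_waitq
 *   otherwise                        -> set m_tx_draining, block on m_mutex,
 *                                       drain the queue, accept this pkt
 * Without helper threads the driver simply blocks on m_mutex and accepts
 * the pkt directly.
 */
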
3088 
3089 /*
3090  * Accept all the queued cmds (if any) before accepting the current one.
3091  */
3092 static int
3093 mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3094 {
3095         int rval;
3096         mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3097 
3098         ASSERT(mutex_owned(&mpt->m_mutex));
3099         /*
3100          * The call to mptsas_accept_tx_waitq() must always be performed
3101          * because that is where mpt->m_tx_draining is cleared.
3102          */
3103         mutex_enter(&mpt->m_tx_waitq_mutex);
3104         mptsas_accept_tx_waitq(mpt);
3105         mutex_exit(&mpt->m_tx_waitq_mutex);
3106         /*
3107          * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3108          * in this case, m_mutex is acquired.
3109          */
3110         if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3111                 if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3112                         /*
3113                          * The command should be allowed to retry by
3114                          * returning TRAN_BUSY to stall the I/Os which come
3115                          * from scsi_vhci since the device/path is in an
3116                          * unstable state now.
3117                          */
3118                         return (TRAN_BUSY);
3119                 } else {
3120                         /*
3121                          * The device is offline, just fail the command by
3122                          * returning TRAN_FATAL_ERROR.
3123                          */
3124                         return (TRAN_FATAL_ERROR);
3125                 }
3126         }
3127         rval = mptsas_accept_pkt(mpt, cmd);
3128 
3129         return (rval);
3130 }
3131 
3132 static int
3133 mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3134 {
3135         int             rval = TRAN_ACCEPT;
3136         mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3137 
3138         NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));
3139 
3140         ASSERT(mutex_owned(&mpt->m_mutex));
3141 
3142         if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
3143                 rval = mptsas_prepare_pkt(cmd);
3144                 if (rval != TRAN_ACCEPT) {
3145                         cmd->cmd_flags &= ~CFLAG_TRANFLAG;
3146                         return (rval);
3147                 }
3148         }
3149 
3150         /*
3151          * reset the throttle if we were draining
3152          */

3153         if ((ptgt->m_t_ncmds == 0) &&
3154             (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
3155                 NDBG23(("reset throttle"));
3156                 ASSERT(ptgt->m_reset_delay == 0);
3157                 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
3158         }
3159 
3160         /*
3161          * If HBA is being reset, the DevHandles are being re-initialized,
3162          * which means that they could be invalid even if the target is still
3163          * attached.  Check if being reset and if DevHandle is being
3164          * re-initialized.  If this is the case, return BUSY so the I/O can be
3165          * retried later.
3166          */
3167         if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
3168                 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);













3169                 if (cmd->cmd_flags & CFLAG_TXQ) {
3170                         mptsas_doneq_add(mpt, cmd);
3171                         mptsas_doneq_empty(mpt);

3172                         return (rval);
3173                 } else {

3174                         return (TRAN_BUSY);
3175                 }
3176         }
3177 
3178         /*
3179          * If device handle has already been invalidated, just
3180          * fail the command. In theory, a scsi_vhci client cannot
3181          * send down a command with an invalid devhdl, since the
3182          * devhdl is invalidated after the path goes offline and the
3183          * target driver is not supposed to select an offlined path.
3184          */
3185         if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
3186                 NDBG20(("rejecting command, possibly because of an "
3187                     "invalid devhdl request."));
3188                 mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
3189                 if (cmd->cmd_flags & CFLAG_TXQ) {
3190                         mptsas_doneq_add(mpt, cmd);
3191                         mptsas_doneq_empty(mpt);

3192                         return (rval);
3193                 } else {

3194                         return (TRAN_FATAL_ERROR);
3195                 }
3196         }

3197         /*
3198          * The first case is the normal case.  mpt gets a command from the
3199          * target driver and starts it.
3200          * Since SMID 0 is reserved and the TM slot is reserved, the actual max
3201          * commands is m_max_requests - 2.
3202          */
3203         if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
3204             (ptgt->m_t_throttle > HOLD_THROTTLE) &&
3205             (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
3206             (ptgt->m_reset_delay == 0) &&
3207             (ptgt->m_t_nwait == 0) &&
3208             ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {

3209                 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
3210                         (void) mptsas_start_cmd(mpt, cmd);
3211                 } else {

3212                         mptsas_waitq_add(mpt, cmd);

3213                 }
3214         } else {
3215                 /*
3216                  * Add this pkt to the work queue
3217                  */


3218                 mptsas_waitq_add(mpt, cmd);
3219 
3220                 if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
3221                         (void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);
3222 
3223                         /*
3224                          * Only flush the doneq if this is not a TM
3225                          * cmd.  For TM cmds the flushing of the
3226                          * doneq will be done in those routines.
3227                          */
3228                         if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
3229                                 mptsas_doneq_empty(mpt);
3230                         }
3231                 }

3232         }
3233         return (rval);
3234 }
3235 
3236 int
3237 mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
3238 {
3239         mptsas_slots_t  *slots;
3240         int             slot;
3241         mptsas_target_t *ptgt = cmd->cmd_tgt_addr;


3242 
3243         ASSERT(mutex_owned(&mpt->m_mutex));
3244         slots = mpt->m_active;
3245 
3246         /*
3247          * Account for reserved TM request slot and reserved SMID of 0.
3248          */
3249         ASSERT(slots->m_n_slots == (mpt->m_max_requests - 2));
3250 
3251         /*
3252          * m_tags is equivalent to the SMID when sending requests.  Since the
3253          * SMID cannot be 0, start out at one if rolling over past the size
3254          * of the request queue depth.  Also, don't use the last SMID, which is
3255          * reserved for TM requests.
3256          */
3257         slot = (slots->m_tags)++;
3258         if (slots->m_tags > slots->m_n_slots) {
3259                 slots->m_tags = 1;











































3260         }
3261 
3262 alloc_tag:
3263         /* Validate tag, should never fail. */
3264         if (slots->m_slot[slot] == NULL) {





3265                 /*
3266                  * Make sure the SMID is neither the reserved value of 0
3267                  * nor the TM request slot.
3268                  */
3269                 ASSERT((slot > 0) && (slot <= slots->m_n_slots));

3270                 cmd->cmd_slot = slot;
3271                 slots->m_slot[slot] = cmd;
3272                 mpt->m_ncmds++;
3273 

3274                 /*
3275                  * Only increment the per-target ncmds if this command
3276                  * has a target associated with it (i.e. it is not an
3277                  * event acknowledgment).
3278                  */
3279                 if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {

3280                         ptgt->m_t_ncmds++;

3281                 }
3282                 cmd->cmd_active_timeout = cmd->cmd_pkt->pkt_time;
3283 
3284                 /*
3285                  * If the initial timeout is less than or equal to one tick, bump
3286                  * the timeout by a tick so that the command doesn't time out before
3287                  * its allotted time.
3288                  */
3289                 if (cmd->cmd_active_timeout <= mptsas_scsi_watchdog_tick) {
3290                         cmd->cmd_active_timeout += mptsas_scsi_watchdog_tick;
3291                 }
3292                 return (TRUE);
3293         } else {
3294                 int i;
3295 
3296                 /*
3297                  * If the slot is in use, scan until a free one is found.  Don't
3298                  * use 0 or the final slot, which is reserved for TM requests.
3299                  */
3300                 for (i = 0; i < slots->m_n_slots; i++) {
3301                         slot = slots->m_tags;
3302                         if (++(slots->m_tags) > slots->m_n_slots) {
3303                                 slots->m_tags = 1;
3304                         }
3305                         if (slots->m_slot[slot] == NULL) {
3306                                 NDBG22(("found free slot %d", slot));
3307                                 goto alloc_tag;
3308                         }
3309                 }
3310         }
3311         return (FALSE);
3312 }
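
A worked illustration (hypothetical numbers) of the SMID/slot bookkeeping in mptsas_save_cmd() above:

/*
 * With m_max_requests = 128 the driver exposes m_n_slots = 126 usable slots,
 * so m_tags cycles 1, 2, ..., 126, 1, ...; SMID 0 and the final SMID
 * (reserved for task-management requests) are never handed out.  If the
 * preferred slot is occupied, the fallback loop scans at most m_n_slots
 * entries before returning FALSE.
 */
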
3313 
3314 /*
3315  * prepare the pkt:
3316  * the pkt may have been resubmitted or just reused so
3317  * initialize some fields and do some checks.
3318  */
3319 static int
3320 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3321 {
3322         struct scsi_pkt *pkt = CMD2PKT(cmd);
3323 
3324         NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3325 
3326         /*
3327          * Reinitialize some fields that need it; the packet may
3328          * have been resubmitted
3329          */
3330         pkt->pkt_reason = CMD_CMPLT;
3331         pkt->pkt_state = 0;


3359 
3360         return (TRAN_ACCEPT);
3361 }
3362 
3363 /*
3364  * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
3365  *
3366  * One of three possibilities:
3367  *      - allocate scsi_pkt
3368  *      - allocate scsi_pkt and DMA resources
3369  *      - allocate DMA resources to an already-allocated pkt
3370  */
3371 static struct scsi_pkt *
3372 mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
3373     struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
3374     int (*callback)(), caddr_t arg)
3375 {
3376         mptsas_cmd_t            *cmd, *new_cmd;
3377         mptsas_t                *mpt = ADDR2MPT(ap);
3378         int                     failure = 1;

3379         uint_t                  oldcookiec;

3380         mptsas_target_t         *ptgt = NULL;
3381         int                     rval;
3382         mptsas_tgt_private_t    *tgt_private;
3383         int                     kf;
3384 
3385         kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
3386 
3387         tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
3388             tran_tgt_private;
3389         ASSERT(tgt_private != NULL);
3390         if (tgt_private == NULL) {
3391                 return (NULL);
3392         }
3393         ptgt = tgt_private->t_private;
3394         ASSERT(ptgt != NULL);
3395         if (ptgt == NULL)
3396                 return (NULL);
3397         ap->a_target = ptgt->m_devhdl;
3398         ap->a_lun = tgt_private->t_lun;
3399 
3400         ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
3401 #ifdef MPTSAS_TEST_EXTRN_ALLOC
3402         statuslen *= 100; tgtlen *= 4;
3403 #endif
3404         NDBG3(("mptsas_scsi_init_pkt:\n"
3405             "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
3406             ap->a_target, (void *)pkt, (void *)bp,
3407             cmdlen, statuslen, tgtlen, flags));
3408 
3409         /*
3410          * Allocate the new packet.
3411          */
3412         if (pkt == NULL) {
3413                 ddi_dma_handle_t        save_dma_handle;
3414                 ddi_dma_handle_t        save_arq_dma_handle;
3415                 struct buf              *save_arq_bp;
3416                 ddi_dma_cookie_t        save_arqcookie;



3417 
3418                 cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);
3419 
3420                 if (cmd) {
3421                         save_dma_handle = cmd->cmd_dmahandle;
3422                         save_arq_dma_handle = cmd->cmd_arqhandle;
3423                         save_arq_bp = cmd->cmd_arq_buf;
3424                         save_arqcookie = cmd->cmd_arqcookie;



3425                         bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
3426                         cmd->cmd_dmahandle = save_dma_handle;
3427                         cmd->cmd_arqhandle = save_arq_dma_handle;
3428                         cmd->cmd_arq_buf = save_arq_bp;
3429                         cmd->cmd_arqcookie = save_arqcookie;
3430 


3431                         pkt = (void *)((uchar_t *)cmd +
3432                             sizeof (struct mptsas_cmd));
3433                         pkt->pkt_ha_private = (opaque_t)cmd;
3434                         pkt->pkt_address = *ap;
3435                         pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
3436                         pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
3437                         pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
3438                         cmd->cmd_pkt = (struct scsi_pkt *)pkt;
3439                         cmd->cmd_cdblen = (uchar_t)cmdlen;
3440                         cmd->cmd_scblen = statuslen;
3441                         cmd->cmd_rqslen = SENSE_LENGTH;
3442                         cmd->cmd_tgt_addr = ptgt;
3443                         failure = 0;
3444                 }
3445 
3446                 if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
3447                     (tgtlen > PKT_PRIV_LEN) ||
3448                     (statuslen > EXTCMDS_STATUS_SIZE)) {
3449                         if (failure == 0) {
3450                                 /*


3453                                  */
3454                                 failure = mptsas_pkt_alloc_extern(mpt, cmd,
3455                                     cmdlen, tgtlen, statuslen, kf);
3456                         }
3457                         if (failure) {
3458                                 /*
3459                                  * if extern allocation fails, it will
3460                                  * deallocate the new pkt as well
3461                                  */
3462                                 return (NULL);
3463                         }
3464                 }
3465                 new_cmd = cmd;
3466 
3467         } else {
3468                 cmd = PKT2CMD(pkt);
3469                 new_cmd = NULL;
3470         }
3471 
3472 

3473         /* grab cmd->cmd_cookiec here as oldcookiec */
3474 
3475         oldcookiec = cmd->cmd_cookiec;

3476 
3477         /*
3478          * If the DMA was broken up into PARTIAL transfers, cmd_nwin will
3479          * be greater than 0 and we'll need to grab the next DMA window.
3480          */
3481         /*
3482          * SLM-not doing extra command frame right now; may add later
3483          */
3484 
3485         if (cmd->cmd_nwin > 0) {
3486 
3487                 /*
3488                  * Make sure we haven't gone past the total number
3489                  * of windows
3490                  */
3491                 if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
3492                         return (NULL);
3493                 }
3494                 if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
3495                     &cmd->cmd_dma_offset, &cmd->cmd_dma_len,


3605                                 bioerror(bp, ENOMEM);
3606                                 if (new_cmd) {
3607                                         mptsas_scsi_destroy_pkt(ap, pkt);
3608                                 }
3609                                 return ((struct scsi_pkt *)NULL);
3610                         }
3611                 }
3612 
3613                 /*
3614                  * Always use scatter-gather transfer.
3615                  * The loop below stores the physical addresses of the
3616                  * DMA segments, from the DMA cookies, into the HBA's
3617                  * scatter-gather list.
3618                  * We need to ensure we have enough kmem allocated for
3619                  * the sg entries since we are no longer using an
3620                  * array inside mptsas_cmd_t.
3621                  *
3622                  * We check cmd->cmd_cookiec against oldcookiec so that
3623                  * the scatter-gather list is allocated at the correct size.
3624                  */
3625 
3626                 if (oldcookiec != cmd->cmd_cookiec) {
3627                         if (cmd->cmd_sg != (mptti_t *)NULL) {
3628                                 kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
3629                                     oldcookiec);
3630                                 cmd->cmd_sg = NULL;
3631                         }
3632                 }
3633 
3634                 if (cmd->cmd_sg == (mptti_t *)NULL) {
3635                         cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
3636                             cmd->cmd_cookiec), kf);
3637 
3638                         if (cmd->cmd_sg == (mptti_t *)NULL) {
3639                                 mptsas_log(mpt, CE_WARN,
3640                                     "unable to kmem_alloc enough memory "
3641                                     "for scatter/gather list");
3642                                 /*
3643                                  * On ENOMEM, fail the same way as the
3644                                  * other allocation failures here do
3645                                  */
3646 
3647                                 bioerror(bp, ENOMEM);
3648                                 if (new_cmd) {
3649                                         mptsas_scsi_destroy_pkt(ap, pkt);
3650                                 }
3651                                 return ((struct scsi_pkt *)NULL);
3652                         }
3653                 }
3654 
3655                 dmap = cmd->cmd_sg;
3656 
3657                 ASSERT(cmd->cmd_cookie.dmac_size != 0);
3658 
3659                 /*
3660                  * store the first segment into the S/G list
3661                  */
3662                 dmap->count = cmd->cmd_cookie.dmac_size;
3663                 dmap->addr.address64.Low = (uint32_t)
3664                     (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3665                 dmap->addr.address64.High = (uint32_t)
3666                     (cmd->cmd_cookie.dmac_laddress >> 32);
3667 
3668                 /*
3669                  * dmacount counts the size of the dma for this window
3670                  * (if partial dma is being used).  totaldmacount
3671                  * keeps track of the total amount of dma we have
3672                  * transferred for all the windows (needed to calculate
3673                  * the resid value below).
3674                  */
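                /*
                 * A hedged sketch (not the elided code itself) of how the
                 * remaining cookies of this window are typically walked and
                 * accumulated; "cnt" and "dmacount" are illustrative local
                 * names.
                 */
#if 0
                dmacount = cmd->cmd_cookie.dmac_size;
                for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
                        ddi_dma_nextcookie(cmd->cmd_dmahandle,
                            &cmd->cmd_cookie);
                        dmap++;
                        dmap->count = cmd->cmd_cookie.dmac_size;
                        dmap->addr.address64.Low = (uint32_t)
                            (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
                        dmap->addr.address64.High = (uint32_t)
                            (cmd->cmd_cookie.dmac_laddress >> 32);
                        dmacount += cmd->cmd_cookie.dmac_size;
                }
#endif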


3714 /*
3715  * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
3716  *
3717  * Notes:
3718  *      - also frees DMA resources if allocated
3719  *      - implicit DMA synchronization
3720  */
3721 static void
3722 mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
3723 {
3724         mptsas_cmd_t    *cmd = PKT2CMD(pkt);
3725         mptsas_t        *mpt = ADDR2MPT(ap);
3726 
3727         NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
3728             ap->a_target, (void *)pkt));
3729 
3730         if (cmd->cmd_flags & CFLAG_DMAVALID) {
3731                 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
3732                 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3733         }
3734 
3735         if (cmd->cmd_sg) {
3736                 kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
3737                 cmd->cmd_sg = NULL;
3738         }
3739 
3740         mptsas_free_extra_sgl_frame(mpt, cmd);
3741 
3742         if ((cmd->cmd_flags &
3743             (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
3744             CFLAG_SCBEXTERN)) == 0) {
3745                 cmd->cmd_flags = CFLAG_FREE;
3746                 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
3747         } else {
3748                 mptsas_pkt_destroy_extern(mpt, cmd);
3749         }
3750 }
3751 
3752 /*
3753  * kmem cache constructor and destructor:
3754  * When constructing, we bzero the cmd and allocate the dma handle
3755  * When destructing, just free the dma handle
3756  */
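/*
 * For context, a hedged sketch of how a constructor/destructor pair like
 * the ones below is normally wired up with kmem_cache_create(9F) at attach
 * time; the cache name and alignment shown here are illustrative only.
 */
#if 0
        mpt->m_kmem_cache = kmem_cache_create("mptsas_cache",
            sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
            mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
            NULL, (void *)mpt, NULL, 0);
#endif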
3757 static int
3758 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
3759 {


3797         if ((ddi_dma_alloc_handle(mpt->m_dip, &arq_dma_attr, callback,
3798             NULL, &cmd->cmd_arqhandle)) != DDI_SUCCESS) {
3799                 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3800                 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3801                 cmd->cmd_dmahandle = NULL;
3802                 cmd->cmd_arqhandle = NULL;
3803                 return (-1);
3804         }
3805 
3806         if (ddi_dma_buf_bind_handle(cmd->cmd_arqhandle,
3807             cmd->cmd_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
3808             callback, NULL, &cmd->cmd_arqcookie, &cookiec) != DDI_SUCCESS) {
3809                 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3810                 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3811                 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3812                 cmd->cmd_dmahandle = NULL;
3813                 cmd->cmd_arqhandle = NULL;
3814                 cmd->cmd_arq_buf = NULL;
3815                 return (-1);
3816         }










3817 
3818         return (0);
3819 }
3820 
3821 static void
3822 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
3823 {
3824 #ifndef __lock_lint
3825         _NOTE(ARGUNUSED(cdrarg))
3826 #endif
3827         mptsas_cmd_t    *cmd = buf;
3828 
3829         NDBG4(("mptsas_kmem_cache_destructor"));
3830 
3831         if (cmd->cmd_arqhandle) {
3832                 (void) ddi_dma_unbind_handle(cmd->cmd_arqhandle);
3833                 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3834                 cmd->cmd_arqhandle = NULL;
3835         }
3836         if (cmd->cmd_arq_buf) {
3837                 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3838                 cmd->cmd_arq_buf = NULL;
3839         }
3840         if (cmd->cmd_dmahandle) {
3841                 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3842                 cmd->cmd_dmahandle = NULL;
3843         }






3844 }
3845 
3846 static int
3847 mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
3848 {
3849         mptsas_cache_frames_t   *p = buf;
3850         mptsas_t                *mpt = cdrarg;
3851         ddi_dma_attr_t          frame_dma_attr;
3852         size_t                  mem_size, alloc_len;
3853         ddi_dma_cookie_t        cookie;
3854         uint_t                  ncookie;
3855         int (*callback)(caddr_t) = (kmflags == KM_SLEEP)
3856             ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
3857 
3858         frame_dma_attr = mpt->m_msg_dma_attr;
3859         frame_dma_attr.dma_attr_align = 0x10;
3860         frame_dma_attr.dma_attr_sgllen = 1;
3861 
3862         if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
3863             &p->m_dma_hdl) != DDI_SUCCESS) {


4485 
4486                 /*
4487                  * Sync DMA with the chain buffers that were just created
4488                  */
4489                 (void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
4490         }
4491 }
4492 
4493 /*
4494  * Interrupt handling
4495  * Utility routine.  Poll for status of a command sent to HBA
4496  * without interrupts (a FLAG_NOINTR command).
4497  */
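/*
 * Hedged usage sketch (placeholder names): a caller typically marks the
 * packet FLAG_NOINTR before handing it to tran_start, and the transport
 * then polls for completion via mptsas_poll() rather than waiting for an
 * interrupt.
 */
#if 0
        pkt->pkt_flags |= FLAG_NOINTR;
        if (scsi_transport(pkt) == TRAN_ACCEPT &&
            pkt->pkt_reason == CMD_CMPLT) {
                /* command was completed by polling */
        }
#endif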
4498 int
4499 mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
4500 {
4501         int     rval = TRUE;
4502 
4503         NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));
4504 


















4505         if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
4506                 mptsas_restart_hba(mpt);
4507         }
4508 
4509         /*
4510          * Wait, using drv_usecwait(), long enough for the command to
4511          * reasonably return from the target if the target isn't
4512          * "dead".  A polled command may well be sent from scsi_poll, and
4513          * there are retries built in to scsi_poll if the transport
4514          * accepted the packet (TRAN_ACCEPT).  scsi_poll waits 1 second
4515          * and retries the transport up to scsi_poll_busycnt times
4516          * (currently 60) if
4517          * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
4518          * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
4519          *
4520          * limit the waiting to avoid a hang in the event that the
4521          * cmd never gets started but we are still receiving interrupts
4522          */
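        /*
         * A hedged sketch of the retry test described above, as a caller
         * such as scsi_poll might apply it (the one-second delay and the
         * retry cap come from the description above; names are generic):
         */
#if 0
        if ((pkt->pkt_reason == CMD_INCOMPLETE && pkt->pkt_state == 0) ||
            (pkt->pkt_reason == CMD_CMPLT &&
            (*(pkt->pkt_scbp) & STATUS_MASK) == STATUS_BUSY)) {
                delay(drv_usectohz(1000000));   /* wait one second */
                (void) scsi_transport(pkt);     /* and resubmit */
        }
#endif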
4523         while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
4524                 if (mptsas_wait_intr(mpt, polltime) == FALSE) {
4525                         NDBG5(("mptsas_poll: command incomplete"));
4526                         rval = FALSE;
4527                         break;
4528                 }
4529         }
4530 




4531         if (rval == FALSE) {
4532 
4533                 /*
4534                  * this isn't supposed to happen, the hba must be wedged
4535                  * Mark this cmd as a timeout.
4536                  */
4537                 mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
4538                     (STAT_TIMEOUT|STAT_ABORTED));
4539 
4540                 if (poll_cmd->cmd_queued == FALSE) {
4541 
4542                         NDBG5(("mptsas_poll: not on waitq"));
4543 
4544                         poll_cmd->cmd_pkt->pkt_state |=
4545                             (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
4546                 } else {
4547 
4548                         /* find and remove it from the waitq */
4549                         NDBG5(("mptsas_poll: delete from waitq"));
4550                         mptsas_waitq_delete(mpt, poll_cmd);
4551                 }
4552 
4553         }
4554         mptsas_fma_check(mpt, poll_cmd);
4555         NDBG5(("mptsas_poll: done"));
4556         return (rval);
4557 }
4558 
4559 /*
4560  * Used for polling cmds and TM function
4561  */
4562 static int
4563 mptsas_wait_intr(mptsas_t *mpt, int polltime)
4564 {
4565         int                             cnt;
4566         pMpi2ReplyDescriptorsUnion_t    reply_desc_union;

4567         uint32_t                        int_mask;

4568 
4569         NDBG5(("mptsas_wait_intr"));
4570 
4571         mpt->m_polled_intr = 1;
4572 
4573         /*
4574          * Get the current interrupt mask and disable interrupts.  When
4575          * re-enabling ints, set mask to saved value.
4576          */
4577         int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
4578         MPTSAS_DISABLE_INTR(mpt);
4579 
4580         /*
4581          * Keep polling for roughly polltime milliseconds (1 ms per pass)
4582          */
4583         for (cnt = 0; cnt < polltime; cnt++) {

4584                 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
4585                     DDI_DMA_SYNC_FORCPU);
4586 
4587                 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
4588                     MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
4589 
4590                 if (ddi_get32(mpt->m_acc_post_queue_hdl,
4591                     &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
4592                     ddi_get32(mpt->m_acc_post_queue_hdl,
4593                     &reply_desc_union->Words.High) == 0xFFFFFFFF) {

4594                         drv_usecwait(1000);
4595                         continue;
4596                 }
4597 

















4598                 /*
4599                  * The reply is valid, process it according to its
4600                  * type.
4601                  */
4602                 mptsas_process_intr(mpt, reply_desc_union);




4603 
4604                 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
4605                         mpt->m_post_index = 0;
4606                 }
4607 
4608                 /*
4609                  * Update the global reply index
4610                  */
4611                 ddi_put32(mpt->m_datap,
4612                     &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
4613                 mpt->m_polled_intr = 0;
4614 
4615                 /*







4616                  * Re-enable interrupts and quit.
4617                  */
4618                 ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
4619                     int_mask);
4620                 return (TRUE);
4621 
4622         }
4623 
4624         /*
4625          * Clear polling flag, re-enable interrupts and quit.
4626          */
4627         mpt->m_polled_intr = 0;
4628         ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
4629         return (FALSE);
4630 }
4631 















































































































4632 static void
4633 mptsas_handle_scsi_io_success(mptsas_t *mpt,
4634     pMpi2ReplyDescriptorsUnion_t reply_desc)
4635 {
4636         pMpi2SCSIIOSuccessReplyDescriptor_t     scsi_io_success;
4637         uint16_t                                SMID;
4638         mptsas_slots_t                          *slots = mpt->m_active;
4639         mptsas_cmd_t                            *cmd = NULL;
4640         struct scsi_pkt                         *pkt;
4641 
4642         ASSERT(mutex_owned(&mpt->m_mutex));
4643 
4644         scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
4645         SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);
4646 
4647         /*
4648          * This is a success reply so just complete the IO.  First, do a sanity
4649          * check on the SMID.  The final slot is used for TM requests, which
4650          * would not come into this reply handler.
4651          */
4652         if ((SMID == 0) || (SMID > slots->m_n_slots)) {
4653                 mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
4654                     SMID);
4655                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4656                 return;
4657         }
4658 
4659         cmd = slots->m_slot[SMID];
4660 
4661         /*
4662          * print warning and return if the slot is empty
4663          */
4664         if (cmd == NULL) {
4665                 mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "


4700 }
4701 
4702 static void
4703 mptsas_handle_address_reply(mptsas_t *mpt,
4704     pMpi2ReplyDescriptorsUnion_t reply_desc)
4705 {
4706         pMpi2AddressReplyDescriptor_t   address_reply;
4707         pMPI2DefaultReply_t             reply;
4708         mptsas_fw_diagnostic_buffer_t   *pBuffer;
4709         uint32_t                        reply_addr;
4710         uint16_t                        SMID, iocstatus;
4711         mptsas_slots_t                  *slots = mpt->m_active;
4712         mptsas_cmd_t                    *cmd = NULL;
4713         uint8_t                         function, buffer_type;
4714         m_replyh_arg_t                  *args;
4715         int                             reply_frame_no;
4716 
4717         ASSERT(mutex_owned(&mpt->m_mutex));
4718 
4719         address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
4720         reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
4721             &address_reply->ReplyFrameAddress);
4722         SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);
4723 


4724         /*
4725          * If reply frame is not in the proper range we should ignore this
4726          * message and exit the interrupt handler.
4727          */
4728         if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
4729             (reply_addr >= (mpt->m_reply_frame_dma_addr +
4730             (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
4731             ((reply_addr - mpt->m_reply_frame_dma_addr) %
4732             mpt->m_reply_frame_size != 0)) {
4733                 mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
4734                     "address 0x%x\n", reply_addr);
4735                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4736                 return;
4737         }
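        /*
         * Worked example of the check above, with illustrative values only:
         * if m_reply_frame_dma_addr were 0x10000, m_reply_frame_size 0x100
         * and m_max_replies 16, the valid reply addresses would be 0x10000,
         * 0x10100, ... 0x10F00; anything outside that range, or not a
         * multiple of 0x100 past the base, is rejected.
         */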
4738 
4739         (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
4740             DDI_DMA_SYNC_FORCPU);
4741         reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
4742             mpt->m_reply_frame_dma_addr));
4743         function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);


4809                 args->rfm = reply_addr;
4810 
4811                 /*
4812                  * Record the event if its type is enabled in
4813                  * this mpt instance by ioctl.
4814                  */
4815                 mptsas_record_event(args);
4816 
4817                 /*
4818                  * Handle time-critical events; only
4819                  * NOT_RESPONDING/ADDED are handled synchronously for now
4820                  */
4821                 if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
4822                         /*
4823                          * Do not block the main path here; the taskq
4824                          * resolves the ack action and the ack is sent
4825                          * from the taskq thread.
4826                          */
4827                         NDBG20(("send mptsas_handle_event_sync success"));
4828                 }
4829 
4830                 if (mpt->m_in_reset) {
4831                         NDBG20(("dropping event received during reset"));
4832                         return;
4833                 }
4834 
4835                 if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
4836                     (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
4837                         mptsas_log(mpt, CE_WARN, "No memory available "
4838                             "for dispatching event taskq");
4839                         /*
4840                          * Return the reply frame to the free queue.
4841                          */
4842                         ddi_put32(mpt->m_acc_free_queue_hdl,
4843                             &((uint32_t *)(void *)
4844                             mpt->m_free_queue)[mpt->m_free_index], reply_addr);
4845                         (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
4846                             DDI_DMA_SYNC_FORDEV);
4847                         if (++mpt->m_free_index == mpt->m_free_queue_depth) {
4848                                 mpt->m_free_index = 0;
4849                         }
4850 
4851                         ddi_put32(mpt->m_datap,
4852                             &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
4853                 }
4854                 return;


4969             scsi_status, ioc_status, scsi_state));
4970 
4971         pkt = CMD2PKT(cmd);
4972         *(pkt->pkt_scbp) = scsi_status;
4973 
4974         if (loginfo == 0x31170000) {
4975                 /*
4976                  * If loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
4977                  * (0x31170000) is reported, the device missing delay is in
4978                  * progress and the command needs to be retried later.
4979                  */
4980                 *(pkt->pkt_scbp) = STATUS_BUSY;
4981                 return;
4982         }
4983 
4984         if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
4985             ((ioc_status & MPI2_IOCSTATUS_MASK) ==
4986             MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
4987                 pkt->pkt_reason = CMD_INCOMPLETE;
4988                 pkt->pkt_state |= STATE_GOT_BUS;

4989                 if (ptgt->m_reset_delay == 0) {
4990                         mptsas_set_throttle(mpt, ptgt,
4991                             DRAIN_THROTTLE);
4992                 }

4993                 return;
4994         }
4995 
4996         if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
4997                 responsedata &= 0x000000FF;
4998                 if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
4999                         mptsas_log(mpt, CE_NOTE, "TLR is not supported\n");
5000                         pkt->pkt_reason = CMD_TLR_OFF;
5001                         return;
5002                 }
5003         }
5004 
5005 
5006         switch (scsi_status) {
5007         case MPI2_SCSI_STATUS_CHECK_CONDITION:
5008                 pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5009                 arqstat = (void*)(pkt->pkt_scbp);
5010                 arqstat->sts_rqpkt_status = *((struct scsi_status *)
5011                     (pkt->pkt_scbp));
5012                 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |


5072                         topo_node->un.phymask = ptgt->m_phymask;
5073                         topo_node->devhdl = ptgt->m_devhdl;
5074                         topo_node->object = (void *)ptgt;
5075                         topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;
5076 
5077                         if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
5078                             mptsas_handle_dr,
5079                             (void *)topo_node,
5080                             DDI_NOSLEEP)) != DDI_SUCCESS) {
5081                                 mptsas_log(mpt, CE_NOTE, "mptsas failed to "
5082                                     "start taskq for SAS dynamic "
5083                                     "reconfiguration\n");
5084                         }
5085                 }
5086                 break;
5087         case MPI2_SCSI_STATUS_GOOD:
5088                 switch (ioc_status & MPI2_IOCSTATUS_MASK) {
5089                 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5090                         pkt->pkt_reason = CMD_DEV_GONE;
5091                         pkt->pkt_state |= STATE_GOT_BUS;

5092                         if (ptgt->m_reset_delay == 0) {
5093                                 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5094                         }

5095                         NDBG31(("lost disk for target%d, command:%x",
5096                             Tgt(cmd), pkt->pkt_cdbp[0]));
5097                         break;
5098                 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5099                         NDBG31(("data overrun: xferred=%d", xferred));
5100                         NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5101                         pkt->pkt_reason = CMD_DATA_OVR;
5102                         pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5103                             | STATE_SENT_CMD | STATE_GOT_STATUS
5104                             | STATE_XFERRED_DATA);
5105                         pkt->pkt_resid = 0;
5106                         break;
5107                 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5108                 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5109                         NDBG31(("data underrun: xferred=%d", xferred));
5110                         NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5111                         pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5112                             | STATE_SENT_CMD | STATE_GOT_STATUS);
5113                         pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5114                         if (pkt->pkt_resid != cmd->cmd_dmacount) {


5121                         break;
5122                 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5123                 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5124                         mptsas_set_pkt_reason(mpt,
5125                             cmd, CMD_RESET, STAT_DEV_RESET);
5126                         break;
5127                 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5128                 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5129                         pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
5130                         mptsas_set_pkt_reason(mpt,
5131                             cmd, CMD_TERMINATED, STAT_TERMINATED);
5132                         break;
5133                 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5134                 case MPI2_IOCSTATUS_BUSY:
5135                         /*
5136                          * set throttles to drain
5137                          */
5138                         ptgt = (mptsas_target_t *)mptsas_hash_traverse(
5139                             &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
5140                         while (ptgt != NULL) {

5141                                 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);

5142 
5143                                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
5144                                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
5145                         }
5146 
5147                         /*
5148                          * retry command
5149                          */
5150                         cmd->cmd_flags |= CFLAG_RETRY;
5151                         cmd->cmd_pkt_flags |= FLAG_HEAD;
5152 

5153                         (void) mptsas_accept_pkt(mpt, cmd);

5154                         break;
5155                 default:
5156                         mptsas_log(mpt, CE_WARN,
5157                             "unknown ioc_status = %x\n", ioc_status);
5158                         mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
5159                             "count = %x, scsi_status = %x", scsi_state,
5160                             xferred, scsi_status);
5161                         break;
5162                 }
5163                 break;
5164         case MPI2_SCSI_STATUS_TASK_SET_FULL:
5165                 mptsas_handle_qfull(mpt, cmd);
5166                 break;
5167         case MPI2_SCSI_STATUS_BUSY:
5168                 NDBG31(("scsi_status busy received"));
5169                 break;
5170         case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5171                 NDBG31(("scsi_status reservation conflict received"));
5172                 break;
5173         default:


5248                         cv_wait(&item->cv, &item->mutex);
5249                 }
5250                 pkt = NULL;
5251                 if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
5252                         cmd->cmd_flags |= CFLAG_COMPLETED;
5253                         pkt = CMD2PKT(cmd);
5254                 }
5255                 mutex_exit(&item->mutex);
5256                 if (pkt) {
5257                         mptsas_pkt_comp(pkt, cmd);
5258                 }
5259                 mutex_enter(&item->mutex);
5260         }
5261         mutex_exit(&item->mutex);
5262         mutex_enter(&mpt->m_doneq_mutex);
5263         mpt->m_doneq_thread_n--;
5264         cv_broadcast(&mpt->m_doneq_thread_cv);
5265         mutex_exit(&mpt->m_doneq_mutex);
5266 }
5267 
5268 
5269 /*
5270  * mpt interrupt handler.
5271  */
5272 static uint_t
5273 mptsas_intr(caddr_t arg1, caddr_t arg2)
5274 {
5275         mptsas_t                        *mpt = (void *)arg1;
5276         pMpi2ReplyDescriptorsUnion_t    reply_desc_union;
5277         uchar_t                         did_reply = FALSE;



5278 
5279         NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));
5280 
5281         mutex_enter(&mpt->m_mutex);














































































































































































5282 


5283         /*
5284          * If interrupts are shared by two channels, check whether this
5285          * interrupt is genuinely for this channel by first making sure
5286          * the chip is in the high power state.
5287          */
5288         if ((mpt->m_options & MPTSAS_OPT_PM) &&
5289             (mpt->m_power_level != PM_LEVEL_D0)) {
5290                 mutex_exit(&mpt->m_mutex);
5291                 return (DDI_INTR_UNCLAIMED);
5292         }
5293 
5294         /*
5295          * If polling is in progress, this interrupt was triggered by some
5296          * shared source because IOC interrupts are disabled during polling,
5297          * and the polling routine will handle any replies.  So return with
5298          * the interrupt unclaimed.
5299          */
5300         if (mpt->m_polled_intr) {
5301                 mutex_exit(&mpt->m_mutex);
5302                 mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
5303                 return (DDI_INTR_UNCLAIMED);
5304         }
5305 
5306         /*
5307          * Read the istat register.
5308          */
5309         if ((INTPENDING(mpt)) != 0) {
5310                 /*
5311                  * read fifo until empty.
5312                  */
5313 #ifndef __lock_lint
5314                 _NOTE(CONSTCOND)
5315 #endif
5316                 while (TRUE) {
5317                         (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5318                             DDI_DMA_SYNC_FORCPU);
5319                         reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5320                             MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5321 
5322                         if (ddi_get32(mpt->m_acc_post_queue_hdl,
5323                             &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5324                             ddi_get32(mpt->m_acc_post_queue_hdl,
5325                             &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5326                                 break;
5327                         }
5328 
5329                         /*
5330                          * The reply is valid, process it according to its
5331                          * type.  Also, set a flag for updating the reply index
5332                          * after they've all been processed.
5333                          */
5334                         did_reply = TRUE;
5335 
5336                         mptsas_process_intr(mpt, reply_desc_union);

































5337 
5338                         /*
5339                          * Increment post index and roll over if needed.
5340                          */
5341                         if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5342                                 mpt->m_post_index = 0;
5343                         }


5344                 }
5345 
5346                 /*
5347                  * Update the global reply index if at least one reply was
5348                  * processed.
5349                  */
5350                 if (did_reply) {
5351                         ddi_put32(mpt->m_datap,
5352                             &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);













5353                 }


5354         } else {
5355                 mutex_exit(&mpt->m_mutex);
5356                 return (DDI_INTR_UNCLAIMED);
5357         }
5358         NDBG1(("mptsas_intr complete"));

5359 
5360         /*















5361          * If no helper threads were created, process the doneq in the ISR.
5362          * If helpers were created, use the doneq length as a metric for the
5363          * load on the interrupt CPU: if the queue is long enough, indicating
5364          * a heavy load, deliver the I/O completions to the helpers.  This
5365          * measurement has some limitations, but it is simple, straightforward,
5366          * and works well for most cases at present.
5367          */
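        /*
         * For example (illustrative numbers only): with helper threads
         * present and an m_doneq_length_threshold of 8, a doneq of 5
         * completions is drained inline here, while a doneq of 20 is
         * handed to the helper threads instead.
         */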
5368         if (!mpt->m_doneq_thread_n ||
5369             (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
5370                 mptsas_doneq_empty(mpt);
5371         } else {






5372                 mptsas_deliver_doneq_thread(mpt);


5373         }

5374 
5375         /*
5376          * If there are queued cmd, start them now.
5377          */

5378         if (mpt->m_waitq != NULL) {
5379                 mptsas_restart_waitq(mpt);
5380         }
5381 
5382         mutex_exit(&mpt->m_mutex);
5383         return (DDI_INTR_CLAIMED);



5384 }
5385 







5386 static void
5387 mptsas_process_intr(mptsas_t *mpt,
5388     pMpi2ReplyDescriptorsUnion_t reply_desc_union)
5389 {
5390         uint8_t reply_type;
5391 
5392         ASSERT(mutex_owned(&mpt->m_mutex));
5393 
5394         /*
5395          * The reply is valid; process it according to its
5396          * type (SCSI IO success or address reply), then clear
5397          * the descriptor for re-use below.
5398          */
5399         reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
5400             &reply_desc_union->Default.ReplyFlags);
5401         reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
5402         if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
5403                 mptsas_handle_scsi_io_success(mpt, reply_desc_union);
5404         } else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
5405                 mptsas_handle_address_reply(mpt, reply_desc_union);
5406         } else {
5407                 mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
5408                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5409         }
5410 
5411         /*
5412          * Clear the reply descriptor for re-use and increment
5413          * index.
5414          */
5415         ddi_put64(mpt->m_acc_post_queue_hdl,
5416             &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
5417             0xFFFFFFFFFFFFFFFF);
5418         (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5419             DDI_DMA_SYNC_FORDEV);
5420 }
5421 
5422 /*
5423  * handle qfull condition
5424  */
5425 static void
5426 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
5427 {
5428         mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
5429 
5430         if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
5431             (ptgt->m_qfull_retries == 0)) {
5432                 /*
5433                  * We have exhausted the retries on QFULL, or,
5434                  * the target driver has indicated that it
5435                  * wants to handle QFULL itself by setting
5436                  * qfull-retries capability to 0. In either case
5437                  * we want the target driver's QFULL handling
5438                  * to kick in. We do this by having pkt_reason
5439                  * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
5440                  */

5441                 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);

5442         } else {

5443                 if (ptgt->m_reset_delay == 0) {
5444                         ptgt->m_t_throttle =
5445                             max((ptgt->m_t_ncmds - 2), 0);
5446                 }

5447 
5448                 cmd->cmd_pkt_flags |= FLAG_HEAD;
5449                 cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
5450                 cmd->cmd_flags |= CFLAG_RETRY;
5451 

5452                 (void) mptsas_accept_pkt(mpt, cmd);

5453 
5454                 /*
5455                  * When the target returns queue full status with no
5456                  * commands outstanding (m_t_ncmds == 0), the throttle is
5457                  * set to 0 (HOLD_THROTTLE) and queue full handling starts
5458                  * (see psarc/1994/313); if there are commands outstanding,
5459                  * the throttle is set to (m_t_ncmds - 2).
5460                  */
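                /*
                 * Worked example (illustrative numbers): with 10 commands
                 * outstanding the throttle above becomes max(10 - 2, 0) = 8;
                 * with none outstanding it becomes HOLD_THROTTLE (0) and the
                 * QFULL_THROTTLE/timeout path below takes over.
                 */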

5461                 if (ptgt->m_t_throttle == HOLD_THROTTLE) {
5462                         /*
5463                          * By setting the throttle to QFULL_THROTTLE, we
5464                          * avoid submitting new commands and let
5465                          * mptsas_restart_cmd find the slots whose
5466                          * throttles need to be cleared.
5467                          */
5468                         mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
5469                         if (mpt->m_restart_cmd_timeid == 0) {
5470                                 mpt->m_restart_cmd_timeid =
5471                                     timeout(mptsas_restart_cmd, mpt,
5472                                     ptgt->m_qfull_retry_interval);
5473                         }
5474                 }

5475         }
5476 }
5477 
5478 mptsas_phymask_t
5479 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
5480 {
5481         mptsas_phymask_t        phy_mask = 0;
5482         uint8_t                 i = 0;
5483 
5484         NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
5485 
5486         ASSERT(mutex_owned(&mpt->m_mutex));
5487 
5488         /*
5489          * If physport is 0xFF, this is a RAID volume.  Use phymask of 0.
5490          */
5491         if (physport == 0xFF) {
5492                 return (0);
5493         }
5494 


5752                                         mutex_exit(&mpt->m_mutex);
5753 
5754                                         parent = NULL;
5755                                         continue;
5756                                 }
5757                                 (void) sprintf(phy_mask_name, "%x", phymask);
5758                         }
5759                         parent = scsi_hba_iport_find(mpt->m_dip,
5760                             phy_mask_name);
5761                         if (parent == NULL) {
5762                                 mptsas_log(mpt, CE_WARN, "Failed to find an "
5763                                     "iport, should not happen!");
5764                                 goto out;
5765                         }
5766 
5767                 }
5768                 ASSERT(parent);
5769 handle_topo_change:
5770 
5771                 mutex_enter(&mpt->m_mutex);
5772                 /*
5773                  * If HBA is being reset, don't perform operations depending
5774                  * on the IOC. We must free the topo list, however.
5775                  */
5776                 if (!mpt->m_in_reset)
5777                         mptsas_handle_topo_change(topo_node, parent);
5778                 else
5779                         NDBG20(("skipping topo change received during reset"));
5780                 save_node = topo_node;
5781                 topo_node = topo_node->next;
5782                 ASSERT(save_node);
5783                 kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
5784                 mutex_exit(&mpt->m_mutex);
5785 
5786                 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
5787                     (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
5788                     (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
5789                         /*
5790                          * If a direct-attached device is associated,
5791                          * reset the parent before starting the next one;
5792                          * all devices associated with an expander share
5793                          * the parent.  Also reset the parent for RAID.
5794                          */
5795                         parent = NULL;
5796                 }
5797         }
5798 out:
5799         kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);


6049                             MPTSAS_NUM_PHYS, 0) !=
6050                             DDI_PROP_SUCCESS) {
6051                                 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6052                                     MPTSAS_NUM_PHYS);
6053                                 mptsas_log(mpt, CE_WARN, "mptsas num phys "
6054                                     "prop update failed");
6055                                 break;
6056                         }
6057                         if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6058                             MPTSAS_VIRTUAL_PORT, 1) !=
6059                             DDI_PROP_SUCCESS) {
6060                                 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6061                                     MPTSAS_VIRTUAL_PORT);
6062                                 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6063                                     "prop update failed");
6064                                 break;
6065                         }
6066                 }
6067 
6068                 mutex_enter(&mpt->m_mutex);




6069                 if (rval == DDI_SUCCESS) {
6070                         mptsas_tgt_free(&mpt->m_active->m_tgttbl,
6071                             ptgt->m_sas_wwn, ptgt->m_phymask);
6072                         ptgt = NULL;
6073                 } else {
6074                         /*
6075                          * Clear the DR_INTRANSITION flag to allow I/O down
6076                          * to the PHCI driver since failover has finished.
6077                          * Invalidate the devhdl.
6078                          */

6079                         ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
6080                         ptgt->m_tgt_unconfigured = 0;
6081                         mutex_enter(&mpt->m_tx_waitq_mutex);
6082                         ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
6083                         mutex_exit(&mpt->m_tx_waitq_mutex);
6084                 }
6085 
6086                 /*
6087                  * Send SAS IO Unit Control to free the dev handle
6088                  */
6089                 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6090                     (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
6091                         rval = mptsas_free_devhdl(mpt, devhdl);
6092 
6093                         NDBG20(("mptsas%d handle_topo_change to remove "
6094                             "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6095                             rval));
6096                 }
6097 
6098                 break;
6099         }
6100         case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
6101         {
6102                 devhdl = topo_node->devhdl;
6103                 /*


6609                                         topo_node->mpt = mpt;
6610                                         topo_node->un.phymask = 0;
6611                                         topo_node->event =
6612                                             MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
6613                                         topo_node->devhdl = dev_handle;
6614                                         topo_node->flags = flags;
6615                                         topo_node->object = NULL;
6616                                         if (topo_head == NULL) {
6617                                                 topo_head = topo_tail =
6618                                                     topo_node;
6619                                         } else {
6620                                                 topo_tail->next = topo_node;
6621                                                 topo_tail = topo_node;
6622                                         }
6623                                         break;
6624                                 }
6625 
6626                                 /*
6627                                  * Update the DR flag immediately to avoid
6628                                  * I/O failures before failover finishes.
6629                                  * Note the mutex protection: we must hold
6630                                  * m_tx_waitq_mutex while setting m_dr_flag
6631                                  * because subsequent commands are not added
6632                                  * to the waitq; instead, TRAN_BUSY is
6633                                  * returned in the tran_start context.
6634                                  */
6635                                 mutex_enter(&mpt->m_tx_waitq_mutex);
6636                                 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
6637                                 mutex_exit(&mpt->m_tx_waitq_mutex);
6638 
6639                                 topo_node = kmem_zalloc(
6640                                     sizeof (mptsas_topo_change_list_t),
6641                                     KM_SLEEP);
6642                                 topo_node->mpt = mpt;
6643                                 topo_node->un.phymask = ptgt->m_phymask;
6644                                 topo_node->event =
6645                                     MPTSAS_DR_EVENT_OFFLINE_TARGET;
6646                                 topo_node->devhdl = dev_handle;
6647                                 topo_node->flags = flags;
6648                                 topo_node->object = NULL;
6649                                 if (topo_head == NULL) {
6650                                         topo_head = topo_tail = topo_node;
6651                                 } else {
6652                                         topo_tail->next = topo_node;
6653                                         topo_tail = topo_node;
6654                                 }
6655                                 break;
6656                         }
6657                         case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:


6850                                 break;
6851                         }
6852                         case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
6853                         case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
6854                         {
6855                                 NDBG20(("mptsas %d volume deleted\n",
6856                                     mpt->m_instance));
6857                                 ptgt = mptsas_search_by_devhdl(tgttbl,
6858                                     volhandle);
6859                                 if (ptgt == NULL)
6860                                         break;
6861 
6862                                 /*
6863                                  * Clear any flags related to volume
6864                                  */
6865                                 (void) mptsas_delete_volume(mpt, volhandle);
6866 
6867                                 /*
6868                                  * Update the DR flag immediately to avoid I/O failures
6869                                  */
6870                                 mutex_enter(&mpt->m_tx_waitq_mutex);
6871                                 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
6872                                 mutex_exit(&mpt->m_tx_waitq_mutex);
6873 
6874                                 topo_node = kmem_zalloc(
6875                                     sizeof (mptsas_topo_change_list_t),
6876                                     KM_SLEEP);
6877                                 topo_node->mpt = mpt;
6878                                 topo_node->un.phymask = ptgt->m_phymask;
6879                                 topo_node->event =
6880                                     MPTSAS_DR_EVENT_OFFLINE_TARGET;
6881                                 topo_node->devhdl = volhandle;
6882                                 topo_node->flags =
6883                                     MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
6884                                 topo_node->object = (void *)ptgt;
6885                                 if (topo_head == NULL) {
6886                                         topo_head = topo_tail = topo_node;
6887                                 } else {
6888                                         topo_tail->next = topo_node;
6889                                         topo_tail = topo_node;
6890                                 }
6891                                 break;
6892                         }
6893                         case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
6894                         case MPI2_EVENT_IR_CHANGE_RC_HIDE:
6895                         {
6896                                 ptgt = mptsas_search_by_devhdl(tgttbl,
6897                                     diskhandle);
6898                                 if (ptgt == NULL)
6899                                         break;
6900 
6901                                 /*
6902                                  * Update the DR flag immediately to avoid I/O failures
6903                                  */
6904                                 mutex_enter(&mpt->m_tx_waitq_mutex);
6905                                 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
6906                                 mutex_exit(&mpt->m_tx_waitq_mutex);
6907 
6908                                 topo_node = kmem_zalloc(
6909                                     sizeof (mptsas_topo_change_list_t),
6910                                     KM_SLEEP);
6911                                 topo_node->mpt = mpt;
6912                                 topo_node->un.phymask = ptgt->m_phymask;
6913                                 topo_node->event =
6914                                     MPTSAS_DR_EVENT_OFFLINE_TARGET;
6915                                 topo_node->devhdl = diskhandle;
6916                                 topo_node->flags =
6917                                     MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
6918                                 topo_node->object = (void *)ptgt;
6919                                 if (topo_head == NULL) {
6920                                         topo_head = topo_tail = topo_node;
6921                                 } else {
6922                                         topo_tail->next = topo_node;
6923                                         topo_tail = topo_node;
6924                                 }
6925                                 break;
6926                         }


6981 
6982 /*
6983  * handle events from ioc
6984  */
6985 static void
6986 mptsas_handle_event(void *args)
6987 {
6988         m_replyh_arg_t                  *replyh_arg;
6989         pMpi2EventNotificationReply_t   eventreply;
6990         uint32_t                        event, iocloginfo, rfm;
6991         uint32_t                        status;
6992         uint8_t                         port;
6993         mptsas_t                        *mpt;
6994         uint_t                          iocstatus;
6995 
6996         replyh_arg = (m_replyh_arg_t *)args;
6997         rfm = replyh_arg->rfm;
6998         mpt = replyh_arg->mpt;
6999 
7000         mutex_enter(&mpt->m_mutex);
7001         /*
7002          * If HBA is being reset, drop incoming event.
7003          */
7004         if (mpt->m_in_reset) {
7005                 NDBG20(("dropping event received prior to reset"));
7006                 mutex_exit(&mpt->m_mutex);
7007                 return;
7008         }
7009 
7010         eventreply = (pMpi2EventNotificationReply_t)
7011             (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
7012         event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7013 
7014         if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7015             &eventreply->IOCStatus)) {
7016                 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7017                         mptsas_log(mpt, CE_WARN,
7018                             "!mptsas_handle_event: IOCStatus=0x%x, "
7019                             "IOCLogInfo=0x%x", iocstatus,
7020                             ddi_get32(mpt->m_acc_reply_frame_hdl,
7021                             &eventreply->IOCLogInfo));
7022                 } else {
7023                         mptsas_log(mpt, CE_WARN,
7024                             "mptsas_handle_event: IOCStatus=0x%x, "
7025                             "IOCLogInfo=0x%x", iocstatus,
7026                             ddi_get32(mpt->m_acc_reply_frame_hdl,
7027                             &eventreply->IOCLogInfo));
7028                 }


7549             mpt->m_free_index);
7550         mutex_exit(&mpt->m_mutex);
7551 }
7552 
7553 /*
7554  * invoked from timeout() to restart qfull cmds with throttle == 0
7555  */
7556 static void
7557 mptsas_restart_cmd(void *arg)
7558 {
7559         mptsas_t        *mpt = arg;
7560         mptsas_target_t *ptgt = NULL;
7561 
7562         mutex_enter(&mpt->m_mutex);
7563 
7564         mpt->m_restart_cmd_timeid = 0;
7565 
7566         ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
7567             MPTSAS_HASH_FIRST);
7568         while (ptgt != NULL) {

7569                 if (ptgt->m_reset_delay == 0) {
7570                         if (ptgt->m_t_throttle == QFULL_THROTTLE) {
7571                                 mptsas_set_throttle(mpt, ptgt,
7572                                     MAX_THROTTLE);
7573                         }
7574                 }

7575 
7576                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
7577                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
7578         }
7579         mptsas_restart_hba(mpt);
7580         mutex_exit(&mpt->m_mutex);
7581 }
7582 




7583 void
7584 mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
7585 {






















7586         int             slot;
7587         mptsas_slots_t  *slots = mpt->m_active;
7588         int             t;
7589         mptsas_target_t *ptgt = cmd->cmd_tgt_addr;

7590 
7591         ASSERT(cmd != NULL);
7592         ASSERT(cmd->cmd_queued == FALSE);
7593 
7594         /*
7595          * Task Management cmds are removed in their own routines.  Also,
7596          * we don't want to modify timeout based on TM cmds.
7597          */
7598         if (cmd->cmd_flags & CFLAG_TM_CMD) {
7599                 return;
7600         }
7601 
7602         t = Tgt(cmd);
7603         slot = cmd->cmd_slot;



7604 
7605         /*
7606          * remove the cmd.
7607          */
7608         if (cmd == slots->m_slot[slot]) {
7609                 NDBG31(("mptsas_remove_cmd: removing cmd=0x%p", (void *)cmd));

7610                 slots->m_slot[slot] = NULL;
7611                 mpt->m_ncmds--;







7612 
7613                 /*
7614                  * only decrement per target ncmds if command
7615                  * has a target associated with it.
7616                  */
7617                 if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {

7618                         ptgt->m_t_ncmds--;
7619                         /*
7620                          * reset throttle if we just ran an untagged command
7621                          * to a tagged target
7622                          */
7623                         if ((ptgt->m_t_ncmds == 0) &&
7624                             ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
7625                                 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7626                         }

7627                 }
7628 
7629         }
7630 
7631         /*
7632          * This is all we need to do for ioc commands.



7633          */
7634         if (cmd->cmd_flags & CFLAG_CMDIOC) {

7635                 mptsas_return_to_pool(mpt, cmd);
7636                 return;
7637         }
7638 
7639         /*
7640          * Figure out what to set tag Q timeout for...
7641          *
7642          * Optimize: if we have duplicates of the same timeout value
7643          * we're using, then we'll use it again until we run
7644          * out of duplicates.  This should be the normal case
7645          * for block and raw I/O.
7646          * If there are no duplicates, we have to scan through the tag
7647          * queue and find the longest timeout value and use it.  This is
7648          * going to take a while...
7649          * Add 1 to m_n_slots to account for the TM request.
7650          */

7651         if (cmd->cmd_pkt->pkt_time == ptgt->m_timebase) {
7652                 if (--(ptgt->m_dups) == 0) {
7653                         if (ptgt->m_t_ncmds) {
7654                                 mptsas_cmd_t *ssp;
7655                                 uint_t n = 0;
7656                                 ushort_t nslots = (slots->m_n_slots + 1);
7657                                 ushort_t i;
7658                                 /*
7659                                  * This crude check assumes we don't do
7660                                  * this too often which seems reasonable
7661                                  * for block and raw I/O.
7662                                  */
7663                                 for (i = 0; i < nslots; i++) {
7664                                         ssp = slots->m_slot[i];
7665                                         if (ssp && (Tgt(ssp) == t) &&
7666                                             (ssp->cmd_pkt->pkt_time > n)) {
7667                                                 n = ssp->cmd_pkt->pkt_time;
7668                                                 ptgt->m_dups = 1;
7669                                         } else if (ssp && (Tgt(ssp) == t) &&
7670                                             (ssp->cmd_pkt->pkt_time == n)) {
7671                                                 ptgt->m_dups++;
7672                                         }
7673                                 }
7674                                 ptgt->m_timebase = n;
7675                         } else {
7676                                 ptgt->m_dups = 0;
7677                                 ptgt->m_timebase = 0;
7678                         }
7679                 }
7680         }
7681         ptgt->m_timeout = ptgt->m_timebase;
7682 
7683         ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);

7684 }
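/*
 * Illustrative sketch, not part of the driver: the duplicate-timeout
 * bookkeeping above condensed into a single helper.  It rescans the slot
 * table for target t and returns the longest pkt_time still outstanding,
 * with *dups set to how many commands share that value.  m_slot, m_n_slots,
 * Tgt() and cmd_pkt are the driver's own; the helper itself is hypothetical.
 */
static uint_t
example_longest_timeout(mptsas_slots_t *slots, int t, ushort_t *dups)
{
	uint_t		n = 0;
	ushort_t	i;
	ushort_t	nslots = slots->m_n_slots + 1;	/* + 1 for TM slot */
	mptsas_cmd_t	*ssp;

	*dups = 0;
	for (i = 0; i < nslots; i++) {
		if ((ssp = slots->m_slot[i]) == NULL || Tgt(ssp) != t)
			continue;
		if (ssp->cmd_pkt->pkt_time > n) {
			/* New maximum: restart the duplicate count. */
			n = ssp->cmd_pkt->pkt_time;
			*dups = 1;
		} else if (ssp->cmd_pkt->pkt_time == n) {
			(*dups)++;
		}
	}
	return (n);
}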
7685 
7686 /*
7687  * Accept all cmds on the tx_waitq, if any, and then start a fresh
7688  * request from the top of the device queue.
7689  *
7690  * Since cmds are almost always queued on the tx_waitq and only rarely on
7691  * the instance waitq, this function should not be invoked from the ISR;
7692  * mptsas_restart_waitq() is invoked there instead.  Otherwise the burden
7693  * that belongs to the I/O dispatch CPUs would be moved to the interrupt CPU.
7694  */
7695 static void
7696 mptsas_restart_hba(mptsas_t *mpt)
7697 {
7698         ASSERT(mutex_owned(&mpt->m_mutex));
7699 
7700         mutex_enter(&mpt->m_tx_waitq_mutex);
7701         if (mpt->m_tx_waitq) {
7702                 mptsas_accept_tx_waitq(mpt);
7703         }
7704         mutex_exit(&mpt->m_tx_waitq_mutex);
7705         mptsas_restart_waitq(mpt);
7706 }
7707 
7708 /*
7709  * start a fresh request from the top of the device queue
7710  */
7711 static void
7712 mptsas_restart_waitq(mptsas_t *mpt)
7713 {
7714         mptsas_cmd_t    *cmd, *next_cmd;
7715         mptsas_target_t *ptgt = NULL;
7716 
7717         NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));
7718 
7719         ASSERT(mutex_owned(&mpt->m_mutex));
7720 
7721         /*
7722          * If there is a reset delay, don't start any cmds.  Otherwise, start
7723          * as many cmds as possible.
7724          * Since SMID 0 is reserved and the TM slot is reserved, the actual max
7725          * commands is m_max_requests - 2.
7726          */
7727         cmd = mpt->m_waitq;
7728 
7729         while (cmd != NULL) {
7730                 next_cmd = cmd->cmd_linkp;
7731                 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
7732                         if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7733                                 /*
7734                                  * passthru command get slot need
7735                                  * set CFLAG_PREPARED.
7736                                  */
7737                                 cmd->cmd_flags |= CFLAG_PREPARED;


7752                                 mptsas_start_config_page_access(mpt, cmd);
7753                         }
7754                         cmd = next_cmd;
7755                         continue;
7756                 }
7757                 if (cmd->cmd_flags & CFLAG_FW_DIAG) {
7758                         if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7759                                 /*
7760                                  * Send the FW Diag request and delete it from
7761                                  * the waitq.
7762                                  */
7763                                 cmd->cmd_flags |= CFLAG_PREPARED;
7764                                 mptsas_waitq_delete(mpt, cmd);
7765                                 mptsas_start_diag(mpt, cmd);
7766                         }
7767                         cmd = next_cmd;
7768                         continue;
7769                 }
7770 
7771                 ptgt = cmd->cmd_tgt_addr;
7772                 if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&



7773                     (ptgt->m_t_ncmds == 0)) {
7774                         mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7775                 }
7776                 if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
7777                     (ptgt && (ptgt->m_reset_delay == 0)) &&
7778                     (ptgt && (ptgt->m_t_ncmds <
7779                     ptgt->m_t_throttle))) {
7780                         if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7781                                 mptsas_waitq_delete(mpt, cmd);
7782                                 (void) mptsas_start_cmd(mpt, cmd);
7783                         }

7784                 }




7785                 cmd = next_cmd;
7786         }
7787 }

7788 /*
7789  * Cmds are queued if tran_start() doesn't get the m_mutex lock (no wait).
7790  * Accept all of those queued cmds before a new cmd is accepted so that
7791  * the cmds are sent in order.
7792  */
7793 static void
7794 mptsas_accept_tx_waitq(mptsas_t *mpt)
7795 {
7796         mptsas_cmd_t *cmd;
7797 
7798         ASSERT(mutex_owned(&mpt->m_mutex));
7799         ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
7800 
7801         /*
7802          * A Bus Reset could occur at any time and flush the tx_waitq,
7803          * so we cannot count on the tx_waitq to contain even one cmd.
7804          * Also, while the m_tx_waitq_mutex is dropped to run
7805          * mptsas_accept_pkt(), the tx_waitq may be flushed.
7806          */
7807         cmd = mpt->m_tx_waitq;
7808         for (;;) {
7809                 if ((cmd = mpt->m_tx_waitq) == NULL) {
7810                         mpt->m_tx_draining = 0;
7811                         break;
7812                 }
7813                 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
7814                         mpt->m_tx_waitqtail = &mpt->m_tx_waitq;

7815                 }
7816                 cmd->cmd_linkp = NULL;
7817                 mutex_exit(&mpt->m_tx_waitq_mutex);
7818                 if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
7819                         cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
7820                             "to accept cmd on queue\n");
7821                 mutex_enter(&mpt->m_tx_waitq_mutex);
7822         }
7823 }
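/*
 * Illustrative sketch, not part of the driver: the drop-and-reacquire drain
 * pattern used by mptsas_accept_tx_waitq() above, in generic form.  The
 * head is re-read on every iteration because the list may be flushed or
 * refilled while the lock is dropped to do the heavyweight work.  All type
 * and function names below are hypothetical.
 */
typedef struct example_ent {
	struct example_ent	*e_next;
} example_ent_t;

typedef struct example_list {
	kmutex_t	l_lock;
	example_ent_t	*l_head;
	example_ent_t	**l_tailpp;	/* points at last e_next, or at l_head */
} example_list_t;

extern void example_process(example_ent_t *);	/* may take other locks */

static void
example_drain(example_list_t *lp)
{
	example_ent_t *e;

	mutex_enter(&lp->l_lock);
	for (;;) {
		if ((e = lp->l_head) == NULL)
			break;
		if ((lp->l_head = e->e_next) == NULL)
			lp->l_tailpp = &lp->l_head;	/* list is now empty */
		e->e_next = NULL;
		mutex_exit(&lp->l_lock);
		example_process(e);
		mutex_enter(&lp->l_lock);	/* head re-read on next pass */
	}
	mutex_exit(&lp->l_lock);
}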
7824 



7825 
7826 /*
7827  * mpt tag type lookup
7828  */
7829 static char mptsas_tag_lookup[] =
7830         {0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
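/*
 * Illustrative sketch, not part of the driver: how the lookup table above
 * is indexed.  Assuming the usual scsi_pkt tag flag encodings
 * (FLAG_HTAG 0x1000, FLAG_OTAG 0x2000, FLAG_STAG 0x4000), shifting the
 * masked flags right by 12 yields 1, 2 or 4, selecting MSG_HEAD_QTAG,
 * MSG_ORDERED_QTAG or MSG_SIMPLE_QTAG respectively.  The helper below is
 * hypothetical and simply mirrors the switch in mptsas_start_cmd().
 */
static uint32_t
example_tag_to_control(uint_t pkt_flags)
{
	switch (mptsas_tag_lookup[(pkt_flags & FLAG_TAGMASK) >> 12]) {
	case MSG_SIMPLE_QTAG:
		return (MPI2_SCSIIO_CONTROL_SIMPLEQ);
	case MSG_HEAD_QTAG:
		return (MPI2_SCSIIO_CONTROL_HEADOFQ);
	case MSG_ORDERED_QTAG:
		return (MPI2_SCSIIO_CONTROL_ORDEREDQ);
	default:
		return (0);	/* untagged or unknown */
	}
}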
7831 
7832 static int
7833 mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
7834 {
7835         struct scsi_pkt         *pkt = CMD2PKT(cmd);
7836         uint32_t                control = 0;
7837         int                     n;
7838         caddr_t                 mem;
7839         pMpi2SCSIIORequest_t    io_request;
7840         ddi_dma_handle_t        dma_hdl = mpt->m_dma_req_frame_hdl;
7841         ddi_acc_handle_t        acc_hdl = mpt->m_acc_req_frame_hdl;
7842         mptsas_target_t         *ptgt = cmd->cmd_tgt_addr;
7843         uint16_t                SMID, io_flags = 0;
7844         uint32_t                request_desc_low, request_desc_high;
7845 
7846         NDBG1(("mptsas_start_cmd: cmd=0x%p", (void *)cmd));
7847 
7848         /*
7849          * The SMID is simply the command's slot number.  SMID 0 is
7850          * invalid, so slot/SMID numbering starts at 1.
7851          */
7852         SMID = cmd->cmd_slot;
7853 
7854         /*
7855          * It is possible for back to back device reset to
7856          * happen before the reset delay has expired.  That's
7857          * ok, just let the device reset go out on the bus.
7858          */
7859         if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
7860                 ASSERT(ptgt->m_reset_delay == 0);
7861         }
7862 
7863         /*
7864          * if a non-tagged cmd is submitted to an active tagged target
7865          * then drain before submitting this cmd; SCSI-2 allows RQSENSE
7866          * to be untagged
7867          */

7868         if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
7869             (ptgt->m_t_ncmds > 1) &&
7870             ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
7871             (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
7872                 if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
7873                         NDBG23(("target=%d, untagged cmd, start draining\n",
7874                             ptgt->m_devhdl));
7875 
7876                         if (ptgt->m_reset_delay == 0) {
7877                                 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
7878                         }

7879 
7880                         mptsas_remove_cmd(mpt, cmd);
7881                         cmd->cmd_pkt_flags |= FLAG_HEAD;
7882                         mptsas_waitq_add(mpt, cmd);

7883                 }

7884                 return (DDI_FAILURE);
7885         }

7886 
7887         /*
7888          * Set correct tag bits.
7889          */
7890         if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
7891                 switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
7892                     FLAG_TAGMASK) >> 12)]) {
7893                 case MSG_SIMPLE_QTAG:
7894                         control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
7895                         break;
7896                 case MSG_HEAD_QTAG:
7897                         control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
7898                         break;
7899                 case MSG_ORDERED_QTAG:
7900                         control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
7901                         break;
7902                 default:
7903                         mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
7904                         break;
7905                 }


7949             (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
7950                 ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
7951                     cmd->cmd_ext_arqcookie.dmac_address);
7952         } else {
7953                 ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
7954                     cmd->cmd_arqcookie.dmac_address);
7955         }
7956 
7957         ddi_put32(acc_hdl, &io_request->Control, control);
7958 
7959         NDBG31(("starting message=0x%p, with cmd=0x%p",
7960             (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd));
7961 
7962         (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
7963 
7964         /*
7965          * Build request descriptor and write it to the request desc post reg.
7966          */
7967         request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
7968         request_desc_high = ptgt->m_devhdl << 16;


7969         MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
7970 
7971         /*
7972          * Start timeout.
7973          */

7974 #ifdef MPTSAS_TEST
7975         /*
7976          * Temporarily set timebase = 0;  needed for
7977          * timeout torture test.
7978          */
7979         if (mptsas_test_timeouts) {
7980                 ptgt->m_timebase = 0;
7981         }
7982 #endif
7983         n = pkt->pkt_time - ptgt->m_timebase;
7984 
7985         if (n == 0) {
7986                 (ptgt->m_dups)++;
7987                 ptgt->m_timeout = ptgt->m_timebase;
7988         } else if (n > 0) {
7989                 ptgt->m_timeout =
7990                     ptgt->m_timebase = pkt->pkt_time;
7991                 ptgt->m_dups = 1;
7992         } else if (n < 0) {
7993                 ptgt->m_timeout = ptgt->m_timebase;
7994         }
7995 #ifdef MPTSAS_TEST
7996         /*
7997          * Set back to a number higher than
7998          * mptsas_scsi_watchdog_tick
7999          * so timeouts will happen in mptsas_watchsubr
8000          */
8001         if (mptsas_test_timeouts) {
8002                 ptgt->m_timebase = 60;
8003         }
8004 #endif

8005 
8006         if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
8007             (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
8008                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8009                 return (DDI_FAILURE);
8010         }
8011         return (DDI_SUCCESS);
8012 }
8013 
8014 /*
8015  * Select a helper thread to handle current doneq
8016  */
8017 static void
8018 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8019 {
8020         uint64_t                        t, i;
8021         uint32_t                        min = 0xffffffff;
8022         mptsas_doneq_thread_list_t      *item;
8023 
8024         for (i = 0; i < mpt->m_doneq_thread_n; i++) {


8031 
8032                 mutex_enter(&item->mutex);
8033                 if (item->len < mpt->m_doneq_thread_threshold) {
8034                         t = i;
8035                         mutex_exit(&item->mutex);
8036                         break;
8037                 }
8038                 if (item->len < min) {
8039                         min = item->len;
8040                         t = i;
8041                 }
8042                 mutex_exit(&item->mutex);
8043         }
8044         mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8045         mptsas_doneq_mv(mpt, t);
8046         cv_signal(&mpt->m_doneq_thread_id[t].cv);
8047         mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8048 }
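/*
 * Illustrative sketch, not part of the driver: the selection policy used by
 * mptsas_deliver_doneq_thread() above -- take the first helper whose backlog
 * is under the threshold, otherwise fall back to the least-loaded one.  The
 * names and types below are hypothetical.
 */
static uint_t
example_pick_helper(const uint32_t *backlog, uint_t nthreads,
    uint32_t threshold)
{
	uint_t		i, best = 0;
	uint32_t	min = 0xffffffff;

	for (i = 0; i < nthreads; i++) {
		if (backlog[i] < threshold)
			return (i);		/* good enough, stop looking */
		if (backlog[i] < min) {
			min = backlog[i];
			best = i;
		}
	}
	return (best);
}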
8049 
8050 /*
8051  * Move the current global doneq to the doneq of thread[t].
8052  */
8053 static void
8054 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8055 {
8056         mptsas_cmd_t                    *cmd;
8057         mptsas_doneq_thread_list_t      *item = &mpt->m_doneq_thread_id[t];
8058 
8059         ASSERT(mutex_owned(&item->mutex));

8060         while ((cmd = mpt->m_doneq) != NULL) {
8061                 if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8062                         mpt->m_donetail = &mpt->m_doneq;
8063                 }
8064                 cmd->cmd_linkp = NULL;
8065                 *item->donetail = cmd;
8066                 item->donetail = &cmd->cmd_linkp;
8067                 mpt->m_doneq_len--;
8068                 item->len++;
8069         }

8070 }
8071 
8072 void
8073 mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
8074 {
8075         struct scsi_pkt *pkt = CMD2PKT(cmd);
8076 
8077         /* Check all acc and dma handles */
8078         if ((mptsas_check_acc_handle(mpt->m_datap) !=
8079             DDI_SUCCESS) ||
8080             (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
8081             DDI_SUCCESS) ||
8082             (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
8083             DDI_SUCCESS) ||
8084             (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
8085             DDI_SUCCESS) ||
8086             (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
8087             DDI_SUCCESS) ||
8088             (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
8089             DDI_SUCCESS) ||


8124             DDI_SUCCESS)))) {
8125                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8126                 pkt->pkt_reason = CMD_TRAN_ERR;
8127                 pkt->pkt_statistics = 0;
8128         }
8129         if (cmd->cmd_arqhandle &&
8130             (mptsas_check_dma_handle(cmd->cmd_arqhandle) != DDI_SUCCESS)) {
8131                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8132                 pkt->pkt_reason = CMD_TRAN_ERR;
8133                 pkt->pkt_statistics = 0;
8134         }
8135         if (cmd->cmd_ext_arqhandle &&
8136             (mptsas_check_dma_handle(cmd->cmd_ext_arqhandle) != DDI_SUCCESS)) {
8137                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8138                 pkt->pkt_reason = CMD_TRAN_ERR;
8139                 pkt->pkt_statistics = 0;
8140         }
8141 }
8142 
8143 /*
8144  * These routines manipulate the queue of commands that
8145  * are waiting for their completion routines to be called.
8146  * The queue is usually in FIFO order but on an MP system
8147  * it's possible for the completion routines to get out
8148  * of order. If that's a problem you need to add a global
8149  * mutex around the code that calls the completion routine
8150  * in the interrupt handler.
8151  */
8152 static void
8153 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8154 {
8155         struct scsi_pkt *pkt = CMD2PKT(cmd);
8156 
8157         NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));
8158 
8159         ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
8160         cmd->cmd_linkp = NULL;
8161         cmd->cmd_flags |= CFLAG_FINISHED;
8162         cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
8163 
8164         mptsas_fma_check(mpt, cmd);
8165 
8166         /*
8167          * Only add scsi pkts that have completion routines to
8168          * the doneq.  NOINTR cmds do not have callbacks.
8169          */
8170         if (pkt && (pkt->pkt_comp)) {
8171                 *mpt->m_donetail = cmd;
8172                 mpt->m_donetail = &cmd->cmd_linkp;
8173                 mpt->m_doneq_len++;
8174         }
8175 }
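/*
 * Illustrative sketch, not part of the driver: the "indirect tail" FIFO
 * idiom used by m_doneq/m_donetail above (and by the waitq and tx_waitq).
 * The tail is a pointer to the last cmd_linkp field, or to the head pointer
 * itself when the queue is empty, so append is O(1) with no special empty
 * case.  The example_fifo_t wrapper and the two helpers are hypothetical;
 * mptsas_cmd_t and cmd_linkp are the driver's own.
 */
typedef struct example_fifo {
	mptsas_cmd_t	*f_head;
	mptsas_cmd_t	**f_tail;	/* initialize to &f_head when empty */
} example_fifo_t;

static void
example_fifo_append(example_fifo_t *fp, mptsas_cmd_t *cmd)
{
	cmd->cmd_linkp = NULL;
	*fp->f_tail = cmd;		/* patches head or previous cmd_linkp */
	fp->f_tail = &cmd->cmd_linkp;
}

static mptsas_cmd_t *
example_fifo_remove(example_fifo_t *fp)
{
	mptsas_cmd_t *cmd;

	if ((cmd = fp->f_head) != NULL) {
		if ((fp->f_head = cmd->cmd_linkp) == NULL)
			fp->f_tail = &fp->f_head;	/* empty again */
		cmd->cmd_linkp = NULL;
	}
	return (cmd);
}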
8176 
8177 static mptsas_cmd_t *
8178 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
8179 {
8180         mptsas_cmd_t                    *cmd;
8181         mptsas_doneq_thread_list_t      *item = &mpt->m_doneq_thread_id[t];
8182 
8183         /* pop one off the done queue */
8184         if ((cmd = item->doneq) != NULL) {
8185                 /* if the queue is now empty fix the tail pointer */
8186                 NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
8187                 if ((item->doneq = cmd->cmd_linkp) == NULL) {
8188                         item->donetail = &item->doneq;
8189                 }
8190                 cmd->cmd_linkp = NULL;
8191                 item->len--;
8192         }
8193         return (cmd);
8194 }
8195 
8196 static void
8197 mptsas_doneq_empty(mptsas_t *mpt)
8198 {

8199         if (mpt->m_doneq && !mpt->m_in_callback) {
8200                 mptsas_cmd_t    *cmd, *next;
8201                 struct scsi_pkt *pkt;
8202 
8203                 mpt->m_in_callback = 1;
8204                 cmd = mpt->m_doneq;
8205                 mpt->m_doneq = NULL;
8206                 mpt->m_donetail = &mpt->m_doneq;
8207                 mpt->m_doneq_len = 0;
8208 







8209                 mutex_exit(&mpt->m_mutex);
8210                 /*
8211                  * run the completion routines of all the
8212                  * completed commands
8213                  */
8214                 while (cmd != NULL) {
8215                         next = cmd->cmd_linkp;
8216                         cmd->cmd_linkp = NULL;
8217                         /* run this command's completion routine */
8218                         cmd->cmd_flags |= CFLAG_COMPLETED;
8219                         pkt = CMD2PKT(cmd);
8220                         mptsas_pkt_comp(pkt, cmd);
8221                         cmd = next;
8222                 }

8223                 mutex_enter(&mpt->m_mutex);
8224                 mpt->m_in_callback = 0;

8225         }

8226 }
8227 
8228 /*
8229  * These routines manipulate the target's queue of pending requests
8230  */
8231 void
8232 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8233 {
8234         NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
8235         mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8236         cmd->cmd_queued = TRUE;
8237         if (ptgt)
8238                 ptgt->m_t_nwait++;
8239         if (cmd->cmd_pkt_flags & FLAG_HEAD) {

8240                 if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
8241                         mpt->m_waitqtail = &cmd->cmd_linkp;
8242                 }
8243                 mpt->m_waitq = cmd;

8244         } else {
8245                 cmd->cmd_linkp = NULL;
8246                 *(mpt->m_waitqtail) = cmd;
8247                 mpt->m_waitqtail = &cmd->cmd_linkp;
8248         }
8249 }
8250 
8251 static mptsas_cmd_t *
8252 mptsas_waitq_rm(mptsas_t *mpt)
8253 {
8254         mptsas_cmd_t    *cmd;
8255         mptsas_target_t *ptgt;
8256         NDBG7(("mptsas_waitq_rm"));
8257 

8258         MPTSAS_WAITQ_RM(mpt, cmd);

8259 
8260         NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
8261         if (cmd) {
8262                 ptgt = cmd->cmd_tgt_addr;
8263                 if (ptgt) {
8264                         ptgt->m_t_nwait--;
8265                         ASSERT(ptgt->m_t_nwait >= 0);
8266                 }
8267         }
8268         return (cmd);
8269 }
8270 
8271 /*
8272  * remove specified cmd from the middle of the wait queue.
8273  */
8274 static void
8275 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8276 {
8277         mptsas_cmd_t    *prevp = mpt->m_waitq;
8278         mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8279 
8280         NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8281             (void *)mpt, (void *)cmd));
8282         if (ptgt) {
8283                 ptgt->m_t_nwait--;
8284                 ASSERT(ptgt->m_t_nwait >= 0);
8285         }
8286 
8287         if (prevp == cmd) {

8288                 if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
8289                         mpt->m_waitqtail = &mpt->m_waitq;

8290 
8291                 cmd->cmd_linkp = NULL;
8292                 cmd->cmd_queued = FALSE;
8293                 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8294                     (void *)mpt, (void *)cmd));
8295                 return;
8296         }
8297 
8298         while (prevp != NULL) {
8299                 if (prevp->cmd_linkp == cmd) {
8300                         if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8301                                 mpt->m_waitqtail = &prevp->cmd_linkp;
8302 
8303                         cmd->cmd_linkp = NULL;
8304                         cmd->cmd_queued = FALSE;
8305                         NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8306                             (void *)mpt, (void *)cmd));
8307                         return;
8308                 }
8309                 prevp = prevp->cmd_linkp;
8310         }
8311         cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
8312 }
8313 
8314 static mptsas_cmd_t *
8315 mptsas_tx_waitq_rm(mptsas_t *mpt)
8316 {
8317         mptsas_cmd_t *cmd;
8318         NDBG7(("mptsas_tx_waitq_rm"));
8319 
8320         MPTSAS_TX_WAITQ_RM(mpt, cmd);
8321 
8322         NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));
8323 
8324         return (cmd);
8325 }
8326 
8327 /*
8328  * remove specified cmd from the middle of the tx_waitq.
8329  */
8330 static void
8331 mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8332 {
8333         mptsas_cmd_t *prevp = mpt->m_tx_waitq;
8334 
8335         NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8336             (void *)mpt, (void *)cmd));
8337 
8338         if (prevp == cmd) {
8339                 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
8340                         mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8341 
8342                 cmd->cmd_linkp = NULL;
8343                 cmd->cmd_queued = FALSE;
8344                 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8345                     (void *)mpt, (void *)cmd));
8346                 return;
8347         }
8348 
8349         while (prevp != NULL) {
8350                 if (prevp->cmd_linkp == cmd) {
8351                         if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8352                                 mpt->m_tx_waitqtail = &prevp->cmd_linkp;
8353 
8354                         cmd->cmd_linkp = NULL;
8355                         cmd->cmd_queued = FALSE;
8356                         NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8357                             (void *)mpt, (void *)cmd));
8358                         return;
8359                 }
8360                 prevp = prevp->cmd_linkp;
8361         }
8362         cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
8363 }
8364 
8365 /*
8366  * device and bus reset handling
8367  *
8368  * Notes:
8369  *      - RESET_ALL:    reset the controller
8370  *      - RESET_TARGET: reset the target specified in scsi_address
8371  */
8372 static int
8373 mptsas_scsi_reset(struct scsi_address *ap, int level)
8374 {
8375         mptsas_t                *mpt = ADDR2MPT(ap);
8376         int                     rval;
8377         mptsas_tgt_private_t    *tgt_private;
8378         mptsas_target_t         *ptgt = NULL;
8379 
8380         tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
8381         ptgt = tgt_private->t_private;
8382         if (ptgt == NULL) {
8383                 return (FALSE);
8384         }
8385         NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,


8508  * commands for a particular target.  For the case of abort task set, this
8509  * function clears the waitq of all commands for a particular target/lun.
8510  */
8511 static void
8512 mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
8513 {
8514         mptsas_slots_t  *slots = mpt->m_active;
8515         mptsas_cmd_t    *cmd, *next_cmd;
8516         int             slot;
8517         uchar_t         reason;
8518         uint_t          stat;
8519 
8520         NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));
8521 
8522         /*
8523          * Make sure the I/O Controller has flushed all cmds
8524          * that are associated with this target for a target reset
8525          * and target/lun for abort task set.
8526          * Account for TM requests, which use the last SMID.
8527          */

8528         for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
8529                 if ((cmd = slots->m_slot[slot]) == NULL)
8530                         continue;

8531                 reason = CMD_RESET;
8532                 stat = STAT_DEV_RESET;
8533                 switch (tasktype) {
8534                 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
8535                         if (Tgt(cmd) == target) {
8536                                 if (cmd->cmd_tgt_addr->m_timeout < 0) {
8537                                         /*
8538                                          * When timeout requested, propagate
8539                                          * proper reason and statistics to
8540                                          * target drivers.
8541                                          */
8542                                         reason = CMD_TIMEOUT;
8543                                         stat |= STAT_TIMEOUT;
8544                                 }
8545                                 NDBG25(("mptsas_flush_target discovered non-"
8546                                     "NULL cmd in slot %d, tasktype 0x%x", slot,
8547                                     tasktype));
8548                                 mptsas_dump_cmd(mpt, cmd);
8549                                 mptsas_remove_cmd(mpt, cmd);
8550                                 mptsas_set_pkt_reason(mpt, cmd, reason, stat);
8551                                 mptsas_doneq_add(mpt, cmd);
8552                         }
8553                         break;
8554                 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
8555                         reason = CMD_ABORTED;
8556                         stat = STAT_ABORTED;
8557                         /*FALLTHROUGH*/
8558                 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
8559                         if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
8560 
8561                                 NDBG25(("mptsas_flush_target discovered non-"
8562                                     "NULL cmd in slot %d, tasktype 0x%x", slot,
8563                                     tasktype));
8564                                 mptsas_dump_cmd(mpt, cmd);
8565                                 mptsas_remove_cmd(mpt, cmd);
8566                                 mptsas_set_pkt_reason(mpt, cmd, reason,
8567                                     stat);
8568                                 mptsas_doneq_add(mpt, cmd);
8569                         }
8570                         break;
8571                 default:
8572                         break;
8573                 }
8574         }

8575 
8576         /*
8577          * Flush the waitq and tx_waitq of this target's cmds
8578          */
8579         cmd = mpt->m_waitq;
8580 
8581         reason = CMD_RESET;
8582         stat = STAT_DEV_RESET;
8583 
8584         switch (tasktype) {
8585         case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
8586                 while (cmd != NULL) {
8587                         next_cmd = cmd->cmd_linkp;
8588                         if (Tgt(cmd) == target) {
8589                                 mptsas_waitq_delete(mpt, cmd);
8590                                 mptsas_set_pkt_reason(mpt, cmd,
8591                                     reason, stat);
8592                                 mptsas_doneq_add(mpt, cmd);
8593                         }
8594                         cmd = next_cmd;
8595                 }
8596                 mutex_enter(&mpt->m_tx_waitq_mutex);
8597                 cmd = mpt->m_tx_waitq;
8598                 while (cmd != NULL) {
8599                         next_cmd = cmd->cmd_linkp;
8600                         if (Tgt(cmd) == target) {
8601                                 mptsas_tx_waitq_delete(mpt, cmd);
8602                                 mutex_exit(&mpt->m_tx_waitq_mutex);
8603                                 mptsas_set_pkt_reason(mpt, cmd,
8604                                     reason, stat);
8605                                 mptsas_doneq_add(mpt, cmd);
8606                                 mutex_enter(&mpt->m_tx_waitq_mutex);
8607                         }
8608                         cmd = next_cmd;
8609                 }
8610                 mutex_exit(&mpt->m_tx_waitq_mutex);
8611                 break;
8612         case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
8613                 reason = CMD_ABORTED;
8614                 stat =  STAT_ABORTED;
8615                 /*FALLTHROUGH*/
8616         case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
8617                 while (cmd != NULL) {
8618                         next_cmd = cmd->cmd_linkp;
8619                         if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
8620                                 mptsas_waitq_delete(mpt, cmd);
8621                                 mptsas_set_pkt_reason(mpt, cmd,
8622                                     reason, stat);
8623                                 mptsas_doneq_add(mpt, cmd);
8624                         }
8625                         cmd = next_cmd;
8626                 }
8627                 mutex_enter(&mpt->m_tx_waitq_mutex);
8628                 cmd = mpt->m_tx_waitq;
8629                 while (cmd != NULL) {
8630                         next_cmd = cmd->cmd_linkp;
8631                         if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
8632                                 mptsas_tx_waitq_delete(mpt, cmd);
8633                                 mutex_exit(&mpt->m_tx_waitq_mutex);
8634                                 mptsas_set_pkt_reason(mpt, cmd,
8635                                     reason, stat);
8636                                 mptsas_doneq_add(mpt, cmd);
8637                                 mutex_enter(&mpt->m_tx_waitq_mutex);
8638                         }
8639                         cmd = next_cmd;
8640                 }
8641                 mutex_exit(&mpt->m_tx_waitq_mutex);
8642                 break;
8643         default:
8644                 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
8645                     tasktype);
8646                 break;
8647         }
8648 }
8649 
8650 /*
8651  * Clean up hba state, abort all outstanding commands and commands in the
8652  * waitq, and reset the timeout of all targets.
8653  */
8654 static void
8655 mptsas_flush_hba(mptsas_t *mpt)
8656 {
8657         mptsas_slots_t  *slots = mpt->m_active;
8658         mptsas_cmd_t    *cmd;
8659         int             slot;
8660 
8661         NDBG25(("mptsas_flush_hba"));
8662 
8663         /*
8664          * The I/O Controller should have already sent back
8665          * all commands via the scsi I/O reply frame.  Make
8666          * sure all commands have been flushed.
8667          * Account for the TM request, which uses the last SMID.
8668          */

8669         for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
8670                 if ((cmd = slots->m_slot[slot]) == NULL)
8671                         continue;

8672 
8673                 if (cmd->cmd_flags & CFLAG_CMDIOC) {
8674                         /*
8675                          * Need to make sure to tell everyone that might be
8676                          * waiting on this command that it's going to fail.  If
8677                          * we get here, this command will never timeout because
8678                          * the active command table is going to be re-allocated,
8679                          * so there will be nothing to check against a time out.
8680                          * Instead, mark the command as failed due to reset.
8681                          */
8682                         mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
8683                             STAT_BUS_RESET);
8684                         if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
8685                             (cmd->cmd_flags & CFLAG_CONFIG) ||
8686                             (cmd->cmd_flags & CFLAG_FW_DIAG)) {
8687                                 cmd->cmd_flags |= CFLAG_FINISHED;
8688                                 cv_broadcast(&mpt->m_passthru_cv);
8689                                 cv_broadcast(&mpt->m_config_cv);
8690                                 cv_broadcast(&mpt->m_fw_diag_cv);
8691                         }
8692                         continue;
8693                 }
8694 
8695                 NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
8696                     slot));
8697                 mptsas_dump_cmd(mpt, cmd);
8698 
8699                 mptsas_remove_cmd(mpt, cmd);
8700                 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
8701                 mptsas_doneq_add(mpt, cmd);
8702         }

8703 
8704         /*
8705          * Flush the waitq.
8706          */
8707         while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
8708                 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
8709                 if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
8710                     (cmd->cmd_flags & CFLAG_CONFIG) ||
8711                     (cmd->cmd_flags & CFLAG_FW_DIAG)) {
8712                         cmd->cmd_flags |= CFLAG_FINISHED;
8713                         cv_broadcast(&mpt->m_passthru_cv);
8714                         cv_broadcast(&mpt->m_config_cv);
8715                         cv_broadcast(&mpt->m_fw_diag_cv);
8716                 } else {
8717                         mptsas_doneq_add(mpt, cmd);
8718                 }
8719         }
8720 
8721         /*
8722          * Flush the tx_waitq
8723          */
8724         mutex_enter(&mpt->m_tx_waitq_mutex);
8725         while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
8726                 mutex_exit(&mpt->m_tx_waitq_mutex);
8727                 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
8728                 mptsas_doneq_add(mpt, cmd);
8729                 mutex_enter(&mpt->m_tx_waitq_mutex);
8730         }
8731         mutex_exit(&mpt->m_tx_waitq_mutex);
8732 
8733         /*
8734          * Drain the taskqs prior to reallocating resources.
8735          */
8736         mutex_exit(&mpt->m_mutex);
8737         ddi_taskq_wait(mpt->m_event_taskq);
8738         ddi_taskq_wait(mpt->m_dr_taskq);
8739         mutex_enter(&mpt->m_mutex);
8740 }
8741 
8742 /*
8743  * set pkt_reason and OR in pkt_statistics flag
8744  */
8745 static void
8746 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
8747     uint_t stat)
8748 {
8749 #ifndef __lock_lint
8750         _NOTE(ARGUNUSED(mpt))
8751 #endif
8752 
8753         NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
8754             (void *)cmd, reason, stat));
8755 
8756         if (cmd) {
8757                 if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
8758                         cmd->cmd_pkt->pkt_reason = reason;
8759                 }


8768 
8769         mutex_enter(&mptsas_global_mutex);
8770         if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
8771                 mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
8772                     drv_usectohz((clock_t)
8773                     MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
8774                 ASSERT(mptsas_reset_watch != NULL);
8775         }
8776         mutex_exit(&mptsas_global_mutex);
8777 }
8778 
8779 static void
8780 mptsas_setup_bus_reset_delay(mptsas_t *mpt)
8781 {
8782         mptsas_target_t *ptgt = NULL;
8783 
8784         NDBG22(("mptsas_setup_bus_reset_delay"));
8785         ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
8786             MPTSAS_HASH_FIRST);
8787         while (ptgt != NULL) {

8788                 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
8789                 ptgt->m_reset_delay = mpt->m_scsi_reset_delay;

8790 
8791                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
8792                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
8793         }
8794 
8795         mptsas_start_watch_reset_delay();
8796 }
8797 
8798 /*
8799  * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
8800  * mpt instance for active reset delays
8801  */
8802 static void
8803 mptsas_watch_reset_delay(void *arg)
8804 {
8805 #ifndef __lock_lint
8806         _NOTE(ARGUNUSED(arg))
8807 #endif
8808 
8809         mptsas_t        *mpt;


8827 
8828         if (not_done) {
8829                 mptsas_start_watch_reset_delay();
8830         }
8831 }
8832 
8833 static int
8834 mptsas_watch_reset_delay_subr(mptsas_t *mpt)
8835 {
8836         int             done = 0;
8837         int             restart = 0;
8838         mptsas_target_t *ptgt = NULL;
8839 
8840         NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));
8841 
8842         ASSERT(mutex_owned(&mpt->m_mutex));
8843 
8844         ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
8845             MPTSAS_HASH_FIRST);
8846         while (ptgt != NULL) {

8847                 if (ptgt->m_reset_delay != 0) {
8848                         ptgt->m_reset_delay -=
8849                             MPTSAS_WATCH_RESET_DELAY_TICK;
8850                         if (ptgt->m_reset_delay <= 0) {
8851                                 ptgt->m_reset_delay = 0;
8852                                 mptsas_set_throttle(mpt, ptgt,
8853                                     MAX_THROTTLE);
8854                                 restart++;
8855                         } else {
8856                                 done = -1;
8857                         }
8858                 }

8859 
8860                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
8861                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
8862         }
8863 
8864         if (restart > 0) {
8865                 mptsas_restart_hba(mpt);
8866         }
8867         return (done);
8868 }
8869 
8870 #ifdef MPTSAS_TEST
8871 static void
8872 mptsas_test_reset(mptsas_t *mpt, int target)
8873 {
8874         mptsas_target_t    *ptgt = NULL;
8875 
8876         if (mptsas_rtest == target) {
8877                 if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
8878                         mptsas_rtest = -1;


8934          * command lists, etc.
8935          */
8936         if (pkt != NULL) {
8937                 /* abort the specified packet */
8938                 sp = PKT2CMD(pkt);
8939 
8940                 if (sp->cmd_queued) {
8941                         NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
8942                             (void *)sp));
8943                         mptsas_waitq_delete(mpt, sp);
8944                         mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
8945                             STAT_ABORTED);
8946                         mptsas_doneq_add(mpt, sp);
8947                         rval = TRUE;
8948                         goto done;
8949                 }
8950 
8951                 /*
8952                  * Have mpt firmware abort this command
8953                  */
8954 
8955                 if (slots->m_slot[sp->cmd_slot] != NULL) {

8956                         rval = mptsas_ioc_task_management(mpt,
8957                             MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
8958                             lun, NULL, 0, 0);
8959 
8960                         /*
8961                          * The transport layer expects only TRUE and FALSE.
8962                          * Therefore, if mptsas_ioc_task_management returns
8963                          * FAILED we will return FALSE.
8964                          */
8965                         if (rval == FAILED)
8966                                 rval = FALSE;
8967                         goto done;
8968                 }

8969         }
8970 
8971         /*
8972          * If pkt is NULL then abort task set
8973          */
8974         rval = mptsas_ioc_task_management(mpt,
8975             MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);
8976 
8977         /*
8978          * The transport layer expects only TRUE and FALSE.
8979          * Therefore, if mptsas_ioc_task_management returns
8980          * FAILED we will return FALSE.
8981          */
8982         if (rval == FAILED)
8983                 rval = FALSE;
8984 
8985 #ifdef MPTSAS_TEST
8986         if (rval && mptsas_test_stop) {
8987                 debug_enter("mptsas_do_scsi_abort");
8988         }


9060         default:
9061                 rval = UNDEFINED;
9062                 break;
9063         }
9064 
9065         NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9066 
9067         mutex_exit(&mpt->m_mutex);
9068         return (rval);
9069 }
9070 
9071 /*
9072  * (*tran_setcap).  Set the capability named to the value given.
9073  */
9074 static int
9075 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
9076 {
9077         mptsas_t        *mpt = ADDR2MPT(ap);
9078         int             ckey;
9079         int             rval = FALSE;

9080 
9081         NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
9082             ap->a_target, cap, value, tgtonly));
9083 
9084         if (!tgtonly) {
9085                 return (rval);
9086         }
9087 
9088         mutex_enter(&mpt->m_mutex);
9089 
9090         if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9091                 mutex_exit(&mpt->m_mutex);
9092                 return (UNDEFINED);
9093         }
9094 
9095         switch (ckey) {
9096         case SCSI_CAP_DMA_MAX:
9097         case SCSI_CAP_MSG_OUT:
9098         case SCSI_CAP_PARITY:
9099         case SCSI_CAP_INITIATOR_ID:
9100         case SCSI_CAP_LINKED_CMDS:
9101         case SCSI_CAP_UNTAGGED_QING:
9102         case SCSI_CAP_RESET_NOTIFICATION:
9103                 /*
9104                  * None of these are settable via
9105                  * the capability interface.
9106                  */
9107                 break;
9108         case SCSI_CAP_ARQ:
9109                 /*
9110                  * We cannot turn off arq so return false if asked to
9111                  */
9112                 if (value) {
9113                         rval = TRUE;
9114                 } else {
9115                         rval = FALSE;
9116                 }
9117                 break;
9118         case SCSI_CAP_TAGGED_QING:
9119                 mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
9120                     (ap->a_hba_tran->tran_tgt_private))->t_private,
9121                     MAX_THROTTLE);


9122                 rval = TRUE;
9123                 break;
9124         case SCSI_CAP_QFULL_RETRIES:
9125                 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9126                     t_private->m_qfull_retries = (uchar_t)value;
9127                 rval = TRUE;
9128                 break;
9129         case SCSI_CAP_QFULL_RETRY_INTERVAL:
9130                 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9131                     t_private->m_qfull_retry_interval =
9132                     drv_usectohz(value * 1000);
9133                 rval = TRUE;
9134                 break;
9135         default:
9136                 rval = UNDEFINED;
9137                 break;
9138         }
9139         mutex_exit(&mpt->m_mutex);
9140         return (rval);
9141 }


9145  */
9146 /*ARGSUSED*/
9147 static int
9148 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
9149 {
9150         NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
9151 
9152         if (!cap)
9153                 return (FALSE);
9154 
9155         *cidxp = scsi_hba_lookup_capstr(cap);
9156         return (TRUE);
9157 }
9158 
9159 static int
9160 mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
9161 {
9162         mptsas_slots_t  *old_active = mpt->m_active;
9163         mptsas_slots_t  *new_active;
9164         size_t          size;
9165         int             rval = -1, i;

9166 
9167         /*
9168          * if there are active commands, then we cannot
9169          * change size of active slots array.
9170          */
9171         ASSERT(mpt->m_ncmds == 0);
9172 
9173         size = MPTSAS_SLOTS_SIZE(mpt);
9174         new_active = kmem_zalloc(size, flag);
9175         if (new_active == NULL) {
9176                 NDBG1(("new active alloc failed"));
9177                 return (rval);
9178         }
9179         /*
9180          * Since SMID 0 is reserved and the TM slot is reserved, the
9181          * number of slots that can be used at any one time is
9182          * m_max_requests - 2.
9183          */
9184         new_active->m_n_slots = (mpt->m_max_requests - 2);
9185         new_active->m_size = size;
9186         new_active->m_tags = 1;

9187         if (old_active) {
9188                 new_active->m_tgttbl = old_active->m_tgttbl;
9189                 new_active->m_smptbl = old_active->m_smptbl;
9190                 new_active->m_num_raid_configs =
9191                     old_active->m_num_raid_configs;
9192                 for (i = 0; i < new_active->m_num_raid_configs; i++) {
9193                         new_active->m_raidconfig[i] =
9194                             old_active->m_raidconfig[i];
9195                 }
9196                 mptsas_free_active_slots(mpt);
9197         }

9198         mpt->m_active = new_active;
9199         rval = 0;
9200 
9201         return (rval);
9202 }
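/*
 * Illustrative sketch, not part of the driver: the sizing idiom presumably
 * behind MPTSAS_SLOTS_SIZE() above -- a fixed header followed by one slot
 * pointer per possible SMID.  The macro body shown here is an assumption,
 * not the driver's actual definition; it only illustrates why m_size is
 * remembered so the same length can be handed back to kmem_free() later.
 */
#define	EXAMPLE_SLOTS_SIZE(nslots)					\
	(offsetof(mptsas_slots_t, m_slot) +				\
	sizeof (mptsas_cmd_t *) * ((nslots) + 1))	/* SMIDs are 1-based */

static mptsas_slots_t *
example_alloc_slots(uint16_t max_requests, int kmflag)
{
	size_t		size = EXAMPLE_SLOTS_SIZE(max_requests);
	mptsas_slots_t	*sp;

	if ((sp = kmem_zalloc(size, kmflag)) == NULL)
		return (NULL);
	sp->m_n_slots = max_requests - 2;	/* minus SMID 0 and TM slot */
	sp->m_size = size;			/* kmem_free() needs this */
	return (sp);
}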
9203 
9204 static void
9205 mptsas_free_active_slots(mptsas_t *mpt)
9206 {
9207         mptsas_slots_t  *active = mpt->m_active;
9208         size_t          size;


9209 
9210         if (active == NULL)
9211                 return;
9212         size = active->m_size;
9213         kmem_free(active, size);
9214         mpt->m_active = NULL;
9215 }
9216 
9217 /*
9218  * Error logging, printing, and debug print routines.
9219  */
9220 static char *mptsas_label = "mpt_sas";
9221 
9222 /*PRINTFLIKE3*/
9223 void
9224 mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
9225 {
9226         dev_info_t      *dev;
9227         va_list         ap;
9228 
9229         if (mpt) {
9230                 dev = mpt->m_dip;
9231         } else {


9338 
9339 static void
9340 mptsas_watchsubr(mptsas_t *mpt)
9341 {
9342         int             i;
9343         mptsas_cmd_t    *cmd;
9344         mptsas_target_t *ptgt = NULL;
9345 
9346         NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));
9347 
9348 #ifdef MPTSAS_TEST
9349         if (mptsas_enable_untagged) {
9350                 mptsas_test_untagged++;
9351         }
9352 #endif
9353 
9354         /*
9355          * Check for commands stuck in active slot
9356          * Account for TM requests, which use the last SMID.
9357          */

9358         for (i = 0; i <= mpt->m_active->m_n_slots; i++) {
9359                 if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
9360                         if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
9361                                 cmd->cmd_active_timeout -=
9362                                     mptsas_scsi_watchdog_tick;
9363                                 if (cmd->cmd_active_timeout <= 0) {
9364                                         /*
9365                                          * There seems to be a command stuck
9366                                          * in the active slot.  Drain throttle.
9367                                          */
9368                                         mptsas_set_throttle(mpt,
9369                                             cmd->cmd_tgt_addr,

9370                                             DRAIN_THROTTLE);

9371                                 }
9372                         }
9373                         if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
9374                             (cmd->cmd_flags & CFLAG_CONFIG) ||
9375                             (cmd->cmd_flags & CFLAG_FW_DIAG)) {
9376                                 cmd->cmd_active_timeout -=
9377                                     mptsas_scsi_watchdog_tick;
9378                                 if (cmd->cmd_active_timeout <= 0) {
9379                                         /*
9380                                          * passthrough command timeout
9381                                          */
9382                                         cmd->cmd_flags |= (CFLAG_FINISHED |
9383                                             CFLAG_TIMEOUT);
9384                                         cv_broadcast(&mpt->m_passthru_cv);
9385                                         cv_broadcast(&mpt->m_config_cv);
9386                                         cv_broadcast(&mpt->m_fw_diag_cv);
9387                                 }
9388                         }
9389                 }
9390         }

9391 
9392         ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
9393             MPTSAS_HASH_FIRST);
9394         while (ptgt != NULL) {
9395                 /*
9396                  * If we were draining due to a qfull condition,
9397                  * go back to full throttle.
9398                  */
9399                 if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
9400                     (ptgt->m_t_throttle > HOLD_THROTTLE) &&
9401                     (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
9402                         mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
9403                         mptsas_restart_hba(mpt);
9404                 }
9405 
9406                 if ((ptgt->m_t_ncmds > 0) &&
9407                     (ptgt->m_timebase)) {
9408 
9409                         if (ptgt->m_timebase <=
9410                             mptsas_scsi_watchdog_tick) {
9411                                 ptgt->m_timebase +=
9412                                     mptsas_scsi_watchdog_tick;

9413                                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9414                                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9415                                 continue;
9416                         }
9417 
9418                         ptgt->m_timeout -= mptsas_scsi_watchdog_tick;
9419 
9420                         if (ptgt->m_timeout_count > 0) {
9421                                 ptgt->m_timeout_interval +=
9422                                     mptsas_scsi_watchdog_tick;
9423                         }
9424                         if (ptgt->m_timeout_interval >
9425                             mptsas_timeout_interval) {
9426                                 ptgt->m_timeout_interval = 0;
9427                                 ptgt->m_timeout_count = 0;
9428                         }
9429 
9430                         if (ptgt->m_timeout < 0) {
9431                                 ptgt->m_timeout_count++;
9432                                 if (ptgt->m_timeout_count >
9433                                     mptsas_timeout_threshold) {
9434                                         ptgt->m_timeout_count = 0;
9435                                         mptsas_kill_target(mpt, ptgt);
9436                                 } else {
9437                                         mptsas_cmd_timeout(mpt, ptgt->m_devhdl);
9438                                 }
9439                                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9440                                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9441                                 continue;
9442                         }
9443 
9444                         if ((ptgt->m_timeout) <=
9445                             mptsas_scsi_watchdog_tick) {
9446                                 NDBG23(("pending timeout"));
9447                                 mptsas_set_throttle(mpt, ptgt,
9448                                     DRAIN_THROTTLE);
9449                         }
9450                 }
9451 
9452                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9453                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9454         }
9455 }
9456 
9457 /*
9458  * timeout recovery
9459  */
9460 static void
9461 mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl)
9462 {
9463 
9464         NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
9465         mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
9466             "Target %d", devhdl);
9467 
9468         /*
9469          * If the current target is not the target passed in,
9470          * try to reset that target.
9471          */
9472         NDBG29(("mptsas_cmd_timeout: device reset"));
9473         if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
9474                 mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
9475                     "recovery failed!", devhdl);
9476         }
9477 }
9478 
9479 /*
9480  * target causing too many timeouts
9481  */
9482 static void
9483 mptsas_kill_target(mptsas_t *mpt, mptsas_target_t *ptgt)
9484 {
9485         mptsas_topo_change_list_t       *topo_node = NULL;
9486 
9487         NDBG29(("mptsas_tgt_kill: target=%d", ptgt->m_devhdl));
9488         mptsas_log(mpt, CE_WARN, "timeout threshold exceeded for "
9489             "Target %d", ptgt->m_devhdl);
9490 
9491         topo_node = kmem_zalloc(sizeof (mptsas_topo_change_list_t), KM_SLEEP);
9492         topo_node->mpt = mpt;
9493         topo_node->un.phymask = ptgt->m_phymask;
9494         topo_node->event = MPTSAS_DR_EVENT_OFFLINE_TARGET;
9495         topo_node->devhdl = ptgt->m_devhdl;
9496         if (ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
9497                 topo_node->flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
9498         else
9499                 topo_node->flags = MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
9500         topo_node->object = NULL;
9501 
9502         /*
9503          * Launch DR taskq to fake topology change
9504          */
9505         if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
9506             mptsas_handle_dr, (void *)topo_node,
9507             DDI_NOSLEEP)) != DDI_SUCCESS) {
9508                 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
9509                     "for fake offline event failed. \n");
9510         }
9511 }
9512 
9513 /*
9514  * Device / Hotplug control
9515  */
9516 static int
9517 mptsas_scsi_quiesce(dev_info_t *dip)
9518 {
9519         mptsas_t        *mpt;
9520         scsi_hba_tran_t *tran;
9521 
9522         tran = ddi_get_driver_private(dip);
9523         if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9524                 return (-1);
9525 
9526         return (mptsas_quiesce_bus(mpt));
9527 }
9528 
9529 static int
9530 mptsas_scsi_unquiesce(dev_info_t *dip)
9531 {
9532         mptsas_t                *mpt;
9533         scsi_hba_tran_t *tran;
9534 
9535         tran = ddi_get_driver_private(dip);
9536         if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9537                 return (-1);
9538 
9539         return (mptsas_unquiesce_bus(mpt));
9540 }
9541 
9542 static int
9543 mptsas_quiesce_bus(mptsas_t *mpt)
9544 {
9545         mptsas_target_t *ptgt = NULL;
9546 
9547         NDBG28(("mptsas_quiesce_bus"));
9548         mutex_enter(&mpt->m_mutex);
9549 
9550         /* Set all the throttles to zero */
9551         ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
9552             MPTSAS_HASH_FIRST);
9553         while (ptgt != NULL) {

9554                 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);

9555 
9556                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9557                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9558         }
9559 
9560         /* If there are any outstanding commands in the queue */
9561         if (mpt->m_ncmds) {


9562                 mpt->m_softstate |= MPTSAS_SS_DRAINING;
9563                 mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
9564                     mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
9565                 if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
9566                         /*
9567                          * Quiesce has been interrupted
9568                          */
9569                         mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
9570                         ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9571                             &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
9572                         while (ptgt != NULL) {

9573                                 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);

9574 
9575                                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9576                                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9577                         }
9578                         mptsas_restart_hba(mpt);
9579                         if (mpt->m_quiesce_timeid != 0) {
9580                                 timeout_id_t tid = mpt->m_quiesce_timeid;
9581                                 mpt->m_quiesce_timeid = 0;
9582                                 mutex_exit(&mpt->m_mutex);
9583                                 (void) untimeout(tid);
9584                                 return (-1);
9585                         }
9586                         mutex_exit(&mpt->m_mutex);
9587                         return (-1);
9588                 } else {
9589                         /* Bus has been quiesced */
9590                         ASSERT(mpt->m_quiesce_timeid == 0);
9591                         mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
9592                         mpt->m_softstate |= MPTSAS_SS_QUIESCED;
9593                         mutex_exit(&mpt->m_mutex);
9594                         return (0);
9595                 }
9596         }

9597         /* Bus was not busy - QUIESCED */
9598         mutex_exit(&mpt->m_mutex);
9599 
9600         return (0);
9601 }
9602 
9603 static int
9604 mptsas_unquiesce_bus(mptsas_t *mpt)
9605 {
9606         mptsas_target_t *ptgt = NULL;
9607 
9608         NDBG28(("mptsas_unquiesce_bus"));
9609         mutex_enter(&mpt->m_mutex);
9610         mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
9611         ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
9612             MPTSAS_HASH_FIRST);
9613         while (ptgt != NULL) {

9614                 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);

9615 
9616                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9617                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9618         }
9619         mptsas_restart_hba(mpt);
9620         mutex_exit(&mpt->m_mutex);
9621         return (0);
9622 }
9623 
9624 static void
9625 mptsas_ncmds_checkdrain(void *arg)
9626 {
9627         mptsas_t        *mpt = arg;
9628         mptsas_target_t *ptgt = NULL;
9629 
9630         mutex_enter(&mpt->m_mutex);
9631         if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
9632                 mpt->m_quiesce_timeid = 0;
9633                 if (mpt->m_ncmds == 0) {
9634                         /* Command queue has been drained */
9635                         cv_signal(&mpt->m_cv);
9636                 } else {
9637                         /*
9638                          * The throttle may have been reset because
9639                          * of a SCSI bus reset
9640                          */
9641                         ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9642                             &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
9643                         while (ptgt != NULL) {

9644                                 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);

9645 
9646                                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9647                                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9648                         }
9649 
9650                         mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
9651                             mpt, (MPTSAS_QUIESCE_TIMEOUT *
9652                             drv_usectohz(1000000)));




9653                 }
9654         }
9655         mutex_exit(&mpt->m_mutex);
9656 }
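
/*
 * Illustrative sketch (not part of the driver source): a self-contained
 * user-space analogue of the quiesce/checkdrain handshake used by
 * mptsas_quiesce_bus() and mptsas_ncmds_checkdrain() above -- hold new
 * work, then sleep on a condition variable until the outstanding command
 * count drains to zero.  All names below are hypothetical.
 */
#include <pthread.h>

typedef struct drain_state {
        pthread_mutex_t lock;
        pthread_cond_t  drained;
        int             ncmds;          /* outstanding commands */
        int             draining;       /* quiesce in progress */
} drain_state_t;

static void
drain_wait(drain_state_t *ds)
{
        (void) pthread_mutex_lock(&ds->lock);
        ds->draining = 1;
        while (ds->ncmds != 0)
                (void) pthread_cond_wait(&ds->drained, &ds->lock);
        ds->draining = 0;
        (void) pthread_mutex_unlock(&ds->lock);
}

/*
 * Called as each command completes; the driver performs the equivalent
 * zero check from its periodic checkdrain callback instead.
 */
static void
drain_cmd_done(drain_state_t *ds)
{
        (void) pthread_mutex_lock(&ds->lock);
        if (--ds->ncmds == 0 && ds->draining)
                (void) pthread_cond_signal(&ds->drained);
        (void) pthread_mutex_unlock(&ds->lock);
}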
9657 
9658 /*ARGSUSED*/
9659 static void
9660 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
9661 {
9662         int     i;
9663         uint8_t *cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
9664         char    buf[128];
9665 
9666         buf[0] = '\0';
9667         NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
9668             Tgt(cmd), Lun(cmd)));
9669         (void) sprintf(&buf[0], "\tcdb=[");
9670         for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
9671                 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9672         }


9796                 /*
9797                  * Setup descriptor info.  RAID passthrough must use the
9798                  * default request descriptor which is already set, so if this
9799                  * is a SCSI IO request, change the descriptor to SCSI IO.
9800                  */
9801                 if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
9802                         desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
9803                         request_desc_high = (ddi_get16(acc_hdl,
9804                             &scsi_io_req->DevHandle) << 16);
9805                 }
9806         }
9807 
9808         /*
9809          * The message must complete before the next message can be
9810          * started, so wait for this one to finish before issuing
9811          * another.
9812          */
9813         (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
9814         request_desc_low = (cmd->cmd_slot << 16) + desc_type;
9815         cmd->cmd_rfm = NULL;

9816         MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
9817         if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
9818             (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
9819                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
9820         }
9821 }
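
/*
 * Illustrative helper (not part of the driver source) showing how the
 * request descriptor words built above are laid out: the descriptor type
 * flags occupy the low bits of the low word, the SMID (command slot) sits
 * in bits 16-31 of the low word, and for SCSI IO descriptors the
 * DevHandle sits in bits 16-31 of the high word.  How the two words reach
 * the chip is up to MPTSAS_START_CMD.
 */
static void
compose_request_desc(uint16_t slot, uint8_t desc_type, uint16_t devhdl,
    uint32_t *desc_low, uint32_t *desc_high)
{
        *desc_low = ((uint32_t)slot << 16) | desc_type;
        *desc_high = (uint32_t)devhdl << 16;    /* SCSI IO requests only */
}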
9822 
9823 
9824 
9825 static int
9826 mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
9827     uint8_t *data, uint32_t request_size, uint32_t reply_size,
9828     uint32_t data_size, uint32_t direction, uint8_t *dataout,
9829     uint32_t dataout_size, short timeout, int mode)
9830 {
9831         mptsas_pt_request_t             pt;
9832         mptsas_dma_alloc_state_t        data_dma_state;
9833         mptsas_dma_alloc_state_t        dataout_dma_state;
9834         caddr_t                         memp;
9835         mptsas_cmd_t                    *cmd = NULL;


10226         } else {
10227                 pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
10228                     (mpt->m_req_frame + (mpt->m_req_frame_size *
10229                     cmd->cmd_slot));
10230                 bzero(pDiag_release_msg, mpt->m_req_frame_size);
10231                 ddi_put8(mpt->m_acc_req_frame_hdl,
10232                     &pDiag_release_msg->Function, diag->function);
10233                 ddi_put8(mpt->m_acc_req_frame_hdl,
10234                     &pDiag_release_msg->BufferType,
10235                     diag->pBuffer->buffer_type);
10236         }
10237 
10238         /*
10239          * Send the message
10240          */
10241         (void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
10242             DDI_DMA_SYNC_FORDEV);
10243         request_desc_low = (cmd->cmd_slot << 16) +
10244             MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
10245         cmd->cmd_rfm = NULL;

10246         MPTSAS_START_CMD(mpt, request_desc_low, 0);
10247         if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
10248             DDI_SUCCESS) ||
10249             (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
10250             DDI_SUCCESS)) {
10251                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10252         }
10253 }
10254 
10255 static int
10256 mptsas_post_fw_diag_buffer(mptsas_t *mpt,
10257     mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
10258 {
10259         mptsas_diag_request_t           diag;
10260         int                             status, slot_num, post_flags = 0;
10261         mptsas_cmd_t                    *cmd = NULL;
10262         struct scsi_pkt                 *pkt;
10263         pMpi2DiagBufferPostReply_t      reply;
10264         uint16_t                        iocstatus;
10265         uint32_t                        iocloginfo, transfer_length;


11348 
11349         mutex_exit(&mpt->m_mutex);
11350         return (status);
11351 }
11352 
11353 static int
11354 mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
11355     int *rval)
11356 {
11357         int                     status = 0;
11358         mptsas_t                *mpt;
11359         mptsas_update_flash_t   flashdata;
11360         mptsas_pass_thru_t      passthru_data;
11361         mptsas_adapter_data_t   adapter_data;
11362         mptsas_pci_info_t       pci_info;
11363         int                     copylen;
11364 
11365         int                     iport_flag = 0;
11366         dev_info_t              *dip = NULL;
11367         mptsas_phymask_t        phymask = 0;




11368 
11369         *rval = MPTIOCTL_STATUS_GOOD;
11370         if (secpolicy_sys_config(credp, B_FALSE) != 0) {
11371                 return (EPERM);
11372         }
11373 
11374         mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
11375         if (mpt == NULL) {
11376                 /*
11377                  * Called from iport node, get the states
11378                  */
11379                 iport_flag = 1;
11380                 dip = mptsas_get_dip_from_dev(dev, &phymask);
11381                 if (dip == NULL) {
11382                         return (ENXIO);
11383                 }
11384                 mpt = DIP2MPT(dip);
11385         }
11386         /* Make sure power level is D0 before accessing registers */
11387         mutex_enter(&mpt->m_mutex);


11389                 (void) pm_busy_component(mpt->m_dip, 0);
11390                 if (mpt->m_power_level != PM_LEVEL_D0) {
11391                         mutex_exit(&mpt->m_mutex);
11392                         if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
11393                             DDI_SUCCESS) {
11394                                 mptsas_log(mpt, CE_WARN,
11395                                     "mptsas%d: mptsas_ioctl: Raise power "
11396                                     "request failed.", mpt->m_instance);
11397                                 (void) pm_idle_component(mpt->m_dip, 0);
11398                                 return (ENXIO);
11399                         }
11400                 } else {
11401                         mutex_exit(&mpt->m_mutex);
11402                 }
11403         } else {
11404                 mutex_exit(&mpt->m_mutex);
11405         }
11406 
11407         if (iport_flag) {
11408                 status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);

11409                 goto out;
11410         }

























































11411         switch (cmd) {
11412                 case MPTIOCTL_UPDATE_FLASH:
11413                         if (ddi_copyin((void *)data, &flashdata,
11414                                 sizeof (struct mptsas_update_flash), mode)) {
11415                                 status = EFAULT;
11416                                 break;
11417                         }
11418 
11419                         mutex_enter(&mpt->m_mutex);
11420                         if (mptsas_update_flash(mpt,
11421                             (caddr_t)(long)flashdata.PtrBuffer,
11422                             flashdata.ImageSize, flashdata.ImageType, mode)) {
11423                                 status = EFAULT;
11424                         }
11425 
11426                         /*
11427                          * Reset the chip to start using the new
11428                          * firmware.  Reset even if the update failed.
11429                          */
11430                         mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;


11546                          * which does this.
11547                          */
11548                         status = mptsas_event_report(mpt,
11549                             (mptsas_event_report_t *)data, mode, rval);
11550                         break;
11551                 case MPTIOCTL_REG_ACCESS:
11552                         /*
11553                          * The user has requested register access.  Call our
11554                          * routine which does this.
11555                          */
11556                         status = mptsas_reg_access(mpt,
11557                             (mptsas_reg_access_t *)data, mode);
11558                         break;
11559                 default:
11560                         status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
11561                             rval);
11562                         break;
11563         }
11564 
11565 out:


11566         return (status);
11567 }
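
/*
 * Illustrative sketch (hypothetical helper, not driver source) of the
 * power-management bracket used around register access in the ioctl path
 * above when MPTSAS_OPT_PM is set: mark the component busy and raise it
 * to full power before touching hardware, then mark it idle again when
 * the work is done.
 */
static int
pm_bracket_example(dev_info_t *dip)
{
        (void) pm_busy_component(dip, 0);
        if (pm_raise_power(dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
                (void) pm_idle_component(dip, 0);
                return (ENXIO);
        }

        /* ... access device registers here ... */

        (void) pm_idle_component(dip, 0);
        return (0);
}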
11568 
11569 int
11570 mptsas_restart_ioc(mptsas_t *mpt)
11571 {
11572         int             rval = DDI_SUCCESS;
11573         mptsas_target_t *ptgt = NULL;
11574 
11575         ASSERT(mutex_owned(&mpt->m_mutex));
11576 
11577         /*
11578          * Set a flag telling I/O path that we're processing a reset.  This is
11579          * needed because after the reset is complete, the hash table still
11580          * needs to be rebuilt.  If I/Os are started before the hash table is
11581          * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
11582          * so that they can be retried.
11583          */
11584         mpt->m_in_reset = TRUE;
11585 
11586         /*
11587          * Set all throttles to HOLD
11588          */
11589         ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
11590             MPTSAS_HASH_FIRST);
11591         while (ptgt != NULL) {

11592                 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);

11593 
11594                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
11595                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
11596         }
11597 
11598         /*
11599          * Disable interrupts
11600          */
11601         MPTSAS_DISABLE_INTR(mpt);
11602 
11603         /*
11604          * Abort all commands: outstanding commands, commands in waitq and
11605          * tx_waitq.
11606          */
11607         mptsas_flush_hba(mpt);
11608 
11609         /*
11610          * Reinitialize the chip.
11611          */
11612         if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
11613                 rval = DDI_FAILURE;
11614         }
11615 
11616         /*
11617          * Enable interrupts again
11618          */
11619         MPTSAS_ENABLE_INTR(mpt);
11620 
11621         /*
11622          * If mptsas_init_chip was successful, update the driver data.
11623          */
11624         if (rval == DDI_SUCCESS) {
11625                 mptsas_update_driver_data(mpt);
11626         }
11627 
11628         /*
11629          * Reset the throttles
11630          */
11631         ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
11632             MPTSAS_HASH_FIRST);
11633         while (ptgt != NULL) {

11634                 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);

11635 
11636                 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
11637                     &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
11638         }
11639 
11640         mptsas_doneq_empty(mpt);
11641         mptsas_restart_hba(mpt);
11642 
11643         if (rval != DDI_SUCCESS) {
11644                 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
11645                 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
11646         }
11647 
11648         /*
11649          * Clear the reset flag so that I/Os can continue.
11650          */
11651         mpt->m_in_reset = FALSE;
11652 
11653         return (rval);
11654 }
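
/*
 * Illustrative sketch (hypothetical names, not driver source): how an I/O
 * submission path can honor the m_in_reset bracket set up above, so that
 * commands arriving while the IOC and target hash table are being rebuilt
 * are handed back for retry instead of being started against stale state.
 */
typedef struct ctrl_state {
        int     in_reset;               /* protected by the instance mutex */
} ctrl_state_t;

#define IO_STARTED      0
#define IO_RETRY        1

static int
submit_io(ctrl_state_t *cs)
{
        if (cs->in_reset)
                return (IO_RETRY);      /* requeue; retry after the reset */
        /* ... hand the command to the hardware here ... */
        return (IO_STARTED);
}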


11924                         };
11925         uint16_t        pmcsr_stat;
11926 
11927         if (mptsas_get_pci_cap(mpt) == FALSE) {
11928                 return (DDI_FAILURE);
11929         }
11930         /*
11931          * If the PCI capabilities do not include PM support, there
11932          * is no need to register the pm-components.
11933          */
11934         if (!(mpt->m_options & MPTSAS_OPT_PM))
11935                 return (DDI_SUCCESS);
11936         /*
11937          * If power management is supported by this chip, create
11938          * pm-components property for the power management framework
11939          */
11940         (void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
11941         pmc[0] = pmc_name;
11942         if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
11943             "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {

11944                 mpt->m_options &= ~MPTSAS_OPT_PM;

11945                 mptsas_log(mpt, CE_WARN,
11946                     "mptsas%d: pm-component property creation failed.",
11947                     mpt->m_instance);
11948                 return (DDI_FAILURE);
11949         }
11950 
11951         /*
11952          * Power on device.
11953          */
11954         (void) pm_busy_component(mpt->m_dip, 0);
11955         pmcsr_stat = pci_config_get16(mpt->m_config_handle,
11956             mpt->m_pmcsr_offset);
11957         if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
11958                 mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
11959                     mpt->m_instance);
11960                 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
11961                     PCI_PMCSR_D0);
11962         }
11963         if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
11964                 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
11965                 return (DDI_FAILURE);
11966         }

11967         mpt->m_power_level = PM_LEVEL_D0;

11968         /*
11969          * Set pm idle delay.
11970          */
11971         mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
11972             mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
11973 
11974         return (DDI_SUCCESS);
11975 }
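
/*
 * Generic example (not copied from this driver) of the shape of the
 * "pm-components" string array registered above: one NAME= entry followed
 * by the supported power-level descriptions.  The level strings and the
 * helper below are assumptions for illustration only; the driver's actual
 * array initializer is outside this hunk.
 */
static void
build_pm_components(char *namebuf, size_t buflen, int instance,
    char *pmc[3])
{
        (void) snprintf(namebuf, buflen, "NAME=mptsas%d", instance);
        pmc[0] = namebuf;
        pmc[1] = "0=Off (PCI D3 State)";
        pmc[2] = "3=On (PCI D0 State)";
}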
11976 
11977 static int
11978 mptsas_register_intrs(mptsas_t *mpt)
11979 {
11980         dev_info_t *dip;
11981         int intr_types;
11982 
11983         dip = mpt->m_dip;
11984 
11985         /* Get supported interrupt types */
11986         if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
11987                 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "


12036         NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
12037 
12038         /* Get number of interrupts */
12039         ret = ddi_intr_get_nintrs(dip, intr_type, &count);
12040         if ((ret != DDI_SUCCESS) || (count <= 0)) {
12041                 mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
12042                     "ret %d count %d\n", ret, count);
12043 
12044                 return (DDI_FAILURE);
12045         }
12046 
12047         /* Get number of available interrupts */
12048         ret = ddi_intr_get_navail(dip, intr_type, &avail);
12049         if ((ret != DDI_SUCCESS) || (avail == 0)) {
12050                 mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
12051                     "ret %d avail %d\n", ret, avail);
12052 
12053                 return (DDI_FAILURE);
12054         }
12055 
12056         if (0 && avail < count) {
12057                 mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nintrs() returned %d, "
12058                     "navail() returned %d", count, avail);
12059         }
12060 
12061         /* Mpt only has one interrupt routine */
12062         if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
12063                 count = 1;
12064         }
12065 
12066         /* Allocate an array of interrupt handles */
12067         mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
12068         mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
12069 
12070         flag = DDI_INTR_ALLOC_NORMAL;
12071 
12072         /* call ddi_intr_alloc() */
12073         ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
12074             count, &actual, flag);
12075 
12076         if ((ret != DDI_SUCCESS) || (actual == 0)) {


12388                 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target),
12389                     KM_SLEEP);
12390                 tmp_tgt->m_devhdl = *dev_handle;
12391                 tmp_tgt->m_deviceinfo = dev_info;
12392                 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
12393                 tmp_tgt->m_qfull_retry_interval =
12394                     drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
12395                 tmp_tgt->m_t_throttle = MAX_THROTTLE;
12396                 devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
12397                 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
12398                 mutex_enter(&mpt->m_mutex);
12399                 if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
12400                         sas_wwn = devicename;
12401                 } else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
12402                         sas_wwn = 0;
12403                 }
12404         }
12405 
12406         phymask = mptsas_physport_to_phymask(mpt, physport);
12407         *pptgt = mptsas_tgt_alloc(&slots->m_tgttbl, *dev_handle, sas_wwn,
12408             dev_info, phymask, phynum);
12409         if (*pptgt == NULL) {
12410                 mptsas_log(mpt, CE_WARN, "Failed to allocate target "
12411                     "structure!");
12412                 rval = DEV_INFO_FAIL_ALLOC;
12413                 return (rval);
12414         }
12415         (*pptgt)->m_enclosure = enclosure;
12416         (*pptgt)->m_slot_num = bay_num;
12417         return (DEV_INFO_SUCCESS);
12418 }
12419 
12420 uint64_t
12421 mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
12422 {
12423         uint64_t        sata_guid = 0, *pwwn = NULL;
12424         int             target = ptgt->m_devhdl;
12425         uchar_t         *inq83 = NULL;
12426         int             inq83_len = 0xFF;
12427         uchar_t         *dblk = NULL;
12428         int             inq83_retry = 3;


13842                 *pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
13843         } else {
13844                 *pip = mptsas_find_path_phy(pdip, phy);
13845         }
13846 
13847         if (*pip != NULL) {
13848                 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
13849                 ASSERT(*lun_dip != NULL);
13850                 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
13851                     (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
13852                     MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
13853                         if (strncmp(guid, old_guid, strlen(guid)) == 0) {
13854                                 /*
13855                                  * Same path back online again.
13856                                  */
13857                                 (void) ddi_prop_free(old_guid);
13858                                 if ((!MDI_PI_IS_ONLINE(*pip)) &&
13859                                     (!MDI_PI_IS_STANDBY(*pip)) &&
13860                                     (ptgt->m_tgt_unconfigured == 0)) {
13861                                         rval = mdi_pi_online(*pip, 0);




13862                                 } else {
13863                                         rval = DDI_SUCCESS;
13864                                 }
13865                                 if (rval != DDI_SUCCESS) {
13866                                         mptsas_log(mpt, CE_WARN, "path:target: "
13867                                             "%x, lun:%x online failed!", target,
13868                                             lun);
13869                                         *pip = NULL;
13870                                         *lun_dip = NULL;
13871                                 }
13872                                 return (rval);
13873                         } else {
13874                                 /*
13875                                  * The GUID of the LUN has changed, possibly
13876                                  * because the customer mapped another volume
13877                                  * to the same LUN.
13878                                  */
13879                                 mptsas_log(mpt, CE_WARN, "The GUID of the "
13880                                     "target:%x, lun:%x was changed, maybe "
13881                                     "because someone mapped another volume "


14095                                 mptsas_log(mpt, CE_WARN, "mptsas driver "
14096                                     "failed to create pm-capable "
14097                                     "property, target %d", target);
14098                                 mdi_rtn = MDI_FAILURE;
14099                                 goto virt_create_done;
14100                         }
14101                 }
14102                 /*
14103                  * Create the phy-num property
14104                  */
14105                 if (mdi_prop_update_int(*pip, "phy-num",
14106                     ptgt->m_phynum) != DDI_SUCCESS) {
14107                         mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14108                             "create phy-num property for target %d lun %d",
14109                             target, lun);
14110                         mdi_rtn = MDI_FAILURE;
14111                         goto virt_create_done;
14112                 }
14113                 NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
14114                 mdi_rtn = mdi_pi_online(*pip, 0);









14115                 if (mdi_rtn == MDI_NOT_SUPPORTED) {
14116                         mdi_rtn = MDI_FAILURE;
14117                 }
14118 virt_create_done:
14119                 if (*pip && mdi_rtn != MDI_SUCCESS) {
14120                         (void) mdi_pi_free(*pip, 0);
14121                         *pip = NULL;
14122                         *lun_dip = NULL;
14123                 }
14124         }
14125 
14126         scsi_hba_nodename_compatible_free(nodename, compatible);
14127         if (lun_addr != NULL) {
14128                 kmem_free(lun_addr, SCSI_MAXNAMELEN);
14129         }
14130         if (wwn_str != NULL) {
14131                 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
14132         }
14133         if (component != NULL) {
14134                 kmem_free(component, MAXPATHLEN);


14448                         if (ndi_prop_update_int(DDI_DEV_T_NONE,
14449                             *lun_dip, "phy-num", ptgt->m_phynum) !=
14450                             DDI_PROP_SUCCESS) {
14451                                 mptsas_log(mpt, CE_WARN, "mptsas driver "
14452                                     "failed to create phy-num property for "
14453                                     "target %d", target);
14454                                 ndi_rtn = NDI_FAILURE;
14455                                 goto phys_create_done;
14456                         }
14457                 }
14458 phys_create_done:
14459                 /*
14460                  * If the props were set up OK, online the lun
14461                  */
14462                 if (ndi_rtn == NDI_SUCCESS) {
14463                         /*
14464                          * Try to online the new node
14465                          */
14466                         ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
14467                 }









14468 
14469                 /*
14470                  * If the online succeeded set rtn flag, else tear down alloc'd lun
14471                  */
14472                 if (ndi_rtn != NDI_SUCCESS) {
14473                         NDBG12(("mptsas driver unable to online "
14474                             "target %d lun %d", target, lun));
14475                         ndi_prop_remove_all(*lun_dip);
14476                         (void) ndi_devi_free(*lun_dip);
14477                         *lun_dip = NULL;
14478                 }
14479         }
14480 
14481         scsi_hba_nodename_compatible_free(nodename, compatible);
14482 
14483         if (wwn_str != NULL) {
14484                 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
14485         }
14486         if (component != NULL) {
14487                 kmem_free(component, MAXPATHLEN);


15074  * There can be no duplicate entries for the same devhdl;
15075  * invalid entries should have their devhdl set to 0xffff.
15076  */
15077 static void *
15078 mptsas_search_by_devhdl(mptsas_hash_table_t *hashtab, uint16_t devhdl)
15079 {
15080         mptsas_hash_data_t *data;
15081 
15082         data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
15083         while (data != NULL) {
15084                 if (data->devhdl == devhdl) {
15085                         break;
15086                 }
15087                 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
15088         }
15089         return (data);
15090 }
15091 
15092 mptsas_target_t *
15093 mptsas_tgt_alloc(mptsas_hash_table_t *hashtab, uint16_t devhdl, uint64_t wwid,
15094     uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum)
15095 {
15096         mptsas_target_t *tmp_tgt = NULL;
15097 
15098         tmp_tgt = mptsas_hash_search(hashtab, wwid, phymask);
15099         if (tmp_tgt != NULL) {
15100                 NDBG20(("Hash item already exists"));
15101                 tmp_tgt->m_deviceinfo = devinfo;
15102                 tmp_tgt->m_devhdl = devhdl;
15103                 return (tmp_tgt);
15104         }
15105         tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
15106         if (tmp_tgt == NULL) {
15107                 cmn_err(CE_WARN, "Fatal, target allocation failed");
15108                 return (NULL);
15109         }
15110         tmp_tgt->m_devhdl = devhdl;
15111         tmp_tgt->m_sas_wwn = wwid;
15112         tmp_tgt->m_deviceinfo = devinfo;
15113         tmp_tgt->m_phymask = phymask;
15114         tmp_tgt->m_phynum = phynum;
15115         /* Initialize the tgt structure */
15116         tmp_tgt->m_qfull_retries = QFULL_RETRIES;
15117         tmp_tgt->m_qfull_retry_interval =
15118             drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
15119         tmp_tgt->m_t_throttle = MAX_THROTTLE;


15120 
15121         mptsas_hash_add(hashtab, tmp_tgt);
15122 
15123         return (tmp_tgt);
15124 }
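
/*
 * Illustrative sketch (not part of the driver source) of the
 * lookup-or-insert pattern mptsas_tgt_alloc() follows: the table is keyed
 * on (wwid, phymask), an existing entry is refreshed with the new devhdl,
 * and a missing entry is allocated zeroed and linked in.  The list-based
 * table and all names here are hypothetical simplifications of the
 * driver's hash table.
 */
#include <stdint.h>
#include <stdlib.h>

typedef struct tgt_entry {
        struct tgt_entry *next;
        uint64_t        wwid;
        uint32_t        phymask;
        uint16_t        devhdl;
} tgt_entry_t;

static tgt_entry_t *
tgt_lookup_or_insert(tgt_entry_t **head, uint64_t wwid, uint32_t phymask,
    uint16_t devhdl)
{
        tgt_entry_t *t;

        for (t = *head; t != NULL; t = t->next) {
                if (t->wwid == wwid && t->phymask == phymask) {
                        t->devhdl = devhdl;     /* refresh a stale handle */
                        return (t);
                }
        }
        if ((t = calloc(1, sizeof (*t))) == NULL)
                return (NULL);
        t->wwid = wwid;
        t->phymask = phymask;
        t->devhdl = devhdl;
        t->next = *head;
        *head = t;
        return (t);
}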
15125 
15126 static void
15127 mptsas_tgt_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
15128     mptsas_phymask_t phymask)
15129 {
15130         mptsas_target_t *tmp_tgt;
15131         tmp_tgt = mptsas_hash_rem(hashtab, wwid, phymask);
15132         if (tmp_tgt == NULL) {
15133                 cmn_err(CE_WARN, "Tgt not found, nothing to free");
15134         } else {

15135                 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
15136         }
15137 }
15138 
15139 /*
15140  * Return the entry in the hash table
15141  */
15142 static mptsas_smp_t *
15143 mptsas_smp_alloc(mptsas_hash_table_t *hashtab, mptsas_smp_t *data)
15144 {
15145         uint64_t key1 = data->m_sasaddr;
15146         mptsas_phymask_t key2 = data->m_phymask;
15147         mptsas_smp_t *ret_data;
15148 
15149         ret_data = mptsas_hash_search(hashtab, key1, key2);
15150         if (ret_data != NULL) {
15151                 bcopy(data, ret_data, sizeof (mptsas_smp_t));
15152                 return (ret_data);
15153         }
15154 


15321         return (this->data);
15322 }
15323 
15324 /*
15325  * Functions for SGPIO LED support
15326  */
15327 static dev_info_t *
15328 mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
15329 {
15330         dev_info_t      *dip;
15331         int             prop;
15332         dip = e_ddi_hold_devi_by_dev(dev, 0);
15333         if (dip == NULL)
15334                 return (dip);
15335         prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
15336             "phymask", 0);
15337         *phymask = (mptsas_phymask_t)prop;
15338         ddi_release_devi(dip);
15339         return (dip);
15340 }







15341 




















































































15342 int
15343 mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
15344     ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
15345     uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
15346 {
15347         ddi_dma_cookie_t        new_cookie;
15348         size_t                  alloc_len;
15349         uint_t                  ncookie;
15350 
15351         if (cookiep == NULL)
15352                 cookiep = &new_cookie;
15353 
15354         if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
15355             NULL, dma_hdp) != DDI_SUCCESS) {
15356                 *dma_hdp = NULL;
15357                 return (FALSE);
15358         }
15359 
15360         if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
15361             DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,


15370             cookiep, &ncookie) != DDI_DMA_MAPPED) {
15371                 (void) ddi_dma_mem_free(acc_hdp);
15372                 ddi_dma_free_handle(dma_hdp);
15373                 *dma_hdp = NULL;
15374                 return (FALSE);
15375         }
15376 
15377         return (TRUE);
15378 }
15379 
15380 void
15381 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
15382 {
15383         if (*dma_hdp == NULL)
15384                 return;
15385 
15386         (void) ddi_dma_unbind_handle(*dma_hdp);
15387         (void) ddi_dma_mem_free(acc_hdp);
15388         ddi_dma_free_handle(dma_hdp);
15389         *dma_hdp = NULL;






















15390 }
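
/*
 * Illustrative caller of the two helpers above (hypothetical function;
 * the attribute template and length are stand-ins).  It relies on the DDI
 * headers already included at the top of this file.
 */
static int
example_dma_buffer(mptsas_t *mpt, ddi_dma_attr_t attrs, uint32_t len)
{
        ddi_dma_handle_t        dma_hdl;
        ddi_acc_handle_t        acc_hdl;
        caddr_t                 memp;
        ddi_dma_cookie_t        cookie;

        if (mptsas_dma_addr_create(mpt, attrs, &dma_hdl, &acc_hdl,
            &memp, len, &cookie) == FALSE)
                return (DDI_FAILURE);

        /* ... fill memp and hand cookie.dmac_address to the HBA ... */

        mptsas_dma_addr_destroy(&dma_hdl, &acc_hdl);
        return (DDI_SUCCESS);
}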