1 /*
   2  * mr_sas.c: source for mr_sas driver
   3  *
   4  * Solaris MegaRAID device driver for SAS2.0 controllers
   5  * Copyright (c) 2008-2012, LSI Logic Corporation.
   6  * All rights reserved.
   7  *
   8  * Version:
   9  * Author:
  10  *              Swaminathan K S
  11  *              Arun Chandrashekhar
  12  *              Manju R
  13  *              Rasheed
  14  *              Shakeel Bukhari
  15  *
  16  * Redistribution and use in source and binary forms, with or without
  17  * modification, are permitted provided that the following conditions are met:
  18  *
  19  * 1. Redistributions of source code must retain the above copyright notice,
  20  *    this list of conditions and the following disclaimer.
  21  *
  22  * 2. Redistributions in binary form must reproduce the above copyright notice,
  23  *    this list of conditions and the following disclaimer in the documentation
  24  *    and/or other materials provided with the distribution.
  25  *
  26  * 3. Neither the name of the author nor the names of its contributors may be
  27  *    used to endorse or promote products derived from this software without
  28  *    specific prior written permission.
  29  *
  30  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  31  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  32  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
  33  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  34  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  35  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
  36  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
  37  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
  38  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  39  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
  40  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
  41  * DAMAGE.
  42  */
  43 
  44 /*
  45  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
  46  * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
  47  * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
  48  * Copyright 2015, 2017 Citrus IT Limited. All rights reserved.
  49  * Copyright 2015 Garrett D'Amore <garrett@damore.org>
  50  */
  51 
  52 #include <sys/types.h>
  53 #include <sys/param.h>
  54 #include <sys/file.h>
  55 #include <sys/errno.h>
  56 #include <sys/open.h>
  57 #include <sys/cred.h>
  58 #include <sys/modctl.h>
  59 #include <sys/conf.h>
  60 #include <sys/devops.h>
  61 #include <sys/cmn_err.h>
  62 #include <sys/kmem.h>
  63 #include <sys/stat.h>
  64 #include <sys/mkdev.h>
  65 #include <sys/pci.h>
  66 #include <sys/scsi/scsi.h>
  67 #include <sys/ddi.h>
  68 #include <sys/sunddi.h>
  69 #include <sys/atomic.h>
  70 #include <sys/signal.h>
  71 #include <sys/byteorder.h>
  72 #include <sys/sdt.h>
  73 #include <sys/fs/dv_node.h>       /* devfs_clean */
  74 
  75 #include "mr_sas.h"
  76 
  77 /*
  78  * FMA header files
  79  */
  80 #include <sys/ddifm.h>
  81 #include <sys/fm/protocol.h>
  82 #include <sys/fm/util.h>
  83 #include <sys/fm/io/ddi.h>
  84 
  85 /* Macros to help Skinny and stock 2108/MFI live together. */
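     /*
      * On Skinny controllers the inbound queue port is written as separate
      * low/high 32-bit halves (the high half is simply zeroed here), while
      * the other MFI controllers take a single WR_IB_QPORT() write.
      */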
  86 #define WR_IB_PICK_QPORT(addr, instance) \
  87         if ((instance)->skinny) { \
  88                 WR_IB_LOW_QPORT((addr), (instance)); \
  89                 WR_IB_HIGH_QPORT(0, (instance)); \
  90         } else { \
  91                 WR_IB_QPORT((addr), (instance)); \
  92         }
  93 
  94 /*
  95  * Local static data
  96  */
  97 static void     *mrsas_state = NULL;
  98 static volatile boolean_t       mrsas_relaxed_ordering = B_TRUE;
  99 volatile int    debug_level_g = CL_NONE;
 100 static volatile int     msi_enable = 1;
 101 static volatile int     ctio_enable = 1;
 102 
 103 /* Default Timeout value to issue online controller reset */
 104 volatile int  debug_timeout_g  = 0xF0;          /* 0xB4; */
 105 /* Simulate consecutive firmware faults */
 106 static volatile int  debug_fw_faults_after_ocr_g  = 0;
 107 #ifdef OCRDEBUG
 108 /* Simulate three consecutive timeouts for an I/O */
 109 static volatile int  debug_consecutive_timeout_after_ocr_g  = 0;
 110 #endif
 111 
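     /*
      * These symbols are declared weak so the module can still load if the
      * framework does not provide them; mrsas_open() and mrsas_close() check
      * the function pointers for NULL before calling them.
      */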
 112 #pragma weak scsi_hba_open
 113 #pragma weak scsi_hba_close
 114 #pragma weak scsi_hba_ioctl
 115 
 116 /* Local static prototypes. */
 117 static int      mrsas_getinfo(dev_info_t *, ddi_info_cmd_t,  void *, void **);
 118 static int      mrsas_attach(dev_info_t *, ddi_attach_cmd_t);
 119 #ifdef __sparc
 120 static int      mrsas_reset(dev_info_t *, ddi_reset_cmd_t);
 121 #else
 122 static int      mrsas_quiesce(dev_info_t *);
 123 #endif
 124 static int      mrsas_detach(dev_info_t *, ddi_detach_cmd_t);
 125 static int      mrsas_open(dev_t *, int, int, cred_t *);
 126 static int      mrsas_close(dev_t, int, int, cred_t *);
 127 static int      mrsas_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
 128 
 129 static int      mrsas_tran_tgt_init(dev_info_t *, dev_info_t *,
 130                     scsi_hba_tran_t *, struct scsi_device *);
 131 static struct scsi_pkt *mrsas_tran_init_pkt(struct scsi_address *, register
 132                     struct scsi_pkt *, struct buf *, int, int, int, int,
 133                     int (*)(), caddr_t);
 134 static int      mrsas_tran_start(struct scsi_address *,
 135                     register struct scsi_pkt *);
 136 static int      mrsas_tran_abort(struct scsi_address *, struct scsi_pkt *);
 137 static int      mrsas_tran_reset(struct scsi_address *, int);
 138 static int      mrsas_tran_getcap(struct scsi_address *, char *, int);
 139 static int      mrsas_tran_setcap(struct scsi_address *, char *, int, int);
 140 static void     mrsas_tran_destroy_pkt(struct scsi_address *,
 141                     struct scsi_pkt *);
 142 static void     mrsas_tran_dmafree(struct scsi_address *, struct scsi_pkt *);
 143 static void     mrsas_tran_sync_pkt(struct scsi_address *, struct scsi_pkt *);
 144 static int      mrsas_tran_quiesce(dev_info_t *dip);
 145 static int      mrsas_tran_unquiesce(dev_info_t *dip);
 146 static uint_t   mrsas_isr();
 147 static uint_t   mrsas_softintr();
 148 static void     mrsas_undo_resources(dev_info_t *, struct mrsas_instance *);
 149 
 150 static void     free_space_for_mfi(struct mrsas_instance *);
 151 static uint32_t read_fw_status_reg_ppc(struct mrsas_instance *);
 152 static void     issue_cmd_ppc(struct mrsas_cmd *, struct mrsas_instance *);
 153 static int      issue_cmd_in_poll_mode_ppc(struct mrsas_instance *,
 154                     struct mrsas_cmd *);
 155 static int      issue_cmd_in_sync_mode_ppc(struct mrsas_instance *,
 156                     struct mrsas_cmd *);
 157 static void     enable_intr_ppc(struct mrsas_instance *);
 158 static void     disable_intr_ppc(struct mrsas_instance *);
 159 static int      intr_ack_ppc(struct mrsas_instance *);
 160 static void     flush_cache(struct mrsas_instance *instance);
 161 void    display_scsi_inquiry(caddr_t);
 162 static int      start_mfi_aen(struct mrsas_instance *instance);
 163 static int      handle_drv_ioctl(struct mrsas_instance *instance,
 164                     struct mrsas_ioctl *ioctl, int mode);
 165 static int      handle_mfi_ioctl(struct mrsas_instance *instance,
 166                     struct mrsas_ioctl *ioctl, int mode);
 167 static int      handle_mfi_aen(struct mrsas_instance *instance,
 168                     struct mrsas_aen *aen);
 169 static struct mrsas_cmd *build_cmd(struct mrsas_instance *,
 170     struct scsi_address *, struct scsi_pkt *, uchar_t *);
 171 static int      alloc_additional_dma_buffer(struct mrsas_instance *);
 172 static void     complete_cmd_in_sync_mode(struct mrsas_instance *,
 173                 struct mrsas_cmd *);
 174 static int      mrsas_kill_adapter(struct mrsas_instance *);
 175 static int      mrsas_issue_init_mfi(struct mrsas_instance *);
 176 static int      mrsas_reset_ppc(struct mrsas_instance *);
 177 static uint32_t mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *);
 178 static int      wait_for_outstanding(struct mrsas_instance *instance);
 179 static int      register_mfi_aen(struct mrsas_instance *instance,
 180                     uint32_t seq_num, uint32_t class_locale_word);
 181 static int      issue_mfi_pthru(struct mrsas_instance *instance, struct
 182                     mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
 183 static int      issue_mfi_dcmd(struct mrsas_instance *instance, struct
 184                     mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
 185 static int      issue_mfi_smp(struct mrsas_instance *instance, struct
 186                     mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
 187 static int      issue_mfi_stp(struct mrsas_instance *instance, struct
 188                     mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
 189 static int      abort_aen_cmd(struct mrsas_instance *instance,
 190                     struct mrsas_cmd *cmd_to_abort);
 191 
 192 static void     mrsas_rem_intrs(struct mrsas_instance *instance);
 193 static int      mrsas_add_intrs(struct mrsas_instance *instance, int intr_type);
 194 
 195 static void     mrsas_tran_tgt_free(dev_info_t *, dev_info_t *,
 196                     scsi_hba_tran_t *, struct scsi_device *);
 197 static int      mrsas_tran_bus_config(dev_info_t *, uint_t,
 198                     ddi_bus_config_op_t, void *, dev_info_t **);
 199 static int      mrsas_parse_devname(char *, int *, int *);
 200 static int      mrsas_config_all_devices(struct mrsas_instance *);
 201 static int      mrsas_config_ld(struct mrsas_instance *, uint16_t,
 202                         uint8_t, dev_info_t **);
 203 static int      mrsas_name_node(dev_info_t *, char *, int);
 204 static void     mrsas_issue_evt_taskq(struct mrsas_eventinfo *);
 205 static void     free_additional_dma_buffer(struct mrsas_instance *);
 206 static void io_timeout_checker(void *);
 207 static void mrsas_fm_init(struct mrsas_instance *);
 208 static void mrsas_fm_fini(struct mrsas_instance *);
 209 
 210 static struct mrsas_function_template mrsas_function_template_ppc = {
 211         .read_fw_status_reg = read_fw_status_reg_ppc,
 212         .issue_cmd = issue_cmd_ppc,
 213         .issue_cmd_in_sync_mode = issue_cmd_in_sync_mode_ppc,
 214         .issue_cmd_in_poll_mode = issue_cmd_in_poll_mode_ppc,
 215         .enable_intr = enable_intr_ppc,
 216         .disable_intr = disable_intr_ppc,
 217         .intr_ack = intr_ack_ppc,
 218         .init_adapter = mrsas_init_adapter_ppc
 219 };
 220 
 221 
 222 static struct mrsas_function_template mrsas_function_template_fusion = {
 223         .read_fw_status_reg = tbolt_read_fw_status_reg,
 224         .issue_cmd = tbolt_issue_cmd,
 225         .issue_cmd_in_sync_mode = tbolt_issue_cmd_in_sync_mode,
 226         .issue_cmd_in_poll_mode = tbolt_issue_cmd_in_poll_mode,
 227         .enable_intr = tbolt_enable_intr,
 228         .disable_intr = tbolt_disable_intr,
 229         .intr_ack = tbolt_intr_ack,
 230         .init_adapter = mrsas_init_adapter_tbolt
 231 };
 232 
 233 
 234 ddi_dma_attr_t mrsas_generic_dma_attr = {
 235         DMA_ATTR_V0,            /* dma_attr_version */
 236         0,                      /* low DMA address range */
 237         0xFFFFFFFFU,            /* high DMA address range */
 238         0xFFFFFFFFU,            /* DMA counter register  */
 239         8,                      /* DMA address alignment */
 240         0x07,                   /* DMA burstsizes  */
 241         1,                      /* min DMA size */
 242         0xFFFFFFFFU,            /* max DMA size */
 243         0xFFFFFFFFU,            /* segment boundary */
 244         MRSAS_MAX_SGE_CNT,      /* dma_attr_sglen */
 245         512,                    /* granularity of device */
 246         0                       /* bus specific DMA flags */
 247 };
 248 
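     /* Default maximum transfer size: 0x1000000 bytes (16 MB). */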
 249 int32_t mrsas_max_cap_maxxfer = 0x1000000;
 250 
 251 /*
 252  * Fix for Thunderbolt controller I/O timeouts seen when the I/O write size
 253  * is 1 MB: limit the maximum transfer size to 256 KB.
 254  */
 255 uint32_t mrsas_tbolt_max_cap_maxxfer = (512 * 512);
 256 
 257 /*
 258  * cb_ops contains base level routines
 259  */
 260 static struct cb_ops mrsas_cb_ops = {
 261         mrsas_open,             /* open */
 262         mrsas_close,            /* close */
 263         nodev,                  /* strategy */
 264         nodev,                  /* print */
 265         nodev,                  /* dump */
 266         nodev,                  /* read */
 267         nodev,                  /* write */
 268         mrsas_ioctl,            /* ioctl */
 269         nodev,                  /* devmap */
 270         nodev,                  /* mmap */
 271         nodev,                  /* segmap */
 272         nochpoll,               /* poll */
 273         nodev,                  /* cb_prop_op */
 274         0,                      /* streamtab  */
 275         D_NEW | D_HOTPLUG,      /* cb_flag */
 276         CB_REV,                 /* cb_rev */
 277         nodev,                  /* cb_aread */
 278         nodev                   /* cb_awrite */
 279 };
 280 
 281 /*
 282  * dev_ops contains configuration routines
 283  */
 284 static struct dev_ops mrsas_ops = {
 285         DEVO_REV,               /* rev, */
 286         0,                      /* refcnt */
 287         mrsas_getinfo,          /* getinfo */
 288         nulldev,                /* identify */
 289         nulldev,                /* probe */
 290         mrsas_attach,           /* attach */
 291         mrsas_detach,           /* detach */
 292 #ifdef  __sparc
 293         mrsas_reset,            /* reset */
 294 #else   /* __sparc */
 295         nodev,
 296 #endif  /* __sparc */
 297         &mrsas_cb_ops,              /* char/block ops */
 298         NULL,                   /* bus ops */
 299         NULL,                   /* power */
 300 #ifdef __sparc
 301         ddi_quiesce_not_needed
 302 #else   /* __sparc */
 303         mrsas_quiesce   /* quiesce */
 304 #endif  /* __sparc */
 305 };
 306 
 307 static struct modldrv modldrv = {
 308         &mod_driverops,             /* module type - driver */
 309         MRSAS_VERSION,
 310         &mrsas_ops,         /* driver ops */
 311 };
 312 
 313 static struct modlinkage modlinkage = {
 314         MODREV_1,       /* ml_rev - must be MODREV_1 */
 315         &modldrv,   /* ml_linkage */
 316         NULL            /* end of driver linkage */
 317 };
 318 
 319 static struct ddi_device_acc_attr endian_attr = {
 320         DDI_DEVICE_ATTR_V1,
 321         DDI_STRUCTURE_LE_ACC,
 322         DDI_STRICTORDER_ACC,
 323         DDI_DEFAULT_ACC
 324 };
 325 
 326 /* Use the LSI Fast Path for the 2208 (tbolt) commands. */
 327 unsigned int enable_fp = 1;
 328 
 329 
 330 /*
 331  * ************************************************************************** *
 332  *                                                                            *
 333  *         common entry points - for loadable kernel modules                  *
 334  *                                                                            *
 335  * ************************************************************************** *
 336  */
 337 
 338 /*
 339  * _init - initialize a loadable module
 340  * @void
 341  *
 342  * The driver should perform any one-time resource allocation or data
 343  * initialization during driver loading in _init(). For example, the driver
 344  * should initialize any mutexes global to the driver in this routine.
 345  * The driver should not, however, use _init() to allocate or initialize
 346  * anything that has to do with a particular instance of the device.
 347  * Per-instance initialization must be done in attach().
 348  */
 349 int
 350 _init(void)
 351 {
 352         int ret;
 353 
 354         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
 355 
 356         ret = ddi_soft_state_init(&mrsas_state,
 357             sizeof (struct mrsas_instance), 0);
 358 
 359         if (ret != DDI_SUCCESS) {
 360                 cmn_err(CE_WARN, "mr_sas: could not init state");
 361                 return (ret);
 362         }
 363 
 364         if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) {
 365                 cmn_err(CE_WARN, "mr_sas: could not init scsi hba");
 366                 ddi_soft_state_fini(&mrsas_state);
 367                 return (ret);
 368         }
 369 
 370         ret = mod_install(&modlinkage);
 371 
 372         if (ret != DDI_SUCCESS) {
 373                 cmn_err(CE_WARN, "mr_sas: mod_install failed");
 374                 scsi_hba_fini(&modlinkage);
 375                 ddi_soft_state_fini(&mrsas_state);
 376         }
 377 
 378         return (ret);
 379 }
 380 
 381 /*
 382  * _info - returns information about a loadable module.
 383  * @void
 384  *
 385  * _info() is called to return module information. This is a typical entry
 386  * point with a predefined role; it simply calls mod_info().
 387  */
 388 int
 389 _info(struct modinfo *modinfop)
 390 {
 391         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
 392 
 393         return (mod_info(&modlinkage, modinfop));
 394 }
 395 
 396 /*
 397  * _fini - prepare a loadable module for unloading
 398  * @void
 399  *
 400  * In _fini(), the driver should release any resources that were allocated in
 401  * _init(). The driver must remove itself from the system module list.
 402  */
 403 int
 404 _fini(void)
 405 {
 406         int ret;
 407 
 408         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
 409 
 410         if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS) {
 411                 con_log(CL_ANN1,
 412                     (CE_WARN, "_fini: mod_remove() failed, error 0x%X", ret));
 413                 return (ret);
 414         }
 415 
 416         scsi_hba_fini(&modlinkage);
 417         con_log(CL_DLEVEL1, (CE_NOTE, "_fini: scsi_hba_fini() done."));
 418 
 419         ddi_soft_state_fini(&mrsas_state);
 420         con_log(CL_DLEVEL1, (CE_NOTE, "_fini: ddi_soft_state_fini() done."));
 421 
 422         return (ret);
 423 }
 424 
 425 
 426 /*
 427  * ************************************************************************** *
 428  *                                                                            *
 429  *               common entry points - for autoconfiguration                  *
 430  *                                                                            *
 431  * ************************************************************************** *
 432  */
 433 /*
 434  * attach - adds a device to the system as part of initialization
 435  * @dip:
 436  * @cmd:
 437  *
 438  * The kernel calls a driver's attach() entry point to attach an instance of
 439  * a device (for MegaRAID, an instance of a controller) or to resume
 440  * operation for an instance of a device that has been suspended or has been
 441  * shut down by the power management framework.
 442  * The attach() entry point typically includes the following types of
 443  * processing:
 444  * - allocate a soft-state structure for the device instance (for MegaRAID,
 445  *   controller instance)
 446  * - initialize per-instance mutexes
 447  * - initialize condition variables
 448  * - register the device's interrupts (for MegaRAID, controller's interrupts)
 449  * - map the registers and memory of the device instance (for MegaRAID,
 450  *   controller instance)
 451  * - create minor device nodes for the device instance (for MegaRAID,
 452  *   controller instance)
 453  * - report that the device instance (for MegaRAID, controller instance) has
 454  *   attached
 455  */
 456 static int
 457 mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
 458 {
 459         int             instance_no;
 460         int             nregs;
 461         int             i = 0;
 462         uint8_t         irq;
 463         uint16_t        vendor_id;
 464         uint16_t        device_id;
 465         uint16_t        subsysvid;
 466         uint16_t        subsysid;
 467         uint16_t        command;
 468         off_t           reglength = 0;
 469         int             intr_types = 0;
 470         char            *data;
 471 
 472         scsi_hba_tran_t         *tran;
 473         ddi_dma_attr_t  tran_dma_attr;
 474         struct mrsas_instance   *instance;
 475 
 476         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
 477 
 478         /* CONSTCOND */
 479         ASSERT(NO_COMPETING_THREADS);
 480 
 481         instance_no = ddi_get_instance(dip);
 482 
 483         /*
 484          * check to see whether this device is in a DMA-capable slot.
 485          */
 486         if (ddi_slaveonly(dip) == DDI_SUCCESS) {
 487                 dev_err(dip, CE_WARN, "Device in slave-only slot, unused");
 488                 return (DDI_FAILURE);
 489         }
 490 
 491         switch (cmd) {
 492         case DDI_ATTACH:
 493                 /* allocate the soft state for the instance */
 494                 if (ddi_soft_state_zalloc(mrsas_state, instance_no)
 495                     != DDI_SUCCESS) {
 496                         dev_err(dip, CE_WARN, "Failed to allocate soft state");
 497                         return (DDI_FAILURE);
 498                 }
 499 
 500                 instance = (struct mrsas_instance *)ddi_get_soft_state
 501                     (mrsas_state, instance_no);
 502 
 503                 if (instance == NULL) {
 504                         dev_err(dip, CE_WARN, "Bad soft state");
 505                         ddi_soft_state_free(mrsas_state, instance_no);
 506                         return (DDI_FAILURE);
 507                 }
 508 
 509                 instance->unroll.softs       = 1;
 510 
 511                 /* Setup the PCI configuration space handles */
 512                 if (pci_config_setup(dip, &instance->pci_handle) !=
 513                     DDI_SUCCESS) {
 514                         dev_err(dip, CE_WARN, "pci config setup failed");
 515 
 516                         ddi_soft_state_free(mrsas_state, instance_no);
 517                         return (DDI_FAILURE);
 518                 }
 519 
 520                 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
 521                         dev_err(dip, CE_WARN, "Failed to get registers");
 522 
 523                         pci_config_teardown(&instance->pci_handle);
 524                         ddi_soft_state_free(mrsas_state, instance_no);
 525                         return (DDI_FAILURE);
 526                 }
 527 
 528                 vendor_id = pci_config_get16(instance->pci_handle,
 529                     PCI_CONF_VENID);
 530                 device_id = pci_config_get16(instance->pci_handle,
 531                     PCI_CONF_DEVID);
 532 
 533                 subsysvid = pci_config_get16(instance->pci_handle,
 534                     PCI_CONF_SUBVENID);
 535                 subsysid = pci_config_get16(instance->pci_handle,
 536                     PCI_CONF_SUBSYSID);
 537 
 538                 pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
 539                     (pci_config_get16(instance->pci_handle,
 540                     PCI_CONF_COMM) | PCI_COMM_ME));
 541                 irq = pci_config_get8(instance->pci_handle,
 542                     PCI_CONF_ILINE);
 543 
 544                 dev_err(dip, CE_CONT,
 545                     "?0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s\n",
 546                     vendor_id, device_id, subsysvid,
 547                     subsysid, irq, MRSAS_VERSION);
 548 
 549                 /* enable bus-mastering */
 550                 command = pci_config_get16(instance->pci_handle,
 551                     PCI_CONF_COMM);
 552 
 553                 if (!(command & PCI_COMM_ME)) {
 554                         command |= PCI_COMM_ME;
 555 
 556                         pci_config_put16(instance->pci_handle,
 557                             PCI_CONF_COMM, command);
 558 
 559                         con_log(CL_ANN, (CE_CONT, "mr_sas%d: "
 560                             "enable bus-mastering", instance_no));
 561                 } else {
 562                         con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
 563                             "bus-mastering already set", instance_no));
 564                 }
 565 
 566                 /* initialize function pointers */
 567                 switch (device_id) {
 568                 case PCI_DEVICE_ID_LSI_INVADER:
 569                 case PCI_DEVICE_ID_LSI_FURY:
 570                 case PCI_DEVICE_ID_LSI_INTRUDER:
 571                 case PCI_DEVICE_ID_LSI_INTRUDER_24:
 572                 case PCI_DEVICE_ID_LSI_CUTLASS_52:
 573                 case PCI_DEVICE_ID_LSI_CUTLASS_53:
 574                         dev_err(dip, CE_CONT, "?Gen3 device detected\n");
 575                         instance->gen3 = 1;
 576                         /* FALLTHROUGH */
 577                 case PCI_DEVICE_ID_LSI_TBOLT:
 578                         dev_err(dip, CE_CONT, "?TBOLT device detected\n");
 579 
 580                         instance->func_ptr =
 581                             &mrsas_function_template_fusion;
 582                         instance->tbolt = 1;
 583                         break;
 584 
 585                 case PCI_DEVICE_ID_LSI_SKINNY:
 586                 case PCI_DEVICE_ID_LSI_SKINNY_NEW:
 587                         /*
 588                          * FALLTHRU to PPC-style functions, but mark this
 589                          * instance as Skinny, because the register set is
 590                          * slightly different (See WR_IB_PICK_QPORT), and
 591                          * certain other features are available to a Skinny
 592                          * HBA.
 593                          */
 594                         dev_err(dip, CE_CONT, "?Skinny device detected\n");
 595                         instance->skinny = 1;
 596                         /* FALLTHRU */
 597 
 598                 case PCI_DEVICE_ID_LSI_2108VDE:
 599                 case PCI_DEVICE_ID_LSI_2108V:
 600                         dev_err(dip, CE_CONT,
 601                             "?2108 Liberator device detected\n");
 602 
 603                         instance->func_ptr =
 604                             &mrsas_function_template_ppc;
 605                         break;
 606 
 607                 default:
 608                         dev_err(dip, CE_WARN, "Invalid device detected");
 609 
 610                         pci_config_teardown(&instance->pci_handle);
 611                         ddi_soft_state_free(mrsas_state, instance_no);
 612                         return (DDI_FAILURE);
 613                 }
 614 
 615                 instance->baseaddress = pci_config_get32(
 616                     instance->pci_handle, PCI_CONF_BASE0);
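                     /*
                      * BAR0 is presumably an I/O BAR here: mask off the PCI
                      * I/O-space indicator bits and keep only the 16-bit
                      * port base.
                      */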
 617                 instance->baseaddress &= 0x0fffc;
 618 
 619                 instance->dip                = dip;
 620                 instance->vendor_id  = vendor_id;
 621                 instance->device_id  = device_id;
 622                 instance->subsysvid  = subsysvid;
 623                 instance->subsysid   = subsysid;
 624                 instance->instance   = instance_no;
 625 
 626                 /* Initialize FMA */
 627                 instance->fm_capabilities = ddi_prop_get_int(
 628                     DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS,
 629                     "fm-capable", DDI_FM_EREPORT_CAPABLE |
 630                     DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE
 631                     | DDI_FM_ERRCB_CAPABLE);
 632 
 633                 mrsas_fm_init(instance);
 634 
 635                 /* Setup register map */
 636                 if ((ddi_dev_regsize(instance->dip,
 637                     REGISTER_SET_IO_2108, &reglength) != DDI_SUCCESS) ||
 638                     reglength < MINIMUM_MFI_MEM_SZ) {
 639                         goto fail_attach;
 640                 }
 641                 if (reglength > DEFAULT_MFI_MEM_SZ) {
 642                         reglength = DEFAULT_MFI_MEM_SZ;
 643                         con_log(CL_DLEVEL1, (CE_NOTE,
 644                             "mr_sas: register length to map is 0x%lx bytes",
 645                             reglength));
 646                 }
 647                 if (ddi_regs_map_setup(instance->dip,
 648                     REGISTER_SET_IO_2108, &instance->regmap, 0,
 649                     reglength, &endian_attr, &instance->regmap_handle)
 650                     != DDI_SUCCESS) {
 651                         dev_err(dip, CE_WARN, "couldn't map control registers");
 652                         goto fail_attach;
 653                 }
 654 
 655                 instance->unroll.regs = 1;
 656 
 657                 /*
 658                  * Disable Interrupt Now.
 659                  * Setup Software interrupt
 660                  */
 661                 instance->func_ptr->disable_intr(instance);
 662 
 663                 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
 664                     "mrsas-enable-msi", &data) == DDI_SUCCESS) {
 665                         if (strncmp(data, "no", 3) == 0) {
 666                                 msi_enable = 0;
 667                                 con_log(CL_ANN1, (CE_WARN,
 668                                     "msi_enable = %d disabled", msi_enable));
 669                         }
 670                         ddi_prop_free(data);
 671                 }
 672 
 673                 dev_err(dip, CE_CONT, "?msi_enable = %d\n", msi_enable);
 674 
 675                 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
 676                     "mrsas-enable-fp", &data) == DDI_SUCCESS) {
 677                         if (strncmp(data, "no", 3) == 0) {
 678                                 enable_fp = 0;
 679                                 dev_err(dip, CE_NOTE,
 680                                     "enable_fp = %d, Fast-Path disabled.\n",
 681                                     enable_fp);
 682                         }
 683 
 684                         ddi_prop_free(data);
 685                 }
 686 
 687                 dev_err(dip, CE_CONT, "?enable_fp = %d\n", enable_fp);
 688 
 689                 /* Check for all supported interrupt types */
 690                 if (ddi_intr_get_supported_types(
 691                     dip, &intr_types) != DDI_SUCCESS) {
 692                         dev_err(dip, CE_WARN,
 693                             "ddi_intr_get_supported_types() failed");
 694                         goto fail_attach;
 695                 }
 696 
 697                 con_log(CL_DLEVEL1, (CE_NOTE,
 698                     "ddi_intr_get_supported_types() ret: 0x%x", intr_types));
 699 
 700                 /* Initialize and Setup Interrupt handler */
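                     /* Prefer MSI-X, then MSI, and fall back to FIXED interrupts. */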
 701                 if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) {
 702                         if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSIX) !=
 703                             DDI_SUCCESS) {
 704                                 dev_err(dip, CE_WARN,
 705                                     "MSIX interrupt query failed");
 706                                 goto fail_attach;
 707                         }
 708                         instance->intr_type = DDI_INTR_TYPE_MSIX;
 709                 } else if (msi_enable && (intr_types & DDI_INTR_TYPE_MSI)) {
 710                         if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSI) !=
 711                             DDI_SUCCESS) {
 712                                 dev_err(dip, CE_WARN,
 713                                     "MSI interrupt query failed");
 714                                 goto fail_attach;
 715                         }
 716                         instance->intr_type = DDI_INTR_TYPE_MSI;
 717                 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
 718                         msi_enable = 0;
 719                         if (mrsas_add_intrs(instance, DDI_INTR_TYPE_FIXED) !=
 720                             DDI_SUCCESS) {
 721                                 dev_err(dip, CE_WARN,
 722                                     "FIXED interrupt query failed");
 723                                 goto fail_attach;
 724                         }
 725                         instance->intr_type = DDI_INTR_TYPE_FIXED;
 726                 } else {
 727                         dev_err(dip, CE_WARN, "Device cannot "
 728                             "support either FIXED or MSI/X "
 729                             "interrupts");
 730                         goto fail_attach;
 731                 }
 732 
 733                 instance->unroll.intr = 1;
 734 
 735                 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
 736                     "mrsas-enable-ctio", &data) == DDI_SUCCESS) {
 737                         if (strncmp(data, "no", 3) == 0) {
 738                                 ctio_enable = 0;
 739                                 con_log(CL_ANN1, (CE_WARN,
 740                                     "ctio_enable = %d disabled", ctio_enable));
 741                         }
 742                         ddi_prop_free(data);
 743                 }
 744 
 745                 dev_err(dip, CE_CONT, "?ctio_enable = %d\n", ctio_enable);
 746 
 747                 /* setup the mfi based low level driver */
 748                 if (mrsas_init_adapter(instance) != DDI_SUCCESS) {
 749                         dev_err(dip, CE_WARN,
 750                             "could not initialize the low level driver");
 751 
 752                         goto fail_attach;
 753                 }
 754 
 755                 /* Initialize all Mutex */
 756                 INIT_LIST_HEAD(&instance->completed_pool_list);
 757                 mutex_init(&instance->completed_pool_mtx, NULL,
 758                     MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 759 
 760                 mutex_init(&instance->sync_map_mtx, NULL,
 761                     MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 762 
 763                 mutex_init(&instance->app_cmd_pool_mtx, NULL,
 764                     MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 765 
 766                 mutex_init(&instance->config_dev_mtx, NULL,
 767                     MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 768 
 769                 mutex_init(&instance->cmd_pend_mtx, NULL,
 770                     MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 771 
 772                 mutex_init(&instance->ocr_flags_mtx, NULL,
 773                     MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 774 
 775                 mutex_init(&instance->int_cmd_mtx, NULL,
 776                     MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 777                 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
 778 
 779                 mutex_init(&instance->cmd_pool_mtx, NULL,
 780                     MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 781 
 782                 mutex_init(&instance->reg_write_mtx, NULL,
 783                     MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 784 
 785                 if (instance->tbolt) {
 786                         mutex_init(&instance->cmd_app_pool_mtx, NULL,
 787                             MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 788 
 789                         mutex_init(&instance->chip_mtx, NULL,
 790                             MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
 791 
 792                 }
 793 
 794                 instance->unroll.mutexs = 1;
 795 
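                     /*
                      * (timeout_id_t)-1 serves as the "no timeout pending"
                      * sentinel; mrsas_detach() and mrsas_undo_resources()
                      * test against it before calling untimeout().
                      */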
 796                 instance->timeout_id = (timeout_id_t)-1;
 797 
 798                 /* Register our soft-isr for highlevel interrupts. */
 799                 instance->isr_level = instance->intr_pri;
 800                 if (!(instance->tbolt)) {
 801                         if (instance->isr_level == HIGH_LEVEL_INTR) {
 802                                 if (ddi_add_softintr(dip,
 803                                     DDI_SOFTINT_HIGH,
 804                                     &instance->soft_intr_id, NULL, NULL,
 805                                     mrsas_softintr, (caddr_t)instance) !=
 806                                     DDI_SUCCESS) {
 807                                         dev_err(dip, CE_WARN,
 808                                             "Software ISR did not register");
 809 
 810                                         goto fail_attach;
 811                                 }
 812 
 813                                 instance->unroll.soft_isr = 1;
 814 
 815                         }
 816                 }
 817 
 818                 instance->softint_running = 0;
 819 
 820                 /* Allocate a transport structure */
 821                 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
 822 
 823                 if (tran == NULL) {
 824                         dev_err(dip, CE_WARN,
 825                             "scsi_hba_tran_alloc failed");
 826                         goto fail_attach;
 827                 }
 828 
 829                 instance->tran = tran;
 830                 instance->unroll.tran = 1;
 831 
 832                 tran->tran_hba_private       = instance;
 833                 tran->tran_tgt_init  = mrsas_tran_tgt_init;
 834                 tran->tran_tgt_probe = scsi_hba_probe;
 835                 tran->tran_tgt_free  = mrsas_tran_tgt_free;
 836                 tran->tran_init_pkt  = mrsas_tran_init_pkt;
 837                 if (instance->tbolt)
 838                         tran->tran_start = mrsas_tbolt_tran_start;
 839                 else
 840                         tran->tran_start = mrsas_tran_start;
 841                 tran->tran_abort     = mrsas_tran_abort;
 842                 tran->tran_reset     = mrsas_tran_reset;
 843                 tran->tran_getcap    = mrsas_tran_getcap;
 844                 tran->tran_setcap    = mrsas_tran_setcap;
 845                 tran->tran_destroy_pkt       = mrsas_tran_destroy_pkt;
 846                 tran->tran_dmafree   = mrsas_tran_dmafree;
 847                 tran->tran_sync_pkt  = mrsas_tran_sync_pkt;
 848                 tran->tran_quiesce   = mrsas_tran_quiesce;
 849                 tran->tran_unquiesce = mrsas_tran_unquiesce;
 850                 tran->tran_bus_config        = mrsas_tran_bus_config;
 851 
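                     /*
                      * Optionally request relaxed-ordering DMA; the
                      * mrsas_relaxed_ordering tunable defaults to B_TRUE at
                      * the top of this file.
                      */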
 852                 if (mrsas_relaxed_ordering)
 853                         mrsas_generic_dma_attr.dma_attr_flags |=
 854                             DDI_DMA_RELAXED_ORDERING;
 855 
 856 
 857                 tran_dma_attr = mrsas_generic_dma_attr;
 858                 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;
 859 
 860                 /* Attach this instance of the hba */
 861                 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
 862                     != DDI_SUCCESS) {
 863                         dev_err(dip, CE_WARN,
 864                             "scsi_hba_attach failed");
 865 
 866                         goto fail_attach;
 867                 }
 868                 instance->unroll.tranSetup = 1;
 869                 con_log(CL_ANN1,
 870                     (CE_CONT, "scsi_hba_attach_setup()  done."));
 871 
 872                 /* create devctl node for cfgadm command */
 873                 if (ddi_create_minor_node(dip, "devctl",
 874                     S_IFCHR, INST2DEVCTL(instance_no),
 875                     DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
 876                         dev_err(dip, CE_WARN, "failed to create devctl node.");
 877 
 878                         goto fail_attach;
 879                 }
 880 
 881                 instance->unroll.devctl = 1;
 882 
 883                 /* create scsi node for cfgadm command */
 884                 if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
 885                     INST2SCSI(instance_no), DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
 886                     DDI_FAILURE) {
 887                         dev_err(dip, CE_WARN, "failed to create scsi node.");
 888 
 889                         goto fail_attach;
 890                 }
 891 
 892                 instance->unroll.scsictl = 1;
 893 
 894                 (void) snprintf(instance->iocnode, sizeof (instance->iocnode),
 895                     "%d:lsirdctl", instance_no);
 896 
 897                 /*
 898                  * Create a node for applications
 899  * for issuing ioctls to the driver.
 900                  */
 901                 if (ddi_create_minor_node(dip, instance->iocnode,
 902                     S_IFCHR, INST2LSIRDCTL(instance_no), DDI_PSEUDO, 0) ==
 903                     DDI_FAILURE) {
 904                         dev_err(dip, CE_WARN, "failed to create ioctl node.");
 905 
 906                         goto fail_attach;
 907                 }
 908 
 909                 instance->unroll.ioctl = 1;
 910 
 911                 /* Create a taskq to handle dr events */
 912                 if ((instance->taskq = ddi_taskq_create(dip,
 913                     "mrsas_dr_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
 914                         dev_err(dip, CE_WARN, "failed to create taskq.");
 915                         instance->taskq = NULL;
 916                         goto fail_attach;
 917                 }
 918                 instance->unroll.taskq = 1;
 919                 con_log(CL_ANN1, (CE_CONT, "ddi_taskq_create() done."));
 920 
 921                 /* enable interrupt */
 922                 instance->func_ptr->enable_intr(instance);
 923 
 924                 /* initiate AEN */
 925                 if (start_mfi_aen(instance)) {
 926                         dev_err(dip, CE_WARN, "failed to initiate AEN.");
 927                         goto fail_attach;
 928                 }
 929                 instance->unroll.aenPend = 1;
 930                 con_log(CL_ANN1,
 931                     (CE_CONT, "AEN started for instance %d.", instance_no));
 932 
 933                 /* Finally! We are on the air.  */
 934                 ddi_report_dev(dip);
 935 
 936                 /* FMA handle checking. */
 937                 if (mrsas_check_acc_handle(instance->regmap_handle) !=
 938                     DDI_SUCCESS) {
 939                         goto fail_attach;
 940                 }
 941                 if (mrsas_check_acc_handle(instance->pci_handle) !=
 942                     DDI_SUCCESS) {
 943                         goto fail_attach;
 944                 }
 945 
 946                 instance->mr_ld_list =
 947                     kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld),
 948                     KM_SLEEP);
 949                 instance->unroll.ldlist_buff = 1;
 950 
 951                 if (instance->tbolt || instance->skinny) {
 952                         instance->mr_tbolt_pd_max = MRSAS_TBOLT_PD_TGT_MAX;
 953                         instance->mr_tbolt_pd_list =
 954                             kmem_zalloc(MRSAS_TBOLT_GET_PD_MAX(instance) *
 955                             sizeof (struct mrsas_tbolt_pd), KM_SLEEP);
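                             /*
                              * KM_SLEEP allocations cannot fail, so this
                              * ASSERT is documentation only.
                              */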
 956                         ASSERT(instance->mr_tbolt_pd_list);
 957                         for (i = 0; i < instance->mr_tbolt_pd_max; i++) {
 958                                 instance->mr_tbolt_pd_list[i].lun_type =
 959                                     MRSAS_TBOLT_PD_LUN;
 960                                 instance->mr_tbolt_pd_list[i].dev_id =
 961                                     (uint8_t)i;
 962                         }
 963 
 964                         instance->unroll.pdlist_buff = 1;
 965                 }
 966                 break;
 967         case DDI_PM_RESUME:
 968                 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_PM_RESUME"));
 969                 break;
 970         case DDI_RESUME:
 971                 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_RESUME"));
 972                 break;
 973         default:
 974                 con_log(CL_ANN,
 975                     (CE_WARN, "mr_sas: invalid attach cmd=%x", cmd));
 976                 return (DDI_FAILURE);
 977         }
 978 
 979 
 980         con_log(CL_DLEVEL1,
 981             (CE_NOTE, "mrsas_attach() return SUCCESS instance_num %d",
 982             instance_no));
 983         return (DDI_SUCCESS);
 984 
 985 fail_attach:
 986 
 987         mrsas_undo_resources(dip, instance);
 988 
 989         mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
 990         ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
 991 
 992         mrsas_fm_fini(instance);
 993 
 994         pci_config_teardown(&instance->pci_handle);
 995         ddi_soft_state_free(mrsas_state, instance_no);
 996 
 997         return (DDI_FAILURE);
 998 }
 999 
1000 /*
1001  * getinfo - gets device information
1002  * @dip:
1003  * @cmd:
1004  * @arg:
1005  * @resultp:
1006  *
1007  * The system calls getinfo() to obtain configuration information that only
1008  * the driver knows. The mapping of minor numbers to device instance is
1009  * entirely under the control of the driver. The system sometimes needs to ask
1010  * the driver which device a particular dev_t represents.
1011  * Given the device number, return the devinfo pointer held in the soft
1012  * state of the corresponding controller instance.
1013  */
1014 /*ARGSUSED*/
1015 static int
1016 mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd,  void *arg, void **resultp)
1017 {
1018         int     rval;
1019         int     mrsas_minor = getminor((dev_t)arg);
1020 
1021         struct mrsas_instance   *instance;
1022 
1023         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1024 
1025         switch (cmd) {
1026                 case DDI_INFO_DEVT2DEVINFO:
1027                         instance = (struct mrsas_instance *)
1028                             ddi_get_soft_state(mrsas_state,
1029                             MINOR2INST(mrsas_minor));
1030 
1031                         if (instance == NULL) {
1032                                 *resultp = NULL;
1033                                 rval = DDI_FAILURE;
1034                         } else {
1035                                 *resultp = instance->dip;
1036                                 rval = DDI_SUCCESS;
1037                         }
1038                         break;
1039                 case DDI_INFO_DEVT2INSTANCE:
1040                         *resultp = (void *)(intptr_t)
1041                             (MINOR2INST(getminor((dev_t)arg)));
1042                         rval = DDI_SUCCESS;
1043                         break;
1044                 default:
1045                         *resultp = NULL;
1046                         rval = DDI_FAILURE;
1047         }
1048 
1049         return (rval);
1050 }
1051 
1052 /*
1053  * detach - detaches a device from the system
1054  * @dip: pointer to the device's dev_info structure
1055  * @cmd: type of detach
1056  *
1057  * A driver's detach() entry point is called to detach an instance of a device
1058  * that is bound to the driver. The entry point is called with the instance of
1059  * the device node to be detached and with DDI_DETACH, which is specified as
1060  * the cmd argument to the entry point.
1061  * This routine is called during driver unload. We free all the allocated
1062  * resources and call the corresponding LLD so that it can also release all
1063  * its resources.
1064  */
1065 static int
1066 mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1067 {
1068         int     instance_no;
1069 
1070         struct mrsas_instance   *instance;
1071 
1072         con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1073 
1074 
1075         /* CONSTCOND */
1076         ASSERT(NO_COMPETING_THREADS);
1077 
1078         instance_no = ddi_get_instance(dip);
1079 
1080         instance = (struct mrsas_instance *)ddi_get_soft_state(mrsas_state,
1081             instance_no);
1082 
1083         if (!instance) {
1084                 dev_err(dip, CE_WARN, "could not get instance in detach");
1085 
1086                 return (DDI_FAILURE);
1087         }
1088 
1089         switch (cmd) {
1090                 case DDI_DETACH:
1091                         con_log(CL_ANN, (CE_NOTE,
1092                             "mrsas_detach: DDI_DETACH"));
1093 
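                             /*
                              * Drop config_dev_mtx around untimeout():
                              * presumably the timeout handler can take the
                              * same mutex, so holding it here could deadlock.
                              */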
1094                         mutex_enter(&instance->config_dev_mtx);
1095                         if (instance->timeout_id != (timeout_id_t)-1) {
1096                                 mutex_exit(&instance->config_dev_mtx);
1097                                 (void) untimeout(instance->timeout_id);
1098                                 instance->timeout_id = (timeout_id_t)-1;
1099                                 mutex_enter(&instance->config_dev_mtx);
1100                                 instance->unroll.timer = 0;
1101                         }
1102                         mutex_exit(&instance->config_dev_mtx);
1103 
1104                         if (instance->unroll.tranSetup == 1) {
1105                                 if (scsi_hba_detach(dip) != DDI_SUCCESS) {
1106                                         dev_err(dip, CE_WARN,
1107                                             "failed to detach");
1108                                         return (DDI_FAILURE);
1109                                 }
1110                                 instance->unroll.tranSetup = 0;
1111                                 con_log(CL_ANN1,
1112                                     (CE_CONT, "scsi_hba_detach()  done."));
1113                         }
1114 
1115                         flush_cache(instance);
1116 
1117                         mrsas_undo_resources(dip, instance);
1118 
1119                         mrsas_fm_fini(instance);
1120 
1121                         pci_config_teardown(&instance->pci_handle);
1122                         ddi_soft_state_free(mrsas_state, instance_no);
1123                         break;
1124 
1125                 case DDI_PM_SUSPEND:
1126                         con_log(CL_ANN, (CE_NOTE,
1127                             "mrsas_detach: DDI_PM_SUSPEND"));
1128 
1129                         break;
1130                 case DDI_SUSPEND:
1131                         con_log(CL_ANN, (CE_NOTE,
1132                             "mrsas_detach: DDI_SUSPEND"));
1133 
1134                         break;
1135                 default:
1136                         con_log(CL_ANN, (CE_WARN,
1137                             "invalid detach command:0x%x", cmd));
1138                         return (DDI_FAILURE);
1139         }
1140 
1141         return (DDI_SUCCESS);
1142 }
1143 
1144 
1145 static void
1146 mrsas_undo_resources(dev_info_t *dip, struct mrsas_instance *instance)
1147 {
1148         con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1149 
1150         if (instance->unroll.ioctl == 1) {
1151                 ddi_remove_minor_node(dip, instance->iocnode);
1152                 instance->unroll.ioctl = 0;
1153         }
1154 
1155         if (instance->unroll.scsictl == 1) {
1156                 ddi_remove_minor_node(dip, "scsi");
1157                 instance->unroll.scsictl = 0;
1158         }
1159 
1160         if (instance->unroll.devctl == 1) {
1161                 ddi_remove_minor_node(dip, "devctl");
1162                 instance->unroll.devctl = 0;
1163         }
1164 
1165         if (instance->unroll.tranSetup == 1) {
1166                 if (scsi_hba_detach(dip) != DDI_SUCCESS) {
1167                         dev_err(dip, CE_WARN, "failed to detach");
1168                         return;  /* DDI_FAILURE */
1169                 }
1170                 instance->unroll.tranSetup = 0;
1171                 con_log(CL_ANN1, (CE_CONT, "scsi_hba_detach()  done."));
1172         }
1173 
1174         if (instance->unroll.tran == 1)       {
1175                 scsi_hba_tran_free(instance->tran);
1176                 instance->unroll.tran = 0;
1177                 con_log(CL_ANN1, (CE_CONT, "scsi_hba_tran_free()  done."));
1178         }
1179 
1180         if (instance->unroll.syncCmd == 1) {
1181                 if (instance->tbolt) {
1182                         if (abort_syncmap_cmd(instance,
1183                             instance->map_update_cmd)) {
1184                                 dev_err(dip, CE_WARN, "mrsas_detach: "
1185                                     "failed to abort previous syncmap command");
1186                         }
1187 
1188                         instance->unroll.syncCmd = 0;
1189                         con_log(CL_ANN1, (CE_CONT, "sync cmd aborted, done."));
1190                 }
1191         }
1192 
1193         if (instance->unroll.aenPend == 1) {
1194                 if (abort_aen_cmd(instance, instance->aen_cmd))
1195                         dev_err(dip, CE_WARN, "mrsas_detach: "
1196                             "failed to abort previous AEN command");
1197 
1198                 instance->unroll.aenPend = 0;
1199                 con_log(CL_ANN1, (CE_CONT, "aen cmd aborted, done."));
1200                 /* This means the controller is fully initialized and running */
1201                 /* Shutdown should be the last command sent to the controller. */
1202                 /* shutdown_controller(); */
1203         }
1204 
1205 
1206         if (instance->unroll.timer == 1)      {
1207                 if (instance->timeout_id != (timeout_id_t)-1) {
1208                         (void) untimeout(instance->timeout_id);
1209                         instance->timeout_id = (timeout_id_t)-1;
1210 
1211                         instance->unroll.timer = 0;
1212                 }
1213         }
1214 
1215         instance->func_ptr->disable_intr(instance);
1216 
1217 
1218         if (instance->unroll.mutexs == 1) {
1219                 mutex_destroy(&instance->cmd_pool_mtx);
1220                 mutex_destroy(&instance->app_cmd_pool_mtx);
1221                 mutex_destroy(&instance->cmd_pend_mtx);
1222                 mutex_destroy(&instance->completed_pool_mtx);
1223                 mutex_destroy(&instance->sync_map_mtx);
1224                 mutex_destroy(&instance->int_cmd_mtx);
1225                 cv_destroy(&instance->int_cmd_cv);
1226                 mutex_destroy(&instance->config_dev_mtx);
1227                 mutex_destroy(&instance->ocr_flags_mtx);
1228                 mutex_destroy(&instance->reg_write_mtx);
1229 
1230                 if (instance->tbolt) {
1231                         mutex_destroy(&instance->cmd_app_pool_mtx);
1232                         mutex_destroy(&instance->chip_mtx);
1233                 }
1234 
1235                 instance->unroll.mutexs = 0;
1236                 con_log(CL_ANN1, (CE_CONT, "Destroy mutex & cv, done."));
1237         }
1238 
1239 
1240         if (instance->unroll.soft_isr == 1) {
1241                 ddi_remove_softintr(instance->soft_intr_id);
1242                 instance->unroll.soft_isr = 0;
1243         }
1244 
1245         if (instance->unroll.intr == 1) {
1246                 mrsas_rem_intrs(instance);
1247                 instance->unroll.intr = 0;
1248         }
1249 
1250 
1251         if (instance->unroll.taskq == 1)      {
1252                 if (instance->taskq) {
1253                         ddi_taskq_destroy(instance->taskq);
1254                         instance->unroll.taskq = 0;
1255                 }
1256 
1257         }
1258 
1259         /*
1260          * free dma memory allocated for
1261          * cmds/frames/queues/driver version etc
1262          */
1263         if (instance->unroll.verBuff == 1) {
1264                 (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1265                 instance->unroll.verBuff = 0;
1266         }
1267 
1268         if (instance->unroll.pdlist_buff == 1)       {
1269                 if (instance->mr_tbolt_pd_list != NULL) {
1270                         kmem_free(instance->mr_tbolt_pd_list,
1271                             MRSAS_TBOLT_GET_PD_MAX(instance) *
1272                             sizeof (struct mrsas_tbolt_pd));
1273                 }
1274 
1275                 instance->mr_tbolt_pd_list = NULL;
1276                 instance->unroll.pdlist_buff = 0;
1277         }
1278 
1279         if (instance->unroll.ldlist_buff == 1)       {
1280                 if (instance->mr_ld_list != NULL) {
1281                         kmem_free(instance->mr_ld_list, MRDRV_MAX_LD
1282                             * sizeof (struct mrsas_ld));
1283                 }
1284 
1285                 instance->mr_ld_list = NULL;
1286                 instance->unroll.ldlist_buff = 0;
1287         }
1288 
1289         if (instance->tbolt) {
1290                 if (instance->unroll.alloc_space_mpi2 == 1) {
1291                         free_space_for_mpi2(instance);
1292                         instance->unroll.alloc_space_mpi2 = 0;
1293                 }
1294         } else {
1295                 if (instance->unroll.alloc_space_mfi == 1) {
1296                         free_space_for_mfi(instance);
1297                         instance->unroll.alloc_space_mfi = 0;
1298                 }
1299         }
1300 
1301         if (instance->unroll.regs == 1)       {
1302                 ddi_regs_map_free(&instance->regmap_handle);
1303                 instance->unroll.regs = 0;
1304                 con_log(CL_ANN1, (CE_CONT, "ddi_regs_map_free()  done."));
1305         }
1306 }
1307 
1308 
1309 
1310 /*
1311  * ************************************************************************** *
1312  *                                                                            *
1313  *             common entry points - for character driver types               *
1314  *                                                                            *
1315  * ************************************************************************** *
1316  */
1317 /*
1318  * open - gets access to a device
1319  * @dev:        pointer to the device number
1320  * @openflags:  open flags (FREAD, FWRITE, FEXCL, ...)
1321  * @otyp:       open type; must be OTYP_CHR
1322  * @credp:      credentials of the calling process
1323  *
1324  * Access to a device by one or more application programs is controlled
1325  * through the open() and close() entry points. The primary function of
1326  * open() is to verify that the open request is allowed.
1327  */
1328 static  int
1329 mrsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
1330 {
1331         int     rval = 0;
1332 
1333         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1334 
1335         /* Check root permissions */
1336         if (drv_priv(credp) != 0) {
1337                 con_log(CL_ANN, (CE_WARN,
1338                     "mr_sas: Non-root ioctl access denied!"));
1339                 return (EPERM);
1340         }
1341 
1342         /* Verify we are being opened as a character device */
1343         if (otyp != OTYP_CHR) {
1344                 con_log(CL_ANN, (CE_WARN,
1345                     "mr_sas: ioctl node must be a char node"));
1346                 return (EINVAL);
1347         }
1348 
1349         if (ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(*dev)))
1350             == NULL) {
1351                 return (ENXIO);
1352         }
1353 
1354         if (scsi_hba_open) {
1355                 rval = scsi_hba_open(dev, openflags, otyp, credp);
1356         }
1357 
1358         return (rval);
1359 }
1360 
1361 /*
1362  * close - gives up access to a device
1363  * @dev:        device number
1364  * @openflags:  flags supplied at open time
1365  * @otyp:       open type (OTYP_CHR)
1366  * @credp:      credentials of the calling process
1367  *
1368  * close() should perform any cleanup necessary to finish using the minor
1369  * device, and prepare the device (and driver) to be opened again.
1370  */
1371 static  int
1372 mrsas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
1373 {
1374         int     rval = 0;
1375 
1376         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1377 
1378         /* no need for locks! */
1379 
1380         if (scsi_hba_close) {
1381                 rval = scsi_hba_close(dev, openflags, otyp, credp);
1382         }
1383 
1384         return (rval);
1385 }
1386 
1387 /*
1388  * ioctl - performs a range of I/O commands for character drivers
1389  * @dev:        device number
1390  * @cmd:        ioctl command code
1391  * @arg:        userland argument (ioctl payload)
1392  * @mode:       data model and transfer flags for ddi_copyin/ddi_copyout
1393  * @credp:      credentials of the calling process
1394  * @rvalp:      return value for the calling process
1395  *
1396  * ioctl() routine must make sure that user data is copied into or out of the
1397  * kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
1398  * and ddi_copyout(), as appropriate.
1399  * This is a wrapper routine to serialize access to the actual ioctl routine.
1400  * ioctl() should return 0 on success, or the appropriate error number. The
1401  * driver may also set the value returned to the calling process through rvalp.
1402  */
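     /*
      * For illustration only, a sketch of how a userland management utility
      * might reach this entry point.  The device path shown is hypothetical;
      * the actual minor node depends on how the instance attached.
      *
      *      int fd = open("/devices/<hba-path>:devctl", O_RDWR);
      *      struct mrsas_aen aen;
      *      ... fill in the AEN registration fields from mr_sas.h ...
      *      if (ioctl(fd, MRSAS_IOCTL_AEN, &aen) != 0)
      *              perror("MRSAS_IOCTL_AEN");
      *
      * The wrapper below copies the argument in, dispatches on the command
      * code, and copies the (possibly updated) structure back out.
      */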
1403 
1404 static int
1405 mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1406     int *rvalp)
1407 {
1408         int     rval = 0;
1409 
1410         struct mrsas_instance   *instance;
1411         struct mrsas_ioctl      *ioctl;
1412         struct mrsas_aen        aen;
1413         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1414 
1415         instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev)));
1416 
1417         if (instance == NULL) {
1418                 /* invalid minor number */
1419                 con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found."));
1420                 return (ENXIO);
1421         }
1422 
1423         ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl),
1424             KM_SLEEP);
1425         ASSERT(ioctl);
1426 
1427         switch ((uint_t)cmd) {
1428                 case MRSAS_IOCTL_FIRMWARE:
1429                         if (ddi_copyin((void *)arg, ioctl,
1430                             sizeof (struct mrsas_ioctl), mode)) {
1431                                 con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: "
1432                                     "ERROR IOCTL copyin"));
1433                                 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1434                                 return (EFAULT);
1435                         }
1436 
1437                         if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) {
1438                                 rval = handle_drv_ioctl(instance, ioctl, mode);
1439                         } else {
1440                                 rval = handle_mfi_ioctl(instance, ioctl, mode);
1441                         }
1442 
1443                         if (ddi_copyout((void *)ioctl, (void *)arg,
1444                             (sizeof (struct mrsas_ioctl) - 1), mode)) {
1445                                 con_log(CL_ANN, (CE_WARN,
1446                                     "mrsas_ioctl: ddi_copyout failed"));
1447                                 rval = EFAULT;
1448                         }
1449 
1450                         break;
1451                 case MRSAS_IOCTL_AEN:
1452                         if (ddi_copyin((void *) arg, &aen,
1453                             sizeof (struct mrsas_aen), mode)) {
1454                                 con_log(CL_ANN, (CE_WARN,
1455                                     "mrsas_ioctl: ERROR AEN copyin"));
1456                                 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1457                                 return (EFAULT);
1458                         }
1459 
1460                         rval = handle_mfi_aen(instance, &aen);
1461 
1462                         if (ddi_copyout((void *) &aen, (void *)arg,
1463                             sizeof (struct mrsas_aen), mode)) {
1464                                 con_log(CL_ANN, (CE_WARN,
1465                                     "mrsas_ioctl: ddi_copyout failed"));
1466                                 rval = EFAULT;
1467                         }
1468 
1469                         break;
1470                 default:
1471                         rval = scsi_hba_ioctl(dev, cmd, arg,
1472                             mode, credp, rvalp);
1473 
1474                         con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_ioctl: "
1475                             "scsi_hba_ioctl called, ret = %x.", rval));
1476         }
1477 
1478         kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1479         return (rval);
1480 }
1481 
1482 /*
1483  * ************************************************************************** *
1484  *                                                                            *
1485  *               common entry points - for block driver types                 *
1486  *                                                                            *
1487  * ************************************************************************** *
1488  */
1489 #ifdef __sparc
1490 /*
1491  * reset - prepare the controller for an OS reset (sparc only)
1492  * @dip:        devinfo pointer of this instance
1493  * @cmd:        reset type (unused)
1494  *
1495  * Disables interrupts and flushes the controller cache before the reset.
1496  */
1497 /*ARGSUSED*/
1498 static int
1499 mrsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1500 {
1501         int     instance_no;
1502 
1503         struct mrsas_instance   *instance;
1504 
1505         instance_no = ddi_get_instance(dip);
1506         instance = (struct mrsas_instance *)ddi_get_soft_state
1507             (mrsas_state, instance_no);
1508 
1509         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1510 
1511         if (!instance) {
1512                 con_log(CL_ANN, (CE_WARN, "mr_sas:%d could not get adapter "
1513                     "in reset", instance_no));
1514                 return (DDI_FAILURE);
1515         }
1516 
1517         instance->func_ptr->disable_intr(instance);
1518 
1519         con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1520             instance_no));
1521 
1522         flush_cache(instance);
1523 
1524         return (DDI_SUCCESS);
1525 }
1526 #else /* __sparc */
1527 /*ARGSUSED*/
1528 static int
1529 mrsas_quiesce(dev_info_t *dip)
1530 {
1531         int     instance_no;
1532 
1533         struct mrsas_instance   *instance;
1534 
1535         instance_no = ddi_get_instance(dip);
1536         instance = (struct mrsas_instance *)ddi_get_soft_state
1537             (mrsas_state, instance_no);
1538 
1539         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1540 
1541         if (!instance) {
1542                 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d could not get adapter "
1543                     "in quiesce", instance_no));
1544                 return (DDI_FAILURE);
1545         }
1546         if (instance->deadadapter || instance->adapterresetinprogress) {
1547                 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d adapter is not in "
1548                     "healthy state", instance_no));
1549                 return (DDI_FAILURE);
1550         }
1551 
1552         if (abort_aen_cmd(instance, instance->aen_cmd)) {
1553                 con_log(CL_ANN1, (CE_WARN, "mrsas_quiesce: "
1554                     "failed to abort previous AEN command in quiesce"));
1555         }
1556 
1557         if (instance->tbolt) {
1558                 if (abort_syncmap_cmd(instance,
1559                     instance->map_update_cmd)) {
1560                         dev_err(dip, CE_WARN,
1561                             "mrsas_quiesce: failed to abort "
1562                             "previous syncmap command");
1563                         return (DDI_FAILURE);
1564                 }
1565         }
1566 
1567         instance->func_ptr->disable_intr(instance);
1568 
1569         con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1570             instance_no));
1571 
1572         flush_cache(instance);
1573 
1574         if (wait_for_outstanding(instance)) {
1575                 con_log(CL_ANN1,
1576                     (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
1577                 return (DDI_FAILURE);
1578         }
1579         return (DDI_SUCCESS);
1580 }
1581 #endif  /* __sparc */
1582 
1583 /*
1584  * ************************************************************************** *
1585  *                                                                            *
1586  *                          entry points (SCSI HBA)                           *
1587  *                                                                            *
1588  * ************************************************************************** *
1589  */
1590 /*
1591  * tran_tgt_init - initialize a target device instance
1592  * @hba_dip:    devinfo node of this HBA instance
1593  * @tgt_dip:    devinfo node of the target being initialized
1594  * @tran:       scsi_hba_tran structure for this HBA
1595  * @sd:         scsi_device structure for the target
1596  *
1597  * The tran_tgt_init() entry point enables the HBA to allocate and initialize
1598  * any per-target resources. tran_tgt_init() also enables the HBA to qualify
1599  * the device's address as valid and supportable for that particular HBA.
1600  * By returning DDI_FAILURE, the instance of the target driver for that device
1601  * is not probed or attached.
1602  */
1603 /*ARGSUSED*/
1604 static int
1605 mrsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1606     scsi_hba_tran_t *tran, struct scsi_device *sd)
1607 {
1608         struct mrsas_instance *instance;
1609         uint16_t tgt = sd->sd_address.a_target;
1610         uint8_t lun = sd->sd_address.a_lun;
1611         dev_info_t *child = NULL;
1612 
1613         con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init target %d lun %d",
1614             tgt, lun));
1615 
1616         instance = ADDR2MR(&sd->sd_address);
1617 
1618         if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
1619                 /*
1620                  * If no persistent node exists, we don't allow .conf node
1621                  * to be created.
1622                  */
1623                 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
1624                         con_log(CL_DLEVEL2,
1625                             (CE_NOTE, "mrsas_tgt_init find child ="
1626                             " %p t = %d l = %d", (void *)child, tgt, lun));
1627                         if (ndi_merge_node(tgt_dip, mrsas_name_node) !=
1628                             DDI_SUCCESS)
1629                                 /* Create this .conf node */
1630                                 return (DDI_SUCCESS);
1631                 }
1632                 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init in ndi_per "
1633                     "DDI_FAILURE t = %d l = %d", tgt, lun));
1634                 return (DDI_FAILURE);
1635 
1636         }
1637 
1638         con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p",
1639             (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip));
1640 
1641         if (tgt < MRDRV_MAX_LD && lun == 0) {
1642                 if (instance->mr_ld_list[tgt].dip == NULL &&
1643                     strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) {
1644                         mutex_enter(&instance->config_dev_mtx);
1645                         instance->mr_ld_list[tgt].dip = tgt_dip;
1646                         instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN;
1647                         instance->mr_ld_list[tgt].flag = MRDRV_TGT_VALID;
1648                         mutex_exit(&instance->config_dev_mtx);
1649                 }
1650         } else if (instance->tbolt || instance->skinny) {
1651                 if (instance->mr_tbolt_pd_list[tgt].dip == NULL) {
1652                         mutex_enter(&instance->config_dev_mtx);
1653                         instance->mr_tbolt_pd_list[tgt].dip = tgt_dip;
1654                         instance->mr_tbolt_pd_list[tgt].flag =
1655                             MRDRV_TGT_VALID;
1656                         mutex_exit(&instance->config_dev_mtx);
1657                         con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_tgt_init:"
1658                             "t%xl%x", tgt, lun));
1659                 }
1660         }
1661 
1662         return (DDI_SUCCESS);
1663 }
1664 
1665 /*ARGSUSED*/
1666 static void
1667 mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1668     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1669 {
1670         struct mrsas_instance *instance;
1671         int tgt = sd->sd_address.a_target;
1672         int lun = sd->sd_address.a_lun;
1673 
1674         instance = ADDR2MR(&sd->sd_address);
1675 
1676         con_log(CL_DLEVEL2, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun));
1677 
1678         if (tgt < MRDRV_MAX_LD && lun == 0) {
1679                 if (instance->mr_ld_list[tgt].dip == tgt_dip) {
1680                         mutex_enter(&instance->config_dev_mtx);
1681                         instance->mr_ld_list[tgt].dip = NULL;
1682                         mutex_exit(&instance->config_dev_mtx);
1683                 }
1684         } else if (instance->tbolt || instance->skinny) {
1685                 mutex_enter(&instance->config_dev_mtx);
1686                 instance->mr_tbolt_pd_list[tgt].dip = NULL;
1687                 mutex_exit(&instance->config_dev_mtx);
1688                 con_log(CL_ANN1, (CE_NOTE, "tgt_free: Setting dip = NULL"
1689                     "for tgt:%x", tgt));
1690         }
1691 }
1692 
1693 dev_info_t *
1694 mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun)
1695 {
1696         dev_info_t *child = NULL;
1697         char addr[SCSI_MAXNAMELEN];
1698         char tmp[MAXNAMELEN];
1699 
1700         (void) snprintf(addr, sizeof (addr), "%x,%x", tgt, lun);
1701         for (child = ddi_get_child(instance->dip); child;
1702             child = ddi_get_next_sibling(child)) {
1703 
1704                 if (ndi_dev_is_persistent_node(child) == 0) {
1705                         continue;
1706                 }
1707 
1708                 if (mrsas_name_node(child, tmp, MAXNAMELEN) !=
1709                     DDI_SUCCESS) {
1710                         continue;
1711                 }
1712 
1713                 if (strcmp(addr, tmp) == 0) {
1714                         break;
1715                 }
1716         }
1717         con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_find_child: return child = %p",
1718             (void *)child));
1719         return (child);
1720 }
1721 
1722 /*
1723  * mrsas_name_node - build the "target,lun" unit address for a child node
1724  * @dip:        devinfo node carrying the "target" and "lun" properties
1725  * @name:       buffer that receives the unit address (e.g. "2,0")
1726  * @len:        size of that buffer
1727  */
1728 static int
1729 mrsas_name_node(dev_info_t *dip, char *name, int len)
1730 {
1731         int tgt, lun;
1732 
1733         tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1734             DDI_PROP_DONTPASS, "target", -1);
1735         con_log(CL_DLEVEL2, (CE_NOTE,
1736             "mrsas_name_node: dip %p tgt %d", (void *)dip, tgt));
1737         if (tgt == -1) {
1738                 return (DDI_FAILURE);
1739         }
1740         lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1741             "lun", -1);
1742         con_log(CL_DLEVEL2,
1743             (CE_NOTE, "mrsas_name_node: tgt %d lun %d", tgt, lun));
1744         if (lun == -1) {
1745                 return (DDI_FAILURE);
1746         }
1747         (void) snprintf(name, len, "%x,%x", tgt, lun);
1748         return (DDI_SUCCESS);
1749 }
1750 
1751 /*
1752  * tran_init_pkt - allocate & initialize a scsi_pkt structure
1753  * @ap:         SCSI address of the target
1754  * @pkt:        existing packet to reuse, or NULL to allocate a new one
1755  * @bp:         buf(9S) describing the data transfer, if any
1756  * @cmdlen:     required CDB length
1757  * @statuslen:  required status/sense length
1758  * @tgtlen:     length of the target driver's private area
1759  * @flags:      packet and DMA allocation flags
1760  * @callback:   resource callback (SLEEP_FUNC or NULL_FUNC)
1761  *
1762  * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1763  * structure and DMA resources for a target driver request. The
1764  * tran_init_pkt() entry point is called when the target driver calls the
1765  * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1766  * is a request to perform one or more of three possible services:
1767  *  - allocation and initialization of a scsi_pkt structure
1768  *  - allocation of DMA resources for data transfer
1769  *  - reallocation of DMA resources for the next portion of the data transfer
1770  */
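     /*
      * For reference, a sketch (not code from this driver) of the target
      * driver call that arrives here through SCSA; the scsi_init_pkt(9F)
      * arguments map directly onto the parameters above:
      *
      *      pkt = scsi_init_pkt(&devp->sd_address, NULL, bp, CDB_GROUP1,
      *          SENSE_LENGTH, tgt_priv_len, 0, SLEEP_FUNC, NULL);
      *
      * "devp", "bp" and "tgt_priv_len" are whatever the target driver has
      * on hand.  A NULL pkt requests a fresh allocation (step #1 below); a
      * non-NULL bp additionally requests DMA resources (step #2).
      */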
1771 static struct scsi_pkt *
1772 mrsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
1773     struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1774     int flags, int (*callback)(), caddr_t arg)
1775 {
1776         struct scsa_cmd *acmd;
1777         struct mrsas_instance   *instance;
1778         struct scsi_pkt *new_pkt;
1779 
1780         con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1781 
1782         instance = ADDR2MR(ap);
1783 
1784         /* step #1 : pkt allocation */
1785         if (pkt == NULL) {
1786                 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
1787                     tgtlen, sizeof (struct scsa_cmd), callback, arg);
1788                 if (pkt == NULL) {
1789                         return (NULL);
1790                 }
1791 
1792                 acmd = PKT2CMD(pkt);
1793 
1794                 /*
1795                  * Initialize the new pkt - we redundantly initialize
1796                  * all the fields for illustrative purposes.
1797                  */
1798                 acmd->cmd_pkt                = pkt;
1799                 acmd->cmd_flags              = 0;
1800                 acmd->cmd_scblen     = statuslen;
1801                 acmd->cmd_cdblen     = cmdlen;
1802                 acmd->cmd_dmahandle  = NULL;
1803                 acmd->cmd_ncookies   = 0;
1804                 acmd->cmd_cookie     = 0;
1805                 acmd->cmd_cookiecnt  = 0;
1806                 acmd->cmd_nwin               = 0;
1807 
1808                 pkt->pkt_address     = *ap;
1809                 pkt->pkt_comp                = (void (*)())NULL;
1810                 pkt->pkt_flags               = 0;
1811                 pkt->pkt_time                = 0;
1812                 pkt->pkt_resid               = 0;
1813                 pkt->pkt_state               = 0;
1814                 pkt->pkt_statistics  = 0;
1815                 pkt->pkt_reason              = 0;
1816                 new_pkt                 = pkt;
1817         } else {
1818                 acmd = PKT2CMD(pkt);
1819                 new_pkt = NULL;
1820         }
1821 
1822         /* step #2 : dma allocation/move */
1823         if (bp && bp->b_bcount != 0) {
1824                 if (acmd->cmd_dmahandle == NULL) {
1825                         if (mrsas_dma_alloc(instance, pkt, bp, flags,
1826                             callback) == DDI_FAILURE) {
1827                                 if (new_pkt) {
1828                                         scsi_hba_pkt_free(ap, new_pkt);
1829                                 }
1830                                 return ((struct scsi_pkt *)NULL);
1831                         }
1832                 } else {
1833                         if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
1834                                 return ((struct scsi_pkt *)NULL);
1835                         }
1836                 }
1837         }
1838 
1839         return (pkt);
1840 }
1841 
1842 /*
1843  * tran_start - transport a SCSI command to the addressed target
1844  * @ap:         SCSI address of the target
1845  * @pkt:        packet to be transported
1846  *
1847  * The tran_start() entry point for a SCSI HBA driver is called to transport a
1848  * SCSI command to the addressed target. The SCSI command is described
1849  * entirely within the scsi_pkt structure, which the target driver allocated
1850  * through the HBA driver's tran_init_pkt() entry point. If the command
1851  * involves a data transfer, DMA resources must also have been allocated for
1852  * the scsi_pkt structure.
1853  *
1854  * Return Values :
1855  *      TRAN_BUSY - request queue is full, no more free scbs
1856  *      TRAN_ACCEPT - pkt has been submitted to the instance
1857  */
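     /*
      * For reference, the target-driver side is scsi_transport(9F); a
      * TRAN_BUSY return is normally queued and retried, e.g.:
      *
      *      switch (scsi_transport(pkt)) {
      *      case TRAN_ACCEPT:
      *              break;                  (completion via pkt_comp)
      *      case TRAN_BUSY:
      *              ... requeue the packet and retry after a short delay ...
      *              break;
      *      default:
      *              ... treat as a transport error ...
      *      }
      */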
1858 static int
1859 mrsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
1860 {
1861         uchar_t         cmd_done = 0;
1862 
1863         struct mrsas_instance   *instance = ADDR2MR(ap);
1864         struct mrsas_cmd        *cmd;
1865 
1866         con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1867         if (instance->deadadapter == 1) {
1868                 con_log(CL_ANN1, (CE_WARN,
1869                     "mrsas_tran_start: return TRAN_FATAL_ERROR "
1870                     "for IO, as the HBA doesn't take any more IOs"));
1871                 if (pkt) {
1872                         pkt->pkt_reason              = CMD_DEV_GONE;
1873                         pkt->pkt_statistics  = STAT_DISCON;
1874                 }
1875                 return (TRAN_FATAL_ERROR);
1876         }
1877 
1878         if (instance->adapterresetinprogress) {
1879                 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_start: Reset flag set, "
1880                     "returning TRAN_BUSY\n"));
1881                 return (TRAN_BUSY);
1882         }
1883 
1884         con_log(CL_ANN1, (CE_CONT, "chkpnt:%s:%d:SCSI CDB[0]=0x%x time:%x",
1885             __func__, __LINE__, pkt->pkt_cdbp[0], pkt->pkt_time));
1886 
1887         pkt->pkt_reason      = CMD_CMPLT;
1888         *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
1889 
1890         cmd = build_cmd(instance, ap, pkt, &cmd_done);
1891 
1892         /*
1893          * Check if the command was already completed inside build_cmd().
1894          * In that case cmd_done is set, no command structure is returned,
1895          * and the appropriate reason has been recorded in pkt_reason.
1896          */
1897         if (cmd_done) {
1898                 pkt->pkt_reason = CMD_CMPLT;
1899                 pkt->pkt_scbp[0] = STATUS_GOOD;
1900                 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1901                     | STATE_SENT_CMD;
1902                 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1903                         (*pkt->pkt_comp)(pkt);
1904                 }
1905 
1906                 return (TRAN_ACCEPT);
1907         }
1908 
1909         if (cmd == NULL) {
1910                 return (TRAN_BUSY);
1911         }
1912 
1913         if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1914                 if (instance->fw_outstanding > instance->max_fw_cmds) {
1915                         con_log(CL_ANN, (CE_CONT, "mr_sas:Firmware busy"));
1916                         DTRACE_PROBE2(start_tran_err,
1917                             uint16_t, instance->fw_outstanding,
1918                             uint16_t, instance->max_fw_cmds);
1919                         mrsas_return_mfi_pkt(instance, cmd);
1920                         return (TRAN_BUSY);
1921                 }
1922 
1923                 /* Synchronize the Cmd frame for the controller */
1924                 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1925                     DDI_DMA_SYNC_FORDEV);
1926                 con_log(CL_ANN, (CE_CONT, "issue_cmd_ppc: SCSI CDB[0]=0x%x"
1927                     "cmd->index:%x\n", pkt->pkt_cdbp[0], cmd->index));
1928                 instance->func_ptr->issue_cmd(cmd, instance);
1929 
1930         } else {
1931                 struct mrsas_header *hdr = &cmd->frame->hdr;
1932 
1933                 instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1934 
1935                 pkt->pkt_reason              = CMD_CMPLT;
1936                 pkt->pkt_statistics  = 0;
1937                 pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;
1938 
1939                 switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
1940                     &hdr->cmd_status)) {
1941                 case MFI_STAT_OK:
1942                         pkt->pkt_scbp[0] = STATUS_GOOD;
1943                         break;
1944 
1945                 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1946                         con_log(CL_ANN, (CE_CONT,
1947                             "mrsas_tran_start: scsi done with error"));
1948                         pkt->pkt_reason      = CMD_CMPLT;
1949                         pkt->pkt_statistics = 0;
1950 
1951                         ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
1952                         break;
1953 
1954                 case MFI_STAT_DEVICE_NOT_FOUND:
1955                         con_log(CL_ANN, (CE_CONT,
1956                             "mrsas_tran_start: device not found error"));
1957                         pkt->pkt_reason              = CMD_DEV_GONE;
1958                         pkt->pkt_statistics  = STAT_DISCON;
1959                         break;
1960 
1961                 default:
1962                         ((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
1963                 }
1964 
1965                 (void) mrsas_common_check(instance, cmd);
1966                 DTRACE_PROBE2(start_nointr_done, uint8_t, hdr->cmd,
1967                     uint8_t, hdr->cmd_status);
1968                 mrsas_return_mfi_pkt(instance, cmd);
1969 
1970                 if (pkt->pkt_comp) {
1971                         (*pkt->pkt_comp)(pkt);
1972                 }
1973 
1974         }
1975 
1976         return (TRAN_ACCEPT);
1977 }
1978 
1979 /*
1980  * tran_abort - Abort any commands that are currently in transport
1981  * @ap:         SCSI address of the target
1982  * @pkt:        packet to abort, or NULL to abort all packets for the target
1983  *
1984  * The tran_abort() entry point for a SCSI HBA driver is called to abort any
1985  * commands that are currently in transport for a particular target. This entry
1986  * point is called when a target driver calls scsi_abort(). The tran_abort()
1987  * entry point should attempt to abort the command denoted by the pkt
1988  * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
1989  * abort all outstanding commands in the transport layer for the particular
1990  * target or logical unit.
1991  */
1992 /*ARGSUSED*/
1993 static int
1994 mrsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1995 {
1996         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1997 
1998         /* abort command not supported by H/W */
1999 
2000         return (DDI_FAILURE);
2001 }
2002 
2003 /*
2004  * tran_reset - reset either the SCSI bus or target
2005  * @ap:         SCSI address of the target
2006  * @level:      RESET_ALL or RESET_TARGET
2007  *
2008  * The tran_reset() entry point for a SCSI HBA driver is called to reset either
2009  * the SCSI bus or a particular SCSI target device. This entry point is called
2010  * when a target driver calls scsi_reset(). The tran_reset() entry point must
2011  * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
2012  * particular target or logical unit must be reset.
2013  */
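     /*
      * For reference, a target driver reaches this routine through
      * scsi_reset(9F), which returns 1 on success and 0 on failure:
      *
      *      if (scsi_reset(&devp->sd_address, RESET_TARGET) == 0)
      *              ... escalate to RESET_ALL or fail the request ...
      *
      * Note that this implementation ignores the level argument and simply
      * waits for outstanding commands to drain.
      */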
2014 /*ARGSUSED*/
2015 static int
2016 mrsas_tran_reset(struct scsi_address *ap, int level)
2017 {
2018         struct mrsas_instance *instance = ADDR2MR(ap);
2019 
2020         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2021 
2022         if (wait_for_outstanding(instance)) {
2023                 con_log(CL_ANN1,
2024                     (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
2025                 return (DDI_FAILURE);
2026         } else {
2027                 return (DDI_SUCCESS);
2028         }
2029 }
2030 
2031 /*
2032  * tran_getcap - get one of a set of SCSA-defined capabilities
2033  * @ap:         SCSI address of the target
2034  * @cap:        capability name string
2035  * @whom:       0 for the general (bus) setting, nonzero for this target
2036  *
2037  * The target driver can request the current setting of the capability for a
2038  * particular target by setting the whom parameter to nonzero. A whom value of
2039  * zero indicates a request for the current setting of the general capability
2040  * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
2041  * for undefined capabilities or the current value of the requested capability.
2042  */
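     /*
      * For reference, a sketch of the target-driver query that lands here;
      * the capability string is mapped to a SCSI_CAP_* index by
      * scsi_hba_lookup_capstr():
      *
      *      int maxxfer = scsi_ifgetcap(&devp->sd_address, "dma-max", 1);
      *      if (maxxfer == -1)
      *              ... capability undefined for this HBA ...
      */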
2043 /*ARGSUSED*/
2044 static int
2045 mrsas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
2046 {
2047         int     rval = 0;
2048 
2049         struct mrsas_instance   *instance = ADDR2MR(ap);
2050 
2051         con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2052 
2053         /* we do allow inquiring about capabilities for other targets */
2054         if (cap == NULL) {
2055                 return (-1);
2056         }
2057 
2058         switch (scsi_hba_lookup_capstr(cap)) {
2059         case SCSI_CAP_DMA_MAX:
2060                 if (instance->tbolt) {
2061                         /* Limit to 256k max transfer */
2062                         rval = mrsas_tbolt_max_cap_maxxfer;
2063                 } else {
2064                         /* Limit to 16MB max transfer */
2065                         rval = mrsas_max_cap_maxxfer;
2066                 }
2067                 break;
2068         case SCSI_CAP_MSG_OUT:
2069                 rval = 1;
2070                 break;
2071         case SCSI_CAP_DISCONNECT:
2072                 rval = 0;
2073                 break;
2074         case SCSI_CAP_SYNCHRONOUS:
2075                 rval = 0;
2076                 break;
2077         case SCSI_CAP_WIDE_XFER:
2078                 rval = 1;
2079                 break;
2080         case SCSI_CAP_TAGGED_QING:
2081                 rval = 1;
2082                 break;
2083         case SCSI_CAP_UNTAGGED_QING:
2084                 rval = 1;
2085                 break;
2086         case SCSI_CAP_PARITY:
2087                 rval = 1;
2088                 break;
2089         case SCSI_CAP_INITIATOR_ID:
2090                 rval = instance->init_id;
2091                 break;
2092         case SCSI_CAP_ARQ:
2093                 rval = 1;
2094                 break;
2095         case SCSI_CAP_LINKED_CMDS:
2096                 rval = 0;
2097                 break;
2098         case SCSI_CAP_RESET_NOTIFICATION:
2099                 rval = 1;
2100                 break;
2101         case SCSI_CAP_GEOMETRY:
2102                 rval = -1;
2103 
2104                 break;
2105         default:
2106                 con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
2107                     scsi_hba_lookup_capstr(cap)));
2108                 rval = -1;
2109                 break;
2110         }
2111 
2112         return (rval);
2113 }
2114 
2115 /*
2116  * tran_setcap - set one of a set of SCSA-defined capabilities
2117  * @ap:         SCSI address of the target
2118  * @cap:        capability name string
2119  * @value:      requested new value for the capability
2120  * @whom:       0 for the general (bus) setting, nonzero for this target
2121  *
2122  * The target driver might request that the new value be set for a particular
2123  * target by setting the whom parameter to nonzero. A whom value of zero
2124  * means that request is to set the new value for the SCSI bus or for adapter
2125  * hardware in general.
2126  * The tran_setcap() should return the following values as appropriate:
2127  * - -1 for undefined capabilities
2128  * - 0 if the HBA driver cannot set the capability to the requested value
2129  * - 1 if the HBA driver is able to set the capability to the requested value
2130  */
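     /*
      * For reference, the matching target-driver call is scsi_ifsetcap(9F),
      * e.g. requesting tagged queueing for a single target:
      *
      *      if (scsi_ifsetcap(&devp->sd_address, "tagged-qing", 1, 1) != 1)
      *              ... fall back to untagged operation ...
      */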
2131 /*ARGSUSED*/
2132 static int
2133 mrsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
2134 {
2135         int             rval = 1;
2136 
2137         con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2138 
2139         /* Setting the general (bus-wide) capabilities is not supported */
2140         if (cap == NULL || whom == 0) {
2141                 return (-1);
2142         }
2143 
2144         switch (scsi_hba_lookup_capstr(cap)) {
2145                 case SCSI_CAP_DMA_MAX:
2146                 case SCSI_CAP_MSG_OUT:
2147                 case SCSI_CAP_PARITY:
2148                 case SCSI_CAP_LINKED_CMDS:
2149                 case SCSI_CAP_RESET_NOTIFICATION:
2150                 case SCSI_CAP_DISCONNECT:
2151                 case SCSI_CAP_SYNCHRONOUS:
2152                 case SCSI_CAP_UNTAGGED_QING:
2153                 case SCSI_CAP_WIDE_XFER:
2154                 case SCSI_CAP_INITIATOR_ID:
2155                 case SCSI_CAP_ARQ:
2156                         /*
2157                          * None of these are settable via
2158                          * the capability interface.
2159                          */
2160                         break;
2161                 case SCSI_CAP_TAGGED_QING:
2162                         rval = 1;
2163                         break;
2164                 case SCSI_CAP_SECTOR_SIZE:
2165                         rval = 1;
2166                         break;
2167 
2168                 case SCSI_CAP_TOTAL_SECTORS:
2169                         rval = 1;
2170                         break;
2171                 default:
2172                         rval = -1;
2173                         break;
2174         }
2175 
2176         return (rval);
2177 }
2178 
2179 /*
2180  * tran_destroy_pkt - deallocate scsi_pkt structure
2181  * @ap:         SCSI address of the target
2182  * @pkt:        scsi_pkt structure to deallocate
2183  *
2184  * The tran_destroy_pkt() entry point is the HBA driver function that
2185  * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
2186  * called when the target driver calls scsi_destroy_pkt(). The
2187  * tran_destroy_pkt() entry point must free any DMA resources that have been
2188  * allocated for the packet. An implicit DMA synchronization occurs if the
2189  * DMA resources are freed and any cached data remains after the completion
2190  * of the transfer.
2191  */
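     /*
      * For reference, the target-driver completion path that triggers this
      * is simply scsi_destroy_pkt(9F); "xx_callback" below is an
      * illustrative name, not something defined in this driver:
      *
      *      static void
      *      xx_callback(struct scsi_pkt *pkt)
      *      {
      *              ... examine pkt->pkt_reason and *pkt->pkt_scbp ...
      *              scsi_destroy_pkt(pkt);  (frees DMA resources + the pkt)
      *      }
      */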
2192 static void
2193 mrsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2194 {
2195         struct scsa_cmd *acmd = PKT2CMD(pkt);
2196 
2197         con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2198 
2199         if (acmd->cmd_flags & CFLAG_DMAVALID) {
2200                 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2201 
2202                 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2203 
2204                 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2205 
2206                 acmd->cmd_dmahandle = NULL;
2207         }
2208 
2209         /* free the pkt */
2210         scsi_hba_pkt_free(ap, pkt);
2211 }
2212 
2213 /*
2214  * tran_dmafree - deallocates DMA resources
2215  * @ap:         SCSI address of the target
2216  * @pkt:        scsi_pkt whose DMA resources are to be freed
2217  *
2218  * The tran_dmafree() entry point deallocates DMA resources that have been
2219  * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
2220  * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
2221  * free only DMA resources allocated for a scsi_pkt structure, not the
2222  * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
2223  * implicitly performed.
2224  */
2225 /*ARGSUSED*/
2226 static void
2227 mrsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2228 {
2229         register struct scsa_cmd *acmd = PKT2CMD(pkt);
2230 
2231         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2232 
2233         if (acmd->cmd_flags & CFLAG_DMAVALID) {
2234                 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2235 
2236                 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2237 
2238                 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2239 
2240                 acmd->cmd_dmahandle = NULL;
2241         }
2242 }
2243 
2244 /*
2245  * tran_sync_pkt - synchronize the DMA object allocated
2246  * @ap:         SCSI address of the target
2247  * @pkt:        scsi_pkt whose DMA object is to be synchronized
2248  *
2249  * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
2250  * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
2251  * entry point is called when the target driver calls scsi_sync_pkt(). If the
2252  * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
2253  * must synchronize the CPU's view of the data. If the data transfer direction
2254  * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
2255  * device's view of the data.
2256  */
2257 /*ARGSUSED*/
2258 static void
2259 mrsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2260 {
2261         register struct scsa_cmd        *acmd = PKT2CMD(pkt);
2262 
2263         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2264 
2265         if (acmd->cmd_flags & CFLAG_DMAVALID) {
2266                 (void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
2267                     acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
2268                     DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
2269         }
2270 }
2271 
2272 /*ARGSUSED*/
2273 static int
2274 mrsas_tran_quiesce(dev_info_t *dip)
2275 {
2276         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2277 
2278         return (1);
2279 }
2280 
2281 /*ARGSUSED*/
2282 static int
2283 mrsas_tran_unquiesce(dev_info_t *dip)
2284 {
2285         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2286 
2287         return (1);
2288 }
2289 
2290 
2291 /*
2292  * mrsas_isr(struct mrsas_instance *)
2293  *
2294  * The interrupt service routine.
2295  *
2296  * Collect status for all completed commands and hand them off for
2297  * completion processing.
2298  */
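     /*
      * For the non-tbolt (MFI) path the driver and firmware share a simple
      * reply ring: firmware advances *producer as commands complete, and the
      * handler below walks reply_queue[] from *consumer up to *producer,
      * moving each finished command onto completed_pool_list and wrapping
      * the index at max_fw_cmds + 1.  In sketch form:
      *
      *      while (consumer != producer) {
      *              cmd = instance->cmd_list[reply_queue[consumer]];
      *              ... move cmd to the completed pool ...
      *              if (++consumer == instance->max_fw_cmds + 1)
      *                      consumer = 0;
      *      }
      *
      * Status decoding and completion callbacks are then done from the soft
      * interrupt handler.
      */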
2299 static uint_t
2300 mrsas_isr(struct mrsas_instance *instance)
2301 {
2302         int             need_softintr;
2303         uint32_t        producer;
2304         uint32_t        consumer;
2305         uint32_t        context;
2306         int             retval;
2307 
2308         struct mrsas_cmd        *cmd;
2309         struct mrsas_header     *hdr;
2310         struct scsi_pkt         *pkt;
2311 
2312         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2313         ASSERT(instance);
2314         if (instance->tbolt) {
2315                 mutex_enter(&instance->chip_mtx);
2316                 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
2317                     !(instance->func_ptr->intr_ack(instance))) {
2318                         mutex_exit(&instance->chip_mtx);
2319                         return (DDI_INTR_UNCLAIMED);
2320                 }
2321                 retval = mr_sas_tbolt_process_outstanding_cmd(instance);
2322                 mutex_exit(&instance->chip_mtx);
2323                 return (retval);
2324         } else {
2325                 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
2326                     !instance->func_ptr->intr_ack(instance)) {
2327                         return (DDI_INTR_UNCLAIMED);
2328                 }
2329         }
2330 
2331         (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
2332             0, 0, DDI_DMA_SYNC_FORCPU);
2333 
2334         if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2335             != DDI_SUCCESS) {
2336                 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2337                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2338                 con_log(CL_ANN1, (CE_WARN,
2339                     "mr_sas_isr(): FMA check, returning DDI_INTR_CLAIMED"));
2340                 return (DDI_INTR_CLAIMED);
2341         }
2342         con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2343 
2344 #ifdef OCRDEBUG
2345         if (debug_consecutive_timeout_after_ocr_g == 1) {
2346                 con_log(CL_ANN1, (CE_NOTE,
2347                     "simulating consecutive timeout after ocr"));
2348                 return (DDI_INTR_CLAIMED);
2349         }
2350 #endif
2351 
2352         mutex_enter(&instance->completed_pool_mtx);
2353         mutex_enter(&instance->cmd_pend_mtx);
2354 
2355         producer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2356             instance->producer);
2357         consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2358             instance->consumer);
2359 
2360         con_log(CL_ANN, (CE_CONT, " producer %x consumer %x ",
2361             producer, consumer));
2362         if (producer == consumer) {
2363                 con_log(CL_ANN, (CE_WARN, "producer ==  consumer case"));
2364                 DTRACE_PROBE2(isr_pc_err, uint32_t, producer,
2365                     uint32_t, consumer);
2366                 mutex_exit(&instance->cmd_pend_mtx);
2367                 mutex_exit(&instance->completed_pool_mtx);
2368                 return (DDI_INTR_CLAIMED);
2369         }
2370 
2371         while (consumer != producer) {
2372                 context = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2373                     &instance->reply_queue[consumer]);
2374                 cmd = instance->cmd_list[context];
2375 
2376                 if (cmd->sync_cmd == MRSAS_TRUE) {
2377                         hdr = (struct mrsas_header *)&cmd->frame->hdr;
2378                         if (hdr) {
2379                                 mlist_del_init(&cmd->list);
2380                         }
2381                 } else {
2382                         pkt = cmd->pkt;
2383                         if (pkt) {
2384                                 mlist_del_init(&cmd->list);
2385                         }
2386                 }
2387 
2388                 mlist_add_tail(&cmd->list, &instance->completed_pool_list);
2389 
2390                 consumer++;
2391                 if (consumer == (instance->max_fw_cmds + 1)) {
2392                         consumer = 0;
2393                 }
2394         }
2395         ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
2396             instance->consumer, consumer);
2397         mutex_exit(&instance->cmd_pend_mtx);
2398         mutex_exit(&instance->completed_pool_mtx);
2399 
2400         (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
2401             0, 0, DDI_DMA_SYNC_FORDEV);
2402 
2403         if (instance->softint_running) {
2404                 need_softintr = 0;
2405         } else {
2406                 need_softintr = 1;
2407         }
2408 
2409         if (instance->isr_level == HIGH_LEVEL_INTR) {
2410                 if (need_softintr) {
2411                         ddi_trigger_softintr(instance->soft_intr_id);
2412                 }
2413         } else {
2414                 /*
2415                  * Not a high-level interrupt, therefore call the soft level
2416                  * interrupt explicitly
2417                  */
2418                 (void) mrsas_softintr(instance);
2419         }
2420 
2421         return (DDI_INTR_CLAIMED);
2422 }
2423 
2424 
2425 /*
2426  * ************************************************************************** *
2427  *                                                                            *
2428  *                                  libraries                                 *
2429  *                                                                            *
2430  * ************************************************************************** *
2431  */
2432 /*
2433  * get_mfi_pkt : Get a command from the free pool
2434  * After successful allocation, the caller of this routine
2435  * must clear the frame buffer (memset to zero) before
2436  * using the packet further.
2437  *
2438  * ***** Note *****
2439  * After clearing the frame buffer the context id of the
2440  * frame buffer SHOULD be restored back.
2441  */
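     /*
      * A typical caller therefore looks like the sketch below (field names
      * per mr_sas.h; the exact context accessor can differ by frame type):
      *
      *      cmd = mrsas_get_mfi_pkt(instance);
      *      if (cmd == NULL)
      *              return (DDI_FAILURE);
      *      (void) memset((char *)&cmd->frame[0], 0,
      *          sizeof (union mrsas_frame));
      *      ddi_put32(cmd->frame_dma_obj.acc_handle,
      *          &cmd->frame->hdr.context, cmd->index);
      */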
2442 struct mrsas_cmd *
2443 mrsas_get_mfi_pkt(struct mrsas_instance *instance)
2444 {
2445         mlist_t                 *head = &instance->cmd_pool_list;
2446         struct mrsas_cmd        *cmd = NULL;
2447 
2448         mutex_enter(&instance->cmd_pool_mtx);
2449 
2450         if (!mlist_empty(head)) {
2451                 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2452                 mlist_del_init(head->next);
2453         }
2454         if (cmd != NULL) {
2455                 cmd->pkt = NULL;
2456                 cmd->retry_count_for_ocr = 0;
2457                 cmd->drv_pkt_time = 0;
2458 
2459         }
2460         mutex_exit(&instance->cmd_pool_mtx);
2461 
2462         return (cmd);
2463 }
2464 
2465 static struct mrsas_cmd *
2466 get_mfi_app_pkt(struct mrsas_instance *instance)
2467 {
2468         mlist_t                         *head = &instance->app_cmd_pool_list;
2469         struct mrsas_cmd        *cmd = NULL;
2470 
2471         mutex_enter(&instance->app_cmd_pool_mtx);
2472 
2473         if (!mlist_empty(head)) {
2474                 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2475                 mlist_del_init(head->next);
2476         }
2477         if (cmd != NULL) {
2478                 cmd->pkt = NULL;
2479                 cmd->retry_count_for_ocr = 0;
2480                 cmd->drv_pkt_time = 0;
2481         }
2482 
2483         mutex_exit(&instance->app_cmd_pool_mtx);
2484 
2485         return (cmd);
2486 }
2487 /*
2488  * return_mfi_pkt : Return a cmd to free command pool
2489  */
2490 void
2491 mrsas_return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2492 {
2493         mutex_enter(&instance->cmd_pool_mtx);
2494         /* use mlist_add_tail for debug assistance */
2495         mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2496 
2497         mutex_exit(&instance->cmd_pool_mtx);
2498 }
2499 
2500 static void
2501 return_mfi_app_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2502 {
2503         mutex_enter(&instance->app_cmd_pool_mtx);
2504 
2505         mlist_add(&cmd->list, &instance->app_cmd_pool_list);
2506 
2507         mutex_exit(&instance->app_cmd_pool_mtx);
2508 }
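     /*
      * push_pending_mfi_pkt - move a command onto cmd_pend_list and arm the
      * one-second io_timeout_checker watchdog if it is not already running.
      * For sync (DCMD) commands the timeout is taken from the frame header
      * but never allowed below debug_timeout_g; for regular I/O packets it
      * is simply debug_timeout_g.
      */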
2509 void
2510 push_pending_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2511 {
2512         struct scsi_pkt *pkt;
2513         struct mrsas_header     *hdr;
2514         con_log(CL_DLEVEL2, (CE_NOTE, "push_pending_pkt(): Called\n"));
2515         mutex_enter(&instance->cmd_pend_mtx);
2516         mlist_del_init(&cmd->list);
2517         mlist_add_tail(&cmd->list, &instance->cmd_pend_list);
2518         if (cmd->sync_cmd == MRSAS_TRUE) {
2519                 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2520                 if (hdr) {
2521                         con_log(CL_ANN1, (CE_CONT,
2522                             "push_pending_mfi_pkt: "
2523                             "cmd %p index %x "
2524                             "time %llx",
2525                             (void *)cmd, cmd->index,
2526                             gethrtime()));
2527                         /* Wait for specified interval  */
2528                         cmd->drv_pkt_time = ddi_get16(
2529                             cmd->frame_dma_obj.acc_handle, &hdr->timeout);
2530                         if (cmd->drv_pkt_time < debug_timeout_g)
2531                                 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2532                         con_log(CL_ANN1, (CE_CONT,
2533                             "push_pending_pkt(): "
2534                             "Called IO Timeout Value %x\n",
2535                             cmd->drv_pkt_time));
2536                 }
2537                 if (hdr && instance->timeout_id == (timeout_id_t)-1) {
2538                         instance->timeout_id = timeout(io_timeout_checker,
2539                             (void *) instance, drv_usectohz(MRSAS_1_SECOND));
2540                 }
2541         } else {
2542                 pkt = cmd->pkt;
2543                 if (pkt) {
2544                         con_log(CL_ANN1, (CE_CONT,
2545                             "push_pending_mfi_pkt: "
2546                             "cmd %p index %x pkt %p, "
2547                             "time %llx",
2548                             (void *)cmd, cmd->index, (void *)pkt,
2549                             gethrtime()));
2550                         cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2551                 }
2552                 if (pkt && instance->timeout_id == (timeout_id_t)-1) {
2553                         instance->timeout_id = timeout(io_timeout_checker,
2554                             (void *) instance, drv_usectohz(MRSAS_1_SECOND));
2555                 }
2556         }
2557 
2558         mutex_exit(&instance->cmd_pend_mtx);
2559 
2560 }
2561 
2562 int
2563 mrsas_print_pending_cmds(struct mrsas_instance *instance)
2564 {
2565         mlist_t *head = &instance->cmd_pend_list;
2566         mlist_t *tmp = head;
2567         struct mrsas_cmd *cmd = NULL;
2568         struct mrsas_header     *hdr;
2569         unsigned int            flag = 1;
2570         struct scsi_pkt *pkt;
2571         int saved_level;
2572         int cmd_count = 0;
2573 
2574         saved_level = debug_level_g;
2575         debug_level_g = CL_ANN1;
2576 
2577         dev_err(instance->dip, CE_NOTE,
2578             "mrsas_print_pending_cmds(): Called");
2579 
2580         while (flag) {
2581                 mutex_enter(&instance->cmd_pend_mtx);
2582                 tmp     =       tmp->next;
2583                 if (tmp == head) {
2584                         mutex_exit(&instance->cmd_pend_mtx);
2585                         flag = 0;
2586                         con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds():"
2587                             " NO MORE CMDS PENDING....\n"));
2588                         break;
2589                 } else {
2590                         cmd = mlist_entry(tmp, struct mrsas_cmd, list);
2591                         mutex_exit(&instance->cmd_pend_mtx);
2592                         if (cmd) {
2593                                 if (cmd->sync_cmd == MRSAS_TRUE) {
2594                                         hdr = (struct mrsas_header *)
2595                                             &cmd->frame->hdr;
2596                                         if (hdr) {
2597                                                 con_log(CL_ANN1, (CE_CONT,
2598                                                     "print: cmd %p index 0x%x "
2599                                                     "drv_pkt_time 0x%x (NO-PKT)"
2600                                                     " hdr %p\n", (void *)cmd,
2601                                                     cmd->index,
2602                                                     cmd->drv_pkt_time,
2603                                                     (void *)hdr));
2604                                         }
2605                                 } else {
2606                                         pkt = cmd->pkt;
2607                                         if (pkt) {
2608                                         con_log(CL_ANN1, (CE_CONT,
2609                                             "print: cmd %p index 0x%x "
2610                                             "drv_pkt_time 0x%x pkt %p \n",
2611                                             (void *)cmd, cmd->index,
2612                                             cmd->drv_pkt_time, (void *)pkt));
2613                                         }
2614                                 }
2615 
2616                                 if (++cmd_count == 1) {
2617                                         mrsas_print_cmd_details(instance, cmd,
2618                                             0xDD);
2619                                 } else {
2620                                         mrsas_print_cmd_details(instance, cmd,
2621                                             1);
2622                                 }
2623 
2624                         }
2625                 }
2626         }
2627         con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds(): Done\n"));
2628 
2629 
2630         debug_level_g = saved_level;
2631 
2632         return (DDI_SUCCESS);
2633 }
2634 
2635 
2636 int
2637 mrsas_complete_pending_cmds(struct mrsas_instance *instance)
2638 {
2639 
2640         struct mrsas_cmd *cmd = NULL;
2641         struct scsi_pkt *pkt;
2642         struct mrsas_header *hdr;
2643 
2644         struct mlist_head               *pos, *next;
2645 
2646         con_log(CL_ANN1, (CE_NOTE,
2647             "mrsas_complete_pending_cmds(): Called"));
2648 
2649         mutex_enter(&instance->cmd_pend_mtx);
2650         mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
2651                 cmd = mlist_entry(pos, struct mrsas_cmd, list);
2652                 if (cmd) {
2653                         pkt = cmd->pkt;
2654                         if (pkt) { /* for IO */
2655                                 if (((pkt->pkt_flags & FLAG_NOINTR)
2656                                     == 0) && pkt->pkt_comp) {
2657                                         pkt->pkt_reason
2658                                             = CMD_DEV_GONE;
2659                                         pkt->pkt_statistics
2660                                             = STAT_DISCON;
2661                                         con_log(CL_ANN1, (CE_CONT,
2662                                             "fail and posting to scsa "
2663                                             "cmd %p index %x"
2664                                             " pkt %p "
2665                                             "time : %llx",
2666                                             (void *)cmd, cmd->index,
2667                                             (void *)pkt, gethrtime()));
2668                                         (*pkt->pkt_comp)(pkt);
2669                                 }
2670                         } else { /* for DCMDS */
2671                                 if (cmd->sync_cmd == MRSAS_TRUE) {
2672                                 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2673                                 con_log(CL_ANN1, (CE_CONT,
2674                                     "posting invalid status to application "
2675                                     "cmd %p index %x"
2676                                     " hdr %p "
2677                                     "time : %llx",
2678                                     (void *)cmd, cmd->index,
2679                                     (void *)hdr, gethrtime()));
2680                                 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2681                                 complete_cmd_in_sync_mode(instance, cmd);
2682                                 }
2683                         }
2684                         mlist_del_init(&cmd->list);
2685                 } else {
2686                         con_log(CL_ANN1, (CE_CONT,
2687                             "mrsas_complete_pending_cmds:"
2688                             "NULL command\n"));
2689                 }
2690                 con_log(CL_ANN1, (CE_CONT,
2691                     "mrsas_complete_pending_cmds: "
2692                     "looping for more commands\n"));
2693         }
2694         mutex_exit(&instance->cmd_pend_mtx);
2695 
2696         con_log(CL_ANN1, (CE_CONT, "mrsas_complete_pending_cmds(): DONE\n"));
2697         return (DDI_SUCCESS);
2698 }
2699 
2700 void
2701 mrsas_print_cmd_details(struct mrsas_instance *instance, struct mrsas_cmd *cmd,
2702     int detail)
2703 {
2704         struct scsi_pkt *pkt = cmd->pkt;
2705         Mpi2RaidSCSIIORequest_t *scsi_io = cmd->scsi_io_request;
2706         int i;
2707         int saved_level;
2708         ddi_acc_handle_t acc_handle =
2709             instance->mpi2_frame_pool_dma_obj.acc_handle;
2710 
2711         if (detail == 0xDD) {
2712                 saved_level = debug_level_g;
2713                 debug_level_g = CL_ANN1;
2714         }
2715 
2716 
2717         if (instance->tbolt) {
2718                 con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p "
2719                     "cmd->index 0x%x SMID 0x%x timer 0x%x sec\n",
2720                     (void *)cmd, cmd->index, cmd->SMID, cmd->drv_pkt_time));
2721         } else {
2722                 con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p "
2723                     "cmd->index 0x%x timer 0x%x sec\n",
2724                     (void *)cmd, cmd->index, cmd->drv_pkt_time));
2725         }
2726 
2727         if (pkt) {
2728                 con_log(CL_ANN1, (CE_CONT, "scsi_pkt CDB[0]=0x%x",
2729                     pkt->pkt_cdbp[0]));
2730         } else {
2731                 con_log(CL_ANN1, (CE_CONT, "NO-PKT"));
2732         }
2733 
2734         if ((detail == 0xDD) && instance->tbolt) {
2735                 con_log(CL_ANN1, (CE_CONT, "RAID_SCSI_IO_REQUEST\n"));
2736                 con_log(CL_ANN1, (CE_CONT, "DevHandle=0x%X Function=0x%X "
2737                     "IoFlags=0x%X SGLFlags=0x%X DataLength=0x%X\n",
2738                     ddi_get16(acc_handle, &scsi_io->DevHandle),
2739                     ddi_get8(acc_handle, &scsi_io->Function),
2740                     ddi_get16(acc_handle, &scsi_io->IoFlags),
2741                     ddi_get16(acc_handle, &scsi_io->SGLFlags),
2742                     ddi_get32(acc_handle, &scsi_io->DataLength)));
2743 
2744                 for (i = 0; i < 32; i++) {
2745                         con_log(CL_ANN1, (CE_CONT, "CDB[%d]=0x%x ", i,
2746                             ddi_get8(acc_handle, &scsi_io->CDB.CDB32[i])));
2747                 }
2748 
2749                 con_log(CL_ANN1, (CE_CONT, "RAID-CONTEXT\n"));
2750                 con_log(CL_ANN1, (CE_CONT, "status=0x%X extStatus=0x%X "
2751                     "ldTargetId=0x%X timeoutValue=0x%X regLockFlags=0x%X "
2752                     "RAIDFlags=0x%X regLockRowLBA=0x%" PRIx64
2753                     " regLockLength=0x%X spanArm=0x%X\n",
2754                     ddi_get8(acc_handle, &scsi_io->RaidContext.status),
2755                     ddi_get8(acc_handle, &scsi_io->RaidContext.extStatus),
2756                     ddi_get16(acc_handle, &scsi_io->RaidContext.ldTargetId),
2757                     ddi_get16(acc_handle, &scsi_io->RaidContext.timeoutValue),
2758                     ddi_get8(acc_handle, &scsi_io->RaidContext.regLockFlags),
2759                     ddi_get8(acc_handle, &scsi_io->RaidContext.RAIDFlags),
2760                     ddi_get64(acc_handle, &scsi_io->RaidContext.regLockRowLBA),
2761                     ddi_get32(acc_handle, &scsi_io->RaidContext.regLockLength),
2762                     ddi_get8(acc_handle, &scsi_io->RaidContext.spanArm)));
2763         }
2764 
2765         if (detail == 0xDD) {
2766                 debug_level_g = saved_level;
2767         }
2768 }
2769 
2770 
2771 int
2772 mrsas_issue_pending_cmds(struct mrsas_instance *instance)
2773 {
2774         mlist_t *head   =       &instance->cmd_pend_list;
2775         mlist_t *tmp    =       head->next;
2776         struct mrsas_cmd *cmd = NULL;
2777         struct scsi_pkt *pkt;
2778 
2779         con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_pending_cmds(): Called"));
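     	/*
     	 * Re-issue every command left on the pend list after an OCR: the
     	 * timeout is restored to at least debug_timeout_g and the per-command
     	 * OCR retry count is bumped.  A command that has already been retried
     	 * more than IO_RETRY_COUNT times indicates an unrecoverable adapter,
     	 * which is then killed.
     	 */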
2780         while (tmp != head) {
2781                 mutex_enter(&instance->cmd_pend_mtx);
2782                 cmd = mlist_entry(tmp, struct mrsas_cmd, list);
2783                 tmp = tmp->next;
2784                 mutex_exit(&instance->cmd_pend_mtx);
2785                 if (cmd) {
2786                         con_log(CL_ANN1, (CE_CONT,
2787                             "mrsas_issue_pending_cmds(): "
2788                             "Got a cmd: cmd %p index 0x%x drv_pkt_time 0x%x ",
2789                             (void *)cmd, cmd->index, cmd->drv_pkt_time));
2790 
2791                         /* Reset command timeout value */
2792                         if (cmd->drv_pkt_time < debug_timeout_g)
2793                                 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2794 
2795                         cmd->retry_count_for_ocr++;
2796 
2797                         dev_err(instance->dip, CE_CONT,
2798                             "cmd retry count = %d\n",
2799                             cmd->retry_count_for_ocr);
2800 
2801                         if (cmd->retry_count_for_ocr > IO_RETRY_COUNT) {
2802                                 dev_err(instance->dip,
2803                                     CE_WARN, "mrsas_issue_pending_cmds(): "
2804                                     "cmd->retry_count exceeded limit >%d\n",
2805                                     IO_RETRY_COUNT);
2806                                 mrsas_print_cmd_details(instance, cmd, 0xDD);
2807 
2808                                 dev_err(instance->dip, CE_WARN,
2809                                     "mrsas_issue_pending_cmds():"
2810                                     "Calling KILL Adapter");
2811                                 if (instance->tbolt)
2812                                         mrsas_tbolt_kill_adapter(instance);
2813                                 else
2814                                         (void) mrsas_kill_adapter(instance);
2815                                 return (DDI_FAILURE);
2816                         }
2817 
2818                         pkt = cmd->pkt;
2819                         if (pkt) {
2820                                 con_log(CL_ANN1, (CE_CONT,
2821                                     "PENDING PKT-CMD ISSUE: cmd %p index %x "
2822                                     "pkt %p time %llx",
2823                                     (void *)cmd, cmd->index,
2824                                     (void *)pkt,
2825                                     gethrtime()));
2826 
2827                         } else {
2828                                 dev_err(instance->dip, CE_CONT,
2829                                     "mrsas_issue_pending_cmds(): NO-PKT, "
2830                                     "cmd %p index 0x%x drv_pkt_time 0x%x",
2831                                     (void *)cmd, cmd->index, cmd->drv_pkt_time);
2832                         }
2833 
2834 
2835                         if (cmd->sync_cmd == MRSAS_TRUE) {
2836                                 dev_err(instance->dip, CE_CONT,
2837                                     "mrsas_issue_pending_cmds(): "
2838                                     "SYNC_CMD == TRUE \n");
2839                                 instance->func_ptr->issue_cmd_in_sync_mode(
2840                                     instance, cmd);
2841                         } else {
2842                                 instance->func_ptr->issue_cmd(cmd, instance);
2843                         }
2844                 } else {
2845                         con_log(CL_ANN1, (CE_CONT,
2846                             "mrsas_issue_pending_cmds: NULL command\n"));
2847                 }
2848                 con_log(CL_ANN1, (CE_CONT,
2849                     "mrsas_issue_pending_cmds: "
2850                     "looping for more commands"));
2851         }
2852         con_log(CL_ANN1, (CE_CONT, "mrsas_issue_pending_cmds(): DONE\n"));
2853         return (DDI_SUCCESS);
2854 }
2855 
2856 
2857 
2858 /*
2859  * destroy_mfi_frame_pool
2860  */
2861 void
2862 destroy_mfi_frame_pool(struct mrsas_instance *instance)
2863 {
2864         int             i;
2865         uint32_t        max_cmd = instance->max_fw_cmds;
2866 
2867         struct mrsas_cmd        *cmd;
2868 
2869         /* free the frame DMA object attached to each command */
2870 
2871         for (i = 0; i < max_cmd; i++) {
2872 
2873                 cmd = instance->cmd_list[i];
2874 
2875                 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
2876                         (void) mrsas_free_dma_obj(instance, cmd->frame_dma_obj);
2877 
2878                 cmd->frame_dma_obj_status  = DMA_OBJ_FREED;
2879         }
2880 
2881 }
2882 
2883 /*
2884  * create_mfi_frame_pool
2885  */
2886 int
2887 create_mfi_frame_pool(struct mrsas_instance *instance)
2888 {
2889         int             i = 0;
2890         int             cookie_cnt;
2891         uint16_t        max_cmd;
2892         uint16_t        sge_sz;
2893         uint32_t        sgl_sz;
2894         uint32_t        tot_frame_size;
2895         struct mrsas_cmd        *cmd;
2896         int                     retval = DDI_SUCCESS;
2897 
2898         max_cmd = instance->max_fw_cmds;
2899         sge_sz  = sizeof (struct mrsas_sge_ieee);
2900         /* compute the total SGL size in bytes (IEEE SGE size * max SGEs) */
2901         sgl_sz          = sge_sz * instance->max_num_sge;
2902         tot_frame_size  = sgl_sz + MRMFI_FRAME_SIZE + SENSE_LENGTH;
2903 
2904         con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
2905             "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));
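     	/*
     	 * Each command gets one contiguous DMA buffer: the MFI frame sits at
     	 * offset 0 (with the SGL space counted by sgl_sz following the first
     	 * 64 bytes of the frame), and the sense buffer is carved out of the
     	 * tail at tot_frame_size - SENSE_LENGTH, as set up below.
     	 */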
2906 
2907         while (i < max_cmd) {
2908                 cmd = instance->cmd_list[i];
2909 
2910                 cmd->frame_dma_obj.size      = tot_frame_size;
2911                 cmd->frame_dma_obj.dma_attr = mrsas_generic_dma_attr;
2912                 cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2913                 cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
2914                 cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
2915                 cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;
2916 
2917                 cookie_cnt = mrsas_alloc_dma_obj(instance, &cmd->frame_dma_obj,
2918                     (uchar_t)DDI_STRUCTURE_LE_ACC);
2919 
2920                 if (cookie_cnt == -1 || cookie_cnt > 1) {
2921                         dev_err(instance->dip, CE_WARN,
2922                             "create_mfi_frame_pool: could not alloc.");
2923                         retval = DDI_FAILURE;
2924                         goto mrsas_undo_frame_pool;
2925                 }
2926 
2927                 bzero(cmd->frame_dma_obj.buffer, tot_frame_size);
2928 
2929                 cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
2930                 cmd->frame = (union mrsas_frame *)cmd->frame_dma_obj.buffer;
2931                 cmd->frame_phys_addr =
2932                     cmd->frame_dma_obj.dma_cookie[0].dmac_address;
2933 
2934                 cmd->sense = (uint8_t *)(((unsigned long)
2935                     cmd->frame_dma_obj.buffer) +
2936                     tot_frame_size - SENSE_LENGTH);
2937                 cmd->sense_phys_addr =
2938                     cmd->frame_dma_obj.dma_cookie[0].dmac_address +
2939                     tot_frame_size - SENSE_LENGTH;
2940 
2941                 if (!cmd->frame || !cmd->sense) {
2942                         dev_err(instance->dip, CE_WARN,
2943                             "create_mfi_frame_pool: frame buffer setup failed");
2944                         retval = DDI_FAILURE;
2945                         goto mrsas_undo_frame_pool;
2946                 }
2947 
2948                 ddi_put32(cmd->frame_dma_obj.acc_handle,
2949                     &cmd->frame->io.context, cmd->index);
2950                 i++;
2951 
2952                 con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
2953                     cmd->index, cmd->frame_phys_addr));
2954         }
2955 
2956         return (DDI_SUCCESS);
2957 
2958 mrsas_undo_frame_pool:
2959         if (i > 0)
2960                 destroy_mfi_frame_pool(instance);
2961 
2962         return (retval);
2963 }
2964 
2965 /*
2966  * free_additional_dma_buffer
2967  */
2968 static void
2969 free_additional_dma_buffer(struct mrsas_instance *instance)
2970 {
2971         if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
2972                 (void) mrsas_free_dma_obj(instance,
2973                     instance->mfi_internal_dma_obj);
2974                 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
2975         }
2976 
2977         if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
2978                 (void) mrsas_free_dma_obj(instance,
2979                     instance->mfi_evt_detail_obj);
2980                 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
2981         }
2982 }
2983 
2984 /*
2985  * alloc_additional_dma_buffer
2986  */
2987 static int
2988 alloc_additional_dma_buffer(struct mrsas_instance *instance)
2989 {
2990         uint32_t        reply_q_sz;
2991         uint32_t        internal_buf_size = PAGESIZE*2;
2992 
2993         /* max cmds plus 1 + producer & consumer */
2994         reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);
2995 
2996         instance->mfi_internal_dma_obj.size = internal_buf_size;
2997         instance->mfi_internal_dma_obj.dma_attr      = mrsas_generic_dma_attr;
2998         instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2999         instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
3000             0xFFFFFFFFU;
3001         instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen      = 1;
3002 
3003         if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
3004             (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
3005                 dev_err(instance->dip, CE_WARN,
3006                     "could not alloc reply queue");
3007                 return (DDI_FAILURE);
3008         }
3009 
3010         bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
3011 
3012         instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
3013 
3014         instance->producer = (uint32_t *)((unsigned long)
3015             instance->mfi_internal_dma_obj.buffer);
3016         instance->consumer = (uint32_t *)((unsigned long)
3017             instance->mfi_internal_dma_obj.buffer + 4);
3018         instance->reply_queue = (uint32_t *)((unsigned long)
3019             instance->mfi_internal_dma_obj.buffer + 8);
3020         instance->internal_buf = (caddr_t)(((unsigned long)
3021             instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
3022         instance->internal_buf_dmac_add =
3023             instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
3024             (reply_q_sz + 8);
3025         instance->internal_buf_size = internal_buf_size -
3026             (reply_q_sz + 8);
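     	/*
     	 * The two-page internal buffer is carved up as follows (matching the
     	 * pointer assignments above):
     	 *   offset 0:              producer index (uint32_t)
     	 *   offset 4:              consumer index (uint32_t)
     	 *   offset 8:              reply queue, max_fw_cmds + 1 uint32_t slots
     	 *   offset reply_q_sz + 8: internal_buf, scratch space for internal
     	 *                          DCMDs such as get_ctrl_info
     	 */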
3027 
3028         /* allocate evt_detail */
3029         instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
3030         instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
3031         instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3032         instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3033         instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
3034         instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;
3035 
3036         if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
3037             (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
3038                 dev_err(instance->dip, CE_WARN, "alloc_additional_dma_buffer: "
3039                     "could not allocate event detail buffer.");
3040                 goto mrsas_undo_internal_buff;
3041         }
3042 
3043         bzero(instance->mfi_evt_detail_obj.buffer,
3044             sizeof (struct mrsas_evt_detail));
3045 
3046         instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
3047 
3048         return (DDI_SUCCESS);
3049 
3050 mrsas_undo_internal_buff:
3051         if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
3052                 (void) mrsas_free_dma_obj(instance,
3053                     instance->mfi_internal_dma_obj);
3054                 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
3055         }
3056 
3057         return (DDI_FAILURE);
3058 }
3059 
3060 
3061 void
3062 mrsas_free_cmd_pool(struct mrsas_instance *instance)
3063 {
3064         int             i;
3065         uint32_t        max_cmd;
3066         size_t          sz;
3067 
3068         /* already freed */
3069         if (instance->cmd_list == NULL) {
3070                 return;
3071         }
3072 
3073         max_cmd = instance->max_fw_cmds;
3074 
3075         /* size of cmd_list array */
3076         sz = sizeof (struct mrsas_cmd *) * max_cmd;
3077 
3078         /* First free each cmd */
3079         for (i = 0; i < max_cmd; i++) {
3080                 if (instance->cmd_list[i] != NULL) {
3081                         kmem_free(instance->cmd_list[i],
3082                             sizeof (struct mrsas_cmd));
3083                 }
3084 
3085                 instance->cmd_list[i] = NULL;
3086         }
3087 
3088         /* Now, free cmd_list array */
3089         if (instance->cmd_list != NULL)
3090                 kmem_free(instance->cmd_list, sz);
3091 
3092         instance->cmd_list = NULL;
3093 
3094         INIT_LIST_HEAD(&instance->cmd_pool_list);
3095         INIT_LIST_HEAD(&instance->cmd_pend_list);
3096         if (instance->tbolt) {
3097                 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
3098         } else {
3099                 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3100         }
3101 
3102 }
3103 
3104 
3105 /*
3106  * mrsas_alloc_cmd_pool
3107  */
3108 int
3109 mrsas_alloc_cmd_pool(struct mrsas_instance *instance)
3110 {
3111         int             i;
3112         int             count;
3113         uint32_t        max_cmd;
3114         uint32_t        reserve_cmd;
3115         size_t          sz;
3116 
3117         struct mrsas_cmd        *cmd;
3118 
3119         max_cmd = instance->max_fw_cmds;
3120         con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
3121             "max_cmd %x", max_cmd));
3122 
3123 
3124         sz = sizeof (struct mrsas_cmd *) * max_cmd;
3125 
3126         /*
3127          * instance->cmd_list is an array of struct mrsas_cmd pointers.
3128          * Allocate the dynamic array first and then allocate individual
3129          * commands.
3130          */
3131         instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
3132         ASSERT(instance->cmd_list);
3133 
3134         /* create a frame pool and assign one frame to each cmd */
3135         for (count = 0; count < max_cmd; count++) {
3136                 instance->cmd_list[count] =
3137                     kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
3138                 ASSERT(instance->cmd_list[count]);
3139         }
3140 
3141         /* add all the commands to command pool */
3142 
3143         INIT_LIST_HEAD(&instance->cmd_pool_list);
3144         INIT_LIST_HEAD(&instance->cmd_pend_list);
3145         INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3146 
3147         /*
3148          * Reserve a subset of commands for application (ioctl) use.  When
3149          * max_cmd is smaller than MRSAS_APP_RESERVED_CMDS, reserve
3150          * max(max_cmd / 8, MRSAS_APP_MIN_RESERVED_CMDS) commands instead.
3151          */
3152         reserve_cmd = min(MRSAS_APP_RESERVED_CMDS,
3153             max(max_cmd >> 3, MRSAS_APP_MIN_RESERVED_CMDS));
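     	/*
     	 * For example, assuming MRSAS_APP_RESERVED_CMDS is 32 and
     	 * MRSAS_APP_MIN_RESERVED_CMDS is 4 (see mr_sas.h for the actual
     	 * values), a controller reporting 1008 commands reserves
     	 * min(32, max(126, 4)) = 32 for applications, while one reporting
     	 * only 16 commands reserves min(32, max(2, 4)) = 4.
     	 */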
3154 
3155         for (i = 0; i < reserve_cmd; i++) {
3156                 cmd = instance->cmd_list[i];
3157                 cmd->index = i;
3158                 mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list);
3159         }
3160 
3161 
3162         for (i = reserve_cmd; i < max_cmd; i++) {
3163                 cmd = instance->cmd_list[i];
3164                 cmd->index = i;
3165                 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
3166         }
3167 
3168         return (DDI_SUCCESS);
3169 
3170 mrsas_undo_cmds:
3171         if (count > 0) {
3172                 /* free each cmd */
3173                 for (i = 0; i < count; i++) {
3174                         if (instance->cmd_list[i] != NULL) {
3175                                 kmem_free(instance->cmd_list[i],
3176                                     sizeof (struct mrsas_cmd));
3177                         }
3178                         instance->cmd_list[i] = NULL;
3179                 }
3180         }
3181 
3182 mrsas_undo_cmd_list:
3183         if (instance->cmd_list != NULL)
3184                 kmem_free(instance->cmd_list, sz);
3185         instance->cmd_list = NULL;
3186 
3187         return (DDI_FAILURE);
3188 }
3189 
3190 
3191 /*
3192  * free_space_for_mfi
3193  */
3194 static void
3195 free_space_for_mfi(struct mrsas_instance *instance)
3196 {
3197 
3198         /* already freed */
3199         if (instance->cmd_list == NULL) {
3200                 return;
3201         }
3202 
3203         /* Free additional dma buffer */
3204         free_additional_dma_buffer(instance);
3205 
3206         /* Free the MFI frame pool */
3207         destroy_mfi_frame_pool(instance);
3208 
3209         /* Free all the commands in the cmd_list */
3210         /* Free the cmd_list buffer itself */
3211         mrsas_free_cmd_pool(instance);
3212 }
3213 
3214 /*
3215  * alloc_space_for_mfi
3216  */
3217 static int
3218 alloc_space_for_mfi(struct mrsas_instance *instance)
3219 {
3220         /* Allocate command pool (memory for cmd_list & individual commands) */
3221         if (mrsas_alloc_cmd_pool(instance)) {
3222                 dev_err(instance->dip, CE_WARN, "error creating cmd pool");
3223                 return (DDI_FAILURE);
3224         }
3225 
3226         /* Allocate MFI Frame pool */
3227         if (create_mfi_frame_pool(instance)) {
3228                 dev_err(instance->dip, CE_WARN,
3229                     "error creating frame DMA pool");
3230                 goto mfi_undo_cmd_pool;
3231         }
3232 
3233         /* Allocate additional DMA buffer */
3234         if (alloc_additional_dma_buffer(instance)) {
3235                 dev_err(instance->dip, CE_WARN,
3236                     "error allocating additional DMA buffers");
3237                 goto mfi_undo_frame_pool;
3238         }
3239 
3240         return (DDI_SUCCESS);
3241 
3242 mfi_undo_frame_pool:
3243         destroy_mfi_frame_pool(instance);
3244 
3245 mfi_undo_cmd_pool:
3246         mrsas_free_cmd_pool(instance);
3247 
3248         return (DDI_FAILURE);
3249 }
3250 
3251 
3252 
3253 /*
3254  * get_ctrl_info
3255  */
3256 static int
3257 get_ctrl_info(struct mrsas_instance *instance,
3258     struct mrsas_ctrl_info *ctrl_info)
3259 {
3260         int     ret = 0;
3261 
3262         struct mrsas_cmd                *cmd;
3263         struct mrsas_dcmd_frame *dcmd;
3264         struct mrsas_ctrl_info  *ci;
3265 
3266         if (instance->tbolt) {
3267                 cmd = get_raid_msg_mfi_pkt(instance);
3268         } else {
3269                 cmd = mrsas_get_mfi_pkt(instance);
3270         }
3271 
3272         if (!cmd) {
3273                 con_log(CL_ANN, (CE_WARN,
3274                     "Failed to get a cmd for ctrl info"));
3275                 DTRACE_PROBE2(info_mfi_err, uint16_t, instance->fw_outstanding,
3276                     uint16_t, instance->max_fw_cmds);
3277                 return (DDI_FAILURE);
3278         }
3279 
3280         /* Clear the frame buffer and assign back the context id */
3281         (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3282         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3283             cmd->index);
3284 
3285         dcmd = &cmd->frame->dcmd;
3286 
3287         ci = (struct mrsas_ctrl_info *)instance->internal_buf;
3288 
3289         if (!ci) {
3290                 dev_err(instance->dip, CE_WARN,
3291                     "Failed to alloc mem for ctrl info");
3292                 if (instance->tbolt) {
                             return_raid_msg_mfi_pkt(instance, cmd);
                     } else {
                             mrsas_return_mfi_pkt(instance, cmd);
                     }
3293                 return (DDI_FAILURE);
3294         }
3295 
3296         (void) memset(ci, 0, sizeof (struct mrsas_ctrl_info));
3297 
3299         (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
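     	/*
     	 * Build the DCMD frame: opcode MR_DCMD_CTRL_GET_INFO with a single
     	 * read SGE pointing at the pre-allocated internal buffer, issued in
     	 * poll mode.  The fields of interest are copied out of the little-
     	 * endian DMA buffer with ddi_get*() once the command completes.
     	 */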
3300 
3301         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3302         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
3303             MFI_CMD_STATUS_POLL_MODE);
3304         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3305         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3306             MFI_FRAME_DIR_READ);
3307         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3308         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3309             sizeof (struct mrsas_ctrl_info));
3310         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3311             MR_DCMD_CTRL_GET_INFO);
3312         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3313             instance->internal_buf_dmac_add);
3314         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3315             sizeof (struct mrsas_ctrl_info));
3316 
3317         cmd->frame_count = 1;
3318 
3319         if (instance->tbolt) {
3320                 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3321         }
3322 
3323         if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3324                 ret = 0;
3325 
3326                 ctrl_info->max_request_size = ddi_get32(
3327                     cmd->frame_dma_obj.acc_handle, &ci->max_request_size);
3328 
3329                 ctrl_info->ld_present_count = ddi_get16(
3330                     cmd->frame_dma_obj.acc_handle, &ci->ld_present_count);
3331 
3332                 ctrl_info->properties.on_off_properties = ddi_get32(
3333                     cmd->frame_dma_obj.acc_handle,
3334                     &ci->properties.on_off_properties);
3335                 ddi_rep_get8(cmd->frame_dma_obj.acc_handle,
3336                     (uint8_t *)(ctrl_info->product_name),
3337                     (uint8_t *)(ci->product_name), 80 * sizeof (char),
3338                     DDI_DEV_AUTOINCR);
3339                 /* should get more members of ci with ddi_get when needed */
3340         } else {
3341                 dev_err(instance->dip, CE_WARN,
3342                     "get_ctrl_info: Ctrl info failed");
3343                 ret = -1;
3344         }
3345 
3346         if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
3347                 ret = -1;
3348         }
3349         if (instance->tbolt) {
3350                 return_raid_msg_mfi_pkt(instance, cmd);
3351         } else {
3352                 mrsas_return_mfi_pkt(instance, cmd);
3353         }
3354 
3355         return (ret);
3356 }
3357 
3358 /*
3359  * abort_aen_cmd
3360  */
3361 static int
3362 abort_aen_cmd(struct mrsas_instance *instance,
3363     struct mrsas_cmd *cmd_to_abort)
3364 {
3365         int     ret = 0;
3366 
3367         struct mrsas_cmd                *cmd;
3368         struct mrsas_abort_frame        *abort_fr;
3369 
3370         con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_aen:%d", __LINE__));
3371 
3372         if (instance->tbolt) {
3373                 cmd = get_raid_msg_mfi_pkt(instance);
3374         } else {
3375                 cmd = mrsas_get_mfi_pkt(instance);
3376         }
3377 
3378         if (!cmd) {
3379                 con_log(CL_ANN1, (CE_WARN,
3380                     "abort_aen_cmd():Failed to get a cmd for abort_aen_cmd"));
3381                 DTRACE_PROBE2(abort_mfi_err, uint16_t, instance->fw_outstanding,
3382                     uint16_t, instance->max_fw_cmds);
3383                 return (DDI_FAILURE);
3384         }
3385 
3386         /* Clear the frame buffer and assign back the context id */
3387         (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3388         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3389             cmd->index);
3390 
3391         abort_fr = &cmd->frame->abort;
3392 
3393         /* prepare and issue the abort frame */
3394         ddi_put8(cmd->frame_dma_obj.acc_handle,
3395             &abort_fr->cmd, MFI_CMD_OP_ABORT);
3396         ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3397             MFI_CMD_STATUS_SYNC_MODE);
3398         ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3399         ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3400             cmd_to_abort->index);
3401         ddi_put32(cmd->frame_dma_obj.acc_handle,
3402             &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3403         ddi_put32(cmd->frame_dma_obj.acc_handle,
3404             &abort_fr->abort_mfi_phys_addr_hi, 0);
3405 
3406         instance->aen_cmd->abort_aen = 1;
3407 
3408         cmd->frame_count = 1;
3409 
3410         if (instance->tbolt) {
3411                 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3412         }
3413 
3414         if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3415                 con_log(CL_ANN1, (CE_WARN,
3416                     "abort_aen_cmd: issue_cmd_in_poll_mode failed"));
3417                 ret = -1;
3418         } else {
3419                 ret = 0;
3420         }
3421 
3422         instance->aen_cmd->abort_aen = 1;
3423         instance->aen_cmd = NULL;
3424 
3425         if (instance->tbolt) {
3426                 return_raid_msg_mfi_pkt(instance, cmd);
3427         } else {
3428                 mrsas_return_mfi_pkt(instance, cmd);
3429         }
3430 
3431         atomic_add_16(&instance->fw_outstanding, (-1));
3432 
3433         return (ret);
3434 }
3435 
3436 
3437 static int
3438 mrsas_build_init_cmd(struct mrsas_instance *instance,
3439     struct mrsas_cmd **cmd_ptr)
3440 {
3441         struct mrsas_cmd                *cmd;
3442         struct mrsas_init_frame         *init_frame;
3443         struct mrsas_init_queue_info    *initq_info;
3444         struct mrsas_drv_ver            drv_ver_info;
3445 
3446 
3447         /*
3448          * Prepare an init frame.  Note that the init frame points to a queue
3449          * info structure.  Each frame has its SGL allocated after the first
3450          * 64 bytes; since this frame needs no SGL, the SGL space is used for
3451          * the queue info structure instead.
3452          */
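     	/*
     	 * A sketch of the resulting layout (offsets relative to
     	 * cmd->frame_phys_addr):
     	 *   bytes  0..63  struct mrsas_init_frame
     	 *   bytes 64..    struct mrsas_init_queue_info (normal SGL space)
     	 * which is why queue_info_new_phys_addr_lo is programmed below as
     	 * cmd->frame_phys_addr + 64.
     	 */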
3453         cmd = *cmd_ptr;
3454 
3455 
3456         /* Clear the frame buffer and assign back the context id */
3457         (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3458         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3459             cmd->index);
3460 
3461         init_frame = (struct mrsas_init_frame *)cmd->frame;
3462         initq_info = (struct mrsas_init_queue_info *)
3463             ((unsigned long)init_frame + 64);
3464 
3465         (void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
3466         (void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));
3467 
3468         ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);
3469 
3470         ddi_put32(cmd->frame_dma_obj.acc_handle,
3471             &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);
3472 
3473         ddi_put32(cmd->frame_dma_obj.acc_handle,
3474             &initq_info->producer_index_phys_addr_hi, 0);
3475         ddi_put32(cmd->frame_dma_obj.acc_handle,
3476             &initq_info->producer_index_phys_addr_lo,
3477             instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);
3478 
3479         ddi_put32(cmd->frame_dma_obj.acc_handle,
3480             &initq_info->consumer_index_phys_addr_hi, 0);
3481         ddi_put32(cmd->frame_dma_obj.acc_handle,
3482             &initq_info->consumer_index_phys_addr_lo,
3483             instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);
3484 
3485         ddi_put32(cmd->frame_dma_obj.acc_handle,
3486             &initq_info->reply_queue_start_phys_addr_hi, 0);
3487         ddi_put32(cmd->frame_dma_obj.acc_handle,
3488             &initq_info->reply_queue_start_phys_addr_lo,
3489             instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);
3490 
3491         ddi_put8(cmd->frame_dma_obj.acc_handle,
3492             &init_frame->cmd, MFI_CMD_OP_INIT);
3493         ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
3494             MFI_CMD_STATUS_POLL_MODE);
3495         ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
3496         ddi_put32(cmd->frame_dma_obj.acc_handle,
3497             &init_frame->queue_info_new_phys_addr_lo,
3498             cmd->frame_phys_addr + 64);
3499         ddi_put32(cmd->frame_dma_obj.acc_handle,
3500             &init_frame->queue_info_new_phys_addr_hi, 0);
3501 
3502 
3503         /* fill driver version information */
3504         fill_up_drv_ver(&drv_ver_info);
3505 
3506         /* allocate the driver version data transfer buffer */
3507         instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
3508         instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
3509         instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3510         instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3511         instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
3512         instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
3513 
3514         if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
3515             (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
3516                 con_log(CL_ANN, (CE_WARN,
3517                     "mrsas_build_init_cmd: could not allocate driver "
                         "version buffer."));
3518                 return (DDI_FAILURE);
3519         }
3520         /* copy driver version to dma buffer */
3521         (void) memset(instance->drv_ver_dma_obj.buffer, 0,
3522             sizeof (drv_ver_info.drv_ver));
3523         ddi_rep_put8(instance->drv_ver_dma_obj.acc_handle,
3524             (uint8_t *)drv_ver_info.drv_ver,
3525             (uint8_t *)instance->drv_ver_dma_obj.buffer,
3526             sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
3527 
3528 
3529         /* copy driver version physical address to init frame */
3530         ddi_put64(cmd->frame_dma_obj.acc_handle, &init_frame->driverversion,
3531             instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
3532 
3533         ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
3534             sizeof (struct mrsas_init_queue_info));
3535 
3536         cmd->frame_count = 1;
3537 
3538         *cmd_ptr = cmd;
3539 
3540         return (DDI_SUCCESS);
3541 }
3542 
3543 
3544 /*
3545  * mrsas_init_adapter_ppc - Initialize MFI interface adapter.
3546  */
3547 int
3548 mrsas_init_adapter_ppc(struct mrsas_instance *instance)
3549 {
3550         struct mrsas_cmd                *cmd;
3551 
3552         /*
3553          * Allocate memory for the MFI adapter (cmd pool, individual commands,
3554          * MFI frames, etc.).
3555          */
3556         if (alloc_space_for_mfi(instance) != DDI_SUCCESS) {
3557                 con_log(CL_ANN, (CE_NOTE,
3558                     "Error, failed to allocate memory for MFI adapter"));
3559                 return (DDI_FAILURE);
3560         }
3561 
3562         /* Build INIT command */
3563         cmd = mrsas_get_mfi_pkt(instance);
3564         if (cmd == NULL) {
3565                 DTRACE_PROBE2(init_adapter_mfi_err, uint16_t,
3566                     instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
3567                 return (DDI_FAILURE);
3568         }
3569 
3570         if (mrsas_build_init_cmd(instance, &cmd) != DDI_SUCCESS) {
3571                 con_log(CL_ANN,
3572                     (CE_NOTE, "Error, failed to build INIT command"));
3573 
3574                 goto fail_undo_alloc_mfi_space;
3575         }
3576 
3577         /*
3578          * Disable interrupts before sending the init frame (see the Linux
3579          * driver code) and send the INIT MFI frame in polled mode.
3580          */
3581         if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3582                 con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
3583                 goto fail_fw_init;
3584         }
3585 
3586         if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
3587                 goto fail_fw_init;
3588         mrsas_return_mfi_pkt(instance, cmd);
3589 
3590         if (ctio_enable &&
3591             (instance->func_ptr->read_fw_status_reg(instance) & 0x04000000)) {
3592                 con_log(CL_ANN, (CE_NOTE, "mr_sas: IEEE SGL's supported"));
3593                 instance->flag_ieee = 1;
3594         } else {
3595                 instance->flag_ieee = 0;
3596         }
3597 
3598         ASSERT(!instance->skinny || instance->flag_ieee);
3599 
3600         instance->unroll.alloc_space_mfi = 1;
3601         instance->unroll.verBuff = 1;
3602 
3603         return (DDI_SUCCESS);
3604 
3605 
3606 fail_fw_init:
3607         (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
3608 
3609 fail_undo_alloc_mfi_space:
3610         mrsas_return_mfi_pkt(instance, cmd);
3611         free_space_for_mfi(instance);
3612 
3613         return (DDI_FAILURE);
3614 
3615 }
3616 
3617 /*
3618  * mrsas_init_adapter - Initialize adapter.
3619  */
3620 int
3621 mrsas_init_adapter(struct mrsas_instance *instance)
3622 {
3623         struct mrsas_ctrl_info          ctrl_info;
3624 
3625 
3626         /* we expect the FW state to be READY */
3627         if (mfi_state_transition_to_ready(instance)) {
3628                 con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready"));
3629                 return (DDI_FAILURE);
3630         }
3631 
3632         /* get various operational parameters from status register */
3633         instance->max_num_sge =
3634             (instance->func_ptr->read_fw_status_reg(instance) &
3635             0xFF0000) >> 0x10;
3636         instance->max_num_sge =
3637             (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ?
3638             MRSAS_MAX_SGE_CNT : instance->max_num_sge;
3639 
3640         /*
3641          * Reduce the max supported cmds by 1. This is to ensure that the
3642          * reply_q_sz (1 more than the max cmd that driver may send)
3643          * does not exceed max cmds that the FW can support
3644          */
3645         instance->max_fw_cmds =
3646             instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
3647         instance->max_fw_cmds = instance->max_fw_cmds - 1;
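     	/*
     	 * Both values come from the FW status register: bits 16-23 advertise
     	 * the supported SGE count and bits 0-15 the supported command count.
     	 * For example, a controller advertising 1008 commands is driven with
     	 * max_fw_cmds = 1007, and the reply queue built later holds
     	 * max_fw_cmds + 1 = 1008 entries, matching the FW limit.
     	 */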
3648 
3649 
3650 
3651         /* Initialize adapter */
3652         if (instance->func_ptr->init_adapter(instance) != DDI_SUCCESS) {
3653                 con_log(CL_ANN,
3654                     (CE_WARN, "mr_sas: could not initialize adapter"));
3655                 return (DDI_FAILURE);
3656         }
3657 
3658         /* gather misc FW related information */
3659         instance->disable_online_ctrl_reset = 0;
3660 
3661         if (!get_ctrl_info(instance, &ctrl_info)) {
3662                 instance->max_sectors_per_req = ctrl_info.max_request_size;
3663                 con_log(CL_ANN1, (CE_NOTE,
3664                     "product name %s ld present %d",
3665                     ctrl_info.product_name, ctrl_info.ld_present_count));

                     /*
                      * Only look at the controller properties when the DCMD
                      * succeeded; otherwise ctrl_info holds uninitialized
                      * stack data.
                      */
                     if (ctrl_info.properties.on_off_properties &
                         DISABLE_OCR_PROP_FLAG)
                             instance->disable_online_ctrl_reset = 1;
3666         } else {
3667                 instance->max_sectors_per_req = instance->max_num_sge *
3668                     PAGESIZE / 512;
3669         }
3673 
3674         return (DDI_SUCCESS);
3675 
3676 }
3677 
3678 
3679 
3680 static int
3681 mrsas_issue_init_mfi(struct mrsas_instance *instance)
3682 {
3683         struct mrsas_cmd                *cmd;
3684         struct mrsas_init_frame         *init_frame;
3685         struct mrsas_init_queue_info    *initq_info;
3686 
3687         /*
3688          * Prepare an init frame.  Note that the init frame points to a queue
3689          * info structure.  Each frame has its SGL allocated after the first
3690          * 64 bytes; since this frame needs no SGL, the SGL space is used for
3691          * the queue info structure instead.
3692          */
3693         con_log(CL_ANN1, (CE_NOTE,
3694             "mrsas_issue_init_mfi: entry\n"));
3695         cmd = get_mfi_app_pkt(instance);
3696 
3697         if (!cmd) {
3698                 con_log(CL_ANN1, (CE_WARN,
3699                     "mrsas_issue_init_mfi: get_pkt failed\n"));
3700                 return (DDI_FAILURE);
3701         }
3702 
3703         /* Clear the frame buffer and assign back the context id */
3704         (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3705         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3706             cmd->index);
3707 
3708         init_frame = (struct mrsas_init_frame *)cmd->frame;
3709         initq_info = (struct mrsas_init_queue_info *)
3710             ((unsigned long)init_frame + 64);
3711 
3712         (void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
3713         (void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));
3714 
3715         ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);
3716 
3717         ddi_put32(cmd->frame_dma_obj.acc_handle,
3718             &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);
3719         ddi_put32(cmd->frame_dma_obj.acc_handle,
3720             &initq_info->producer_index_phys_addr_hi, 0);
3721         ddi_put32(cmd->frame_dma_obj.acc_handle,
3722             &initq_info->producer_index_phys_addr_lo,
3723             instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);
3724         ddi_put32(cmd->frame_dma_obj.acc_handle,
3725             &initq_info->consumer_index_phys_addr_hi, 0);
3726         ddi_put32(cmd->frame_dma_obj.acc_handle,
3727             &initq_info->consumer_index_phys_addr_lo,
3728             instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);
3729 
3730         ddi_put32(cmd->frame_dma_obj.acc_handle,
3731             &initq_info->reply_queue_start_phys_addr_hi, 0);
3732         ddi_put32(cmd->frame_dma_obj.acc_handle,
3733             &initq_info->reply_queue_start_phys_addr_lo,
3734             instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);
3735 
3736         ddi_put8(cmd->frame_dma_obj.acc_handle,
3737             &init_frame->cmd, MFI_CMD_OP_INIT);
3738         ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
3739             MFI_CMD_STATUS_POLL_MODE);
3740         ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
3741         ddi_put32(cmd->frame_dma_obj.acc_handle,
3742             &init_frame->queue_info_new_phys_addr_lo,
3743             cmd->frame_phys_addr + 64);
3744         ddi_put32(cmd->frame_dma_obj.acc_handle,
3745             &init_frame->queue_info_new_phys_addr_hi, 0);
3746 
3747         ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
3748             sizeof (struct mrsas_init_queue_info));
3749 
3750         cmd->frame_count = 1;
3751 
3752         /* issue the init frame in polled mode */
3753         if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3754                 con_log(CL_ANN1, (CE_WARN,
3755                     "mrsas_issue_init_mfi():failed to "
3756                     "init firmware"));
3757                 return_mfi_app_pkt(instance, cmd);
3758                 return (DDI_FAILURE);
3759         }
3760 
3761         if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
3762                 return_mfi_app_pkt(instance, cmd);
3763                 return (DDI_FAILURE);
3764         }
3765 
3766         return_mfi_app_pkt(instance, cmd);
3767         con_log(CL_ANN1, (CE_CONT, "mrsas_issue_init_mfi: Done"));
3768 
3769         return (DDI_SUCCESS);
3770 }
3771 /*
3772  * mfi_state_transition_to_ready        : Move the FW to READY state
3773  *
3774  * @instance                    : adapter instance soft state
3775  */
3776 int
3777 mfi_state_transition_to_ready(struct mrsas_instance *instance)
3778 {
3779         int             i;
3780         uint8_t         max_wait;
3781         uint32_t        fw_ctrl = 0;
3782         uint32_t        fw_state;
3783         uint32_t        cur_state;
3784         uint32_t        cur_abs_reg_val;
3785         uint32_t        prev_abs_reg_val = 0;
3786         uint32_t        status;
3787 
3788         cur_abs_reg_val =
3789             instance->func_ptr->read_fw_status_reg(instance);
3790         fw_state =
3791             cur_abs_reg_val & MFI_STATE_MASK;
3792         con_log(CL_ANN1, (CE_CONT,
3793             "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));
3794 
3795         while (fw_state != MFI_STATE_READY) {
3796                 con_log(CL_ANN, (CE_CONT,
3797                     "mfi_state_transition_to_ready:FW state%x", fw_state));
3798 
3799                 switch (fw_state) {
3800                 case MFI_STATE_FAULT:
3801                         con_log(CL_ANN, (CE_NOTE,
3802                             "mr_sas: FW in FAULT state!!"));
3803 
3804                         return (ENODEV);
3805                 case MFI_STATE_WAIT_HANDSHAKE:
3806                         /* set the CLR bit in IMR0 */
3807                         con_log(CL_ANN1, (CE_NOTE,
3808                             "mr_sas: FW waiting for HANDSHAKE"));
3809                         /*
3810                          * PCI_Hot Plug: MFI F/W requires
3811                          * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3812                          * to be set
3813                          */
3814                         /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
3815                         if (!instance->tbolt && !instance->skinny) {
3816                                 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
3817                                     MFI_INIT_HOTPLUG, instance);
3818                         } else {
3819                                 WR_RESERVED0_REGISTER(MFI_INIT_CLEAR_HANDSHAKE |
3820                                     MFI_INIT_HOTPLUG, instance);
3821                         }
3822                         max_wait        = (instance->tbolt == 1) ? 180 : 2;
3823                         cur_state       = MFI_STATE_WAIT_HANDSHAKE;
3824                         break;
3825                 case MFI_STATE_BOOT_MESSAGE_PENDING:
3826                         /* set the CLR bit in IMR0 */
3827                         con_log(CL_ANN1, (CE_NOTE,
3828                             "mr_sas: FW state boot message pending"));
3829                         /*
3830                          * PCI_Hot Plug: MFI F/W requires
3831                          * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3832                          * to be set
3833                          */
3834                         if (!instance->tbolt && !instance->skinny) {
3835                                 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);
3836                         } else {
3837                                 WR_RESERVED0_REGISTER(MFI_INIT_HOTPLUG,
3838                                     instance);
3839                         }
3840                         max_wait        = (instance->tbolt == 1) ? 180 : 10;
3841                         cur_state       = MFI_STATE_BOOT_MESSAGE_PENDING;
3842                         break;
3843                 case MFI_STATE_OPERATIONAL:
3844                         /* bring it to READY state; assuming max wait 2 secs */
3845                         instance->func_ptr->disable_intr(instance);
3846                         con_log(CL_ANN1, (CE_NOTE,
3847                             "mr_sas: FW in OPERATIONAL state"));
3848                         /*
3849                          * PCI_Hot Plug: MFI F/W requires
3850                          * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
3851                          * to be set
3852                          */
3853                         /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
3854                         if (!instance->tbolt && !instance->skinny) {
3855                                 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
3856                         } else {
3857                                 WR_RESERVED0_REGISTER(MFI_RESET_FLAGS,
3858                                     instance);
3859 
3860                                 for (i = 0; i < (10 * 1000); i++) {
3861                                         status =
3862                                             RD_RESERVED0_REGISTER(instance);
3863                                         if (status & 1) {
3864                                                 delay(1 *
3865                                                     drv_usectohz(MILLISEC));
3866                                         } else {
3867                                                 break;
3868                                         }
3869                                 }
3870 
3871                         }
3872                         max_wait        = (instance->tbolt == 1) ? 180 : 10;
3873                         cur_state       = MFI_STATE_OPERATIONAL;
3874                         break;
3875                 case MFI_STATE_UNDEFINED:
3876                         /* this state should not last for more than 2 seconds */
3877                         con_log(CL_ANN1, (CE_NOTE, "FW state undefined"));
3878 
3879                         max_wait        = (instance->tbolt == 1) ? 180 : 2;
3880                         cur_state       = MFI_STATE_UNDEFINED;
3881                         break;
3882                 case MFI_STATE_BB_INIT:
3883                         max_wait        = (instance->tbolt == 1) ? 180 : 2;
3884                         cur_state       = MFI_STATE_BB_INIT;
3885                         break;
3886                 case MFI_STATE_FW_INIT:
3887                         max_wait        = (instance->tbolt == 1) ? 180 : 2;
3888                         cur_state       = MFI_STATE_FW_INIT;
3889                         break;
3890                 case MFI_STATE_FW_INIT_2:
3891                         max_wait        = 180;
3892                         cur_state       = MFI_STATE_FW_INIT_2;
3893                         break;
3894                 case MFI_STATE_DEVICE_SCAN:
3895                         max_wait        = 180;
3896                         cur_state       = MFI_STATE_DEVICE_SCAN;
3897                         prev_abs_reg_val = cur_abs_reg_val;
3898                         con_log(CL_NONE, (CE_NOTE,
3899                             "Device scan in progress ...\n"));
3900                         break;
3901                 case MFI_STATE_FLUSH_CACHE:
3902                         max_wait        = 180;
3903                         cur_state       = MFI_STATE_FLUSH_CACHE;
3904                         break;
3905                 default:
3906                         con_log(CL_ANN1, (CE_NOTE,
3907                             "mr_sas: Unknown state 0x%x", fw_state));
3908                         return (ENODEV);
3909                 }
3910 
3911                 /* the cur_state should not last for more than max_wait secs */
3912                 for (i = 0; i < (max_wait * MILLISEC); i++) {
3913                         /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
3914                         cur_abs_reg_val =
3915                             instance->func_ptr->read_fw_status_reg(instance);
3916                         fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3917 
3918                         if (fw_state == cur_state) {
3919                                 delay(1 * drv_usectohz(MILLISEC));
3920                         } else {
3921                                 break;
3922                         }
3923                 }
3924                 if (fw_state == MFI_STATE_DEVICE_SCAN) {
3925                         if (prev_abs_reg_val != cur_abs_reg_val) {
3926                                 continue;
3927                         }
3928                 }
3929 
3930                 /* return error if fw_state hasn't changed after max_wait */
3931                 if (fw_state == cur_state) {
3932                         con_log(CL_ANN1, (CE_WARN,
3933                             "FW state hasn't changed in %d secs", max_wait));
3934                         return (ENODEV);
3935                 }
3936         }
3937 
3938         /* This may also need to apply to Skinny, but for now, don't worry. */
3939         if (!instance->tbolt && !instance->skinny) {
3940                 fw_ctrl = RD_IB_DOORBELL(instance);
3941                 con_log(CL_ANN1, (CE_CONT,
3942                     "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
3943 
3944                 /*
3945                  * Write 0xF to the doorbell register to do the following.
3946                  * - Abort all outstanding commands (bit 0).
3947                  * - Transition from OPERATIONAL to READY state (bit 1).
3948                  * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
3949                  * - Release the FW to continue running, i.e. the BIOS
3950                  *   handshake (bit 3).
3951                  */
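                     /*
                      * With the usual MFI doorbell bit definitions (an
                      * assumption; check mr_sas.h), 0xF corresponds to
                      * MFI_INIT_ABORT | MFI_INIT_READY | MFI_INIT_MFIMODE |
                      * MFI_INIT_CLEAR_HANDSHAKE.
                      */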
3952                 WR_IB_DOORBELL(0xF, instance);
3953         }
3954 
3955         if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
3956                 return (EIO);
3957         }
3958 
3959         return (DDI_SUCCESS);
3960 }
3961 
3962 /*
3963  * get_seq_num
3964  */
3965 static int
3966 get_seq_num(struct mrsas_instance *instance,
3967     struct mrsas_evt_log_info *eli)
3968 {
3969         int     ret = DDI_SUCCESS;
3970 
3971         dma_obj_t                       dcmd_dma_obj;
3972         struct mrsas_cmd                *cmd;
3973         struct mrsas_dcmd_frame         *dcmd;
3974         struct mrsas_evt_log_info *eli_tmp;
3975         if (instance->tbolt) {
3976                 cmd = get_raid_msg_mfi_pkt(instance);
3977         } else {
3978                 cmd = mrsas_get_mfi_pkt(instance);
3979         }
3980 
3981         if (!cmd) {
3982                 dev_err(instance->dip, CE_WARN, "failed to get a cmd");
3983                 DTRACE_PROBE2(seq_num_mfi_err, uint16_t,
3984                     instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
3985                 return (ENOMEM);
3986         }
3987 
3988         /* Clear the frame buffer and assign back the context id */
3989         (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3990         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3991             cmd->index);
3992 
3993         dcmd = &cmd->frame->dcmd;
3994 
3995         /* allocate the data transfer buffer */
3996         dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info);
3997         dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3998         dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3999         dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
4000         dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
4001         dcmd_dma_obj.dma_attr.dma_attr_align = 1;
4002 
4003         if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
4004             (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
4005                 dev_err(instance->dip, CE_WARN,
4006                     "get_seq_num: could not allocate data transfer buffer.");
                     if (instance->tbolt) {
                             return_raid_msg_mfi_pkt(instance, cmd);
                     } else {
                             mrsas_return_mfi_pkt(instance, cmd);
                     }
4007                 return (DDI_FAILURE);
4008         }
4009 
4010         (void) memset(dcmd_dma_obj.buffer, 0,
4011             sizeof (struct mrsas_evt_log_info));
4012 
4013         (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4014 
4015         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
4016         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
4017         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
4018         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
4019             MFI_FRAME_DIR_READ);
4020         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
4021         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
4022             sizeof (struct mrsas_evt_log_info));
4023         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4024             MR_DCMD_CTRL_EVENT_GET_INFO);
4025         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
4026             sizeof (struct mrsas_evt_log_info));
4027         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
4028             dcmd_dma_obj.dma_cookie[0].dmac_address);
4029 
4030         cmd->sync_cmd = MRSAS_TRUE;
4031         cmd->frame_count = 1;
4032 
4033         if (instance->tbolt) {
4034                 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4035         }
4036 
4037         if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
4038                 dev_err(instance->dip, CE_WARN, "get_seq_num: "
4039                     "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO");
4040                 ret = DDI_FAILURE;
4041         } else {
4042                 eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer;
4043                 eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle,
4044                     &eli_tmp->newest_seq_num);
4045                 ret = DDI_SUCCESS;
4046         }
4047 
4048         if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
4049                 ret = DDI_FAILURE;
4050 
4051         if (instance->tbolt) {
4052                 return_raid_msg_mfi_pkt(instance, cmd);
4053         } else {
4054                 mrsas_return_mfi_pkt(instance, cmd);
4055         }
4056 
4057         return (ret);
4058 }
4059 
4060 /*
4061  * start_mfi_aen
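      * Fetch the newest event sequence number from the firmware and register
      * an asynchronous event notification (AEN) for all locales at class
      * MR_EVT_CLASS_INFO, starting at that sequence number plus one.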
4062  */
4063 static int
4064 start_mfi_aen(struct mrsas_instance *instance)
4065 {
4066         int     ret = 0;
4067 
4068         struct mrsas_evt_log_info       eli;
4069         union mrsas_evt_class_locale    class_locale;
4070 
4071         /* get the latest sequence number from FW */
4072         (void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info));
4073 
4074         if (get_seq_num(instance, &eli)) {
4075                 dev_err(instance->dip, CE_WARN,
4076                     "start_mfi_aen: failed to get seq num");
4077                 return (-1);
4078         }
4079 
4080         /* register AEN with FW for latest sequence number plus 1 */
4081         class_locale.members.reserved   = 0;
4082         class_locale.members.locale     = LE_16(MR_EVT_LOCALE_ALL);
4083         class_locale.members.class      = MR_EVT_CLASS_INFO;
4084         class_locale.word       = LE_32(class_locale.word);
4085         ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
4086             class_locale.word);
4087 
4088         if (ret) {
4089                 dev_err(instance->dip, CE_WARN,
4090                     "start_mfi_aen: aen registration failed");
4091                 return (-1);
4092         }
4093 
4094 
4095         return (ret);
4096 }
4097 
4098 /*
4099  * flush_cache
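      * Issue a MR_DCMD_CTRL_CACHE_FLUSH DCMD in poll mode to flush both the
      * controller cache and the disk caches.  Best-effort: failures are only
      * logged.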
4100  */
4101 static void
4102 flush_cache(struct mrsas_instance *instance)
4103 {
4104         struct mrsas_cmd                *cmd = NULL;
4105         struct mrsas_dcmd_frame         *dcmd;
4106         if (instance->tbolt) {
4107                 cmd = get_raid_msg_mfi_pkt(instance);
4108         } else {
4109                 cmd = mrsas_get_mfi_pkt(instance);
4110         }
4111 
4112         if (!cmd) {
4113                 con_log(CL_ANN1, (CE_WARN,
4114                     "flush_cache(): Failed to get a cmd for flush_cache"));
4115                 DTRACE_PROBE2(flush_cache_err, uint16_t,
4116                     instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
4117                 return;
4118         }
4119 
4120         /* Clear the frame buffer and assign back the context id */
4121         (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
4122         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
4123             cmd->index);
4124 
4125         dcmd = &cmd->frame->dcmd;
4126 
4127         (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4128 
4129         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
4130         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
4131         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 0);
4132         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
4133             MFI_FRAME_DIR_NONE);
4134         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
4135         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0);
4136         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4137             MR_DCMD_CTRL_CACHE_FLUSH);
4138         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0],
4139             MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE);
4140 
4141         cmd->frame_count = 1;
4142 
4143         if (instance->tbolt) {
4144                 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4145         }
4146 
4147         if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
4148                 con_log(CL_ANN1, (CE_WARN,
4149                     "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
4150         }
4151         con_log(CL_ANN1, (CE_CONT, "flush_cache done"));
4152         if (instance->tbolt) {
4153                 return_raid_msg_mfi_pkt(instance, cmd);
4154         } else {
4155                 mrsas_return_mfi_pkt(instance, cmd);
4156         }
4157 
4158 }
4159 
4160 /*
4161  * service_mfi_aen -    Completes an AEN command
4162  * @instance:                   Adapter soft state
4163  * @cmd:                        Command to be completed
4164  *
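      * Logs a sysevent for the received event, (un)configures the affected
      * target for configuration-change events (LD created/deleted, PD
      * inserted/removed, PD state change), and then re-issues the AEN
      * registration frame with the next sequence number.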
4165  */
4166 void
4167 service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
4168 {
4169         uint32_t        seq_num;
4170         struct mrsas_evt_detail *evt_detail =
4171             (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
4172         int             rval = 0;
4173         int             tgt = 0;
4174         uint8_t         dtype;
4175         mrsas_pd_address_t      *pd_addr;
4176         ddi_acc_handle_t                acc_handle;
4177 
4178         con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
4179 
4180         acc_handle = cmd->frame_dma_obj.acc_handle;
4181         cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status);
4182         if (cmd->cmd_status == ENODATA) {
4183                 cmd->cmd_status = 0;
4184         }
4185 
4186         /*
4187          * log the MFI AEN event to the sysevent queue so that
4188          * applications will be notified
4189          */
4190         if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
4191             NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
4192                 int     instance_no = ddi_get_instance(instance->dip);
4193                 con_log(CL_ANN, (CE_WARN,
4194                     "mr_sas%d: Failed to log AEN event", instance_no));
4195         }
4196         /*
4197          * Check for any ld devices that have changed state, i.e. gone
4198          * online or offline.
4199          */
4200         con_log(CL_ANN1, (CE_CONT,
4201             "AEN: code = %x class = %x locale = %x args = %x",
4202             ddi_get32(acc_handle, &evt_detail->code),
4203             evt_detail->cl.members.class,
4204             ddi_get16(acc_handle, &evt_detail->cl.members.locale),
4205             ddi_get8(acc_handle, &evt_detail->arg_type)));
4206 
4207         switch (ddi_get32(acc_handle, &evt_detail->code)) {
4208         case MR_EVT_CFG_CLEARED: {
4209                 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
4210                         if (instance->mr_ld_list[tgt].dip != NULL) {
4211                                 mutex_enter(&instance->config_dev_mtx);
4212                                 instance->mr_ld_list[tgt].flag =
4213                                     (uint8_t)~MRDRV_TGT_VALID;
4214                                 mutex_exit(&instance->config_dev_mtx);
4215                                 rval = mrsas_service_evt(instance, tgt, 0,
4216                                     MRSAS_EVT_UNCONFIG_TGT, NULL);
4217                                 con_log(CL_ANN1, (CE_WARN,
4218                                     "mr_sas: CFG CLEARED AEN rval = %d "
4219                                     "tgt id = %d", rval, tgt));
4220                         }
4221                 }
4222                 break;
4223         }
4224 
4225         case MR_EVT_LD_DELETED: {
4226                 tgt = ddi_get16(acc_handle, &evt_detail->args.ld.target_id);
4227                 mutex_enter(&instance->config_dev_mtx);
4228                 instance->mr_ld_list[tgt].flag = (uint8_t)~MRDRV_TGT_VALID;
4229                 mutex_exit(&instance->config_dev_mtx);
4230                 rval = mrsas_service_evt(instance,
4231                     ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
4232                     MRSAS_EVT_UNCONFIG_TGT, NULL);
4233                 con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d "
4234                     "tgt id = %d index = %d", rval,
4235                     ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
4236                     ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
4237                 break;
4238         } /* End of MR_EVT_LD_DELETED */
4239 
4240         case MR_EVT_LD_CREATED: {
4241                 rval = mrsas_service_evt(instance,
4242                     ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
4243                     MRSAS_EVT_CONFIG_TGT, NULL);
4244                 con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d "
4245                     "tgt id = %d index = %d", rval,
4246                     ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
4247                     ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
4248                 break;
4249         } /* End of MR_EVT_LD_CREATED */
4250 
4251         case MR_EVT_PD_REMOVED_EXT: {
4252                 if (instance->tbolt || instance->skinny) {
4253                         pd_addr = &evt_detail->args.pd_addr;
4254                         dtype = pd_addr->scsi_dev_type;
4255                         con_log(CL_DLEVEL1, (CE_NOTE,
4256                             " MR_EVT_PD_REMOVED_EXT: dtype = %x,"
4257                             " arg_type = %d ", dtype, evt_detail->arg_type));
4258                         tgt = ddi_get16(acc_handle,
4259                             &evt_detail->args.pd.device_id);
4260                         mutex_enter(&instance->config_dev_mtx);
4261                         instance->mr_tbolt_pd_list[tgt].flag =
4262                             (uint8_t)~MRDRV_TGT_VALID;
4263                         mutex_exit(&instance->config_dev_mtx);
4264                         rval = mrsas_service_evt(instance, ddi_get16(
4265                             acc_handle, &evt_detail->args.pd.device_id),
4266                             1, MRSAS_EVT_UNCONFIG_TGT, NULL);
4267                         con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
4268                             " rval = %d tgt id = %d ", rval,
4269                             ddi_get16(acc_handle,
4270                             &evt_detail->args.pd.device_id)));
4271                 }
4272                 break;
4273         } /* End of MR_EVT_PD_REMOVED_EXT */
4274 
4275         case MR_EVT_PD_INSERTED_EXT: {
4276                 if (instance->tbolt || instance->skinny) {
4277                         rval = mrsas_service_evt(instance,
4278                             ddi_get16(acc_handle,
4279                             &evt_detail->args.pd.device_id),
4280                             1, MRSAS_EVT_CONFIG_TGT, NULL);
4281                         con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_INSERTED_EXT:"
4282                             " rval = %d tgt id = %d ", rval,
4283                             ddi_get16(acc_handle,
4284                             &evt_detail->args.pd.device_id)));
4285                 }
4286                 break;
4287         } /* End of MR_EVT_PD_INSERTED_EXT */
4288 
4289         case MR_EVT_PD_STATE_CHANGE: {
4290                 if (instance->tbolt || instance->skinny) {
4291                         tgt = ddi_get16(acc_handle,
4292                             &evt_detail->args.pd.device_id);
4293                         if ((evt_detail->args.pd_state.prevState ==
4294                             PD_SYSTEM) &&
4295                             (evt_detail->args.pd_state.newState != PD_SYSTEM)) {
4296                                 mutex_enter(&instance->config_dev_mtx);
4297                                 instance->mr_tbolt_pd_list[tgt].flag =
4298                                     (uint8_t)~MRDRV_TGT_VALID;
4299                                 mutex_exit(&instance->config_dev_mtx);
4300                                 rval = mrsas_service_evt(instance,
4301                                     ddi_get16(acc_handle,
4302                                     &evt_detail->args.pd.device_id),
4303                                     1, MRSAS_EVT_UNCONFIG_TGT, NULL);
4304                                 con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
4305                                     " rval = %d tgt id = %d ", rval,
4306                                     ddi_get16(acc_handle,
4307                                     &evt_detail->args.pd.device_id)));
4308                                 break;
4309                         }
4310                         if ((evt_detail->args.pd_state.prevState
4311                             == UNCONFIGURED_GOOD) &&
4312                             (evt_detail->args.pd_state.newState == PD_SYSTEM)) {
4313                                 rval = mrsas_service_evt(instance,
4314                                     ddi_get16(acc_handle,
4315                                     &evt_detail->args.pd.device_id),
4316                                     1, MRSAS_EVT_CONFIG_TGT, NULL);
4317                                 con_log(CL_ANN1, (CE_WARN,
4318                                     "mr_sas: PD_INSERTED: rval = %d "
4319                                     " tgt id = %d ", rval,
4320                                     ddi_get16(acc_handle,
4321                                     &evt_detail->args.pd.device_id)));
4322                                 break;
4323                         }
4324                 }
4325                 break;
4326         }
4327 
4328         } /* End of Main Switch */
4329 
4330         /* get copy of seq_num and class/locale for re-registration */
4331         seq_num = ddi_get32(acc_handle, &evt_detail->seq_num);
4332         seq_num++;
4333         (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
4334             sizeof (struct mrsas_evt_detail));
4335 
4336         ddi_put8(acc_handle, &cmd->frame->dcmd.cmd_status, 0x0);
4337         ddi_put32(acc_handle, &cmd->frame->dcmd.mbox.w[0], seq_num);
4338 
4339         instance->aen_seq_num = seq_num;
4340 
4341         cmd->frame_count = 1;
4342 
4343         cmd->retry_count_for_ocr = 0;
4344         cmd->drv_pkt_time = 0;
4345 
4346         /* Issue the aen registration frame */
4347         instance->func_ptr->issue_cmd(cmd, instance);
4348 }
4349 
4350 /*
4351  * complete_cmd_in_sync_mode -  Completes an internal command
4352  * @instance:                   Adapter soft state
4353  * @cmd:                        Command to be completed
4354  *
4355  * The issue_cmd_in_sync_mode() function waits for a command to complete
4356  * after it issues a command. This function wakes up that waiting routine by
4357  * calling wake_up() on the wait queue.
4358  */
4359 static void
4360 complete_cmd_in_sync_mode(struct mrsas_instance *instance,
4361     struct mrsas_cmd *cmd)
4362 {
4363         cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
4364             &cmd->frame->io.cmd_status);
4365 
4366         cmd->sync_cmd = MRSAS_FALSE;
4367 
4368         con_log(CL_ANN1, (CE_NOTE, "complete_cmd_in_sync_mode called %p \n",
4369             (void *)cmd));
4370 
4371         mutex_enter(&instance->int_cmd_mtx);
4372         if (cmd->cmd_status == ENODATA) {
4373                 cmd->cmd_status = 0;
4374         }
4375         cv_broadcast(&instance->int_cmd_cv);
4376         mutex_exit(&instance->int_cmd_mtx);
4377 
4378 }
4379 
4380 /*
4381  * mrsas_initiate_ocr_if_fw_is_faulty - Initiates OCR if FW status is faulty
4382  * Call this function inside mrsas_softintr.
4383  * @instance:                   Adapter soft state
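      *
      * Reads the FW status register; if the FW is in the FAULT state and
      * online controller reset is not disabled, ADAPTER_RESET_REQUIRED is
      * returned, otherwise ADAPTER_RESET_NOT_REQUIRED.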
4384  */
4385 
4386 static uint32_t
4387 mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *instance)
4388 {
4389         uint32_t        cur_abs_reg_val;
4390         uint32_t        fw_state;
4391 
4392         cur_abs_reg_val =  instance->func_ptr->read_fw_status_reg(instance);
4393         fw_state = cur_abs_reg_val & MFI_STATE_MASK;
4394         if (fw_state == MFI_STATE_FAULT) {
4395                 if (instance->disable_online_ctrl_reset == 1) {
4396                         dev_err(instance->dip, CE_WARN,
4397                             "mrsas_initiate_ocr_if_fw_is_faulty: "
4398                             "FW in Fault state, detected in ISR: "
4399                             "FW doesn't support ocr ");
4400 
4401                         return (ADAPTER_RESET_NOT_REQUIRED);
4402                 } else {
4403                         con_log(CL_ANN, (CE_NOTE,
4404                             "mrsas_initiate_ocr_if_fw_is_faulty: FW in Fault "
4405                             "state, detected in ISR: FW supports ocr "));
4406 
4407                         return (ADAPTER_RESET_REQUIRED);
4408                 }
4409         }
4410 
4411         return (ADAPTER_RESET_NOT_REQUIRED);
4412 }
4413 
4414 /*
4415  * mrsas_softintr - The Software ISR
4416  * @instance:   Adapter soft state
4417  *
4418  * Called directly from the hardware interrupt handler when high-level
4419  * interrupts are not in use; otherwise triggered as a soft interrupt.
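      *
      * Splices the completed command pool onto a private list and completes
      * each command according to its MFI opcode: SCSI/LD I/O commands are
      * completed back to SCSA, internal (sync) commands wake their waiters,
      * and AEN event-wait DCMDs are handed to service_mfi_aen().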
4420  */
4421 static uint_t
4422 mrsas_softintr(struct mrsas_instance *instance)
4423 {
4424         struct scsi_pkt         *pkt;
4425         struct scsa_cmd         *acmd;
4426         struct mrsas_cmd        *cmd;
4427         struct mlist_head       *pos, *next;
4428         mlist_t                 process_list;
4429         struct mrsas_header     *hdr;
4430         struct scsi_arq_status  *arqstat;
4431 
4432         con_log(CL_ANN1, (CE_NOTE, "mrsas_softintr() called."));
4433 
4434         ASSERT(instance);
4435 
4436         mutex_enter(&instance->completed_pool_mtx);
4437 
4438         if (mlist_empty(&instance->completed_pool_list)) {
4439                 mutex_exit(&instance->completed_pool_mtx);
4440                 return (DDI_INTR_CLAIMED);
4441         }
4442 
4443         instance->softint_running = 1;
4444 
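             /*
              * Move the completed commands onto a private list while holding
              * the pool mutex, then drop the mutex before processing them.
              */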
4445         INIT_LIST_HEAD(&process_list);
4446         mlist_splice(&instance->completed_pool_list, &process_list);
4447         INIT_LIST_HEAD(&instance->completed_pool_list);
4448 
4449         mutex_exit(&instance->completed_pool_mtx);
4450 
4451         /* perform all callbacks first, before releasing the SCBs */
4452         mlist_for_each_safe(pos, next, &process_list) {
4453                 cmd = mlist_entry(pos, struct mrsas_cmd, list);
4454 
4455                 /* synchronize the cmd frame so the CPU sees FW updates */
4456                 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
4457                     0, 0, DDI_DMA_SYNC_FORCPU);
4458 
4459                 if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
4460                     DDI_SUCCESS) {
4461                         mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
4462                         ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4463                         con_log(CL_ANN1, (CE_WARN,
4464                             "mrsas_softintr: "
4465                             "FMA check reports DMA handle failure"));
4466                         return (DDI_INTR_CLAIMED);
4467                 }
4468 
4469                 hdr = &cmd->frame->hdr;
4470 
4471                 /* remove the internal command from the process list */
4472                 mlist_del_init(&cmd->list);
4473 
4474                 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
4475                 case MFI_CMD_OP_PD_SCSI:
4476                 case MFI_CMD_OP_LD_SCSI:
4477                 case MFI_CMD_OP_LD_READ:
4478                 case MFI_CMD_OP_LD_WRITE:
4479                         /*
4480                          * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
4481                          * could have been issued either through an
4482                          * IO path or an IOCTL path. If it was via IOCTL,
4483                          * we will send it to internal completion.
4484                          */
4485                         if (cmd->sync_cmd == MRSAS_TRUE) {
4486                                 complete_cmd_in_sync_mode(instance, cmd);
4487                                 break;
4488                         }
4489 
4490                         /* regular commands */
4491                         acmd =  cmd->cmd;
4492                         pkt =   CMD2PKT(acmd);
4493 
4494                         if (acmd->cmd_flags & CFLAG_DMAVALID) {
4495                                 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
4496                                         (void) ddi_dma_sync(acmd->cmd_dmahandle,
4497                                             acmd->cmd_dma_offset,
4498                                             acmd->cmd_dma_len,
4499                                             DDI_DMA_SYNC_FORCPU);
4500                                 }
4501                         }
4502 
4503                         pkt->pkt_reason              = CMD_CMPLT;
4504                         pkt->pkt_statistics  = 0;
4505                         pkt->pkt_state = STATE_GOT_BUS
4506                             | STATE_GOT_TARGET | STATE_SENT_CMD
4507                             | STATE_XFERRED_DATA | STATE_GOT_STATUS;
4508 
4509                         con_log(CL_ANN, (CE_CONT,
4510                             "CDB[0] = %x completed for %s: size %lx context %x",
4511                             pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
4512                             acmd->cmd_dmacount, hdr->context));
4513                         DTRACE_PROBE3(softintr_cdb, uint8_t, pkt->pkt_cdbp[0],
4514                             uint_t, acmd->cmd_cdblen, ulong_t,
4515                             acmd->cmd_dmacount);
4516 
4517                         if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
4518                                 struct scsi_inquiry     *inq;
4519 
4520                                 if (acmd->cmd_dmacount != 0) {
4521                                         bp_mapin(acmd->cmd_buf);
4522                                         inq = (struct scsi_inquiry *)
4523                                             acmd->cmd_buf->b_un.b_addr;
4524 
4525                                         if (hdr->cmd_status == MFI_STAT_OK) {
4526                                                 display_scsi_inquiry(
4527                                                     (caddr_t)inq);
4528                                         }
4529                                 }
4530                         }
4531 
4532                         DTRACE_PROBE2(softintr_done, uint8_t, hdr->cmd,
4533                             uint8_t, hdr->cmd_status);
4534 
4535                         switch (hdr->cmd_status) {
4536                         case MFI_STAT_OK:
4537                                 pkt->pkt_scbp[0] = STATUS_GOOD;
4538                                 break;
4539                         case MFI_STAT_LD_CC_IN_PROGRESS:
4540                         case MFI_STAT_LD_RECON_IN_PROGRESS:
4541                                 pkt->pkt_scbp[0] = STATUS_GOOD;
4542                                 break;
4543                         case MFI_STAT_LD_INIT_IN_PROGRESS:
4544                                 con_log(CL_ANN,
4545                                     (CE_WARN, "Initialization in Progress"));
4546                                 pkt->pkt_reason      = CMD_TRAN_ERR;
4547 
4548                                 break;
4549                         case MFI_STAT_SCSI_DONE_WITH_ERROR:
4550                                 con_log(CL_ANN, (CE_CONT, "scsi_done error"));
4551 
4552                                 pkt->pkt_reason      = CMD_CMPLT;
4553                                 ((struct scsi_status *)
4554                                     pkt->pkt_scbp)->sts_chk = 1;
4555 
4556                                 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
4557                                         con_log(CL_ANN,
4558                                             (CE_WARN, "TEST_UNIT_READY fail"));
4559                                 } else {
4560                                         pkt->pkt_state |= STATE_ARQ_DONE;
4561                                         arqstat = (void *)(pkt->pkt_scbp);
4562                                         arqstat->sts_rqpkt_reason = CMD_CMPLT;
4563                                         arqstat->sts_rqpkt_resid = 0;
4564                                         arqstat->sts_rqpkt_state |=
4565                                             STATE_GOT_BUS | STATE_GOT_TARGET
4566                                             | STATE_SENT_CMD
4567                                             | STATE_XFERRED_DATA;
4568                                         *(uint8_t *)&arqstat->sts_rqpkt_status =
4569                                             STATUS_GOOD;
4570                                         ddi_rep_get8(
4571                                             cmd->frame_dma_obj.acc_handle,
4572                                             (uint8_t *)
4573                                             &(arqstat->sts_sensedata),
4574                                             cmd->sense,
4575                                             sizeof (struct scsi_extended_sense),
4576                                             DDI_DEV_AUTOINCR);
4577                                 }
4578                                 break;
4579                         case MFI_STAT_LD_OFFLINE:
4580                         case MFI_STAT_DEVICE_NOT_FOUND:
4581                                 con_log(CL_ANN, (CE_CONT,
4582                                     "mrsas_softintr: device not found error"));
4583                                 pkt->pkt_reason      = CMD_DEV_GONE;
4584                                 pkt->pkt_statistics  = STAT_DISCON;
4585                                 break;
4586                         case MFI_STAT_LD_LBA_OUT_OF_RANGE:
4587                                 pkt->pkt_state |= STATE_ARQ_DONE;
4588                                 pkt->pkt_reason      = CMD_CMPLT;
4589                                 ((struct scsi_status *)
4590                                     pkt->pkt_scbp)->sts_chk = 1;
4591 
4592                                 arqstat = (void *)(pkt->pkt_scbp);
4593                                 arqstat->sts_rqpkt_reason = CMD_CMPLT;
4594                                 arqstat->sts_rqpkt_resid = 0;
4595                                 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
4596                                     | STATE_GOT_TARGET | STATE_SENT_CMD
4597                                     | STATE_XFERRED_DATA;
4598                                 *(uint8_t *)&arqstat->sts_rqpkt_status =
4599                                     STATUS_GOOD;
4600 
4601                                 arqstat->sts_sensedata.es_valid = 1;
4602                                 arqstat->sts_sensedata.es_key =
4603                                     KEY_ILLEGAL_REQUEST;
4604                                 arqstat->sts_sensedata.es_class =
4605                                     CLASS_EXTENDED_SENSE;
4606 
4607                                 /*
4608                                  * LOGICAL BLOCK ADDRESS OUT OF RANGE:
4609                                  * ASC: 0x21; ASCQ: 0x00.
4610                                  */
4611                                 arqstat->sts_sensedata.es_add_code = 0x21;
4612                                 arqstat->sts_sensedata.es_qual_code = 0x00;
4613 
4614                                 break;
4615 
4616                         default:
4617                                 con_log(CL_ANN, (CE_CONT, "Unknown status!"));
4618                                 pkt->pkt_reason      = CMD_TRAN_ERR;
4619 
4620                                 break;
4621                         }
4622 
4623                         atomic_add_16(&instance->fw_outstanding, (-1));
4624 
4625                         (void) mrsas_common_check(instance, cmd);
4626 
4627                         if (acmd->cmd_dmahandle) {
4628                                 if (mrsas_check_dma_handle(
4629                                     acmd->cmd_dmahandle) != DDI_SUCCESS) {
4630                                         ddi_fm_service_impact(instance->dip,
4631                                             DDI_SERVICE_UNAFFECTED);
4632                                         pkt->pkt_reason = CMD_TRAN_ERR;
4633                                         pkt->pkt_statistics = 0;
4634                                 }
4635                         }
4636 
4637                         mrsas_return_mfi_pkt(instance, cmd);
4638 
4639                         /* Call the callback routine */
4640                         if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
4641                             pkt->pkt_comp) {
4642                                 (*pkt->pkt_comp)(pkt);
4643                         }
4644 
4645                         break;
4646 
4647                 case MFI_CMD_OP_SMP:
4648                 case MFI_CMD_OP_STP:
4649                         complete_cmd_in_sync_mode(instance, cmd);
4650                         break;
4651 
4652                 case MFI_CMD_OP_DCMD:
4653                         /* see if got an event notification */
4654                         if (ddi_get32(cmd->frame_dma_obj.acc_handle,
4655                             &cmd->frame->dcmd.opcode) ==
4656                             MR_DCMD_CTRL_EVENT_WAIT) {
4657                                 if ((instance->aen_cmd == cmd) &&
4658                                     (instance->aen_cmd->abort_aen)) {
4659                                         con_log(CL_ANN, (CE_WARN,
4660                                             "mrsas_softintr: "
4661                                             "aborted_aen returned"));
4662                                 } else {
4663                                         atomic_add_16(&instance->fw_outstanding,
4664                                             (-1));
4665                                         service_mfi_aen(instance, cmd);
4666                                 }
4667                         } else {
4668                                 complete_cmd_in_sync_mode(instance, cmd);
4669                         }
4670 
4671                         break;
4672 
4673                 case MFI_CMD_OP_ABORT:
4674                         con_log(CL_ANN, (CE_NOTE, "MFI_CMD_OP_ABORT complete"));
4675                         /*
4676                          * MFI_CMD_OP_ABORT successfully completed
4677                          * in the synchronous mode
4678                          */
4679                         complete_cmd_in_sync_mode(instance, cmd);
4680                         break;
4681 
4682                 default:
4683                         mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
4684                         ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4685 
4686                         if (cmd->pkt != NULL) {
4687                                 pkt = cmd->pkt;
4688                                 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
4689                                     pkt->pkt_comp) {
4690 
4691                                         con_log(CL_ANN1, (CE_CONT, "posting to "
4692                                             "scsa cmd %p index %x pkt %p"
4693                                             "time %llx, default ", (void *)cmd,
4694                                             cmd->index, (void *)pkt,
4695                                             gethrtime()));
4696 
4697                                         (*pkt->pkt_comp)(pkt);
4698 
4699                                 }
4700                         }
4701                         con_log(CL_ANN, (CE_WARN, "Cmd type unknown !"));
4702                         break;
4703                 }
4704         }
4705 
4706         instance->softint_running = 0;
4707 
4708         return (DDI_INTR_CLAIMED);
4709 }
4710 
4711 /*
4712  * mrsas_alloc_dma_obj
4713  *
4714  * Allocate the memory and other resources for a dma object.
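      * The caller fills in obj->size and obj->dma_attr before calling.  On
      * success the buffer, handles and first DMA cookie are stored in *obj
      * and the cookie count is returned; on failure -1 is returned.
      *
      * Typical usage (see get_seq_num() above):
      *
      *      dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info);
      *      dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
      *      dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
      *      if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
      *          (uchar_t)DDI_STRUCTURE_LE_ACC) != 1)
      *              return (DDI_FAILURE);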
4715  */
4716 int
4717 mrsas_alloc_dma_obj(struct mrsas_instance *instance, dma_obj_t *obj,
4718     uchar_t endian_flags)
4719 {
4720         int     i;
4721         size_t  alen = 0;
4722         uint_t  cookie_cnt;
4723         struct ddi_device_acc_attr tmp_endian_attr;
4724 
4725         tmp_endian_attr = endian_attr;
4726         tmp_endian_attr.devacc_attr_endian_flags = endian_flags;
4727         tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4728 
4729         i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
4730             DDI_DMA_SLEEP, NULL, &obj->dma_handle);
4731         if (i != DDI_SUCCESS) {
4732 
4733                 switch (i) {
4734                 case DDI_DMA_BADATTR:
4735                         con_log(CL_ANN, (CE_WARN,
4736                             "Failed ddi_dma_alloc_handle - Bad attribute"));
4737                         break;
4738                 case DDI_DMA_NORESOURCES:
4739                         con_log(CL_ANN, (CE_WARN,
4740                             "Failed ddi_dma_alloc_handle - No Resources"));
4741                         break;
4742                 default:
4743                         con_log(CL_ANN, (CE_WARN,
4744                             "Failed ddi_dma_alloc_handle: "
4745                             "unknown status %d", i));
4746                         break;
4747                 }
4748 
4749                 return (-1);
4750         }
4751 
4752         if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
4753             DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
4754             &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
4755             alen < obj->size) {
4756 
4757                 ddi_dma_free_handle(&obj->dma_handle);
4758 
4759                 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
4760 
4761                 return (-1);
4762         }
4763 
4764         if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
4765             obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
4766             NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {
4767 
4768                 ddi_dma_mem_free(&obj->acc_handle);
4769                 ddi_dma_free_handle(&obj->dma_handle);
4770 
4771                 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
4772 
4773                 return (-1);
4774         }
4775 
4776         if (mrsas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
4777                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4778                 return (-1);
4779         }
4780 
4781         if (mrsas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
4782                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4783                 return (-1);
4784         }
4785 
4786         return (cookie_cnt);
4787 }
4788 
4789 /*
4790  * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
4791  *
4792  * De-allocate the memory and other resources for a dma object, which must
4793  * have been allocated by a previous call to mrsas_alloc_dma_obj().
4794  */
4795 int
4796 mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj)
4797 {
4798 
4799         if ((obj.dma_handle == NULL) || (obj.acc_handle == NULL)) {
4800                 return (DDI_SUCCESS);
4801         }
4802 
4803         /*
4804          * NOTE: The check-handle functions below fail if *_handle == NULL;
4805          * the NULL-handle case was already handled by the check above.
4806          */
4807         if (mrsas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
4808                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4809                 return (DDI_FAILURE);
4810         }
4811 
4812         if (mrsas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
4813                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4814                 return (DDI_FAILURE);
4815         }
4816 
4817         (void) ddi_dma_unbind_handle(obj.dma_handle);
4818         ddi_dma_mem_free(&obj.acc_handle);
4819         ddi_dma_free_handle(&obj.dma_handle);
4820         obj.acc_handle = NULL;
4821         return (DDI_SUCCESS);
4822 }
4823 
4824 /*
4825  * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
4826  * int, int (*)())
4827  *
4828  * Allocate dma resources for a new scsi command
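      *
      * Sets the transfer direction from the buf's B_READ flag, allocates a
      * DMA handle, binds the buffer (using partial mappings and multiple DMA
      * windows when allowed), collects up to instance->max_num_sge cookies
      * into acmd->cmd_dmacookies[], and sets pkt->pkt_resid to any residual.
      * Returns DDI_SUCCESS or DDI_FAILURE (with bioerror() set on bp).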
4829  */
4830 int
4831 mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt,
4832     struct buf *bp, int flags, int (*callback)())
4833 {
4834         int     dma_flags;
4835         int     (*cb)(caddr_t);
4836         int     i;
4837 
4838         ddi_dma_attr_t  tmp_dma_attr = mrsas_generic_dma_attr;
4839         struct scsa_cmd *acmd = PKT2CMD(pkt);
4840 
4841         acmd->cmd_buf = bp;
4842 
4843         if (bp->b_flags & B_READ) {
4844                 acmd->cmd_flags &= ~CFLAG_DMASEND;
4845                 dma_flags = DDI_DMA_READ;
4846         } else {
4847                 acmd->cmd_flags |= CFLAG_DMASEND;
4848                 dma_flags = DDI_DMA_WRITE;
4849         }
4850 
4851         if (flags & PKT_CONSISTENT) {
4852                 acmd->cmd_flags |= CFLAG_CONSISTENT;
4853                 dma_flags |= DDI_DMA_CONSISTENT;
4854         }
4855 
4856         if (flags & PKT_DMA_PARTIAL) {
4857                 dma_flags |= DDI_DMA_PARTIAL;
4858         }
4859 
4860         dma_flags |= DDI_DMA_REDZONE;
4861 
4862         cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
4863 
4864         tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
4865         tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
4866         if (instance->tbolt) {
4867                 /* OCR-RESET FIX */
4868                 tmp_dma_attr.dma_attr_count_max =
4869                     (U64)mrsas_tbolt_max_cap_maxxfer;  /* limit to 256K */
4870                 tmp_dma_attr.dma_attr_maxxfer =
4871                     (U64)mrsas_tbolt_max_cap_maxxfer;  /* limit to 256K */
4872         }
4873 
4874         if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
4875             cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
4876                 switch (i) {
4877                 case DDI_DMA_BADATTR:
4878                         bioerror(bp, EFAULT);
4879                         return (DDI_FAILURE);
4880 
4881                 case DDI_DMA_NORESOURCES:
4882                         bioerror(bp, 0);
4883                         return (DDI_FAILURE);
4884 
4885                 default:
4886                         con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
4887                             "impossible result (0x%x)", i));
4888                         bioerror(bp, EFAULT);
4889                         return (DDI_FAILURE);
4890                 }
4891         }
4892 
4893         i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
4894             cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);
4895 
4896         switch (i) {
4897         case DDI_DMA_PARTIAL_MAP:
4898                 if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
4899                         con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
4900                             "DDI_DMA_PARTIAL_MAP impossible"));
4901                         goto no_dma_cookies;
4902                 }
4903 
4904                 if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
4905                     DDI_FAILURE) {
4906                         con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed"));
4907                         goto no_dma_cookies;
4908                 }
4909 
4910                 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
4911                     &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
4912                     &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
4913                     DDI_FAILURE) {
4914 
4915                         con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed"));
4916                         goto no_dma_cookies;
4917                 }
4918 
4919                 goto get_dma_cookies;
4920         case DDI_DMA_MAPPED:
4921                 acmd->cmd_nwin = 1;
4922                 acmd->cmd_dma_len = 0;
4923                 acmd->cmd_dma_offset = 0;
4924 
4925 get_dma_cookies:
4926                 i = 0;
4927                 acmd->cmd_dmacount = 0;
4928                 for (;;) {
4929                         acmd->cmd_dmacount +=
4930                             acmd->cmd_dmacookies[i++].dmac_size;
4931 
4932                         if (i == instance->max_num_sge ||
4933                             i == acmd->cmd_ncookies)
4934                                 break;
4935 
4936                         ddi_dma_nextcookie(acmd->cmd_dmahandle,
4937                             &acmd->cmd_dmacookies[i]);
4938                 }
4939 
4940                 acmd->cmd_cookie = i;
4941                 acmd->cmd_cookiecnt = i;
4942 
4943                 acmd->cmd_flags |= CFLAG_DMAVALID;
4944 
4945                 if (bp->b_bcount >= acmd->cmd_dmacount) {
4946                         pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
4947                 } else {
4948                         pkt->pkt_resid = 0;
4949                 }
4950 
4951                 return (DDI_SUCCESS);
4952         case DDI_DMA_NORESOURCES:
4953                 bioerror(bp, 0);
4954                 break;
4955         case DDI_DMA_NOMAPPING:
4956                 bioerror(bp, EFAULT);
4957                 break;
4958         case DDI_DMA_TOOBIG:
4959                 bioerror(bp, EINVAL);
4960                 break;
4961         case DDI_DMA_INUSE:
4962                 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
4963                     " DDI_DMA_INUSE impossible"));
4964                 break;
4965         default:
4966                 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
4967                     "impossible result (0x%x)", i));
4968                 break;
4969         }
4970 
4971 no_dma_cookies:
4972         ddi_dma_free_handle(&acmd->cmd_dmahandle);
4973         acmd->cmd_dmahandle = NULL;
4974         acmd->cmd_flags &= ~CFLAG_DMAVALID;
4975         return (DDI_FAILURE);
4976 }
4977 
4978 /*
4979  * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *)
4980  *
4981  * move dma resources to next dma window
4982  *
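      * If all cookies in the current DMA window have been consumed, advance
      * to the next window with ddi_dma_getwin(); otherwise continue with the
      * next cookie of the current window.  Up to instance->max_num_sge
      * cookies are collected and pkt->pkt_resid is updated.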
4983  */
4984 int
4985 mrsas_dma_move(struct mrsas_instance *instance, struct scsi_pkt *pkt,
4986     struct buf *bp)
4987 {
4988         int     i = 0;
4989 
4990         struct scsa_cmd *acmd = PKT2CMD(pkt);
4991 
4992         /*
4993          * If there are no more cookies remaining in this window,
4994          * must move to the next window first.
4995          */
4996         if (acmd->cmd_cookie == acmd->cmd_ncookies) {
4997                 if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
4998                         return (DDI_SUCCESS);
4999                 }
5000 
5001                 /* at last window, cannot move */
5002                 if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
5003                         return (DDI_FAILURE);
5004                 }
5005 
5006                 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
5007                     &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
5008                     &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
5009                     DDI_FAILURE) {
5010                         return (DDI_FAILURE);
5011                 }
5012 
5013                 acmd->cmd_cookie = 0;
5014         } else {
5015                 /* still more cookies in this window - get the next one */
5016                 ddi_dma_nextcookie(acmd->cmd_dmahandle,
5017                     &acmd->cmd_dmacookies[0]);
5018         }
5019 
5020         /* get remaining cookies in this window, up to our maximum */
5021         for (;;) {
5022                 acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
5023                 acmd->cmd_cookie++;
5024 
5025                 if (i == instance->max_num_sge ||
5026                     acmd->cmd_cookie == acmd->cmd_ncookies) {
5027                         break;
5028                 }
5029 
5030                 ddi_dma_nextcookie(acmd->cmd_dmahandle,
5031                     &acmd->cmd_dmacookies[i]);
5032         }
5033 
5034         acmd->cmd_cookiecnt = i;
5035 
5036         if (bp->b_bcount >= acmd->cmd_dmacount) {
5037                 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
5038         } else {
5039                 pkt->pkt_resid = 0;
5040         }
5041 
5042         return (DDI_SUCCESS);
5043 }
5044 
5045 /*
5046  * build_cmd
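      *
      * Translate a SCSA packet into an MFI frame: read/write CDBs addressed
      * to a logical drive become MFI_CMD_OP_LD_READ/WRITE I/O frames, and
      * everything else becomes an LD/PD SCSI pass-through frame.  The DMA
      * cookies are converted into a 64-bit (or IEEE) SGL and the total frame
      * count is computed.  Returns the prepared command, or NULL if no MFI
      * packet is available or the command was completed internally
      * (*cmd_done is set, e.g. for emulated MODE SENSE pages 3 and 4).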
5047  */
5048 static struct mrsas_cmd *
5049 build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
5050     struct scsi_pkt *pkt, uchar_t *cmd_done)
5051 {
5052         uint16_t        flags = 0;
5053         uint32_t        i;
5054         uint32_t        sge_bytes;
5055         uint32_t        tmp_data_xfer_len;
5056         ddi_acc_handle_t acc_handle;
5057         struct mrsas_cmd                *cmd;
5058         struct mrsas_sge64              *mfi_sgl;
5059         struct mrsas_sge_ieee           *mfi_sgl_ieee;
5060         struct scsa_cmd                 *acmd = PKT2CMD(pkt);
5061         struct mrsas_pthru_frame        *pthru;
5062         struct mrsas_io_frame           *ldio;
5063 
5064         /* find out if this is a logical or physical drive command. */
5065         acmd->islogical = MRDRV_IS_LOGICAL(ap);
5066         acmd->device_id = MAP_DEVICE_ID(instance, ap);
5067         *cmd_done = 0;
5068 
5069         /* get the command packet */
5070         if (!(cmd = mrsas_get_mfi_pkt(instance))) {
5071                 DTRACE_PROBE2(build_cmd_mfi_err, uint16_t,
5072                     instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
5073                 return (NULL);
5074         }
5075 
5076         acc_handle = cmd->frame_dma_obj.acc_handle;
5077 
5078         /* Clear the frame buffer and assign back the context id */
5079         (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
5080         ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index);
5081 
5082         cmd->pkt = pkt;
5083         cmd->cmd = acmd;
5084         DTRACE_PROBE3(build_cmds, uint8_t, pkt->pkt_cdbp[0],
5085             ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len);
5086 
5087         /* lets get the command directions */
5088         if (acmd->cmd_flags & CFLAG_DMASEND) {
5089                 flags = MFI_FRAME_DIR_WRITE;
5090 
5091                 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
5092                         (void) ddi_dma_sync(acmd->cmd_dmahandle,
5093                             acmd->cmd_dma_offset, acmd->cmd_dma_len,
5094                             DDI_DMA_SYNC_FORDEV);
5095                 }
5096         } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
5097                 flags = MFI_FRAME_DIR_READ;
5098 
5099                 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
5100                         (void) ddi_dma_sync(acmd->cmd_dmahandle,
5101                             acmd->cmd_dma_offset, acmd->cmd_dma_len,
5102                             DDI_DMA_SYNC_FORCPU);
5103                 }
5104         } else {
5105                 flags = MFI_FRAME_DIR_NONE;
5106         }
5107 
5108         if (instance->flag_ieee) {
5109                 flags |= MFI_FRAME_IEEE;
5110         }
5111         flags |= MFI_FRAME_SGL64;
5112 
5113         switch (pkt->pkt_cdbp[0]) {
5114 
5115         /*
5116          * case SCMD_SYNCHRONIZE_CACHE:
5117          *      flush_cache(instance);
5118          *      mrsas_return_mfi_pkt(instance, cmd);
5119          *      *cmd_done = 1;
5120          *
5121          *      return (NULL);
5122          */
5123 
5124         case SCMD_READ:
5125         case SCMD_WRITE:
5126         case SCMD_READ_G1:
5127         case SCMD_WRITE_G1:
5128         case SCMD_READ_G4:
5129         case SCMD_WRITE_G4:
5130         case SCMD_READ_G5:
5131         case SCMD_WRITE_G5:
5132                 if (acmd->islogical) {
5133                         ldio = (struct mrsas_io_frame *)cmd->frame;
5134 
5135                         /*
5136                          * prepare the Logical IO frame:
5137                          * 2nd bit is zero for all read cmds
5138                          */
5139                         ddi_put8(acc_handle, &ldio->cmd,
5140                             (pkt->pkt_cdbp[0] & 0x02) ? MFI_CMD_OP_LD_WRITE
5141                             : MFI_CMD_OP_LD_READ);
5142                         ddi_put8(acc_handle, &ldio->cmd_status, 0x0);
5143                         ddi_put8(acc_handle, &ldio->scsi_status, 0x0);
5144                         ddi_put8(acc_handle, &ldio->target_id, acmd->device_id);
5145                         ddi_put16(acc_handle, &ldio->timeout, 0);
5146                         ddi_put8(acc_handle, &ldio->reserved_0, 0);
5147                         ddi_put16(acc_handle, &ldio->pad_0, 0);
5148                         ddi_put16(acc_handle, &ldio->flags, flags);
5149 
5150                         /* Initialize sense Information */
5151                         bzero(cmd->sense, SENSE_LENGTH);
5152                         ddi_put8(acc_handle, &ldio->sense_len, SENSE_LENGTH);
5153                         ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_hi, 0);
5154                         ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_lo,
5155                             cmd->sense_phys_addr);
5156                         ddi_put32(acc_handle, &ldio->start_lba_hi, 0);
5157                         ddi_put8(acc_handle, &ldio->access_byte,
5158                             (acmd->cmd_cdblen != 6) ? pkt->pkt_cdbp[1] : 0);
5159                         ddi_put8(acc_handle, &ldio->sge_count,
5160                             acmd->cmd_cookiecnt);
5161                         if (instance->flag_ieee) {
5162                                 mfi_sgl_ieee =
5163                                     (struct mrsas_sge_ieee *)&ldio->sgl;
5164                         } else {
5165                                 mfi_sgl = (struct mrsas_sge64   *)&ldio->sgl;
5166                         }
5167 
5168                         (void) ddi_get32(acc_handle, &ldio->context);
5169 
5170                         if (acmd->cmd_cdblen == CDB_GROUP0) {
5171                                 /* 6-byte cdb */
5172                                 ddi_put32(acc_handle, &ldio->lba_count, (
5173                                     (uint16_t)(pkt->pkt_cdbp[4])));
5174 
5175                                 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5176                                     ((uint32_t)(pkt->pkt_cdbp[3])) |
5177                                     ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
5178                                     ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
5179                                     << 16)));
5180                         } else if (acmd->cmd_cdblen == CDB_GROUP1) {
5181                                 /* 10-byte cdb */
5182                                 ddi_put32(acc_handle, &ldio->lba_count, (
5183                                     ((uint16_t)(pkt->pkt_cdbp[8])) |
5184                                     ((uint16_t)(pkt->pkt_cdbp[7]) << 8)));
5185 
5186                                 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5187                                     ((uint32_t)(pkt->pkt_cdbp[5])) |
5188                                     ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
5189                                     ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
5190                                     ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
5191                         } else if (acmd->cmd_cdblen == CDB_GROUP5) {
5192                                 /* 12-byte cdb */
5193                                 ddi_put32(acc_handle, &ldio->lba_count, (
5194                                     ((uint32_t)(pkt->pkt_cdbp[9])) |
5195                                     ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
5196                                     ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
5197                                     ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));
5198 
5199                                 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5200                                     ((uint32_t)(pkt->pkt_cdbp[5])) |
5201                                     ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
5202                                     ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
5203                                     ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
5204                         } else if (acmd->cmd_cdblen == CDB_GROUP4) {
5205                                 /* 16-byte cdb */
5206                                 ddi_put32(acc_handle, &ldio->lba_count, (
5207                                     ((uint32_t)(pkt->pkt_cdbp[13])) |
5208                                     ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
5209                                     ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
5210                                     ((uint32_t)(pkt->pkt_cdbp[10]) << 24)));
5211 
5212                                 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5213                                     ((uint32_t)(pkt->pkt_cdbp[9])) |
5214                                     ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
5215                                     ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
5216                                     ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));
5217 
5218                                 ddi_put32(acc_handle, &ldio->start_lba_hi, (
5219                                     ((uint32_t)(pkt->pkt_cdbp[5])) |
5220                                     ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
5221                                     ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
5222                                     ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
5223                         }
5224 
5225                         break;
5226                 }
5227                 /* For all non-rd/wr and physical disk cmds */
5228                 /* FALLTHROUGH */
5229         default:
5230 
5231                 switch (pkt->pkt_cdbp[0]) {
5232                 case SCMD_MODE_SENSE:
5233                 case SCMD_MODE_SENSE_G1: {
5234                         union scsi_cdb  *cdbp;
5235                         uint16_t        page_code;
5236 
5237                         cdbp = (void *)pkt->pkt_cdbp;
5238                         page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
5239                         switch (page_code) {
5240                         case 0x3:
5241                         case 0x4:
5242                                 (void) mrsas_mode_sense_build(pkt);
5243                                 mrsas_return_mfi_pkt(instance, cmd);
5244                                 *cmd_done = 1;
5245                                 return (NULL);
5246                         }
5247                         break;
5248                 }
5249                 default:
5250                         break;
5251                 }
5252 
5253                 pthru   = (struct mrsas_pthru_frame *)cmd->frame;
5254 
5255                 /* prepare the DCDB frame */
5256                 ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ?
5257                     MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI);
5258                 ddi_put8(acc_handle, &pthru->cmd_status, 0x0);
5259                 ddi_put8(acc_handle, &pthru->scsi_status, 0x0);
5260                 ddi_put8(acc_handle, &pthru->target_id, acmd->device_id);
5261                 ddi_put8(acc_handle, &pthru->lun, 0);
5262                 ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen);
5263                 ddi_put16(acc_handle, &pthru->timeout, 0);
5264                 ddi_put16(acc_handle, &pthru->flags, flags);
5265                 tmp_data_xfer_len = 0;
5266                 for (i = 0; i < acmd->cmd_cookiecnt; i++) {
5267                         tmp_data_xfer_len += acmd->cmd_dmacookies[i].dmac_size;
5268                 }
5269                 ddi_put32(acc_handle, &pthru->data_xfer_len,
5270                     tmp_data_xfer_len);
5271                 ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt);
5272                 if (instance->flag_ieee) {
5273                         mfi_sgl_ieee = (struct mrsas_sge_ieee *)&pthru->sgl;
5274                 } else {
5275                         mfi_sgl = (struct mrsas_sge64 *)&pthru->sgl;
5276                 }
5277 
5278                 bzero(cmd->sense, SENSE_LENGTH);
5279                 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
5280                 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
5281                 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo,
5282                     cmd->sense_phys_addr);
5283 
5284                 (void) ddi_get32(acc_handle, &pthru->context);
5285                 ddi_rep_put8(acc_handle, (uint8_t *)pkt->pkt_cdbp,
5286                     (uint8_t *)pthru->cdb, acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
5287 
5288                 break;
5289         }
5290 
5291         /* prepare the scatter-gather list for the firmware */
5292         if (instance->flag_ieee) {
5293                 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl_ieee++) {
5294                         ddi_put64(acc_handle, &mfi_sgl_ieee->phys_addr,
5295                             acmd->cmd_dmacookies[i].dmac_laddress);
5296                         ddi_put32(acc_handle, &mfi_sgl_ieee->length,
5297                             acmd->cmd_dmacookies[i].dmac_size);
5298                 }
5299                 sge_bytes = sizeof (struct mrsas_sge_ieee)*acmd->cmd_cookiecnt;
5300         } else {
5301                 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) {
5302                         ddi_put64(acc_handle, &mfi_sgl->phys_addr,
5303                             acmd->cmd_dmacookies[i].dmac_laddress);
5304                         ddi_put32(acc_handle, &mfi_sgl->length,
5305                             acmd->cmd_dmacookies[i].dmac_size);
5306                 }
5307                 sge_bytes = sizeof (struct mrsas_sge64)*acmd->cmd_cookiecnt;
5308         }
5309 
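        /*
         * One frame for the command itself, plus however many additional
         * MRMFI_FRAME_SIZE frames the SGL spills into, capped at 8 frames.
         */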
5310         cmd->frame_count = (sge_bytes / MRMFI_FRAME_SIZE) +
5311             ((sge_bytes % MRMFI_FRAME_SIZE) ? 1 : 0) + 1;
5312 
5313         if (cmd->frame_count >= 8) {
5314                 cmd->frame_count = 8;
5315         }
5316 
5317         return (cmd);
5318 }
5319 
/*
 * wait_for_outstanding -       Wait for all outstanding cmds
 * @instance:                           Adapter soft state
 *
 * This function polls fw_outstanding for up to wait_time iterations,
 * sleeping one millisecond between polls. It returns 1 if one or more
 * I/Os are still pending after that period, and 0 otherwise.
 */
5328 static int
5329 wait_for_outstanding(struct mrsas_instance *instance)
5330 {
5331         int             i;
5332         uint32_t        wait_time = 90;
5333 
5334         for (i = 0; i < wait_time; i++) {
5335                 if (!instance->fw_outstanding) {
5336                         break;
5337                 }
5338 
                drv_usecwait(MILLISEC); /* wait for 1000 usecs */
5340         }
5341 
5342         if (instance->fw_outstanding) {
5343                 return (1);
5344         }
5345 
5346         return (0);
5347 }
5348 
/*
 * issue_mfi_pthru
 *
 * Build an MFI pass-through command from the user-supplied ioctl frame,
 * copy any write data into a kernel DMA buffer, issue the command in
 * sync mode, and copy read data and sense information back to the user.
 */
5352 static int
5353 issue_mfi_pthru(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5354     struct mrsas_cmd *cmd, int mode)
5355 {
5356         void            *ubuf;
5357         uint32_t        kphys_addr = 0;
5358         uint32_t        xferlen = 0;
5359         uint32_t        new_xfer_length = 0;
5360         uint_t          model;
5361         ddi_acc_handle_t        acc_handle = cmd->frame_dma_obj.acc_handle;
5362         dma_obj_t                       pthru_dma_obj;
5363         struct mrsas_pthru_frame        *kpthru;
5364         struct mrsas_pthru_frame        *pthru;
5365         int i;
5366         pthru = &cmd->frame->pthru;
5367         kpthru = (struct mrsas_pthru_frame *)&ioctl->frame[0];
5368 
5369         if (instance->adapterresetinprogress) {
                con_log(CL_ANN1, (CE_WARN, "issue_mfi_pthru: adapter reset "
                    "in progress, returning DDI_FAILURE"));
5372                 return (DDI_FAILURE);
5373         }
5374         model = ddi_model_convert_from(mode & FMODELS);
5375         if (model == DDI_MODEL_ILP32) {
                con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_ILP32"));
5377 
5378                 xferlen = kpthru->sgl.sge32[0].length;
5379 
5380                 ubuf    = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5381         } else {
5382 #ifdef _ILP32
                con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_ILP32"));
5384                 xferlen = kpthru->sgl.sge32[0].length;
5385                 ubuf    = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5386 #else
5387                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP64"));
5388                 xferlen = kpthru->sgl.sge64[0].length;
5389                 ubuf    = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr;
5390 #endif
5391         }
5392 
5393         if (xferlen) {
5394                 /* means IOCTL requires DMA */
5395                 /* allocate the data transfer buffer */
5396                 /* pthru_dma_obj.size = xferlen; */
5397                 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length,
5398                     PAGESIZE);
5399                 pthru_dma_obj.size = new_xfer_length;
5400                 pthru_dma_obj.dma_attr = mrsas_generic_dma_attr;
5401                 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5402                 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5403                 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1;
5404                 pthru_dma_obj.dma_attr.dma_attr_align = 1;
5405 
5406                 /* allocate kernel buffer for DMA */
5407                 if (mrsas_alloc_dma_obj(instance, &pthru_dma_obj,
5408                     (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5409                         con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
5410                             "could not allocate data transfer buffer."));
5411                         return (DDI_FAILURE);
5412                 }
5413                 (void) memset(pthru_dma_obj.buffer, 0, xferlen);
5414 
5415                 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5416                 if (kpthru->flags & MFI_FRAME_DIR_WRITE) {
5417                         for (i = 0; i < xferlen; i++) {
5418                                 if (ddi_copyin((uint8_t *)ubuf+i,
5419                                     (uint8_t *)pthru_dma_obj.buffer+i,
5420                                     1, mode)) {
5421                                         con_log(CL_ANN, (CE_WARN,
5422                                             "issue_mfi_pthru : "
5423                                             "copy from user space failed"));
5424                                         return (DDI_FAILURE);
5425                                 }
5426                         }
5427                 }
5428 
5429                 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address;
5430         }
5431 
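        /* Populate the pass-through frame from the ioctl-supplied frame. */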
5432         ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd);
5433         ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
5434         ddi_put8(acc_handle, &pthru->cmd_status, 0);
5435         ddi_put8(acc_handle, &pthru->scsi_status, 0);
5436         ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id);
5437         ddi_put8(acc_handle, &pthru->lun, kpthru->lun);
5438         ddi_put8(acc_handle, &pthru->cdb_len, kpthru->cdb_len);
5439         ddi_put8(acc_handle, &pthru->sge_count, kpthru->sge_count);
5440         ddi_put16(acc_handle, &pthru->timeout, kpthru->timeout);
5441         ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len);
5442 
5443         ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
        ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo,
            cmd->sense_phys_addr);
5446 
5447         ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb,
5448             pthru->cdb_len, DDI_DEV_AUTOINCR);
5449 
5450         ddi_put16(acc_handle, &pthru->flags, kpthru->flags & ~MFI_FRAME_SGL64);
5451         ddi_put32(acc_handle, &pthru->sgl.sge32[0].length, xferlen);
5452         ddi_put32(acc_handle, &pthru->sgl.sge32[0].phys_addr, kphys_addr);
5453 
5454         cmd->sync_cmd = MRSAS_TRUE;
5455         cmd->frame_count = 1;
5456 
5457         if (instance->tbolt) {
5458                 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5459         }
5460 
5461         if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5462                 con_log(CL_ANN, (CE_WARN,
5463                     "issue_mfi_pthru: fw_ioctl failed"));
5464         } else {
5465                 if (xferlen && kpthru->flags & MFI_FRAME_DIR_READ) {
5466                         for (i = 0; i < xferlen; i++) {
5467                                 if (ddi_copyout(
5468                                     (uint8_t *)pthru_dma_obj.buffer+i,
5469                                     (uint8_t *)ubuf+i, 1, mode)) {
5470                                         con_log(CL_ANN, (CE_WARN,
5471                                             "issue_mfi_pthru : "
5472                                             "copy to user space failed"));
5473                                         return (DDI_FAILURE);
5474                                 }
5475                         }
5476                 }
5477         }
5478 
5479         kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status);
5480         kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status);
5481 
5482         con_log(CL_ANN, (CE_CONT, "issue_mfi_pthru: cmd_status %x, "
5483             "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status));
5484         DTRACE_PROBE3(issue_pthru, uint8_t, kpthru->cmd, uint8_t,
5485             kpthru->cmd_status, uint8_t, kpthru->scsi_status);
5486 
5487         if (kpthru->sense_len) {
5488                 uint_t sense_len = SENSE_LENGTH;
5489                 void *sense_ubuf =
5490                     (void *)(ulong_t)kpthru->sense_buf_phys_addr_lo;
5491                 if (kpthru->sense_len <= SENSE_LENGTH) {
5492                         sense_len = kpthru->sense_len;
5493                 }
5494 
5495                 for (i = 0; i < sense_len; i++) {
5496                         if (ddi_copyout(
5497                             (uint8_t *)cmd->sense+i,
5498                             (uint8_t *)sense_ubuf+i, 1, mode)) {
5499                                 con_log(CL_ANN, (CE_WARN,
5500                                     "issue_mfi_pthru : "
5501                                     "copy to user space failed"));
5502                         }
5503                         con_log(CL_DLEVEL1, (CE_WARN,
5504                             "Copying Sense info sense_buff[%d] = 0x%X",
5505                             i, *((uint8_t *)cmd->sense + i)));
5506                 }
5507         }
5508         (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
5509             DDI_DMA_SYNC_FORDEV);
5510 
5511         if (xferlen) {
5512                 /* free kernel buffer */
5513                 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
5514                         return (DDI_FAILURE);
5515         }
5516 
5517         return (DDI_SUCCESS);
5518 }
5519 
/*
 * issue_mfi_dcmd
 *
 * Build an MFI DCMD from the user-supplied ioctl frame, handle the
 * optional data transfer buffer in both directions, and issue the
 * command in sync mode.
 */
5523 static int
5524 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5525     struct mrsas_cmd *cmd, int mode)
5526 {
5527         void            *ubuf;
5528         uint32_t        kphys_addr = 0;
5529         uint32_t        xferlen = 0;
5530         uint32_t        new_xfer_length = 0;
        uint_t          model;
5532         dma_obj_t       dcmd_dma_obj;
5533         struct mrsas_dcmd_frame *kdcmd;
5534         struct mrsas_dcmd_frame *dcmd;
5535         ddi_acc_handle_t        acc_handle = cmd->frame_dma_obj.acc_handle;
5536         int i;
5537         dcmd = &cmd->frame->dcmd;
5538         kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
5539 
5540         if (instance->adapterresetinprogress) {
                con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: adapter reset "
                    "in progress, returning DDI_FAILURE"));
5543                 return (DDI_FAILURE);
5544         }
5545         model = ddi_model_convert_from(mode & FMODELS);
5546         if (model == DDI_MODEL_ILP32) {
5547                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5548 
5549                 xferlen = kdcmd->sgl.sge32[0].length;
5550 
5551                 ubuf    = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5552         } else {
5553 #ifdef _ILP32
5554                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5555                 xferlen = kdcmd->sgl.sge32[0].length;
5556                 ubuf    = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5557 #else
5558                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_LP64"));
5559                 xferlen = kdcmd->sgl.sge64[0].length;
5560                 ubuf    = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
5561 #endif
5562         }
5563         if (xferlen) {
5564                 /* means IOCTL requires DMA */
5565                 /* allocate the data transfer buffer */
5566                 /* dcmd_dma_obj.size = xferlen; */
5567                 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length,
5568                     PAGESIZE);
5569                 dcmd_dma_obj.size = new_xfer_length;
5570                 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
5571                 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5572                 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5573                 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
5574                 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
5575 
5576                 /* allocate kernel buffer for DMA */
                if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
                    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
                        con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
                            "could not allocate data transfer buffer."));
                        return (DDI_FAILURE);
                }
5584                 (void) memset(dcmd_dma_obj.buffer, 0, xferlen);
5585 
5586                 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5587                 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) {
5588                         for (i = 0; i < xferlen; i++) {
5589                                 if (ddi_copyin((uint8_t *)ubuf + i,
5590                                     (uint8_t *)dcmd_dma_obj.buffer + i,
5591                                     1, mode)) {
5592                                         con_log(CL_ANN, (CE_WARN,
5593                                             "issue_mfi_dcmd : "
5594                                             "copy from user space failed"));
5595                                         return (DDI_FAILURE);
5596                                 }
5597                         }
5598                 }
5599 
5600                 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
5601         }
5602 
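        /* Populate the DCMD frame from the ioctl-supplied frame. */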
5603         ddi_put8(acc_handle, &dcmd->cmd, kdcmd->cmd);
5604         ddi_put8(acc_handle, &dcmd->cmd_status, 0);
5605         ddi_put8(acc_handle, &dcmd->sge_count, kdcmd->sge_count);
5606         ddi_put16(acc_handle, &dcmd->timeout, kdcmd->timeout);
5607         ddi_put32(acc_handle, &dcmd->data_xfer_len, kdcmd->data_xfer_len);
5608         ddi_put32(acc_handle, &dcmd->opcode, kdcmd->opcode);
5609 
5610         ddi_rep_put8(acc_handle, (uint8_t *)kdcmd->mbox.b,
5611             (uint8_t *)dcmd->mbox.b, DCMD_MBOX_SZ, DDI_DEV_AUTOINCR);
5612 
5613         ddi_put16(acc_handle, &dcmd->flags, kdcmd->flags & ~MFI_FRAME_SGL64);
5614         ddi_put32(acc_handle, &dcmd->sgl.sge32[0].length, xferlen);
5615         ddi_put32(acc_handle, &dcmd->sgl.sge32[0].phys_addr, kphys_addr);
5616 
5617         cmd->sync_cmd = MRSAS_TRUE;
5618         cmd->frame_count = 1;
5619 
5620         if (instance->tbolt) {
5621                 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5622         }
5623 
5624         if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5625                 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed"));
5626         } else {
5627                 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
5628                         for (i = 0; i < xferlen; i++) {
5629                                 if (ddi_copyout(
5630                                     (uint8_t *)dcmd_dma_obj.buffer + i,
5631                                     (uint8_t *)ubuf + i,
5632                                     1, mode)) {
5633                                         con_log(CL_ANN, (CE_WARN,
5634                                             "issue_mfi_dcmd : "
5635                                             "copy to user space failed"));
5636                                         return (DDI_FAILURE);
5637                                 }
5638                         }
5639                 }
5640         }
5641 
5642         kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status);
5643         con_log(CL_ANN,
5644             (CE_CONT, "issue_mfi_dcmd: cmd_status %x", kdcmd->cmd_status));
5645         DTRACE_PROBE3(issue_dcmd, uint32_t, kdcmd->opcode, uint8_t,
5646             kdcmd->cmd, uint8_t, kdcmd->cmd_status);
5647 
5648         if (xferlen) {
5649                 /* free kernel buffer */
5650                 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
5651                         return (DDI_FAILURE);
5652         }
5653 
5654         return (DDI_SUCCESS);
5655 }
5656 
/*
 * issue_mfi_smp
 *
 * Build an MFI SMP pass-through command from the user-supplied ioctl
 * frame, using separate DMA buffers for the SMP request and response,
 * and issue it in sync mode.
 */
5660 static int
5661 issue_mfi_smp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5662     struct mrsas_cmd *cmd, int mode)
5663 {
5664         void            *request_ubuf;
5665         void            *response_ubuf;
5666         uint32_t        request_xferlen = 0;
5667         uint32_t        response_xferlen = 0;
5668         uint32_t        new_xfer_length1 = 0;
5669         uint32_t        new_xfer_length2 = 0;
5670         uint_t          model;
5671         dma_obj_t                       request_dma_obj;
5672         dma_obj_t                       response_dma_obj;
5673         ddi_acc_handle_t        acc_handle = cmd->frame_dma_obj.acc_handle;
5674         struct mrsas_smp_frame          *ksmp;
5675         struct mrsas_smp_frame          *smp;
5676         struct mrsas_sge32              *sge32;
5677 #ifndef _ILP32
5678         struct mrsas_sge64              *sge64;
5679 #endif
5680         int i;
5681         uint64_t                        tmp_sas_addr;
5682 
5683         smp = &cmd->frame->smp;
5684         ksmp = (struct mrsas_smp_frame *)&ioctl->frame[0];
5685 
5686         if (instance->adapterresetinprogress) {
                con_log(CL_ANN1, (CE_WARN, "issue_mfi_smp: adapter reset "
                    "in progress, returning DDI_FAILURE"));
5689                 return (DDI_FAILURE);
5690         }
5691         model = ddi_model_convert_from(mode & FMODELS);
5692         if (model == DDI_MODEL_ILP32) {
5693                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5694 
5695                 sge32                   = &ksmp->sgl[0].sge32[0];
5696                 response_xferlen        = sge32[0].length;
5697                 request_xferlen         = sge32[1].length;
5698                 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5699                     "response_xferlen = %x, request_xferlen = %x",
5700                     response_xferlen, request_xferlen));
5701 
5702                 response_ubuf   = (void *)(ulong_t)sge32[0].phys_addr;
5703                 request_ubuf    = (void *)(ulong_t)sge32[1].phys_addr;
5704                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5705                     "response_ubuf = %p, request_ubuf = %p",
5706                     response_ubuf, request_ubuf));
5707         } else {
5708 #ifdef _ILP32
5709                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5710 
5711                 sge32                   = &ksmp->sgl[0].sge32[0];
5712                 response_xferlen        = sge32[0].length;
5713                 request_xferlen         = sge32[1].length;
5714                 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5715                     "response_xferlen = %x, request_xferlen = %x",
5716                     response_xferlen, request_xferlen));
5717 
5718                 response_ubuf   = (void *)(ulong_t)sge32[0].phys_addr;
5719                 request_ubuf    = (void *)(ulong_t)sge32[1].phys_addr;
5720                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5721                     "response_ubuf = %p, request_ubuf = %p",
5722                     response_ubuf, request_ubuf));
5723 #else
5724                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_LP64"));
5725 
5726                 sge64                   = &ksmp->sgl[0].sge64[0];
5727                 response_xferlen        = sge64[0].length;
5728                 request_xferlen         = sge64[1].length;
5729 
5730                 response_ubuf   = (void *)(ulong_t)sge64[0].phys_addr;
5731                 request_ubuf    = (void *)(ulong_t)sge64[1].phys_addr;
5732 #endif
5733         }
5734         if (request_xferlen) {
5735                 /* means IOCTL requires DMA */
5736                 /* allocate the data transfer buffer */
5737                 /* request_dma_obj.size = request_xferlen; */
5738                 MRSAS_GET_BOUNDARY_ALIGNED_LEN(request_xferlen,
5739                     new_xfer_length1, PAGESIZE);
5740                 request_dma_obj.size = new_xfer_length1;
5741                 request_dma_obj.dma_attr = mrsas_generic_dma_attr;
5742                 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5743                 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5744                 request_dma_obj.dma_attr.dma_attr_sgllen = 1;
5745                 request_dma_obj.dma_attr.dma_attr_align = 1;
5746 
5747                 /* allocate kernel buffer for DMA */
5748                 if (mrsas_alloc_dma_obj(instance, &request_dma_obj,
5749                     (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5750                         con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5751                             "could not allocate data transfer buffer."));
5752                         return (DDI_FAILURE);
5753                 }
5754                 (void) memset(request_dma_obj.buffer, 0, request_xferlen);
5755 
5756                 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5757                 for (i = 0; i < request_xferlen; i++) {
5758                         if (ddi_copyin((uint8_t *)request_ubuf + i,
5759                             (uint8_t *)request_dma_obj.buffer + i,
5760                             1, mode)) {
5761                                 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5762                                     "copy from user space failed"));
5763                                 return (DDI_FAILURE);
5764                         }
5765                 }
5766         }
5767 
5768         if (response_xferlen) {
5769                 /* means IOCTL requires DMA */
5770                 /* allocate the data transfer buffer */
5771                 /* response_dma_obj.size = response_xferlen; */
5772                 MRSAS_GET_BOUNDARY_ALIGNED_LEN(response_xferlen,
5773                     new_xfer_length2, PAGESIZE);
5774                 response_dma_obj.size = new_xfer_length2;
5775                 response_dma_obj.dma_attr = mrsas_generic_dma_attr;
5776                 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5777                 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5778                 response_dma_obj.dma_attr.dma_attr_sgllen = 1;
5779                 response_dma_obj.dma_attr.dma_attr_align = 1;
5780 
5781                 /* allocate kernel buffer for DMA */
5782                 if (mrsas_alloc_dma_obj(instance, &response_dma_obj,
5783                     (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5784                         con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5785                             "could not allocate data transfer buffer."));
5786                         return (DDI_FAILURE);
5787                 }
5788                 (void) memset(response_dma_obj.buffer, 0, response_xferlen);
5789 
5790                 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5791                 for (i = 0; i < response_xferlen; i++) {
5792                         if (ddi_copyin((uint8_t *)response_ubuf + i,
5793                             (uint8_t *)response_dma_obj.buffer + i,
5794                             1, mode)) {
5795                                 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5796                                     "copy from user space failed"));
5797                                 return (DDI_FAILURE);
5798                         }
5799                 }
5800         }
5801 
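        /* Populate the SMP frame from the ioctl-supplied frame. */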
5802         ddi_put8(acc_handle, &smp->cmd, ksmp->cmd);
5803         ddi_put8(acc_handle, &smp->cmd_status, 0);
5804         ddi_put8(acc_handle, &smp->connection_status, 0);
5805         ddi_put8(acc_handle, &smp->sge_count, ksmp->sge_count);
5806         /* smp->context              = ksmp->context; */
5807         ddi_put16(acc_handle, &smp->timeout, ksmp->timeout);
5808         ddi_put32(acc_handle, &smp->data_xfer_len, ksmp->data_xfer_len);
5809 
5810         bcopy((void *)&ksmp->sas_addr, (void *)&tmp_sas_addr,
5811             sizeof (uint64_t));
5812         ddi_put64(acc_handle, &smp->sas_addr, tmp_sas_addr);
5813 
5814         ddi_put16(acc_handle, &smp->flags, ksmp->flags & ~MFI_FRAME_SGL64);
5815 
5816         model = ddi_model_convert_from(mode & FMODELS);
5817         if (model == DDI_MODEL_ILP32) {
5818                 con_log(CL_ANN1, (CE_CONT,
5819                     "issue_mfi_smp: DDI_MODEL_ILP32"));
5820 
5821                 sge32 = &smp->sgl[0].sge32[0];
5822                 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5823                 ddi_put32(acc_handle, &sge32[0].phys_addr,
5824                     response_dma_obj.dma_cookie[0].dmac_address);
5825                 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5826                 ddi_put32(acc_handle, &sge32[1].phys_addr,
5827                     request_dma_obj.dma_cookie[0].dmac_address);
5828         } else {
5829 #ifdef _ILP32
5830                 con_log(CL_ANN1, (CE_CONT,
5831                     "issue_mfi_smp: DDI_MODEL_ILP32"));
5832                 sge32 = &smp->sgl[0].sge32[0];
5833                 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5834                 ddi_put32(acc_handle, &sge32[0].phys_addr,
5835                     response_dma_obj.dma_cookie[0].dmac_address);
5836                 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5837                 ddi_put32(acc_handle, &sge32[1].phys_addr,
5838                     request_dma_obj.dma_cookie[0].dmac_address);
5839 #else
5840                 con_log(CL_ANN1, (CE_CONT,
5841                     "issue_mfi_smp: DDI_MODEL_LP64"));
5842                 sge64 = &smp->sgl[0].sge64[0];
5843                 ddi_put32(acc_handle, &sge64[0].length, response_xferlen);
5844                 ddi_put64(acc_handle, &sge64[0].phys_addr,
5845                     response_dma_obj.dma_cookie[0].dmac_address);
5846                 ddi_put32(acc_handle, &sge64[1].length, request_xferlen);
5847                 ddi_put64(acc_handle, &sge64[1].phys_addr,
5848                     request_dma_obj.dma_cookie[0].dmac_address);
5849 #endif
5850         }
5851         con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp : "
5852             "smp->response_xferlen = %d, smp->request_xferlen = %d "
5853             "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length),
5854             ddi_get32(acc_handle, &sge32[1].length),
5855             ddi_get32(acc_handle, &smp->data_xfer_len)));
5856 
5857         cmd->sync_cmd = MRSAS_TRUE;
5858         cmd->frame_count = 1;
5859 
5860         if (instance->tbolt) {
5861                 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5862         }
5863 
5864         if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5865                 con_log(CL_ANN, (CE_WARN,
5866                     "issue_mfi_smp: fw_ioctl failed"));
5867         } else {
5868                 con_log(CL_ANN1, (CE_CONT,
5869                     "issue_mfi_smp: copy to user space"));
5870 
5871                 if (request_xferlen) {
5872                         for (i = 0; i < request_xferlen; i++) {
5873                                 if (ddi_copyout(
5874                                     (uint8_t *)request_dma_obj.buffer +
5875                                     i, (uint8_t *)request_ubuf + i,
5876                                     1, mode)) {
5877                                         con_log(CL_ANN, (CE_WARN,
5878                                             "issue_mfi_smp : copy to user space"
5879                                             " failed"));
5880                                         return (DDI_FAILURE);
5881                                 }
5882                         }
5883                 }
5884 
5885                 if (response_xferlen) {
5886                         for (i = 0; i < response_xferlen; i++) {
5887                                 if (ddi_copyout(
5888                                     (uint8_t *)response_dma_obj.buffer
5889                                     + i, (uint8_t *)response_ubuf
5890                                     + i, 1, mode)) {
5891                                         con_log(CL_ANN, (CE_WARN,
5892                                             "issue_mfi_smp : copy to "
5893                                             "user space failed"));
5894                                         return (DDI_FAILURE);
5895                                 }
5896                         }
5897                 }
5898         }
5899 
5900         ksmp->cmd_status = ddi_get8(acc_handle, &smp->cmd_status);
5901         con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
5902             ksmp->cmd_status));
5903         DTRACE_PROBE2(issue_smp, uint8_t, ksmp->cmd, uint8_t, ksmp->cmd_status);
5904 
5905         if (request_xferlen) {
5906                 /* free kernel buffer */
5907                 if (mrsas_free_dma_obj(instance, request_dma_obj) !=
5908                     DDI_SUCCESS)
5909                         return (DDI_FAILURE);
5910         }
5911 
5912         if (response_xferlen) {
5913                 /* free kernel buffer */
5914                 if (mrsas_free_dma_obj(instance, response_dma_obj) !=
5915                     DDI_SUCCESS)
5916                         return (DDI_FAILURE);
5917         }
5918 
5919         return (DDI_SUCCESS);
5920 }
5921 
/*
 * issue_mfi_stp
 *
 * Build an MFI STP/SATA pass-through command from the user-supplied
 * ioctl frame, using separate DMA buffers for the FIS and the data
 * payload, and issue it in sync mode.
 */
5925 static int
5926 issue_mfi_stp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5927     struct mrsas_cmd *cmd, int mode)
5928 {
5929         void            *fis_ubuf;
5930         void            *data_ubuf;
5931         uint32_t        fis_xferlen = 0;
        uint32_t        new_xfer_length1 = 0;
        uint32_t        new_xfer_length2 = 0;
5934         uint32_t        data_xferlen = 0;
5935         uint_t          model;
5936         dma_obj_t       fis_dma_obj;
5937         dma_obj_t       data_dma_obj;
5938         struct mrsas_stp_frame  *kstp;
5939         struct mrsas_stp_frame  *stp;
5940         ddi_acc_handle_t        acc_handle = cmd->frame_dma_obj.acc_handle;
5941         int i;
5942 
5943         stp = &cmd->frame->stp;
5944         kstp = (struct mrsas_stp_frame *)&ioctl->frame[0];
5945 
5946         if (instance->adapterresetinprogress) {
                con_log(CL_ANN1, (CE_WARN, "issue_mfi_stp: adapter reset "
                    "in progress, returning DDI_FAILURE"));
5949                 return (DDI_FAILURE);
5950         }
5951         model = ddi_model_convert_from(mode & FMODELS);
5952         if (model == DDI_MODEL_ILP32) {
5953                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
5954 
5955                 fis_xferlen     = kstp->sgl.sge32[0].length;
5956                 data_xferlen    = kstp->sgl.sge32[1].length;
5957 
5958                 fis_ubuf        = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
5959                 data_ubuf       = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
5960         } else {
5961 #ifdef _ILP32
5962                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
5963 
5964                 fis_xferlen     = kstp->sgl.sge32[0].length;
5965                 data_xferlen    = kstp->sgl.sge32[1].length;
5966 
5967                 fis_ubuf        = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
5968                 data_ubuf       = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
5969 #else
5970                 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_LP64"));
5971 
5972                 fis_xferlen     = kstp->sgl.sge64[0].length;
5973                 data_xferlen    = kstp->sgl.sge64[1].length;
5974 
5975                 fis_ubuf        = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
5976                 data_ubuf       = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
5977 #endif
5978         }
5979 
5980 
5981         if (fis_xferlen) {
5982                 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: "
5983                     "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));
5984 
5985                 /* means IOCTL requires DMA */
5986                 /* allocate the data transfer buffer */
5987                 /* fis_dma_obj.size = fis_xferlen; */
5988                 MRSAS_GET_BOUNDARY_ALIGNED_LEN(fis_xferlen,
5989                     new_xfer_length1, PAGESIZE);
5990                 fis_dma_obj.size = new_xfer_length1;
5991                 fis_dma_obj.dma_attr = mrsas_generic_dma_attr;
5992                 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5993                 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5994                 fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
5995                 fis_dma_obj.dma_attr.dma_attr_align = 1;
5996 
5997                 /* allocate kernel buffer for DMA */
5998                 if (mrsas_alloc_dma_obj(instance, &fis_dma_obj,
5999                     (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
6000                         con_log(CL_ANN, (CE_WARN, "issue_mfi_stp : "
6001                             "could not allocate data transfer buffer."));
6002                         return (DDI_FAILURE);
6003                 }
6004                 (void) memset(fis_dma_obj.buffer, 0, fis_xferlen);
6005 
6006                 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
6007                 for (i = 0; i < fis_xferlen; i++) {
6008                         if (ddi_copyin((uint8_t *)fis_ubuf + i,
6009                             (uint8_t *)fis_dma_obj.buffer + i, 1, mode)) {
6010                                 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6011                                     "copy from user space failed"));
6012                                 return (DDI_FAILURE);
6013                         }
6014                 }
6015         }
6016 
6017         if (data_xferlen) {
6018                 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: data_ubuf = %p "
6019                     "data_xferlen = %x", data_ubuf, data_xferlen));
6020 
6021                 /* means IOCTL requires DMA */
6022                 /* allocate the data transfer buffer */
6023                 /* data_dma_obj.size = data_xferlen; */
6024                 MRSAS_GET_BOUNDARY_ALIGNED_LEN(data_xferlen, new_xfer_length2,
6025                     PAGESIZE);
6026                 data_dma_obj.size = new_xfer_length2;
6027                 data_dma_obj.dma_attr = mrsas_generic_dma_attr;
6028                 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
6029                 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
6030                 data_dma_obj.dma_attr.dma_attr_sgllen = 1;
6031                 data_dma_obj.dma_attr.dma_attr_align = 1;
6032 
6033                 /* allocate kernel buffer for DMA */
6034                 if (mrsas_alloc_dma_obj(instance, &data_dma_obj,
6035                     (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
6036                         con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6037                             "could not allocate data transfer buffer."));
6038                         return (DDI_FAILURE);
6039                 }
6040                 (void) memset(data_dma_obj.buffer, 0, data_xferlen);
6041 
6042                 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
6043                 for (i = 0; i < data_xferlen; i++) {
6044                         if (ddi_copyin((uint8_t *)data_ubuf + i,
6045                             (uint8_t *)data_dma_obj.buffer + i, 1, mode)) {
6046                                 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6047                                     "copy from user space failed"));
6048                                 return (DDI_FAILURE);
6049                         }
6050                 }
6051         }
6052 
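        /* Populate the STP frame from the ioctl-supplied frame. */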
6053         ddi_put8(acc_handle, &stp->cmd, kstp->cmd);
6054         ddi_put8(acc_handle, &stp->cmd_status, 0);
6055         ddi_put8(acc_handle, &stp->connection_status, 0);
6056         ddi_put8(acc_handle, &stp->target_id, kstp->target_id);
6057         ddi_put8(acc_handle, &stp->sge_count, kstp->sge_count);
6058 
6059         ddi_put16(acc_handle, &stp->timeout, kstp->timeout);
6060         ddi_put32(acc_handle, &stp->data_xfer_len, kstp->data_xfer_len);
6061 
6062         ddi_rep_put8(acc_handle, (uint8_t *)kstp->fis, (uint8_t *)stp->fis, 10,
6063             DDI_DEV_AUTOINCR);
6064 
6065         ddi_put16(acc_handle, &stp->flags, kstp->flags & ~MFI_FRAME_SGL64);
6066         ddi_put32(acc_handle, &stp->stp_flags, kstp->stp_flags);
6067         ddi_put32(acc_handle, &stp->sgl.sge32[0].length, fis_xferlen);
6068         ddi_put32(acc_handle, &stp->sgl.sge32[0].phys_addr,
6069             fis_dma_obj.dma_cookie[0].dmac_address);
6070         ddi_put32(acc_handle, &stp->sgl.sge32[1].length, data_xferlen);
6071         ddi_put32(acc_handle, &stp->sgl.sge32[1].phys_addr,
6072             data_dma_obj.dma_cookie[0].dmac_address);
6073 
6074         cmd->sync_cmd = MRSAS_TRUE;
6075         cmd->frame_count = 1;
6076 
6077         if (instance->tbolt) {
6078                 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
6079         }
6080 
6081         if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
6082                 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed"));
6083         } else {
6084 
6085                 if (fis_xferlen) {
6086                         for (i = 0; i < fis_xferlen; i++) {
6087                                 if (ddi_copyout(
6088                                     (uint8_t *)fis_dma_obj.buffer + i,
6089                                     (uint8_t *)fis_ubuf + i, 1, mode)) {
6090                                         con_log(CL_ANN, (CE_WARN,
6091                                             "issue_mfi_stp : copy to "
6092                                             "user space failed"));
6093                                         return (DDI_FAILURE);
6094                                 }
6095                         }
6096                 }
6097         }
6098         if (data_xferlen) {
6099                 for (i = 0; i < data_xferlen; i++) {
6100                         if (ddi_copyout(
6101                             (uint8_t *)data_dma_obj.buffer + i,
6102                             (uint8_t *)data_ubuf + i, 1, mode)) {
6103                                 con_log(CL_ANN, (CE_WARN,
6104                                     "issue_mfi_stp : copy to"
6105                                     " user space failed"));
6106                                 return (DDI_FAILURE);
6107                         }
6108                 }
6109         }
6110 
6111         kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status);
6112         con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: stp->cmd_status = %d",
6113             kstp->cmd_status));
6114         DTRACE_PROBE2(issue_stp, uint8_t, kstp->cmd, uint8_t, kstp->cmd_status);
6115 
6116         if (fis_xferlen) {
6117                 /* free kernel buffer */
6118                 if (mrsas_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
6119                         return (DDI_FAILURE);
6120         }
6121 
6122         if (data_xferlen) {
6123                 /* free kernel buffer */
6124                 if (mrsas_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
6125                         return (DDI_FAILURE);
6126         }
6127 
6128         return (DDI_SUCCESS);
6129 }
6130 
/*
 * fill_up_drv_ver
 *
 * Fill in the driver identification structure (signature, OS name,
 * driver name, version and release date) returned to applications.
 */
6134 void
6135 fill_up_drv_ver(struct mrsas_drv_ver *dv)
6136 {
6137         (void) memset(dv, 0, sizeof (struct mrsas_drv_ver));
6138 
6139         (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
6140         (void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
6141         (void) memcpy(dv->drv_name, "mr_sas", strlen("mr_sas"));
6142         (void) memcpy(dv->drv_ver, MRSAS_VERSION, strlen(MRSAS_VERSION));
6143         (void) memcpy(dv->drv_rel_date, MRSAS_RELDATE,
6144             strlen(MRSAS_RELDATE));
6145 
6146 }
6147 
/*
 * handle_drv_ioctl
 *
 * Handle driver-private ioctls (driver version and PCI information)
 * that are serviced without sending a command to the firmware.
 */
6151 static int
6152 handle_drv_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6153     int mode)
6154 {
6155         int     i;
6156         int     rval = DDI_SUCCESS;
6157         int     *props = NULL;
6158         void    *ubuf;
6159 
6160         uint8_t         *pci_conf_buf;
6161         uint32_t        xferlen;
6162         uint32_t        num_props;
6163         uint_t          model;
6164         struct mrsas_dcmd_frame *kdcmd;
6165         struct mrsas_drv_ver    dv;
6166         struct mrsas_pci_information pi;
6167 
6168         kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
6169 
6170         model = ddi_model_convert_from(mode & FMODELS);
6171         if (model == DDI_MODEL_ILP32) {
6172                 con_log(CL_ANN1, (CE_CONT,
6173                     "handle_drv_ioctl: DDI_MODEL_ILP32"));
6174 
6175                 xferlen = kdcmd->sgl.sge32[0].length;
6176 
6177                 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6178         } else {
6179 #ifdef _ILP32
6180                 con_log(CL_ANN1, (CE_CONT,
6181                     "handle_drv_ioctl: DDI_MODEL_ILP32"));
6182                 xferlen = kdcmd->sgl.sge32[0].length;
6183                 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6184 #else
6185                 con_log(CL_ANN1, (CE_CONT,
6186                     "handle_drv_ioctl: DDI_MODEL_LP64"));
6187                 xferlen = kdcmd->sgl.sge64[0].length;
6188                 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
6189 #endif
6190         }
6191         con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6192             "dataBuf=%p size=%d bytes", ubuf, xferlen));
6193 
6194         switch (kdcmd->opcode) {
6195         case MRSAS_DRIVER_IOCTL_DRIVER_VERSION:
6196                 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6197                     "MRSAS_DRIVER_IOCTL_DRIVER_VERSION"));
6198 
6199                 fill_up_drv_ver(&dv);
6200 
6201                 if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
6202                         con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6203                             "MRSAS_DRIVER_IOCTL_DRIVER_VERSION : "
6204                             "copy to user space failed"));
6205                         kdcmd->cmd_status = 1;
6206                         rval = 1;
6207                 } else {
6208                         kdcmd->cmd_status = 0;
6209                 }
6210                 break;
6211         case MRSAS_DRIVER_IOCTL_PCI_INFORMATION:
6212                 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
                    "MRSAS_DRIVER_IOCTL_PCI_INFORMATION"));
6214 
6215                 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
6216                     0, "reg", &props, &num_props)) {
6217                         con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6218                             "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
                            "ddi_prop_lookup_int_array failed"));
6220                         rval = DDI_FAILURE;
6221                 } else {
6222 
6223                         pi.busNumber = (props[0] >> 16) & 0xFF;
6224                         pi.deviceNumber = (props[0] >> 11) & 0x1f;
6225                         pi.functionNumber = (props[0] >> 8) & 0x7;
6226                         ddi_prop_free((void *)props);
6227                 }
6228 
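                /*
                 * Fill pciHeaderInfo with a byte-wise copy of the device's
                 * PCI config space.
                 */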
6229                 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;
6230 
6231                 for (i = 0; i < (sizeof (struct mrsas_pci_information) -
6232                     offsetof(struct mrsas_pci_information, pciHeaderInfo));
6233                     i++) {
6234                         pci_conf_buf[i] =
6235                             pci_config_get8(instance->pci_handle, i);
6236                 }
6237 
6238                 if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
6239                         con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6240                             "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6241                             "copy to user space failed"));
6242                         kdcmd->cmd_status = 1;
6243                         rval = 1;
6244                 } else {
6245                         kdcmd->cmd_status = 0;
6246                 }
6247                 break;
6248         default:
6249                 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6250                     "invalid driver specific IOCTL opcode = 0x%x",
6251                     kdcmd->opcode));
6252                 kdcmd->cmd_status = 1;
6253                 rval = DDI_FAILURE;
6254                 break;
6255         }
6256 
6257         return (rval);
6258 }
6259 
/*
 * handle_mfi_ioctl
 *
 * Dispatch an MFI ioctl to the appropriate issue_mfi_* helper based on
 * the command opcode in the user-supplied frame.
 */
6263 static int
6264 handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6265     int mode)
6266 {
6267         int     rval = DDI_SUCCESS;
6268 
6269         struct mrsas_header     *hdr;
6270         struct mrsas_cmd        *cmd;
6271 
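        /*
         * Acquire a free MFI command packet; Thunderbolt-class controllers
         * draw these from the RAID message frame pool.
         */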
6272         if (instance->tbolt) {
6273                 cmd = get_raid_msg_mfi_pkt(instance);
6274         } else {
6275                 cmd = mrsas_get_mfi_pkt(instance);
6276         }
6277         if (!cmd) {
6278                 con_log(CL_ANN, (CE_WARN, "mr_sas: "
6279                     "failed to get a cmd packet"));
6280                 DTRACE_PROBE2(mfi_ioctl_err, uint16_t,
6281                     instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
6282                 return (DDI_FAILURE);
6283         }
6284 
6285         /* Clear the frame buffer and assign back the context id */
6286         (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
6287         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
6288             cmd->index);
6289 
6290         hdr = (struct mrsas_header *)&ioctl->frame[0];
6291 
6292         switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
6293         case MFI_CMD_OP_DCMD:
6294                 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
6295                 break;
6296         case MFI_CMD_OP_SMP:
6297                 rval = issue_mfi_smp(instance, ioctl, cmd, mode);
6298                 break;
6299         case MFI_CMD_OP_STP:
6300                 rval = issue_mfi_stp(instance, ioctl, cmd, mode);
6301                 break;
6302         case MFI_CMD_OP_LD_SCSI:
6303         case MFI_CMD_OP_PD_SCSI:
6304                 rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
6305                 break;
6306         default:
6307                 con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
6308                     "invalid mfi ioctl hdr->cmd = %d", hdr->cmd));
6309                 rval = DDI_FAILURE;
6310                 break;
6311         }
6312 
6313         if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
6314                 rval = DDI_FAILURE;
6315 
6316         if (instance->tbolt) {
6317                 return_raid_msg_mfi_pkt(instance, cmd);
6318         } else {
6319                 mrsas_return_mfi_pkt(instance, cmd);
6320         }
6321 
6322         return (rval);
6323 }
6324 
/*
 * handle_mfi_aen
 *
 * Handle an asynchronous event notification (AEN) registration request
 * from an application.
 */
6328 static int
6329 handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen)
6330 {
6331         int     rval = 0;
6332 
6333         rval = register_mfi_aen(instance, instance->aen_seq_num,
6334             aen->class_locale_word);
6335 
6336         aen->cmd_status = (uint8_t)rval;
6337 
6338         return (rval);
6339 }
6340 
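/*
 * register_mfi_aen
 *
 * Register with the firmware for asynchronous event notifications at the
 * given sequence number and class/locale, aborting and merging with any
 * previously registered AEN command as needed.
 */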
6341 static int
6342 register_mfi_aen(struct mrsas_instance *instance, uint32_t seq_num,
6343     uint32_t class_locale_word)
6344 {
6345         int     ret_val;
6346 
6347         struct mrsas_cmd        *cmd, *aen_cmd;
6348         struct mrsas_dcmd_frame *dcmd;
6349         union mrsas_evt_class_locale    curr_aen;
6350         union mrsas_evt_class_locale    prev_aen;
6351 
6352         con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
6353         /*
         * If there is an AEN pending already (aen_cmd), check whether the
         * class_locale of that pending AEN is inclusive of the new AEN
         * request we currently have. If it is, there is nothing to do:
         * the events the current request subscribes to have already been
         * subscribed to.
         *
         * If the pending command is _not_ inclusive, abort it, form a
         * class_locale that is a superset of both the old and the new
         * requests, and re-issue it to the FW.
6364          */
6365 
6366         curr_aen.word = LE_32(class_locale_word);
6367         curr_aen.members.locale = LE_16(curr_aen.members.locale);
6368         aen_cmd = instance->aen_cmd;
6369         if (aen_cmd) {
6370                 prev_aen.word = ddi_get32(aen_cmd->frame_dma_obj.acc_handle,
6371                     &aen_cmd->frame->dcmd.mbox.w[1]);
6372                 prev_aen.word = LE_32(prev_aen.word);
6373                 prev_aen.members.locale = LE_16(prev_aen.members.locale);
6374                 /*
6375                  * A class whose enum value is smaller is inclusive of all
6376                  * higher values. If a PROGRESS (= -1) was previously
                 * registered, then new registration requests for higher
6378                  * classes need not be sent to FW. They are automatically
6379                  * included.
6380                  *
6381                  * Locale numbers don't have such hierarchy. They are bitmap
6382                  * values
6383                  */
6384                 if ((prev_aen.members.class <= curr_aen.members.class) &&
6385                     !((prev_aen.members.locale & curr_aen.members.locale) ^
6386                     curr_aen.members.locale)) {
6387                         /*
6388                          * Previously issued event registration includes
6389                          * current request. Nothing to do.
6390                          */
6391 
6392                         return (0);
6393                 } else {
6394                         curr_aen.members.locale |= prev_aen.members.locale;
6395 
6396                         if (prev_aen.members.class < curr_aen.members.class)
6397                                 curr_aen.members.class = prev_aen.members.class;
6398 
6399                         ret_val = abort_aen_cmd(instance, aen_cmd);
6400 
6401                         if (ret_val) {
6402                                 con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
                                    "failed to abort previous AEN command"));
6404 
6405                                 return (ret_val);
6406                         }
6407                 }
6408         } else {
6409                 curr_aen.word = LE_32(class_locale_word);
6410                 curr_aen.members.locale = LE_16(curr_aen.members.locale);
6411         }
6412 
6413         if (instance->tbolt) {
6414                 cmd = get_raid_msg_mfi_pkt(instance);
6415         } else {
6416                 cmd = mrsas_get_mfi_pkt(instance);
6417         }
6418 
6419         if (!cmd) {
6420                 DTRACE_PROBE2(mfi_aen_err, uint16_t, instance->fw_outstanding,
6421                     uint16_t, instance->max_fw_cmds);
6422                 return (ENOMEM);
6423         }
6424 
6425         /* Clear the frame buffer and assign back the context id */
6426         (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
6427         ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
6428             cmd->index);
6429 
6430         dcmd = &cmd->frame->dcmd;
6431 
6432         /* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
6433         (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
6434 
6435         (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
6436             sizeof (struct mrsas_evt_detail));
6437 
6438         /* Prepare DCMD for aen registration */
6439         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
6440         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
6441         ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
6442         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
6443             MFI_FRAME_DIR_READ);
6444         ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
6445         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
6446             sizeof (struct mrsas_evt_detail));
6447         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
6448             MR_DCMD_CTRL_EVENT_WAIT);
6449         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], seq_num);
6450         curr_aen.members.locale = LE_16(curr_aen.members.locale);
6451         curr_aen.word = LE_32(curr_aen.word);
6452         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[1],
6453             curr_aen.word);
6454         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
6455             instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address);
6456         ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
6457             sizeof (struct mrsas_evt_detail));
6458 
6459         instance->aen_seq_num = seq_num;
6460 
6461 
6462         /*
6463          * Store reference to the cmd used to register for AEN. When an
6464          * application wants us to register for AEN, we have to abort this
6465          * cmd and re-register with a new EVENT LOCALE supplied by that app
6466          */
6467         instance->aen_cmd = cmd;
6468 
6469         cmd->frame_count = 1;
6470 
6471         /* Issue the aen registration frame */
6472         /* atomic_add_16 (&instance->fw_outstanding, 1); */
6473         if (instance->tbolt) {
6474                 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
6475         }
6476         instance->func_ptr->issue_cmd(cmd, instance);
6477 
6478         return (0);
6479 }
6480 
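/*
 * display_scsi_inquiry
 *
 * Log the vendor, model, revision and device type fields of a standard
 * SCSI INQUIRY response at debug level.
 */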
6481 void
6482 display_scsi_inquiry(caddr_t scsi_inq)
6483 {
6484 #define MAX_SCSI_DEVICE_CODE    14
6485         int             i;
6486         char            inquiry_buf[256] = {0};
6487         int             len;
6488         const char      *const scsi_device_types[] = {
6489                 "Direct-Access    ",
6490                 "Sequential-Access",
6491                 "Printer          ",
6492                 "Processor        ",
6493                 "WORM             ",
6494                 "CD-ROM           ",
6495                 "Scanner          ",
6496                 "Optical Device   ",
6497                 "Medium Changer   ",
6498                 "Communications   ",
6499                 "Unknown          ",
6500                 "Unknown          ",
6501                 "Unknown          ",
6502                 "Enclosure        ",
6503         };
6504 
6505         len = 0;
6506 
6507         len += snprintf(inquiry_buf + len, 256 - len, "  Vendor: ");
6508         for (i = 8; i < 16; i++) {
6509                 len += snprintf(inquiry_buf + len, 256 - len, "%c",
6510                     scsi_inq[i]);
6511         }
6512 
6513         len += snprintf(inquiry_buf + len, 256 - len, "  Model: ");
6514 
6515         for (i = 16; i < 32; i++) {
6516                 len += snprintf(inquiry_buf + len, 256 - len, "%c",
6517                     scsi_inq[i]);
6518         }
6519 
6520         len += snprintf(inquiry_buf + len, 256 - len, "  Rev: ");
6521 
6522         for (i = 32; i < 36; i++) {
6523                 len += snprintf(inquiry_buf + len, 256 - len, "%c",
6524                     scsi_inq[i]);
6525         }
6526 
6527         len += snprintf(inquiry_buf + len, 256 - len, "\n");
6528 
6529 
6530         i = scsi_inq[0] & 0x1f;
6531 
6532 
6533         len += snprintf(inquiry_buf + len, 256 - len, "  Type:   %s ",
6534             i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
6535             "Unknown          ");
6536 
6537 
6538         len += snprintf(inquiry_buf + len, 256 - len,
6539             "                 ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
6540 
6541         if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
6542                 len += snprintf(inquiry_buf + len, 256 - len, " CCS\n");
6543         } else {
6544                 len += snprintf(inquiry_buf + len, 256 - len, "\n");
6545         }
6546 
6547         con_log(CL_DLEVEL2, (CE_CONT, "%s", inquiry_buf));
6548 }
6549 
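     /*
      * io_timeout_checker: timeout(9F) handler armed once per second.  It
      * skips the scan while a reset is in progress, triggers an OCR if the
      * firmware reports a fault, and otherwise walks cmd_pend_list
      * decrementing each command's drv_pkt_time.  When a command expires it
      * either retries the reset or, if OCR is disabled or the retry budget is
      * exhausted, kills the adapter.  The handler re-arms itself unless the
      * adapter has been killed.
      */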
6550 static void
6551 io_timeout_checker(void *arg)
6552 {
6553         struct scsi_pkt *pkt;
6554         struct mrsas_instance *instance = arg;
6555         struct mrsas_cmd        *cmd = NULL;
6556         struct mrsas_header     *hdr;
6557         int time = 0;
6558         int counter = 0;
6559         struct mlist_head       *pos, *next;
6560         mlist_t                 process_list;
6561 
6562         if (instance->adapterresetinprogress == 1) {
6563                 con_log(CL_ANN, (CE_NOTE, "io_timeout_checker:"
6564                     " reset in progress"));
6565 
6566                 instance->timeout_id = timeout(io_timeout_checker,
6567                     (void *) instance, drv_usectohz(MRSAS_1_SECOND));
6568                 return;
6569         }
6570 
6571         /* See if this check needs to be in the beginning or last in ISR */
6572         if (mrsas_initiate_ocr_if_fw_is_faulty(instance) ==  1) {
6573                 dev_err(instance->dip, CE_WARN, "io_timeout_checker: "
6574                     "FW Fault, calling reset adapter");
6575                 dev_err(instance->dip, CE_CONT, "io_timeout_checker: "
6576                     "fw_outstanding 0x%X max_fw_cmds 0x%X",
6577                     instance->fw_outstanding, instance->max_fw_cmds);
6578                 if (instance->adapterresetinprogress == 0) {
6579                         instance->adapterresetinprogress = 1;
6580                         if (instance->tbolt)
6581                                 (void) mrsas_tbolt_reset_ppc(instance);
6582                         else
6583                                 (void) mrsas_reset_ppc(instance);
6584                         instance->adapterresetinprogress = 0;
6585                 }
6586                 instance->timeout_id = timeout(io_timeout_checker,
6587                     (void *) instance, drv_usectohz(MRSAS_1_SECOND));
6588                 return;
6589         }
6590 
6591         INIT_LIST_HEAD(&process_list);
6592 
6593         mutex_enter(&instance->cmd_pend_mtx);
6594         mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
6595                 cmd = mlist_entry(pos, struct mrsas_cmd, list);
6596 
6597                 if (cmd == NULL) {
6598                         continue;
6599                 }
6600 
6601                 if (cmd->sync_cmd == MRSAS_TRUE) {
6602                         hdr = (struct mrsas_header *)&cmd->frame->hdr;
6603                         if (hdr == NULL) {
6604                                 continue;
6605                         }
6606                         time = --cmd->drv_pkt_time;
6607                 } else {
6608                         pkt = cmd->pkt;
6609                         if (pkt == NULL) {
6610                                 continue;
6611                         }
6612                         time = --cmd->drv_pkt_time;
6613                 }
6614                 if (time <= 0) {
6615                         dev_err(instance->dip, CE_WARN, "%llx: "
6616                             "io_timeout_checker: TIMING OUT: pkt: %p, "
6617                             "cmd %p fw_outstanding 0x%X max_fw_cmds 0x%X",
6618                             gethrtime(), (void *)pkt, (void *)cmd,
6619                             instance->fw_outstanding, instance->max_fw_cmds);
6620 
6621                         counter++;
6622                         break;
6623                 }
6624         }
6625         mutex_exit(&instance->cmd_pend_mtx);
6626 
6627         if (counter) {
6628                 if (instance->disable_online_ctrl_reset == 1) {
6629                         dev_err(instance->dip, CE_WARN, "%s(): OCR is NOT "
6630                             "supported by Firmware, KILL adapter!!!",
6631                             __func__);
6632 
6633                         if (instance->tbolt)
6634                                 mrsas_tbolt_kill_adapter(instance);
6635                         else
6636                                 (void) mrsas_kill_adapter(instance);
6637 
6638                         return;
6639                 } else {
6640                         if (cmd->retry_count_for_ocr <= IO_RETRY_COUNT) {
6641                                 if (instance->adapterresetinprogress == 0) {
6642                                         if (instance->tbolt) {
6643                                                 (void) mrsas_tbolt_reset_ppc(
6644                                                     instance);
6645                                         } else {
6646                                                 (void) mrsas_reset_ppc(
6647                                                     instance);
6648                                         }
6649                                 }
6650                         } else {
6651                                 dev_err(instance->dip, CE_WARN,
6652                                     "io_timeout_checker: "
6653                                     "cmd %p cmd->index %d "
6654                                     "timed out even after 3 resets: "
6655                                     "so KILL adapter", (void *)cmd, cmd->index);
6656 
6657                                 mrsas_print_cmd_details(instance, cmd, 0xDD);
6658 
6659                                 if (instance->tbolt)
6660                                         mrsas_tbolt_kill_adapter(instance);
6661                                 else
6662                                         (void) mrsas_kill_adapter(instance);
6663                                 return;
6664                         }
6665                 }
6666         }
6667         con_log(CL_ANN, (CE_NOTE, "mrsas: "
6668             "io_timeout_checker: "
6669             "scheduling next timeout check\n"));
6670         instance->timeout_id =
6671             timeout(io_timeout_checker, (void *)instance,
6672             drv_usectohz(MRSAS_1_SECOND));
6673 }
6674 
6675 static uint32_t
6676 read_fw_status_reg_ppc(struct mrsas_instance *instance)
6677 {
6678         return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
6679 }
6680 
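     /*
      * issue_cmd_ppc: fire-and-forget submission of an MFI frame.  The command
      * is tracked on the pending list (unless a reset is in progress, in which
      * case only its packet timer is refreshed) and the frame's physical
      * address is written to the inbound queue port with the frame count
      * encoded in the low bits, i.e. addr | ((frame_count - 1) << 1) | 1.
      */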
6681 static void
6682 issue_cmd_ppc(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
6683 {
6684         struct scsi_pkt *pkt;
6685         atomic_inc_16(&instance->fw_outstanding);
6686 
6687         pkt = cmd->pkt;
6688         if (pkt) {
6689                 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc: "
6690                     "issued cmd to FW: cmd: %p instance: %p "
6691                     "pkt: %p pkt_time: %x\n",
6692                     gethrtime(), (void *)cmd, (void *)instance,
6693                     (void *)pkt, cmd->drv_pkt_time));
6694                 if (instance->adapterresetinprogress) {
6695                         cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
6696                         con_log(CL_ANN1, (CE_NOTE, "Reset the scsi_pkt timer"));
6697                 } else {
6698                         push_pending_mfi_pkt(instance, cmd);
6699                 }
6700 
6701         } else {
6702                 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc: "
6703                     "issued cmd to FW: cmd: %p, instance: %p "
6704                     "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
6705         }
6706 
6707         mutex_enter(&instance->reg_write_mtx);
6708         /* Issue the command to the FW */
6709         WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
6710             (((cmd->frame_count - 1) << 1) | 1), instance);
6711         mutex_exit(&instance->reg_write_mtx);
6712 
6713 }
6714 
6715 /*
6716  * issue_cmd_in_sync_mode_ppc: issue an MFI frame and wait for its completion.
6717  */
6718 static int
6719 issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance,
6720     struct mrsas_cmd *cmd)
6721 {
6722         int     i;
6723         uint32_t        msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
6724         struct mrsas_header *hdr = &cmd->frame->hdr;
6725 
6726         con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called"));
6727 
6728         if (instance->adapterresetinprogress) {
6729                 cmd->drv_pkt_time = ddi_get16(
6730                     cmd->frame_dma_obj.acc_handle, &hdr->timeout);
6731                 if (cmd->drv_pkt_time < debug_timeout_g)
6732                         cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
6733 
6734                 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: "
6735                     "issue and return in reset case\n"));
6736                 WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
6737                     (((cmd->frame_count - 1) << 1) | 1), instance);
6738 
6739                 return (DDI_SUCCESS);
6740         } else {
6741                 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: pushing the pkt\n"));
6742                 push_pending_mfi_pkt(instance, cmd);
6743         }
6744 
6745         cmd->cmd_status      = ENODATA;
6746 
6747         mutex_enter(&instance->reg_write_mtx);
6748         /* Issue the command to the FW */
6749         WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
6750             (((cmd->frame_count - 1) << 1) | 1), instance);
6751         mutex_exit(&instance->reg_write_mtx);
6752 
6753         mutex_enter(&instance->int_cmd_mtx);
6754         for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
6755                 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
6756         }
6757         mutex_exit(&instance->int_cmd_mtx);
6758 
6759         con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done"));
6760 
6761         if (i < (msecs - 1)) {
6762                 return (DDI_SUCCESS);
6763         } else {
6764                 return (DDI_FAILURE);
6765         }
6766 }
6767 
6768 /*
6769  * issue_cmd_in_poll_mode_ppc: issue a frame and poll for its completion.
6770  */
6771 static int
6772 issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance,
6773     struct mrsas_cmd *cmd)
6774 {
6775         int             i;
6776         uint16_t        flags;
6777         uint32_t        msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
6778         struct mrsas_header *frame_hdr;
6779 
6780         con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called"));
6781 
6782         frame_hdr = (struct mrsas_header *)cmd->frame;
6783         ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
6784             MFI_CMD_STATUS_POLL_MODE);
6785         flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
6786         flags   |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
6787 
6788         ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
6789 
6790         /* issue the frame using inbound queue port */
6791         WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
6792             (((cmd->frame_count - 1) << 1) | 1), instance);
6793 
6794         /* wait for cmd_status to change from 0xFF */
6795         for (i = 0; i < msecs && (
6796             ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6797             == MFI_CMD_STATUS_POLL_MODE); i++) {
6798                 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
6799         }
6800 
6801         if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6802             == MFI_CMD_STATUS_POLL_MODE) {
6803                 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
6804                     "cmd polling timed out"));
6805                 return (DDI_FAILURE);
6806         }
6807 
6808         return (DDI_SUCCESS);
6809 }
6810 
6811 static void
6812 enable_intr_ppc(struct mrsas_instance *instance)
6813 {
6814         uint32_t        mask;
6815 
6816         con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called"));
6817 
6818         if (instance->skinny) {
6819                 /* For SKINNY, write ~0x1, from BSD's mfi driver. */
6820                 WR_OB_INTR_MASK(0xfffffffe, instance);
6821         } else {
6822                 /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
6823                 WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);
6824 
6825                 /* WR_OB_INTR_MASK(~0x80000000, instance); */
6826                 WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance);
6827         }
6828 
6829         /* dummy read to force PCI flush */
6830         mask = RD_OB_INTR_MASK(instance);
6831 
6832         con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
6833             "outbound_intr_mask = 0x%x", mask));
6834 }
6835 
6836 static void
6837 disable_intr_ppc(struct mrsas_instance *instance)
6838 {
6839         con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called"));
6840 
6841         con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
6842             "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
6843 
6844         /* For now, assume there are no extras needed for Skinny support. */
6845 
6846         WR_OB_INTR_MASK(OB_INTR_MASK, instance);
6847 
6848         con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
6849             "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
6850 
6851         /* dummy read to force PCI flush */
6852         (void) RD_OB_INTR_MASK(instance);
6853 }
6854 
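     /*
      * intr_ack_ppc: interrupt acknowledge routine.  Reads the outbound
      * interrupt status register and returns DDI_INTR_UNCLAIMED if the
      * reply-message bit is not set or the register access handle has faulted;
      * otherwise the interrupt is cleared (status register on Skinny, doorbell
      * clear on 2108) and a dummy read flushes the write.
      */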
6855 static int
6856 intr_ack_ppc(struct mrsas_instance *instance)
6857 {
6858         uint32_t        status;
6859         int ret = DDI_INTR_CLAIMED;
6860 
6861         con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called"));
6862 
6863         /* check if it is our interrupt */
6864         status = RD_OB_INTR_STATUS(instance);
6865 
6866         con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status));
6867 
6868         /*
6869          * NOTE:  Some drivers call out SKINNY here, but the return is the same
6870          * for SKINNY and 2108.
6871          */
6872         if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) {
6873                 ret = DDI_INTR_UNCLAIMED;
6874         }
6875 
6876         if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
6877                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
6878                 ret = DDI_INTR_UNCLAIMED;
6879         }
6880 
6881         if (ret == DDI_INTR_UNCLAIMED) {
6882                 return (ret);
6883         }
6884 
6885         /*
6886          * Clear the interrupt by writing back the same value.
6887          * Another case where SKINNY is slightly different.
6888          */
6889         if (instance->skinny) {
6890                 WR_OB_INTR_STATUS(status, instance);
6891         } else {
6892                 WR_OB_DOORBELL_CLEAR(status, instance);
6893         }
6894 
6895         /* dummy READ */
6896         status = RD_OB_INTR_STATUS(instance);
6897 
6898         con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared"));
6899 
6900         return (ret);
6901 }
6902 
6903 /*
6904  * Marks the HBA as bad. This is called either when an I/O packet times out
6905  * even after 3 FW resets, or when the FW is found to be faulted even after
6906  * 3 consecutive resets.
6907  */
6908 
6909 static int
6910 mrsas_kill_adapter(struct mrsas_instance *instance)
6911 {
6912         if (instance->deadadapter == 1)
6913                 return (DDI_FAILURE);
6914 
6915         con_log(CL_ANN1, (CE_NOTE, "mrsas_kill_adapter: "
6916             "Writing to doorbell with MFI_STOP_ADP "));
6917         mutex_enter(&instance->ocr_flags_mtx);
6918         instance->deadadapter = 1;
6919         mutex_exit(&instance->ocr_flags_mtx);
6920         instance->func_ptr->disable_intr(instance);
6921         WR_IB_DOORBELL(MFI_STOP_ADP, instance);
6922         (void) mrsas_complete_pending_cmds(instance);
6923         return (DDI_SUCCESS);
6924 }
6925 
6926 
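     /*
      * mrsas_reset_ppc: online controller reset (OCR).  Writes the magic
      * unlock sequence to the write-sequence register, waits for the diag
      * write-enable (DRWE) bit, requests DIAG_RESET_ADAPTER and waits for it
      * to clear, then brings the firmware back to READY, re-issues INIT,
      * re-submits the pended commands and re-registers the AEN command.  If
      * the firmware keeps faulting, the reset is retried up to
      * MAX_FW_RESET_COUNT times before the adapter is killed.
      */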
6927 static int
6928 mrsas_reset_ppc(struct mrsas_instance *instance)
6929 {
6930         uint32_t status;
6931         uint32_t retry = 0;
6932         uint32_t cur_abs_reg_val;
6933         uint32_t fw_state;
6934 
6935         con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
6936 
6937         if (instance->deadadapter == 1) {
6938                 dev_err(instance->dip, CE_WARN, "mrsas_reset_ppc: "
6939                     "no more resets as HBA has been marked dead ");
6940                 return (DDI_FAILURE);
6941         }
6942         mutex_enter(&instance->ocr_flags_mtx);
6943         instance->adapterresetinprogress = 1;
6944         mutex_exit(&instance->ocr_flags_mtx);
6945         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
6946             "adapterresetinprogress flag set, time %llx", gethrtime()));
6947 
6948         instance->func_ptr->disable_intr(instance);
6949 retry_reset:
6950         WR_IB_WRITE_SEQ(0, instance);
6951         WR_IB_WRITE_SEQ(4, instance);
6952         WR_IB_WRITE_SEQ(0xb, instance);
6953         WR_IB_WRITE_SEQ(2, instance);
6954         WR_IB_WRITE_SEQ(7, instance);
6955         WR_IB_WRITE_SEQ(0xd, instance);
6956         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: magic number written "
6957             "to write sequence register\n"));
6958         delay(100 * drv_usectohz(MILLISEC));
6959         status = RD_OB_DRWE(instance);
6960 
6961         while (!(status & DIAG_WRITE_ENABLE)) {
6962                 delay(100 * drv_usectohz(MILLISEC));
6963                 status = RD_OB_DRWE(instance);
6964                 if (retry++ == 100) {
6965                         dev_err(instance->dip, CE_WARN,
6966                             "mrsas_reset_ppc: DRWE bit "
6967                             "check retry count %d", retry);
6968                         return (DDI_FAILURE);
6969                 }
6970         }
6971         WR_IB_DRWE(status | DIAG_RESET_ADAPTER, instance);
6972         delay(100 * drv_usectohz(MILLISEC));
6973         status = RD_OB_DRWE(instance);
6974         while (status & DIAG_RESET_ADAPTER) {
6975                 delay(100 * drv_usectohz(MILLISEC));
6976                 status = RD_OB_DRWE(instance);
6977                 if (retry++ == 100) {
6978                         dev_err(instance->dip, CE_WARN, "mrsas_reset_ppc: "
6979                             "RESET FAILED. KILL adapter called.");
6980 
6981                         (void) mrsas_kill_adapter(instance);
6982                         return (DDI_FAILURE);
6983                 }
6984         }
6985         con_log(CL_ANN, (CE_NOTE, "mrsas_reset_ppc: Adapter reset complete"));
6986         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
6987             "Calling mfi_state_transition_to_ready"));
6988 
6989         /* Mark HBA as bad, if FW is fault after 3 continuous resets */
6990         if (mfi_state_transition_to_ready(instance) ||
6991             debug_fw_faults_after_ocr_g == 1) {
6992                 cur_abs_reg_val =
6993                     instance->func_ptr->read_fw_status_reg(instance);
6994                 fw_state        = cur_abs_reg_val & MFI_STATE_MASK;
6995 
6996 #ifdef OCRDEBUG
6997                 con_log(CL_ANN1, (CE_NOTE,
6998                     "mrsas_reset_ppc: before fake: FW is not ready "
6999                     "FW state = 0x%x", fw_state));
7000                 if (debug_fw_faults_after_ocr_g == 1)
7001                         fw_state = MFI_STATE_FAULT;
7002 #endif
7003 
7004                 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: FW is not ready "
7005                     "FW state = 0x%x", fw_state));
7006 
7007                 if (fw_state == MFI_STATE_FAULT) {
7008                         /* increment the count */
7009                         instance->fw_fault_count_after_ocr++;
7010                         if (instance->fw_fault_count_after_ocr
7011                             < MAX_FW_RESET_COUNT) {
7012                                 dev_err(instance->dip, CE_WARN,
7013                                     "mrsas_reset_ppc: "
7014                                     "FW is in fault after OCR count %d "
7015                                     "Retry Reset",
7016                                     instance->fw_fault_count_after_ocr);
7017                                 goto retry_reset;
7018 
7019                         } else {
7020                                 dev_err(instance->dip, CE_WARN,
7021                                     "mrsas_reset_ppc: "
7022                                     "Max reset count exceeded (>%d), "
7023                                     "mark HBA as bad, KILL adapter",
7024                                     MAX_FW_RESET_COUNT);
7025 
7026                                 (void) mrsas_kill_adapter(instance);
7027                                 return (DDI_FAILURE);
7028                         }
7029                 }
7030         }
7031         /* reset the counter as FW is up after OCR */
7032         instance->fw_fault_count_after_ocr = 0;
7033 
7034 
7035         ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
7036             instance->producer, 0);
7037 
7038         ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
7039             instance->consumer, 0);
7040 
7041         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7042             "after resetting producer/consumer, check indexes: "
7043             "producer %x consumer %x", *instance->producer,
7044             *instance->consumer));
7045 
7046         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7047             "Calling mrsas_issue_init_mfi"));
7048         (void) mrsas_issue_init_mfi(instance);
7049         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7050             "mrsas_issue_init_mfi Done"));
7051 
7052         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7053             "Calling mrsas_print_pending_cmd\n"));
7054         (void) mrsas_print_pending_cmds(instance);
7055         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7056             "mrsas_print_pending_cmd done\n"));
7057 
7058         instance->func_ptr->enable_intr(instance);
7059         instance->fw_outstanding = 0;
7060 
7061         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7062             "Calling mrsas_issue_pending_cmds"));
7063         (void) mrsas_issue_pending_cmds(instance);
7064         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7065             "issue_pending_cmds done.\n"));
7066 
7067         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7068             "Calling aen registration"));
7069 
7070 
7071         instance->aen_cmd->retry_count_for_ocr = 0;
7072         instance->aen_cmd->drv_pkt_time = 0;
7073 
7074         instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
7075         con_log(CL_ANN1, (CE_NOTE, "Unsetting adapterresetinprogress flag.\n"));
7076 
7077         mutex_enter(&instance->ocr_flags_mtx);
7078         instance->adapterresetinprogress = 0;
7079         mutex_exit(&instance->ocr_flags_mtx);
7080         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7081             "adapterresetinprogress flag unset"));
7082 
7083         con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc done\n"));
7084         return (DDI_SUCCESS);
7085 }
7086 
7087 /*
7088  * FMA functions.
7089  */
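
     /*
      * mrsas_common_check: post-I/O FMA check.  Verifies the command's frame
      * DMA handle, the driver's internal and event-detail DMA handles and the
      * register access handle; any fault is reported as
      * DDI_SERVICE_UNAFFECTED, the packet (if any) is marked CMD_TRAN_ERR and
      * DDI_FAILURE is returned.
      */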
7090 int
7091 mrsas_common_check(struct mrsas_instance *instance, struct  mrsas_cmd *cmd)
7092 {
7093         int ret = DDI_SUCCESS;
7094 
7095         if (cmd != NULL &&
7096             mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
7097             DDI_SUCCESS) {
7098                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7099                 if (cmd->pkt != NULL) {
7100                         cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7101                         cmd->pkt->pkt_statistics = 0;
7102                 }
7103                 ret = DDI_FAILURE;
7104         }
7105         if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
7106             != DDI_SUCCESS) {
7107                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7108                 if (cmd != NULL && cmd->pkt != NULL) {
7109                         cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7110                         cmd->pkt->pkt_statistics = 0;
7111                 }
7112                 ret = DDI_FAILURE;
7113         }
7114         if (mrsas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) !=
7115             DDI_SUCCESS) {
7116                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7117                 if (cmd != NULL && cmd->pkt != NULL) {
7118                         cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7119                         cmd->pkt->pkt_statistics = 0;
7120                 }
7121                 ret = DDI_FAILURE;
7122         }
7123         if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
7124                 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7125 
7126                 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0);
7127 
7128                 if (cmd != NULL && cmd->pkt != NULL) {
7129                         cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7130                         cmd->pkt->pkt_statistics = 0;
7131                 }
7132                 ret = DDI_FAILURE;
7133         }
7134 
7135         return (ret);
7136 }
7137 
7138 /*ARGSUSED*/
7139 static int
7140 mrsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
7141 {
7142         /*
7143          * as the driver can always deal with an error in any dma or
7144          * access handle, we can just return the fme_status value.
7145          */
7146         pci_ereport_post(dip, err, NULL);
7147         return (err->fme_status);
7148 }
7149 
7150 static void
7151 mrsas_fm_init(struct mrsas_instance *instance)
7152 {
7153         /* Need to change iblock to priority for new MSI intr */
7154         ddi_iblock_cookie_t fm_ibc;
7155 
7156         /* Only register with IO Fault Services if we have some capability */
7157         if (instance->fm_capabilities) {
7158                 /* Adjust access and dma attributes for FMA */
7159                 endian_attr.devacc_attr_access = DDI_FLAGERR_ACC;
7160                 mrsas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
7161 
7162                 /*
7163                  * Register capabilities with IO Fault Services.
7164                  * fm_capabilities will be updated to indicate
7165                  * capabilities actually supported (not requested.)
7166                  */
7167 
7168                 ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc);
7169 
7170                 /*
7171                  * Initialize pci ereport capabilities if ereport
7172                  * capable (should always be.)
7173                  */
7174 
7175                 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
7176                     DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7177                         pci_ereport_setup(instance->dip);
7178                 }
7179 
7180                 /*
7181                  * Register error callback if error callback capable.
7182                  */
7183                 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7184                         ddi_fm_handler_register(instance->dip,
7185                             mrsas_fm_error_cb, (void*) instance);
7186                 }
7187         } else {
7188                 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
7189                 mrsas_generic_dma_attr.dma_attr_flags = 0;
7190         }
7191 }
7192 
7193 static void
7194 mrsas_fm_fini(struct mrsas_instance *instance)
7195 {
7196         /* Only unregister FMA capabilities if registered */
7197         if (instance->fm_capabilities) {
7198                 /*
7199                  * Un-register error callback if error callback capable.
7200                  */
7201                 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7202                         ddi_fm_handler_unregister(instance->dip);
7203                 }
7204 
7205                 /*
7206                  * Release any resources allocated by pci_ereport_setup()
7207                  */
7208                 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
7209                     DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7210                         pci_ereport_teardown(instance->dip);
7211                 }
7212 
7213                 /* Unregister from IO Fault Services */
7214                 ddi_fm_fini(instance->dip);
7215 
7216                 /* Adjust access and dma attributes for FMA */
7217                 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
7218                 mrsas_generic_dma_attr.dma_attr_flags = 0;
7219         }
7220 }
7221 
7222 int
7223 mrsas_check_acc_handle(ddi_acc_handle_t handle)
7224 {
7225         ddi_fm_error_t de;
7226 
7227         if (handle == NULL) {
7228                 return (DDI_FAILURE);
7229         }
7230 
7231         ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
7232 
7233         return (de.fme_status);
7234 }
7235 
7236 int
7237 mrsas_check_dma_handle(ddi_dma_handle_t handle)
7238 {
7239         ddi_fm_error_t de;
7240 
7241         if (handle == NULL) {
7242                 return (DDI_FAILURE);
7243         }
7244 
7245         ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
7246 
7247         return (de.fme_status);
7248 }
7249 
7250 void
7251 mrsas_fm_ereport(struct mrsas_instance *instance, char *detail)
7252 {
7253         uint64_t ena;
7254         char buf[FM_MAX_CLASS];
7255 
7256         (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
7257         ena = fm_ena_generate(0, FM_ENA_FMT1);
7258         if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) {
7259                 ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP,
7260                     FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
7261         }
7262 }
7263 
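     /*
      * mrsas_add_intrs: allocate and enable interrupts of the requested type
      * through the DDI interrupt framework.  Only a single vector is used even
      * when more are available, and the allocation is rejected if it would
      * have to run at a high-level priority.
      */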
7264 static int
7265 mrsas_add_intrs(struct mrsas_instance *instance, int intr_type)
7266 {
7267 
7268         dev_info_t *dip = instance->dip;
7269         int     avail, actual, count;
7270         int     i, flag, ret;
7271 
7272         con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_type = %x",
7273             intr_type));
7274 
7275         /* Get number of interrupts */
7276         ret = ddi_intr_get_nintrs(dip, intr_type, &count);
7277         if ((ret != DDI_SUCCESS) || (count == 0)) {
7278                 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_nintrs() failed:"
7279                     "ret %d count %d", ret, count));
7280 
7281                 return (DDI_FAILURE);
7282         }
7283 
7284         con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: count = %d ", count));
7285 
7286         /* Get number of available interrupts */
7287         ret = ddi_intr_get_navail(dip, intr_type, &avail);
7288         if ((ret != DDI_SUCCESS) || (avail == 0)) {
7289                 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed:"
7290                     "ret %d avail %d", ret, avail));
7291 
7292                 return (DDI_FAILURE);
7293         }
7294         con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: avail = %d ", avail));
7295 
7296         /* Only one interrupt routine. So limit the count to 1 */
7297         if (count > 1) {
7298                 count = 1;
7299         }
7300 
7301         /*
7302          * Allocate an array of interrupt handlers. Currently we support
7303          * only one interrupt. The framework can be extended later.
7304          */
7305         instance->intr_htable_size = count * sizeof (ddi_intr_handle_t);
7306         instance->intr_htable = kmem_zalloc(instance->intr_htable_size,
7307             KM_SLEEP);
7308         ASSERT(instance->intr_htable);
7309 
7310         flag = ((intr_type == DDI_INTR_TYPE_MSI) ||
7311             (intr_type == DDI_INTR_TYPE_MSIX)) ?
7312             DDI_INTR_ALLOC_STRICT : DDI_INTR_ALLOC_NORMAL;
7313 
7314         /* Allocate interrupt */
7315         ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0,
7316             count, &actual, flag);
7317 
7318         if ((ret != DDI_SUCCESS) || (actual == 0)) {
7319                 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7320                     "ddi_intr_alloc() failed: ret %d actual %d", ret, actual));
7321                 goto mrsas_free_htable;
7322         }
7323 
7324         if (actual < count) {
7325                 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7326                     "Requested = %d  Received = %d", count, actual));
7327         }
7328         instance->intr_cnt = actual;
7329 
7330         /*
7331          * Get the priority of the interrupt allocated.
7332          */
7333         if ((ret = ddi_intr_get_pri(instance->intr_htable[0],
7334             &instance->intr_pri)) != DDI_SUCCESS) {
7335                 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7336                     "get priority call failed"));
7337                 goto mrsas_free_handles;
7338         }
7339 
7340         /*
7341          * Test for high-level interrupts; we don't support them.
7342          */
7343         if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) {
7344                 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7345                     "High level interrupts not supported."));
7346                 goto mrsas_free_handles;
7347         }
7348 
7349         con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_pri = 0x%x ",
7350             instance->intr_pri));
7351 
7352         /* Call ddi_intr_add_handler() */
7353         for (i = 0; i < actual; i++) {
7354                 ret = ddi_intr_add_handler(instance->intr_htable[i],
7355                     (ddi_intr_handler_t *)mrsas_isr, (caddr_t)instance,
7356                     (caddr_t)(uintptr_t)i);
7357 
7358                 if (ret != DDI_SUCCESS) {
7359                         con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs:"
7360                             "failed %d", ret));
7361                         goto mrsas_free_handles;
7362                 }
7363 
7364         }
7365 
7366         con_log(CL_DLEVEL1, (CE_NOTE, " ddi_intr_add_handler done"));
7367 
7368         if ((ret = ddi_intr_get_cap(instance->intr_htable[0],
7369             &instance->intr_cap)) != DDI_SUCCESS) {
7370                 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d",
7371                     ret));
7372                 goto mrsas_free_handlers;
7373         }
7374 
7375         if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
7376                 con_log(CL_ANN, (CE_WARN, "Calling ddi_intr_block_enable"));
7377 
7378                 (void) ddi_intr_block_enable(instance->intr_htable,
7379                     instance->intr_cnt);
7380         } else {
7381                 con_log(CL_ANN, (CE_NOTE, "Calling ddi_intr_enable"));
7382 
7383                 for (i = 0; i < instance->intr_cnt; i++) {
7384                         (void) ddi_intr_enable(instance->intr_htable[i]);
7385                         con_log(CL_ANN, (CE_NOTE, "ddi_intr_enable done "
7386                             "for vector %d", i));
7387                 }
7388         }
7389 
7390         return (DDI_SUCCESS);
7391 
7392 mrsas_free_handlers:
7393         for (i = 0; i < actual; i++)
7394                 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7395 
7396 mrsas_free_handles:
7397         for (i = 0; i < actual; i++)
7398                 (void) ddi_intr_free(instance->intr_htable[i]);
7399 
7400 mrsas_free_htable:
7401         if (instance->intr_htable != NULL)
7402                 kmem_free(instance->intr_htable, instance->intr_htable_size);
7403 
7404         instance->intr_htable = NULL;
7405         instance->intr_htable_size = 0;
7406 
7407         return (DDI_FAILURE);
7408 
7409 }
7410 
7411 
7412 static void
7413 mrsas_rem_intrs(struct mrsas_instance *instance)
7414 {
7415         int i;
7416 
7417         con_log(CL_ANN, (CE_NOTE, "mrsas_rem_intrs called"));
7418 
7419         /* Disable all interrupts first */
7420         if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
7421                 (void) ddi_intr_block_disable(instance->intr_htable,
7422                     instance->intr_cnt);
7423         } else {
7424                 for (i = 0; i < instance->intr_cnt; i++) {
7425                         (void) ddi_intr_disable(instance->intr_htable[i]);
7426                 }
7427         }
7428 
7429         /* Remove all the handlers */
7430 
7431         for (i = 0; i < instance->intr_cnt; i++) {
7432                 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7433                 (void) ddi_intr_free(instance->intr_htable[i]);
7434         }
7435 
7436         if (instance->intr_htable != NULL)
7437                 kmem_free(instance->intr_htable, instance->intr_htable_size);
7438 
7439         instance->intr_htable = NULL;
7440         instance->intr_htable_size = 0;
7441 
7442 }
7443 
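     /*
      * mrsas_tran_bus_config: SCSA bus-config entry point.  BUS_CONFIG_ONE
      * parses the "tgt,lun" unit address out of the child name and configures
      * a single logical drive (lun 0) or, on SKINNY/TBOLT controllers, a
      * physical disk (lun != 0); BUS_CONFIG_DRIVER and BUS_CONFIG_ALL
      * enumerate everything.
      */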
7444 static int
7445 mrsas_tran_bus_config(dev_info_t *parent, uint_t flags,
7446     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
7447 {
7448         struct mrsas_instance *instance;
7449         int config;
7450         int rval  = NDI_SUCCESS;
7451 
7452         char *ptr = NULL;
7453         int tgt, lun;
7454 
7455         con_log(CL_ANN1, (CE_NOTE, "Bus config called for op = %x", op));
7456 
7457         if ((instance = ddi_get_soft_state(mrsas_state,
7458             ddi_get_instance(parent))) == NULL) {
7459                 return (NDI_FAILURE);
7460         }
7461 
7462         /* Hold nexus during bus_config */
7463         ndi_devi_enter(parent, &config);
7464         switch (op) {
7465         case BUS_CONFIG_ONE: {
7466 
7467                 /* parse wwid/target name out of name given */
7468                 if ((ptr = strchr((char *)arg, '@')) == NULL) {
7469                         rval = NDI_FAILURE;
7470                         break;
7471                 }
7472                 ptr++;
7473 
7474                 if (mrsas_parse_devname(arg, &tgt, &lun) != 0) {
7475                         rval = NDI_FAILURE;
7476                         break;
7477                 }
7478 
7479                 if (lun == 0) {
7480                         rval = mrsas_config_ld(instance, tgt, lun, childp);
7481                 } else if ((instance->tbolt || instance->skinny) && lun != 0) {
7482                         rval = mrsas_tbolt_config_pd(instance,
7483                             tgt, lun, childp);
7484                 } else {
7485                         rval = NDI_FAILURE;
7486                 }
7487 
7488                 break;
7489         }
7490         case BUS_CONFIG_DRIVER:
7491         case BUS_CONFIG_ALL: {
7492 
7493                 (void) mrsas_config_all_devices(instance);
7494 
7495                 rval = NDI_SUCCESS;
7496                 break;
7497         }
7498         }
7499 
7500         if (rval == NDI_SUCCESS) {
7501                 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
7502 
7503         }
7504         ndi_devi_exit(parent, config);
7505 
7506         con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x",
7507             rval));
7508         return (rval);
7509 }
7510 
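     /*
      * mrsas_config_all_devices: enumerate every possible logical drive and,
      * on SKINNY/TBOLT controllers, every physical disk target.
      */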
7511 static int
7512 mrsas_config_all_devices(struct mrsas_instance *instance)
7513 {
7514         int rval, tgt;
7515 
7516         for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
7517                 (void) mrsas_config_ld(instance, tgt, 0, NULL);
7518 
7519         }
7520 
7521         /* Config PD devices connected to the card */
7522         if (instance->tbolt || instance->skinny) {
7523                 for (tgt = 0; tgt < instance->mr_tbolt_pd_max; tgt++) {
7524                         (void) mrsas_tbolt_config_pd(instance, tgt, 1, NULL);
7525                 }
7526         }
7527 
7528         rval = NDI_SUCCESS;
7529         return (rval);
7530 }
7531 
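     /*
      * mrsas_parse_devname: split a child name of the form
      * "name@tgt,lun[:minor]" into its target and LUN components; both are
      * parsed as hex.  For example (illustrative only), "sd@a,0:c" yields
      * tgt = 0xa and lun = 0.
      */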
7532 static int
7533 mrsas_parse_devname(char *devnm, int *tgt, int *lun)
7534 {
7535         char devbuf[SCSI_MAXNAMELEN];
7536         char *addr;
7537         char *p,  *tp, *lp;
7538         long num;
7539 
7540         /* Parse dev name and address */
7541         (void) strcpy(devbuf, devnm);
7542         addr = "";
7543         for (p = devbuf; *p != '\0'; p++) {
7544                 if (*p == '@') {
7545                         addr = p + 1;
7546                         *p = '\0';
7547                 } else if (*p == ':') {
7548                         *p = '\0';
7549                         break;
7550                 }
7551         }
7552 
7553         /* Parse target and lun */
7554         for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
7555                 if (*p == ',') {
7556                         lp = p + 1;
7557                         *p = '\0';
7558                         break;
7559                 }
7560         }
7561         if (tgt && tp) {
7562                 if (ddi_strtol(tp, NULL, 0x10, &num)) {
7563                         return (DDI_FAILURE); /* Can declare this as constant */
7564                 }
7565                 *tgt = (int)num;
7566         }
7567         if (lun && lp) {
7568                 if (ddi_strtol(lp, NULL, 0x10, &num)) {
7569                         return (DDI_FAILURE);
7570                 }
7571                 *lun = (int)num;
7572         }
7573         return (DDI_SUCCESS);  /* Success case */
7574 }
7575 
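     /*
      * mrsas_config_ld: configure the child node for logical drive (tgt, lun).
      * If a node already exists it is reused, or scheduled for unconfiguration
      * when the target is no longer valid; otherwise the target is probed with
      * scsi_hba_probe() and the node is created by mrsas_config_scsi_device().
      */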
7576 static int
7577 mrsas_config_ld(struct mrsas_instance *instance, uint16_t tgt,
7578     uint8_t lun, dev_info_t **ldip)
7579 {
7580         struct scsi_device *sd;
7581         dev_info_t *child;
7582         int rval;
7583 
7584         con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d",
7585             tgt, lun));
7586 
7587         if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
7588                 if (ldip) {
7589                         *ldip = child;
7590                 }
7591                 if (instance->mr_ld_list[tgt].flag != MRDRV_TGT_VALID) {
7592                         rval = mrsas_service_evt(instance, tgt, 0,
7593                             MRSAS_EVT_UNCONFIG_TGT, NULL);
7594                         con_log(CL_ANN1, (CE_WARN,
7595                             "mr_sas: DELETING STALE ENTRY rval = %d "
7596                             "tgt id = %d ", rval, tgt));
7597                         return (NDI_FAILURE);
7598                 }
7599                 return (NDI_SUCCESS);
7600         }
7601 
7602         sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
7603         sd->sd_address.a_hba_tran = instance->tran;
7604         sd->sd_address.a_target = (uint16_t)tgt;
7605         sd->sd_address.a_lun = (uint8_t)lun;
7606 
7607         if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS)
7608                 rval = mrsas_config_scsi_device(instance, sd, ldip);
7609         else
7610                 rval = NDI_FAILURE;
7611 
7612         /* sd_unprobe is blank now. Free buffer manually */
7613         if (sd->sd_inq) {
7614                 kmem_free(sd->sd_inq, SUN_INQSIZE);
7615                 sd->sd_inq = (struct scsi_inquiry *)NULL;
7616         }
7617 
7618         kmem_free(sd, sizeof (struct scsi_device));
7619         con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: return rval = %d",
7620             rval));
7621         return (rval);
7622 }
7623 
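     /*
      * mrsas_config_scsi_device: create a child dev_info node for the probed
      * scsi_device, decorate it with "target", "lun" and "compatible"
      * properties derived from the INQUIRY data, and online it.
      */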
7624 int
7625 mrsas_config_scsi_device(struct mrsas_instance *instance,
7626     struct scsi_device *sd, dev_info_t **dipp)
7627 {
7628         char *nodename = NULL;
7629         char **compatible = NULL;
7630         int ncompatible = 0;
7631         char *childname;
7632         dev_info_t *ldip = NULL;
7633         int tgt = sd->sd_address.a_target;
7634         int lun = sd->sd_address.a_lun;
7635         int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
7636         int rval;
7637 
7638         con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: scsi_device t%dL%d", tgt, lun));
7639         scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
7640             NULL, &nodename, &compatible, &ncompatible);
7641 
7642         if (nodename == NULL) {
7643                 con_log(CL_ANN1, (CE_WARN, "mr_sas: Found no compatible driver "
7644                     "for t%dL%d", tgt, lun));
7645                 rval = NDI_FAILURE;
7646                 goto finish;
7647         }
7648 
7649         childname = (dtype == DTYPE_DIRECT) ? "sd" : nodename;
7650         con_log(CL_DLEVEL1, (CE_NOTE,
7651             "mr_sas: Childname = %s nodename = %s", childname, nodename));
7652 
7653         /* Create a dev node */
7654         rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip);
7655         con_log(CL_DLEVEL1, (CE_NOTE,
7656             "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval));
7657         if (rval == NDI_SUCCESS) {
7658                 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) !=
7659                     DDI_PROP_SUCCESS) {
7660                         con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7661                             "property for t%dl%d target", tgt, lun));
7662                         rval = NDI_FAILURE;
7663                         goto finish;
7664                 }
7665                 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) !=
7666                     DDI_PROP_SUCCESS) {
7667                         con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7668                             "property for t%dl%d lun", tgt, lun));
7669                         rval = NDI_FAILURE;
7670                         goto finish;
7671                 }
7672 
7673                 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
7674                     "compatible", compatible, ncompatible) !=
7675                     DDI_PROP_SUCCESS) {
7676                         con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7677                             "property for t%dl%d compatible", tgt, lun));
7678                         rval = NDI_FAILURE;
7679                         goto finish;
7680                 }
7681 
7682                 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
7683                 if (rval != NDI_SUCCESS) {
7684                         con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to online "
7685                             "t%dl%d", tgt, lun));
7686                         ndi_prop_remove_all(ldip);
7687                         (void) ndi_devi_free(ldip);
7688                 } else {
7689                         con_log(CL_ANN1, (CE_CONT, "mr_sas: online Done :"
7690                             "0 t%dl%d", tgt, lun));
7691                 }
7692 
7693         }
7694 finish:
7695         if (dipp) {
7696                 *dipp = ldip;
7697         }
7698 
7699         con_log(CL_DLEVEL1, (CE_NOTE,
7700             "mr_sas: config_scsi_device rval = %d t%dL%d",
7701             rval, tgt, lun));
7702         scsi_hba_nodename_compatible_free(nodename, compatible);
7703         return (rval);
7704 }
7705 
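     /*
      * mrsas_service_evt: queue a config/unconfig event for (tgt, lun) on the
      * driver taskq; the actual node creation or removal is done later by
      * mrsas_issue_evt_taskq().
      */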
7706 /*ARGSUSED*/
7707 int
7708 mrsas_service_evt(struct mrsas_instance *instance, int tgt, int lun, int event,
7709     uint64_t wwn)
7710 {
7711         struct mrsas_eventinfo *mrevt = NULL;
7712 
7713         con_log(CL_ANN1, (CE_NOTE,
7714             "mrsas_service_evt called for t%dl%d event = %d",
7715             tgt, lun, event));
7716 
7717         if ((instance->taskq == NULL) || (mrevt =
7718             kmem_zalloc(sizeof (struct mrsas_eventinfo), KM_NOSLEEP)) == NULL) {
7719                 return (ENOMEM);
7720         }
7721 
7722         mrevt->instance = instance;
7723         mrevt->tgt = tgt;
7724         mrevt->lun = lun;
7725         mrevt->event = event;
7726         mrevt->wwn = wwn;
7727 
7728         if ((ddi_taskq_dispatch(instance->taskq,
7729             (void (*)(void *))mrsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) !=
7730             DDI_SUCCESS) {
7731                 con_log(CL_ANN1, (CE_NOTE,
7732                     "mr_sas: Event task failed for t%dl%d event = %d",
7733                     tgt, lun, event));
7734                 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7735                 return (DDI_FAILURE);
7736         }
7737         DTRACE_PROBE3(service_evt, int, tgt, int, lun, int, event);
7738         return (DDI_SUCCESS);
7739 }
7740 
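     /*
      * mrsas_issue_evt_taskq: taskq worker for mrsas_service_evt().  Looks up
      * the cached dip for the target, then either configures the node
      * (MRSAS_EVT_CONFIG_TGT) or cleans it out of devfs and offlines it
      * (MRSAS_EVT_UNCONFIG_TGT).
      */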
7741 static void
7742 mrsas_issue_evt_taskq(struct mrsas_eventinfo *mrevt)
7743 {
7744         struct mrsas_instance *instance = mrevt->instance;
7745         dev_info_t *dip, *pdip;
7746         int circ1 = 0;
7747         char *devname;
7748 
7749         con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_evt_taskq: called for"
7750             " tgt %d lun %d event %d",
7751             mrevt->tgt, mrevt->lun, mrevt->event));
7752 
7753         if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) {
7754                 mutex_enter(&instance->config_dev_mtx);
7755                 dip = instance->mr_ld_list[mrevt->tgt].dip;
7756                 mutex_exit(&instance->config_dev_mtx);
7757         } else {
7758                 mutex_enter(&instance->config_dev_mtx);
7759                 dip = instance->mr_tbolt_pd_list[mrevt->tgt].dip;
7760                 mutex_exit(&instance->config_dev_mtx);
7761         }
7762 
7763 
7764         ndi_devi_enter(instance->dip, &circ1);
7765         switch (mrevt->event) {
7766         case MRSAS_EVT_CONFIG_TGT:
7767                 if (dip == NULL) {
7768 
7769                         if (mrevt->lun == 0) {
7770                                 (void) mrsas_config_ld(instance, mrevt->tgt,
7771                                     0, NULL);
7772                         } else if (instance->tbolt || instance->skinny) {
7773                                 (void) mrsas_tbolt_config_pd(instance,
7774                                     mrevt->tgt,
7775                                     1, NULL);
7776                         }
7777                         con_log(CL_ANN1, (CE_NOTE,
7778                             "mr_sas: EVT_CONFIG_TGT called:"
7779                             " for tgt %d lun %d event %d",
7780                             mrevt->tgt, mrevt->lun, mrevt->event));
7781 
7782                 } else {
7783                         con_log(CL_ANN1, (CE_NOTE,
7784                             "mr_sas: EVT_CONFIG_TGT dip != NULL:"
7785                             " for tgt %d lun %d event %d",
7786                             mrevt->tgt, mrevt->lun, mrevt->event));
7787                 }
7788                 break;
7789         case MRSAS_EVT_UNCONFIG_TGT:
7790                 if (dip) {
7791                         if (i_ddi_devi_attached(dip)) {
7792 
7793                                 pdip = ddi_get_parent(dip);
7794 
7795                                 devname = kmem_zalloc(MAXNAMELEN + 1, KM_SLEEP);
7796                                 (void) ddi_deviname(dip, devname);
7797 
7798                                 (void) devfs_clean(pdip, devname + 1,
7799                                     DV_CLEAN_FORCE);
7800                                 kmem_free(devname, MAXNAMELEN + 1);
7801                         }
7802                         (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
7803                         con_log(CL_ANN1, (CE_NOTE,
7804                             "mr_sas: EVT_UNCONFIG_TGT called:"
7805                             " for tgt %d lun %d event %d",
7806                             mrevt->tgt, mrevt->lun, mrevt->event));
7807                 } else {
7808                         con_log(CL_ANN1, (CE_NOTE,
7809                             "mr_sas: EVT_UNCONFIG_TGT dip == NULL:"
7810                             " for tgt %d lun %d event %d",
7811                             mrevt->tgt, mrevt->lun, mrevt->event));
7812                 }
7813                 break;
7814         }
7815         kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7816         ndi_devi_exit(instance->dip, circ1);
7817 }
7818 
7819 
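     /*
      * mrsas_mode_sense_build: synthesize MODE SENSE data in the command's
      * buffer.  Page 0x3 (format parameters) reports 512-byte sectors and 63
      * sectors per track, and page 0x4 (rigid disk geometry) reports 255 heads
      * at 10000 rpm; other pages are returned zeroed.
      */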
7820 int
7821 mrsas_mode_sense_build(struct scsi_pkt *pkt)
7822 {
7823         union scsi_cdb          *cdbp;
7824         uint16_t                page_code;
7825         struct scsa_cmd         *acmd;
7826         struct buf              *bp;
7827         struct mode_header      *modehdrp;
7828 
7829         cdbp = (void *)pkt->pkt_cdbp;
7830         page_code = cdbp->cdb_un.sg.scsi[0];
7831         acmd = PKT2CMD(pkt);
7832         bp = acmd->cmd_buf;
7833         if (!bp || !bp->b_un.b_addr || !bp->b_bcount || !acmd->cmd_dmacount) {
7834                 con_log(CL_ANN1, (CE_WARN, "Failing MODESENSE Command"));
7835                 /* ADD pkt statistics as Command failed. */
7836                 return (0);
7837         }
7838 
7839         bp_mapin(bp);
7840         bzero(bp->b_un.b_addr, bp->b_bcount);
7841 
7842         switch (page_code) {
7843                 case 0x3: {
7844                         struct mode_format *page3p = NULL;
7845                         modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7846                         modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7847 
7848                         page3p = (void *)((caddr_t)modehdrp +
7849                             MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7850                         page3p->mode_page.code = 0x3;
7851                         page3p->mode_page.length =
7852                             (uchar_t)(sizeof (struct mode_format));
7853                         page3p->data_bytes_sect = 512;
7854                         page3p->sect_track = 63;
7855                         break;
7856                 }
7857                 case 0x4: {
7858                         struct mode_geometry *page4p = NULL;
7859                         modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7860                         modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7861 
7862                         page4p = (void *)((caddr_t)modehdrp +
7863                             MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7864                         page4p->mode_page.code = 0x4;
7865                         page4p->mode_page.length =
7866                             (uchar_t)(sizeof (struct mode_geometry));
7867                         page4p->heads = 255;
7868                         page4p->rpm = 10000;
7869                         break;
7870                 }
7871                 default:
7872                         break;
7873         }
7874         return (0);
7875 }