1 /*
   2  *
   3  *  skd.c: Solaris 11/10 Driver for sTec, Inc. S112x PCIe SSD card
   4  *
   5  *  The Solaris driver is based on the Linux driver authored by:
   6  *
   7  *  Authors/Alphabetical:       Dragan Stancevic <dstancevic@stec-inc.com>
   8  *                              Gordon Waidhofer <gwaidhofer@stec-inc.com>
   9  *                              John Hamilton    <jhamilton@stec-inc.com>
  10  */
  11 
  12 /*
  13  * This file and its contents are supplied under the terms of the
  14  * Common Development and Distribution License ("CDDL"), version 1.0.
  15  * You may only use this file in accordance with the terms of version
  16  * 1.0 of the CDDL.
  17  *
  18  * A full copy of the text of the CDDL should have accompanied this
  19  * source.  A copy of the CDDL is also available via the Internet at
  20  * http://www.illumos.org/license/CDDL.
  21  */
  22 
  23 /*
  24  * Copyright 2013 STEC, Inc.  All rights reserved.
  25  * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
  26  */
  27 
  28 #include        <sys/types.h>
  29 #include        <sys/stream.h>
  30 #include        <sys/cmn_err.h>
  31 #include        <sys/kmem.h>
  32 #include        <sys/file.h>
  33 #include        <sys/buf.h>
  34 #include        <sys/uio.h>
  35 #include        <sys/cred.h>
  36 #include        <sys/modctl.h>
  37 #include        <sys/debug.h>
  39 #include        <sys/list.h>
  40 #include        <sys/sysmacros.h>
  41 #include        <sys/errno.h>
  42 #include        <sys/pcie.h>
  43 #include        <sys/pci.h>
  44 #include        <sys/ddi.h>
  45 #include        <sys/dditypes.h>
  46 #include        <sys/sunddi.h>
  47 #include        <sys/atomic.h>
  48 #include        <sys/mutex.h>
  49 #include        <sys/param.h>
  50 #include        <sys/devops.h>
  51 #include        <sys/blkdev.h>
  52 #include        <sys/queue.h>
  53 #include        <sys/scsi/impl/inquiry.h>
  54 
  55 #include        "skd_s1120.h"
  56 #include        "skd.h"
  57 
  58 int             skd_dbg_level     = 0;
  59 
  60 void            *skd_state        = NULL;
  61 int             skd_disable_msi   = 0;
  62 int             skd_disable_msix  = 0;
  63 
  64 /* Initialized in _init() and tunable, see _init(). */
  65 clock_t         skd_timer_ticks;
  66 
  67 /* I/O DMA attributes structures. */
  68 static ddi_dma_attr_t skd_64bit_io_dma_attr = {
  69         DMA_ATTR_V0,                    /* dma_attr_version */
  70         SKD_DMA_LOW_ADDRESS,            /* low DMA address range */
  71         SKD_DMA_HIGH_64BIT_ADDRESS,     /* high DMA address range */
  72         SKD_DMA_XFER_COUNTER,           /* DMA counter register */
  73         SKD_DMA_ADDRESS_ALIGNMENT,      /* DMA address alignment */
  74         SKD_DMA_BURSTSIZES,             /* DMA burstsizes */
  75         SKD_DMA_MIN_XFER_SIZE,          /* min effective DMA size */
  76         SKD_DMA_MAX_XFER_SIZE,          /* max DMA xfer size */
  77         SKD_DMA_SEGMENT_BOUNDARY,       /* segment boundary */
  78         SKD_DMA_SG_LIST_LENGTH,         /* s/g list length */
  79         SKD_DMA_GRANULARITY,            /* granularity of device */
  80         SKD_DMA_XFER_FLAGS              /* DMA transfer flags */
  81 };
  82 
  83 int skd_isr_type = -1;
  84 
  85 #define SKD_MAX_QUEUE_DEPTH         255
  86 #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
  87 int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
  88 
  89 #define SKD_MAX_REQ_PER_MSG         14
  90 #define SKD_MAX_REQ_PER_MSG_DEFAULT 1
  91 int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
  92 
  93 #define SKD_MAX_N_SG_PER_REQ        4096
  94 int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
  95 
  96 static int skd_sys_quiesce_dev(dev_info_t *);
  97 static int skd_quiesce_dev(skd_device_t *);
  98 static int skd_list_skmsg(skd_device_t *, int);
  99 static int skd_list_skreq(skd_device_t *, int);
 100 static int skd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
 101 static int skd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
 102 static int skd_format_internal_skspcl(struct skd_device *skdev);
 103 static void skd_start(skd_device_t *);
 104 static void skd_destroy_mutex(skd_device_t *skdev);
 105 static void skd_enable_interrupts(struct skd_device *);
 106 static void skd_request_fn_not_online(skd_device_t *skdev);
 107 static void skd_send_internal_skspcl(struct skd_device *,
 108     struct skd_special_context *, uint8_t);
 109 static void skd_queue(skd_device_t *, skd_buf_private_t *);
 110 static void *skd_alloc_dma_mem(skd_device_t *, dma_mem_t *, uint8_t);
 111 static void skd_release_intr(skd_device_t *skdev);
 112 static void skd_isr_fwstate(struct skd_device *skdev);
 113 static void skd_isr_msg_from_dev(struct skd_device *skdev);
 114 static void skd_soft_reset(struct skd_device *skdev);
 115 static void skd_refresh_device_data(struct skd_device *skdev);
 116 static void skd_update_props(skd_device_t *, dev_info_t *);
 117 static void skd_end_request_abnormal(struct skd_device *, skd_buf_private_t *,
 118     int, int);
 119 static char *skd_pci_info(struct skd_device *skdev, char *str, size_t len);
 120 
 121 static skd_buf_private_t *skd_get_queued_pbuf(skd_device_t *);
 122 
 123 static void skd_bd_driveinfo(void *arg, bd_drive_t *drive);
 124 static int  skd_bd_mediainfo(void *arg, bd_media_t *media);
 125 static int  skd_bd_read(void *arg,  bd_xfer_t *xfer);
 126 static int  skd_bd_write(void *arg, bd_xfer_t *xfer);
 127 static int  skd_devid_init(void *arg, dev_info_t *, ddi_devid_t *);
 128 
 129 
 130 static bd_ops_t skd_bd_ops = {
 131         BD_OPS_VERSION_0,
 132         skd_bd_driveinfo,
 133         skd_bd_mediainfo,
 134         skd_devid_init,
 135         NULL,                   /* sync_cache */
 136         skd_bd_read,
 137         skd_bd_write,
 138 };
 139 
 140 static ddi_device_acc_attr_t    dev_acc_attr = {
 141         DDI_DEVICE_ATTR_V0,
 142         DDI_STRUCTURE_LE_ACC,
 143         DDI_STRICTORDER_ACC
 144 };
 145 
 146 /*
 147  * Solaris module loading/unloading structures
 148  */
 149 struct dev_ops skd_dev_ops = {
 150         DEVO_REV,                       /* devo_rev */
 151         0,                              /* refcnt */
 152         ddi_no_info,                    /* getinfo */
 153         nulldev,                        /* identify */
 154         nulldev,                        /* probe */
 155         skd_attach,                     /* attach */
 156         skd_detach,                     /* detach */
 157         nodev,                          /* reset */
 158         NULL,                           /* char/block ops */
 159         NULL,                           /* bus operations */
 160         NULL,                           /* power management */
 161         skd_sys_quiesce_dev             /* quiesce */
 162 };
 163 
 164 static struct modldrv modldrv = {
 165         &mod_driverops,                     /* type of module: driver */
 166         "sTec skd v" DRV_VER_COMPL,     /* name of module */
 167         &skd_dev_ops                        /* driver dev_ops */
 168 };
 169 
 170 static struct modlinkage modlinkage = {
 171         MODREV_1,
 172         &modldrv,
 173         NULL
 174 };
 175 
 176 /*
 177  * sTec-required wrapper for debug printing.
 178  */
 179 /*PRINTFLIKE2*/
 180 static inline void
 181 Dcmn_err(int lvl, const char *fmt, ...)
 182 {
 183         va_list ap;
 184 
 185         if (skd_dbg_level == 0)
 186                 return;
 187 
 188         va_start(ap, fmt);
 189         vcmn_err(lvl, fmt, ap);
 190         va_end(ap);
 191 }
 192 
 193 /*
 194  * Solaris module loading/unloading routines
 195  */
 196 
 197 /*
 198  *
 199  * Name:        _init, performs initial installation
 200  *
 201  * Inputs:      None.
 202  *
 203  * Returns:     The value returned by ddi_soft_state_init() if creating
 204  *              the soft state fails; otherwise the value returned by
 205  *              mod_install().
 206  *
 207  */
 208 int
 209 _init(void)
 210 {
 211         int             rval = 0;
 212         int             tgts = 0;
 213 
 214         tgts |= 0x02;
 215         tgts |= 0x08;   /* In #ifdef NEXENTA block from original sTec drop. */
 216 
 217         /*
 218          * drv_usectohz() is a function, so can't initialize it at
 219          * instantiation.
 220          */
 221         skd_timer_ticks = drv_usectohz(1000000);
 222 
 223         Dcmn_err(CE_NOTE,
 224             "<# Installing skd Driver dbg-lvl=%d %s %x>",
 225             skd_dbg_level, DRV_BUILD_ID, tgts);
 226 
 227         rval = ddi_soft_state_init(&skd_state, sizeof (skd_device_t), 0);
 228         if (rval != DDI_SUCCESS)
 229                 return (rval);
 230 
 231         bd_mod_init(&skd_dev_ops);
 232 
 233         rval = mod_install(&modlinkage);
 234         if (rval != DDI_SUCCESS) {
 235                 ddi_soft_state_fini(&skd_state);
 236                 bd_mod_fini(&skd_dev_ops);
 237         }
 238 
 239         return (rval);
 240 }
 241 
 242 /*
 243  *
 244  * Name:        _info, returns information about the loadable module.
 245  *
 246  * Inputs:      modinfo, pointer to module information structure.
 247  *
 248  * Returns:     Value returned by mod_info().
 249  *
 250  */
 251 int
 252 _info(struct modinfo *modinfop)
 253 {
 254         return (mod_info(&modlinkage, modinfop));
 255 }
 256 
 257 /*
 258  * _fini        Prepares a module for unloading. It is called when the system
 259  *              wants to unload a module. If the module determines that it can
 260  *              be unloaded, then _fini() returns the value returned by
 261  *              mod_remove(). Upon successful return from _fini() no other
 262  *              routine in the module will be called before _init() is called.
 263  *
 264  * Inputs:      None.
 265  *
 266  * Returns:     The value returned by mod_remove().
 267  *
 268  */
 269 int
 270 _fini(void)
 271 {
 272         int rval;
 273 
 274         rval = mod_remove(&modlinkage);
 275         if (rval == DDI_SUCCESS) {
 276                 ddi_soft_state_fini(&skd_state);
 277                 bd_mod_fini(&skd_dev_ops);
 278         }
 279 
 280         return (rval);
 281 }
 282 
 283 /*
 284  * Solaris Register read/write routines
 285  */
 286 
 287 /*
 288  *
 289  * Name:        skd_reg_write64, writes a 64-bit value to the specified address
 290  *
 291  * Inputs:      skdev           - device state structure.
 292  *              val             - 64-bit value to be written.
 293  *              offset          - offset from PCI base address.
 294  *
 295  * Returns:     Nothing.
 296  *
 297  */
 298 /*
 299  * Local vars are here to keep lint silent.  Any compiler worth its salt
 300  * will optimize them right out...
 301  */
 302 static inline void
 303 skd_reg_write64(struct skd_device *skdev, uint64_t val, uint32_t offset)
 304 {
 305         uint64_t *addr;
 306 
 307         ASSERT((offset & 0x7) == 0);
 308         /* LINTED */
 309         addr = (uint64_t *)(skdev->dev_iobase + offset);
 310         ddi_put64(skdev->dev_handle, addr, val);
 311 }
 312 
 313 /*
 314  *
 315  * Name:        skd_reg_read32, reads a 32-bit value from the specified address
 316  *
 317  * Inputs:      skdev           - device state structure.
 318  *              offset          - offset from PCI base address.
 319  *
 320  * Returns:     val, 32-bit value read from specified PCI address.
 321  *
 322  */
 323 static inline uint32_t
 324 skd_reg_read32(struct skd_device *skdev, uint32_t offset)
 325 {
 326         uint32_t *addr;
 327 
 328         ASSERT((offset & 0x3) == 0);
 329         /* LINTED */
 330         addr = (uint32_t *)(skdev->dev_iobase + offset);
 331         return (ddi_get32(skdev->dev_handle, addr));
 332 }
 333 
 334 /*
 335  *
 336  * Name:        skd_reg_write32, writes a 32-bit value to the specified address
 337  *
 338  * Inputs:      skdev           - device state structure.
 339  *              val             - value to be written.
 340  *              offset          - offset from PCI base address.
 341  *
 342  * Returns:     Nothing.
 343  *
 344  */
 345 static inline void
 346 skd_reg_write32(struct skd_device *skdev, uint32_t val, uint32_t offset)
 347 {
 348         uint32_t *addr;
 349 
 350         ASSERT((offset & 0x3) == 0);
 351         /* LINTED */
 352         addr = (uint32_t *)(skdev->dev_iobase + offset);
 353         ddi_put32(skdev->dev_handle, addr, val);
 354 }
 355 
 356 
 357 /*
 358  * Solaris skd routines
 359  */
 360 
 361 /*
 362  *
 363  * Name:        skd_name, generates the name of the driver.
 364  *
 365  * Inputs:      skdev   - device state structure
 366  *
 367  * Returns:     char pointer to generated driver name.
 368  *
 369  */
 370 static const char *
 371 skd_name(struct skd_device *skdev)
 372 {
 373         (void) snprintf(skdev->id_str, sizeof (skdev->id_str), "%s:", DRV_NAME);
 374 
 375         return (skdev->id_str);
 376 }
 377 
 378 /*
 379  *
 380  * Name:        skd_pci_find_capability, searches the PCI capability
 381  *              list for the specified capability.
 382  *
 383  * Inputs:      skdev           - device state structure.
 384  *              cap             - capability sought.
 385  *
 386  * Returns:     Position at which the capability was found, or zero if it
 387  *              was not found.
 388  *
 389  */
 390 static int
 391 skd_pci_find_capability(struct skd_device *skdev, int cap)
 392 {
 393         uint16_t status;
 394         uint8_t  pos, id, hdr;
 395         int      ttl = 48;
 396 
 397         status = pci_config_get16(skdev->pci_handle, PCI_CONF_STAT);
 398 
 399         if (!(status & PCI_STAT_CAP))
 400                 return (0);
 401 
 402         hdr = pci_config_get8(skdev->pci_handle, PCI_CONF_HEADER);
 403 
 404         if ((hdr & PCI_HEADER_TYPE_M) != 0)
 405                 return (0);
 406 
 407         pos = pci_config_get8(skdev->pci_handle, PCI_CONF_CAP_PTR);
 408 
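             /*
              * Walk the capability list.  The ttl counter bounds the walk so
              * that a corrupt or looping list cannot hang us; an offset below
              * 0x40 or a capability ID of 0xff also ends the search.
              */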
 409         while (ttl-- && pos >= 0x40) {
 410                 pos &= ~3;
 411                 id = pci_config_get8(skdev->pci_handle, pos+PCI_CAP_ID);
 412                 if (id == 0xff)
 413                         break;
 414                 if (id == cap)
 415                         return (pos);
 416                 pos = pci_config_get8(skdev->pci_handle, pos+PCI_CAP_NEXT_PTR);
 417         }
 418 
 419         return (0);
 420 }
 421 
 422 /*
 423  *
 424  * Name:        skd_io_done, called to conclude an I/O operation.
 425  *
 426  * Inputs:      skdev           - device state structure.
 427  *              pbuf            - I/O request
 428  *              error           - I/O error value.
 429  *              mode            - completion accounting mode (statistics).
 430  *
 431  * Returns:     Nothing.
 432  *
 433  */
 434 static void
 435 skd_io_done(skd_device_t *skdev, skd_buf_private_t *pbuf,
 436     int error, int mode)
 437 {
 438         bd_xfer_t *xfer;
 439 
 440         ASSERT(pbuf != NULL);
 441 
 442         xfer = pbuf->x_xfer;
 443 
 444         switch (mode) {
 445         case SKD_IODONE_WIOC:
 446                 skdev->iodone_wioc++;
 447                 break;
 448         case SKD_IODONE_WNIOC:
 449                 skdev->iodone_wnioc++;
 450                 break;
 451         case SKD_IODONE_WDEBUG:
 452                 skdev->iodone_wdebug++;
 453                 break;
 454         default:
 455                 skdev->iodone_unknown++;
 456         }
 457 
 458         if (error) {
 459                 skdev->ios_errors++;
 460                 cmn_err(CE_WARN,
 461                     "!%s:skd_io_done:ERR=%d %lld-%ld %s", skdev->name,
 462                     error, xfer->x_blkno, xfer->x_nblks,
 463                     (pbuf->dir & B_READ) ? "Read" : "Write");
 464         }
 465 
 466         kmem_free(pbuf, sizeof (skd_buf_private_t));
 467 
 468         bd_xfer_done(xfer,  error);
 469 }
 470 
 471 /*
 472  * QUIESCE DEVICE
 473  */
 474 
 475 /*
 476  *
 477  * Name:        skd_sys_quiesce_dev, quiets the device
 478  *
 479  * Inputs:      dip             - dev info structure
 480  *
 481  * Returns:     Zero.
 482  *
 483  */
 484 static int
 485 skd_sys_quiesce_dev(dev_info_t *dip)
 486 {
 487         skd_device_t    *skdev;
 488 
 489         skdev = ddi_get_soft_state(skd_state, ddi_get_instance(dip));
 490 
 491         /* make sure Dcmn_err() doesn't actually print anything */
 492         skd_dbg_level = 0;
 493 
 494         skd_disable_interrupts(skdev);
 495         skd_soft_reset(skdev);
 496 
 497         return (0);
 498 }
 499 
 500 /*
 501  *
 502  * Name:        skd_quiesce_dev, quiets the device, but doesn't really do much.
 503  *
 504  * Inputs:      skdev           - Device state.
 505  *
 506  * Returns:     -EINVAL if device is not in proper state otherwise
 507  *              returns zero.
 508  *
 509  */
 510 static int
 511 skd_quiesce_dev(skd_device_t *skdev)
 512 {
 513         int rc = 0;
 514 
 515         if (skd_dbg_level)
 516                 Dcmn_err(CE_NOTE, "skd_quiesce_dev:");
 517 
 518         switch (skdev->state) {
 519         case SKD_DRVR_STATE_BUSY:
 520         case SKD_DRVR_STATE_BUSY_IMMINENT:
 521                 Dcmn_err(CE_NOTE, "%s: stopping queue", skdev->name);
 522                 break;
 523         case SKD_DRVR_STATE_ONLINE:
 524         case SKD_DRVR_STATE_STOPPING:
 525         case SKD_DRVR_STATE_SYNCING:
 526         case SKD_DRVR_STATE_PAUSING:
 527         case SKD_DRVR_STATE_PAUSED:
 528         case SKD_DRVR_STATE_STARTING:
 529         case SKD_DRVR_STATE_RESTARTING:
 530         case SKD_DRVR_STATE_RESUMING:
 531         default:
 532                 rc = -EINVAL;
 533                 cmn_err(CE_NOTE, "state [%d] not implemented", skdev->state);
 534         }
 535 
 536         return (rc);
 537 }
 538 
 539 /*
 540  * UNQUIESCE DEVICE:
 541  * Note: Assumes lock is held to protect device state.
 542  */
 543 /*
 544  *
 545  * Name:        skd_unquiesce_dev, awakens the device
 546  *
 547  * Inputs:      skdev           - Device state.
 548  *
 549  * Returns:     -EINVAL if device is not in proper state otherwise
 550  *              returns zero.
 551  *
 552  */
 553 static int
 554 skd_unquiesce_dev(struct skd_device *skdev)
 555 {
 556         Dcmn_err(CE_NOTE, "skd_unquiesce_dev:");
 557 
 558         skd_log_skdev(skdev, "unquiesce");
 559         if (skdev->state == SKD_DRVR_STATE_ONLINE) {
 560                 Dcmn_err(CE_NOTE, "**** device already ONLINE");
 561 
 562                 return (0);
 563         }
 564         if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
 565                 /*
 566                  * If there has been a state change to anything other
 567                  * than ONLINE, we will rely on a controller state change
 568                  * to bring us back online and restart the queue.
 569                  * The BUSY state means that the driver is ready to
 570                  * continue normal processing but is waiting for the
 571                  * controller to become available.
 572                  */
 573                 skdev->state = SKD_DRVR_STATE_BUSY;
 574                 Dcmn_err(CE_NOTE, "drive BUSY state\n");
 575 
 576                 return (0);
 577         }
 578         /*
 579          * The drive has just come online; the driver is either in
 580          * startup, paused performing a task, or busy waiting for hardware.
 581          */
 582         switch (skdev->state) {
 583         case SKD_DRVR_STATE_PAUSED:
 584         case SKD_DRVR_STATE_BUSY:
 585         case SKD_DRVR_STATE_BUSY_IMMINENT:
 586         case SKD_DRVR_STATE_BUSY_ERASE:
 587         case SKD_DRVR_STATE_STARTING:
 588         case SKD_DRVR_STATE_RESTARTING:
 589         case SKD_DRVR_STATE_FAULT:
 590         case SKD_DRVR_STATE_IDLE:
 591         case SKD_DRVR_STATE_LOAD:
 592                 skdev->state = SKD_DRVR_STATE_ONLINE;
 593                 Dcmn_err(CE_NOTE, "%s: sTec s1120 ONLINE", skdev->name);
 594                 Dcmn_err(CE_NOTE, "%s: Starting request queue", skdev->name);
 595                 Dcmn_err(CE_NOTE,
 596                     "%s: queue depth limit=%d hard=%d soft=%d lowat=%d",
 597                     skdev->name,
 598                     skdev->queue_depth_limit,
 599                     skdev->hard_queue_depth_limit,
 600                     skdev->soft_queue_depth_limit,
 601                     skdev->queue_depth_lowat);
 602 
 603                 skdev->gendisk_on = 1;
 604                 cv_signal(&skdev->cv_waitq);
 605                 break;
 606         case SKD_DRVR_STATE_DISAPPEARED:
 607         default:
 608                 cmn_err(CE_NOTE, "**** driver state %d, not implemented \n",
 609                     skdev->state);
 610                 return (-EBUSY);
 611         }
 612 
 613         return (0);
 614 }
 615 
 616 /*
 617  * READ/WRITE REQUESTS
 618  */
 619 
 620 /*
 621  *
 622  * Name:        skd_blkdev_preop_sg_list, builds the S/G list from info
 623  *              passed in by the blkdev driver.
 624  *
 625  * Inputs:      skdev           - device state structure.
 626  *              skreq           - request structure.
 627  *              sg_byte_count   - data transfer byte count.
 628  *
 629  * Returns:     Nothing.
 630  *
 631  */
 632 /*ARGSUSED*/
 633 static void
 634 skd_blkdev_preop_sg_list(struct skd_device *skdev,
 635     struct skd_request_context *skreq, uint32_t *sg_byte_count)
 636 {
 637         bd_xfer_t               *xfer;
 638         skd_buf_private_t       *pbuf;
 639         int                     i, bcount = 0;
 640         uint_t                  n_sg;
 641 
 642         *sg_byte_count = 0;
 643 
 644         ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
 645             skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST);
 646 
 647         pbuf = skreq->pbuf;
 648         ASSERT(pbuf != NULL);
 649 
 650         xfer = pbuf->x_xfer;
 651         n_sg = xfer->x_ndmac;
 652 
 653         ASSERT(n_sg <= skdev->sgs_per_request);
 654 
 655         skreq->n_sg = n_sg;
 656 
 657         skreq->io_dma_handle = xfer->x_dmah;
 658 
 659         skreq->total_sg_bcount = 0;
 660 
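             /*
              * blkdev(9E) supplies the first DMA cookie in xfer->x_dmac and
              * the cookie count in x_ndmac; each additional cookie is pulled
              * in with ddi_dma_nextcookie() as the S/G descriptors are built.
              */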
 661         for (i = 0; i < n_sg; i++) {
 662                 ddi_dma_cookie_t *cookiep = &xfer->x_dmac;
 663                 struct fit_sg_descriptor *sgd;
 664                 uint32_t cnt = (uint32_t)cookiep->dmac_size;
 665 
 666                 bcount += cnt;
 667 
 668                 sgd                     = &skreq->sksg_list[i];
 669                 sgd->control         = FIT_SGD_CONTROL_NOT_LAST;
 670                 sgd->byte_count              = cnt;
 671                 sgd->host_side_addr  = cookiep->dmac_laddress;
 672                 sgd->dev_side_addr   = 0; /* not used */
 673                 *sg_byte_count          += cnt;
 674 
 675                 skreq->total_sg_bcount += cnt;
 676 
 677                 if ((i + 1) != n_sg)
 678                         ddi_dma_nextcookie(skreq->io_dma_handle, &xfer->x_dmac);
 679         }
 680 
 681         skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
 682         skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
 683 
 684         (void) ddi_dma_sync(skreq->sksg_dma_address.dma_handle, 0, 0,
 685             DDI_DMA_SYNC_FORDEV);
 686 }
 687 
 688 /*
 689  *
 690  * Name:        skd_blkdev_postop_sg_list, restores the S/G list after I/O.
 691  *
 692  * Inputs:      skdev           - device state structure.
 693  *              skreq           - skreq data structure.
 694  *
 695  * Returns:     Nothing.
 696  *
 697  */
 698 /* ARGSUSED */  /* Upstream common source with other platforms. */
 699 static void
 700 skd_blkdev_postop_sg_list(struct skd_device *skdev,
 701     struct skd_request_context *skreq)
 702 {
 703         /*
 704          * restore the next ptr for next IO request so we
 705          * don't have to set it every time.
 706          */
 707         skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
 708             skreq->sksg_dma_address.cookies->dmac_laddress +
 709             ((skreq->n_sg) * sizeof (struct fit_sg_descriptor));
 710 }
 711 
 712 /*
 713  *
 714  * Name:        skd_start, initiates an I/O.
 715  *
 716  * Inputs:      skdev           - device state structure.
 717  *
 718  * Returns:     Nothing.
 721  *
 722  */
 723 /* Upstream common source with other platforms. */
 724 static void
 725 skd_start(skd_device_t *skdev)
 726 {
 727         struct skd_fitmsg_context       *skmsg = NULL;
 728         struct fit_msg_hdr              *fmh = NULL;
 729         struct skd_request_context      *skreq = NULL;
 730         struct waitqueue                *waitq = &skdev->waitqueue;
 731         struct skd_scsi_request         *scsi_req;
 732         skd_buf_private_t               *pbuf = NULL;
 733         int                             bcount;
 734 
 735         uint32_t                        lba;
 736         uint32_t                        count;
 737         uint32_t                        timo_slot;
 738         void                            *cmd_ptr;
 739         uint32_t                        sg_byte_count = 0;
 740 
 741         /*
 742          * Stop conditions:
 743          *  - There are no more native requests
 744          *  - There are already the maximum number of requests in progress
 745          *  - There are no more skd_request_context entries
 746          *  - There are no more FIT msg buffers
 747          */
 748         for (;;) {
 749                 /* Are too many requests already in progress? */
 750                 if (skdev->queue_depth_busy >= skdev->queue_depth_limit) {
 751                         Dcmn_err(CE_NOTE, "qdepth %d, limit %d\n",
 752                             skdev->queue_depth_busy,
 753                             skdev->queue_depth_limit);
 754                         break;
 755                 }
 756 
 757                 WAITQ_LOCK(skdev);
 758                 if (SIMPLEQ_EMPTY(waitq)) {
 759                         WAITQ_UNLOCK(skdev);
 760                         break;
 761                 }
 762 
 763                 /* Is a skd_request_context available? */
 764                 skreq = skdev->skreq_free_list;
 765                 if (skreq == NULL) {
 766                         WAITQ_UNLOCK(skdev);
 767                         break;
 768                 }
 769 
 770                 ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
 771                 ASSERT((skreq->id & SKD_ID_INCR) == 0);
 772 
 773                 skdev->skreq_free_list = skreq->next;
 774 
 775                 skreq->state = SKD_REQ_STATE_BUSY;
 776                 skreq->id += SKD_ID_INCR;
 777 
 778                 /* Start a new FIT msg if there is none in progress. */
 779                 if (skmsg == NULL) {
 780                         /* Are there any FIT msg buffers available? */
 781                         skmsg = skdev->skmsg_free_list;
 782                         if (skmsg == NULL) {
 783                                 WAITQ_UNLOCK(skdev);
 784                                 break;
 785                         }
 786 
 787                         ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
 788                         ASSERT((skmsg->id & SKD_ID_INCR) == 0);
 789 
 790                         skdev->skmsg_free_list = skmsg->next;
 791 
 792                         skmsg->state = SKD_MSG_STATE_BUSY;
 793                         skmsg->id += SKD_ID_INCR;
 794 
 795                         /* Initialize the FIT msg header */
 796                         fmh = (struct fit_msg_hdr *)skmsg->msg_buf64;
 797                         bzero(fmh, sizeof (*fmh)); /* Too expensive */
 798                         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
 799                         skmsg->length = sizeof (struct fit_msg_hdr);
 800                 }
 801 
 802                 /*
 803                  * At this point we are committed to either start or reject
 804                  * the native request. Note that a FIT msg may have just been
 805                  * started but contains no SoFIT requests yet.
 806                  * Now - dequeue pbuf.
 807                  */
 808                 pbuf = skd_get_queued_pbuf(skdev);
 809                 WAITQ_UNLOCK(skdev);
 810 
 811                 skreq->pbuf = pbuf;
 812                 lba = pbuf->x_xfer->x_blkno;
 813                 count = pbuf->x_xfer->x_nblks;
 814                 skreq->did_complete = 0;
 815 
 816                 skreq->fitmsg_id = skmsg->id;
 817 
 818                 Dcmn_err(CE_NOTE,
 819                     "pbuf=%p lba=%u(0x%x) count=%u(0x%x) dir=%x\n",
 820                     (void *)pbuf, lba, lba, count, count, pbuf->dir);
 821 
 822                 /*
 823                  * Transcode the request.
 824                  */
 825                 cmd_ptr = &skmsg->msg_buf[skmsg->length];
 826                 bzero(cmd_ptr, 32); /* This is too expensive */
 827 
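                     /*
                      * The request is encoded as a SCSI READ(10)/WRITE(10)
                      * CDB: byte 0 carries the opcode (set below), bytes 2-5
                      * the big-endian LBA, and bytes 7-8 the big-endian
                      * transfer length in blocks.
                      */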
 828                 scsi_req = cmd_ptr;
 829                 scsi_req->hdr.tag = skreq->id;
 830                 scsi_req->hdr.sg_list_dma_address =
 831                     cpu_to_be64(skreq->sksg_dma_address.cookies->dmac_laddress);
 832                 scsi_req->cdb[1] = 0;
 833                 scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
 834                 scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
 835                 scsi_req->cdb[4] = (lba & 0xff00) >> 8;
 836                 scsi_req->cdb[5] = (lba & 0xff);
 837                 scsi_req->cdb[6] = 0;
 838                 scsi_req->cdb[7] = (count & 0xff00) >> 8;
 839                 scsi_req->cdb[8] = count & 0xff;
 840                 scsi_req->cdb[9] = 0;
 841 
 842                 if (pbuf->dir & B_READ) {
 843                         scsi_req->cdb[0] = 0x28;
 844                         skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
 845                 } else {
 846                         scsi_req->cdb[0] = 0x2a;
 847                         skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
 848                 }
 849 
 850                 skd_blkdev_preop_sg_list(skdev, skreq, &sg_byte_count);
 851 
 852                 scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(sg_byte_count);
 853 
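                     /*
                      * Round the S/G byte count up to 512-byte blocks and
                      * patch the CDB transfer length to match what is
                      * actually being transferred.
                      */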
 854                 bcount = (sg_byte_count + 511) / 512;
 855                 scsi_req->cdb[7] = (bcount & 0xff00) >> 8;
 856                 scsi_req->cdb[8] =  bcount & 0xff;
 857 
 858                 Dcmn_err(CE_NOTE,
 859                     "skd_start: pbuf=%p skreq->id=%x opc=%x ====>>>>>",
 860                     (void *)pbuf, skreq->id, *scsi_req->cdb);
 861 
 862                 skmsg->length += sizeof (struct skd_scsi_request);
 863                 fmh->num_protocol_cmds_coalesced++;
 864 
 865                 /*
 866                  * Update the active request counts.
 867                  * Capture the timeout timestamp.
 868                  */
 869                 skreq->timeout_stamp = skdev->timeout_stamp;
 870                 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
 871 
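                     /*
                      * Count this request in its timeout slot;
                      * skd_timer_tick() flags the slot as overdue if it has
                      * not drained by the time the slot comes around again.
                      */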
 872                 atomic_inc_32(&skdev->timeout_slot[timo_slot]);
 873                 atomic_inc_32(&skdev->queue_depth_busy);
 874 
 875                 Dcmn_err(CE_NOTE, "req=0x%x busy=%d timo_slot=%d",
 876                     skreq->id, skdev->queue_depth_busy, timo_slot);
 877                 /*
 878                  * If the FIT msg buffer is full send it.
 879                  */
 880                 if (skmsg->length >= SKD_N_FITMSG_BYTES ||
 881                     fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
 882 
 883                         atomic_inc_64(&skdev->active_cmds);
 884                         pbuf->skreq = skreq;
 885 
 886                         skdev->fitmsg_sent1++;
 887                         skd_send_fitmsg(skdev, skmsg);
 888 
 889                         skmsg = NULL;
 890                         fmh = NULL;
 891                 }
 892         }
 893 
 894         /*
 895          * Is a FIT msg in progress? If it is empty put the buffer back
 896          * on the free list. If it is non-empty send what we got.
 897          * This minimizes latency when there are fewer requests than
 898          * what fits in a FIT msg.
 899          */
 900         if (skmsg != NULL) {
 901                 ASSERT(skmsg->length > sizeof (struct fit_msg_hdr));
 902                 Dcmn_err(CE_NOTE, "sending msg=%p, len %d",
 903                     (void *)skmsg, skmsg->length);
 904 
 905                 atomic_inc_64(&skdev->active_cmds);
 906 
 907                 skdev->fitmsg_sent2++;
 908                 skd_send_fitmsg(skdev, skmsg);
 909         }
 910 }
 911 
 912 /*
 913  *
 914  * Name:        skd_end_request
 915  *
 916  * Inputs:      skdev           - device state structure.
 917  *              skreq           - request structure.
 918  *              error           - I/O error value.
 919  *
 920  * Returns:     Nothing.
 921  *
 922  */
 923 static void
 924 skd_end_request(struct skd_device *skdev,
 925     struct skd_request_context *skreq, int error)
 926 {
 927         skdev->ios_completed++;
 928         skd_io_done(skdev, skreq->pbuf, error, SKD_IODONE_WIOC);
 929         skreq->pbuf = NULL;
 930         skreq->did_complete = 1;
 931 }
 932 
 933 /*
 934  *
 935  * Name:        skd_end_request_abnormal
 936  *
 937  * Inputs:      skdev           - device state structure.
 938  *              pbuf            - I/O request.
 939  *              error           - I/O error value.
 940  *              mode            - debug
 941  *
 942  * Returns:     Nothing.
 943  *
 944  */
 945 static void
 946 skd_end_request_abnormal(skd_device_t *skdev, skd_buf_private_t *pbuf,
 947     int error, int mode)
 948 {
 949         skd_io_done(skdev, pbuf, error, mode);
 950 }
 951 
 952 /*
 953  *
 954  * Name:        skd_request_fn_not_online, handles the condition
 955  *              of the device not being online.
 956  *
 957  * Inputs:      skdev           - device state structure.
 958  *
 959  * Returns:     nothing (void).
 960  *
 961  */
 962 static void
 963 skd_request_fn_not_online(skd_device_t *skdev)
 964 {
 965         int error;
 966         skd_buf_private_t *pbuf;
 967 
 968         ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
 969 
 970         skd_log_skdev(skdev, "req_not_online");
 971 
 972         switch (skdev->state) {
 973         case SKD_DRVR_STATE_PAUSING:
 974         case SKD_DRVR_STATE_PAUSED:
 975         case SKD_DRVR_STATE_STARTING:
 976         case SKD_DRVR_STATE_RESTARTING:
 977         case SKD_DRVR_STATE_WAIT_BOOT:
 978                 /*
 979                  * In case of starting, we haven't started the queue,
 980                  * so we can't get here... but requests are
 981                  * possibly hanging out waiting for us because we
 982                  * reported the dev/skd/0 already.  They'll wait
 983                  * forever if connect doesn't complete.
 984                  * What to do??? delay dev/skd/0 ??
 985                  */
 986         case SKD_DRVR_STATE_BUSY:
 987         case SKD_DRVR_STATE_BUSY_IMMINENT:
 988         case SKD_DRVR_STATE_BUSY_ERASE:
 989         case SKD_DRVR_STATE_DRAINING_TIMEOUT:
 990                 return;
 991 
 992         case SKD_DRVR_STATE_BUSY_SANITIZE:
 993         case SKD_DRVR_STATE_STOPPING:
 994         case SKD_DRVR_STATE_SYNCING:
 995         case SKD_DRVR_STATE_FAULT:
 996         case SKD_DRVR_STATE_DISAPPEARED:
 997         default:
 998                 error = -EIO;
 999                 break;
1000         }
1001 
1002         /*
1003          * If we get here, terminate all pending block requests
1004          * with EIO and any SCSI pass-thru requests with appropriate sense.
1005          */
1006         ASSERT(WAITQ_LOCK_HELD(skdev));
1007         if (SIMPLEQ_EMPTY(&skdev->waitqueue))
1008                 return;
1009 
1010         while ((pbuf = skd_get_queued_pbuf(skdev)))
1011                 skd_end_request_abnormal(skdev, pbuf, error, SKD_IODONE_WNIOC);
1012 
1013         cv_signal(&skdev->cv_waitq);
1014 }
1015 
1016 /*
1017  * TIMER
1018  */
1019 
1020 static void skd_timer_tick_not_online(struct skd_device *skdev);
1021 
1022 /*
1023  *
1024  * Name:        skd_timer_tick, monitors requests for timeouts.
1025  *
1026  * Inputs:      skdev           - device state structure.
1027  *
1028  * Returns:     Nothing.
1029  *
1030  */
1031 static void
1032 skd_timer_tick(skd_device_t *skdev)
1033 {
1034         uint32_t timo_slot;
1035 
1036         skdev->timer_active = 1;
1037 
1038         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
1039                 skd_timer_tick_not_online(skdev);
1040                 goto timer_func_out;
1041         }
1042 
1043         skdev->timeout_stamp++;
1044         timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
1045 
1046         /*
1047          * All requests that happened during the previous use of
1048          * this slot should be done by now. The previous use was
1049          * over 7 seconds ago.
1050          */
1051         if (skdev->timeout_slot[timo_slot] == 0) {
1052                 goto timer_func_out;
1053         }
1054 
1055         /* Something is overdue */
1056         Dcmn_err(CE_NOTE, "found %d timeouts, draining busy=%d",
1057             skdev->timeout_slot[timo_slot],
1058             skdev->queue_depth_busy);
1059         skdev->timer_countdown = SKD_TIMER_SECONDS(3);
1060         skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
1061         skdev->timo_slot = timo_slot;
1062 
1063 timer_func_out:
1064         skdev->timer_active = 0;
1065 }
1066 
1067 /*
1068  *
1069  * Name:        skd_timer_tick_not_online, handles various device
1070  *              state transitions.
1071  *
1072  * Inputs:      skdev           - device state structure.
1073  *
1074  * Returns:     Nothing.
1075  *
1076  */
1077 static void
1078 skd_timer_tick_not_online(struct skd_device *skdev)
1079 {
1080         Dcmn_err(CE_NOTE, "skd_timer_tick_not_online: state=%d tmo=%d",
1081             skdev->state, skdev->timer_countdown);
1082 
1083         ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
1084 
1085         switch (skdev->state) {
1086         case SKD_DRVR_STATE_IDLE:
1087         case SKD_DRVR_STATE_LOAD:
1088                 break;
1089         case SKD_DRVR_STATE_BUSY_SANITIZE:
1090                 cmn_err(CE_WARN, "!drive busy sanitize[%x], driver[%x]\n",
1091                     skdev->drive_state, skdev->state);
1092                 break;
1093 
1094         case SKD_DRVR_STATE_BUSY:
1095         case SKD_DRVR_STATE_BUSY_IMMINENT:
1096         case SKD_DRVR_STATE_BUSY_ERASE:
1097                 Dcmn_err(CE_NOTE, "busy[%x], countdown=%d\n",
1098                     skdev->state, skdev->timer_countdown);
1099                 if (skdev->timer_countdown > 0) {
1100                         skdev->timer_countdown--;
1101                         return;
1102                 }
1103                 cmn_err(CE_WARN, "!busy[%x], timedout=%d, restarting device.",
1104                     skdev->state, skdev->timer_countdown);
1105                 skd_restart_device(skdev);
1106                 break;
1107 
1108         case SKD_DRVR_STATE_WAIT_BOOT:
1109         case SKD_DRVR_STATE_STARTING:
1110                 if (skdev->timer_countdown > 0) {
1111                         skdev->timer_countdown--;
1112                         return;
1113                 }
1114                 /*
1115                  * For now, we fault the drive.  Could attempt resets to
1116                  * recover at some point.
1117                  */
1118                 skdev->state = SKD_DRVR_STATE_FAULT;
1119 
1120                 cmn_err(CE_WARN, "!(%s): DriveFault Connect Timeout (%x)",
1121                     skd_name(skdev), skdev->drive_state);
1122 
1123                 /* start the queue so we can respond with error to requests */
1124                 skd_start(skdev);
1125 
1126                 /* wakeup anyone waiting for startup complete */
1127                 skdev->gendisk_on = -1;
1128 
1129                 cv_signal(&skdev->cv_waitq);
1130                 break;
1131 
1132 
1133         case SKD_DRVR_STATE_PAUSING:
1134         case SKD_DRVR_STATE_PAUSED:
1135                 break;
1136 
1137         case SKD_DRVR_STATE_DRAINING_TIMEOUT:
1138                 cmn_err(CE_WARN,
1139                     "!%s: draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
1140                     skdev->name,
1141                     skdev->timo_slot,
1142                     skdev->timer_countdown,
1143                     skdev->queue_depth_busy,
1144                     skdev->timeout_slot[skdev->timo_slot]);
1145                 /* if the slot has cleared we can let the I/O continue */
1146                 if (skdev->timeout_slot[skdev->timo_slot] == 0) {
1147                         Dcmn_err(CE_NOTE, "Slot drained, starting queue.");
1148                         skdev->state = SKD_DRVR_STATE_ONLINE;
1149                         skd_start(skdev);
1150                         return;
1151                 }
1152                 if (skdev->timer_countdown > 0) {
1153                         skdev->timer_countdown--;
1154                         return;
1155                 }
1156                 skd_restart_device(skdev);
1157                 break;
1158 
1159         case SKD_DRVR_STATE_RESTARTING:
1160                 if (skdev->timer_countdown > 0) {
1161                         skdev->timer_countdown--;
1162 
1163                         return;
1164                 }
1165                 /*
1166                  * For now, we fault the drive. Could attempt resets to
1167                  * recover at some point.
1168                  */
1169                 skdev->state = SKD_DRVR_STATE_FAULT;
1170                 cmn_err(CE_WARN, "!(%s): DriveFault Reconnect Timeout (%x)\n",
1171                     skd_name(skdev), skdev->drive_state);
1172 
1173                 /*
1174                  * Recovering does two things:
1175                  * 1. completes IO with error
1176                  * 2. reclaims dma resources
1177                  * When is it safe to recover requests?
1178                  * - if the drive state is faulted
1179                  * - if the state is still soft reset after our timeout
1180                  * - if the drive registers are dead (state = FF)
1181                  */
1182 
1183                 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
1184                     (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
1185                     (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK)) {
1186                         /*
1187                          * It never came out of soft reset. Try to
1188                          * recover the requests and then let them
1189                          * fail. This is to mitigate hung processes.
1190                          *
1191                          * Acquire the interrupt lock since these lists are
1192                          * manipulated by interrupt handlers.
1193                          */
1194                         ASSERT(!WAITQ_LOCK_HELD(skdev));
1195                         INTR_LOCK(skdev);
1196                         skd_recover_requests(skdev);
1197                         INTR_UNLOCK(skdev);
1198                 }
1199                 /* start the queue so we can respond with error to requests */
1200                 skd_start(skdev);
1201                 /* wakeup anyone waiting for startup complete */
1202                 skdev->gendisk_on = -1;
1203                 cv_signal(&skdev->cv_waitq);
1204                 break;
1205 
1206         case SKD_DRVR_STATE_RESUMING:
1207         case SKD_DRVR_STATE_STOPPING:
1208         case SKD_DRVR_STATE_SYNCING:
1209         case SKD_DRVR_STATE_FAULT:
1210         case SKD_DRVR_STATE_DISAPPEARED:
1211         default:
1212                 break;
1213         }
1214 }
1215 
1216 /*
1217  *
1218  * Name:        skd_timer, kicks off the timer processing.
1219  *
1220  * Inputs:      skdev           - device state structure.
1221  *
1222  * Returns:     Nothing.
1223  *
1224  */
1225 static void
1226 skd_timer(void *arg)
1227 {
1228         skd_device_t *skdev = (skd_device_t *)arg;
1229 
1230         /* Someone set us to 0, don't bother rescheduling. */
1231         ADAPTER_STATE_LOCK(skdev);
1232         if (skdev->skd_timer_timeout_id != 0) {
1233                 ADAPTER_STATE_UNLOCK(skdev);
1234                 /* Pardon the drop-and-then-acquire logic here. */
1235                 skd_timer_tick(skdev);
1236                 ADAPTER_STATE_LOCK(skdev);
1237                 /* Restart timer, if not being stopped. */
1238                 if (skdev->skd_timer_timeout_id != 0) {
1239                         skdev->skd_timer_timeout_id =
1240                             timeout(skd_timer, arg, skd_timer_ticks);
1241                 }
1242         }
1243         ADAPTER_STATE_UNLOCK(skdev);
1244 }
1245 
1246 /*
1247  *
1248  * Name:        skd_start_timer, kicks off the 1-second timer.
1249  *
1250  * Inputs:      skdev           - device state structure.
1251  *
1252  * Returns:     Nothing.
1253  *
1254  */
1255 static void
1256 skd_start_timer(struct skd_device *skdev)
1257 {
1258         /* Start one second driver timer. */
1259         ADAPTER_STATE_LOCK(skdev);
1260         ASSERT(skdev->skd_timer_timeout_id == 0);
1261 
1262         /*
1263          * Do first "timeout tick" right away, but not in this
1264          * thread.
1265          */
1266         skdev->skd_timer_timeout_id = timeout(skd_timer, skdev, 1);
1267         ADAPTER_STATE_UNLOCK(skdev);
1268 }
1269 
1270 /*
1271  * INTERNAL REQUESTS -- generated by driver itself
1272  */
1273 
1274 /*
1275  *
1276  * Name:        skd_format_internal_skspcl, sets up the internal
1277  *              FIT request message.
1278  *
1279  * Inputs:      skdev           - device state structure.
1280  *
1281  * Returns:     One.
1282  *
1283  */
1284 static int
1285 skd_format_internal_skspcl(struct skd_device *skdev)
1286 {
1287         struct skd_special_context *skspcl = &skdev->internal_skspcl;
1288         struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1289         struct fit_msg_hdr *fmh;
1290         uint64_t dma_address;
1291         struct skd_scsi_request *scsi;
1292 
1293         fmh = (struct fit_msg_hdr *)&skspcl->msg_buf64[0];
1294         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1295         fmh->num_protocol_cmds_coalesced = 1;
1296 
1297         /* Offset by 8 64-bit words (64 bytes) for lint-clean alignment. */
1298         scsi = (struct skd_scsi_request *)&skspcl->msg_buf64[8];
1299         bzero(scsi, sizeof (*scsi));
1300         dma_address = skspcl->req.sksg_dma_address.cookies->_dmu._dmac_ll;
1301         scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
1302         sgd->control = FIT_SGD_CONTROL_LAST;
1303         sgd->byte_count = 0;
1304         sgd->host_side_addr = skspcl->db_dma_address.cookies->_dmu._dmac_ll;
1305         sgd->dev_side_addr = 0; /* not used */
1306         sgd->next_desc_ptr = 0LL;
1307 
1308         return (1);
1309 }
1310 
1311 /*
1312  *
1313  * Name:        skd_send_internal_skspcl, send internal requests to
1314  *              the hardware.
1315  *
1316  * Inputs:      skdev           - device state structure.
1317  *              skspcl          - request structure
1318  *              opcode          - just what it says
1319  *
1320  * Returns:     Nothing.
1321  *
1322  */
1323 void
1324 skd_send_internal_skspcl(struct skd_device *skdev,
1325     struct skd_special_context *skspcl, uint8_t opcode)
1326 {
1327         struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1328         struct skd_scsi_request *scsi;
1329 
1330         if (SKD_REQ_STATE_IDLE != skspcl->req.state) {
1331                 /*
1332                  * A refresh is already in progress.
1333                  * Just wait for it to finish.
1334                  */
1335                 return;
1336         }
1337 
1338         ASSERT(0 == (skspcl->req.id & SKD_ID_INCR));
1339         skspcl->req.state = SKD_REQ_STATE_BUSY;
1340         skspcl->req.id += SKD_ID_INCR;
1341 
1342         /* Offset by 8 64-bit words (64 bytes) for lint-clean alignment. */
1343         scsi = (struct skd_scsi_request *)&skspcl->msg_buf64[8];
1344         scsi->hdr.tag = skspcl->req.id;
1345 
1346         Dcmn_err(CE_NOTE, "internal skspcl: opcode=%x req.id=%x ==========>",
1347             opcode, skspcl->req.id);
1348 
1349         switch (opcode) {
1350         case TEST_UNIT_READY:
1351                 scsi->cdb[0] = TEST_UNIT_READY;
1352                 scsi->cdb[1] = 0x00;
1353                 scsi->cdb[2] = 0x00;
1354                 scsi->cdb[3] = 0x00;
1355                 scsi->cdb[4] = 0x00;
1356                 scsi->cdb[5] = 0x00;
1357                 sgd->byte_count = 0;
1358                 scsi->hdr.sg_list_len_bytes = 0;
1359                 break;
1360         case READ_CAPACITY_EXT:
1361                 scsi->cdb[0]  = READ_CAPACITY_EXT;
1362                 scsi->cdb[1]  = 0x10;
1363                 scsi->cdb[2]  = 0x00;
1364                 scsi->cdb[3]  = 0x00;
1365                 scsi->cdb[4]  = 0x00;
1366                 scsi->cdb[5]  = 0x00;
1367                 scsi->cdb[6]  = 0x00;
1368                 scsi->cdb[7]  = 0x00;
1369                 scsi->cdb[8]  = 0x00;
1370                 scsi->cdb[9]  = 0x00;
1371                 scsi->cdb[10] = 0x00;
1372                 scsi->cdb[11] = 0x00;
1373                 scsi->cdb[12] = 0x00;
1374                 scsi->cdb[13] = 0x20;
1375                 scsi->cdb[14] = 0x00;
1376                 scsi->cdb[15] = 0x00;
1377                 sgd->byte_count = SKD_N_READ_CAP_EXT_BYTES;
1378                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1379                 break;
1380         case 0x28:
1381                 (void) memset(skspcl->data_buf, 0x65, SKD_N_INTERNAL_BYTES);
1382 
1383                 scsi->cdb[0] = 0x28;
1384                 scsi->cdb[1] = 0x00;
1385                 scsi->cdb[2] = 0x00;
1386                 scsi->cdb[3] = 0x00;
1387                 scsi->cdb[4] = 0x00;
1388                 scsi->cdb[5] = 0x00;
1389                 scsi->cdb[6] = 0x00;
1390                 scsi->cdb[7] = 0x00;
1391                 scsi->cdb[8] = 0x01;
1392                 scsi->cdb[9] = 0x00;
1393                 sgd->byte_count = SKD_N_INTERNAL_BYTES;
1394                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(SKD_N_INTERNAL_BYTES);
1395                 break;
1396         case INQUIRY:
1397                 scsi->cdb[0] = INQUIRY;
1398                 scsi->cdb[1] = 0x01; /* evpd */
1399                 scsi->cdb[2] = 0x80; /* serial number page */
1400                 scsi->cdb[3] = 0x00;
1401                 scsi->cdb[4] = 0x10;
1402                 scsi->cdb[5] = 0x00;
1403                 sgd->byte_count = 16; /* SKD_N_INQ_BYTES */
1404                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1405                 break;
1406         case INQUIRY2:
1407                 scsi->cdb[0] = INQUIRY;
1408                 scsi->cdb[1] = 0x00;
1409                 scsi->cdb[2] = 0x00; /* standard inquiry data, no VPD page */
1410                 scsi->cdb[3] = 0x00;
1411                 scsi->cdb[4] = 0x24;
1412                 scsi->cdb[5] = 0x00;
1413                 sgd->byte_count = 36; /* SKD_N_INQ_BYTES */
1414                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1415                 break;
1416         case SYNCHRONIZE_CACHE:
1417                 scsi->cdb[0] = SYNCHRONIZE_CACHE;
1418                 scsi->cdb[1] = 0x00;
1419                 scsi->cdb[2] = 0x00;
1420                 scsi->cdb[3] = 0x00;
1421                 scsi->cdb[4] = 0x00;
1422                 scsi->cdb[5] = 0x00;
1423                 scsi->cdb[6] = 0x00;
1424                 scsi->cdb[7] = 0x00;
1425                 scsi->cdb[8] = 0x00;
1426                 scsi->cdb[9] = 0x00;
1427                 sgd->byte_count = 0;
1428                 scsi->hdr.sg_list_len_bytes = 0;
1429                 break;
1430         default:
1431                 ASSERT(!"Don't know what to send");
1432                 return;
1433 
1434         }
1435 
1436         skd_send_special_fitmsg(skdev, skspcl);
1437 }
1438 
1439 /*
1440  *
1441  * Name:        skd_refresh_device_data, sends a TUR command.
1442  *
1443  * Inputs:      skdev           - device state structure.
1444  *
1445  * Returns:     Nothing.
1446  *
1447  */
1448 static void
1449 skd_refresh_device_data(struct skd_device *skdev)
1450 {
1451         struct skd_special_context *skspcl = &skdev->internal_skspcl;
1452 
1453         Dcmn_err(CE_NOTE, "refresh_device_data: state=%d", skdev->state);
1454 
1455         skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
1456 }
1457 
1458 /*
1459  *
1460  * Name:        skd_complete_internal, handles the completion of
1461  *              driver-initiated I/O requests.
1462  *
1463  * Inputs:      skdev           - device state structure.
1464  *              skcomp          - completion structure.
1465  *              skerr           - error structure.
1466  *              skspcl          - request structure.
1467  *
1468  * Returns:     Nothing.
1469  *
1470  */
1471 /* ARGSUSED */  /* Upstream common source with other platforms. */
1472 static void
1473 skd_complete_internal(struct skd_device *skdev,
1474     volatile struct fit_completion_entry_v1 *skcomp,
1475     volatile struct fit_comp_error_info *skerr,
1476     struct skd_special_context *skspcl)
1477 {
1478         uint8_t *buf = skspcl->data_buf;
1479         uint8_t status = 2;
1480         /* Offset by 8 64-bit words (64 bytes) for lint-clean alignment. */
1481         struct skd_scsi_request *scsi =
1482             (struct skd_scsi_request *)&skspcl->msg_buf64[8];
1483 
1484         ASSERT(skspcl == &skdev->internal_skspcl);
1485 
1486         (void) ddi_dma_sync(skspcl->db_dma_address.dma_handle, 0, 0,
1487             DDI_DMA_SYNC_FORKERNEL);
1488         (void) ddi_dma_sync(skspcl->mb_dma_address.dma_handle, 0, 0,
1489             DDI_DMA_SYNC_FORKERNEL);
1490 
1491         Dcmn_err(CE_NOTE, "complete internal %x", scsi->cdb[0]);
1492 
1493         skspcl->req.completion = *skcomp;
1494         skspcl->req.state = SKD_REQ_STATE_IDLE;
1495         skspcl->req.id += SKD_ID_INCR;
1496 
1497         status = skspcl->req.completion.status;
1498 
1499         Dcmn_err(CE_NOTE, "<<<<====== complete_internal: opc=%x", *scsi->cdb);
1500 
1501         switch (scsi->cdb[0]) {
1502         case TEST_UNIT_READY:
1503                 if (SAM_STAT_GOOD == status) {
1504                         skd_send_internal_skspcl(skdev, skspcl,
1505                             READ_CAPACITY_EXT);
1506                 } else {
1507                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1508                                 cmn_err(CE_WARN,
1509                                     "!%s: TUR failed, don't send any more, "
1510                                     "state 0x%x", skdev->name, skdev->state);
1511 
1512                                 return;
1513                         }
1514 
1515                         Dcmn_err(CE_NOTE, "%s: TUR failed, retry skerr",
1516                             skdev->name);
1517                         skd_send_internal_skspcl(skdev, skspcl, 0x00);
1518                 }
1519                 break;
1520         case READ_CAPACITY_EXT: {
1521                 uint64_t cap, Nblocks;
1522                 uint64_t xbuf[1];
1523 
1524                 skdev->read_cap_is_valid = 0;
1525                 if (SAM_STAT_GOOD == status) {
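                             /*
                              * READ CAPACITY(16) data: bytes 0-7 hold the
                              * last LBA and bytes 8-11 the block size, both
                              * big-endian.
                              */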
1526                         bcopy(buf, xbuf, 8);
1527                         cap = be64_to_cpu(*xbuf);
1528                         skdev->read_cap_last_lba = cap;
1529                         skdev->read_cap_blocksize =
1530                             (buf[8] << 24) | (buf[9] << 16) |
1531                             (buf[10] << 8) | buf[11];
1532 
1533                         cap *= skdev->read_cap_blocksize;
1534                         Dcmn_err(CE_NOTE, "  Last LBA: %" PRIu64 " (0x%" PRIx64
1535                             "), blk sz: %d, Capacity: %" PRIu64 "GB\n",
1536                             skdev->read_cap_last_lba,
1537                             skdev->read_cap_last_lba,
1538                             skdev->read_cap_blocksize,
1539                             cap >> 30ULL);
1540 
1541                         Nblocks = skdev->read_cap_last_lba + 1;
1542 
1543                         skdev->Nblocks = Nblocks;
1544                         skdev->read_cap_is_valid = 1;
1545 
1546                         skd_send_internal_skspcl(skdev, skspcl, INQUIRY2);
1547 
1548                 } else {
1549                         Dcmn_err(CE_NOTE, "**** READCAP failed, retry TUR");
1550                         skd_send_internal_skspcl(skdev, skspcl,
1551                             TEST_UNIT_READY);
1552                 }
1553                 break;
1554         }
1555         case INQUIRY:
1556                 skdev->inquiry_is_valid = 0;
1557                 if (SAM_STAT_GOOD == status) {
1558                         skdev->inquiry_is_valid = 1;
1559 
1560                         if (scsi->cdb[1] == 0x1) {
1561                                 bcopy(&buf[4], skdev->inq_serial_num, 12);
1562                                 skdev->inq_serial_num[12] = '\0';
1563                         } else {
1564                                 char *tmp = skdev->inq_vendor_id;
1565 
1566                                 bcopy(&buf[8], tmp, 8);
1567                                 tmp[8] = '\0';
1568 
1569                                 tmp = skdev->inq_product_id;
1570                                 bcopy(&buf[16], tmp, 16);
1571                                 tmp[16] = '\0';
1572 
1573                                 tmp = skdev->inq_product_rev;
1574                                 bcopy(&buf[32], tmp, 4);
1575                                 tmp[4] = '\0';
1576                         }
1577                 }
1578 
1579                 if (skdev->state != SKD_DRVR_STATE_ONLINE)
1580                         if (skd_unquiesce_dev(skdev) < 0)
1581                                 cmn_err(CE_NOTE, "** failed to ONLINE device");
1582                 break;
1583         case SYNCHRONIZE_CACHE:
1584                 skdev->sync_done = (SAM_STAT_GOOD == status) ? 1 : -1;
1585 
1586                 cv_signal(&skdev->cv_waitq);
1587                 break;
1588 
1589         default:
1590                 ASSERT(!"we didn't send this");
1591         }
1592 }
1593 
1594 /*
1595  * FIT MESSAGES
1596  */
1597 
1598 /*
1599  *
1600  * Name:        skd_send_fitmsg, send a FIT message to the hardware.
1601  *
1602  * Inputs:      skdev           - device state structure.
1603  *              skmsg           - FIT message structure.
1604  *
1605  * Returns:     Nothing.
1606  *
1607  */
1608 /* ARGSUSED */  /* Upstream common source with other platforms. */
1609 static void
1610 skd_send_fitmsg(struct skd_device *skdev,
1611     struct skd_fitmsg_context *skmsg)
1612 {
1613         uint64_t qcmd;
1614         struct fit_msg_hdr *fmh;
1615 
1616         Dcmn_err(CE_NOTE, "msgbuf's DMA addr: 0x%" PRIx64 ", qdepth_busy=%d",
1617             skmsg->mb_dma_address.cookies->dmac_laddress,
1618             skdev->queue_depth_busy);
1619 
1620         Dcmn_err(CE_NOTE, "msg_buf 0x%p, offset %x", (void *)skmsg->msg_buf,
1621             skmsg->offset);
1622 
1623         qcmd = skmsg->mb_dma_address.cookies->dmac_laddress;
1624         qcmd |= FIT_QCMD_QID_NORMAL;
1625 
1626         fmh = (struct fit_msg_hdr *)skmsg->msg_buf64;
1627         skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
1628 
1629         if (skdev->dbg_level > 1) {
1630                 uint8_t *bp = skmsg->msg_buf;
1631                 int i;
1632 
1633                 for (i = 0; i < skmsg->length; i += 8) {
1634                         Dcmn_err(CE_NOTE, "  msg[%2d] %02x %02x %02x %02x "
1635                             "%02x %02x %02x %02x",
1636                             i, bp[i + 0], bp[i + 1], bp[i + 2],
1637                             bp[i + 3], bp[i + 4], bp[i + 5],
1638                             bp[i + 6], bp[i + 7]);
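                             /*
                              * Dump the first 8 header bytes, then skip to
                              * the command area at offset 64.
                              */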
1639                         if (i == 0) i = 64 - 8;
1640                 }
1641         }
1642 
1643         (void) ddi_dma_sync(skmsg->mb_dma_address.dma_handle, 0, 0,
1644             DDI_DMA_SYNC_FORDEV);
1645 
1646         ASSERT(skmsg->length > sizeof (struct fit_msg_hdr));
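             /*
              * Encode the message size in the queue command: use the
              * smallest of the 128/256/512-byte classes that covers
              * skmsg->length; lengths of 64 bytes or less need no bit.
              */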
1647         if (skmsg->length > 256) {
1648                 qcmd |= FIT_QCMD_MSGSIZE_512;
1649         } else if (skmsg->length > 128) {
1650                 qcmd |= FIT_QCMD_MSGSIZE_256;
1651         } else if (skmsg->length > 64) {
1652                 qcmd |= FIT_QCMD_MSGSIZE_128;
1653         }
1654 
1655         skdev->ios_started++;
1656 
1657         SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
1658 }
1659 
1660 /*
1661  *
1662  * Name:        skd_send_special_fitmsg, send a special FIT message
1663  *              to the hardware, used for driver-originated I/O requests.
1664  *
1665  * Inputs:      skdev           - device state structure.
1666  *              skspcl          - skspcl structure.
1667  *
1668  * Returns:     Nothing.
1669  *
1670  */
1671 static void
1672 skd_send_special_fitmsg(struct skd_device *skdev,
1673     struct skd_special_context *skspcl)
1674 {
1675         uint64_t qcmd;
1676 
1677         Dcmn_err(CE_NOTE, "send_special_fitmsg: pt 1");
1678 
1679         if (skdev->dbg_level > 1) {
1680                 uint8_t *bp = skspcl->msg_buf;
1681                 int i;
1682 
1683                 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
1684                         cmn_err(CE_NOTE,
1685                             "  spcl[%2d] %02x %02x %02x %02x  "
1686                             "%02x %02x %02x %02x\n", i,
1687                             bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
1688                             bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
1689                         if (i == 0) i = 64 - 8;
1690                 }
1691 
1692                 for (i = 0; i < skspcl->req.n_sg; i++) {
1693                         struct fit_sg_descriptor *sgd =
1694                             &skspcl->req.sksg_list[i];
1695 
1696                         cmn_err(CE_NOTE, "  sg[%d] count=%u ctrl=0x%x "
1697                             "addr=0x%" PRIx64 " next=0x%" PRIx64,
1698                             i, sgd->byte_count, sgd->control,
1699                             sgd->host_side_addr, sgd->next_desc_ptr);
1700                 }
1701         }
1702 
1703         (void) ddi_dma_sync(skspcl->mb_dma_address.dma_handle, 0, 0,
1704             DDI_DMA_SYNC_FORDEV);
1705         (void) ddi_dma_sync(skspcl->db_dma_address.dma_handle, 0, 0,
1706             DDI_DMA_SYNC_FORDEV);
1707 
1708         /*
1709          * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
1710          * and one 64-byte SSDI command.
1711          */
1712         qcmd = skspcl->mb_dma_address.cookies->dmac_laddress;
1713 
1714         qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
1715 
1716         SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
1717 }
1718 
1719 /*
1720  * COMPLETION QUEUE
1721  */
1722 
1723 static void skd_complete_other(struct skd_device *skdev,
1724     volatile struct fit_completion_entry_v1 *skcomp,
1725     volatile struct fit_comp_error_info *skerr);
1726 
1727 struct sns_info {
1728         uint8_t type;
1729         uint8_t stat;
1730         uint8_t key;
1731         uint8_t asc;
1732         uint8_t ascq;
1733         uint8_t mask;
1734         enum skd_check_status_action action;
1735 };
1736 
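     /*
      * The mask selects which fields below must match: 0x10 = type,
      * 0x08 = stat, 0x04 = key, 0x02 = asc, 0x01 = ascq.  For example,
      * mask 0x1C matches on type/stat/key and ignores asc/ascq.
      */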
1737 static struct sns_info skd_chkstat_table[] = {
1738         /* Good */
1739         {0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c, SKD_CHECK_STATUS_REPORT_GOOD},
1740 
1741         /* Smart alerts */
1742         {0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
1743             SKD_CHECK_STATUS_REPORT_SMART_ALERT},
1744         {0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
1745             SKD_CHECK_STATUS_REPORT_SMART_ALERT},
1746         {0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temp over trigger */
1747             SKD_CHECK_STATUS_REPORT_SMART_ALERT},
1748 
1749         /* Retry (with limits) */
1750         {0x70, 0x02, ABORTED_COMMAND, 0, 0, 0x1C, /* DMA errors */
1751             SKD_CHECK_STATUS_REQUEUE_REQUEST},
1752         {0x70, 0x02, UNIT_ATTENTION, 0x0B, 0x00, 0x1E, /* warnings */
1753             SKD_CHECK_STATUS_REQUEUE_REQUEST},
1754         {0x70, 0x02, UNIT_ATTENTION, 0x5D, 0x00, 0x1E, /* thresholds */
1755             SKD_CHECK_STATUS_REQUEUE_REQUEST},
1756         {0x70, 0x02, UNIT_ATTENTION, 0x80, 0x30, 0x1F, /* backup power */
1757             SKD_CHECK_STATUS_REQUEUE_REQUEST},
1758 
1759         /* Busy (or about to be) */
1760         {0x70, 0x02, UNIT_ATTENTION, 0x3f, 0x01, 0x1F, /* fw changed */
1761             SKD_CHECK_STATUS_BUSY_IMMINENT},
1762 };
1763 
1764 /*
1765  *
1766  * Name:        skd_check_status, checks the return status from a
1767  *              completed I/O request.
1768  *
1769  * Inputs:      skdev           - device state structure.
1770  *              cmp_status      - SCSI status byte.
1771  *              skerr           - the error data structure.
1772  *
1773  * Returns:     Depending on the error condition, return the action
1774  *              to be taken as specified in the skd_chkstat_table.
1775  *              If no corresponding value is found in the table
1776  *              return SKD_CHECK_STATUS_REPORT_GOOD if there is no error,
1777  *              otherwise return SKD_CHECK_STATUS_REPORT_ERROR.
1778  *
1779  */
1780 static enum skd_check_status_action
1781 skd_check_status(struct skd_device *skdev, uint8_t cmp_status,
1782     volatile struct fit_comp_error_info *skerr)
1783 {
1784         /*
1785          * Look up status and sense data to decide how to handle the error
1786          * from the device.
1787          * mask says which fields must match e.g., mask=0x18 means check
1788          * type and stat, ignore key, asc, ascq.
1789          */
1790         int i, n;
1791 
1792         Dcmn_err(CE_NOTE, "(%s): key/asc/ascq %02x/%02x/%02x",
1793             skd_name(skdev), skerr->key, skerr->code, skerr->qual);
1794 
1795         Dcmn_err(CE_NOTE, "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x",
1796             skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual);
1797 
1798         /* Does the info match an entry in the good category? */
1799         n = sizeof (skd_chkstat_table) / sizeof (skd_chkstat_table[0]);
1800         for (i = 0; i < n; i++) {
1801                 struct sns_info *sns = &skd_chkstat_table[i];
1802 
1803                 if (sns->mask & 0x10)
1804                         if (skerr->type != sns->type) continue;
1805 
1806                 if (sns->mask & 0x08)
1807                         if (cmp_status != sns->stat) continue;
1808 
1809                 if (sns->mask & 0x04)
1810                         if (skerr->key != sns->key) continue;
1811 
1812                 if (sns->mask & 0x02)
1813                         if (skerr->code != sns->asc) continue;
1814 
1815                 if (sns->mask & 0x01)
1816                         if (skerr->qual != sns->ascq) continue;
1817 
1818                 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
1819                         cmn_err(CE_WARN, "!(%s):SMART Alert: sense key/asc/ascq"
1820                             " %02x/%02x/%02x",
1821                             skd_name(skdev), skerr->key,
1822                             skerr->code, skerr->qual);
1823                 }
1824 
1825                 Dcmn_err(CE_NOTE, "skd_check_status: returning %x",
1826                     sns->action);
1827 
1828                 return (sns->action);
1829         }
1830 
1831         /*
1832          * No other match, so nonzero status means error,
1833          * zero status means good
1834          */
1835         if (cmp_status) {
1836                 cmn_err(CE_WARN,
1837                     "!%s: status check: qdepth=%d skmfl=%p (%d) skrfl=%p (%d)",
1838                     skdev->name,
1839                     skdev->queue_depth_busy,
1840                     (void *)skdev->skmsg_free_list, skd_list_skmsg(skdev, 0),
1841                     (void *)skdev->skreq_free_list, skd_list_skreq(skdev, 0));
1842 
1843                 cmn_err(CE_WARN, "!%s: t=%02x stat=%02x k=%02x c=%02x q=%02x",
1844                     skdev->name, skerr->type, cmp_status, skerr->key,
1845                     skerr->code, skerr->qual);
1846 
1847                 return (SKD_CHECK_STATUS_REPORT_ERROR);
1848         }
1849 
1850         Dcmn_err(CE_NOTE, "status check good default");
1851 
1852         return (SKD_CHECK_STATUS_REPORT_GOOD);
1853 }
1854 
1855 /*
1856  *
1857  * Name:        skd_isr_completion_posted, handles I/O completions.
1858  *
1859  * Inputs:      skdev           - device state structure.
1860  *
1861  * Returns:     Nothing.
1862  *
1863  */
1864 static void
1865 skd_isr_completion_posted(struct skd_device *skdev)
1866 {
1867         volatile struct fit_completion_entry_v1 *skcmp = NULL;
1868         volatile struct fit_comp_error_info  *skerr;
1869         struct skd_fitmsg_context       *skmsg;
1870         struct skd_request_context      *skreq;
1871         skd_buf_private_t               *pbuf;
1872         uint16_t req_id;
1873         uint32_t req_slot;
1874         uint32_t timo_slot;
1875         uint32_t msg_slot;
1876         uint16_t cmp_cntxt = 0;
1877         uint8_t cmp_status = 0;
1878         uint8_t cmp_cycle = 0;
1879         uint32_t cmp_bytes = 0;
1880 
1881         (void) ddi_dma_sync(skdev->cq_dma_address.dma_handle, 0, 0,
1882             DDI_DMA_SYNC_FORKERNEL);
1883 
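             /*
              * Consume completion entries until an entry's cycle count
              * no longer matches ours; the expected cycle advances each
              * time the ring wraps, so a mismatch means no more work.
              */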
1884         for (;;) {
1885                 ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
1886 
1887                 WAITQ_LOCK(skdev);
1888 
1889                 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
1890                 cmp_cycle = skcmp->cycle;
1891                 cmp_cntxt = skcmp->tag;
1892                 cmp_status = skcmp->status;
1893                 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
1894 
1895                 skerr = &skdev->skerr_table[skdev->skcomp_ix];
1896 
1897                 Dcmn_err(CE_NOTE,
1898                     "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
1899                     "qdepth_busy=%d rbytes=0x%x proto=%d",
1900                     skdev->skcomp_cycle, skdev->skcomp_ix,
1901                     cmp_cycle, cmp_cntxt, cmp_status,
1902                     skdev->queue_depth_busy, cmp_bytes, skdev->proto_ver);
1903 
1904                 if (cmp_cycle != skdev->skcomp_cycle) {
1905                         Dcmn_err(CE_NOTE, "%s:end of completions", skdev->name);
1906 
1907                         WAITQ_UNLOCK(skdev);
1908                         break;
1909                 }
1910 
1911 
1912                 skdev->n_req++;
1913 
1914                 /*
1915                  * Update the completion queue head index and possibly
1916                  * the completion cycle count.
1917                  */
1918                 skdev->skcomp_ix++;
1919                 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
1920                         skdev->skcomp_ix = 0;
1921                         skdev->skcomp_cycle++; /* 8-bit wrap-around */
1922                 }
1923 
1924 
1925                 /*
1926                  * The command context is a unique 32-bit ID. The low order
1927                  * bits help locate the request. The request is usually a
1928                  * r/w request (see skd_start() above) or a special request.
1929                  */
1930                 req_id   = cmp_cntxt;
1931                 req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
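                     /*
                      * The high bits of the ID are a generation count,
                      * bumped by SKD_ID_INCR each time a slot is reused,
                      * so stale completions can be detected below.
                      */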
1932 
1933                 Dcmn_err(CE_NOTE,
1934                     "<<<< completion_posted 1: req_id=%x req_slot=%x",
1935                     req_id, req_slot);
1936 
1937                 /* Is this other than a r/w request? */
1938                 if (req_slot >= skdev->num_req_context) {
1939                         /*
1940                          * This is not a completion for a r/w request.
1941                          */
1942                         skd_complete_other(skdev, skcmp, skerr);
1943                         WAITQ_UNLOCK(skdev);
1944                         continue;
1945                 }
1946 
1947                 skreq    = &skdev->skreq_table[req_slot];
1948 
1949                 /*
1950                  * Make sure the request ID for the slot matches.
1951                  */
1952                 ASSERT(skreq->id == req_id);
1953 
1954                 if (SKD_REQ_STATE_ABORTED == skreq->state) {
1955                         Dcmn_err(CE_NOTE, "reclaim req %p id=%04x\n",
1956                             (void *)skreq, skreq->id);
1957                         /*
1958                          * a previously timed out command can
1959                          * now be cleaned up
1960                          */
1961                         msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
1962                         ASSERT(msg_slot < skdev->num_fitmsg_context);
1963                         skmsg = &skdev->skmsg_table[msg_slot];
1964                         if (skmsg->id == skreq->fitmsg_id) {
1965                                 ASSERT(skmsg->outstanding > 0);
1966                                 skmsg->outstanding--;
1967                                 if (skmsg->outstanding == 0) {
1968                                         ASSERT(SKD_MSG_STATE_BUSY ==
1969                                             skmsg->state);
1970                                         skmsg->state = SKD_MSG_STATE_IDLE;
1971                                         skmsg->id += SKD_ID_INCR;
1972                                         skmsg->next = skdev->skmsg_free_list;
1973                                         skdev->skmsg_free_list = skmsg;
1974                                 }
1975                         }
1976                         /*
1977                          * Reclaim the skd_request_context
1978                          */
1979                         skreq->state = SKD_REQ_STATE_IDLE;
1980                         skreq->id += SKD_ID_INCR;
1981                         skreq->next = skdev->skreq_free_list;
1982                         skdev->skreq_free_list = skreq;
1983                         WAITQ_UNLOCK(skdev);
1984                         continue;
1985                 }
1986 
1987                 skreq->completion.status = cmp_status;
1988 
1989                 pbuf = skreq->pbuf;
1990                 ASSERT(pbuf != NULL);
1991 
1992                 Dcmn_err(CE_NOTE, "<<<< completion_posted 2: pbuf=%p "
1993                     "req_id=%x req_slot=%x", (void *)pbuf, req_id, req_slot);
1994                 if (cmp_status && skdev->disks_initialized) {
1995                         cmn_err(CE_WARN, "!%s: "
1996                             "I/O err: pbuf=%p blkno=%lld (%llx) nblks=%ld",
1997                             skdev->name, (void *)pbuf, pbuf->x_xfer->x_blkno,
1998                             pbuf->x_xfer->x_blkno, pbuf->x_xfer->x_nblks);
1999                 }
2000 
2001                 ASSERT(skdev->active_cmds);
2002                 atomic_dec_64(&skdev->active_cmds);
2003 
2004                 if (SAM_STAT_GOOD == cmp_status) {
2005                         /* Release DMA resources for the request. */
2006                         if (pbuf->x_xfer->x_nblks != 0)
2007                                 skd_blkdev_postop_sg_list(skdev, skreq);
2008                         WAITQ_UNLOCK(skdev);
2009                         skd_end_request(skdev, skreq, 0);
2010                         WAITQ_LOCK(skdev);
2011                 } else {
2012                         switch (skd_check_status(skdev, cmp_status, skerr)) {
2013                         case SKD_CHECK_STATUS_REPORT_GOOD:
2014                         case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
2015                                 WAITQ_UNLOCK(skdev);
2016                                 skd_end_request(skdev, skreq, 0);
2017                                 WAITQ_LOCK(skdev);
2018                                 break;
2019 
2020                         case SKD_CHECK_STATUS_BUSY_IMMINENT:
2021                                 skd_log_skreq(skdev, skreq, "retry(busy)");
2022                                 skd_queue(skdev, pbuf);
2023                                 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
2024                                 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2025 
2026                                 (void) skd_quiesce_dev(skdev);
2027                                 break;
2028 
2029                         case SKD_CHECK_STATUS_REPORT_ERROR:
2030                                 /* FALLTHROUGH: report the error */
2031                         default:
2033                                 /*
2034                                  * Save the entire completion
2035                                  * and error entries for
2036                                  * later error interpretation.
2037                                  */
2038                                 skreq->completion = *skcmp;
2039                                 skreq->err_info = *skerr;
2040                                 WAITQ_UNLOCK(skdev);
2041                                 skd_end_request(skdev, skreq, -EIO);
2042                                 WAITQ_LOCK(skdev);
2043                                 break;
2044                         }
2045                 }
2046 
2047                 /*
2048                  * Reclaim the FIT msg buffer if this is
2049                  * the first of the requests it carried to
2050                  * be completed. The FIT msg buffer used to
2051                  * send this request cannot be reused until
2052                  * we are sure the s1120 card has copied
2053                  * it to its memory. The FIT msg might have
2054                  * contained several requests. As soon as
2055                  * any of them are completed we know that
2056                  * the entire FIT msg was transferred.
2057                  * Only the first completed request will
2058                  * match the FIT msg buffer id. The FIT
2059                  * msg buffer id is immediately updated.
2060                  * When subsequent requests complete the FIT
2061                  * msg buffer id won't match, so we know
2062                  * quite cheaply that it is already done.
2063                  */
2064                 msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
2065 
2066                 ASSERT(msg_slot < skdev->num_fitmsg_context);
2067                 skmsg = &skdev->skmsg_table[msg_slot];
2068                 if (skmsg->id == skreq->fitmsg_id) {
2069                         ASSERT(SKD_MSG_STATE_BUSY == skmsg->state);
2070                         skmsg->state = SKD_MSG_STATE_IDLE;
2071                         skmsg->id += SKD_ID_INCR;
2072                         skmsg->next = skdev->skmsg_free_list;
2073                         skdev->skmsg_free_list = skmsg;
2074                 }
2075 
2076                 /*
2077                  * Decrease the number of active requests.
2078                  * This also decrements the count in the
2079                  * timeout slot.
2080                  */
2081                 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
2082                 ASSERT(skdev->timeout_slot[timo_slot] > 0);
2083                 ASSERT(skdev->queue_depth_busy > 0);
2084 
2085                 atomic_dec_32(&skdev->timeout_slot[timo_slot]);
2086                 atomic_dec_32(&skdev->queue_depth_busy);
2087 
2088                 /*
2089                  * Reclaim the skd_request_context
2090                  */
2091                 skreq->state = SKD_REQ_STATE_IDLE;
2092                 skreq->id += SKD_ID_INCR;
2093                 skreq->next = skdev->skreq_free_list;
2094                 skdev->skreq_free_list = skreq;
2095 
2096                 WAITQ_UNLOCK(skdev);
2097 
2098                 /*
2099                  * make sure the lock is held by caller.
2100                  */
2101                 if ((skdev->state == SKD_DRVR_STATE_PAUSING) &&
2102                     (0 == skdev->queue_depth_busy)) {
2103                         skdev->state = SKD_DRVR_STATE_PAUSED;
2104                         cv_signal(&skdev->cv_waitq);
2105                 }
2106         } /* for(;;) */
2107 }
2108 
2109 /*
2110  *
2111  * Name:        skd_complete_other, handle the completion of a
2112  *              non-r/w request.
2113  *
2114  * Inputs:      skdev           - device state structure.
2115  *              skcomp          - FIT completion structure.
2116  *              skerr           - error structure.
2117  *
2118  * Returns:     Nothing.
2119  *
2120  */
2121 static void
2122 skd_complete_other(struct skd_device *skdev,
2123     volatile struct fit_completion_entry_v1 *skcomp,
2124     volatile struct fit_comp_error_info *skerr)
2125 {
2126         uint32_t req_id = 0;
2127         uint32_t req_table;
2128         uint32_t req_slot;
2129         struct skd_special_context *skspcl;
2130 
2131         req_id = skcomp->tag;
2132         req_table = req_id & SKD_ID_TABLE_MASK;
2133         req_slot = req_id & SKD_ID_SLOT_MASK;
2134 
2135         Dcmn_err(CE_NOTE, "complete_other: table=0x%x id=0x%x slot=%d",
2136             req_table, req_id, req_slot);
2137 
2138         /*
2139          * Based on the request id, determine how to dispatch this
2140          * completion.  Only internal (driver-originated) requests use
2141          * this path; forward the completion to skd_complete_internal.
2142          */
2143         ASSERT(req_table == SKD_ID_INTERNAL);
2144         ASSERT(req_slot == 0);
2145 
2146         skspcl = &skdev->internal_skspcl;
2147         ASSERT(skspcl->req.id == req_id);
2148         ASSERT(skspcl->req.state == SKD_REQ_STATE_BUSY);
2149 
2150         Dcmn_err(CE_NOTE, "<<<<== complete_other: ID_INTERNAL");
2151         skd_complete_internal(skdev, skcomp, skerr, skspcl);
2152 }
2153 
2154 /*
2155  *
2156  * Name:        skd_reset_skcomp, resets the completion and error
2157  *              tables and the completion queue state.
2158  *
2159  * Inputs:      skdev           - device state structure.
2160  *
2161  * Returns:     Nothing.
2162  *
2163  */
2164 static void
2165 skd_reset_skcomp(struct skd_device *skdev)
2166 {
2167         uint32_t nbytes;
2168 
2169         nbytes =  sizeof (struct fit_completion_entry_v1) *
2170             SKD_N_COMPLETION_ENTRY;
2171         nbytes += sizeof (struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
2172 
2173         if (skdev->skcomp_table)
2174                 bzero(skdev->skcomp_table, nbytes);
2175 
2176         skdev->skcomp_ix = 0;
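             /*
              * Start at cycle 1 so the entries zeroed above (cycle 0)
              * read as not yet posted in skd_isr_completion_posted().
              */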
2177         skdev->skcomp_cycle = 1;
2178 }
2179 
2180 
2181 
2182 /*
2183  * INTERRUPTS
2184  */
2185 
2186 /*
2187  *
2188  * Name:        skd_isr_aif, handles the device interrupts.
2189  *
2190  * Inputs:      arg             - skdev device state structure.
2191  *              intvec          - not referenced
2192  *
2193  * Returns:     DDI_INTR_CLAIMED if the interrupt is handled,
2194  *              otherwise DDI_INTR_UNCLAIMED.
2195  *
2196  */
2197 /* ARGSUSED */  /* Upstream common source with other platforms. */
2198 static uint_t
2199 skd_isr_aif(caddr_t arg, caddr_t intvec)
2200 {
2201         uint32_t          intstat;
2202         uint32_t          ack;
2203         int               rc = DDI_INTR_UNCLAIMED;
2204         struct skd_device *skdev;
2205 
2206         skdev = (skd_device_t *)(uintptr_t)arg;
2207 
2208         ASSERT(skdev != NULL);
2209 
2210         skdev->intr_cntr++;
2211 
2212         Dcmn_err(CE_NOTE, "skd_isr_aif: intr=%" PRId64 "\n", skdev->intr_cntr);
2213 
2214         for (;;) {
2215 
2216                 ASSERT(!WAITQ_LOCK_HELD(skdev));
2217                 INTR_LOCK(skdev);
2218 
2219                 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
2220 
2221                 ack = FIT_INT_DEF_MASK;
2222                 ack &= intstat;
2223 
2224                 Dcmn_err(CE_NOTE, "intstat=0x%x ack=0x%x", intstat, ack);
2225 
2226                 /*
2227                  * As long as an interrupt is pending on the device, keep
2228                  * looping.  When none remain, get out; if we never claimed
2229                  * anything, run the completion handler once anyway.
2230                  */
2231                 if (ack == 0) {
2232                         /*
2233                          * No interrupt is pending, but run the completion
2234                          * processor anyway while the device is online.
2235                          */
2236                         if (rc == DDI_INTR_UNCLAIMED &&
2237                             skdev->state == SKD_DRVR_STATE_ONLINE) {
2238                                 Dcmn_err(CE_NOTE,
2239                                     "1: Want isr_comp_posted call");
2240                                 skd_isr_completion_posted(skdev);
2241                         }
2242                         INTR_UNLOCK(skdev);
2243 
2244                         break;
2245                 }
2246                 rc = DDI_INTR_CLAIMED;
2247 
2248                 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
2249 
2250                 if ((skdev->state != SKD_DRVR_STATE_LOAD) &&
2251                     (skdev->state != SKD_DRVR_STATE_STOPPING)) {
2252                         if (intstat & FIT_ISH_COMPLETION_POSTED) {
2253                                 Dcmn_err(CE_NOTE,
2254                                     "2: Want isr_comp_posted call");
2255                                 skd_isr_completion_posted(skdev);
2256                         }
2257 
2258                         if (intstat & FIT_ISH_FW_STATE_CHANGE) {
2259                                 Dcmn_err(CE_NOTE, "isr: fwstate change");
2260 
2261                                 skd_isr_fwstate(skdev);
2262                                 if (skdev->state == SKD_DRVR_STATE_FAULT ||
2263                                     skdev->state ==
2264                                     SKD_DRVR_STATE_DISAPPEARED) {
2265                                         INTR_UNLOCK(skdev);
2266 
2267                                         return (rc);
2268                                 }
2269                         }
2270 
2271                         if (intstat & FIT_ISH_MSG_FROM_DEV) {
2272                                 Dcmn_err(CE_NOTE, "isr: msg_from_dev change");
2273                                 skd_isr_msg_from_dev(skdev);
2274                         }
2275                 }
2276 
2277                 INTR_UNLOCK(skdev);
2278         }
2279 
2280         if (!SIMPLEQ_EMPTY(&skdev->waitqueue))
2281                 skd_start(skdev);
2282 
2283         return (rc);
2284 }
2285 
2286 /*
2287  *
2288  * Name:        skd_drive_fault, set the drive state to DRV_STATE_FAULT.
2289  *
2290  * Inputs:      skdev           - device state structure.
2291  *
2292  * Returns:     Nothing.
2293  *
2294  */
2295 static void
2296 skd_drive_fault(struct skd_device *skdev)
2297 {
2298         skdev->state = SKD_DRVR_STATE_FAULT;
2299         cmn_err(CE_WARN, "!(%s): Drive FAULT\n",
2300             skd_name(skdev));
2301 }
2302 
2303 /*
2304  *
2305  * Name:        skd_drive_disappeared, set the drive state to DISAPPEARED.
2306  *
2307  * Inputs:      skdev           - device state structure.
2308  *
2309  * Returns:     Nothing.
2310  *
2311  */
2312 static void
2313 skd_drive_disappeared(struct skd_device *skdev)
2314 {
2315         skdev->state = SKD_DRVR_STATE_DISAPPEARED;
2316         cmn_err(CE_WARN, "!(%s): Drive DISAPPEARED\n",
2317             skd_name(skdev));
2318 }
2319 
2320 /*
2321  *
2322  * Name:        skd_isr_fwstate, handles the various device states.
2323  *
2324  * Inputs:      skdev           - device state structure.
2325  *
2326  * Returns:     Nothing.
2327  *
2328  */
2329 static void
2330 skd_isr_fwstate(struct skd_device *skdev)
2331 {
2332         uint32_t sense;
2333         uint32_t state;
2334         int prev_driver_state;
2335         uint32_t mtd;
2336 
2337         prev_driver_state = skdev->state;
2338 
2339         sense = SKD_READL(skdev, FIT_STATUS);
2340         state = sense & FIT_SR_DRIVE_STATE_MASK;
2341 
2342         Dcmn_err(CE_NOTE, "s1120 state %s(%d)=>%s(%d)",
2343             skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
2344             skd_drive_state_to_str(state), state);
2345 
2346         skdev->drive_state = state;
2347 
2348         switch (skdev->drive_state) {
2349         case FIT_SR_DRIVE_INIT:
2350                 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
2351                         skd_disable_interrupts(skdev);
2352                         break;
2353                 }
2354                 if (skdev->state == SKD_DRVR_STATE_RESTARTING) {
2355                         skd_recover_requests(skdev);
2356                 }
2357                 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
2358                         skdev->timer_countdown =
2359                             SKD_TIMER_SECONDS(SKD_STARTING_TO);
2360                         skdev->state = SKD_DRVR_STATE_STARTING;
2361                         skd_soft_reset(skdev);
2362                         break;
2363                 }
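                     /*
                      * Kick off the FIT firmware handshake; the device's
                      * reply comes back through FIT_MSG_FROM_DEVICE and
                      * is handled by skd_isr_msg_from_dev().
                      */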
2364                 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
2365                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2366                 skdev->last_mtd = mtd;
2367                 break;
2368 
2369         case FIT_SR_DRIVE_ONLINE:
2370                 skdev->queue_depth_limit = skdev->soft_queue_depth_limit;
2371                 if (skdev->queue_depth_limit > skdev->hard_queue_depth_limit) {
2372                         skdev->queue_depth_limit =
2373                             skdev->hard_queue_depth_limit;
2374                 }
2375 
2376                 skdev->queue_depth_lowat = skdev->queue_depth_limit * 2 / 3 + 1;
2377                 if (skdev->queue_depth_lowat < 1)
2378                         skdev->queue_depth_lowat = 1;
2379                 Dcmn_err(CE_NOTE,
2380                     "%s queue depth limit=%d hard=%d soft=%d lowat=%d",
2381                     DRV_NAME,
2382                     skdev->queue_depth_limit,
2383                     skdev->hard_queue_depth_limit,
2384                     skdev->soft_queue_depth_limit,
2385                     skdev->queue_depth_lowat);
2386 
2387                 skd_refresh_device_data(skdev);
2388                 break;
2389         case FIT_SR_DRIVE_BUSY:
2390                 skdev->state = SKD_DRVR_STATE_BUSY;
2391                 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2392                 (void) skd_quiesce_dev(skdev);
2393                 break;
2394         case FIT_SR_DRIVE_BUSY_SANITIZE:
2395                 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
2396                 skd_start(skdev);
2397                 break;
2398         case FIT_SR_DRIVE_BUSY_ERASE:
2399                 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
2400                 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2401                 break;
2402         case FIT_SR_DRIVE_OFFLINE:
2403                 skdev->state = SKD_DRVR_STATE_IDLE;
2404                 break;
2405         case FIT_SR_DRIVE_SOFT_RESET:
2406                 /* Leave STARTING/RESTARTING alone; otherwise restart. */
2407 
2408                 switch (skdev->state) {
2409                 case SKD_DRVR_STATE_STARTING:
2410                 case SKD_DRVR_STATE_RESTARTING:
2411                         break;
2412                 default:
2413                         skdev->state = SKD_DRVR_STATE_RESTARTING;
2414                         break;
2415                 }
2416                 break;
2417         case FIT_SR_DRIVE_FW_BOOTING:
2418                 Dcmn_err(CE_NOTE,
2419                     "ISR FIT_SR_DRIVE_FW_BOOTING %s", skdev->name);
2420                 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
2421                 skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_WAIT_BOOT_TO);
2422                 break;
2423 
2424         case FIT_SR_DRIVE_DEGRADED:
2425         case FIT_SR_PCIE_LINK_DOWN:
2426         case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
2427                 break;
2428 
2429         case FIT_SR_DRIVE_FAULT:
2430                 skd_drive_fault(skdev);
2431                 skd_recover_requests(skdev);
2432                 skd_start(skdev);
2433                 break;
2434 
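             /*
              * All-ones status: reads are likely failing because the
              * device has dropped off the bus.
              */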
2435         case 0xFF:
2436                 skd_drive_disappeared(skdev);
2437                 skd_recover_requests(skdev);
2438                 skd_start(skdev);
2439                 break;
2440         default:
2441                 /*
2442                  * Unknown FW state. Wait for a state we recognize.
2443                  */
2444                 break;
2445         }
2446 
2447         Dcmn_err(CE_NOTE, "Driver state %s(%d)=>%s(%d)",
2448             skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
2449             skd_skdev_state_to_str(skdev->state), skdev->state);
2450 }
2451 
2452 /*
2453  *
2454  * Name:        skd_recover_requests, attempts to recover requests.
2455  *
2456  * Inputs:      skdev           - device state structure.
2457  *
2458  * Returns:     Nothing.
2459  *
2460  */
2461 static void
2462 skd_recover_requests(struct skd_device *skdev)
2463 {
2464         int i;
2465 
2466         ASSERT(INTR_LOCK_HELD(skdev));
2467 
2468         for (i = 0; i < skdev->num_req_context; i++) {
2469                 struct skd_request_context *skreq = &skdev->skreq_table[i];
2470 
2471                 if (skreq->state == SKD_REQ_STATE_BUSY) {
2472                         skd_log_skreq(skdev, skreq, "requeue");
2473 
2474                         ASSERT(0 != (skreq->id & SKD_ID_INCR));
2475                         ASSERT(skreq->pbuf != NULL);
2476                         /* Release DMA resources for the request. */
2477                         skd_blkdev_postop_sg_list(skdev, skreq);
2478 
2479                         skd_end_request(skdev, skreq, EAGAIN);
2480                         skreq->pbuf = NULL;
2481                         skreq->state = SKD_REQ_STATE_IDLE;
2482                         skreq->id += SKD_ID_INCR;
2483                 }
2484                 if (i > 0) {
2485                         skreq[-1].next = skreq;
2486                 }
2487                 skreq->next = NULL;
2488         }
2489 
2490         WAITQ_LOCK(skdev);
2491         skdev->skreq_free_list = skdev->skreq_table;
2492         WAITQ_UNLOCK(skdev);
2493 
2494         for (i = 0; i < skdev->num_fitmsg_context; i++) {
2495                 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
2496 
2497                 if (skmsg->state == SKD_MSG_STATE_BUSY) {
2498                         skd_log_skmsg(skdev, skmsg, "salvaged");
2499                         ASSERT((skmsg->id & SKD_ID_INCR) != 0);
2500                         skmsg->state = SKD_MSG_STATE_IDLE;
2501                         skmsg->id &= ~SKD_ID_INCR;
2502                 }
2503                 if (i > 0) {
2504                         skmsg[-1].next = skmsg;
2505                 }
2506                 skmsg->next = NULL;
2507         }
2508         WAITQ_LOCK(skdev);
2509         skdev->skmsg_free_list = skdev->skmsg_table;
2510         WAITQ_UNLOCK(skdev);
2511 
2512         for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++) {
2513                 skdev->timeout_slot[i] = 0;
2514         }
2515         skdev->queue_depth_busy = 0;
2516 }
2517 
2518 /*
2519  *
2520  * Name:        skd_isr_msg_from_dev, handles a message from the device.
2521  *
2522  * Inputs:      skdev           - device state structure.
2523  *
2524  * Returns:     Nothing.
2525  *
2526  */
2527 static void
2528 skd_isr_msg_from_dev(struct skd_device *skdev)
2529 {
2530         uint32_t mfd;
2531         uint32_t mtd;
2532 
2533         Dcmn_err(CE_NOTE, "skd_isr_msg_from_dev:");
2534 
2535         mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
2536 
2537         Dcmn_err(CE_NOTE, "mfd=0x%x last_mtd=0x%x\n", mfd, skdev->last_mtd);
2538 
2539         /*
2540          * ignore any mtd that is an ack for something we didn't send
2541          */
2542         if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd)) {
2543                 return;
2544         }
2545 
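             /*
              * Initialization handshake: FITFW_INIT -> GET_CMDQ_DEPTH ->
              * SET_COMPQ_DEPTH -> SET_COMPQ_ADDR -> ARM_QUEUE.  Each ack
              * from the device triggers the next step.
              */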
2546         switch (FIT_MXD_TYPE(mfd)) {
2547         case FIT_MTD_FITFW_INIT:
2548                 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
2549 
2550                 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
2551                         cmn_err(CE_WARN, "!(%s): protocol mismatch\n",
2552                             skdev->name);
2553                         cmn_err(CE_WARN, "!(%s):   got=%d support=%d\n",
2554                             skdev->name, skdev->proto_ver,
2555                             FIT_PROTOCOL_VERSION_1);
2556                         cmn_err(CE_WARN, "!(%s):   please upgrade driver\n",
2557                             skdev->name);
2558                         skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
2559                         skd_soft_reset(skdev);
2560                         break;
2561                 }
2562                 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
2563                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2564                 skdev->last_mtd = mtd;
2565                 break;
2566 
2567         case FIT_MTD_GET_CMDQ_DEPTH:
2568                 skdev->hard_queue_depth_limit = FIT_MXD_DATA(mfd);
2569                 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
2570                     SKD_N_COMPLETION_ENTRY);
2571                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2572                 skdev->last_mtd = mtd;
2573                 break;
2574 
2575         case FIT_MTD_SET_COMPQ_DEPTH:
2576                 SKD_WRITEQ(skdev, skdev->cq_dma_address.cookies->dmac_laddress,
2577                     FIT_MSG_TO_DEVICE_ARG);
2578                 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
2579                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2580                 skdev->last_mtd = mtd;
2581                 break;
2582 
2583         case FIT_MTD_SET_COMPQ_ADDR:
2584                 skd_reset_skcomp(skdev);
2585                 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
2586                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2587                 skdev->last_mtd = mtd;
2588                 break;
2589 
2590         case FIT_MTD_ARM_QUEUE:
2591                 skdev->last_mtd = 0;
2592                 /*
2593                  * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
2594                  */
2595                 break;
2596 
2597         default:
2598                 break;
2599         }
2600 }
2601 
2602 
2603 /*
2604  *
2605  * Name:        skd_disable_interrupts, issues command to disable
2606  *              device interrupts.
2607  *
2608  * Inputs:      skdev           - device state structure.
2609  *
2610  * Returns:     Nothing.
2611  *
2612  */
2613 static void
2614 skd_disable_interrupts(struct skd_device *skdev)
2615 {
2616         uint32_t sense;
2617 
2618         Dcmn_err(CE_NOTE, "skd_disable_interrupts:");
2619 
2620         sense = SKD_READL(skdev, FIT_CONTROL);
2621         sense &= ~FIT_CR_ENABLE_INTERRUPTS;
2622         SKD_WRITEL(skdev, sense, FIT_CONTROL);
2623 
2624         Dcmn_err(CE_NOTE, "sense 0x%x", sense);
2625 
2626         /*
2627          * Note that all 1s are written: a 1 bit means
2628          * disable, a 0 bit means enable.
2629          */
2630         SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
2631 }
2632 
2633 /*
2634  *
2635  * Name:        skd_enable_interrupts, issues command to enable
2636  *              device interrupts.
2637  *
2638  * Inputs:      skdev           - device state structure.
2639  *
2640  * Returns:     Nothing.
2641  *
2642  */
2643 static void
2644 skd_enable_interrupts(struct skd_device *skdev)
2645 {
2646         uint32_t val;
2647 
2648         Dcmn_err(CE_NOTE, "skd_enable_interrupts:");
2649 
2650         /* unmask interrupts first */
2651         val = FIT_ISH_FW_STATE_CHANGE +
2652             FIT_ISH_COMPLETION_POSTED +
2653             FIT_ISH_MSG_FROM_DEV;
2654 
2655         /*
2656          * Note that the complement of the mask is written. A 1-bit means
2657          * disable, a 0 means enable.
2658          */
2659         SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
2660 
2661         Dcmn_err(CE_NOTE, "interrupt mask=0x%x", ~val);
2662 
2663         val = SKD_READL(skdev, FIT_CONTROL);
2664         val |= FIT_CR_ENABLE_INTERRUPTS;
2665 
2666         Dcmn_err(CE_NOTE, "control=0x%x", val);
2667 
2668         SKD_WRITEL(skdev, val, FIT_CONTROL);
2669 }
2670 
2671 /*
2672  *
2673  * Name:        skd_soft_reset, issues a soft reset to the hardware.
2674  *
2675  * Inputs:      skdev           - device state structure.
2676  *
2677  * Returns:     Nothing.
2678  *
2679  */
2680 static void
2681 skd_soft_reset(struct skd_device *skdev)
2682 {
2683         uint32_t val;
2684 
2685         Dcmn_err(CE_NOTE, "skd_soft_reset:");
2686 
2687         val = SKD_READL(skdev, FIT_CONTROL);
2688         val |= (FIT_CR_SOFT_RESET);
2689 
2690         Dcmn_err(CE_NOTE, "soft_reset: control=0x%x", val);
2691 
2692         SKD_WRITEL(skdev, val, FIT_CONTROL);
2693 }
2694 
2695 /*
2696  *
2697  * Name:        skd_start_device, gets the device going.
2698  *
2699  * Inputs:      skdev           - device state structure.
2700  *
2701  * Returns:     Nothing.
2702  *
2703  */
2704 static void
2705 skd_start_device(struct skd_device *skdev)
2706 {
2707         uint32_t state;
2708         int delay_action = 0;
2709 
2710         Dcmn_err(CE_NOTE, "skd_start_device:");
2711 
2712         /* ack all ghost interrupts */
2713         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2714 
2715         state = SKD_READL(skdev, FIT_STATUS);
2716 
2717         Dcmn_err(CE_NOTE, "initial status=0x%x", state);
2718 
2719         state &= FIT_SR_DRIVE_STATE_MASK;
2720         skdev->drive_state = state;
2721         skdev->last_mtd = 0;
2722 
2723         skdev->state = SKD_DRVR_STATE_STARTING;
2724         skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_STARTING_TO);
2725 
2726         skd_enable_interrupts(skdev);
2727 
2728         switch (skdev->drive_state) {
2729         case FIT_SR_DRIVE_OFFLINE:
2730                 Dcmn_err(CE_NOTE, "(%s): Drive offline...",
2731                     skd_name(skdev));
2732                 break;
2733 
2734         case FIT_SR_DRIVE_FW_BOOTING:
2735                 Dcmn_err(CE_NOTE, "FIT_SR_DRIVE_FW_BOOTING %s\n", skdev->name);
2736                 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
2737                 skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_WAIT_BOOT_TO);
2738                 break;
2739 
2740         case FIT_SR_DRIVE_BUSY_SANITIZE:
2741                 Dcmn_err(CE_NOTE, "(%s): Start: BUSY_SANITIZE\n",
2742                     skd_name(skdev));
2743                 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
2744                 skdev->timer_countdown = SKD_TIMER_SECONDS(60);
2745                 break;
2746 
2747         case FIT_SR_DRIVE_BUSY_ERASE:
2748                 Dcmn_err(CE_NOTE, "(%s): Start: BUSY_ERASE\n",
2749                     skd_name(skdev));
2750                 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
2751                 skdev->timer_countdown = SKD_TIMER_SECONDS(60);
2752                 break;
2753 
2754         case FIT_SR_DRIVE_INIT:
2755         case FIT_SR_DRIVE_ONLINE:
2756                 skd_soft_reset(skdev);
2757 
2758                 break;
2759 
2760         case FIT_SR_DRIVE_BUSY:
2761                 Dcmn_err(CE_NOTE, "(%s): Drive Busy...\n",
2762                     skd_name(skdev));
2763                 skdev->state = SKD_DRVR_STATE_BUSY;
2764                 skdev->timer_countdown = SKD_TIMER_SECONDS(60);
2765                 break;
2766 
2767         case FIT_SR_DRIVE_SOFT_RESET:
2768                 Dcmn_err(CE_NOTE, "(%s) drive soft reset in prog\n",
2769                     skd_name(skdev));
2770                 break;
2771 
2772         case FIT_SR_DRIVE_FAULT:
2773                 /*
2774                  * The fault state is bad; a soft reset won't clear it,
2775                  * and it is unclear whether a hard reset would.
2776                  * For now, just fault so the system doesn't hang.
2777                  */
2778                 skd_drive_fault(skdev);
2779 
2780                 delay_action = 1;
2781                 break;
2782 
2783         case 0xFF:
2784                 skd_drive_disappeared(skdev);
2785 
2786                 delay_action = 1;
2787                 break;
2788 
2789         default:
2790                 Dcmn_err(CE_NOTE, "(%s) Start: unknown state %x\n",
2791                     skd_name(skdev), skdev->drive_state);
2792                 break;
2793         }
2794 
2795         state = SKD_READL(skdev, FIT_CONTROL);
2796         Dcmn_err(CE_NOTE, "FIT Control Status=0x%x\n", state);
2797 
2798         state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
2799         Dcmn_err(CE_NOTE, "Intr Status=0x%x\n", state);
2800 
2801         state = SKD_READL(skdev, FIT_INT_MASK_HOST);
2802         Dcmn_err(CE_NOTE, "Intr Mask=0x%x\n", state);
2803 
2804         state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
2805         Dcmn_err(CE_NOTE, "Msg from Dev=0x%x\n", state);
2806 
2807         state = SKD_READL(skdev, FIT_HW_VERSION);
2808         Dcmn_err(CE_NOTE, "HW version=0x%x\n", state);
2809 
2810         if (delay_action) {
2811                 /* start the queue so we can respond with error to requests */
2812                 Dcmn_err(CE_NOTE, "Starting %s queue\n", skdev->name);
2813                 skd_start(skdev);
2814                 skdev->gendisk_on = -1;
2815                 cv_signal(&skdev->cv_waitq);
2816         }
2817 }
2818 
2819 /*
2820  *
2821  * Name:        skd_restart_device, restart the hardware.
2822  *
2823  * Inputs:      skdev           - device state structure.
2824  *
2825  * Returns:     Nothing.
2826  *
2827  */
2828 static void
2829 skd_restart_device(struct skd_device *skdev)
2830 {
2831         uint32_t state;
2832 
2833         Dcmn_err(CE_NOTE, "skd_restart_device:");
2834 
2835         /* ack all ghost interrupts */
2836         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2837 
2838         state = SKD_READL(skdev, FIT_STATUS);
2839 
2840         Dcmn_err(CE_NOTE, "skd_restart_device: drive status=0x%x\n", state);
2841 
2842         state &= FIT_SR_DRIVE_STATE_MASK;
2843         skdev->drive_state = state;
2844         skdev->last_mtd = 0;
2845 
2846         skdev->state = SKD_DRVR_STATE_RESTARTING;
2847         skdev->timer_countdown = SKD_TIMER_MINUTES(4);
2848 
2849         skd_soft_reset(skdev);
2850 }
2851 
2852 /*
2853  *
2854  * Name:        skd_stop_device, stops the device.
2855  *
2856  * Inputs:      skdev           - device state structure.
2857  *
2858  * Returns:     Nothing.
2859  *
2860  */
2861 static void
2862 skd_stop_device(struct skd_device *skdev)
2863 {
2864         clock_t cur_ticks, tmo;
2865         int secs;
2866         struct skd_special_context *skspcl = &skdev->internal_skspcl;
2867 
2868         if (SKD_DRVR_STATE_ONLINE != skdev->state) {
2869                 Dcmn_err(CE_NOTE, "(%s): skd_stop_device not online no sync\n",
2870                     skdev->name);
2871                 goto stop_out;
2872         }
2873 
2874         if (SKD_REQ_STATE_IDLE != skspcl->req.state) {
2875                 Dcmn_err(CE_NOTE, "(%s): skd_stop_device no special\n",
2876                     skdev->name);
2877                 goto stop_out;
2878         }
2879 
2880         skdev->state = SKD_DRVR_STATE_SYNCING;
2881         skdev->sync_done = 0;
2882 
2883         skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
2884 
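             /*
              * Wait up to 10 seconds for skd_complete_internal() to set
              * sync_done and signal cv_waitq.
              */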
2885         secs = 10;
2886         mutex_enter(&skdev->skd_internalio_mutex);
2887         while (skdev->sync_done == 0) {
2888                 cur_ticks = ddi_get_lbolt();
2889                 tmo = cur_ticks + drv_usectohz(1000000 * secs);
2890                 if (cv_timedwait(&skdev->cv_waitq,
2891                     &skdev->skd_internalio_mutex, tmo) == -1) {
2892                         /* Oops - timed out */
2893 
2894                         Dcmn_err(CE_NOTE, "stop_device - %d secs TMO", secs);
2895                 }
2896         }
2897 
2898         mutex_exit(&skdev->skd_internalio_mutex);
2899 
2900         switch (skdev->sync_done) {
2901         case 0:
2902                 Dcmn_err(CE_NOTE, "(%s): skd_stop_device no sync\n",
2903                     skdev->name);
2904                 break;
2905         case 1:
2906                 Dcmn_err(CE_NOTE, "(%s): skd_stop_device sync done\n",
2907                     skdev->name);
2908                 break;
2909         default:
2910                 Dcmn_err(CE_NOTE, "(%s): skd_stop_device sync error\n",
2911                     skdev->name);
2912         }
2913 
2914 
2915 stop_out:
2916         skdev->state = SKD_DRVR_STATE_STOPPING;
2917 
2918         skd_disable_interrupts(skdev);
2919 
2920         /* ensure all ints on device are cleared */
2921         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2922         /* soft reset the device to unload with a clean slate */
2923         SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
2924 }
2925 
2926 /*
2927  * CONSTRUCT
2928  */
2929 
2930 static int skd_cons_skcomp(struct skd_device *);
2931 static int skd_cons_skmsg(struct skd_device *);
2932 static int skd_cons_skreq(struct skd_device *);
2933 static int skd_cons_sksb(struct skd_device *);
2934 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *, uint32_t,
2935     dma_mem_t *);
2936 
2937 /*
2938  *
2939  * Name:        skd_construct, calls other routines to build device
2940  *              interface structures.
2941  *
2942  * Inputs:      skdev           - device state structure.
2943  *              instance        - DDI instance number.
2944  *
2945  * Returns:     DDI_FAILURE on any failure, otherwise
2946  *              DDI_SUCCESS.
2947  *
2948  */
2949 /* ARGSUSED */  /* Upstream common source with other platforms. */
2950 static int
2951 skd_construct(skd_device_t *skdev, int instance)
2952 {
2953         int rc = 0;
2954 
2955         skdev->state = SKD_DRVR_STATE_LOAD;
2956         skdev->irq_type = skd_isr_type;
2957         skdev->soft_queue_depth_limit = skd_max_queue_depth;
2958         skdev->hard_queue_depth_limit = 10; /* until GET_CMDQ_DEPTH */
2959 
2960         skdev->num_req_context = skd_max_queue_depth;
2961         skdev->num_fitmsg_context = skd_max_queue_depth;
2962 
2963         skdev->queue_depth_limit = skdev->hard_queue_depth_limit;
2964         skdev->queue_depth_lowat = 1;
2965         skdev->proto_ver = 99; /* initialize to invalid value */
2966         skdev->sgs_per_request = skd_sgs_per_request;
2967         skdev->dbg_level = skd_dbg_level;
2968 
2969         rc = skd_cons_skcomp(skdev);
2970         if (rc < 0) {
2971                 goto err_out;
2972         }
2973 
2974         rc = skd_cons_skmsg(skdev);
2975         if (rc < 0) {
2976                 goto err_out;
2977         }
2978 
2979         rc = skd_cons_skreq(skdev);
2980         if (rc < 0) {
2981                 goto err_out;
2982         }
2983 
2984         rc = skd_cons_sksb(skdev);
2985         if (rc < 0) {
2986                 goto err_out;
2987         }
2988 
2989         Dcmn_err(CE_NOTE, "CONSTRUCT VICTORY");
2990 
2991         return (DDI_SUCCESS);
2992 
2993 err_out:
2994         Dcmn_err(CE_NOTE, "construct failed\n");
2995         skd_destruct(skdev);
2996 
2997         return (DDI_FAILURE);
2998 }
2999 
3000 /*
3001  *
3002  * Name:        skd_free_phys, frees DMA memory.
3003  *
3004  * Inputs:      skdev           - device state structure.
3005  *              mem             - DMA info.
3006  *
3007  * Returns:     Nothing.
3008  *
3009  */
3010 static void
3011 skd_free_phys(skd_device_t *skdev, dma_mem_t *mem)
3012 {
3013         _NOTE(ARGUNUSED(skdev));
3014 
3015         if (mem == NULL || mem->dma_handle == NULL)
3016                 return;
3017 
3018         (void) ddi_dma_unbind_handle(mem->dma_handle);
3019 
3020         if (mem->acc_handle != NULL) {
3021                 ddi_dma_mem_free(&mem->acc_handle);
3022                 mem->acc_handle = NULL;
3023         }
3024 
3025         mem->bp = NULL;
3026         ddi_dma_free_handle(&mem->dma_handle);
3027         mem->dma_handle = NULL;
3028 }
3029 
3030 /*
3031  *
3032  * Name:        skd_alloc_dma_mem, allocates DMA memory.
3033  *
3034  * Inputs:      skdev           - device state structure.
3035  *              mem             - DMA data structure.
3037  *              atype           - specifies a 32-bit or 64-bit allocation.
3038  *
3039  * Returns:     Void pointer to mem->bp on success else NULL.
3040  *              NOTE:  This routine always sleeps for its allocations
3041  *              (DDI_DMA_SLEEP), yet there are still failure modes, so
3042  *              callers MUST check the return value.
3043  *
3044  */
3045 static void *
3046 skd_alloc_dma_mem(skd_device_t *skdev, dma_mem_t *mem, uint8_t atype)
3047 {
3048         size_t          rlen;
3049         uint_t          cnt;
3050         ddi_dma_attr_t  dma_attr = skd_64bit_io_dma_attr;
3051         ddi_device_acc_attr_t acc_attr = {
3052                 DDI_DEVICE_ATTR_V0,
3053                 DDI_STRUCTURE_LE_ACC,
3054                 DDI_STRICTORDER_ACC
3055         };
3056 
3057         if (atype == ATYPE_32BIT)
3058                 dma_attr.dma_attr_addr_hi = SKD_DMA_HIGH_32BIT_ADDRESS;
3059 
3060         dma_attr.dma_attr_sgllen = 1;
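     /*
      * dma_attr_sgllen == 1 asks for a single-cookie binding, i.e. one
      * physically contiguous region per buffer; the cookie count is
      * still verified after the bind below.
      */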
3061 
3062         /*
3063          * Allocate DMA memory.
3064          */
3065         if (ddi_dma_alloc_handle(skdev->dip, &dma_attr, DDI_DMA_SLEEP, NULL,
3066             &mem->dma_handle) != DDI_SUCCESS) {
3067                 cmn_err(CE_WARN, "!alloc_dma_mem-1, failed");
3068 
3069                 mem->dma_handle = NULL;
3070 
3071                 return (NULL);
3072         }
3073 
3074         if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
3075             DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, (caddr_t *)&mem->bp, &rlen,
3076             &mem->acc_handle) != DDI_SUCCESS) {
3077                 cmn_err(CE_WARN, "!skd_alloc_dma_mem-2, failed");
3078                 ddi_dma_free_handle(&mem->dma_handle);
3079                 mem->dma_handle = NULL;
3080                 mem->acc_handle = NULL;
3081                 mem->bp = NULL;
3082 
3083                 return (NULL);
3084         }
3085         bzero(mem->bp, mem->size);
3086 
3087         if (ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
3088             mem->size, (DDI_DMA_CONSISTENT | DDI_DMA_RDWR), DDI_DMA_SLEEP, NULL,
3089             &mem->cookie, &cnt) != DDI_DMA_MAPPED) {
3090                 cmn_err(CE_WARN, "!skd_alloc_dma_mem-3, failed");
3091                 ddi_dma_mem_free(&mem->acc_handle);
3092                 ddi_dma_free_handle(&mem->dma_handle);
3093 
3094                 return (NULL);
3095         }
3096 
3097         if (cnt > 1) {
3098                 cmn_err(CE_WARN, "!skd_alloc_dma_mem-4, failed, "
3099                     "cookie_count %d > 1", cnt);
3100                 /* skd_free_phys() unbinds and frees both handles. */
3101                 skd_free_phys(skdev, mem);
3102 
3103                 return (NULL);
3104         }
3105         mem->cookies = &mem->cookie;
3106         mem->cookies->dmac_size = mem->size;
3107 
3108         return (mem->bp);
3109 }
3110 
3111 /*
3112  *
3113  * Name:        skd_cons_skcomp, allocates space for the skcomp table.
3114  *
3115  * Inputs:      skdev           - device state structure.
3116  *
3117  * Returns:     -ENOMEM if no memory, otherwise 0.
3118  *
3119  */
3120 static int
3121 skd_cons_skcomp(struct skd_device *skdev)
3122 {
3123         uint64_t        *dma_alloc;
3124         struct fit_completion_entry_v1 *skcomp;
3125         int             rc = 0;
3126         uint32_t                nbytes;
3127         dma_mem_t       *mem;
3128 
3129         nbytes = sizeof (*skcomp) * SKD_N_COMPLETION_ENTRY;
3130         nbytes += sizeof (struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
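     /*
      * A single DMA allocation holds the completion ring followed by
      * the matching per-entry error-info table.
      */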
3131 
3132         Dcmn_err(CE_NOTE, "cons_skcomp: nbytes=%d,entries=%d", nbytes,
3133             SKD_N_COMPLETION_ENTRY);
3134 
3135         mem = &skdev->cq_dma_address;
3136         mem->size = nbytes;
3137 
3138         dma_alloc = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT);
3139         skcomp = (struct fit_completion_entry_v1 *)dma_alloc;
3140         if (skcomp == NULL) {
3141                 rc = -ENOMEM;
3142                 goto err_out;
3143         }
3144 
3145         bzero(skcomp, nbytes);
3146 
3147         Dcmn_err(CE_NOTE, "cons_skcomp: skcomp=%p nbytes=%d",
3148             (void *)skcomp, nbytes);
3149 
3150         skdev->skcomp_table = skcomp;
3151         skdev->skerr_table = (struct fit_comp_error_info *)(dma_alloc +
3152             (SKD_N_COMPLETION_ENTRY * sizeof (*skcomp) / sizeof (uint64_t)));
3153 
3154 err_out:
3155         return (rc);
3156 }
3157 
3158 /*
3159  *
3160  * Name:        skd_cons_skmsg, allocates space for the skmsg table.
3161  *
3162  * Inputs:      skdev           - device state structure.
3163  *
3164  * Returns:     -ENOMEM if no memory, otherwise 0.
3165  *
3166  */
3167 static int
3168 skd_cons_skmsg(struct skd_device *skdev)
3169 {
3170         dma_mem_t       *mem;
3171         int             rc = 0;
3172         uint32_t                i;
3173 
3174         Dcmn_err(CE_NOTE,
                 "skmsg_table kmem_zalloc, struct %lu, count %u total %lu",
3175             (ulong_t)sizeof (struct skd_fitmsg_context),
3176             skdev->num_fitmsg_context,
3177             (ulong_t)(sizeof (struct skd_fitmsg_context) *
3178             skdev->num_fitmsg_context));
3179 
3180         skdev->skmsg_table = (struct skd_fitmsg_context *)kmem_zalloc(
3181             sizeof (struct skd_fitmsg_context) * skdev->num_fitmsg_context,
3182             KM_SLEEP);
3183 
3184         for (i = 0; i < skdev->num_fitmsg_context; i++) {
3185                 struct skd_fitmsg_context *skmsg;
3186 
3187                 skmsg = &skdev->skmsg_table[i];
3188 
3189                 skmsg->id = i + SKD_ID_FIT_MSG;
3190 
3191                 skmsg->state = SKD_MSG_STATE_IDLE;
3192 
3193                 mem = &skmsg->mb_dma_address;
3194                 mem->size = SKD_N_FITMSG_BYTES + 64;
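             /*
              * Note: the extra 64 bytes beyond SKD_N_FITMSG_BYTES appear
              * to leave room to place the FIT message on a 64-byte
              * boundary within the buffer.
              */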
3195 
3196                 skmsg->msg_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT);
3197 
3198                 if (NULL == skmsg->msg_buf) {
3199                         rc = -ENOMEM;
3200                         i++;
3201                         break;
3202                 }
3203 
3204                 skmsg->offset = 0;
3205 
3206                 bzero(skmsg->msg_buf, SKD_N_FITMSG_BYTES);
3207 
3208                 skmsg->next = &skmsg[1];
3209         }
3210 
3211         /* Free list is in order starting with the 0th entry. */
3212         skdev->skmsg_table[i - 1].next = NULL;
3213         skdev->skmsg_free_list = skdev->skmsg_table;
3214 
3215         return (rc);
3216 }
3217 
3218 /*
3219  *
3220  * Name:        skd_cons_skreq, allocates space for the skreq table.
3221  *
3222  * Inputs:      skdev           - device state structure.
3223  *
3224  * Returns:     -ENOMEM if no memory, otherwise 0.
3225  *
3226  */
3227 static int
3228 skd_cons_skreq(struct skd_device *skdev)
3229 {
3230         int     rc = 0;
3231         uint32_t        i;
3232 
3233         Dcmn_err(CE_NOTE,
3234             "skreq_table kmem_zalloc, struct %lu, count %u total %lu",
3235             (ulong_t)sizeof (struct skd_request_context),
3236             skdev->num_req_context,
3237             (ulong_t) (sizeof (struct skd_request_context) *
3238             skdev->num_req_context));
3239 
3240         skdev->skreq_table = (struct skd_request_context *)kmem_zalloc(
3241             sizeof (struct skd_request_context) * skdev->num_req_context,
3242             KM_SLEEP);
3243 
3244         for (i = 0; i < skdev->num_req_context; i++) {
3245                 struct skd_request_context *skreq;
3246 
3247                 skreq = &skdev->skreq_table[i];
3248 
3249                 skreq->id = (uint16_t)(i + SKD_ID_RW_REQUEST);
3250                 skreq->state = SKD_REQ_STATE_IDLE;
3251 
3252                 skreq->sksg_list = skd_cons_sg_list(skdev,
3253                     skdev->sgs_per_request,
3254                     &skreq->sksg_dma_address);
3255 
3256                 if (NULL == skreq->sksg_list) {
3257                         rc = -ENOMEM;
3258                         goto err_out;
3259                 }
3260 
3261                 skreq->next = &skreq[1];
3262         }
3263 
3264         /* Free list is in order starting with the 0th entry. */
3265         skdev->skreq_table[i - 1].next = NULL;
3266         skdev->skreq_free_list = skdev->skreq_table;
3267 
3268 err_out:
3269         return (rc);
3270 }
3271 
3272 /*
3273  *
3274  * Name:        skd_cons_sksb, allocates space for the skspcl msg buf
3275  *              and data buf.
3276  *
3277  * Inputs:      skdev           - device state structure.
3278  *
3279  * Returns:     -ENOMEM if no memory, otherwise 0.
3280  *
3281  */
3282 static int
3283 skd_cons_sksb(struct skd_device *skdev)
3284 {
3285         int                             rc = 0;
3286         struct skd_special_context      *skspcl;
3287         dma_mem_t                       *mem;
3288         uint32_t                                nbytes;
3289 
3290         skspcl = &skdev->internal_skspcl;
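     /*
      * The single internal special context carries driver-generated
      * commands (see skd_format_internal_skspcl()), separate from the
      * normal skreq pool.
      */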
3291 
3292         skspcl->req.id = 0 + SKD_ID_INTERNAL;
3293         skspcl->req.state = SKD_REQ_STATE_IDLE;
3294 
3295         nbytes = SKD_N_INTERNAL_BYTES;
3296 
3297         mem = &skspcl->db_dma_address;
3298         mem->size = nbytes;
3299 
3300         /* data_buf's DMA pointer is skspcl->db_dma_address */
3301         skspcl->data_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT);
3302         if (skspcl->data_buf == NULL) {
3303                 rc = -ENOMEM;
3304                 goto err_out;
3305         }
3306 
3307         bzero(skspcl->data_buf, nbytes);
3308 
3309         nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
3310 
3311         mem = &skspcl->mb_dma_address;
3312         mem->size = nbytes;
3313 
3314         /* msg_buf DMA pointer is skspcl->mb_dma_address */
3315         skspcl->msg_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT);
3316         if (skspcl->msg_buf == NULL) {
3317                 rc = -ENOMEM;
3318                 goto err_out;
3319         }
3320 
3321 
3322         bzero(skspcl->msg_buf, nbytes);
3323 
3324         skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
3325             &skspcl->req.sksg_dma_address);
3326 
3327 
3328         if (skspcl->req.sksg_list == NULL) {
3329                 rc = -ENOMEM;
3330                 goto err_out;
3331         }
3332 
3333         if (skd_format_internal_skspcl(skdev) == 0) {
3334                 rc = -EINVAL;
3335                 goto err_out;
3336         }
3337 
3338 err_out:
3339         return (rc);
3340 }
3341 
3342 /*
3343  *
3344  * Name:        skd_cons_sg_list, allocates the S/G list.
3345  *
3346  * Inputs:      skdev           - device state structure.
3347  *              n_sg            - Number of scatter-gather entries.
3348  *              ret_dma_addr    - S/G list DMA pointer.
3349  *
3350  * Returns:     A list of FIT message descriptors.
3351  *
3352  */
3353 static struct fit_sg_descriptor *
3354 skd_cons_sg_list(struct skd_device *skdev,
3355     uint32_t n_sg, dma_mem_t *ret_dma_addr)
3356 {
3357         struct fit_sg_descriptor *sg_list;
3358         uint32_t nbytes;
3359         dma_mem_t *mem;
3360 
3361         nbytes = sizeof (*sg_list) * n_sg;
3362 
3363         mem = ret_dma_addr;
3364         mem->size = nbytes;
3365 
3366         /* sg_list's DMA pointer is *ret_dma_addr */
3367         sg_list = skd_alloc_dma_mem(skdev, mem, ATYPE_32BIT);
3368 
3369         if (sg_list != NULL) {
3370                 uint64_t dma_address = ret_dma_addr->cookie.dmac_laddress;
3371                 uint32_t i;
3372 
3373                 bzero(sg_list, nbytes);
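             /*
              * The loop below links each descriptor to the DMA address
              * of the next one; the final descriptor terminates the
              * chain with a null next_desc_ptr.
              */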
3374 
3375                 for (i = 0; i < n_sg - 1; i++) {
3376                         uint64_t ndp_off;
3377                         ndp_off = (i + 1) * sizeof (struct fit_sg_descriptor);
3378 
3379                         sg_list[i].next_desc_ptr = dma_address + ndp_off;
3380                 }
3381                 sg_list[i].next_desc_ptr = 0LL;
3382         }
3383 
3384         return (sg_list);
3385 }
3386 
3387 /*
3388  * DESTRUCT (FREE)
3389  */
3390 
3391 static void skd_free_skcomp(struct skd_device *skdev);
3392 static void skd_free_skmsg(struct skd_device *skdev);
3393 static void skd_free_skreq(struct skd_device *skdev);
3394 static void skd_free_sksb(struct skd_device *skdev);
3395 
3396 static void skd_free_sg_list(struct skd_device *skdev,
3397     struct fit_sg_descriptor *sg_list,
3398     uint32_t n_sg, dma_mem_t dma_addr);
3399 
3400 /*
3401  *
3402  * Name:        skd_destruct, calls various routines to deallocate
3403  *              space acquired during initialization.
3404  *
3405  * Inputs:      skdev           - device state structure.
3406  *
3407  * Returns:     Nothing.
3408  *
3409  */
3410 static void
3411 skd_destruct(struct skd_device *skdev)
3412 {
3413         if (skdev == NULL) {
3414                 return;
3415         }
3416 
3417         Dcmn_err(CE_NOTE, "destruct sksb");
3418         skd_free_sksb(skdev);
3419 
3420         Dcmn_err(CE_NOTE, "destruct skreq");
3421         skd_free_skreq(skdev);
3422 
3423         Dcmn_err(CE_NOTE, "destruct skmsg");
3424         skd_free_skmsg(skdev);
3425 
3426         Dcmn_err(CE_NOTE, "destruct skcomp");
3427         skd_free_skcomp(skdev);
3428 
3429         Dcmn_err(CE_NOTE, "DESTRUCT VICTORY");
3430 }
3431 
3432 /*
3433  *
3434  * Name:        skd_free_skcomp, deallocates skcomp table DMA resources.
3435  *
3436  * Inputs:      skdev           - device state structure.
3437  *
3438  * Returns:     Nothing.
3439  *
3440  */
3441 static void
3442 skd_free_skcomp(struct skd_device *skdev)
3443 {
3444         if (skdev->skcomp_table != NULL) {
3445                 skd_free_phys(skdev, &skdev->cq_dma_address);
3446         }
3447 
3448         skdev->skcomp_table = NULL;
3449 }
3450 
3451 /*
3452  *
3453  * Name:        skd_free_skmsg, deallocates skmsg table DMA resources.
3454  *
3455  * Inputs:      skdev           - device state structure.
3456  *
3457  * Returns:     Nothing.
3458  *
3459  */
3460 static void
3461 skd_free_skmsg(struct skd_device *skdev)
3462 {
3463         uint32_t                i;
3464 
3465         if (NULL == skdev->skmsg_table)
3466                 return;
3467 
3468         for (i = 0; i < skdev->num_fitmsg_context; i++) {
3469                 struct skd_fitmsg_context *skmsg;
3470 
3471                 skmsg = &skdev->skmsg_table[i];
3472 
3473                 if (skmsg->msg_buf != NULL) {
3474                         skd_free_phys(skdev, &skmsg->mb_dma_address);
3475                 }
3476 
3477 
3478                 skmsg->msg_buf = NULL;
3479         }
3480 
3481         kmem_free(skdev->skmsg_table, sizeof (struct skd_fitmsg_context) *
3482             skdev->num_fitmsg_context);
3483 
3484         skdev->skmsg_table = NULL;
3485 
3486 }
3487 
3488 /*
3489  *
3490  * Name:        skd_free_skreq, deallocates skspcl table DMA resources.
3491  *
3492  * Inputs:      skdev           - device state structure.
3493  *
3494  * Returns:     Nothing.
3495  *
3496  */
3497 static void
3498 skd_free_skreq(struct skd_device *skdev)
3499 {
3500         uint32_t i;
3501 
3502         if (NULL == skdev->skreq_table)
3503                 return;
3504 
3505         for (i = 0; i < skdev->num_req_context; i++) {
3506                 struct skd_request_context *skreq;
3507 
3508                 skreq = &skdev->skreq_table[i];
3509 
3510                 skd_free_sg_list(skdev, skreq->sksg_list,
3511                     skdev->sgs_per_request, skreq->sksg_dma_address);
3512 
3513                 skreq->sksg_list = NULL;
3514         }
3515 
3516         kmem_free(skdev->skreq_table, sizeof (struct skd_request_context) *
3517             skdev->num_req_context);
3518 
3519         skdev->skreq_table = NULL;
3520 
3521 }
3522 
3523 /*
3524  *
3525  * Name:        skd_free_sksb, deallocates skspcl data buf and
3526  *              msg buf DMA resources.
3527  *
3528  * Inputs:      skdev           - device state structure.
3529  *
3530  * Returns:     Nothing.
3531  *
3532  */
3533 static void
3534 skd_free_sksb(struct skd_device *skdev)
3535 {
3536         struct skd_special_context *skspcl;
3537 
3538         skspcl = &skdev->internal_skspcl;
3539 
3540         if (skspcl->data_buf != NULL) {
3541                 skd_free_phys(skdev, &skspcl->db_dma_address);
3542         }
3543 
3544         skspcl->data_buf = NULL;
3545 
3546         if (skspcl->msg_buf != NULL) {
3547                 skd_free_phys(skdev, &skspcl->mb_dma_address);
3548         }
3549 
3550         skspcl->msg_buf = NULL;
3551 
3552         skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
3553             skspcl->req.sksg_dma_address);
3554 
3555         skspcl->req.sksg_list = NULL;
3556 }
3557 
3558 /*
3559  *
3560  * Name:        skd_free_sg_list, deallocates S/G DMA resources.
3561  *
3562  * Inputs:      skdev           - device state structure.
3563  *              sg_list         - S/G list itself.
3564  *              n_sg            - number of segments.
3565  *              dma_addr        - S/G list DMA address.
3566  *
3567  * Returns:     Nothing.
3568  *
3569  */
3570 /* ARGSUSED */  /* Upstream common source with other platforms. */
3571 static void
3572 skd_free_sg_list(struct skd_device *skdev,
3573     struct fit_sg_descriptor *sg_list,
3574     uint32_t n_sg, dma_mem_t dma_addr)
3575 {
3576         if (sg_list != NULL) {
3577                 skd_free_phys(skdev, &dma_addr);
3578         }
3579 }
3580 
3581 /*
3582  *
3583  * Name:        skd_queue, queues the I/O request.
3584  *
3585  * Inputs:      skdev           - device state structure.
3586  *              pbuf            - I/O request
3587  *
3588  * Returns:     Nothing.
3589  *
3590  */
3591 static void
3592 skd_queue(skd_device_t *skdev, skd_buf_private_t *pbuf)
3593 {
3594         struct waitqueue *waitq;
3595 
3596         ASSERT(skdev != NULL);
3597         ASSERT(pbuf != NULL);
3598 
3599         ASSERT(WAITQ_LOCK_HELD(skdev));
3600 
3601         waitq = &skdev->waitqueue;
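     /*
      * Appending preserves FIFO order; inserting at the head of an
      * empty queue is equivalent to inserting at its tail.
      */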
3602 
3603         if (SIMPLEQ_EMPTY(waitq))
3604                 SIMPLEQ_INSERT_HEAD(waitq, pbuf, sq);
3605         else
3606                 SIMPLEQ_INSERT_TAIL(waitq, pbuf, sq);
3607 }
3608 
3609 /*
3610  *
3611  * Name:        skd_list_skreq, displays the skreq table entries.
3612  *
3613  * Inputs:      skdev           - device state structure.
3614  *              list            - flag, if true displays the entry address.
3615  *
3616  * Returns:     Returns number of skreq entries found.
3617  *
3618  */
3619 /* ARGSUSED */  /* Upstream common source with other platforms. */
3620 static int
3621 skd_list_skreq(skd_device_t *skdev, int list)
3622 {
3623         int     inx = 0;
3624         struct skd_request_context *skreq;
3625 
3626         if (list) {
3627                 Dcmn_err(CE_NOTE, "skreq_table[0]\n");
3628 
3629                 skreq = &skdev->skreq_table[0];
3630                 while (skreq) {
3631                         if (list)
3632                                 Dcmn_err(CE_NOTE,
3633                                     "%d: skreq=%p state=%d id=%x fid=%x "
3634                                     "pbuf=%p dir=%d comp=%d\n",
3635                                     inx, (void *)skreq, skreq->state,
3636                                     skreq->id, skreq->fitmsg_id,
3637                                     (void *)skreq->pbuf,
3638                                     skreq->sg_data_dir, skreq->did_complete);
3639                         inx++;
3640                         skreq = skreq->next;
3641                 }
3642         }
3643 
3644         inx = 0;
3645         skreq = skdev->skreq_free_list;
3646 
3647         if (list)
3648                 Dcmn_err(CE_NOTE, "skreq_free_list\n");
3649         while (skreq) {
3650                 if (list)
3651                         Dcmn_err(CE_NOTE, "%d: skreq=%p state=%d id=%x fid=%x "
3652                             "pbuf=%p dir=%d\n", inx, (void *)skreq,
3653                             skreq->state, skreq->id, skreq->fitmsg_id,
3654                             (void *)skreq->pbuf, skreq->sg_data_dir);
3655                 inx++;
3656                 skreq = skreq->next;
3657         }
3658 
3659         return (inx);
3660 }
3661 
3662 /*
3663  *
3664  * Name:        skd_list_skmsg, displays the skmsg table entries.
3665  *
3666  * Inputs:      skdev           - device state structure.
3667  *              list            - flag, if true displays the entry address.
3668  *
3669  * Returns:     Returns number of skmsg entries found.
3670  *
3671  */
3672 static int
3673 skd_list_skmsg(skd_device_t *skdev, int list)
3674 {
3675         int     inx = 0;
3676         struct skd_fitmsg_context *skmsgp;
3677 
3678         skmsgp = &skdev->skmsg_table[0];
3679 
3680         if (list) {
3681                 Dcmn_err(CE_NOTE, "skmsg_table[0]\n");
3682 
3683                 while (skmsgp) {
3684                         if (list)
3685                                 Dcmn_err(CE_NOTE, "%d: skmsgp=%p id=%x outs=%d "
3686                                     "l=%d o=%d nxt=%p\n", inx, (void *)skmsgp,
3687                                     skmsgp->id, skmsgp->outstanding,
3688                                     skmsgp->length, skmsgp->offset,
3689                                     (void *)skmsgp->next);
3690                         inx++;
3691                         skmsgp = skmsgp->next;
3692                 }
3693         }
3694 
3695         inx = 0;
3696         if (list)
3697                 Dcmn_err(CE_NOTE, "skmsg_free_list\n");
3698         skmsgp = skdev->skmsg_free_list;
3699         while (skmsgp) {
3700                 if (list)
3701                         Dcmn_err(CE_NOTE, "%d: skmsgp=%p id=%x outs=%d l=%d "
3702                             "o=%d nxt=%p\n",
3703                             inx, (void *)skmsgp, skmsgp->id,
3704                             skmsgp->outstanding, skmsgp->length,
3705                             skmsgp->offset, (void *)skmsgp->next);
3706                 inx++;
3707                 skmsgp = skmsgp->next;
3708         }
3709 
3710         return (inx);
3711 }
3712 
3713 /*
3714  *
3715  * Name:        skd_get_queued_pbuf, retrieves the top of queue entry
3716  *              and delinks it from the queue.
3717  *
3718  * Inputs:      skdev           - device state structure.
3720  *
3721  * Returns:     Returns the top of the job queue entry.
3722  *
3723  */
3724 static skd_buf_private_t *
3725 skd_get_queued_pbuf(skd_device_t *skdev)
3726 {
3727         skd_buf_private_t *pbuf;
3728 
3729         ASSERT(WAITQ_LOCK_HELD(skdev));
3730         pbuf = SIMPLEQ_FIRST(&skdev->waitqueue);
3731         if (pbuf != NULL)
3732                 SIMPLEQ_REMOVE_HEAD(&skdev->waitqueue, sq);
3733         return (pbuf);
3734 }
3735 
3736 /*
3737  * PCI DRIVER GLUE
3738  */
3739 
3740 /*
3741  *
3742  * Name:        skd_pci_info, logs certain device PCI info.
3743  *
3744  * Inputs:      skdev           - device state structure.
3745  *
3746  * Returns:     str, which contains the device speed info.
3747  *
3748  */
3749 static char *
3750 skd_pci_info(struct skd_device *skdev, char *str, size_t len)
3751 {
3752         int pcie_reg;
3753 
3754         str[0] = '\0';
3755 
3756         pcie_reg = skd_pci_find_capability(skdev, PCI_CAP_ID_EXP);
3757 
3758         if (pcie_reg) {
3759                 uint16_t lstat, lspeed, lwidth;
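             /*
              * The PCIe Link Status register sits at offset 0x12 within
              * the PCIe capability; the fields extracted below are the
              * negotiated link speed and width.
              */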
3760 
3761                 pcie_reg += 0x12;
3762                 lstat  = pci_config_get16(skdev->pci_handle, pcie_reg);
3763                 lspeed = lstat & (0xF);
3764                 lwidth = (lstat & 0x3F0) >> 4;
3765 
3766                 (void) snprintf(str, len, "PCIe (%s x%d)",
3767                     lspeed == 1 ? "2.5GT/s" :
3768                     lspeed == 2 ? "5.0GT/s" : "<unknown>",
3769                     lwidth);
3770         }
3771 
3772         return (str);
3773 }
3774 
3775 /*
3776  * MODULE GLUE
3777  */
3778 
3779 /*
3780  *
3781  * Name:        skd_init, initializes certain values.
3782  *
3783  * Inputs:      skdev           - device state structure.
3784  *
3785  * Returns:     Zero.
3786  *
3787  */
3788 /* ARGSUSED */  /* Upstream common source with other platforms. */
3789 static int
3790 skd_init(skd_device_t *skdev)
3791 {
3792         Dcmn_err(CE_NOTE, "skd_init: v%s-b%s\n", DRV_VERSION, DRV_BUILD_ID);
3793 
3794         if (skd_max_queue_depth < 1 ||
3795             skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
3796                 cmn_err(CE_NOTE, "skd_max_q_depth %d invalid, re-set to %d\n",
3797                     skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
3798                 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
3799         }
3800 
3801         if (skd_max_req_per_msg < 1 ||
                 skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) {
3802                 cmn_err(CE_NOTE, "skd_max_req_per_msg %d invalid, set to %d\n",
3803                     skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
3804                 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
3805         }
3806 
3807 
3808         if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
3809                 cmn_err(CE_NOTE, "skd_sgs_per_request %d invalid, set to %d\n",
3810                     skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
3811                 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
3812         }
3813 
3814         if (skd_dbg_level < 0 || skd_dbg_level > 2) {
3815                 cmn_err(CE_NOTE, "skd_dbg_level %d invalid, re-set to %d\n",
3816                     skd_dbg_level, 0);
3817                 skd_dbg_level = 0;
3818         }
3819 
3820         return (0);
3821 }
3822 
3823 /*
3824  *
3825  * Name:        skd_exit, exits the driver & logs the fact.
3826  *
3827  * Inputs:      none.
3828  *
3829  * Returns:     Nothing.
3830  *
3831  */
3832 static void
3833 skd_exit(void)
3834 {
3835         cmn_err(CE_NOTE, "skd v%s unloading", DRV_VERSION);
3836 }
3837 
3838 /*
3839  *
3840  * Name:        skd_drive_state_to_str, converts binary drive state
3841  *              to its corresponding string value.
3842  *
3843  * Inputs:      Drive state.
3844  *
3845  * Returns:     String representing drive state.
3846  *
3847  */
3848 const char *
3849 skd_drive_state_to_str(int state)
3850 {
3851         switch (state) {
3852         case FIT_SR_DRIVE_OFFLINE:      return ("OFFLINE");
3853         case FIT_SR_DRIVE_INIT:         return ("INIT");
3854         case FIT_SR_DRIVE_ONLINE:       return ("ONLINE");
3855         case FIT_SR_DRIVE_BUSY:         return ("BUSY");
3856         case FIT_SR_DRIVE_FAULT:        return ("FAULT");
3857         case FIT_SR_DRIVE_DEGRADED:     return ("DEGRADED");
3858         case FIT_SR_PCIE_LINK_DOWN:     return ("LINK_DOWN");
3859         case FIT_SR_DRIVE_SOFT_RESET:   return ("SOFT_RESET");
3860         case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: return ("NEED_FW");
3861         case FIT_SR_DRIVE_INIT_FAULT:   return ("INIT_FAULT");
3862         case FIT_SR_DRIVE_BUSY_SANITIZE: return ("BUSY_SANITIZE");
3863         case FIT_SR_DRIVE_BUSY_ERASE:   return ("BUSY_ERASE");
3864         case FIT_SR_DRIVE_FW_BOOTING:   return ("FW_BOOTING");
3865         default:                        return ("???");
3866         }
3867 }
3868 
3869 /*
3870  *
3871  * Name:        skd_skdev_state_to_str, converts binary driver state
3872  *              to its corresponding string value.
3873  *
3874  * Inputs:      Driver state.
3875  *
3876  * Returns:     String representing driver state.
3877  *
3878  */
3879 static const char *
3880 skd_skdev_state_to_str(enum skd_drvr_state state)
3881 {
3882         switch (state) {
3883         case SKD_DRVR_STATE_LOAD:       return ("LOAD");
3884         case SKD_DRVR_STATE_IDLE:       return ("IDLE");
3885         case SKD_DRVR_STATE_BUSY:       return ("BUSY");
3886         case SKD_DRVR_STATE_STARTING:   return ("STARTING");
3887         case SKD_DRVR_STATE_ONLINE:     return ("ONLINE");
3888         case SKD_DRVR_STATE_PAUSING:    return ("PAUSING");
3889         case SKD_DRVR_STATE_PAUSED:     return ("PAUSED");
3890         case SKD_DRVR_STATE_DRAINING_TIMEOUT: return ("DRAINING_TIMEOUT");
3891         case SKD_DRVR_STATE_RESTARTING: return ("RESTARTING");
3892         case SKD_DRVR_STATE_RESUMING:   return ("RESUMING");
3893         case SKD_DRVR_STATE_STOPPING:   return ("STOPPING");
3894         case SKD_DRVR_STATE_SYNCING:    return ("SYNCING");
3895         case SKD_DRVR_STATE_FAULT:      return ("FAULT");
3896         case SKD_DRVR_STATE_DISAPPEARED: return ("DISAPPEARED");
3897         case SKD_DRVR_STATE_BUSY_ERASE: return ("BUSY_ERASE");
3898         case SKD_DRVR_STATE_BUSY_SANITIZE: return ("BUSY_SANITIZE");
3899         case SKD_DRVR_STATE_BUSY_IMMINENT: return ("BUSY_IMMINENT");
3900         case SKD_DRVR_STATE_WAIT_BOOT:  return ("WAIT_BOOT");
3901 
3902         default:                        return ("???");
3903         }
3904 }
3905 
3906 /*
3907  *
3908  * Name:        skd_skmsg_state_to_str, converts binary msg state
3909  *              to its corresponding string value.
3910  *
3911  * Inputs:      Msg state.
3912  *
3913  * Returns:     String representing msg state.
3914  *
3915  */
3916 static const char *
3917 skd_skmsg_state_to_str(enum skd_fit_msg_state state)
3918 {
3919         switch (state) {
3920         case SKD_MSG_STATE_IDLE:        return ("IDLE");
3921         case SKD_MSG_STATE_BUSY:        return ("BUSY");
3922         default:                        return ("???");
3923         }
3924 }
3925 
3926 /*
3927  *
3928  * Name:        skd_skreq_state_to_str, converts binary req state
3929  *              to its corresponding string value.
3930  *
3931  * Inputs:      Req state.
3932  *
3933  * Returns:     String representing req state.
3934  *
3935  */
3936 static const char *
3937 skd_skreq_state_to_str(enum skd_req_state state)
3938 {
3939         switch (state) {
3940         case SKD_REQ_STATE_IDLE:        return ("IDLE");
3941         case SKD_REQ_STATE_SETUP:       return ("SETUP");
3942         case SKD_REQ_STATE_BUSY:        return ("BUSY");
3943         case SKD_REQ_STATE_COMPLETED:   return ("COMPLETED");
3944         case SKD_REQ_STATE_TIMEOUT:     return ("TIMEOUT");
3945         case SKD_REQ_STATE_ABORTED:     return ("ABORTED");
3946         default:                        return ("???");
3947         }
3948 }
3949 
3950 /*
3951  *
3952  * Name:        skd_log_skdev, logs device state & parameters.
3953  *
3954  * Inputs:      skdev           - device state structure.
3955  *              event           - event (string) to log.
3956  *
3957  * Returns:     Nothing.
3958  *
3959  */
3960 static void
3961 skd_log_skdev(struct skd_device *skdev, const char *event)
3962 {
3963         Dcmn_err(CE_NOTE, "log_skdev(%s) skdev=%p event='%s'",
3964             skdev->name, (void *)skdev, event);
3965         Dcmn_err(CE_NOTE, "  drive_state=%s(%d) driver_state=%s(%d)",
3966             skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
3967             skd_skdev_state_to_str(skdev->state), skdev->state);
3968         Dcmn_err(CE_NOTE, "  busy=%d limit=%d soft=%d hard=%d lowat=%d",
3969             skdev->queue_depth_busy, skdev->queue_depth_limit,
3970             skdev->soft_queue_depth_limit, skdev->hard_queue_depth_limit,
3971             skdev->queue_depth_lowat);
3972         Dcmn_err(CE_NOTE, "  timestamp=0x%x cycle=%d cycle_ix=%d",
3973             skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
3974 }
3975 
3976 /*
3977  *
3978  * Name:        skd_log_skmsg, logs the skmsg event.
3979  *
3980  * Inputs:      skdev           - device state structure.
3981  *              skmsg           - FIT message structure.
3982  *              event           - event string to log.
3983  *
3984  * Returns:     Nothing.
3985  *
3986  */
3987 static void
3988 skd_log_skmsg(struct skd_device *skdev,
3989     struct skd_fitmsg_context *skmsg, const char *event)
3990 {
3991         Dcmn_err(CE_NOTE, "log_skmsg:(%s) skmsg=%p event='%s'",
3992             skdev->name, (void *)skmsg, event);
3993         Dcmn_err(CE_NOTE, "  state=%s(%d) id=0x%04x length=%d",
3994             skd_skmsg_state_to_str(skmsg->state), skmsg->state,
3995             skmsg->id, skmsg->length);
3996 }
3997 
3998 /*
3999  *
4000  * Name:        skd_log_skreq, logs the skreq event.
4001  *
4002  * Inputs:      skdev           - device state structure.
4003  *              skreq           - skreq structure.
4004  *              event           - event string to log.
4005  *
4006  * Returns:     Nothing.
4007  *
4008  */
4009 static void
4010 skd_log_skreq(struct skd_device *skdev,
4011     struct skd_request_context *skreq, const char *event)
4012 {
4013         skd_buf_private_t *pbuf;
4014 
4015         Dcmn_err(CE_NOTE, "log_skreq: (%s) skreq=%p pbuf=%p event='%s'",
4016             skdev->name, (void *)skreq, (void *)skreq->pbuf, event);
4017 
4018         Dcmn_err(CE_NOTE, "  state=%s(%d) id=0x%04x fitmsg=0x%04x",
4019             skd_skreq_state_to_str(skreq->state), skreq->state,
4020             skreq->id, skreq->fitmsg_id);
4021         Dcmn_err(CE_NOTE, "  timo=0x%x sg_dir=%d n_sg=%d",
4022             skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
4023 
4024         if ((pbuf = skreq->pbuf) != NULL) {
4025                 uint32_t lba, count;
4026                 lba = pbuf->x_xfer->x_blkno;
4027                 count = pbuf->x_xfer->x_nblks;
4028                 Dcmn_err(CE_NOTE, "  pbuf=%p lba=%u(0x%x) count=%u(0x%x) ",
4029                     (void *)pbuf, lba, lba, count, count);
4030                 Dcmn_err(CE_NOTE, "  dir=%s "
4031                     " intrs=%" PRId64 " qdepth=%d",
4032                     (pbuf->dir & B_READ) ? "Read" : "Write",
4033                     skdev->intr_cntr, skdev->queue_depth_busy);
4034         } else {
4035                 Dcmn_err(CE_NOTE, "  pbuf=NULL\n");
4036         }
4037 }
4038 
4039 /*
4040  *
4041  * Name:        skd_init_mutex, initializes all mutexes.
4042  *
4043  * Inputs:      skdev           - device state structure.
4044  *
4045  * Returns:     DDI_FAILURE on failure otherwise DDI_SUCCESS.
4046  *
4047  */
4048 static int
4049 skd_init_mutex(skd_device_t *skdev)
4050 {
4051         void    *intr;
4052 
4053         Dcmn_err(CE_CONT, "(%s%d): init_mutex flags=%x", DRV_NAME,
4054             skdev->instance, skdev->flags);
4055 
4056         intr = (void *)(uintptr_t)skdev->intr_pri;
4057 
4058         if (skdev->flags & SKD_MUTEX_INITED)
4059                 cmn_err(CE_NOTE, "init_mutex: mutexes already initialized");
4060 
4061         /* mutexes to protect the adapter state structure. */
4062         mutex_init(&skdev->skd_lock_mutex, NULL, MUTEX_DRIVER,
4063             DDI_INTR_PRI(intr));
4064         mutex_init(&skdev->skd_intr_mutex, NULL, MUTEX_DRIVER,
4065             DDI_INTR_PRI(intr));
4066         mutex_init(&skdev->waitqueue_mutex, NULL, MUTEX_DRIVER,
4067             DDI_INTR_PRI(intr));
4068         mutex_init(&skdev->skd_internalio_mutex, NULL, MUTEX_DRIVER,
4069             DDI_INTR_PRI(intr));
4070 
4071         cv_init(&skdev->cv_waitq, NULL, CV_DRIVER, NULL);
4072 
4073         skdev->flags |= SKD_MUTEX_INITED;
4074         if (skdev->flags & SKD_MUTEX_DESTROYED)
4075                 skdev->flags &= ~SKD_MUTEX_DESTROYED;
4076 
4077         Dcmn_err(CE_CONT, "init_mutex (%s%d): done, flags=%x", DRV_NAME,
4078             skdev->instance, skdev->flags);
4079 
4080         return (DDI_SUCCESS);
4081 }
4082 
4083 /*
4084  *
4085  * Name:        skd_destroy_mutex, destroys all mutexes.
4086  *
4087  * Inputs:      skdev           - device state structure.
4088  *
4089  * Returns:     Nothing.
4090  *
4091  */
4092 static void
4093 skd_destroy_mutex(skd_device_t *skdev)
4094 {
4095         if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) {
4096                 if (skdev->flags & SKD_MUTEX_INITED) {
4097                         mutex_destroy(&skdev->waitqueue_mutex);
4098                         mutex_destroy(&skdev->skd_intr_mutex);
4099                         mutex_destroy(&skdev->skd_lock_mutex);
4100                         mutex_destroy(&skdev->skd_internalio_mutex);
4101 
4102                         cv_destroy(&skdev->cv_waitq);
4103 
4104                         skdev->flags |= SKD_MUTEX_DESTROYED;
4105 
4106                         if (skdev->flags & SKD_MUTEX_INITED)
4107                                 skdev->flags &= ~SKD_MUTEX_INITED;
4108                 }
4109         }
4110 }
4111 
4112 /*
4113  *
4114  * Name:        skd_setup_intr, sets up the interrupt handling.
4115  *
4116  * Inputs:      skdev           - device state structure.
4117  *              intr_type       - requested DDI interrupt type.
4118  *
4119  * Returns:     DDI_FAILURE on failure otherwise DDI_SUCCESS.
4120  *
4121  */
4122 static int
4123 skd_setup_intr(skd_device_t *skdev, int intr_type)
4124 {
4125         int32_t         count = 0;
4126         int32_t         avail = 0;
4127         int32_t         actual = 0;
4128         int32_t         ret;
4129         uint32_t        i;
4130 
4131         Dcmn_err(CE_CONT, "(%s%d): setup_intr", DRV_NAME, skdev->instance);
4132 
4133         /* Get number of interrupts the platform h/w supports */
4134         if (((ret = ddi_intr_get_nintrs(skdev->dip, intr_type, &count)) !=
4135             DDI_SUCCESS) || count == 0) {
4136                 cmn_err(CE_WARN, "!intr_setup failed, nintrs ret=%xh, cnt=%xh",
4137                     ret, count);
4138 
4139                 return (DDI_FAILURE);
4140         }
4141 
4142         /* Get number of available system interrupts */
4143         if (((ret = ddi_intr_get_navail(skdev->dip, intr_type, &avail)) !=
4144             DDI_SUCCESS) || avail == 0) {
4145                 cmn_err(CE_WARN, "!intr_setup failed, navail ret=%xh, "
4146                     "avail=%xh", ret, avail);
4147 
4148                 return (DDI_FAILURE);
4149         }
4150 
4151         if (intr_type == DDI_INTR_TYPE_MSIX && avail < SKD_MSIX_MAXAIF) {
4152                 cmn_err(CE_WARN, "!intr_setup failed, min MSI-X h/w vectors "
4153                     "req'd: %d, avail: %d",
4154                     SKD_MSIX_MAXAIF, avail);
4155 
4156                 return (DDI_FAILURE);
4157         }
4158 
4159         /* Allocate space for interrupt handles */
4160         skdev->hsize = sizeof (ddi_intr_handle_t) * avail;
4161         skdev->htable = kmem_zalloc(skdev->hsize, KM_SLEEP);
4162 
4163         /* Allocate the interrupts */
4164         if ((ret = ddi_intr_alloc(skdev->dip, skdev->htable, intr_type,
4165             0, count, &actual, 0)) != DDI_SUCCESS) {
4166                 cmn_err(CE_WARN, "!intr_setup failed, intr_alloc ret=%xh, "
4167                     "count = %xh, " "actual=%xh", ret, count, actual);
4168 
4169                 skd_release_intr(skdev);
4170 
4171                 return (DDI_FAILURE);
4172         }
4173 
4174         skdev->intr_cnt = actual;
4175 
4176         if (intr_type == DDI_INTR_TYPE_FIXED)
4177                 (void) ddi_intr_set_pri(skdev->htable[0], 10);
4178 
4179         /* Get interrupt priority */
4180         if ((ret = ddi_intr_get_pri(skdev->htable[0], &skdev->intr_pri)) !=
4181             DDI_SUCCESS) {
4182                 cmn_err(CE_WARN, "!intr_setup failed, get_pri ret=%xh", ret);
4183                 skd_release_intr(skdev);
4184 
4185                 return (ret);
4186         }
4187 
4188         /* Add the interrupt handlers */
4189         for (i = 0; i < actual; i++) {
4190                 if ((ret = ddi_intr_add_handler(skdev->htable[i],
4191                     skd_isr_aif, (void *)skdev, (void *)((ulong_t)i))) !=
4192                     DDI_SUCCESS) {
4193                         cmn_err(CE_WARN, "!intr_setup failed, addh#=%xh, "
4194                             "act=%xh, ret=%xh", i, actual, ret);
4195                         skd_release_intr(skdev);
4196 
4197                         return (ret);
4198                 }
4199         }
4200 
4201         /* Setup mutexes */
4202         if ((ret = skd_init_mutex(skdev)) != DDI_SUCCESS) {
4203                 cmn_err(CE_WARN, "!intr_setup failed, mutex init ret=%xh", ret);
4204                 skd_release_intr(skdev);
4205 
4206                 return (ret);
4207         }
4208 
4209         /* Get the capabilities */
4210         (void) ddi_intr_get_cap(skdev->htable[0], &skdev->intr_cap);
4211 
4212         /* Enable interrupts */
4213         if (skdev->intr_cap & DDI_INTR_FLAG_BLOCK) {
4214                 if ((ret = ddi_intr_block_enable(skdev->htable,
4215                     skdev->intr_cnt)) != DDI_SUCCESS) {
4216                         cmn_err(CE_WARN, "!failed, intr_setup block enable, "
4217                             "ret=%xh", ret);
4218                         skd_destroy_mutex(skdev);
4219                         skd_release_intr(skdev);
4220 
4221                         return (ret);
4222                 }
4223         } else {
4224                 for (i = 0; i < skdev->intr_cnt; i++) {
4225                         if ((ret = ddi_intr_enable(skdev->htable[i])) !=
4226                             DDI_SUCCESS) {
4227                                 cmn_err(CE_WARN, "!intr_setup failed, "
4228                                     "intr enable, ret=%xh", ret);
4229                                 skd_destroy_mutex(skdev);
4230                                 skd_release_intr(skdev);
4231 
4232                                 return (ret);
4233                         }
4234                 }
4235         }
4236 
4237         if (intr_type == DDI_INTR_TYPE_FIXED)
4238                 (void) ddi_intr_clr_mask(skdev->htable[0]);
4239 
4240         skdev->irq_type = intr_type;
4241 
4242         return (DDI_SUCCESS);
4243 }
4244 
4245 /*
4246  *
4247  * Name:        skd_disable_intr, disables interrupt handling.
4248  *
4249  * Inputs:      skdev           - device state structure.
4250  *
4251  * Returns:     Nothing.
4252  *
4253  */
4254 static void
4255 skd_disable_intr(skd_device_t *skdev)
4256 {
4257         uint32_t        i, rval;
4258 
4259         if (skdev->intr_cap & DDI_INTR_FLAG_BLOCK) {
4260                 /* Remove AIF block interrupts (MSI/MSI-X) */
4261                 if ((rval = ddi_intr_block_disable(skdev->htable,
4262                     skdev->intr_cnt)) != DDI_SUCCESS) {
4263                         cmn_err(CE_WARN, "!failed intr block disable, rval=%x",
4264                             rval);
4265                 }
4266         } else {
4267                 /* Remove AIF non-block interrupts (fixed).  */
4268                 for (i = 0; i < skdev->intr_cnt; i++) {
4269                         if ((rval = ddi_intr_disable(skdev->htable[i])) !=
4270                             DDI_SUCCESS) {
4271                                 cmn_err(CE_WARN, "!failed intr disable, "
4272                                     "intr#=%xh, " "rval=%xh", i, rval);
4273                         }
4274                 }
4275         }
4276 }
4277 
4278 /*
4279  *
4280  * Name:        skd_release_intr, releases interrupt handles and resources.
4281  *
4282  * Inputs:      skdev           - device state structure.
4283  *
4284  * Returns:     Nothing.
4285  *
4286  */
4287 static void
4288 skd_release_intr(skd_device_t *skdev)
4289 {
4290         int32_t         i;
4291         int             rval;
4292 
4293 
4294         Dcmn_err(CE_CONT, "REL_INTR intr_cnt=%d", skdev->intr_cnt);
4295 
4296         if (skdev->irq_type == 0) {
4297                 Dcmn_err(CE_CONT, "release_intr: (%s%d): done",
4298                     DRV_NAME, skdev->instance);
4299                 return;
4300         }
4301 
4302         if (skdev->htable != NULL && skdev->hsize > 0) {
4303                 i = (int32_t)skdev->hsize / (int32_t)sizeof (ddi_intr_handle_t);
4304 
4305                 while (i-- > 0) {
4306                         if (skdev->htable[i] == 0) {
4307                                 Dcmn_err(CE_NOTE, "htable[%x]=0h", i);
4308                                 continue;
4309                         }
4310 
4311                         if ((rval = ddi_intr_disable(skdev->htable[i])) !=
4312                             DDI_SUCCESS)
4313                                 Dcmn_err(CE_NOTE, "release_intr: intr_disable "
4314                                     "htable[%d], rval=%d", i, rval);
4315 
4316                         if (i < skdev->intr_cnt) {
4317                                 if ((rval = ddi_intr_remove_handler(
4318                                     skdev->htable[i])) != DDI_SUCCESS)
4319                                         cmn_err(CE_WARN, "!release_intr: "
4320                                             "intr_remove_handler FAILED, "
4321                                             "rval=%d", rval);
4322 
4323                                 Dcmn_err(CE_NOTE, "release_intr: "
4324                                     "remove_handler htable[%d]", i);
4325                         }
4326 
4327                         if ((rval = ddi_intr_free(skdev->htable[i])) !=
4328                             DDI_SUCCESS)
4329                                 cmn_err(CE_WARN, "!release_intr: intr_free "
4330                                     "FAILED, rval=%d", rval);
4331                         Dcmn_err(CE_NOTE, "release_intr: intr_free htable[%d]",
4332                             i);
4333                 }
4334 
4335                 kmem_free(skdev->htable, skdev->hsize);
4336                 skdev->htable = NULL;
4337         }
4338 
4339         skdev->hsize    = 0;
4340         skdev->intr_cnt = 0;
4341         skdev->intr_pri = 0;
4342         skdev->intr_cap = 0;
4343         skdev->irq_type = 0;
4344 }
4345 
4346 /*
4347  *
4348  * Name:        skd_dealloc_resources, deallocate resources allocated
4349  *              during attach.
4350  *
4351  * Inputs:      dip             - DDI device info pointer.
4352  *              skdev           - device state structure.
4353  *              seq             - bit flags representing allocated items.
4354  *              instance        - device instance.
4355  *
4356  * Returns:     Nothing.
4357  *
4358  */
4359 /* ARGSUSED */  /* Upstream common source with other platforms. */
4360 static void
4361 skd_dealloc_resources(dev_info_t *dip, skd_device_t *skdev,
4362     uint32_t seq, int instance)
4363 {
4364 
4365         if (skdev == NULL)
4366                 return;
4367 
4368         if (seq & SKD_CONSTRUCTED)
4369                 skd_destruct(skdev);
4370 
4371         if (seq & SKD_INTR_ADDED) {
4372                 skd_disable_intr(skdev);
4373                 skd_release_intr(skdev);
4374         }
4375 
4376         if (seq & SKD_DEV_IOBASE_MAPPED)
4377                 ddi_regs_map_free(&skdev->dev_handle);
4378 
4379         if (seq & SKD_IOMAP_IOBASE_MAPPED)
4380                 ddi_regs_map_free(&skdev->iomap_handle);
4381 
4382         if (seq & SKD_REGS_MAPPED)
4383                 ddi_regs_map_free(&skdev->iobase_handle);
4384 
4385         if (seq & SKD_CONFIG_SPACE_SETUP)
4386                 pci_config_teardown(&skdev->pci_handle);
4387 
4388         if (seq & SKD_SOFT_STATE_ALLOCED)  {
4389                 if (skdev->pathname &&
4390                     (skdev->flags & SKD_PATHNAME_ALLOCED)) {
4391                         kmem_free(skdev->pathname,
4392                             strlen(skdev->pathname)+1);
4393                 }
4394         }
4395 
4396         if (skdev->s1120_devid)
4397                 ddi_devid_free(skdev->s1120_devid);
4398 }
4399 
4400 /*
4401  *
4402  * Name:        skd_setup_interrupts, sets up the appropriate interrupt
4403  *              type: MSI-X, MSI, or fixed.
4404  *
4405  * Inputs:      skdev           - device state structure.
4406  *
4407  * Returns:     DDI_FAILURE on failure otherwise DDI_SUCCESS.
4408  *
4409  */
4410 static int
4411 skd_setup_interrupts(skd_device_t *skdev)
4412 {
4413         int32_t         rval = DDI_FAILURE;
4414         int32_t         i;
4415         int32_t         itypes = 0;
4416 
4417         /*
4418          * See what types of interrupts this adapter and platform support
4419          */
4420         if ((i = ddi_intr_get_supported_types(skdev->dip, &itypes)) !=
4421             DDI_SUCCESS) {
4422                 cmn_err(CE_NOTE, "intr supported types failed, rval=%xh", i);
4423                 return (DDI_FAILURE);
4424         }
4425 
4426         Dcmn_err(CE_NOTE, "%s:supported interrupts types: %x",
4427             skdev->name, itypes);
4428 
4429         itypes &= skdev->irq_type;
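     /*
      * Mask down to the types permitted by the "intr-type-cap"
      * property (skd_isr_type); the default of -1 allows all types.
      */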
4430 
4431         if (!skd_disable_msix && (itypes & DDI_INTR_TYPE_MSIX) &&
4432             (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_MSIX)) == DDI_SUCCESS) {
4433                 cmn_err(CE_NOTE, "!%s: successful MSI-X setup",
4434                     skdev->name);
4435         } else if (!skd_disable_msi && (itypes & DDI_INTR_TYPE_MSI) &&
4436             (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_MSI)) == DDI_SUCCESS) {
4437                 cmn_err(CE_NOTE, "!%s: successful MSI setup",
4438                     skdev->name);
4439         } else if ((itypes & DDI_INTR_TYPE_FIXED) &&
4440             (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_FIXED))
4441             == DDI_SUCCESS) {
4442                 cmn_err(CE_NOTE, "!%s: successful fixed intr setup",
4443                     skdev->name);
4444         } else {
4445                 cmn_err(CE_WARN, "!%s: no supported interrupt types",
4446                     skdev->name);
4447                 return (DDI_FAILURE);
4448         }
4449 
4450         Dcmn_err(CE_CONT, "%s: setup interrupts done", skdev->name);
4451 
4452         return (rval);
4453 }
4454 
4455 /*
4456  *
4457  * Name:        skd_get_properties, retrieves properties from skd.conf.
4458  *
4459  * Inputs:      skdev           - device state structure.
4460  *              dip             - dev_info data structure.
4461  *
4462  * Returns:     Nothing.
4463  *
4464  */
4465 /* ARGSUSED */  /* Upstream common source with other platforms. */
4466 static void
4467 skd_get_properties(dev_info_t *dip, skd_device_t *skdev)
4468 {
4469         int     prop_value;
4470 
4471         skd_isr_type =  ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4472             "intr-type-cap", -1);
4473 
4474         prop_value =  ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4475             "max-scsi-reqs", -1);
4476         if (prop_value >= 1 && prop_value <= SKD_MAX_QUEUE_DEPTH)
4477                 skd_max_queue_depth = prop_value;
4478 
4479         prop_value =  ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4480             "max-scsi-reqs-per-msg", -1);
4481         if (prop_value >= 1 && prop_value <= SKD_MAX_REQ_PER_MSG)
4482                 skd_max_req_per_msg = prop_value;
4483 
4484         prop_value =  ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4485             "max-sgs-per-req", -1);
4486         if (prop_value >= 1 && prop_value <= SKD_MAX_N_SG_PER_REQ)
4487                 skd_sgs_per_request = prop_value;
4488 
4489         prop_value =  ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4490             "dbg-level", -1);
4491         if (prop_value >= 1 && prop_value <= 2)
4492                 skd_dbg_level = prop_value;
4493 }
4494 
4495 /*
4496  *
4497  * Name:        skd_wait_for_s1120, waits for the device to finish
4498  *              its initialization.
4499  *
4500  * Inputs:      skdev           - device state structure.
4501  *
4502  * Returns:     DDI_SUCCESS or DDI_FAILURE.
4503  *
4504  */
4505 static int
4506 skd_wait_for_s1120(skd_device_t *skdev)
4507 {
4508         clock_t cur_ticks, tmo;
4509         int     loop_cntr = 0;
4510         int     rc = DDI_FAILURE;
4511 
4512         mutex_enter(&skdev->skd_internalio_mutex);
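     /*
      * The loop below waits up to roughly ten one-second cv_timedwait()
      * intervals for the drive to come online (gendisk_on).
      */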
4513 
4514         while (skdev->gendisk_on == 0) {
4515                 cur_ticks = ddi_get_lbolt();
4516                 tmo = cur_ticks + drv_usectohz(MICROSEC);
4517                 if (cv_timedwait(&skdev->cv_waitq,
4518                     &skdev->skd_internalio_mutex, tmo) == -1) {
4519                         /* Oops - timed out */
4520                         if (loop_cntr++ > 10)
4521                                 break;
4522                 }
4523         }
4524 
4525         mutex_exit(&skdev->skd_internalio_mutex);
4526 
4527         if (skdev->gendisk_on == 1)
4528                 rc = DDI_SUCCESS;
4529 
4530         return (rc);
4531 }
4532 
4533 /*
4534  *
4535  * Name:        skd_update_props, updates certain device properties.
4536  *
4537  * Inputs:      skdev           - device state structure.
4538  *              dip             - dev info structure
4539  *
4540  * Returns:     Nothing.
4541  *
4542  */
4543 static void
4544 skd_update_props(skd_device_t *skdev, dev_info_t *dip)
4545 {
4546         int     blksize = 512;
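     /*
      * These standard block-device properties expose the device size;
      * the driver reports a fixed 512-byte logical block size.
      */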
4547 
4548         if ((ddi_prop_update_int64(DDI_DEV_T_NONE, dip, "device-nblocks",
4549             skdev->Nblocks) != DDI_SUCCESS) ||
4550             (ddi_prop_update_int(DDI_DEV_T_NONE,   dip, "device-blksize",
4551             blksize) != DDI_SUCCESS)) {
4552                 cmn_err(CE_NOTE, "%s: FAILED to create driver properties",
4553                     skdev->name);
4554         }
4555 }
4556 
4557 /*
4558  *
4559  * Name:        skd_setup_devid, sets up device ID info.
4560  *
4561  * Inputs:      skdev           - device state structure.
4562  *              devid           - Device ID for the DDI.
4563  *
4564  * Returns:     DDI_SUCCESS or DDI_FAILURE.
4565  *
4566  */
4567 static int
4568 skd_setup_devid(skd_device_t *skdev, ddi_devid_t *devid)
4569 {
4570         int  rc, sz_model, sz_sn, sz;
4571 
4572         sz_model = scsi_ascii_inquiry_len(skdev->inq_product_id,
4573             strlen(skdev->inq_product_id));
4574         sz_sn = scsi_ascii_inquiry_len(skdev->inq_serial_num,
4575             strlen(skdev->inq_serial_num));
4576         sz = sz_model + sz_sn + 1;
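     /*
      * The devid string formatted below is "<product-id>=<serial-number>";
      * sz accounts for the '=' separator between the two trimmed fields.
      */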
4577 
4578         (void) snprintf(skdev->devid_str, sizeof (skdev->devid_str),
4579             "%.*s=%.*s", sz_model, skdev->inq_product_id, sz_sn,
4580             skdev->inq_serial_num);
4581         rc = ddi_devid_init(skdev->dip, DEVID_SCSI_SERIAL, sz,
4582             skdev->devid_str, devid);
4583 
4584         if (rc != DDI_SUCCESS)
4585                 cmn_err(CE_WARN, "!%s: devid_init FAILED", skdev->name);
4586 
4587         return (rc);
4588 
4589 }
4590 
4591 /*
4592  *
4593  * Name:        skd_bd_attach, attaches to the blkdev driver
4594  *
4595  * Inputs:      skdev           - device state structure.
4596  *              dip             - device info structure.
4597  *
4598  * Returns:     DDI_SUCCESS or DDI_FAILURE.
4599  *
4600  */
4601 static int
4602 skd_bd_attach(dev_info_t *dip, skd_device_t *skdev)
4603 {
4604         int             rv;
4605 
4606         skdev->s_bdh = bd_alloc_handle(skdev, &skd_bd_ops,
4607             &skd_64bit_io_dma_attr, KM_SLEEP);
4608 
4609         if (skdev->s_bdh == NULL) {
4610                 cmn_err(CE_WARN, "!skd_bd_attach: FAILED");
4611 
4612                 return (DDI_FAILURE);
4613         }
4614 
4615         rv = bd_attach_handle(dip, skdev->s_bdh);
4616 
4617         if (rv != DDI_SUCCESS) {
4618                 cmn_err(CE_WARN, "!bd_attach_handle FAILED\n");
4619         } else {
4620                 Dcmn_err(CE_NOTE, "bd_attach_handle OK\n");
4621                 skdev->bd_attached++;
4622         }
4623 
4624         return (rv);
4625 }
4626 
4627 /*
4628  *
4629  * Name:        skd_bd_detach, detaches from the blkdev driver.
4630  *
4631  * Inputs:      skdev           - device state structure.
4632  *
4633  * Returns:     Nothing.
4634  *
4635  */
4636 static void
4637 skd_bd_detach(skd_device_t *skdev)
4638 {
4639         if (skdev->bd_attached)
4640                 (void) bd_detach_handle(skdev->s_bdh);
4641 
4642         bd_free_handle(skdev->s_bdh);
4643 }
4644 
4645 /*
4646  *
4647  * Name:        skd_attach, attaches the skd device driver
4648  *
4649  * Inputs:      dip             - device info structure.
4650  *              cmd             - DDI attach argument (ATTACH, RESUME, etc.)
4651  *
4652  * Returns:     DDI_SUCCESS or DDI_FAILURE.
4653  *
4654  */
4655 static int
4656 skd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
4657 {
4658         int                     instance;
4659         int                     nregs;
4660         skd_device_t            *skdev = NULL;
4661         int                     inx;
4662         uint16_t                cmd_reg;
4663         int                     progress = 0;
4664         char                    name[MAXPATHLEN];
4665         off_t                   regsize;
4666         char                    pci_str[32];
4667         char                    fw_version[8];
4668 
4669         instance = ddi_get_instance(dip);
4670 
4671         (void) ddi_get_parent_data(dip);
4672 
4673         switch (cmd) {
4674         case DDI_ATTACH:
4675                 break;
4676 
4677         case DDI_RESUME:
4678                 /* Look up the soft state before re-enabling the timer. */
4679                 skdev = ddi_get_soft_state(skd_state, instance);
4680                 if (skdev == NULL)
4681                         return (DDI_FAILURE);

                     skd_start_timer(skdev);

                     return (DDI_SUCCESS);
4682 
4683         default:
4684                 return (DDI_FAILURE);
4685         }
4686 
4687         Dcmn_err(CE_NOTE, "sTec S1120 Driver v%s Instance: %d",
4688             VERSIONSTR, instance);
4689 
4690         /*
4691          * Check that hardware is installed in a DMA-capable slot
4692          */
4693         if (ddi_slaveonly(dip) == DDI_SUCCESS) {
4694                 cmn_err(CE_WARN, "!%s%d: installed in a slot "
4695                     "that isn't DMA-capable", DRV_NAME, instance);
4696                 return (DDI_FAILURE);
4697         }
4698 
4699         /*
4700          * No support for high-level interrupts
4701          */
4702         if (ddi_intr_hilevel(dip, 0) != 0) {
4703                 cmn_err(CE_WARN, "!%s%d: High level interrupt not supported",
4704                     DRV_NAME, instance);
4705                 return (DDI_FAILURE);
4706         }
4707 
4708         /*
4709          * Allocate our per-device-instance structure
4710          */
4711         if (ddi_soft_state_zalloc(skd_state, instance) !=
4712             DDI_SUCCESS) {
4713                 cmn_err(CE_WARN, "!%s%d: soft state zalloc failed",
4714                     DRV_NAME, instance);
4715                 return (DDI_FAILURE);
4716         }
4717 
4718         progress |= SKD_SOFT_STATE_ALLOCED;
4719 
4720         skdev = ddi_get_soft_state(skd_state, instance);
4721         if (skdev == NULL) {
4722                 cmn_err(CE_WARN, "!%s%d: Unable to get soft state structure",
4723                     DRV_NAME, instance);
4724                 goto skd_attach_failed;
4725         }
4726 
4727         (void) snprintf(skdev->name, sizeof (skdev->name),
4728             DRV_NAME "%d", instance);
4729 
4730         skdev->dip      = dip;
4731         skdev->instance = instance;
4732 
4733         ddi_set_driver_private(dip, skdev);
4734 
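             /*
              * Fetch the full device pathname and strip any trailing
              * ",function" suffix before saving it as the adapter pathname.
              */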
4735         (void) ddi_pathname(dip, name);
4736         for (inx = strlen(name); inx; inx--) {
4737                 if (name[inx] == ',') {
4738                         name[inx] = '\0';
4739                         break;
4740                 }
4741                 if (name[inx] == '@') {
4742                         break;
4743                 }
4744         }
4745 
4746         skdev->pathname = kmem_zalloc(strlen(name) + 1, KM_SLEEP);
4747         (void) strlcpy(skdev->pathname, name, strlen(name) + 1);
4748 
4749         progress |= SKD_PATHNAME_ALLOCED;
4750         skdev->flags |= SKD_PATHNAME_ALLOCED;
4751 
4752         if (pci_config_setup(dip, &skdev->pci_handle) != DDI_SUCCESS) {
4753                 cmn_err(CE_WARN, "!%s%d: pci_config_setup FAILED",
4754                     DRV_NAME, instance);
4755                 goto skd_attach_failed;
4756         }
4757 
4758         progress |= SKD_CONFIG_SPACE_SETUP;
4759 
4760         /* Determine the number of register sets. */
4761 
4762         (void) ddi_dev_nregs(dip, &nregs);
4763 
4764         /*
4765          *      0x0   Configuration Space
4766          *      0x1   I/O Space
4767          *      0x2   s1120 register space
4768          */
4769         if (ddi_dev_regsize(dip, 1, &regsize) != DDI_SUCCESS ||
4770             ddi_regs_map_setup(dip, 1, &skdev->iobase, 0, regsize,
4771             &dev_acc_attr, &skdev->iobase_handle) != DDI_SUCCESS) {
4772                 cmn_err(CE_WARN, "!%s%d: regs_map_setup(mem) failed",
4773                     DRV_NAME, instance);
4774                 goto skd_attach_failed;
4775         }
4776         progress |= SKD_REGS_MAPPED;
4777 
4778         skdev->iomap_iobase = skdev->iobase;
4779         skdev->iomap_handle = skdev->iobase_handle;
4780 
4781         Dcmn_err(CE_NOTE, "%s: PCI iobase=%ph, iomap=%ph, regnum=%d, "
4782             "regsize=%ld", skdev->name, (void *)skdev->iobase,
4783             (void *)skdev->iomap_iobase, 1, regsize);
4784 
4785         if (ddi_dev_regsize(dip, 2, &regsize) != DDI_SUCCESS ||
4786             ddi_regs_map_setup(dip, 2, &skdev->dev_iobase, 0, regsize,
4787             &dev_acc_attr, &skdev->dev_handle) != DDI_SUCCESS) {
4788                 cmn_err(CE_WARN, "!%s%d: regs_map_setup(mem) failed",
4789                     DRV_NAME, instance);
4790 
4791                 goto skd_attach_failed;
4792         }
4793 
4794         skdev->dev_memsize = (int)regsize;
4795 
4796         Dcmn_err(CE_NOTE, "%s: DEV iobase=%ph regsize=%d",
4797             skdev->name, (void *)skdev->dev_iobase,
4798             skdev->dev_memsize);
4799 
4800         progress |= SKD_DEV_IOBASE_MAPPED;
4801 
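             /*
              * Enable bus mastering and mask legacy INTx assertion; parity
              * error detection is turned off in the PCI command register.
              */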
4802         cmd_reg = pci_config_get16(skdev->pci_handle, PCI_CONF_COMM);
4803         cmd_reg |= (PCI_COMM_ME | PCI_COMM_INTX_DISABLE);
4804         cmd_reg &= ~PCI_COMM_PARITY_DETECT;
4805         pci_config_put16(skdev->pci_handle, PCI_CONF_COMM, cmd_reg);
4806 
4807         /* Get adapter PCI device information. */
4808         skdev->vendor_id = pci_config_get16(skdev->pci_handle, PCI_CONF_VENID);
4809         skdev->device_id = pci_config_get16(skdev->pci_handle, PCI_CONF_DEVID);
4810 
4811         Dcmn_err(CE_NOTE, "%s: %x-%x card detected",
4812             skdev->name, skdev->vendor_id, skdev->device_id);
4813 
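             /*
              * Fetch driver properties, then initialize and construct the
              * per-instance device state.
              */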
4814         skd_get_properties(dip, skdev);
4815 
4816         (void) skd_init(skdev);
4817 
4818         if (skd_construct(skdev, instance)) {
4819                 cmn_err(CE_WARN, "!%s: construct FAILED", skdev->name);
4820                 goto skd_attach_failed;
4821         }
4822 
4823         progress |= SKD_PROBED;
4824         progress |= SKD_CONSTRUCTED;
4825 
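             /* Initialize the queue of pending blkdev I/O requests. */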
4826         SIMPLEQ_INIT(&skdev->waitqueue);
4827 
4828         /*
4829          * Setup interrupt handler
4830          */
4831         if (skd_setup_interrupts(skdev) != DDI_SUCCESS) {
4832                 cmn_err(CE_WARN, "!%s: Unable to add interrupt",
4833                     skdev->name);
4834                 goto skd_attach_failed;
4835         }
4836 
4837         progress |= SKD_INTR_ADDED;
4838 
4839         ADAPTER_STATE_LOCK(skdev);
4840         skdev->flags |= SKD_ATTACHED;
4841         ADAPTER_STATE_UNLOCK(skdev);
4842 
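             /* 512-byte (1 << 9) logical block size. */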
4843         skdev->d_blkshift = 9;
4844         progress |= SKD_ATTACHED;
4845 
4847         skd_start_device(skdev);
4848 
4849         ADAPTER_STATE_LOCK(skdev);
4850         skdev->progress = progress;
4851         ADAPTER_STATE_UNLOCK(skdev);
4852 
4853         /*
4854          * Give the board a chance to
4855          * complete its initialization.
4856          */
4857         if (skdev->gendisk_on != 1)
4858                 (void) skd_wait_for_s1120(skdev);
4859 
4860         if (skdev->gendisk_on != 1) {
4861                 cmn_err(CE_WARN, "!%s: s1120 failed to come ONLINE",
4862                     skdev->name);
4863                 goto skd_attach_failed;
4864         }
4865 
4866         ddi_report_dev(dip);
4867 
4868         skd_send_internal_skspcl(skdev, &skdev->internal_skspcl, INQUIRY);
4869 
4870         skdev->disks_initialized++;
4871 
4872         (void) strcpy(fw_version, "???");
4873         (void) skd_pci_info(skdev, pci_str, sizeof (pci_str));
4874         Dcmn_err(CE_NOTE, " sTec S1120 Driver(%s) version %s-b%s",
4875             DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
4876 
4877         Dcmn_err(CE_NOTE, " sTec S1120 %04x:%04x %s 64 bit",
4878             skdev->vendor_id, skdev->device_id, pci_str);
4879 
4880         Dcmn_err(CE_NOTE, " sTec S1120 %s\n", skdev->pathname);
4881 
4882         if (*skdev->inq_serial_num)
4883                 Dcmn_err(CE_NOTE, " sTec S1120 serial#=%s",
4884                     skdev->inq_serial_num);
4885 
4886         if (*skdev->inq_product_id &&
4887             *skdev->inq_product_rev)
4888                 Dcmn_err(CE_NOTE, " sTec S1120 prod ID=%s prod rev=%s",
4889                     skdev->inq_product_id, skdev->inq_product_rev);
4890 
4891         Dcmn_err(CE_NOTE, "%s: intr-type-cap:        %d",
4892             skdev->name, skdev->irq_type);
4893         Dcmn_err(CE_NOTE, "%s: max-scsi-reqs:        %d",
4894             skdev->name, skd_max_queue_depth);
4895         Dcmn_err(CE_NOTE, "%s: max-sgs-per-req:      %d",
4896             skdev->name, skd_sgs_per_request);
4897         Dcmn_err(CE_NOTE, "%s: max-scsi-req-per-msg: %d",
4898             skdev->name, skd_max_req_per_msg);
4899 
4900         if (skd_bd_attach(dip, skdev) == DDI_FAILURE)
4901                 goto skd_attach_failed;
4902 
4903         skd_update_props(skdev, dip);
4904 
4905         /* Enable timer */
4906         skd_start_timer(skdev);
4907 
4908         ADAPTER_STATE_LOCK(skdev);
4909         skdev->progress = progress;
4910         ADAPTER_STATE_UNLOCK(skdev);
4911 
4912         skdev->attached = 1;
4913         return (DDI_SUCCESS);
4914 
4915 skd_attach_failed:
4916         if (skdev != NULL) {
4917                 skd_dealloc_resources(dip, skdev, progress, instance);
4918 
4919                 if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) {
4920                         skd_destroy_mutex(skdev);
                     }
             }
4921 
4922         ddi_soft_state_free(skd_state, instance);
4923 
4924         cmn_err(CE_WARN, "!skd_attach FAILED: progress=%x", progress);
4925         return (DDI_FAILURE);
4926 }
4927 
4928 /*
4929  *
4930  * Name:        skd_halt
4931  *
4932  * Inputs:      skdev           - device state structure.
4933  *
4934  * Returns:     Nothing.
4935  *
4936  */
4937 static void
4938 skd_halt(skd_device_t *skdev)
4939 {
4940         Dcmn_err(CE_NOTE, "%s: halt/suspend ......", skdev->name);
4941 }
4942 
4943 /*
4944  *
4945  * Name:        skd_detach, detaches driver from the system.
4946  *
4947  * Inputs:      dip             - device info structure.
4948  *
4949  * Returns:     DDI_SUCCESS on successful detach, otherwise DDI_FAILURE.
4950  *
4951  */
4952 static int
4953 skd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
4954 {
4955         skd_buf_private_t *pbuf;
4956         skd_device_t    *skdev;
4957         int             instance;
4958         timeout_id_t    timer_id = NULL;
4959         int             rv1 = DDI_SUCCESS;
4960         struct skd_special_context *skspcl;
4961 
4962         instance = ddi_get_instance(dip);
4963 
4964         skdev = ddi_get_soft_state(skd_state, instance);
4965         if (skdev == NULL) {
4966                 cmn_err(CE_WARN, "!detach failed: NULL skd state");
4967 
4968                 return (DDI_FAILURE);
4969         }
4970 
4971         Dcmn_err(CE_CONT, "skd_detach(%d): entered", instance);
4972 
4973         switch (cmd) {
4974         case DDI_DETACH:
4975                 /* Test for packet cache in use. */
4976                 ADAPTER_STATE_LOCK(skdev);
4977 
4978                 /* Stop command/event processing. */
4979                 skdev->flags |= (SKD_SUSPENDED | SKD_CMD_ABORT_TMO);
4980 
4981                 /* Disable driver timer if no adapters. */
4982                 if (skdev->skd_timer_timeout_id != 0) {
4983                         timer_id = skdev->skd_timer_timeout_id;
4984                         skdev->skd_timer_timeout_id = 0;
4985                 }
4986                 ADAPTER_STATE_UNLOCK(skdev);
4987 
4988                 if (timer_id != 0) {
4989                         (void) untimeout(timer_id);
4990                 }
4991 
4992 #ifdef  SKD_PM
4993                 if (skdev->power_level != LOW_POWER_LEVEL) {
4994                         skd_halt(skdev);
4995                         skdev->power_level = LOW_POWER_LEVEL;
4996                 }
4997 #endif
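                     /* Flush the device write cache before stopping it. */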
4998                 skspcl = &skdev->internal_skspcl;
4999                 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
5000 
5001                 skd_stop_device(skdev);
5002 
5003                 /*
5004                  * Clear request queue.
5005                  */
5006                 while (!SIMPLEQ_EMPTY(&skdev->waitqueue)) {
5007                         pbuf = skd_get_queued_pbuf(skdev);
5008                         skd_end_request_abnormal(skdev, pbuf, ECANCELED,
5009                             SKD_IODONE_WNIOC);
5010                         Dcmn_err(CE_NOTE,
5011                             "detach: cancelled pbuf %p %ld <%s> %lld\n",
5012                             (void *)pbuf, pbuf->x_xfer->x_nblks,
5013                             (pbuf->dir & B_READ) ? "Read" : "Write",
5014                             pbuf->x_xfer->x_blkno);
5015                 }
5016 
5017                 skd_bd_detach(skdev);
5018 
5019                 skd_dealloc_resources(dip, skdev, skdev->progress, instance);
5020 
5021                 if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) {
5022                         skd_destroy_mutex(skdev);
5023                 }
5024 
5025                 ddi_soft_state_free(skd_state, instance);
5026 
5027                 skd_exit();
5028 
5029                 break;
5030 
5031         case DDI_SUSPEND:
5032                 /* Block timer. */
5033 
5034                 ADAPTER_STATE_LOCK(skdev);
5035                 skdev->flags |= SKD_SUSPENDED;
5036 
5037                 /* Disable driver timer if last adapter. */
5038                 if (skdev->skd_timer_timeout_id != 0) {
5039                         timer_id = skdev->skd_timer_timeout_id;
5040                         skdev->skd_timer_timeout_id = 0;
5041                 }
5042                 ADAPTER_STATE_UNLOCK(skdev);
5043 
5044                 if (timer_id != 0) {
5045                         (void) untimeout(timer_id);
5046                 }
5047 
5048                 ddi_prop_remove_all(dip);
5049 
5050                 skd_halt(skdev);
5051 
5052                 break;
5053         default:
5054                 rv1 = DDI_FAILURE;
5055                 break;
5056         }
5057 
5058         if (rv1 != DDI_SUCCESS) {
5059                 cmn_err(CE_WARN, "!skd_detach, failed, rv1=%x", rv1);
5060         } else {
5061                 Dcmn_err(CE_CONT, "skd_detach: exiting");
5062         }
5063 
5064         return (rv1);
5068 }
5069 
5070 /*
5071  *
5072  * Name:        skd_devid_init, calls skd_setup_devid to set up
5073  *              the device's devid structure.
5074  *
5075  * Inputs:      arg             - device state structure.
5076  *              dip             - dev_info structure.
5077  *              devid           - devid structure.
5078  *
5079  * Returns:     Zero.
5080  *
5081  */
5082 /* ARGSUSED */  /* Upstream common source with other platforms. */
5083 static int
5084 skd_devid_init(void *arg, dev_info_t *dip, ddi_devid_t *devid)
5085 {
5086         skd_device_t    *skdev = arg;
5087 
5088         (void) skd_setup_devid(skdev, devid);
5089 
5090         return (0);
5091 }
5092 
5093 /*
5094  *
5095  * Name:        skd_bd_driveinfo, retrieves the device's drive info.
5096  *
5097  * Inputs:      drive           - drive data structure.
5098  *              arg             - device state structure.
5099  *
5100  * Returns:     Nothing.
5101  *
5102  */
5103 static void
5104 skd_bd_driveinfo(void *arg, bd_drive_t *drive)
5105 {
5106         skd_device_t    *skdev = arg;
5107 
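             /* Advertise 80% of the adapter's queue depth limit to blkdev. */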
5108         drive->d_qsize        = (skdev->queue_depth_limit * 4) / 5;
5109         drive->d_maxxfer      = SKD_DMA_MAXXFER;
5110         drive->d_removable    = B_FALSE;
5111         drive->d_hotpluggable = B_FALSE;
5112         drive->d_target       = 0;
5113         drive->d_lun          = 0;
5114 
5115         if (skdev->inquiry_is_valid != 0) {
5116                 drive->d_vendor = skdev->inq_vendor_id;
5117                 drive->d_vendor_len = strlen(drive->d_vendor);
5118 
5119                 drive->d_product = skdev->inq_product_id;
5120                 drive->d_product_len = strlen(drive->d_product);
5121 
5122                 drive->d_serial = skdev->inq_serial_num;
5123                 drive->d_serial_len = strlen(drive->d_serial);
5124 
5125                 drive->d_revision = skdev->inq_product_rev;
5126                 drive->d_revision_len = strlen(drive->d_revision);
5127         }
5128 }
5129 
5130 /*
5131  *
5132  * Name:        skd_bd_mediainfo, retrieves device media info.
5133  *
5134  * Inputs:      arg             - device state structure.
5135  *              media           - container for media info.
5136  *
5137  * Returns:     Zero.
5138  *
5139  */
5140 static int
5141 skd_bd_mediainfo(void *arg, bd_media_t *media)
5142 {
5143         skd_device_t    *skdev = arg;
5144 
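             /* 512-byte logical blocks on 4 KiB physical blocks. */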
5145         media->m_nblks    = skdev->Nblocks;
5146         media->m_blksize  = 512;
5147         media->m_pblksize = 4096;
5148         media->m_readonly = B_FALSE;
5149         media->m_solidstate = B_TRUE;
5150 
5151         return (0);
5152 }
5153 
5154 /*
5155  *
5156  * Name:        skd_rw, performs R/W requests for blkdev driver.
5157  *
5158  * Inputs:      skdev           - device state structure.
5159  *              xfer            - transfer structure.
5160  *              dir             - I/O direction.
5161  *
5162  * Returns:     EIO for polled (dump) transfers, which are not yet
5163  *              supported.  EAGAIN if the device is not online.  ENOMEM
5164  *              on allocation failure.  Zero once the request is queued.
5165  *
5166  */
5167 static int
5168 skd_rw(skd_device_t *skdev, bd_xfer_t *xfer, int dir)
5169 {
5170         skd_buf_private_t       *pbuf;
5171 
5172         /*
5173          * The x_flags member is not defined in Oracle Solaris; this will
5174          * need rework before dump (polled I/O) can be supported here.
5175          */
5176         if (xfer->x_flags & BD_XFER_POLL)
5177                 return (EIO);
5178 
5179         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
5180                 Dcmn_err(CE_NOTE, "Device - not ONLINE");
5181 
5182                 skd_request_fn_not_online(skdev);
5183 
5184                 return (EAGAIN);
5185         }
5186 
5187         pbuf = kmem_zalloc(sizeof (skd_buf_private_t), KM_NOSLEEP);
5188         if (pbuf == NULL)
5189                 return (ENOMEM);
5190 
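             /* Queue the request and kick off processing. */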
5191         WAITQ_LOCK(skdev);
5192         pbuf->dir = dir;
5193         pbuf->x_xfer = xfer;
5194 
5195         skd_queue(skdev, pbuf);
5196         skdev->ios_queued++;
5197         WAITQ_UNLOCK(skdev);
5198 
5199         skd_start(skdev);
5200 
5201         return (0);
5202 }
5203 
5204 /*
5205  *
5206  * Name:        skd_bd_read, performs blkdev read requests.
5207  *
5208  * Inputs:      arg             - device state structure.
5209  *              xfer            - transfer request structure.
5210  *
5211  * Returns:     Value returned by skd_rw().
5212  *
5213  */
5214 static int
5215 skd_bd_read(void *arg, bd_xfer_t *xfer)
5216 {
5217         return (skd_rw(arg, xfer, B_READ));
5218 }
5219 
5220 /*
5221  *
5222  * Name:        skd_bd_write, performs blkdev write requests.
5223  *
5224  * Inputs:      arg             - device state structure.
5225  *              xfer            - transfer request structure.
5226  *
5227  * Returns:     Value returned by skd_rw().
5228  *
5229  */
5230 static int
5231 skd_bd_write(void *arg, bd_xfer_t *xfer)
5232 {
5233         return (skd_rw(arg, xfer, B_WRITE));
5234 }