1 /*
   2  * This file and its contents are supplied under the terms of the
   3  * Common Development and Distribution License ("CDDL"), version 1.0.
   4  * You may only use this file in accordance with the terms of version
   5  * 1.0 of the CDDL.
   6  *
   7  * A full copy of the text of the CDDL should have accompanied this
   8  * source.  A copy of the CDDL is also available via the Internet at
   9  * http://www.illumos.org/license/CDDL.
  10  */
  11 
  12 /*
  13  * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
  14  * Copyright 2016 Tegile Systems, Inc. All rights reserved.
  15  * Copyright (c) 2016 The MathWorks, Inc.  All rights reserved.
  16  */
  17 
  18 /*
  19  * blkdev driver for NVMe compliant storage devices
  20  *
  21  * This driver was written to conform to version 1.1b of the NVMe specification.
  22  * It may work with newer versions, but that is completely untested and disabled
  23  * by default.
  24  *
  25  * The driver has only been tested on x86 systems and will not work on big-
  26  * endian systems without changes to the code accessing registers and data
  27  * structures used by the hardware.
  28  *
  29  *
  30  * Interrupt Usage:
  31  *
 * The driver will use a FIXED interrupt while configuring the device, as the
 * specification requires. Later in the attach process it will switch to MSI-X
 * or MSI if supported. The driver wants to have one interrupt vector per CPU,
 * but it will work correctly if fewer are available. Interrupts can be shared
 * by queues; in that case the interrupt handler iterates through the I/O queue
 * array in steps of n_intr_cnt. Usually only the admin queue will share an
 * interrupt with one I/O queue. The interrupt handler retrieves completed
 * commands from all queues sharing an interrupt vector and posts them to a
 * taskq for completion processing.
  41  *
  42  *
  43  * Command Processing:
  44  *
  45  * NVMe devices can have up to 65536 I/O queue pairs, with each queue holding up
  46  * to 65536 I/O commands. The driver will configure one I/O queue pair per
  47  * available interrupt vector, with the queue length usually much smaller than
  48  * the maximum of 65536. If the hardware doesn't provide enough queues, fewer
  49  * interrupt vectors will be used.
  50  *
  51  * Additionally the hardware provides a single special admin queue pair that can
  52  * hold up to 4096 admin commands.
  53  *
  54  * From the hardware perspective both queues of a queue pair are independent,
  55  * but they share some driver state: the command array (holding pointers to
  56  * commands currently being processed by the hardware) and the active command
  57  * counter. Access to the submission side of a queue pair and the shared state
  58  * is protected by nq_mutex. The completion side of a queue pair does not need
  59  * that protection apart from its access to the shared state; it is called only
  60  * in the interrupt handler which does not run concurrently for the same
  61  * interrupt vector.
  62  *
  63  * When a command is submitted to a queue pair the active command counter is
  64  * incremented and a pointer to the command is stored in the command array. The
  65  * array index is used as command identifier (CID) in the submission queue
 * entry. Some commands may take a very long time to complete, and if the queue
 * wraps around in that time a submission may find the next array slot still
 * occupied by a long-running command. In this case the array is searched
 * sequentially for the next free slot. The length of the command array is the
 * same as the configured queue length.
  71  *
  72  *
  73  * Namespace Support:
  74  *
 * NVMe devices can have multiple namespaces, each being an independent data
 * store. The driver supports multiple namespaces and creates a blkdev interface
 * for each namespace found. Namespaces can have various attributes to support
 * thin provisioning and protection information. This driver does not support
 * any of these features and ignores namespaces that have such attributes.
  80  *
 * As of NVMe 1.1 namespaces can have a 64bit Extended Unique Identifier
 * (EUI64). This driver uses the EUI64, if present, to generate the devid and
 * passes it to blkdev for use in the device node names. As this is currently
 * untested, namespaces with an EUI64 are ignored by default.
  85  *
  86  *
  87  * Blkdev Interface:
  88  *
  89  * This driver uses blkdev to do all the heavy lifting involved with presenting
  90  * a disk device to the system. As a result, the processing of I/O requests is
  91  * relatively simple as blkdev takes care of partitioning, boundary checks, DMA
  92  * setup, and splitting of transfers into manageable chunks.
  93  *
  94  * I/O requests coming in from blkdev are turned into NVM commands and posted to
  95  * an I/O queue. The queue is selected by taking the CPU id modulo the number of
  96  * queues. There is currently no timeout handling of I/O commands.
  97  *
  98  * Blkdev also supports querying device/media information and generating a
  99  * devid. The driver reports the best block size as determined by the namespace
 100  * format back to blkdev as physical block size to support partition and block
 101  * alignment. The devid is either based on the namespace EUI64, if present, or
 102  * composed using the device vendor ID, model number, serial number, and the
 103  * namespace ID.
 104  *
 105  *
 106  * Error Handling:
 107  *
 * Error handling is currently limited to detecting fatal hardware errors,
 * either by asynchronous events or synchronously through command status or
 * admin command timeouts. In case of severe errors the device is fenced off
 * and all further requests will return EIO. FMA is then called to fault the
 * device.
 112  *
 113  * The hardware has a limit for outstanding asynchronous event requests. Before
 114  * this limit is known the driver assumes it is at least 1 and posts a single
 115  * asynchronous request. Later when the limit is known more asynchronous event
 116  * requests are posted to allow quicker reception of error information. When an
 117  * asynchronous event is posted by the hardware the driver will parse the error
 118  * status fields and log information or fault the device, depending on the
 119  * severity of the asynchronous event. The asynchronous event request is then
 120  * reused and posted to the admin queue again.
 121  *
 122  * On command completion the command status is checked for errors. In case of
 123  * errors indicating a driver bug the driver panics. Almost all other error
 124  * status values just cause EIO to be returned.
 125  *
 126  * Command timeouts are currently detected for all admin commands except
 127  * asynchronous event requests. If a command times out and the hardware appears
 128  * to be healthy the driver attempts to abort the command. If this fails the
 129  * driver assumes the device to be dead, fences it off, and calls FMA to retire
 130  * it. In general admin commands are issued at attach time only. No timeout
 131  * handling of normal I/O commands is presently done.
 132  *
 * It is also possible for the ABORT command itself to time out. In that case
 * the device is likewise declared dead and fenced off.
 135  *
 136  *
 137  * Quiesce / Fast Reboot:
 138  *
 139  * The driver currently does not support fast reboot. A quiesce(9E) entry point
 140  * is still provided which is used to send a shutdown notification to the
 141  * device.
 142  *
 143  *
 144  * Driver Configuration:
 145  *
 * The following driver properties can be changed to control some aspects of
 * the driver's operation (an example configuration follows the list):
 * - strict-version: can be set to 0 to allow devices conforming to newer
 *   versions or namespaces with EUI64 to be used
 * - ignore-unknown-vendor-status: can be set to 1 to not treat any vendor
 *   specific command status as a fatal error leading to device faulting
 152  * - admin-queue-len: the maximum length of the admin queue (16-4096)
 153  * - io-queue-len: the maximum length of the I/O queues (16-65536)
 154  * - async-event-limit: the maximum number of asynchronous event requests to be
 155  *   posted by the driver
 156  * - volatile-write-cache-enable: can be set to 0 to disable the volatile write
 157  *   cache
 158  * - min-phys-block-size: the minimum physical block size to report to blkdev,
 159  *   which is among other things the basis for ZFS vdev ashift
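 *
 * As a hypothetical example, a driver.conf(4) file overriding two of these
 * properties could contain the lines:
 *
 *	strict-version=0;
 *	min-phys-block-size=4096;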
 160  *
 161  *
 162  * TODO:
 163  * - figure out sane default for I/O queue depth reported to blkdev
 164  * - polled I/O support to support kernel core dumping
 165  * - FMA handling of media errors
 166  * - support for devices supporting very large I/O requests using chained PRPs
 167  * - support for querying log pages from user space
 168  * - support for configuring hardware parameters like interrupt coalescing
 169  * - support for media formatting and hard partitioning into namespaces
 170  * - support for big-endian systems
 171  * - support for fast reboot
 172  * - support for firmware updates
 173  * - support for NVMe Subsystem Reset (1.1)
 174  * - support for Scatter/Gather lists (1.1)
 175  * - support for Reservations (1.1)
 176  * - support for power management
 177  */
 178 
 179 #include <sys/byteorder.h>
 180 #ifdef _BIG_ENDIAN
 181 #error nvme driver needs porting for big-endian platforms
 182 #endif
 183 
 184 #include <sys/modctl.h>
 185 #include <sys/conf.h>
 186 #include <sys/devops.h>
 187 #include <sys/ddi.h>
 188 #include <sys/sunddi.h>
 189 #include <sys/bitmap.h>
 190 #include <sys/sysmacros.h>
 191 #include <sys/param.h>
 192 #include <sys/varargs.h>
 193 #include <sys/cpuvar.h>
 194 #include <sys/disp.h>
 195 #include <sys/blkdev.h>
 196 #include <sys/atomic.h>
 197 #include <sys/archsystm.h>
 198 #include <sys/sata/sata_hba.h>
 199 
 200 #include "nvme_reg.h"
 201 #include "nvme_var.h"
 202 
 203 
 204 /* NVMe spec version supported */
 205 static const int nvme_version_major = 1;
 206 static const int nvme_version_minor = 1;
 207 
 208 /* tunable for admin command timeout in seconds, default is 1s */
 209 static volatile int nvme_admin_cmd_timeout = 1;
 210 
 211 static int nvme_attach(dev_info_t *, ddi_attach_cmd_t);
 212 static int nvme_detach(dev_info_t *, ddi_detach_cmd_t);
 213 static int nvme_quiesce(dev_info_t *);
 214 static int nvme_fm_errcb(dev_info_t *, ddi_fm_error_t *, const void *);
 215 static int nvme_setup_interrupts(nvme_t *, int, int);
 216 static void nvme_release_interrupts(nvme_t *);
 217 static uint_t nvme_intr(caddr_t, caddr_t);
 218 
 219 static void nvme_shutdown(nvme_t *, int, boolean_t);
 220 static boolean_t nvme_reset(nvme_t *, boolean_t);
 221 static int nvme_init(nvme_t *);
 222 static nvme_cmd_t *nvme_alloc_cmd(nvme_t *, int);
 223 static void nvme_free_cmd(nvme_cmd_t *);
 224 static nvme_cmd_t *nvme_create_nvm_cmd(nvme_namespace_t *, uint8_t,
 225     bd_xfer_t *);
 226 static int nvme_admin_cmd(nvme_cmd_t *, int);
 227 static int nvme_submit_cmd(nvme_qpair_t *, nvme_cmd_t *);
 228 static nvme_cmd_t *nvme_retrieve_cmd(nvme_t *, nvme_qpair_t *);
 229 static boolean_t nvme_wait_cmd(nvme_cmd_t *, uint_t);
 230 static void nvme_wakeup_cmd(void *);
 231 static void nvme_async_event_task(void *);
 232 
 233 static int nvme_check_unknown_cmd_status(nvme_cmd_t *);
 234 static int nvme_check_vendor_cmd_status(nvme_cmd_t *);
 235 static int nvme_check_integrity_cmd_status(nvme_cmd_t *);
 236 static int nvme_check_specific_cmd_status(nvme_cmd_t *);
 237 static int nvme_check_generic_cmd_status(nvme_cmd_t *);
 238 static inline int nvme_check_cmd_status(nvme_cmd_t *);
 239 
 240 static void nvme_abort_cmd(nvme_cmd_t *);
 241 static int nvme_async_event(nvme_t *);
 242 static void *nvme_get_logpage(nvme_t *, uint8_t, ...);
 243 static void *nvme_identify(nvme_t *, uint32_t);
 244 static boolean_t nvme_set_features(nvme_t *, uint32_t, uint8_t, uint32_t,
 245     uint32_t *);
 246 static boolean_t nvme_write_cache_set(nvme_t *, boolean_t);
 247 static int nvme_set_nqueues(nvme_t *, uint16_t);
 248 
 249 static void nvme_free_dma(nvme_dma_t *);
 250 static int nvme_zalloc_dma(nvme_t *, size_t, uint_t, ddi_dma_attr_t *,
 251     nvme_dma_t **);
 252 static int nvme_zalloc_queue_dma(nvme_t *, uint32_t, uint16_t, uint_t,
 253     nvme_dma_t **);
 254 static void nvme_free_qpair(nvme_qpair_t *);
 255 static int nvme_alloc_qpair(nvme_t *, uint32_t, nvme_qpair_t **, int);
 256 static int nvme_create_io_qpair(nvme_t *, nvme_qpair_t *, uint16_t);
 257 
 258 static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t);
 259 static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t);
 260 static inline uint64_t nvme_get64(nvme_t *, uintptr_t);
 261 static inline uint32_t nvme_get32(nvme_t *, uintptr_t);
 262 
 263 static boolean_t nvme_check_regs_hdl(nvme_t *);
 264 static boolean_t nvme_check_dma_hdl(nvme_dma_t *);
 265 
 266 static int nvme_fill_prp(nvme_cmd_t *, bd_xfer_t *);
 267 
 268 static void nvme_bd_xfer_done(void *);
 269 static void nvme_bd_driveinfo(void *, bd_drive_t *);
 270 static int nvme_bd_mediainfo(void *, bd_media_t *);
 271 static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t);
 272 static int nvme_bd_read(void *, bd_xfer_t *);
 273 static int nvme_bd_write(void *, bd_xfer_t *);
 274 static int nvme_bd_sync(void *, bd_xfer_t *);
 275 static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *);
 276 
 277 static int nvme_prp_dma_constructor(void *, void *, int);
 278 static void nvme_prp_dma_destructor(void *, void *);
 279 
 280 static void nvme_prepare_devid(nvme_t *, uint32_t);
 281 
 282 static void *nvme_state;
 283 static kmem_cache_t *nvme_cmd_cache;
 284 
 285 /*
 286  * DMA attributes for queue DMA memory
 287  *
 288  * Queue DMA memory must be page aligned. The maximum length of a queue is
 289  * 65536 entries, and an entry can be 64 bytes long.
 290  */
 291 static ddi_dma_attr_t nvme_queue_dma_attr = {
 292         .dma_attr_version       = DMA_ATTR_V0,
 293         .dma_attr_addr_lo       = 0,
 294         .dma_attr_addr_hi       = 0xffffffffffffffffULL,
 295         .dma_attr_count_max     = (UINT16_MAX + 1) * sizeof (nvme_sqe_t) - 1,
 296         .dma_attr_align         = 0x1000,
 297         .dma_attr_burstsizes    = 0x7ff,
 298         .dma_attr_minxfer       = 0x1000,
 299         .dma_attr_maxxfer       = (UINT16_MAX + 1) * sizeof (nvme_sqe_t),
 300         .dma_attr_seg           = 0xffffffffffffffffULL,
 301         .dma_attr_sgllen        = 1,
 302         .dma_attr_granular      = 1,
 303         .dma_attr_flags         = 0,
 304 };
 305 
 306 /*
 307  * DMA attributes for transfers using Physical Region Page (PRP) entries
 308  *
 309  * A PRP entry describes one page of DMA memory using the page size specified
 310  * in the controller configuration's memory page size register (CC.MPS). It uses
 311  * a 64bit base address aligned to this page size. There is no limitation on
 312  * chaining PRPs together for arbitrarily large DMA transfers.
 313  */
 314 static ddi_dma_attr_t nvme_prp_dma_attr = {
 315         .dma_attr_version       = DMA_ATTR_V0,
 316         .dma_attr_addr_lo       = 0,
 317         .dma_attr_addr_hi       = 0xffffffffffffffffULL,
 318         .dma_attr_count_max     = 0xfff,
 319         .dma_attr_align         = 0x1000,
 320         .dma_attr_burstsizes    = 0x7ff,
 321         .dma_attr_minxfer       = 0x1000,
 322         .dma_attr_maxxfer       = 0x1000,
 323         .dma_attr_seg           = 0xfff,
 324         .dma_attr_sgllen        = -1,
 325         .dma_attr_granular      = 1,
 326         .dma_attr_flags         = 0,
 327 };
 328 
 329 /*
 330  * DMA attributes for transfers using scatter/gather lists
 331  *
 332  * A SGL entry describes a chunk of DMA memory using a 64bit base address and a
 333  * 32bit length field. SGL Segment and SGL Last Segment entries require the
 334  * length to be a multiple of 16 bytes.
 335  */
 336 static ddi_dma_attr_t nvme_sgl_dma_attr = {
 337         .dma_attr_version       = DMA_ATTR_V0,
 338         .dma_attr_addr_lo       = 0,
 339         .dma_attr_addr_hi       = 0xffffffffffffffffULL,
 340         .dma_attr_count_max     = 0xffffffffUL,
 341         .dma_attr_align         = 1,
 342         .dma_attr_burstsizes    = 0x7ff,
 343         .dma_attr_minxfer       = 0x10,
 344         .dma_attr_maxxfer       = 0xfffffffffULL,
 345         .dma_attr_seg           = 0xffffffffffffffffULL,
 346         .dma_attr_sgllen        = -1,
 347         .dma_attr_granular      = 0x10,
 348         .dma_attr_flags         = 0
 349 };
 350 
 351 static ddi_device_acc_attr_t nvme_reg_acc_attr = {
 352         .devacc_attr_version    = DDI_DEVICE_ATTR_V0,
 353         .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
 354         .devacc_attr_dataorder  = DDI_STRICTORDER_ACC
 355 };
 356 
 357 static struct dev_ops nvme_dev_ops = {
 358         .devo_rev       = DEVO_REV,
 359         .devo_refcnt    = 0,
 360         .devo_getinfo   = ddi_no_info,
 361         .devo_identify  = nulldev,
 362         .devo_probe     = nulldev,
 363         .devo_attach    = nvme_attach,
 364         .devo_detach    = nvme_detach,
 365         .devo_reset     = nodev,
 366         .devo_cb_ops    = NULL,
 367         .devo_bus_ops   = NULL,
 368         .devo_power     = NULL,
 369         .devo_quiesce   = nvme_quiesce,
 370 };
 371 
 372 static struct modldrv nvme_modldrv = {
 373         .drv_modops     = &mod_driverops,
 374         .drv_linkinfo   = "NVMe v1.1b",
 375         .drv_dev_ops    = &nvme_dev_ops
 376 };
 377 
 378 static struct modlinkage nvme_modlinkage = {
 379         .ml_rev         = MODREV_1,
 380         .ml_linkage     = { &nvme_modldrv, NULL }
 381 };
 382 
 383 static bd_ops_t nvme_bd_ops = {
 384         .o_version      = BD_OPS_VERSION_0,
 385         .o_drive_info   = nvme_bd_driveinfo,
 386         .o_media_info   = nvme_bd_mediainfo,
 387         .o_devid_init   = nvme_bd_devid,
 388         .o_sync_cache   = nvme_bd_sync,
 389         .o_read         = nvme_bd_read,
 390         .o_write        = nvme_bd_write,
 391 };
 392 
 393 int
 394 _init(void)
 395 {
 396         int error;
 397 
 398         error = ddi_soft_state_init(&nvme_state, sizeof (nvme_t), 1);
 399         if (error != DDI_SUCCESS)
 400                 return (error);
 401 
 402         nvme_cmd_cache = kmem_cache_create("nvme_cmd_cache",
 403             sizeof (nvme_cmd_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
 404 
 405         bd_mod_init(&nvme_dev_ops);
 406 
 407         error = mod_install(&nvme_modlinkage);
 408         if (error != DDI_SUCCESS) {
 409                 ddi_soft_state_fini(&nvme_state);
 410                 bd_mod_fini(&nvme_dev_ops);
 411         }
 412 
 413         return (error);
 414 }
 415 
 416 int
 417 _fini(void)
 418 {
 419         int error;
 420 
 421         error = mod_remove(&nvme_modlinkage);
 422         if (error == DDI_SUCCESS) {
 423                 ddi_soft_state_fini(&nvme_state);
 424                 kmem_cache_destroy(nvme_cmd_cache);
 425                 bd_mod_fini(&nvme_dev_ops);
 426         }
 427 
 428         return (error);
 429 }
 430 
 431 int
 432 _info(struct modinfo *modinfop)
 433 {
 434         return (mod_info(&nvme_modlinkage, modinfop));
 435 }
 436 
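/*
 * Register access helpers. All accesses go through the little-endian register
 * access handle; each helper ASSERTs the natural alignment of the access.
 */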
 437 static inline void
 438 nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val)
 439 {
 440         ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);
 441 
 442         /*LINTED: E_BAD_PTR_CAST_ALIGN*/
 443         ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val);
 444 }
 445 
 446 static inline void
 447 nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val)
 448 {
 449         ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);
 450 
 451         /*LINTED: E_BAD_PTR_CAST_ALIGN*/
 452         ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val);
 453 }
 454 
 455 static inline uint64_t
 456 nvme_get64(nvme_t *nvme, uintptr_t reg)
 457 {
 458         uint64_t val;
 459 
 460         ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);
 461 
 462         /*LINTED: E_BAD_PTR_CAST_ALIGN*/
 463         val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg));
 464 
 465         return (val);
 466 }
 467 
 468 static inline uint32_t
 469 nvme_get32(nvme_t *nvme, uintptr_t reg)
 470 {
 471         uint32_t val;
 472 
 473         ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);
 474 
 475         /*LINTED: E_BAD_PTR_CAST_ALIGN*/
 476         val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg));
 477 
 478         return (val);
 479 }
 480 
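/*
 * FMA error checks for the register access handle and DMA handles. Each
 * function returns B_TRUE if an error has been flagged on the handle.
 */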
 481 static boolean_t
 482 nvme_check_regs_hdl(nvme_t *nvme)
 483 {
 484         ddi_fm_error_t error;
 485 
 486         ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION);
 487 
 488         if (error.fme_status != DDI_FM_OK)
 489                 return (B_TRUE);
 490 
 491         return (B_FALSE);
 492 }
 493 
 494 static boolean_t
 495 nvme_check_dma_hdl(nvme_dma_t *dma)
 496 {
 497         ddi_fm_error_t error;
 498 
 499         if (dma == NULL)
 500                 return (B_FALSE);
 501 
 502         ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION);
 503 
 504         if (error.fme_status != DDI_FM_OK)
 505                 return (B_TRUE);
 506 
 507         return (B_FALSE);
 508 }
 509 
 510 static void
 511 nvme_free_dma_common(nvme_dma_t *dma)
 512 {
 513         if (dma->nd_dmah != NULL)
 514                 (void) ddi_dma_unbind_handle(dma->nd_dmah);
 515         if (dma->nd_acch != NULL)
 516                 ddi_dma_mem_free(&dma->nd_acch);
 517         if (dma->nd_dmah != NULL)
 518                 ddi_dma_free_handle(&dma->nd_dmah);
 519 }
 520 
 521 static void
 522 nvme_free_dma(nvme_dma_t *dma)
 523 {
 524         nvme_free_dma_common(dma);
 525         kmem_free(dma, sizeof (*dma));
 526 }
 527 
 528 /* ARGSUSED */
 529 static void
 530 nvme_prp_dma_destructor(void *buf, void *private)
 531 {
 532         nvme_dma_t *dma = (nvme_dma_t *)buf;
 533 
 534         nvme_free_dma_common(dma);
 535 }
 536 
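/*
 * nvme_alloc_dma_common -- allocate and bind DMA memory
 *
 * Allocates a DMA handle and memory according to the given attributes and
 * binds it into the caller-provided nvme_dma_t. A failure to bind tears the
 * partial allocation down again.
 */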
 537 static int
 538 nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma,
 539     size_t len, uint_t flags, ddi_dma_attr_t *dma_attr)
 540 {
 541         if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL,
 542             &dma->nd_dmah) != DDI_SUCCESS) {
		/*
		 * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES; the
		 * only other possible error is DDI_DMA_BADATTR, which
		 * indicates a driver bug and therefore warrants a panic.
		 */
 548                 dev_err(nvme->n_dip, CE_PANIC,
 549                     "!failed to get DMA handle, check DMA attributes");
 550                 return (DDI_FAILURE);
 551         }
 552 
 553         /*
 554          * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified
 555          * or the flags are conflicting, which isn't the case here.
 556          */
 557         (void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr,
 558             DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp,
 559             &dma->nd_len, &dma->nd_acch);
 560 
 561         if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp,
 562             dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 563             &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) {
 564                 dev_err(nvme->n_dip, CE_WARN,
 565                     "!failed to bind DMA memory");
 566                 atomic_inc_32(&nvme->n_dma_bind_err);
 567                 nvme_free_dma_common(dma);
 568                 return (DDI_FAILURE);
 569         }
 570 
 571         return (DDI_SUCCESS);
 572 }
 573 
 574 static int
 575 nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags,
 576     ddi_dma_attr_t *dma_attr, nvme_dma_t **ret)
 577 {
 578         nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP);
 579 
 580         if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) !=
 581             DDI_SUCCESS) {
 582                 *ret = NULL;
 583                 kmem_free(dma, sizeof (nvme_dma_t));
 584                 return (DDI_FAILURE);
 585         }
 586 
 587         bzero(dma->nd_memp, dma->nd_len);
 588 
 589         *ret = dma;
 590         return (DDI_SUCCESS);
 591 }
 592 
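/*
 * Constructor for the kmem cache of pre-allocated, page-sized DMA buffers
 * (n_prp_cache) used to hold PRP entries for I/O commands. The matching
 * destructor above releases the DMA resources again.
 */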
 593 /* ARGSUSED */
 594 static int
 595 nvme_prp_dma_constructor(void *buf, void *private, int flags)
 596 {
 597         nvme_dma_t *dma = (nvme_dma_t *)buf;
 598         nvme_t *nvme = (nvme_t *)private;
 599 
 600         dma->nd_dmah = NULL;
 601         dma->nd_acch = NULL;
 602 
 603         if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize,
 604             DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) {
 605                 return (-1);
 606         }
 607 
 608         ASSERT(dma->nd_ncookie == 1);
 609 
 610         dma->nd_cached = B_TRUE;
 611 
 612         return (0);
 613 }
 614 
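/*
 * nvme_zalloc_queue_dma -- allocate zeroed DMA memory for a queue
 *
 * The allocation is rounded up to a whole number of pages and must bind to a
 * single cookie, as the hardware is given a single physical base address for
 * each queue.
 */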
 615 static int
 616 nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len,
 617     uint_t flags, nvme_dma_t **dma)
 618 {
 619         uint32_t len = nentry * qe_len;
 620         ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr;
 621 
 622         len = roundup(len, nvme->n_pagesize);
 623 
 624         q_dma_attr.dma_attr_minxfer = len;
 625 
 626         if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma)
 627             != DDI_SUCCESS) {
 628                 dev_err(nvme->n_dip, CE_WARN,
 629                     "!failed to get DMA memory for queue");
 630                 goto fail;
 631         }
 632 
 633         if ((*dma)->nd_ncookie != 1) {
 634                 dev_err(nvme->n_dip, CE_WARN,
 635                     "!got too many cookies for queue DMA");
 636                 goto fail;
 637         }
 638 
 639         return (DDI_SUCCESS);
 640 
 641 fail:
 642         if (*dma) {
 643                 nvme_free_dma(*dma);
 644                 *dma = NULL;
 645         }
 646 
 647         return (DDI_FAILURE);
 648 }
 649 
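/*
 * nvme_free_qpair -- free a queue pair, including any commands still active on
 * it and its queue DMA memory
 */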
 650 static void
 651 nvme_free_qpair(nvme_qpair_t *qp)
 652 {
 653         int i;
 654 
 655         mutex_destroy(&qp->nq_mutex);
 656 
 657         if (qp->nq_sqdma != NULL)
 658                 nvme_free_dma(qp->nq_sqdma);
 659         if (qp->nq_cqdma != NULL)
 660                 nvme_free_dma(qp->nq_cqdma);
 661 
 662         if (qp->nq_active_cmds > 0)
 663                 for (i = 0; i != qp->nq_nentry; i++)
 664                         if (qp->nq_cmd[i] != NULL)
 665                                 nvme_free_cmd(qp->nq_cmd[i]);
 666 
 667         if (qp->nq_cmd != NULL)
 668                 kmem_free(qp->nq_cmd, sizeof (nvme_cmd_t *) * qp->nq_nentry);
 669 
 670         kmem_free(qp, sizeof (nvme_qpair_t));
 671 }
 672 
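/*
 * nvme_alloc_qpair -- allocate and set up a queue pair
 *
 * Allocates the submission and completion queue DMA memory and the command
 * array, and records the doorbell register offsets for the queue pair at the
 * given index.
 */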
 673 static int
 674 nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp,
 675     int idx)
 676 {
 677         nvme_qpair_t *qp = kmem_zalloc(sizeof (*qp), KM_SLEEP);
 678 
 679         mutex_init(&qp->nq_mutex, NULL, MUTEX_DRIVER,
 680             DDI_INTR_PRI(nvme->n_intr_pri));
 681 
 682         if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t),
 683             DDI_DMA_WRITE, &qp->nq_sqdma) != DDI_SUCCESS)
 684                 goto fail;
 685 
 686         if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t),
 687             DDI_DMA_READ, &qp->nq_cqdma) != DDI_SUCCESS)
 688                 goto fail;
 689 
 690         qp->nq_sq = (nvme_sqe_t *)qp->nq_sqdma->nd_memp;
 691         qp->nq_cq = (nvme_cqe_t *)qp->nq_cqdma->nd_memp;
 692         qp->nq_nentry = nentry;
 693 
 694         qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx);
 695         qp->nq_cqhdbl = NVME_REG_CQHDBL(nvme, idx);
 696 
 697         qp->nq_cmd = kmem_zalloc(sizeof (nvme_cmd_t *) * nentry, KM_SLEEP);
 698         qp->nq_next_cmd = 0;
 699 
 700         *nqp = qp;
 701         return (DDI_SUCCESS);
 702 
 703 fail:
 704         nvme_free_qpair(qp);
 705         *nqp = NULL;
 706 
 707         return (DDI_FAILURE);
 708 }
 709 
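/*
 * nvme_alloc_cmd -- allocate and initialize a command from the command cache
 */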
 710 static nvme_cmd_t *
 711 nvme_alloc_cmd(nvme_t *nvme, int kmflag)
 712 {
 713         nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag);
 714 
 715         if (cmd == NULL)
 716                 return (cmd);
 717 
 718         bzero(cmd, sizeof (nvme_cmd_t));
 719 
 720         cmd->nc_nvme = nvme;
 721 
 722         mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER,
 723             DDI_INTR_PRI(nvme->n_intr_pri));
 724         cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL);
 725 
 726         return (cmd);
 727 }
 728 
 729 static void
 730 nvme_free_cmd(nvme_cmd_t *cmd)
 731 {
 732         if (cmd->nc_dma) {
 733                 if (cmd->nc_dma->nd_cached)
 734                         kmem_cache_free(cmd->nc_nvme->n_prp_cache,
 735                             cmd->nc_dma);
 736                 else
 737                         nvme_free_dma(cmd->nc_dma);
 738                 cmd->nc_dma = NULL;
 739         }
 740 
 741         cv_destroy(&cmd->nc_cv);
 742         mutex_destroy(&cmd->nc_mutex);
 743 
 744         kmem_cache_free(nvme_cmd_cache, cmd);
 745 }
 746 
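/*
 * nvme_submit_cmd -- submit a command to a queue pair
 *
 * A free slot in the command array is used as the command identifier (CID).
 * The submission queue entry is copied into the queue and the submission queue
 * tail doorbell is written. Returns DDI_FAILURE if the queue is full.
 */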
 747 static int
 748 nvme_submit_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
 749 {
 750         nvme_reg_sqtdbl_t tail = { 0 };
 751 
 752         mutex_enter(&qp->nq_mutex);
 753 
 754         if (qp->nq_active_cmds == qp->nq_nentry) {
 755                 mutex_exit(&qp->nq_mutex);
 756                 return (DDI_FAILURE);
 757         }
 758 
 759         cmd->nc_completed = B_FALSE;
 760 
 761         /*
 762          * Try to insert the cmd into the active cmd array at the nq_next_cmd
 763          * slot. If the slot is already occupied advance to the next slot and
 764          * try again. This can happen for long running commands like async event
 765          * requests.
 766          */
 767         while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
 768                 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
 769         qp->nq_cmd[qp->nq_next_cmd] = cmd;
 770 
 771         qp->nq_active_cmds++;
 772 
 773         cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;
 774         bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t));
 775         (void) ddi_dma_sync(qp->nq_sqdma->nd_dmah,
 776             sizeof (nvme_sqe_t) * qp->nq_sqtail,
 777             sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV);
 778         qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
 779 
 780         tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry;
 781         nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r);
 782 
 783         mutex_exit(&qp->nq_mutex);
 784         return (DDI_SUCCESS);
 785 }
 786 
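/*
 * nvme_retrieve_cmd -- retrieve the next completed command from a queue pair
 *
 * Returns NULL if the phase tag of the next completion queue entry shows no
 * new completion. Otherwise the command is looked up by its CID, the
 * submission queue head is updated from the completion entry, and the
 * completion queue head doorbell is written.
 */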
 787 static nvme_cmd_t *
 788 nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp)
 789 {
 790         nvme_reg_cqhdbl_t head = { 0 };
 791 
 792         nvme_cqe_t *cqe;
 793         nvme_cmd_t *cmd;
 794 
 795         (void) ddi_dma_sync(qp->nq_cqdma->nd_dmah, 0,
 796             sizeof (nvme_cqe_t) * qp->nq_nentry, DDI_DMA_SYNC_FORKERNEL);
 797 
 798         cqe = &qp->nq_cq[qp->nq_cqhead];
 799 
 800         /* Check phase tag of CQE. Hardware inverts it for new entries. */
 801         if (cqe->cqe_sf.sf_p == qp->nq_phase)
 802                 return (NULL);
 803 
 804         ASSERT(nvme->n_ioq[cqe->cqe_sqid] == qp);
 805         ASSERT(cqe->cqe_cid < qp->nq_nentry);
 806 
 807         mutex_enter(&qp->nq_mutex);
 808         cmd = qp->nq_cmd[cqe->cqe_cid];
 809         qp->nq_cmd[cqe->cqe_cid] = NULL;
 810         qp->nq_active_cmds--;
 811         mutex_exit(&qp->nq_mutex);
 812 
 813         ASSERT(cmd != NULL);
 814         ASSERT(cmd->nc_nvme == nvme);
 815         ASSERT(cmd->nc_sqid == cqe->cqe_sqid);
 816         ASSERT(cmd->nc_sqe.sqe_cid == cqe->cqe_cid);
 817         bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t));
 818 
 819         qp->nq_sqhead = cqe->cqe_sqhd;
 820 
 821         head.b.cqhdbl_cqh = qp->nq_cqhead = (qp->nq_cqhead + 1) % qp->nq_nentry;
 822 
 823         /* Toggle phase on wrap-around. */
 824         if (qp->nq_cqhead == 0)
 825                 qp->nq_phase = qp->nq_phase ? 0 : 1;
 826 
 827         nvme_put32(cmd->nc_nvme, qp->nq_cqhdbl, head.r);
 828 
 829         return (cmd);
 830 }
 831 
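/*
 * The nvme_check_*_cmd_status() functions below translate the status code type
 * (SCT) and status code (SC) of a completed command into an errno value.
 * Status values that indicate a driver bug cause a panic; unknown or fatal
 * status values may also fence off the device.
 */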
 832 static int
 833 nvme_check_unknown_cmd_status(nvme_cmd_t *cmd)
 834 {
 835         nvme_cqe_t *cqe = &cmd->nc_cqe;
 836 
 837         dev_err(cmd->nc_nvme->n_dip, CE_WARN,
 838             "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
 839             "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
 840             cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
 841             cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);
 842 
 843         bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
 844 
 845         if (cmd->nc_nvme->n_strict_version) {
 846                 cmd->nc_nvme->n_dead = B_TRUE;
 847                 ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
 848         }
 849 
 850         return (EIO);
 851 }
 852 
 853 static int
 854 nvme_check_vendor_cmd_status(nvme_cmd_t *cmd)
 855 {
 856         nvme_cqe_t *cqe = &cmd->nc_cqe;
 857 
 858         dev_err(cmd->nc_nvme->n_dip, CE_WARN,
 859             "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
 860             "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
 861             cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
 862             cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);
 863         if (!cmd->nc_nvme->n_ignore_unknown_vendor_status) {
 864                 cmd->nc_nvme->n_dead = B_TRUE;
 865                 ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
 866         }
 867 
 868         return (EIO);
 869 }
 870 
 871 static int
 872 nvme_check_integrity_cmd_status(nvme_cmd_t *cmd)
 873 {
 874         nvme_cqe_t *cqe = &cmd->nc_cqe;
 875 
 876         switch (cqe->cqe_sf.sf_sc) {
 877         case NVME_CQE_SC_INT_NVM_WRITE:
 878                 /* write fail */
 879                 /* TODO: post ereport */
 880                 bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
 881                 return (EIO);
 882 
 883         case NVME_CQE_SC_INT_NVM_READ:
 884                 /* read fail */
 885                 /* TODO: post ereport */
 886                 bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
 887                 return (EIO);
 888 
 889         default:
 890                 return (nvme_check_unknown_cmd_status(cmd));
 891         }
 892 }
 893 
 894 static int
 895 nvme_check_generic_cmd_status(nvme_cmd_t *cmd)
 896 {
 897         nvme_cqe_t *cqe = &cmd->nc_cqe;
 898 
 899         switch (cqe->cqe_sf.sf_sc) {
 900         case NVME_CQE_SC_GEN_SUCCESS:
 901                 return (0);
 902 
 903         /*
 904          * Errors indicating a bug in the driver should cause a panic.
 905          */
 906         case NVME_CQE_SC_GEN_INV_OPC:
 907                 /* Invalid Command Opcode */
 908                 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
 909                     "invalid opcode in cmd %p", (void *)cmd);
 910                 return (0);
 911 
 912         case NVME_CQE_SC_GEN_INV_FLD:
 913                 /* Invalid Field in Command */
 914                 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
 915                     "invalid field in cmd %p", (void *)cmd);
 916                 return (0);
 917 
 918         case NVME_CQE_SC_GEN_ID_CNFL:
 919                 /* Command ID Conflict */
 920                 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
 921                     "cmd ID conflict in cmd %p", (void *)cmd);
 922                 return (0);
 923 
 924         case NVME_CQE_SC_GEN_INV_NS:
 925                 /* Invalid Namespace or Format */
 926                 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
 927                     "invalid NS/format in cmd %p", (void *)cmd);
 928                 return (0);
 929 
 930         case NVME_CQE_SC_GEN_NVM_LBA_RANGE:
 931                 /* LBA Out Of Range */
 932                 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
 933                     "LBA out of range in cmd %p", (void *)cmd);
 934                 return (0);
 935 
 936         /*
 937          * Non-fatal errors, handle gracefully.
 938          */
 939         case NVME_CQE_SC_GEN_DATA_XFR_ERR:
 940                 /* Data Transfer Error (DMA) */
 941                 /* TODO: post ereport */
 942                 atomic_inc_32(&cmd->nc_nvme->n_data_xfr_err);
 943                 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
 944                 return (EIO);
 945 
 946         case NVME_CQE_SC_GEN_INTERNAL_ERR:
 947                 /*
 948                  * Internal Error. The spec (v1.0, section 4.5.1.2) says
 949                  * detailed error information is returned as async event,
 950                  * so we pretty much ignore the error here and handle it
 951                  * in the async event handler.
 952                  */
 953                 atomic_inc_32(&cmd->nc_nvme->n_internal_err);
 954                 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
 955                 return (EIO);
 956 
 957         case NVME_CQE_SC_GEN_ABORT_REQUEST:
 958                 /*
 959                  * Command Abort Requested. This normally happens only when a
 960                  * command times out.
 961                  */
 962                 /* TODO: post ereport or change blkdev to handle this? */
 963                 atomic_inc_32(&cmd->nc_nvme->n_abort_rq_err);
 964                 return (ECANCELED);
 965 
 966         case NVME_CQE_SC_GEN_ABORT_PWRLOSS:
 967                 /* Command Aborted due to Power Loss Notification */
 968                 ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
 969                 cmd->nc_nvme->n_dead = B_TRUE;
 970                 return (EIO);
 971 
 972         case NVME_CQE_SC_GEN_ABORT_SQ_DEL:
 973                 /* Command Aborted due to SQ Deletion */
 974                 atomic_inc_32(&cmd->nc_nvme->n_abort_sq_del);
 975                 return (EIO);
 976 
 977         case NVME_CQE_SC_GEN_NVM_CAP_EXC:
 978                 /* Capacity Exceeded */
 979                 atomic_inc_32(&cmd->nc_nvme->n_nvm_cap_exc);
 980                 bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
 981                 return (EIO);
 982 
 983         case NVME_CQE_SC_GEN_NVM_NS_NOTRDY:
 984                 /* Namespace Not Ready */
 985                 atomic_inc_32(&cmd->nc_nvme->n_nvm_ns_notrdy);
 986                 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
 987                 return (EIO);
 988 
 989         default:
 990                 return (nvme_check_unknown_cmd_status(cmd));
 991         }
 992 }
 993 
 994 static int
 995 nvme_check_specific_cmd_status(nvme_cmd_t *cmd)
 996 {
 997         nvme_cqe_t *cqe = &cmd->nc_cqe;
 998 
 999         switch (cqe->cqe_sf.sf_sc) {
1000         case NVME_CQE_SC_SPC_INV_CQ:
1001                 /* Completion Queue Invalid */
1002                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE);
1003                 atomic_inc_32(&cmd->nc_nvme->n_inv_cq_err);
1004                 return (EINVAL);
1005 
1006         case NVME_CQE_SC_SPC_INV_QID:
1007                 /* Invalid Queue Identifier */
1008                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
1009                     cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_SQUEUE ||
1010                     cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE ||
1011                     cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
1012                 atomic_inc_32(&cmd->nc_nvme->n_inv_qid_err);
1013                 return (EINVAL);
1014 
1015         case NVME_CQE_SC_SPC_MAX_QSZ_EXC:
1016                 /* Max Queue Size Exceeded */
1017                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
1018                     cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
1019                 atomic_inc_32(&cmd->nc_nvme->n_max_qsz_exc);
1020                 return (EINVAL);
1021 
1022         case NVME_CQE_SC_SPC_ABRT_CMD_EXC:
1023                 /* Abort Command Limit Exceeded */
1024                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT);
1025                 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
1026                     "abort command limit exceeded in cmd %p", (void *)cmd);
1027                 return (0);
1028 
1029         case NVME_CQE_SC_SPC_ASYNC_EVREQ_EXC:
1030                 /* Async Event Request Limit Exceeded */
1031                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ASYNC_EVENT);
1032                 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
1033                     "async event request limit exceeded in cmd %p",
1034                     (void *)cmd);
1035                 return (0);
1036 
1037         case NVME_CQE_SC_SPC_INV_INT_VECT:
1038                 /* Invalid Interrupt Vector */
1039                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
1040                 atomic_inc_32(&cmd->nc_nvme->n_inv_int_vect);
1041                 return (EINVAL);
1042 
1043         case NVME_CQE_SC_SPC_INV_LOG_PAGE:
1044                 /* Invalid Log Page */
1045                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_GET_LOG_PAGE);
1046                 atomic_inc_32(&cmd->nc_nvme->n_inv_log_page);
1047                 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
1048                 return (EINVAL);
1049 
1050         case NVME_CQE_SC_SPC_INV_FORMAT:
1051                 /* Invalid Format */
1052                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_FORMAT);
1053                 atomic_inc_32(&cmd->nc_nvme->n_inv_format);
1054                 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
1055                 return (EINVAL);
1056 
1057         case NVME_CQE_SC_SPC_INV_Q_DEL:
1058                 /* Invalid Queue Deletion */
1059                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
1060                 atomic_inc_32(&cmd->nc_nvme->n_inv_q_del);
1061                 return (EINVAL);
1062 
1063         case NVME_CQE_SC_SPC_NVM_CNFL_ATTR:
1064                 /* Conflicting Attributes */
1065                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_DSET_MGMT ||
1066                     cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
1067                     cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
1068                 atomic_inc_32(&cmd->nc_nvme->n_cnfl_attr);
1069                 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
1070                 return (EINVAL);
1071 
1072         case NVME_CQE_SC_SPC_NVM_INV_PROT:
1073                 /* Invalid Protection Information */
1074                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_COMPARE ||
1075                     cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
1076                     cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
1077                 atomic_inc_32(&cmd->nc_nvme->n_inv_prot);
1078                 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
1079                 return (EINVAL);
1080 
1081         case NVME_CQE_SC_SPC_NVM_READONLY:
1082                 /* Write to Read Only Range */
1083                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
1084                 atomic_inc_32(&cmd->nc_nvme->n_readonly);
1085                 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
1086                 return (EROFS);
1087 
1088         default:
1089                 return (nvme_check_unknown_cmd_status(cmd));
1090         }
1091 }
1092 
1093 static inline int
1094 nvme_check_cmd_status(nvme_cmd_t *cmd)
1095 {
1096         nvme_cqe_t *cqe = &cmd->nc_cqe;
1097 
1098         /* take a shortcut if everything is alright */
1099         if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
1100             cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS)
1101                 return (0);
1102 
1103         if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC)
1104                 return (nvme_check_generic_cmd_status(cmd));
1105         else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC)
1106                 return (nvme_check_specific_cmd_status(cmd));
1107         else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY)
1108                 return (nvme_check_integrity_cmd_status(cmd));
1109         else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR)
1110                 return (nvme_check_vendor_cmd_status(cmd));
1111 
1112         return (nvme_check_unknown_cmd_status(cmd));
1113 }
1114 
1115 /*
1116  * nvme_abort_cmd_cb -- replaces nc_callback of aborted commands
1117  *
 * This function takes care of cleaning up aborted commands. The command
1119  * status is checked to catch any fatal errors.
1120  */
1121 static void
1122 nvme_abort_cmd_cb(void *arg)
1123 {
1124         nvme_cmd_t *cmd = arg;
1125 
1126         /*
1127          * Grab the command mutex. Once we have it we hold the last reference
1128          * to the command and can safely free it.
1129          */
1130         mutex_enter(&cmd->nc_mutex);
1131         (void) nvme_check_cmd_status(cmd);
1132         mutex_exit(&cmd->nc_mutex);
1133 
1134         nvme_free_cmd(cmd);
1135 }
1136 
1137 static void
1138 nvme_abort_cmd(nvme_cmd_t *abort_cmd)
1139 {
1140         nvme_t *nvme = abort_cmd->nc_nvme;
1141         nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1142         nvme_abort_cmd_t ac = { 0 };
1143 
1144         sema_p(&nvme->n_abort_sema);
1145 
1146         ac.b.ac_cid = abort_cmd->nc_sqe.sqe_cid;
1147         ac.b.ac_sqid = abort_cmd->nc_sqid;
1148 
1149         /*
1150          * Drop the mutex of the aborted command. From this point on
1151          * we must assume that the abort callback has freed the command.
1152          */
1153         mutex_exit(&abort_cmd->nc_mutex);
1154 
1155         cmd->nc_sqid = 0;
1156         cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT;
1157         cmd->nc_callback = nvme_wakeup_cmd;
1158         cmd->nc_sqe.sqe_cdw10 = ac.r;
1159 
1160         /*
1161          * Send the ABORT to the hardware. The ABORT command will return _after_
1162          * the aborted command has completed (aborted or otherwise).
1163          */
1164         if (nvme_admin_cmd(cmd, nvme_admin_cmd_timeout) != DDI_SUCCESS) {
1165                 sema_v(&nvme->n_abort_sema);
1166                 dev_err(nvme->n_dip, CE_WARN,
1167                     "!nvme_admin_cmd failed for ABORT");
1168                 atomic_inc_32(&nvme->n_abort_failed);
1169                 return;
1170         }
1171         sema_v(&nvme->n_abort_sema);
1172 
1173         if (nvme_check_cmd_status(cmd)) {
1174                 dev_err(nvme->n_dip, CE_WARN,
1175                     "!ABORT failed with sct = %x, sc = %x",
1176                     cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
1177                 atomic_inc_32(&nvme->n_abort_failed);
1178         } else {
1179                 atomic_inc_32(&nvme->n_cmd_aborted);
1180         }
1181 
1182         nvme_free_cmd(cmd);
1183 }
1184 
1185 /*
1186  * nvme_wait_cmd -- wait for command completion or timeout
1187  *
1188  * Returns B_TRUE if the command completed normally.
1189  *
1190  * Returns B_FALSE if the command timed out and an abort was attempted. The
1191  * command mutex will be dropped and the command must be considered freed. The
1192  * freeing of the command is normally done by the abort command callback.
1193  *
1194  * In case of a serious error or a timeout of the abort command the hardware
1195  * will be declared dead and FMA will be notified.
1196  */
1197 static boolean_t
1198 nvme_wait_cmd(nvme_cmd_t *cmd, uint_t sec)
1199 {
1200         clock_t timeout = ddi_get_lbolt() + drv_usectohz(sec * MICROSEC);
1201         nvme_t *nvme = cmd->nc_nvme;
1202         nvme_reg_csts_t csts;
1203 
1204         ASSERT(mutex_owned(&cmd->nc_mutex));
1205 
1206         while (!cmd->nc_completed) {
1207                 if (cv_timedwait(&cmd->nc_cv, &cmd->nc_mutex, timeout) == -1)
1208                         break;
1209         }
1210 
1211         if (cmd->nc_completed)
1212                 return (B_TRUE);
1213 
1214         /*
1215          * The command timed out. Change the callback to the cleanup function.
1216          */
1217         cmd->nc_callback = nvme_abort_cmd_cb;
1218 
1219         /*
1220          * Check controller for fatal status, any errors associated with the
1221          * register or DMA handle, or for a double timeout (abort command timed
1222          * out). If necessary log a warning and call FMA.
1223          */
1224         csts.r = nvme_get32(nvme, NVME_REG_CSTS);
1225         dev_err(nvme->n_dip, CE_WARN, "!command timeout, "
1226             "OPC = %x, CFS = %d", cmd->nc_sqe.sqe_opc, csts.b.csts_cfs);
1227         atomic_inc_32(&nvme->n_cmd_timeout);
1228 
1229         if (csts.b.csts_cfs ||
1230             nvme_check_regs_hdl(nvme) ||
1231             nvme_check_dma_hdl(cmd->nc_dma) ||
1232             cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT) {
1233                 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1234                 nvme->n_dead = B_TRUE;
1235                 mutex_exit(&cmd->nc_mutex);
1236         } else {
1237                 /*
1238                  * Try to abort the command. The command mutex is released by
1239                  * nvme_abort_cmd().
1240                  * If the abort succeeds it will have freed the aborted command.
1241                  * If the abort fails for other reasons we must assume that the
1242                  * command may complete at any time, and the callback will free
1243                  * it for us.
1244                  */
1245                 nvme_abort_cmd(cmd);
1246         }
1247 
1248         return (B_FALSE);
1249 }
1250 
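/*
 * nvme_wakeup_cmd -- command completion callback for synchronous commands
 *
 * Marks the command as completed and wakes up the thread sleeping in
 * nvme_wait_cmd(), handling the race with a just-expired timeout.
 */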
1251 static void
1252 nvme_wakeup_cmd(void *arg)
1253 {
1254         nvme_cmd_t *cmd = arg;
1255 
1256         mutex_enter(&cmd->nc_mutex);
1257         /*
1258          * There is a slight chance that this command completed shortly after
1259          * the timeout was hit in nvme_wait_cmd() but before the callback was
1260          * changed. Catch that case here and clean up accordingly.
1261          */
1262         if (cmd->nc_callback == nvme_abort_cmd_cb) {
1263                 mutex_exit(&cmd->nc_mutex);
1264                 nvme_abort_cmd_cb(cmd);
1265                 return;
1266         }
1267 
1268         cmd->nc_completed = B_TRUE;
1269         cv_signal(&cmd->nc_cv);
1270         mutex_exit(&cmd->nc_mutex);
1271 }
1272 
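/*
 * nvme_async_event_task -- taskq callback for asynchronous event requests
 *
 * Checks the request status, resubmits the asynchronous event request, and
 * then handles the reported event: fetching the associated log page and
 * logging the condition or faulting the device as appropriate.
 */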
1273 static void
1274 nvme_async_event_task(void *arg)
1275 {
1276         nvme_cmd_t *cmd = arg;
1277         nvme_t *nvme = cmd->nc_nvme;
1278         nvme_error_log_entry_t *error_log = NULL;
1279         nvme_health_log_t *health_log = NULL;
1280         nvme_async_event_t event;
1281         int ret;
1282 
1283         /*
1284          * Check for errors associated with the async request itself. The only
1285          * command-specific error is "async event limit exceeded", which
1286          * indicates a programming error in the driver and causes a panic in
1287          * nvme_check_cmd_status().
1288          *
1289          * Other possible errors are various scenarios where the async request
1290          * was aborted, or internal errors in the device. Internal errors are
1291          * reported to FMA, the command aborts need no special handling here.
1292          */
1293         if (nvme_check_cmd_status(cmd)) {
1294                 dev_err(cmd->nc_nvme->n_dip, CE_WARN,
1295                     "!async event request returned failure, sct = %x, "
1296                     "sc = %x, dnr = %d, m = %d", cmd->nc_cqe.cqe_sf.sf_sct,
1297                     cmd->nc_cqe.cqe_sf.sf_sc, cmd->nc_cqe.cqe_sf.sf_dnr,
1298                     cmd->nc_cqe.cqe_sf.sf_m);
1299 
1300                 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
1301                     cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INTERNAL_ERR) {
1302                         cmd->nc_nvme->n_dead = B_TRUE;
1303                         ddi_fm_service_impact(cmd->nc_nvme->n_dip,
1304                             DDI_SERVICE_LOST);
1305                 }
1306                 nvme_free_cmd(cmd);
1307                 return;
1308         }
1309 
1310 
1311         event.r = cmd->nc_cqe.cqe_dw0;
1312 
1313         /* Clear CQE and re-submit the async request. */
1314         bzero(&cmd->nc_cqe, sizeof (nvme_cqe_t));
1315         ret = nvme_submit_cmd(nvme->n_adminq, cmd);
1316 
1317         if (ret != DDI_SUCCESS) {
1318                 dev_err(nvme->n_dip, CE_WARN,
1319                     "!failed to resubmit async event request");
1320                 atomic_inc_32(&nvme->n_async_resubmit_failed);
1321                 nvme_free_cmd(cmd);
1322         }
1323 
1324         switch (event.b.ae_type) {
1325         case NVME_ASYNC_TYPE_ERROR:
1326                 if (event.b.ae_logpage == NVME_LOGPAGE_ERROR) {
1327                         error_log = (nvme_error_log_entry_t *)
1328                             nvme_get_logpage(nvme, event.b.ae_logpage);
1329                 } else {
1330                         dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
1331                             "async event reply: %d", event.b.ae_logpage);
1332                         atomic_inc_32(&nvme->n_wrong_logpage);
1333                 }
1334 
1335                 switch (event.b.ae_info) {
1336                 case NVME_ASYNC_ERROR_INV_SQ:
1337                         dev_err(nvme->n_dip, CE_PANIC, "programming error: "
1338                             "invalid submission queue");
1339                         return;
1340 
1341                 case NVME_ASYNC_ERROR_INV_DBL:
1342                         dev_err(nvme->n_dip, CE_PANIC, "programming error: "
1343                             "invalid doorbell write value");
1344                         return;
1345 
1346                 case NVME_ASYNC_ERROR_DIAGFAIL:
1347                         dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure");
1348                         ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1349                         nvme->n_dead = B_TRUE;
1350                         atomic_inc_32(&nvme->n_diagfail_event);
1351                         break;
1352 
1353                 case NVME_ASYNC_ERROR_PERSISTENT:
1354                         dev_err(nvme->n_dip, CE_WARN, "!persistent internal "
1355                             "device error");
1356                         ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1357                         nvme->n_dead = B_TRUE;
1358                         atomic_inc_32(&nvme->n_persistent_event);
1359                         break;
1360 
1361                 case NVME_ASYNC_ERROR_TRANSIENT:
1362                         dev_err(nvme->n_dip, CE_WARN, "!transient internal "
1363                             "device error");
1364                         /* TODO: send ereport */
1365                         atomic_inc_32(&nvme->n_transient_event);
1366                         break;
1367 
1368                 case NVME_ASYNC_ERROR_FW_LOAD:
1369                         dev_err(nvme->n_dip, CE_WARN,
1370                             "!firmware image load error");
1371                         atomic_inc_32(&nvme->n_fw_load_event);
1372                         break;
1373                 }
1374                 break;
1375 
1376         case NVME_ASYNC_TYPE_HEALTH:
1377                 if (event.b.ae_logpage == NVME_LOGPAGE_HEALTH) {
1378                         health_log = (nvme_health_log_t *)
1379                             nvme_get_logpage(nvme, event.b.ae_logpage, -1);
1380                 } else {
1381                         dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
1382                             "async event reply: %d", event.b.ae_logpage);
1383                         atomic_inc_32(&nvme->n_wrong_logpage);
1384                 }
1385 
1386                 switch (event.b.ae_info) {
1387                 case NVME_ASYNC_HEALTH_RELIABILITY:
1388                         dev_err(nvme->n_dip, CE_WARN,
1389                             "!device reliability compromised");
1390                         /* TODO: send ereport */
1391                         atomic_inc_32(&nvme->n_reliability_event);
1392                         break;
1393 
1394                 case NVME_ASYNC_HEALTH_TEMPERATURE:
1395                         dev_err(nvme->n_dip, CE_WARN,
1396                             "!temperature above threshold");
1397                         /* TODO: send ereport */
1398                         atomic_inc_32(&nvme->n_temperature_event);
1399                         break;
1400 
1401                 case NVME_ASYNC_HEALTH_SPARE:
1402                         dev_err(nvme->n_dip, CE_WARN,
1403                             "!spare space below threshold");
1404                         /* TODO: send ereport */
1405                         atomic_inc_32(&nvme->n_spare_event);
1406                         break;
1407                 }
1408                 break;
1409 
1410         case NVME_ASYNC_TYPE_VENDOR:
1411                 dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event "
1412                     "received, info = %x, logpage = %x", event.b.ae_info,
1413                     event.b.ae_logpage);
1414                 atomic_inc_32(&nvme->n_vendor_event);
1415                 break;
1416 
1417         default:
1418                 dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, "
1419                     "type = %x, info = %x, logpage = %x", event.b.ae_type,
1420                     event.b.ae_info, event.b.ae_logpage);
1421                 atomic_inc_32(&nvme->n_unknown_event);
1422                 break;
1423         }
1424 
1425         if (error_log)
1426                 kmem_free(error_log, sizeof (nvme_error_log_entry_t) *
1427                     nvme->n_error_log_len);
1428 
1429         if (health_log)
1430                 kmem_free(health_log, sizeof (nvme_health_log_t));
1431 }
1432 
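     /*
      * A brief note on the helper below (derived from the code as written):
      * it submits an admin command and waits synchronously for completion.
      * On submission failure the command is freed here; on timeout it is
      * left alone, as the abort posted by nvme_wait_cmd() is expected to
      * take care of the cleanup (and of dropping nc_mutex).
      */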
1433 static int
1434 nvme_admin_cmd(nvme_cmd_t *cmd, int sec)
1435 {
1436         int ret;
1437 
1438         mutex_enter(&cmd->nc_mutex);
1439         ret = nvme_submit_cmd(cmd->nc_nvme->n_adminq, cmd);
1440 
1441         if (ret != DDI_SUCCESS) {
1442                 mutex_exit(&cmd->nc_mutex);
1443                 dev_err(cmd->nc_nvme->n_dip, CE_WARN,
1444                     "!nvme_submit_cmd failed");
1445                 atomic_inc_32(&cmd->nc_nvme->n_admin_queue_full);
1446                 nvme_free_cmd(cmd);
1447                 return (DDI_FAILURE);
1448         }
1449 
1450         if (nvme_wait_cmd(cmd, sec) == B_FALSE) {
1451                 /*
1452                  * The command timed out. An abort command was posted that
1453                  * will take care of the cleanup.
1454                  */
1455                 return (DDI_FAILURE);
1456         }
1457         mutex_exit(&cmd->nc_mutex);
1458 
1459         return (DDI_SUCCESS);
1460 }
1461 
1462 static int
1463 nvme_async_event(nvme_t *nvme)
1464 {
1465         nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1466         int ret;
1467 
1468         cmd->nc_sqid = 0;
1469         cmd->nc_sqe.sqe_opc = NVME_OPC_ASYNC_EVENT;
1470         cmd->nc_callback = nvme_async_event_task;
1471 
1472         ret = nvme_submit_cmd(nvme->n_adminq, cmd);
1473 
1474         if (ret != DDI_SUCCESS) {
1475                 dev_err(nvme->n_dip, CE_WARN,
1476                     "!nvme_submit_cmd failed for ASYNCHRONOUS EVENT");
1477                 nvme_free_cmd(cmd);
1478                 return (DDI_FAILURE);
1479         }
1480 
1481         return (DDI_SUCCESS);
1482 }
1483 
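     /*
      * Fetch a copy of the requested log page into a freshly allocated
      * buffer. For the health/SMART page a namespace id is expected as the
      * variadic argument (-1 selects the controller-wide page); the caller
      * is responsible for freeing the returned buffer.
      */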
1484 static void *
1485 nvme_get_logpage(nvme_t *nvme, uint8_t logpage, ...)
1486 {
1487         nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1488         void *buf = NULL;
1489         nvme_getlogpage_t getlogpage = { 0 };
1490         size_t bufsize;
1491         va_list ap;
1492 
1493         va_start(ap, logpage);
1494 
1495         cmd->nc_sqid = 0;
1496         cmd->nc_callback = nvme_wakeup_cmd;
1497         cmd->nc_sqe.sqe_opc = NVME_OPC_GET_LOG_PAGE;
1498 
1499         getlogpage.b.lp_lid = logpage;
1500 
1501         switch (logpage) {
1502         case NVME_LOGPAGE_ERROR:
1503                 cmd->nc_sqe.sqe_nsid = (uint32_t)-1;
1504                 bufsize = nvme->n_error_log_len *
1505                     sizeof (nvme_error_log_entry_t);
1506                 break;
1507 
1508         case NVME_LOGPAGE_HEALTH:
1509                 cmd->nc_sqe.sqe_nsid = va_arg(ap, uint32_t);
1510                 bufsize = sizeof (nvme_health_log_t);
1511                 break;
1512 
1513         case NVME_LOGPAGE_FWSLOT:
1514                 cmd->nc_sqe.sqe_nsid = (uint32_t)-1;
1515                 bufsize = sizeof (nvme_fwslot_log_t);
1516                 break;
1517 
1518         default:
1519                 dev_err(nvme->n_dip, CE_WARN, "!unknown log page requested: %d",
1520                     logpage);
1521                 atomic_inc_32(&nvme->n_unknown_logpage);
1522                 goto fail;
1523         }
1524 
1525         va_end(ap);
1526 
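             /*
              * NUMD is a zero-based dword count, e.g. the 512-byte health
              * page is requested as 512 / 4 - 1 = 127.
              */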
1527         getlogpage.b.lp_numd = bufsize / sizeof (uint32_t) - 1;
1528 
1529         cmd->nc_sqe.sqe_cdw10 = getlogpage.r;
1530 
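             /*
              * The DMA buffer must cover the full bufsize: the controller
              * returns NUMD + 1 dwords and we bcopy() bufsize bytes out of
              * it below.
              */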
1531         if (nvme_zalloc_dma(nvme, bufsize,
1532             DDI_DMA_READ, &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
1533                 dev_err(nvme->n_dip, CE_WARN,
1534                     "!nvme_zalloc_dma failed for GET LOG PAGE");
1535                 goto fail;
1536         }
1537 
1538         if (cmd->nc_dma->nd_ncookie > 2) {
1539                 dev_err(nvme->n_dip, CE_WARN,
1540                     "!too many DMA cookies for GET LOG PAGE");
1541                 atomic_inc_32(&nvme->n_too_many_cookies);
1542                 goto fail;
1543         }
1544 
1545         cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress;
1546         if (cmd->nc_dma->nd_ncookie > 1) {
1547                 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
1548                     &cmd->nc_dma->nd_cookie);
1549                 cmd->nc_sqe.sqe_dptr.d_prp[1] =
1550                     cmd->nc_dma->nd_cookie.dmac_laddress;
1551         }
1552 
1553         if (nvme_admin_cmd(cmd, nvme_admin_cmd_timeout) != DDI_SUCCESS) {
1554                 dev_err(nvme->n_dip, CE_WARN,
1555                     "!nvme_admin_cmd failed for GET LOG PAGE");
1556                 return (NULL);
1557         }
1558 
1559         if (nvme_check_cmd_status(cmd)) {
1560                 dev_err(nvme->n_dip, CE_WARN,
1561                     "!GET LOG PAGE failed with sct = %x, sc = %x",
1562                     cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
1563                 goto fail;
1564         }
1565 
1566         buf = kmem_alloc(bufsize, KM_SLEEP);
1567         bcopy(cmd->nc_dma->nd_memp, buf, bufsize);
1568 
1569 fail:
1570         nvme_free_cmd(cmd);
1571 
1572         return (buf);
1573 }
1574 
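     /*
      * Retrieve the 4096-byte IDENTIFY data structure, either for the
      * controller (nsid == 0) or for the given namespace. The returned
      * buffer is allocated with kmem_alloc(NVME_IDENTIFY_BUFSIZE) and must
      * be freed by the caller.
      */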
1575 static void *
1576 nvme_identify(nvme_t *nvme, uint32_t nsid)
1577 {
1578         nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1579         void *buf = NULL;
1580 
1581         cmd->nc_sqid = 0;
1582         cmd->nc_callback = nvme_wakeup_cmd;
1583         cmd->nc_sqe.sqe_opc = NVME_OPC_IDENTIFY;
1584         cmd->nc_sqe.sqe_nsid = nsid;
1585         cmd->nc_sqe.sqe_cdw10 = nsid ? NVME_IDENTIFY_NSID : NVME_IDENTIFY_CTRL;
1586 
1587         if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ,
1588             &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
1589                 dev_err(nvme->n_dip, CE_WARN,
1590                     "!nvme_zalloc_dma failed for IDENTIFY");
1591                 goto fail;
1592         }
1593 
1594         if (cmd->nc_dma->nd_ncookie > 2) {
1595                 dev_err(nvme->n_dip, CE_WARN,
1596                     "!too many DMA cookies for IDENTIFY");
1597                 atomic_inc_32(&nvme->n_too_many_cookies);
1598                 goto fail;
1599         }
1600 
1601         cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress;
1602         if (cmd->nc_dma->nd_ncookie > 1) {
1603                 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
1604                     &cmd->nc_dma->nd_cookie);
1605                 cmd->nc_sqe.sqe_dptr.d_prp[1] =
1606                     cmd->nc_dma->nd_cookie.dmac_laddress;
1607         }
1608 
1609         if (nvme_admin_cmd(cmd, nvme_admin_cmd_timeout) != DDI_SUCCESS) {
1610                 dev_err(nvme->n_dip, CE_WARN,
1611                     "!nvme_admin_cmd failed for IDENTIFY");
1612                 return (NULL);
1613         }
1614 
1615         if (nvme_check_cmd_status(cmd)) {
1616                 dev_err(nvme->n_dip, CE_WARN,
1617                     "!IDENTIFY failed with sct = %x, sc = %x",
1618                     cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
1619                 goto fail;
1620         }
1621 
1622         buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP);
1623         bcopy(cmd->nc_dma->nd_memp, buf, NVME_IDENTIFY_BUFSIZE);
1624 
1625 fail:
1626         nvme_free_cmd(cmd);
1627 
1628         return (buf);
1629 }
1630 
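     /*
      * Set a controller feature. Only the volatile write cache and the
      * number of queues features are handled here; the feature-specific
      * result is returned to the caller from dword 0 of the completion
      * queue entry.
      */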
1631 static boolean_t
1632 nvme_set_features(nvme_t *nvme, uint32_t nsid, uint8_t feature, uint32_t val,
1633     uint32_t *res)
1634 {
1635         _NOTE(ARGUNUSED(nsid));
1636         nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1637         boolean_t ret = B_FALSE;
1638 
1639         ASSERT(res != NULL);
1640 
1641         cmd->nc_sqid = 0;
1642         cmd->nc_callback = nvme_wakeup_cmd;
1643         cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES;
1644         cmd->nc_sqe.sqe_cdw10 = feature;
1645         cmd->nc_sqe.sqe_cdw11 = val;
1646 
1647         switch (feature) {
1648         case NVME_FEAT_WRITE_CACHE:
1649                 if (!nvme->n_write_cache_present)
1650                         goto fail;
1651                 break;
1652 
1653         case NVME_FEAT_NQUEUES:
1654                 break;
1655 
1656         default:
1657                 goto fail;
1658         }
1659 
1660         if (nvme_admin_cmd(cmd, nvme_admin_cmd_timeout) != DDI_SUCCESS) {
1661                 dev_err(nvme->n_dip, CE_WARN,
1662                     "!nvme_admin_cmd failed for SET FEATURES");
1663                 return (ret);
1664         }
1665 
1666         if (nvme_check_cmd_status(cmd)) {
1667                 dev_err(nvme->n_dip, CE_WARN,
1668                     "!SET FEATURES %d failed with sct = %x, sc = %x",
1669                     feature, cmd->nc_cqe.cqe_sf.sf_sct,
1670                     cmd->nc_cqe.cqe_sf.sf_sc);
1671                 goto fail;
1672         }
1673 
1674         *res = cmd->nc_cqe.cqe_dw0;
1675         ret = B_TRUE;
1676 
1677 fail:
1678         nvme_free_cmd(cmd);
1679         return (ret);
1680 }
1681 
1682 static boolean_t
1683 nvme_write_cache_set(nvme_t *nvme, boolean_t enable)
1684 {
1685         nvme_write_cache_t nwc = { 0 };
1686 
1687         if (enable)
1688                 nwc.b.wc_wce = 1;
1689 
1690         if (!nvme_set_features(nvme, 0, NVME_FEAT_WRITE_CACHE, nwc.r, &nwc.r))
1691                 return (B_FALSE);
1692 
1693         return (B_TRUE);
1694 }
1695 
1696 static int
1697 nvme_set_nqueues(nvme_t *nvme, uint16_t nqueues)
1698 {
1699         nvme_nqueue_t nq = { 0 };
1700 
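             /*
              * The Number of Queues feature uses zero-based counts: asking
              * for e.g. 8 queue pairs is encoded as nq_nsq = nq_ncq = 7, and
              * the counts granted by the controller come back zero-based as
              * well, hence the "+ 1" in the return value below.
              */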
1701         nq.b.nq_nsq = nq.b.nq_ncq = nqueues - 1;
1702 
1703         if (!nvme_set_features(nvme, 0, NVME_FEAT_NQUEUES, nq.r, &nq.r)) {
1704                 return (0);
1705         }
1706 
1707         /*
1708          * Always use the same number of submission and completion queues, and
1709          * never use more than the requested number of queues.
1710          */
1711         return (MIN(nqueues, MIN(nq.b.nq_nsq, nq.b.nq_ncq) + 1));
1712 }
1713 
1714 static int
1715 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx)
1716 {
1717         nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1718         nvme_create_queue_dw10_t dw10 = { 0 };
1719         nvme_create_cq_dw11_t c_dw11 = { 0 };
1720         nvme_create_sq_dw11_t s_dw11 = { 0 };
1721 
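             /*
              * The completion queue must be created first so that the
              * submission queue created afterwards can reference it via
              * sq_cqid. cq_iv maps this queue pair to interrupt vector
              * idx % n_intr_cnt, matching the assumption made in nvme_intr().
              */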
1722         dw10.b.q_qid = idx;
1723         dw10.b.q_qsize = qp->nq_nentry - 1;
1724 
1725         c_dw11.b.cq_pc = 1;
1726         c_dw11.b.cq_ien = 1;
1727         c_dw11.b.cq_iv = idx % nvme->n_intr_cnt;
1728 
1729         cmd->nc_sqid = 0;
1730         cmd->nc_callback = nvme_wakeup_cmd;
1731         cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE;
1732         cmd->nc_sqe.sqe_cdw10 = dw10.r;
1733         cmd->nc_sqe.sqe_cdw11 = c_dw11.r;
1734         cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_cqdma->nd_cookie.dmac_laddress;
1735 
1736         if (nvme_admin_cmd(cmd, nvme_admin_cmd_timeout) != DDI_SUCCESS) {
1737                 dev_err(nvme->n_dip, CE_WARN,
1738                     "!nvme_admin_cmd failed for CREATE CQUEUE");
1739                 return (DDI_FAILURE);
1740         }
1741 
1742         if (nvme_check_cmd_status(cmd)) {
1743                 dev_err(nvme->n_dip, CE_WARN,
1744                     "!CREATE CQUEUE failed with sct = %x, sc = %x",
1745                     cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
1746                 nvme_free_cmd(cmd);
1747                 return (DDI_FAILURE);
1748         }
1749 
1750         nvme_free_cmd(cmd);
1751 
1752         s_dw11.b.sq_pc = 1;
1753         s_dw11.b.sq_cqid = idx;
1754 
1755         cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1756         cmd->nc_sqid = 0;
1757         cmd->nc_callback = nvme_wakeup_cmd;
1758         cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_SQUEUE;
1759         cmd->nc_sqe.sqe_cdw10 = dw10.r;
1760         cmd->nc_sqe.sqe_cdw11 = s_dw11.r;
1761         cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_sqdma->nd_cookie.dmac_laddress;
1762 
1763         if (nvme_admin_cmd(cmd, nvme_admin_cmd_timeout) != DDI_SUCCESS) {
1764                 dev_err(nvme->n_dip, CE_WARN,
1765                     "!nvme_admin_cmd failed for CREATE SQUEUE");
1766                 return (DDI_FAILURE);
1767         }
1768 
1769         if (nvme_check_cmd_status(cmd)) {
1770                 dev_err(nvme->n_dip, CE_WARN,
1771                     "!CREATE SQUEUE failed with sct = %x, sc = %x",
1772                     cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
1773                 nvme_free_cmd(cmd);
1774                 return (DDI_FAILURE);
1775         }
1776 
1777         nvme_free_cmd(cmd);
1778 
1779         return (DDI_SUCCESS);
1780 }
1781 
1782 static boolean_t
1783 nvme_reset(nvme_t *nvme, boolean_t quiesce)
1784 {
1785         nvme_reg_csts_t csts;
1786         int i;
1787 
1788         nvme_put32(nvme, NVME_REG_CC, 0);
1789 
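             /*
              * n_timeout is CAP.TO in units of 500ms, so polling up to
              * n_timeout * 10 times with a 50ms delay below waits at most as
              * long as the timeout advertised by the controller.
              */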
1790         csts.r = nvme_get32(nvme, NVME_REG_CSTS);
1791         if (csts.b.csts_rdy == 1) {
1792                 nvme_put32(nvme, NVME_REG_CC, 0);
1793                 for (i = 0; i != nvme->n_timeout * 10; i++) {
1794                         csts.r = nvme_get32(nvme, NVME_REG_CSTS);
1795                         if (csts.b.csts_rdy == 0)
1796                                 break;
1797 
1798                         if (quiesce)
1799                                 drv_usecwait(50000);
1800                         else
1801                                 delay(drv_usectohz(50000));
1802                 }
1803         }
1804 
1805         nvme_put32(nvme, NVME_REG_AQA, 0);
1806         nvme_put32(nvme, NVME_REG_ASQ, 0);
1807         nvme_put32(nvme, NVME_REG_ACQ, 0);
1808 
1809         csts.r = nvme_get32(nvme, NVME_REG_CSTS);
1810         return (csts.b.csts_rdy == 0 ? B_TRUE : B_FALSE);
1811 }
1812 
1813 static void
1814 nvme_shutdown(nvme_t *nvme, int mode, boolean_t quiesce)
1815 {
1816         nvme_reg_cc_t cc;
1817         nvme_reg_csts_t csts;
1818         int i;
1819 
1820         ASSERT(mode == NVME_CC_SHN_NORMAL || mode == NVME_CC_SHN_ABRUPT);
1821 
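             /*
              * Request the shutdown via CC.SHN and poll CSTS.SHST for up to
              * one second (10 x 100ms) for the shutdown to complete.
              */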
1822         cc.r = nvme_get32(nvme, NVME_REG_CC);
1823         cc.b.cc_shn = mode & 0x3;
1824         nvme_put32(nvme, NVME_REG_CC, cc.r);
1825 
1826         for (i = 0; i != 10; i++) {
1827                 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
1828                 if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE)
1829                         break;
1830 
1831                 if (quiesce)
1832                         drv_usecwait(100000);
1833                 else
1834                         delay(drv_usectohz(100000));
1835         }
1836 }
1837 
1838 
1839 static void
1840 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid)
1841 {
1842         /*
1843          * Section 7.7 of the spec describes how to get a unique ID for
1844          * the controller: the vendor ID, the model name and the serial
1845          * number shall be unique when combined.
1846          *
1847          * If a namespace has no EUI64 we use the above and add the hex
1848          * namespace ID to get a unique ID for the namespace.
1849          */
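             /*
              * The resulting devid has the form "<vid>-<model>-<serial>-<nsid>",
              * with the vendor and namespace ids printed in hex.
              */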
1850         char model[sizeof (nvme->n_idctl->id_model) + 1];
1851         char serial[sizeof (nvme->n_idctl->id_serial) + 1];
1852 
1853         bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
1854         bcopy(nvme->n_idctl->id_serial, serial,
1855             sizeof (nvme->n_idctl->id_serial));
1856 
1857         model[sizeof (nvme->n_idctl->id_model)] = '\0';
1858         serial[sizeof (nvme->n_idctl->id_serial)] = '\0';
1859 
1860         nvme->n_ns[nsid - 1].ns_devid = kmem_asprintf("%4X-%s-%s-%X",
1861             nvme->n_idctl->id_vid, model, serial, nsid);
1862 }
1863 
1864 static int
1865 nvme_init(nvme_t *nvme)
1866 {
1867         nvme_reg_cc_t cc = { 0 };
1868         nvme_reg_aqa_t aqa = { 0 };
1869         nvme_reg_asq_t asq = { 0 };
1870         nvme_reg_acq_t acq = { 0 };
1871         nvme_reg_cap_t cap;
1872         nvme_reg_vs_t vs;
1873         nvme_reg_csts_t csts;
1874         int i = 0;
1875         int nqueues;
1876         char model[sizeof (nvme->n_idctl->id_model) + 1];
1877         char *vendor, *product;
1878 
1879         /* Check controller version */
1880         vs.r = nvme_get32(nvme, NVME_REG_VS);
1881         nvme->n_version.v_major = vs.b.vs_mjr;
1882         nvme->n_version.v_minor = vs.b.vs_mnr;
1883         dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d",
1884             nvme->n_version.v_major, nvme->n_version.v_minor);
1885 
1886         if (NVME_VERSION_HIGHER(&nvme->n_version,
1887             nvme_version_major, nvme_version_minor)) {
1888                 dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.%d",
1889                     nvme_version_major, nvme_version_minor);
1890                 if (nvme->n_strict_version)
1891                         goto fail;
1892         }
1893 
1894         /* retrieve controller configuration */
1895         cap.r = nvme_get64(nvme, NVME_REG_CAP);
1896 
1897         if ((cap.b.cap_css & NVME_CAP_CSS_NVM) == 0) {
1898                 dev_err(nvme->n_dip, CE_WARN,
1899                     "!NVM command set not supported by hardware");
1900                 goto fail;
1901         }
1902 
1903         nvme->n_nssr_supported = cap.b.cap_nssrs;
1904         nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd;
1905         nvme->n_timeout = cap.b.cap_to;
1906         nvme->n_arbitration_mechanisms = cap.b.cap_ams;
1907         nvme->n_cont_queues_reqd = cap.b.cap_cqr;
1908         nvme->n_max_queue_entries = cap.b.cap_mqes + 1;
1909 
1910         /*
1911          * The MPSMIN and MPSMAX fields in the CAP register use 0 to specify
1912          * the base page size of 4k (1<<12), so add 12 here to get the real
1913          * page size value.
1914          */
1915         nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT),
1916             cap.b.cap_mpsmax + 12);
1917         nvme->n_pagesize = 1UL << (nvme->n_pageshift);
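             /*
              * On a typical x86 system with MPSMIN == 0 and PAGESHIFT == 12
              * this yields n_pageshift == 12 and n_pagesize == 4096.
              */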
1918 
1919         /*
1920          * Set up Queue DMA to transfer at least 1 page-aligned page at a time.
1921          */
1922         nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize;
1923         nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
1924 
1925         /*
1926          * Set up PRP DMA to transfer 1 page-aligned page at a time.
1927          * Maxxfer may be increased once we have identified the controller limits.
1928          */
1929         nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize;
1930         nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
1931         nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize;
1932         nvme->n_prp_dma_attr.dma_attr_seg = nvme->n_pagesize - 1;
1933 
1934         /*
1935          * Reset controller if it's still in ready state.
1936          */
1937         if (nvme_reset(nvme, B_FALSE) == B_FALSE) {
1938                 dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller");
1939                 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1940                 nvme->n_dead = B_TRUE;
1941                 goto fail;
1942         }
1943 
1944         /*
1945          * Create the admin queue pair.
1946          */
1947         if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0)
1948             != DDI_SUCCESS) {
1949                 dev_err(nvme->n_dip, CE_WARN,
1950                     "!unable to allocate admin qpair");
1951                 goto fail;
1952         }
1953         nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP);
1954         nvme->n_ioq[0] = nvme->n_adminq;
1955 
1956         nvme->n_progress |= NVME_ADMIN_QUEUE;
1957 
1958         (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
1959             "admin-queue-len", nvme->n_admin_queue_len);
1960 
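             /*
              * AQA takes the zero-based admin submission and completion queue
              * sizes; ASQ and ACQ take the physical base addresses of the two
              * queues, which must be page aligned (hence the ASSERTs below).
              */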
1961         aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1;
1962         asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress;
1963         acq = nvme->n_adminq->nq_cqdma->nd_cookie.dmac_laddress;
1964 
1965         ASSERT((asq & (nvme->n_pagesize - 1)) == 0);
1966         ASSERT((acq & (nvme->n_pagesize - 1)) == 0);
1967 
1968         nvme_put32(nvme, NVME_REG_AQA, aqa.r);
1969         nvme_put64(nvme, NVME_REG_ASQ, asq);
1970         nvme_put64(nvme, NVME_REG_ACQ, acq);
1971 
1972         cc.b.cc_ams = 0;        /* use Round-Robin arbitration */
1973         cc.b.cc_css = 0;        /* use NVM command set */
1974         cc.b.cc_mps = nvme->n_pageshift - 12;
1975         cc.b.cc_shn = 0;        /* no shutdown in progress */
1976         cc.b.cc_en = 1;         /* enable controller */
1977         cc.b.cc_iosqes = 6;     /* submission queue entry is 2^6 bytes long */
1978         cc.b.cc_iocqes = 4;     /* completion queue entry is 2^4 bytes long */
1979 
1980         nvme_put32(nvme, NVME_REG_CC, cc.r);
1981 
1982         /*
1983          * Wait for the controller to become ready.
1984          */
1985         csts.r = nvme_get32(nvme, NVME_REG_CSTS);
1986         if (csts.b.csts_rdy == 0) {
1987                 for (i = 0; i != nvme->n_timeout * 10; i++) {
1988                         delay(drv_usectohz(50000));
1989                         csts.r = nvme_get32(nvme, NVME_REG_CSTS);
1990 
1991                         if (csts.b.csts_cfs == 1) {
1992                                 dev_err(nvme->n_dip, CE_WARN,
1993                                     "!controller fatal status at init");
1994                                 ddi_fm_service_impact(nvme->n_dip,
1995                                     DDI_SERVICE_LOST);
1996                                 nvme->n_dead = B_TRUE;
1997                                 goto fail;
1998                         }
1999 
2000                         if (csts.b.csts_rdy == 1)
2001                                 break;
2002                 }
2003         }
2004 
2005         if (csts.b.csts_rdy == 0) {
2006                 dev_err(nvme->n_dip, CE_WARN, "!controller not ready");
2007                 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
2008                 nvme->n_dead = B_TRUE;
2009                 goto fail;
2010         }
2011 
2012         /*
2013          * Assume an abort command limit of 1. We'll destroy and re-initialize
2014          * the semaphore later, once we know the true abort command limit.
2015          */
2016         sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL);
2017 
2018         /*
2019          * Set up the initial interrupt for the admin queue.
2020          */
2021         if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 1)
2022             != DDI_SUCCESS) &&
2023             (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 1)
2024             != DDI_SUCCESS) &&
2025             (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1)
2026             != DDI_SUCCESS)) {
2027                 dev_err(nvme->n_dip, CE_WARN,
2028                     "!failed to setup initial interrupt");
2029                 goto fail;
2030         }
2031 
2032         /*
2033          * Post an asynchronous event command to catch errors.
2034          */
2035         if (nvme_async_event(nvme) != DDI_SUCCESS) {
2036                 dev_err(nvme->n_dip, CE_WARN,
2037                     "!failed to post async event");
2038                 goto fail;
2039         }
2040 
2041         /*
2042          * Identify Controller
2043          */
2044         nvme->n_idctl = nvme_identify(nvme, 0);
2045         if (nvme->n_idctl == NULL) {
2046                 dev_err(nvme->n_dip, CE_WARN,
2047                     "!failed to identify controller");
2048                 goto fail;
2049         }
2050 
2051         /*
2052          * Get Vendor & Product ID
2053          */
2054         bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
2055         model[sizeof (nvme->n_idctl->id_model)] = '\0';
2056         sata_split_model(model, &vendor, &product);
2057 
2058         if (vendor == NULL)
2059                 nvme->n_vendor = strdup("NVMe");
2060         else
2061                 nvme->n_vendor = strdup(vendor);
2062 
2063         nvme->n_product = strdup(product);
2064 
2065         /*
2066          * Get controller limits.
2067          */
2068         nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT,
2069             MIN(nvme->n_admin_queue_len / 10,
2070             MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit)));
2071 
2072         (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
2073             "async-event-limit", nvme->n_async_event_limit);
2074 
2075         nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1;
2076 
2077         /*
2078          * Reinitialize the semaphore with the true abort command limit
2079          * supported by the hardware. It's not necessary to disable interrupts
2080          * as only command aborts use the semaphore, and no commands are
2081          * executed or aborted while we're here.
2082          */
2083         sema_destroy(&nvme->n_abort_sema);
2084         sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL,
2085             SEMA_DRIVER, NULL);
2086 
2087         nvme->n_progress |= NVME_CTRL_LIMITS;
2088 
2089         if (nvme->n_idctl->id_mdts == 0)
2090                 nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536;
2091         else
2092                 nvme->n_max_data_transfer_size =
2093                     1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts);
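             /*
              * E.g. an id_mdts of 5 with a 4k page size yields a maximum
              * transfer size of 1 << (12 + 5) = 128k.
              */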
2094 
2095         nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1;
2096 
2097         /*
2098          * Limit n_max_data_transfer_size to what we can handle in one PRP.
2099          * Chained PRPs are currently unsupported.
2100          *
2101          * This is a no-op on hardware which doesn't support a transfer size
2102          * big enough to require chained PRPs.
2103          */
2104         nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size,
2105             (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize));
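             /*
              * With 4k pages a single PRP list holds 512 entries, so the
              * transfer size is capped at 512 * 4k = 2MB here.
              */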
2106 
2107         nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size;
2108 
2109         /*
2110          * Make sure the required minimum queue entry sizes are not larger,
2111          * and the supported maximum sizes not smaller, than the ones we use.
2112          */
2113 
2114         if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) ||
2115             ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) ||
2116             ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) ||
2117             ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t)))
2118                 goto fail;
2119 
2120         /*
2121          * Check for the presence of a Volatile Write Cache. If present,
2122          * enable or disable based on the value of the property
2123          * volatile-write-cache-enable (default is enabled).
2124          */
2125         nvme->n_write_cache_present =
2126             nvme->n_idctl->id_vwc.vwc_present == 0 ? B_FALSE : B_TRUE;
2127 
2128         (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
2129             "volatile-write-cache-present",
2130             nvme->n_write_cache_present ? 1 : 0);
2131 
2132         if (!nvme->n_write_cache_present) {
2133                 nvme->n_write_cache_enabled = B_FALSE;
2134         } else if (!nvme_write_cache_set(nvme, nvme->n_write_cache_enabled)) {
2135                 dev_err(nvme->n_dip, CE_WARN,
2136                     "!failed to %sable volatile write cache",
2137                     nvme->n_write_cache_enabled ? "en" : "dis");
2138                 /*
2139                  * Assume the cache is (still) enabled.
2140                  */
2141                 nvme->n_write_cache_enabled = B_TRUE;
2142         }
2143 
2144         (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
2145             "volatile-write-cache-enable",
2146             nvme->n_write_cache_enabled ? 1 : 0);
2147 
2148         /*
2149          * Grab a copy of all mandatory log pages.
2150          *
2151          * TODO: should go away once user space tool exists to print logs
2152          */
2153         nvme->n_error_log = (nvme_error_log_entry_t *)
2154             nvme_get_logpage(nvme, NVME_LOGPAGE_ERROR);
2155         nvme->n_health_log = (nvme_health_log_t *)
2156             nvme_get_logpage(nvme, NVME_LOGPAGE_HEALTH, -1);
2157         nvme->n_fwslot_log = (nvme_fwslot_log_t *)
2158             nvme_get_logpage(nvme, NVME_LOGPAGE_FWSLOT);
2159 
2160         /*
2161          * Identify Namespaces
2162          */
2163         nvme->n_namespace_count = nvme->n_idctl->id_nn;
2164         nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) *
2165             nvme->n_namespace_count, KM_SLEEP);
2166 
2167         for (i = 0; i != nvme->n_namespace_count; i++) {
2168                 nvme_identify_nsid_t *idns;
2169                 int last_rp;
2170 
2171                 nvme->n_ns[i].ns_nvme = nvme;
2172                 nvme->n_ns[i].ns_idns = idns = nvme_identify(nvme, i + 1);
2173 
2174                 if (idns == NULL) {
2175                         dev_err(nvme->n_dip, CE_WARN,
2176                             "!failed to identify namespace %d", i + 1);
2177                         goto fail;
2178                 }
2179 
2180                 nvme->n_ns[i].ns_id = i + 1;
2181                 nvme->n_ns[i].ns_block_count = idns->id_nsize;
2182                 nvme->n_ns[i].ns_block_size =
2183                     1 << idns->id_lbaf[idns->id_flbas.lba_format].lbaf_lbads;
2184                 nvme->n_ns[i].ns_best_block_size = nvme->n_ns[i].ns_block_size;
2185 
2186                 /*
2187                  * Get the EUI64 if present. If not present prepare the devid
2188                  * from other device data.
2189                  */
2190                 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1))
2191                         bcopy(idns->id_eui64, nvme->n_ns[i].ns_eui64,
2192                             sizeof (nvme->n_ns[i].ns_eui64));
2193 
2194                 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
2195                 if (*(uint64_t *)nvme->n_ns[i].ns_eui64 == 0) {
2196                         nvme_prepare_devid(nvme, nvme->n_ns[i].ns_id);
2197                 } else {
2198                         /*
2199                          * Until EUI64 support is tested on real hardware we
2200                          * will ignore namespaces with an EUI64. This can
2201                          * be overridden by setting strict-version=0 in nvme.conf.
2202                          */
2203                         if (nvme->n_strict_version)
2204                                 nvme->n_ns[i].ns_ignore = B_TRUE;
2205                 }
2206 
2207                 /*
2208                  * Find the LBA format with no metadata and the best relative
2209                  * performance. A value of 3 means "degraded", 0 is best.
2210                  */
2211                 last_rp = 3;
2212                 for (int j = 0; j <= idns->id_nlbaf; j++) {
2213                         if (idns->id_lbaf[j].lbaf_lbads == 0)
2214                                 break;
2215                         if (idns->id_lbaf[j].lbaf_ms != 0)
2216                                 continue;
2217                         if (idns->id_lbaf[j].lbaf_rp >= last_rp)
2218                                 continue;
2219                         last_rp = idns->id_lbaf[j].lbaf_rp;
2220                         nvme->n_ns[i].ns_best_block_size =
2221                             1 << idns->id_lbaf[j].lbaf_lbads;
2222                 }
2223 
2224                 if (nvme->n_ns[i].ns_best_block_size < nvme->n_min_block_size)
2225                         nvme->n_ns[i].ns_best_block_size =
2226                             nvme->n_min_block_size;
2227 
2228                 /*
2229                  * We currently don't support namespaces that use either:
2230                  * - thin provisioning
2231                  * - protection information
2232                  */
2233                 if (idns->id_nsfeat.f_thin ||
2234                     idns->id_dps.dp_pinfo) {
2235                         dev_err(nvme->n_dip, CE_WARN,
2236                             "!ignoring namespace %d, unsupported features: "
2237                             "thin = %d, pinfo = %d", i + 1,
2238                             idns->id_nsfeat.f_thin, idns->id_dps.dp_pinfo);
2239                         nvme->n_ns[i].ns_ignore = B_TRUE;
2240                 }
2241         }
2242 
2243         /*
2244          * Try to set up MSI/MSI-X interrupts.
2245          */
2246         if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX))
2247             != 0) {
2248                 nvme_release_interrupts(nvme);
2249 
2250                 nqueues = MIN(UINT16_MAX, ncpus);
2251 
2252                 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX,
2253                     nqueues) != DDI_SUCCESS) &&
2254                     (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI,
2255                     nqueues) != DDI_SUCCESS)) {
2256                         dev_err(nvme->n_dip, CE_WARN,
2257                             "!failed to setup MSI/MSI-X interrupts");
2258                         goto fail;
2259                 }
2260         }
2261 
2262         nqueues = nvme->n_intr_cnt;
2263 
2264         /*
2265          * Create I/O queue pairs.
2266          */
2267         nvme->n_ioq_count = nvme_set_nqueues(nvme, nqueues);
2268         if (nvme->n_ioq_count == 0) {
2269                 dev_err(nvme->n_dip, CE_WARN,
2270                     "!failed to set number of I/O queues to %d", nqueues);
2271                 goto fail;
2272         }
2273 
2274         /*
2275          * Reallocate I/O queue array
2276          */
2277         kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *));
2278         nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) *
2279             (nvme->n_ioq_count + 1), KM_SLEEP);
2280         nvme->n_ioq[0] = nvme->n_adminq;
2281 
2282         /*
2283          * If we got fewer queues than we asked for, we might as well give
2284          * some of the interrupt vectors back to the system.
2285          */
2286         if (nvme->n_ioq_count < nqueues) {
2287                 nvme_release_interrupts(nvme);
2288 
2289                 if (nvme_setup_interrupts(nvme, nvme->n_intr_type,
2290                     nvme->n_ioq_count) != DDI_SUCCESS) {
2291                         dev_err(nvme->n_dip, CE_WARN,
2292                             "!failed to reduce number of interrupts");
2293                         goto fail;
2294                 }
2295         }
2296 
2297         /*
2298          * Allocate and create the I/O queue pairs
2299          */
2300         nvme->n_io_queue_len =
2301             MIN(nvme->n_io_queue_len, nvme->n_max_queue_entries);
2302         (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-queue-len",
2303             nvme->n_io_queue_len);
2304 
2305         for (i = 1; i != nvme->n_ioq_count + 1; i++) {
2306                 if (nvme_alloc_qpair(nvme, nvme->n_io_queue_len,
2307                     &nvme->n_ioq[i], i) != DDI_SUCCESS) {
2308                         dev_err(nvme->n_dip, CE_WARN,
2309                             "!unable to allocate I/O qpair %d", i);
2310                         goto fail;
2311                 }
2312 
2313                 if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i)
2314                     != DDI_SUCCESS) {
2315                         dev_err(nvme->n_dip, CE_WARN,
2316                             "!unable to create I/O qpair %d", i);
2317                         goto fail;
2318                 }
2319         }
2320 
2321         /*
2322          * Post more asynchronous event commands to reduce event reporting
2323          * latency as suggested by the spec.
2324          */
2325         for (i = 1; i != nvme->n_async_event_limit; i++) {
2326                 if (nvme_async_event(nvme) != DDI_SUCCESS) {
2327                         dev_err(nvme->n_dip, CE_WARN,
2328                             "!failed to post async event %d", i);
2329                         goto fail;
2330                 }
2331         }
2332 
2333         return (DDI_SUCCESS);
2334 
2335 fail:
2336         (void) nvme_reset(nvme, B_FALSE);
2337         return (DDI_FAILURE);
2338 }
2339 
2340 static uint_t
2341 nvme_intr(caddr_t arg1, caddr_t arg2)
2342 {
2343         /*LINTED: E_PTR_BAD_CAST_ALIGN*/
2344         nvme_t *nvme = (nvme_t *)arg1;
2345         int inum = (int)(uintptr_t)arg2;
2346         int ccnt = 0;
2347         int qnum;
2348         nvme_cmd_t *cmd;
2349 
2350         if (inum >= nvme->n_intr_cnt)
2351                 return (DDI_INTR_UNCLAIMED);
2352 
2353         /*
2354          * The interrupt vector a queue uses is calculated as queue_idx %
2355          * intr_cnt in nvme_create_io_qpair(). Iterate through the queue array
2356          * in steps of n_intr_cnt to process all queues using this vector.
2357          */
2358         for (qnum = inum;
2359             qnum < nvme->n_ioq_count + 1 && nvme->n_ioq[qnum] != NULL;
2360             qnum += nvme->n_intr_cnt) {
2361                 while ((cmd = nvme_retrieve_cmd(nvme, nvme->n_ioq[qnum]))) {
2362                         taskq_dispatch_ent((taskq_t *)cmd->nc_nvme->n_cmd_taskq,
2363                             cmd->nc_callback, cmd, TQ_NOSLEEP, &cmd->nc_tqent);
2364                         ccnt++;
2365                 }
2366         }
2367 
2368         return (ccnt > 0 ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
2369 }
2370 
2371 static void
2372 nvme_release_interrupts(nvme_t *nvme)
2373 {
2374         int i;
2375 
2376         for (i = 0; i < nvme->n_intr_cnt; i++) {
2377                 if (nvme->n_inth[i] == NULL)
2378                         break;
2379 
2380                 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
2381                         (void) ddi_intr_block_disable(&nvme->n_inth[i], 1);
2382                 else
2383                         (void) ddi_intr_disable(nvme->n_inth[i]);
2384 
2385                 (void) ddi_intr_remove_handler(nvme->n_inth[i]);
2386                 (void) ddi_intr_free(nvme->n_inth[i]);
2387         }
2388 
2389         kmem_free(nvme->n_inth, nvme->n_inth_sz);
2390         nvme->n_inth = NULL;
2391         nvme->n_inth_sz = 0;
2392 
2393         nvme->n_progress &= ~NVME_INTERRUPTS;
2394 }
2395 
2396 static int
2397 nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs)
2398 {
2399         int nintrs, navail, count;
2400         int ret;
2401         int i;
2402 
2403         if (nvme->n_intr_types == 0) {
2404                 ret = ddi_intr_get_supported_types(nvme->n_dip,
2405                     &nvme->n_intr_types);
2406                 if (ret != DDI_SUCCESS) {
2407                         dev_err(nvme->n_dip, CE_WARN,
2408                             "!%s: ddi_intr_get_supported types failed",
2409                             __func__);
2410                         return (ret);
2411                 }
2412         }
2413 
2414         if ((nvme->n_intr_types & intr_type) == 0)
2415                 return (DDI_FAILURE);
2416 
2417         ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs);
2418         if (ret != DDI_SUCCESS) {
2419                 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed",
2420                     __func__);
2421                 return (ret);
2422         }
2423 
2424         ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail);
2425         if (ret != DDI_SUCCESS) {
2426                 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed",
2427                     __func__);
2428                 return (ret);
2429         }
2430 
2431         /* We want at most one interrupt per queue pair. */
2432         if (navail > nqpairs)
2433                 navail = nqpairs;
2434 
2435         nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail;
2436         nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP);
2437 
2438         ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail,
2439             &count, 0);
2440         if (ret != DDI_SUCCESS) {
2441                 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed",
2442                     __func__);
2443                 goto fail;
2444         }
2445 
2446         nvme->n_intr_cnt = count;
2447 
2448         ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri);
2449         if (ret != DDI_SUCCESS) {
2450                 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed",
2451                     __func__);
2452                 goto fail;
2453         }
2454 
2455         for (i = 0; i < count; i++) {
2456                 ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr,
2457                     (void *)nvme, (void *)(uintptr_t)i);
2458                 if (ret != DDI_SUCCESS) {
2459                         dev_err(nvme->n_dip, CE_WARN,
2460                             "!%s: ddi_intr_add_handler failed", __func__);
2461                         goto fail;
2462                 }
2463         }
2464 
2465         (void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap);
2466 
2467         for (i = 0; i < count; i++) {
2468                 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
2469                         ret = ddi_intr_block_enable(&nvme->n_inth[i], 1);
2470                 else
2471                         ret = ddi_intr_enable(nvme->n_inth[i]);
2472 
2473                 if (ret != DDI_SUCCESS) {
2474                         dev_err(nvme->n_dip, CE_WARN,
2475                             "!%s: enabling interrupt %d failed", __func__, i);
2476                         goto fail;
2477                 }
2478         }
2479 
2480         nvme->n_intr_type = intr_type;
2481 
2482         nvme->n_progress |= NVME_INTERRUPTS;
2483 
2484         return (DDI_SUCCESS);
2485 
2486 fail:
2487         nvme_release_interrupts(nvme);
2488 
2489         return (ret);
2490 }
2491 
2492 static int
2493 nvme_fm_errcb(dev_info_t *dip, ddi_fm_error_t *fm_error, const void *arg)
2494 {
2495         _NOTE(ARGUNUSED(arg));
2496 
2497         pci_ereport_post(dip, fm_error, NULL);
2498         return (fm_error->fme_status);
2499 }
2500 
2501 static int
2502 nvme_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2503 {
2504         nvme_t *nvme;
2505         int instance;
2506         int nregs;
2507         off_t regsize;
2508         int i;
2509         char name[32];
2510 
2511         if (cmd != DDI_ATTACH)
2512                 return (DDI_FAILURE);
2513 
2514         instance = ddi_get_instance(dip);
2515 
2516         if (ddi_soft_state_zalloc(nvme_state, instance) != DDI_SUCCESS)
2517                 return (DDI_FAILURE);
2518 
2519         nvme = ddi_get_soft_state(nvme_state, instance);
2520         ddi_set_driver_private(dip, nvme);
2521         nvme->n_dip = dip;
2522 
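             /*
              * The tunables below can be overridden in the driver's .conf
              * file, e.g. (illustrative values only):
              *
              *     strict-version=1;
              *     admin-queue-len=256;
              *     io-queue-len=1024;
              *     volatile-write-cache-enable=1;
              *     min-phys-block-size=512;
              */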
2523         nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2524             DDI_PROP_DONTPASS, "strict-version", 1) == 1 ? B_TRUE : B_FALSE;
2525         nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY,
2526             dip, DDI_PROP_DONTPASS, "ignore-unknown-vendor-status", 0) == 1 ?
2527             B_TRUE : B_FALSE;
2528         nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2529             DDI_PROP_DONTPASS, "admin-queue-len", NVME_DEFAULT_ADMIN_QUEUE_LEN);
2530         nvme->n_io_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2531             DDI_PROP_DONTPASS, "io-queue-len", NVME_DEFAULT_IO_QUEUE_LEN);
2532         nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2533             DDI_PROP_DONTPASS, "async-event-limit",
2534             NVME_DEFAULT_ASYNC_EVENT_LIMIT);
2535         nvme->n_write_cache_enabled = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2536             DDI_PROP_DONTPASS, "volatile-write-cache-enable", 1) != 0 ?
2537             B_TRUE : B_FALSE;
2538         nvme->n_min_block_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2539             DDI_PROP_DONTPASS, "min-phys-block-size",
2540             NVME_DEFAULT_MIN_BLOCK_SIZE);
2541 
2542         if (!ISP2(nvme->n_min_block_size) ||
2543             (nvme->n_min_block_size < NVME_DEFAULT_MIN_BLOCK_SIZE)) {
2544                 dev_err(dip, CE_WARN, "!min-phys-block-size %s, "
2545                     "using default %d", ISP2(nvme->n_min_block_size) ?
2546                     "too low" : "not a power of 2",
2547                     NVME_DEFAULT_MIN_BLOCK_SIZE);
2548                 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE;
2549         }
2550 
2551         if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN)
2552                 nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN;
2553         else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN)
2554                 nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN;
2555 
2556         if (nvme->n_io_queue_len < NVME_MIN_IO_QUEUE_LEN)
2557                 nvme->n_io_queue_len = NVME_MIN_IO_QUEUE_LEN;
2558 
2559         if (nvme->n_async_event_limit < 1)
2560                 nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT;
2561 
2562         nvme->n_reg_acc_attr = nvme_reg_acc_attr;
2563         nvme->n_queue_dma_attr = nvme_queue_dma_attr;
2564         nvme->n_prp_dma_attr = nvme_prp_dma_attr;
2565         nvme->n_sgl_dma_attr = nvme_sgl_dma_attr;
2566 
2567         /*
2568          * Set up FMA support.
2569          */
2570         nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip,
2571             DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
2572             DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
2573             DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
2574 
2575         ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc);
2576 
2577         if (nvme->n_fm_cap) {
2578                 if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE)
2579                         nvme->n_reg_acc_attr.devacc_attr_access =
2580                             DDI_FLAGERR_ACC;
2581 
2582                 if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) {
2583                         nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
2584                         nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
2585                 }
2586 
2587                 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
2588                     DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
2589                         pci_ereport_setup(dip);
2590 
2591                 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
2592                         ddi_fm_handler_register(dip, nvme_fm_errcb,
2593                             (void *)nvme);
2594         }
2595 
2596         nvme->n_progress |= NVME_FMA_INIT;
2597 
2598         /*
2599          * The spec defines several register sets. Only the controller
2600          * registers (set 1) are currently used.
2601          */
2602         if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE ||
2603             nregs < 2 ||
2604             ddi_dev_regsize(dip, 1, &regsize) == DDI_FAILURE)
2605                 goto fail;
2606 
2607         if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize,
2608             &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) {
2609                 dev_err(dip, CE_WARN, "!failed to map regset 1");
2610                 goto fail;
2611         }
2612 
2613         nvme->n_progress |= NVME_REGS_MAPPED;
2614 
2615         /*
2616          * Create taskq for command completion.
2617          */
2618         (void) snprintf(name, sizeof (name), "%s%d_cmd_taskq",
2619             ddi_driver_name(dip), ddi_get_instance(dip));
2620         nvme->n_cmd_taskq = ddi_taskq_create(dip, name, MIN(UINT16_MAX, ncpus),
2621             TASKQ_DEFAULTPRI, 0);
2622         if (nvme->n_cmd_taskq == NULL) {
2623                 dev_err(dip, CE_WARN, "!failed to create cmd taskq");
2624                 goto fail;
2625         }
2626 
2627         /*
2628          * Create PRP DMA cache
2629          */
2630         (void) snprintf(name, sizeof (name), "%s%d_prp_cache",
2631             ddi_driver_name(dip), ddi_get_instance(dip));
2632         nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t),
2633             0, nvme_prp_dma_constructor, nvme_prp_dma_destructor,
2634             NULL, (void *)nvme, NULL, 0);
2635 
2636         if (nvme_init(nvme) != DDI_SUCCESS)
2637                 goto fail;
2638 
2639         /*
2640          * Attach the blkdev driver for each namespace.
2641          */
2642         for (i = 0; i != nvme->n_namespace_count; i++) {
2643                 if (nvme->n_ns[i].ns_ignore)
2644                         continue;
2645 
2646                 nvme->n_ns[i].ns_bd_hdl = bd_alloc_handle(&nvme->n_ns[i],
2647                     &nvme_bd_ops, &nvme->n_prp_dma_attr, KM_SLEEP);
2648 
2649                 if (nvme->n_ns[i].ns_bd_hdl == NULL) {
2650                         dev_err(dip, CE_WARN,
2651                             "!failed to get blkdev handle for namespace %d", i);
2652                         goto fail;
2653                 }
2654 
2655                 if (bd_attach_handle(dip, nvme->n_ns[i].ns_bd_hdl)
2656                     != DDI_SUCCESS) {
2657                         dev_err(dip, CE_WARN,
2658                             "!failed to attach blkdev handle for namespace %d",
2659                             i);
2660                         goto fail;
2661                 }
2662         }
2663 
2664         return (DDI_SUCCESS);
2665 
2666 fail:
2667         /* report attach as successful anyway so that FMA can retire the device */
2668         if (nvme->n_dead)
2669                 return (DDI_SUCCESS);
2670 
2671         (void) nvme_detach(dip, DDI_DETACH);
2672 
2673         return (DDI_FAILURE);
2674 }
2675 
2676 static int
2677 nvme_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2678 {
2679         int instance, i;
2680         nvme_t *nvme;
2681 
2682         if (cmd != DDI_DETACH)
2683                 return (DDI_FAILURE);
2684 
2685         instance = ddi_get_instance(dip);
2686 
2687         nvme = ddi_get_soft_state(nvme_state, instance);
2688 
2689         if (nvme == NULL)
2690                 return (DDI_FAILURE);
2691 
2692         if (nvme->n_ns) {
2693                 for (i = 0; i != nvme->n_namespace_count; i++) {
2694                         if (nvme->n_ns[i].ns_bd_hdl) {
2695                                 (void) bd_detach_handle(
2696                                     nvme->n_ns[i].ns_bd_hdl);
2697                                 bd_free_handle(nvme->n_ns[i].ns_bd_hdl);
2698                         }
2699 
2700                         if (nvme->n_ns[i].ns_idns)
2701                                 kmem_free(nvme->n_ns[i].ns_idns,
2702                                     sizeof (nvme_identify_nsid_t));
2703                         if (nvme->n_ns[i].ns_devid)
2704                                 strfree(nvme->n_ns[i].ns_devid);
2705                 }
2706 
2707                 kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) *
2708                     nvme->n_namespace_count);
2709         }
2710 
2711         if (nvme->n_progress & NVME_INTERRUPTS)
2712                 nvme_release_interrupts(nvme);
2713 
2714         if (nvme->n_cmd_taskq)
2715                 ddi_taskq_wait(nvme->n_cmd_taskq);
2716 
2717         if (nvme->n_ioq_count > 0) {
2718                 for (i = 1; i != nvme->n_ioq_count + 1; i++) {
2719                         if (nvme->n_ioq[i] != NULL) {
2720                                 /* TODO: send destroy queue commands */
2721                                 nvme_free_qpair(nvme->n_ioq[i]);
2722                         }
2723                 }
2724 
2725                 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) *
2726                     (nvme->n_ioq_count + 1));
2727         }
2728 
2729         if (nvme->n_prp_cache != NULL) {
2730                 kmem_cache_destroy(nvme->n_prp_cache);
2731         }
2732 
2733         if (nvme->n_progress & NVME_REGS_MAPPED) {
2734                 nvme_shutdown(nvme, NVME_CC_SHN_NORMAL, B_FALSE);
2735                 (void) nvme_reset(nvme, B_FALSE);
2736         }
2737 
2738         if (nvme->n_cmd_taskq)
2739                 ddi_taskq_destroy(nvme->n_cmd_taskq);
2740 
2741         if (nvme->n_progress & NVME_CTRL_LIMITS)
2742                 sema_destroy(&nvme->n_abort_sema);
2743 
2744         if (nvme->n_progress & NVME_ADMIN_QUEUE)
2745                 nvme_free_qpair(nvme->n_adminq);
2746 
2747         if (nvme->n_idctl)
2748                 kmem_free(nvme->n_idctl, sizeof (nvme_identify_ctrl_t));
2749 
2750         if (nvme->n_progress & NVME_REGS_MAPPED)
2751                 ddi_regs_map_free(&nvme->n_regh);
2752 
2753         if (nvme->n_progress & NVME_FMA_INIT) {
2754                 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
2755                         ddi_fm_handler_unregister(nvme->n_dip);
2756 
2757                 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
2758                     DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
2759                         pci_ereport_teardown(nvme->n_dip);
2760 
2761                 ddi_fm_fini(nvme->n_dip);
2762         }
2763 
2764         if (nvme->n_vendor != NULL)
2765                 strfree(nvme->n_vendor);
2766 
2767         if (nvme->n_product != NULL)
2768                 strfree(nvme->n_product);
2769 
2770         ddi_soft_state_free(nvme_state, instance);
2771 
2772         return (DDI_SUCCESS);
2773 }
2774 
2775 static int
2776 nvme_quiesce(dev_info_t *dip)
2777 {
2778         int instance;
2779         nvme_t *nvme;
2780 
2781         instance = ddi_get_instance(dip);
2782 
2783         nvme = ddi_get_soft_state(nvme_state, instance);
2784 
2785         if (nvme == NULL)
2786                 return (DDI_FAILURE);
2787 
2788         nvme_shutdown(nvme, NVME_CC_SHN_ABRUPT, B_TRUE);
2789 
2790         (void) nvme_reset(nvme, B_TRUE);
2791 
2792         return (DDI_SUCCESS);
2793 }
2794 
2795 static int
2796 nvme_fill_prp(nvme_cmd_t *cmd, bd_xfer_t *xfer)
2797 {
2798         nvme_t *nvme = cmd->nc_nvme;
2799         int nprp_page, nprp;
2800         uint64_t *prp;
2801 
2802         if (xfer->x_ndmac == 0)
2803                 return (DDI_FAILURE);
2804 
2805         cmd->nc_sqe.sqe_dptr.d_prp[0] = xfer->x_dmac.dmac_laddress;
2806         ddi_dma_nextcookie(xfer->x_dmah, &xfer->x_dmac);
2807 
2808         if (xfer->x_ndmac == 1) {
2809                 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0;
2810                 return (DDI_SUCCESS);
2811         } else if (xfer->x_ndmac == 2) {
2812                 cmd->nc_sqe.sqe_dptr.d_prp[1] = xfer->x_dmac.dmac_laddress;
2813                 return (DDI_SUCCESS);
2814         }
2815 
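             /*
              * More than two cookies: PRP1 keeps pointing at the first page
              * and PRP2 is turned into a pointer to a PRP list page holding
              * one 64-bit entry per remaining page of the transfer.
              */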
2816         xfer->x_ndmac--;
2817 
2818         nprp_page = nvme->n_pagesize / sizeof (uint64_t) - 1;
2819         ASSERT(nprp_page > 0);
2820         nprp = (xfer->x_ndmac + nprp_page - 1) / nprp_page;
2821 
2822         /*
2823          * We currently don't support chained PRPs and set up our DMA
2824          * attributes to reflect that. If we still get an I/O request
2825          * that needs a chained PRP something is very wrong.
2826          */
2827         VERIFY(nprp == 1);
2828 
2829         cmd->nc_dma = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP);
2830         bzero(cmd->nc_dma->nd_memp, cmd->nc_dma->nd_len);
2831 
2832         cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_dma->nd_cookie.dmac_laddress;
2833 
2834         /*LINTED: E_PTR_BAD_CAST_ALIGN*/
2835         for (prp = (uint64_t *)cmd->nc_dma->nd_memp;
2836             xfer->x_ndmac > 0;
2837             prp++, xfer->x_ndmac--) {
2838                 *prp = xfer->x_dmac.dmac_laddress;
2839                 ddi_dma_nextcookie(xfer->x_dmah, &xfer->x_dmac);
2840         }
2841 
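             /*
              * Flush the PRP list out to memory so the device sees it when the
              * command is submitted.
              */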
2842         (void) ddi_dma_sync(cmd->nc_dma->nd_dmah, 0, cmd->nc_dma->nd_len,
2843             DDI_DMA_SYNC_FORDEV);
2844         return (DDI_SUCCESS);
2845 }
2846 
2847 static nvme_cmd_t *
2848 nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer)
2849 {
2850         nvme_t *nvme = ns->ns_nvme;
2851         nvme_cmd_t *cmd;
2852 
2853         /*
2854          * Blkdev only sets BD_XFER_POLL when dumping, so don't sleep.
2855          */
2856         cmd = nvme_alloc_cmd(nvme, (xfer->x_flags & BD_XFER_POLL) ?
2857             KM_NOSLEEP : KM_SLEEP);
2858 
2859         if (cmd == NULL)
2860                 return (NULL);
2861 
2862         cmd->nc_sqe.sqe_opc = opc;
2863         cmd->nc_callback = nvme_bd_xfer_done;
2864         cmd->nc_xfer = xfer;
2865 
2866         switch (opc) {
2867         case NVME_OPC_NVM_WRITE:
2868         case NVME_OPC_NVM_READ:
2869                 VERIFY(xfer->x_nblks <= 0x10000);
2870 
2871                 cmd->nc_sqe.sqe_nsid = ns->ns_id;
2872 
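                     /*
                      * CDW10 and CDW11 hold the low and high 32 bits of the
                      * starting LBA. CDW12 bits 15:0 hold the zero-based block
                      * count, which is why x_nblks is verified above to be at
                      * most 0x10000.
                      */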
2873                 cmd->nc_sqe.sqe_cdw10 = xfer->x_blkno & 0xffffffffu;
2874                 cmd->nc_sqe.sqe_cdw11 = (xfer->x_blkno >> 32);
2875                 cmd->nc_sqe.sqe_cdw12 = (uint16_t)(xfer->x_nblks - 1);
2876 
2877                 if (nvme_fill_prp(cmd, xfer) != DDI_SUCCESS)
2878                         goto fail;
2879                 break;
2880 
2881         case NVME_OPC_NVM_FLUSH:
2882                 cmd->nc_sqe.sqe_nsid = ns->ns_id;
2883                 break;
2884 
2885         default:
2886                 goto fail;
2887         }
2888 
2889         return (cmd);
2890 
2891 fail:
2892         nvme_free_cmd(cmd);
2893         return (NULL);
2894 }
2895 
2896 static void
2897 nvme_bd_xfer_done(void *arg)
2898 {
2899         nvme_cmd_t *cmd = arg;
2900         bd_xfer_t *xfer = cmd->nc_xfer;
2901         int error = 0;
2902 
2903         error = nvme_check_cmd_status(cmd);
2904         nvme_free_cmd(cmd);
2905 
2906         bd_xfer_done(xfer, error);
2907 }
2908 
2909 static void
2910 nvme_bd_driveinfo(void *arg, bd_drive_t *drive)
2911 {
2912         nvme_namespace_t *ns = arg;
2913         nvme_t *nvme = ns->ns_nvme;
2914 
2915         /*
2916          * blkdev maintains one queue size per instance (namespace), but
2917          * all namespaces share the I/O queues, so divide the total queue
2918          * depth evenly among them.  TODO: figure out a sane default, use
2919          * per-namespace I/O queues, or change blkdev to handle EAGAIN.
2920          */
2921         drive->d_qsize = nvme->n_ioq_count * nvme->n_io_queue_len
2922             / nvme->n_namespace_count;
2923 
2924         /*
2925          * d_maxxfer is not set, which means the value is taken from the DMA
2926          * attributes specified to bd_alloc_handle.
2927          */
2928 
2929         drive->d_removable = B_FALSE;
2930         drive->d_hotpluggable = B_FALSE;
2931 
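             /*
              * Export the namespace EUI-64 and use the namespace ID as the
              * target number. NVMe has no notion of LUNs, so the LUN is
              * always 0.
              */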
2932         bcopy(ns->ns_eui64, drive->d_eui64, sizeof (drive->d_eui64));
2933         drive->d_target = ns->ns_id;
2934         drive->d_lun = 0;
2935 
2936         drive->d_model = nvme->n_idctl->id_model;
2937         drive->d_model_len = sizeof (nvme->n_idctl->id_model);
2938         drive->d_vendor = nvme->n_vendor;
2939         drive->d_vendor_len = strlen(nvme->n_vendor);
2940         drive->d_product = nvme->n_product;
2941         drive->d_product_len = strlen(nvme->n_product);
2942         drive->d_serial = nvme->n_idctl->id_serial;
2943         drive->d_serial_len = sizeof (nvme->n_idctl->id_serial);
2944         drive->d_revision = nvme->n_idctl->id_fwrev;
2945         drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev);
2946 }
2947 
2948 static int
2949 nvme_bd_mediainfo(void *arg, bd_media_t *media)
2950 {
2951         nvme_namespace_t *ns = arg;
2952 
2953         media->m_nblks = ns->ns_block_count;
2954         media->m_blksize = ns->ns_block_size;
2955         media->m_readonly = B_FALSE;
2956         media->m_solidstate = B_TRUE;
2957 
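             /*
              * Advertise the namespace's best block size as the physical block
              * size so upper layers can align their I/O to it.
              */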
2958         media->m_pblksize = ns->ns_best_block_size;
2959 
2960         return (0);
2961 }
2962 
2963 static int
2964 nvme_bd_cmd(nvme_namespace_t *ns, bd_xfer_t *xfer, uint8_t opc)
2965 {
2966         nvme_t *nvme = ns->ns_nvme;
2967         nvme_cmd_t *cmd;
2968 
2969         if (nvme->n_dead)
2970                 return (EIO);
2971 
2972         /* No polling for now */
2973         if (xfer->x_flags & BD_XFER_POLL)
2974                 return (EIO);
2975 
2976         cmd = nvme_create_nvm_cmd(ns, opc, xfer);
2977         if (cmd == NULL)
2978                 return (ENOMEM);
2979 
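             /*
              * Spread I/O across the queue pairs by taking the current CPU id
              * modulo the number of I/O queues. Queue IDs are 1-based because
              * queue pair 0 is the admin queue.
              */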
2980         cmd->nc_sqid = (CPU->cpu_id % nvme->n_ioq_count) + 1;
2981         ASSERT(cmd->nc_sqid <= nvme->n_ioq_count);
2982 
2983         if (nvme_submit_cmd(nvme->n_ioq[cmd->nc_sqid], cmd)
2984             != DDI_SUCCESS) {
                     /*
                      * The command was never handed to the hardware; free it
                      * before asking blkdev to retry.
                      */
                     nvme_free_cmd(cmd);
2985                 return (EAGAIN);
             }
2986 
2987         return (0);
2988 }
2989 
2990 static int
2991 nvme_bd_read(void *arg, bd_xfer_t *xfer)
2992 {
2993         nvme_namespace_t *ns = arg;
2994 
2995         return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_READ));
2996 }
2997 
2998 static int
2999 nvme_bd_write(void *arg, bd_xfer_t *xfer)
3000 {
3001         nvme_namespace_t *ns = arg;
3002 
3003         return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_WRITE));
3004 }
3005 
3006 static int
3007 nvme_bd_sync(void *arg, bd_xfer_t *xfer)
3008 {
3009         nvme_namespace_t *ns = arg;
3010 
3011         if (ns->ns_nvme->n_dead)
3012                 return (EIO);
3013 
3014         /*
3015          * If the volatile write cache is not present, a cache flush makes
3016          * no sense, so complete the transfer with ENOTSUP. If the cache
              * exists but is disabled, FLUSH is a no-op, so complete the
              * transfer successfully without sending a command to the device.
3017          */
3018         if (!ns->ns_nvme->n_write_cache_present) {
3019                 bd_xfer_done(xfer, ENOTSUP);
3020                 return (0);
3021         }
3022 
3023         if (!ns->ns_nvme->n_write_cache_enabled) {
3024                 bd_xfer_done(xfer, 0);
3025                 return (0);
3026         }
3027 
3028         return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_FLUSH));
3029 }
3030 
3031 static int
3032 nvme_bd_devid(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
3033 {
3034         nvme_namespace_t *ns = arg;
3035 
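             /*
              * If the namespace reports a non-zero IEEE EUI-64, build a
              * WWN-based devid from it; otherwise fall back to an encapsulated
              * devid built from the driver-generated ns_devid string.
              */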
3036         /*LINTED: E_BAD_PTR_CAST_ALIGN*/
3037         if (*(uint64_t *)ns->ns_eui64 != 0) {
3038                 return (ddi_devid_init(devinfo, DEVID_SCSI3_WWN,
3039                     sizeof (ns->ns_eui64), ns->ns_eui64, devid));
3040         } else {
3041                 return (ddi_devid_init(devinfo, DEVID_ENCAP,
3042                     strlen(ns->ns_devid), ns->ns_devid, devid));
3043         }
3044 }