1 /*
   2  * This file and its contents are supplied under the terms of the
   3  * Common Development and Distribution License ("CDDL"), version 1.0.
   4  * You may only use this file in accordance with the terms of version
   5  * 1.0 of the CDDL.
   6  *
   7  * A full copy of the text of the CDDL should have accompanied this
   8  * source.  A copy of the CDDL is also available via the Internet at
   9  * http://www.illumos.org/license/CDDL.
  10  */
  11 
  12 /*
  13  * Copyright 2018 Nexenta Systems, Inc.
  14  * Copyright 2016 Tegile Systems, Inc. All rights reserved.
  15  * Copyright (c) 2016 The MathWorks, Inc.  All rights reserved.
  16  * Copyright 2017 Joyent, Inc.
  17  */
  18 
  19 /*
  20  * blkdev driver for NVMe compliant storage devices
  21  *
  22  * This driver was written to conform to version 1.2.1 of the NVMe
  23  * specification.  It may work with newer versions, but that is completely
  24  * untested and disabled by default.
  25  *
  26  * The driver has only been tested on x86 systems and will not work on big-
  27  * endian systems without changes to the code accessing registers and data
  28  * structures used by the hardware.
  29  *
  30  *
  31  * Interrupt Usage:
  32  *
  33  * The driver will use a single interrupt while configuring the device as the
  34  * specification requires, but contrary to the specification it will try to use
  35  * a single-message MSI(-X) or FIXED interrupt. Later in the attach process it
  36  * will switch to multiple-message MSI(-X) if supported. The driver wants to
 * have one interrupt vector per CPU, but it will work correctly if fewer are
 * available. Interrupts can be shared by queues; the interrupt handler will
 * iterate through the I/O queue array in steps of n_intr_cnt. Usually only
  40  * the admin queue will share an interrupt with one I/O queue. The interrupt
  41  * handler will retrieve completed commands from all queues sharing an interrupt
  42  * vector and will post them to a taskq for completion processing.
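 *
 * As a rough sketch (simplified; inum is the vector number passed to the
 * handler, and the field names are approximations), the per-vector work in
 * nvme_intr() amounts to:
 *
 *	for (qnum = inum; qnum < nvme->n_ioq_count + 1;
 *	    qnum += nvme->n_intr_cnt)
 *		while ((cmd = nvme_retrieve_cmd(nvme, nvme->n_ioq[qnum]))
 *		    != NULL)
 *			taskq_dispatch(..., cmd->nc_callback, cmd, ...);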
  43  *
  44  *
  45  * Command Processing:
  46  *
  47  * NVMe devices can have up to 65535 I/O queue pairs, with each queue holding up
  48  * to 65536 I/O commands. The driver will configure one I/O queue pair per
  49  * available interrupt vector, with the queue length usually much smaller than
  50  * the maximum of 65536. If the hardware doesn't provide enough queues, fewer
  51  * interrupt vectors will be used.
  52  *
  53  * Additionally the hardware provides a single special admin queue pair that can
  54  * hold up to 4096 admin commands.
  55  *
  56  * From the hardware perspective both queues of a queue pair are independent,
  57  * but they share some driver state: the command array (holding pointers to
  58  * commands currently being processed by the hardware) and the active command
  59  * counter. Access to a queue pair and the shared state is protected by
  60  * nq_mutex.
  61  *
  62  * When a command is submitted to a queue pair the active command counter is
  63  * incremented and a pointer to the command is stored in the command array. The
  64  * array index is used as command identifier (CID) in the submission queue
  65  * entry. Some commands may take a very long time to complete, and if the queue
 * wraps around in that time a submission may find the next array slot still
 * in use by a long-running command. In this case the array is sequentially
  68  * searched for the next free slot. The length of the command array is the same
 * as the configured queue length. Queue overrun is prevented by the per-queue
 * semaphore nq_sema, so a command submission may block if the queue is full.
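 *
 * In code form, the slot search in nvme_submit_cmd_common() below is simply:
 *
 *	while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
 *		qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
 *	qp->nq_cmd[qp->nq_next_cmd] = cmd;
 *	cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;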
  71  *
  72  *
  73  * Polled I/O Support:
  74  *
 * For kernel core dump support the driver can do polled I/O. As interrupts are
 * turned off while dumping, the driver will just submit a command in the
 * regular way and then repeatedly attempt a command retrieval until it gets
 * the command back.
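 *
 * Conceptually (a sketch only, not the literal dump codepath):
 *
 *	(void) nvme_submit_io_cmd(qp, cmd);
 *	do {
 *		completed = nvme_retrieve_cmd(nvme, qp);
 *	} while (completed != cmd);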
  79  *
  80  *
  81  * Namespace Support:
  82  *
 * NVMe devices can have multiple namespaces, each being an independent data
  84  * store. The driver supports multiple namespaces and creates a blkdev interface
  85  * for each namespace found. Namespaces can have various attributes to support
  86  * thin provisioning and protection information. This driver does not support
 * any of these features and ignores namespaces that have these attributes.
  88  *
 * As of NVMe 1.1 namespaces can have a 64bit Extended Unique Identifier
  90  * (EUI64). This driver uses the EUI64 if present to generate the devid and
  91  * passes it to blkdev to use it in the device node names. As this is currently
 * untested, namespaces with EUI64 are ignored by default.
  93  *
  94  * We currently support only (2 << NVME_MINOR_INST_SHIFT) - 2 namespaces in a
  95  * single controller. This is an artificial limit imposed by the driver to be
  96  * able to address a reasonable number of controllers and namespaces using a
  97  * 32bit minor node number.
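 *
 * For example, with NVME_MINOR_INST_SHIFT being 9 (see the NVME_MINOR macros
 * below), controller instance 3 and namespace 2 yield minor number
 * (3 << 9) | 2 == 1538, which decodes back to instance 1538 >> 9 == 3 and
 * namespace 1538 & 0x1ff == 2.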
  98  *
  99  *
 100  * Minor nodes:
 101  *
 102  * For each NVMe device the driver exposes one minor node for the controller and
 103  * one minor node for each namespace. The only operations supported by those
 104  * minor nodes are open(9E), close(9E), and ioctl(9E). This serves as the
 105  * interface for the nvmeadm(1M) utility.
 106  *
 107  *
 108  * Blkdev Interface:
 109  *
 110  * This driver uses blkdev to do all the heavy lifting involved with presenting
 111  * a disk device to the system. As a result, the processing of I/O requests is
 112  * relatively simple as blkdev takes care of partitioning, boundary checks, DMA
 113  * setup, and splitting of transfers into manageable chunks.
 114  *
 115  * I/O requests coming in from blkdev are turned into NVM commands and posted to
 116  * an I/O queue. The queue is selected by taking the CPU id modulo the number of
 117  * queues. There is currently no timeout handling of I/O commands.
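 *
 * A sketch of the queue selection (the exact CPU id accessor and queue
 * indexing are approximations):
 *
 *	qp = nvme->n_ioq[(CPU->cpu_id % nvme->n_ioq_count) + 1];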
 118  *
 119  * Blkdev also supports querying device/media information and generating a
 120  * devid. The driver reports the best block size as determined by the namespace
 121  * format back to blkdev as physical block size to support partition and block
 122  * alignment. The devid is either based on the namespace EUI64, if present, or
 123  * composed using the device vendor ID, model number, serial number, and the
 124  * namespace ID.
 125  *
 126  *
 127  * Error Handling:
 128  *
 129  * Error handling is currently limited to detecting fatal hardware errors,
 130  * either by asynchronous events, or synchronously through command status or
 * admin command timeouts. In case of severe errors the device is fenced off;
 * all further requests will return EIO. FMA is then called to fault the device.
 133  *
 134  * The hardware has a limit for outstanding asynchronous event requests. Before
 135  * this limit is known the driver assumes it is at least 1 and posts a single
 * asynchronous request. Later, when the limit is known, more asynchronous event
 137  * requests are posted to allow quicker reception of error information. When an
 138  * asynchronous event is posted by the hardware the driver will parse the error
 139  * status fields and log information or fault the device, depending on the
 140  * severity of the asynchronous event. The asynchronous event request is then
 141  * reused and posted to the admin queue again.
 142  *
 143  * On command completion the command status is checked for errors. In case of
 144  * errors indicating a driver bug the driver panics. Almost all other error
 145  * status values just cause EIO to be returned.
 146  *
 147  * Command timeouts are currently detected for all admin commands except
 148  * asynchronous event requests. If a command times out and the hardware appears
 149  * to be healthy the driver attempts to abort the command. The original command
 * timeout is also applied to the abort command. If the abort times out too,
 * the driver assumes the device is dead, fences it off, and calls FMA to
 * retire it. In all other cases the aborted command should return immediately
 * with a status indicating it was aborted, and the driver will wait
 * indefinitely for that to happen. No timeout handling of normal I/O commands
 * is presently done.
 155  *
 * Any command that times out due to the controller dropping dead will be put
 * on the nvme_lost_cmds list if it references DMA memory. This prevents the
 * DMA memory from being reused by the system and later written to by a "dead"
 * NVMe controller.
 160  *
 161  *
 162  * Locking:
 163  *
 164  * Each queue pair has its own nq_mutex, which must be held when accessing the
 165  * associated queue registers or the shared state of the queue pair. Callers of
 166  * nvme_unqueue_cmd() must make sure that nq_mutex is held, while
 167  * nvme_submit_{admin,io}_cmd() and nvme_retrieve_cmd() take care of this
 168  * themselves.
 169  *
 170  * Each command also has its own nc_mutex, which is associated with the
 171  * condition variable nc_cv. It is only used on admin commands which are run
 172  * synchronously. In that case it must be held across calls to
 173  * nvme_submit_{admin,io}_cmd() and nvme_wait_cmd(), which is taken care of by
 174  * nvme_admin_cmd(). It must also be held whenever the completion state of the
 * command is changed or while an admin command timeout is handled.
 176  *
 177  * If both nc_mutex and nq_mutex must be held, nc_mutex must be acquired first.
 178  * More than one nc_mutex may only be held when aborting commands. In this case,
 179  * the nc_mutex of the command to be aborted must be held across the call to
 180  * nvme_abort_cmd() to prevent the command from completing while the abort is in
 181  * progress.
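 *
 * For illustration, a hypothetical codepath needing both locks (qp being the
 * queue pair the command was submitted to) would have to do:
 *
 *	mutex_enter(&cmd->nc_mutex);
 *	mutex_enter(&qp->nq_mutex);
 *	...
 *	mutex_exit(&qp->nq_mutex);
 *	mutex_exit(&cmd->nc_mutex);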
 182  *
 183  * Each minor node has its own nm_mutex, which protects the open count nm_ocnt
 184  * and exclusive-open flag nm_oexcl.
 185  *
 186  *
 187  * Quiesce / Fast Reboot:
 188  *
 189  * The driver currently does not support fast reboot. A quiesce(9E) entry point
 190  * is still provided which is used to send a shutdown notification to the
 191  * device.
 192  *
 193  *
 194  * Driver Configuration:
 195  *
 196  * The following driver properties can be changed to control some aspects of the
 * driver's operation (an example nvme.conf fragment follows the list):
 198  * - strict-version: can be set to 0 to allow devices conforming to newer
 199  *   versions or namespaces with EUI64 to be used
 200  * - ignore-unknown-vendor-status: can be set to 1 to not handle any vendor
 *   specific command status as a fatal error leading to device faulting
 202  * - admin-queue-len: the maximum length of the admin queue (16-4096)
 203  * - io-queue-len: the maximum length of the I/O queues (16-65536)
 204  * - async-event-limit: the maximum number of asynchronous event requests to be
 205  *   posted by the driver
 206  * - volatile-write-cache-enable: can be set to 0 to disable the volatile write
 207  *   cache
 208  * - min-phys-block-size: the minimum physical block size to report to blkdev,
 209  *   which is among other things the basis for ZFS vdev ashift
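 *
 * An example nvme.conf fragment setting some of these properties (the values
 * are illustrative only):
 *
 *	strict-version=0;
 *	io-queue-len=1024;
 *	min-phys-block-size=4096;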
 210  *
 211  *
 212  * TODO:
 213  * - figure out sane default for I/O queue depth reported to blkdev
 214  * - FMA handling of media errors
 215  * - support for devices supporting very large I/O requests using chained PRPs
 216  * - support for configuring hardware parameters like interrupt coalescing
 217  * - support for media formatting and hard partitioning into namespaces
 218  * - support for big-endian systems
 219  * - support for fast reboot
 220  * - support for firmware updates
 221  * - support for NVMe Subsystem Reset (1.1)
 222  * - support for Scatter/Gather lists (1.1)
 223  * - support for Reservations (1.1)
 224  * - support for power management
 225  */
 226 
 227 #include <sys/byteorder.h>
 228 #ifdef _BIG_ENDIAN
 229 #error nvme driver needs porting for big-endian platforms
 230 #endif
 231 
 232 #include <sys/modctl.h>
 233 #include <sys/conf.h>
 234 #include <sys/devops.h>
 235 #include <sys/ddi.h>
 236 #include <sys/sunddi.h>
 237 #include <sys/sunndi.h>
 238 #include <sys/bitmap.h>
 239 #include <sys/sysmacros.h>
 240 #include <sys/param.h>
 241 #include <sys/varargs.h>
 242 #include <sys/cpuvar.h>
 243 #include <sys/disp.h>
 244 #include <sys/blkdev.h>
 245 #include <sys/atomic.h>
 246 #include <sys/archsystm.h>
 247 #include <sys/sata/sata_hba.h>
 248 #include <sys/stat.h>
 249 #include <sys/policy.h>
 250 #include <sys/list.h>
 251 
 252 #include <sys/nvme.h>
 253 
 254 #ifdef __x86
 255 #include <sys/x86_archext.h>
 256 #endif
 257 
 258 #include "nvme_reg.h"
 259 #include "nvme_var.h"
 260 
 261 
 262 /* NVMe spec version supported */
 263 static const int nvme_version_major = 1;
 264 static const int nvme_version_minor = 2;
 265 
 266 /* tunable for admin command timeout in seconds, default is 1s */
 267 int nvme_admin_cmd_timeout = 1;
 268 
 269 /* tunable for FORMAT NVM command timeout in seconds, default is 600s */
 270 int nvme_format_cmd_timeout = 600;
 271 
 272 static int nvme_attach(dev_info_t *, ddi_attach_cmd_t);
 273 static int nvme_detach(dev_info_t *, ddi_detach_cmd_t);
 274 static int nvme_quiesce(dev_info_t *);
 275 static int nvme_fm_errcb(dev_info_t *, ddi_fm_error_t *, const void *);
 276 static int nvme_setup_interrupts(nvme_t *, int, int);
 277 static void nvme_release_interrupts(nvme_t *);
 278 static uint_t nvme_intr(caddr_t, caddr_t);
 279 
 280 static void nvme_shutdown(nvme_t *, int, boolean_t);
 281 static boolean_t nvme_reset(nvme_t *, boolean_t);
 282 static int nvme_init(nvme_t *);
 283 static nvme_cmd_t *nvme_alloc_cmd(nvme_t *, int);
 284 static void nvme_free_cmd(nvme_cmd_t *);
 285 static nvme_cmd_t *nvme_create_nvm_cmd(nvme_namespace_t *, uint8_t,
 286     bd_xfer_t *);
 287 static void nvme_admin_cmd(nvme_cmd_t *, int);
 288 static void nvme_submit_admin_cmd(nvme_qpair_t *, nvme_cmd_t *);
 289 static int nvme_submit_io_cmd(nvme_qpair_t *, nvme_cmd_t *);
 290 static void nvme_submit_cmd_common(nvme_qpair_t *, nvme_cmd_t *);
 291 static nvme_cmd_t *nvme_unqueue_cmd(nvme_t *, nvme_qpair_t *, int);
 292 static nvme_cmd_t *nvme_retrieve_cmd(nvme_t *, nvme_qpair_t *);
 293 static void nvme_wait_cmd(nvme_cmd_t *, uint_t);
 294 static void nvme_wakeup_cmd(void *);
 295 static void nvme_async_event_task(void *);
 296 
 297 static int nvme_check_unknown_cmd_status(nvme_cmd_t *);
 298 static int nvme_check_vendor_cmd_status(nvme_cmd_t *);
 299 static int nvme_check_integrity_cmd_status(nvme_cmd_t *);
 300 static int nvme_check_specific_cmd_status(nvme_cmd_t *);
 301 static int nvme_check_generic_cmd_status(nvme_cmd_t *);
 302 static inline int nvme_check_cmd_status(nvme_cmd_t *);
 303 
 304 static int nvme_abort_cmd(nvme_cmd_t *, uint_t);
 305 static void nvme_async_event(nvme_t *);
 306 static int nvme_format_nvm(nvme_t *, uint32_t, uint8_t, boolean_t, uint8_t,
 307     boolean_t, uint8_t);
 308 static int nvme_get_logpage(nvme_t *, void **, size_t *, uint8_t, ...);
 309 static int nvme_identify(nvme_t *, uint32_t, void **);
 310 static int nvme_set_features(nvme_t *, uint32_t, uint8_t, uint32_t,
 311     uint32_t *);
 312 static int nvme_get_features(nvme_t *, uint32_t, uint8_t, uint32_t *,
 313     void **, size_t *);
 314 static int nvme_write_cache_set(nvme_t *, boolean_t);
 315 static int nvme_set_nqueues(nvme_t *, uint16_t *);
 316 
 317 static void nvme_free_dma(nvme_dma_t *);
 318 static int nvme_zalloc_dma(nvme_t *, size_t, uint_t, ddi_dma_attr_t *,
 319     nvme_dma_t **);
 320 static int nvme_zalloc_queue_dma(nvme_t *, uint32_t, uint16_t, uint_t,
 321     nvme_dma_t **);
 322 static void nvme_free_qpair(nvme_qpair_t *);
 323 static int nvme_alloc_qpair(nvme_t *, uint32_t, nvme_qpair_t **, int);
 324 static int nvme_create_io_qpair(nvme_t *, nvme_qpair_t *, uint16_t);
 325 
 326 static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t);
 327 static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t);
 328 static inline uint64_t nvme_get64(nvme_t *, uintptr_t);
 329 static inline uint32_t nvme_get32(nvme_t *, uintptr_t);
 330 
 331 static boolean_t nvme_check_regs_hdl(nvme_t *);
 332 static boolean_t nvme_check_dma_hdl(nvme_dma_t *);
 333 
 334 static int nvme_fill_prp(nvme_cmd_t *, bd_xfer_t *);
 335 
 336 static void nvme_bd_xfer_done(void *);
 337 static void nvme_bd_driveinfo(void *, bd_drive_t *);
 338 static int nvme_bd_mediainfo(void *, bd_media_t *);
 339 static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t);
 340 static int nvme_bd_read(void *, bd_xfer_t *);
 341 static int nvme_bd_write(void *, bd_xfer_t *);
 342 static int nvme_bd_sync(void *, bd_xfer_t *);
 343 static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *);
 344 
 345 static int nvme_prp_dma_constructor(void *, void *, int);
 346 static void nvme_prp_dma_destructor(void *, void *);
 347 
 348 static void nvme_prepare_devid(nvme_t *, uint32_t);
 349 
 350 static int nvme_open(dev_t *, int, int, cred_t *);
 351 static int nvme_close(dev_t, int, int, cred_t *);
 352 static int nvme_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
 353 
 354 #define NVME_MINOR_INST_SHIFT   9
 355 #define NVME_MINOR(inst, nsid)  (((inst) << NVME_MINOR_INST_SHIFT) | (nsid))
 356 #define NVME_MINOR_INST(minor)  ((minor) >> NVME_MINOR_INST_SHIFT)
 357 #define NVME_MINOR_NSID(minor)  ((minor) & ((1 << NVME_MINOR_INST_SHIFT) - 1))
 358 #define NVME_MINOR_MAX          (NVME_MINOR(1, 0) - 2)
 359 
 360 static void *nvme_state;
 361 static kmem_cache_t *nvme_cmd_cache;
 362 
 363 /*
 364  * DMA attributes for queue DMA memory
 365  *
 366  * Queue DMA memory must be page aligned. The maximum length of a queue is
 367  * 65536 entries, and an entry can be 64 bytes long.
 368  */
 369 static ddi_dma_attr_t nvme_queue_dma_attr = {
 370         .dma_attr_version       = DMA_ATTR_V0,
 371         .dma_attr_addr_lo       = 0,
 372         .dma_attr_addr_hi       = 0xffffffffffffffffULL,
 373         .dma_attr_count_max     = (UINT16_MAX + 1) * sizeof (nvme_sqe_t) - 1,
 374         .dma_attr_align         = 0x1000,
 375         .dma_attr_burstsizes    = 0x7ff,
 376         .dma_attr_minxfer       = 0x1000,
 377         .dma_attr_maxxfer       = (UINT16_MAX + 1) * sizeof (nvme_sqe_t),
 378         .dma_attr_seg           = 0xffffffffffffffffULL,
 379         .dma_attr_sgllen        = 1,
 380         .dma_attr_granular      = 1,
 381         .dma_attr_flags         = 0,
 382 };
 383 
 384 /*
 385  * DMA attributes for transfers using Physical Region Page (PRP) entries
 386  *
 387  * A PRP entry describes one page of DMA memory using the page size specified
 388  * in the controller configuration's memory page size register (CC.MPS). It uses
 389  * a 64bit base address aligned to this page size. There is no limitation on
 390  * chaining PRPs together for arbitrarily large DMA transfers.
 391  */
 392 static ddi_dma_attr_t nvme_prp_dma_attr = {
 393         .dma_attr_version       = DMA_ATTR_V0,
 394         .dma_attr_addr_lo       = 0,
 395         .dma_attr_addr_hi       = 0xffffffffffffffffULL,
 396         .dma_attr_count_max     = 0xfff,
 397         .dma_attr_align         = 0x1000,
 398         .dma_attr_burstsizes    = 0x7ff,
 399         .dma_attr_minxfer       = 0x1000,
 400         .dma_attr_maxxfer       = 0x1000,
 401         .dma_attr_seg           = 0xfff,
 402         .dma_attr_sgllen        = -1,
 403         .dma_attr_granular      = 1,
 404         .dma_attr_flags         = 0,
 405 };
 406 
 407 /*
 408  * DMA attributes for transfers using scatter/gather lists
 409  *
 410  * A SGL entry describes a chunk of DMA memory using a 64bit base address and a
 411  * 32bit length field. SGL Segment and SGL Last Segment entries require the
 412  * length to be a multiple of 16 bytes.
 413  */
 414 static ddi_dma_attr_t nvme_sgl_dma_attr = {
 415         .dma_attr_version       = DMA_ATTR_V0,
 416         .dma_attr_addr_lo       = 0,
 417         .dma_attr_addr_hi       = 0xffffffffffffffffULL,
 418         .dma_attr_count_max     = 0xffffffffUL,
 419         .dma_attr_align         = 1,
 420         .dma_attr_burstsizes    = 0x7ff,
 421         .dma_attr_minxfer       = 0x10,
 422         .dma_attr_maxxfer       = 0xfffffffffULL,
 423         .dma_attr_seg           = 0xffffffffffffffffULL,
 424         .dma_attr_sgllen        = -1,
 425         .dma_attr_granular      = 0x10,
 426         .dma_attr_flags         = 0
 427 };
 428 
 429 static ddi_device_acc_attr_t nvme_reg_acc_attr = {
 430         .devacc_attr_version    = DDI_DEVICE_ATTR_V0,
 431         .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
 432         .devacc_attr_dataorder  = DDI_STRICTORDER_ACC
 433 };
 434 
 435 static struct cb_ops nvme_cb_ops = {
 436         .cb_open        = nvme_open,
 437         .cb_close       = nvme_close,
 438         .cb_strategy    = nodev,
 439         .cb_print       = nodev,
 440         .cb_dump        = nodev,
 441         .cb_read        = nodev,
 442         .cb_write       = nodev,
 443         .cb_ioctl       = nvme_ioctl,
 444         .cb_devmap      = nodev,
 445         .cb_mmap        = nodev,
 446         .cb_segmap      = nodev,
 447         .cb_chpoll      = nochpoll,
 448         .cb_prop_op     = ddi_prop_op,
 449         .cb_str         = 0,
 450         .cb_flag        = D_NEW | D_MP,
 451         .cb_rev         = CB_REV,
 452         .cb_aread       = nodev,
 453         .cb_awrite      = nodev
 454 };
 455 
 456 static struct dev_ops nvme_dev_ops = {
 457         .devo_rev       = DEVO_REV,
 458         .devo_refcnt    = 0,
 459         .devo_getinfo   = ddi_no_info,
 460         .devo_identify  = nulldev,
 461         .devo_probe     = nulldev,
 462         .devo_attach    = nvme_attach,
 463         .devo_detach    = nvme_detach,
 464         .devo_reset     = nodev,
 465         .devo_cb_ops    = &nvme_cb_ops,
 466         .devo_bus_ops   = NULL,
 467         .devo_power     = NULL,
 468         .devo_quiesce   = nvme_quiesce,
 469 };
 470 
 471 static struct modldrv nvme_modldrv = {
 472         .drv_modops     = &mod_driverops,
	.drv_linkinfo	= "NVMe v1.2",
 474         .drv_dev_ops    = &nvme_dev_ops
 475 };
 476 
 477 static struct modlinkage nvme_modlinkage = {
 478         .ml_rev         = MODREV_1,
 479         .ml_linkage     = { &nvme_modldrv, NULL }
 480 };
 481 
 482 static bd_ops_t nvme_bd_ops = {
 483         .o_version      = BD_OPS_VERSION_0,
 484         .o_drive_info   = nvme_bd_driveinfo,
 485         .o_media_info   = nvme_bd_mediainfo,
 486         .o_devid_init   = nvme_bd_devid,
 487         .o_sync_cache   = nvme_bd_sync,
 488         .o_read         = nvme_bd_read,
 489         .o_write        = nvme_bd_write,
 490 };
 491 
 492 /*
 493  * This list will hold commands that have timed out and couldn't be aborted.
 494  * As we don't know what the hardware may still do with the DMA memory we can't
 495  * free them, so we'll keep them forever on this list where we can easily look
 496  * at them with mdb.
 497  */
 498 static struct list nvme_lost_cmds;
 499 static kmutex_t nvme_lc_mutex;
 500 
 501 int
 502 _init(void)
 503 {
 504         int error;
 505 
 506         error = ddi_soft_state_init(&nvme_state, sizeof (nvme_t), 1);
 507         if (error != DDI_SUCCESS)
 508                 return (error);
 509 
 510         nvme_cmd_cache = kmem_cache_create("nvme_cmd_cache",
 511             sizeof (nvme_cmd_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
 512 
 513         mutex_init(&nvme_lc_mutex, NULL, MUTEX_DRIVER, NULL);
 514         list_create(&nvme_lost_cmds, sizeof (nvme_cmd_t),
 515             offsetof(nvme_cmd_t, nc_list));
 516 
 517         bd_mod_init(&nvme_dev_ops);
 518 
 519         error = mod_install(&nvme_modlinkage);
 520         if (error != DDI_SUCCESS) {
		ddi_soft_state_fini(&nvme_state);
		kmem_cache_destroy(nvme_cmd_cache);
		mutex_destroy(&nvme_lc_mutex);
		list_destroy(&nvme_lost_cmds);
		bd_mod_fini(&nvme_dev_ops);
 525         }
 526 
 527         return (error);
 528 }
 529 
 530 int
 531 _fini(void)
 532 {
 533         int error;
 534 
 535         if (!list_is_empty(&nvme_lost_cmds))
 536                 return (DDI_FAILURE);
 537 
 538         error = mod_remove(&nvme_modlinkage);
 539         if (error == DDI_SUCCESS) {
 540                 ddi_soft_state_fini(&nvme_state);
 541                 kmem_cache_destroy(nvme_cmd_cache);
 542                 mutex_destroy(&nvme_lc_mutex);
 543                 list_destroy(&nvme_lost_cmds);
 544                 bd_mod_fini(&nvme_dev_ops);
 545         }
 546 
 547         return (error);
 548 }
 549 
 550 int
 551 _info(struct modinfo *modinfop)
 552 {
 553         return (mod_info(&nvme_modlinkage, modinfop));
 554 }
 555 
 556 static inline void
 557 nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val)
 558 {
 559         ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);
 560 
 561         /*LINTED: E_BAD_PTR_CAST_ALIGN*/
 562         ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val);
 563 }
 564 
 565 static inline void
 566 nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val)
 567 {
 568         ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);
 569 
 570         /*LINTED: E_BAD_PTR_CAST_ALIGN*/
 571         ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val);
 572 }
 573 
 574 static inline uint64_t
 575 nvme_get64(nvme_t *nvme, uintptr_t reg)
 576 {
 577         uint64_t val;
 578 
 579         ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);
 580 
 581         /*LINTED: E_BAD_PTR_CAST_ALIGN*/
 582         val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg));
 583 
 584         return (val);
 585 }
 586 
 587 static inline uint32_t
 588 nvme_get32(nvme_t *nvme, uintptr_t reg)
 589 {
 590         uint32_t val;
 591 
 592         ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);
 593 
 594         /*LINTED: E_BAD_PTR_CAST_ALIGN*/
 595         val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg));
 596 
 597         return (val);
 598 }
 599 
 600 static boolean_t
 601 nvme_check_regs_hdl(nvme_t *nvme)
 602 {
 603         ddi_fm_error_t error;
 604 
 605         ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION);
 606 
 607         if (error.fme_status != DDI_FM_OK)
 608                 return (B_TRUE);
 609 
 610         return (B_FALSE);
 611 }
 612 
 613 static boolean_t
 614 nvme_check_dma_hdl(nvme_dma_t *dma)
 615 {
 616         ddi_fm_error_t error;
 617 
 618         if (dma == NULL)
 619                 return (B_FALSE);
 620 
 621         ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION);
 622 
 623         if (error.fme_status != DDI_FM_OK)
 624                 return (B_TRUE);
 625 
 626         return (B_FALSE);
 627 }
 628 
 629 static void
 630 nvme_free_dma_common(nvme_dma_t *dma)
 631 {
 632         if (dma->nd_dmah != NULL)
 633                 (void) ddi_dma_unbind_handle(dma->nd_dmah);
 634         if (dma->nd_acch != NULL)
 635                 ddi_dma_mem_free(&dma->nd_acch);
 636         if (dma->nd_dmah != NULL)
 637                 ddi_dma_free_handle(&dma->nd_dmah);
 638 }
 639 
 640 static void
 641 nvme_free_dma(nvme_dma_t *dma)
 642 {
 643         nvme_free_dma_common(dma);
 644         kmem_free(dma, sizeof (*dma));
 645 }
 646 
 647 /* ARGSUSED */
 648 static void
 649 nvme_prp_dma_destructor(void *buf, void *private)
 650 {
 651         nvme_dma_t *dma = (nvme_dma_t *)buf;
 652 
 653         nvme_free_dma_common(dma);
 654 }
 655 
 656 static int
 657 nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma,
 658     size_t len, uint_t flags, ddi_dma_attr_t *dma_attr)
 659 {
 660         if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL,
 661             &dma->nd_dmah) != DDI_SUCCESS) {
 662                 /*
 663                  * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and
 664                  * the only other possible error is DDI_DMA_BADATTR which
 665                  * indicates a driver bug which should cause a panic.
 666                  */
 667                 dev_err(nvme->n_dip, CE_PANIC,
 668                     "!failed to get DMA handle, check DMA attributes");
 669                 return (DDI_FAILURE);
 670         }
 671 
 672         /*
 673          * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified
 674          * or the flags are conflicting, which isn't the case here.
 675          */
 676         (void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr,
 677             DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp,
 678             &dma->nd_len, &dma->nd_acch);
 679 
 680         if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp,
 681             dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 682             &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) {
 683                 dev_err(nvme->n_dip, CE_WARN,
 684                     "!failed to bind DMA memory");
 685                 atomic_inc_32(&nvme->n_dma_bind_err);
 686                 nvme_free_dma_common(dma);
 687                 return (DDI_FAILURE);
 688         }
 689 
 690         return (DDI_SUCCESS);
 691 }
 692 
 693 static int
 694 nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags,
 695     ddi_dma_attr_t *dma_attr, nvme_dma_t **ret)
 696 {
 697         nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP);
 698 
 699         if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) !=
 700             DDI_SUCCESS) {
 701                 *ret = NULL;
 702                 kmem_free(dma, sizeof (nvme_dma_t));
 703                 return (DDI_FAILURE);
 704         }
 705 
 706         bzero(dma->nd_memp, dma->nd_len);
 707 
 708         *ret = dma;
 709         return (DDI_SUCCESS);
 710 }
 711 
 712 /* ARGSUSED */
 713 static int
 714 nvme_prp_dma_constructor(void *buf, void *private, int flags)
 715 {
 716         nvme_dma_t *dma = (nvme_dma_t *)buf;
 717         nvme_t *nvme = (nvme_t *)private;
 718 
 719         dma->nd_dmah = NULL;
 720         dma->nd_acch = NULL;
 721 
 722         if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize,
 723             DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) {
 724                 return (-1);
 725         }
 726 
 727         ASSERT(dma->nd_ncookie == 1);
 728 
 729         dma->nd_cached = B_TRUE;
 730 
 731         return (0);
 732 }
 733 
 734 static int
 735 nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len,
 736     uint_t flags, nvme_dma_t **dma)
 737 {
 738         uint32_t len = nentry * qe_len;
 739         ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr;
 740 
 741         len = roundup(len, nvme->n_pagesize);
 742 
 743         q_dma_attr.dma_attr_minxfer = len;
 744 
 745         if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma)
 746             != DDI_SUCCESS) {
 747                 dev_err(nvme->n_dip, CE_WARN,
 748                     "!failed to get DMA memory for queue");
 749                 goto fail;
 750         }
 751 
 752         if ((*dma)->nd_ncookie != 1) {
 753                 dev_err(nvme->n_dip, CE_WARN,
 754                     "!got too many cookies for queue DMA");
 755                 goto fail;
 756         }
 757 
 758         return (DDI_SUCCESS);
 759 
 760 fail:
 761         if (*dma) {
 762                 nvme_free_dma(*dma);
 763                 *dma = NULL;
 764         }
 765 
 766         return (DDI_FAILURE);
 767 }
 768 
 769 static void
 770 nvme_free_qpair(nvme_qpair_t *qp)
 771 {
 772         int i;
 773 
 774         mutex_destroy(&qp->nq_mutex);
 775         sema_destroy(&qp->nq_sema);
 776 
 777         if (qp->nq_sqdma != NULL)
 778                 nvme_free_dma(qp->nq_sqdma);
 779         if (qp->nq_cqdma != NULL)
 780                 nvme_free_dma(qp->nq_cqdma);
 781 
 782         if (qp->nq_active_cmds > 0)
 783                 for (i = 0; i != qp->nq_nentry; i++)
 784                         if (qp->nq_cmd[i] != NULL)
 785                                 nvme_free_cmd(qp->nq_cmd[i]);
 786 
 787         if (qp->nq_cmd != NULL)
 788                 kmem_free(qp->nq_cmd, sizeof (nvme_cmd_t *) * qp->nq_nentry);
 789 
 790         kmem_free(qp, sizeof (nvme_qpair_t));
 791 }
 792 
 793 static int
 794 nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp,
 795     int idx)
 796 {
 797         nvme_qpair_t *qp = kmem_zalloc(sizeof (*qp), KM_SLEEP);
 798 
 799         mutex_init(&qp->nq_mutex, NULL, MUTEX_DRIVER,
 800             DDI_INTR_PRI(nvme->n_intr_pri));
 801         sema_init(&qp->nq_sema, nentry, NULL, SEMA_DRIVER, NULL);
 802 
 803         if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t),
 804             DDI_DMA_WRITE, &qp->nq_sqdma) != DDI_SUCCESS)
 805                 goto fail;
 806 
 807         if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t),
 808             DDI_DMA_READ, &qp->nq_cqdma) != DDI_SUCCESS)
 809                 goto fail;
 810 
 811         qp->nq_sq = (nvme_sqe_t *)qp->nq_sqdma->nd_memp;
 812         qp->nq_cq = (nvme_cqe_t *)qp->nq_cqdma->nd_memp;
 813         qp->nq_nentry = nentry;
 814 
 815         qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx);
 816         qp->nq_cqhdbl = NVME_REG_CQHDBL(nvme, idx);
 817 
 818         qp->nq_cmd = kmem_zalloc(sizeof (nvme_cmd_t *) * nentry, KM_SLEEP);
 819         qp->nq_next_cmd = 0;
 820 
 821         *nqp = qp;
 822         return (DDI_SUCCESS);
 823 
 824 fail:
 825         nvme_free_qpair(qp);
 826         *nqp = NULL;
 827 
 828         return (DDI_FAILURE);
 829 }
 830 
 831 static nvme_cmd_t *
 832 nvme_alloc_cmd(nvme_t *nvme, int kmflag)
 833 {
 834         nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag);
 835 
 836         if (cmd == NULL)
 837                 return (cmd);
 838 
 839         bzero(cmd, sizeof (nvme_cmd_t));
 840 
 841         cmd->nc_nvme = nvme;
 842 
 843         mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER,
 844             DDI_INTR_PRI(nvme->n_intr_pri));
 845         cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL);
 846 
 847         return (cmd);
 848 }
 849 
 850 static void
 851 nvme_free_cmd(nvme_cmd_t *cmd)
 852 {
 853         /* Don't free commands on the lost commands list. */
 854         if (list_link_active(&cmd->nc_list))
 855                 return;
 856 
 857         if (cmd->nc_dma) {
 858                 if (cmd->nc_dma->nd_cached)
 859                         kmem_cache_free(cmd->nc_nvme->n_prp_cache,
 860                             cmd->nc_dma);
 861                 else
 862                         nvme_free_dma(cmd->nc_dma);
 863                 cmd->nc_dma = NULL;
 864         }
 865 
 866         cv_destroy(&cmd->nc_cv);
 867         mutex_destroy(&cmd->nc_mutex);
 868 
 869         kmem_cache_free(nvme_cmd_cache, cmd);
 870 }
 871 
 872 static void
 873 nvme_submit_admin_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
 874 {
 875         sema_p(&qp->nq_sema);
 876         nvme_submit_cmd_common(qp, cmd);
 877 }
 878 
 879 static int
 880 nvme_submit_io_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
 881 {
 882         if (sema_tryp(&qp->nq_sema) == 0)
 883                 return (EAGAIN);
 884 
 885         nvme_submit_cmd_common(qp, cmd);
 886         return (0);
 887 }
 888 
 889 static void
 890 nvme_submit_cmd_common(nvme_qpair_t *qp, nvme_cmd_t *cmd)
 891 {
 892         nvme_reg_sqtdbl_t tail = { 0 };
 893 
 894         mutex_enter(&qp->nq_mutex);
 895         cmd->nc_completed = B_FALSE;
 896 
 897         /*
 898          * Try to insert the cmd into the active cmd array at the nq_next_cmd
 899          * slot. If the slot is already occupied advance to the next slot and
 900          * try again. This can happen for long running commands like async event
 901          * requests.
 902          */
 903         while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
 904                 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
 905         qp->nq_cmd[qp->nq_next_cmd] = cmd;
 906 
 907         qp->nq_active_cmds++;
 908 
 909         cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;
 910         bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t));
 911         (void) ddi_dma_sync(qp->nq_sqdma->nd_dmah,
 912             sizeof (nvme_sqe_t) * qp->nq_sqtail,
 913             sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV);
 914         qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
 915 
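	/* Advance the SQ tail and ring the doorbell to notify the hardware. */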
 916         tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry;
 917         nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r);
 918 
 919         mutex_exit(&qp->nq_mutex);
 920 }
 921 
 922 static nvme_cmd_t *
 923 nvme_unqueue_cmd(nvme_t *nvme, nvme_qpair_t *qp, int cid)
 924 {
 925         nvme_cmd_t *cmd;
 926 
 927         ASSERT(mutex_owned(&qp->nq_mutex));
 928         ASSERT3S(cid, <, qp->nq_nentry);
 929 
 930         cmd = qp->nq_cmd[cid];
 931         qp->nq_cmd[cid] = NULL;
 932         ASSERT3U(qp->nq_active_cmds, >, 0);
 933         qp->nq_active_cmds--;
 934         sema_v(&qp->nq_sema);
 935 
 936         ASSERT3P(cmd, !=, NULL);
 937         ASSERT3P(cmd->nc_nvme, ==, nvme);
 938         ASSERT3S(cmd->nc_sqe.sqe_cid, ==, cid);
 939 
 940         return (cmd);
 941 }
 942 
 943 static nvme_cmd_t *
 944 nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp)
 945 {
 946         nvme_reg_cqhdbl_t head = { 0 };
 947 
 948         nvme_cqe_t *cqe;
 949         nvme_cmd_t *cmd;
 950 
 951         (void) ddi_dma_sync(qp->nq_cqdma->nd_dmah, 0,
 952             sizeof (nvme_cqe_t) * qp->nq_nentry, DDI_DMA_SYNC_FORKERNEL);
 953 
 954         mutex_enter(&qp->nq_mutex);
 955         cqe = &qp->nq_cq[qp->nq_cqhead];
 956 
 957         /* Check phase tag of CQE. Hardware inverts it for new entries. */
 958         if (cqe->cqe_sf.sf_p == qp->nq_phase) {
 959                 mutex_exit(&qp->nq_mutex);
 960                 return (NULL);
 961         }
 962 
 963         ASSERT(nvme->n_ioq[cqe->cqe_sqid] == qp);
 964 
 965         cmd = nvme_unqueue_cmd(nvme, qp, cqe->cqe_cid);
 966 
 967         ASSERT(cmd->nc_sqid == cqe->cqe_sqid);
 968         bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t));
 969 
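	/* The CQE reports the hardware's current SQ head pointer; save it. */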
 970         qp->nq_sqhead = cqe->cqe_sqhd;
 971 
 972         head.b.cqhdbl_cqh = qp->nq_cqhead = (qp->nq_cqhead + 1) % qp->nq_nentry;
 973 
 974         /* Toggle phase on wrap-around. */
 975         if (qp->nq_cqhead == 0)
 976                 qp->nq_phase = qp->nq_phase ? 0 : 1;
 977 
 978         nvme_put32(cmd->nc_nvme, qp->nq_cqhdbl, head.r);
 979         mutex_exit(&qp->nq_mutex);
 980 
 981         return (cmd);
 982 }
 983 
 984 static int
 985 nvme_check_unknown_cmd_status(nvme_cmd_t *cmd)
 986 {
 987         nvme_cqe_t *cqe = &cmd->nc_cqe;
 988 
 989         dev_err(cmd->nc_nvme->n_dip, CE_WARN,
 990             "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
 991             "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
 992             cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
 993             cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);
 994 
 995         if (cmd->nc_xfer != NULL)
 996                 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
 997 
 998         if (cmd->nc_nvme->n_strict_version) {
 999                 cmd->nc_nvme->n_dead = B_TRUE;
1000                 ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
1001         }
1002 
1003         return (EIO);
1004 }
1005 
1006 static int
1007 nvme_check_vendor_cmd_status(nvme_cmd_t *cmd)
1008 {
1009         nvme_cqe_t *cqe = &cmd->nc_cqe;
1010 
1011         dev_err(cmd->nc_nvme->n_dip, CE_WARN,
	    "!unknown vendor specific command status received: opc = %x, "
	    "sqid = %d, cid = %d, sc = %x, sct = %x, dnr = %d, m = %d",
	    cmd->nc_sqe.sqe_opc,
1014             cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
1015             cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);
1016         if (!cmd->nc_nvme->n_ignore_unknown_vendor_status) {
1017                 cmd->nc_nvme->n_dead = B_TRUE;
1018                 ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
1019         }
1020 
1021         return (EIO);
1022 }
1023 
1024 static int
1025 nvme_check_integrity_cmd_status(nvme_cmd_t *cmd)
1026 {
1027         nvme_cqe_t *cqe = &cmd->nc_cqe;
1028 
1029         switch (cqe->cqe_sf.sf_sc) {
1030         case NVME_CQE_SC_INT_NVM_WRITE:
1031                 /* write fail */
1032                 /* TODO: post ereport */
1033                 if (cmd->nc_xfer != NULL)
1034                         bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
1035                 return (EIO);
1036 
1037         case NVME_CQE_SC_INT_NVM_READ:
1038                 /* read fail */
1039                 /* TODO: post ereport */
1040                 if (cmd->nc_xfer != NULL)
1041                         bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
1042                 return (EIO);
1043 
1044         default:
1045                 return (nvme_check_unknown_cmd_status(cmd));
1046         }
1047 }
1048 
1049 static int
1050 nvme_check_generic_cmd_status(nvme_cmd_t *cmd)
1051 {
1052         nvme_cqe_t *cqe = &cmd->nc_cqe;
1053 
1054         switch (cqe->cqe_sf.sf_sc) {
1055         case NVME_CQE_SC_GEN_SUCCESS:
1056                 return (0);
1057 
1058         /*
1059          * Errors indicating a bug in the driver should cause a panic.
1060          */
1061         case NVME_CQE_SC_GEN_INV_OPC:
1062                 /* Invalid Command Opcode */
1063                 if (!cmd->nc_dontpanic)
1064                         dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
1065                             "programming error: invalid opcode in cmd %p",
1066                             (void *)cmd);
1067                 return (EINVAL);
1068 
1069         case NVME_CQE_SC_GEN_INV_FLD:
1070                 /* Invalid Field in Command */
1071                 if (!cmd->nc_dontpanic)
1072                         dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
1073                             "programming error: invalid field in cmd %p",
1074                             (void *)cmd);
1075                 return (EIO);
1076 
1077         case NVME_CQE_SC_GEN_ID_CNFL:
1078                 /* Command ID Conflict */
1079                 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
1080                     "cmd ID conflict in cmd %p", (void *)cmd);
1081                 return (0);
1082 
1083         case NVME_CQE_SC_GEN_INV_NS:
1084                 /* Invalid Namespace or Format */
1085                 if (!cmd->nc_dontpanic)
1086                         dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
1087                             "programming error: invalid NS/format in cmd %p",
1088                             (void *)cmd);
1089                 return (EINVAL);
1090 
1091         case NVME_CQE_SC_GEN_NVM_LBA_RANGE:
1092                 /* LBA Out Of Range */
1093                 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
1094                     "LBA out of range in cmd %p", (void *)cmd);
1095                 return (0);
1096 
1097         /*
1098          * Non-fatal errors, handle gracefully.
1099          */
1100         case NVME_CQE_SC_GEN_DATA_XFR_ERR:
1101                 /* Data Transfer Error (DMA) */
1102                 /* TODO: post ereport */
1103                 atomic_inc_32(&cmd->nc_nvme->n_data_xfr_err);
1104                 if (cmd->nc_xfer != NULL)
1105                         bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
1106                 return (EIO);
1107 
1108         case NVME_CQE_SC_GEN_INTERNAL_ERR:
1109                 /*
1110                  * Internal Error. The spec (v1.0, section 4.5.1.2) says
1111                  * detailed error information is returned as async event,
1112                  * so we pretty much ignore the error here and handle it
1113                  * in the async event handler.
1114                  */
1115                 atomic_inc_32(&cmd->nc_nvme->n_internal_err);
1116                 if (cmd->nc_xfer != NULL)
1117                         bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
1118                 return (EIO);
1119 
1120         case NVME_CQE_SC_GEN_ABORT_REQUEST:
1121                 /*
1122                  * Command Abort Requested. This normally happens only when a
1123                  * command times out.
1124                  */
1125                 /* TODO: post ereport or change blkdev to handle this? */
1126                 atomic_inc_32(&cmd->nc_nvme->n_abort_rq_err);
1127                 return (ECANCELED);
1128 
1129         case NVME_CQE_SC_GEN_ABORT_PWRLOSS:
1130                 /* Command Aborted due to Power Loss Notification */
1131                 ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
1132                 cmd->nc_nvme->n_dead = B_TRUE;
1133                 return (EIO);
1134 
1135         case NVME_CQE_SC_GEN_ABORT_SQ_DEL:
1136                 /* Command Aborted due to SQ Deletion */
1137                 atomic_inc_32(&cmd->nc_nvme->n_abort_sq_del);
1138                 return (EIO);
1139 
1140         case NVME_CQE_SC_GEN_NVM_CAP_EXC:
1141                 /* Capacity Exceeded */
1142                 atomic_inc_32(&cmd->nc_nvme->n_nvm_cap_exc);
1143                 if (cmd->nc_xfer != NULL)
1144                         bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
1145                 return (EIO);
1146 
1147         case NVME_CQE_SC_GEN_NVM_NS_NOTRDY:
1148                 /* Namespace Not Ready */
1149                 atomic_inc_32(&cmd->nc_nvme->n_nvm_ns_notrdy);
1150                 if (cmd->nc_xfer != NULL)
1151                         bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
1152                 return (EIO);
1153 
1154         default:
1155                 return (nvme_check_unknown_cmd_status(cmd));
1156         }
1157 }
1158 
1159 static int
1160 nvme_check_specific_cmd_status(nvme_cmd_t *cmd)
1161 {
1162         nvme_cqe_t *cqe = &cmd->nc_cqe;
1163 
1164         switch (cqe->cqe_sf.sf_sc) {
1165         case NVME_CQE_SC_SPC_INV_CQ:
1166                 /* Completion Queue Invalid */
1167                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE);
1168                 atomic_inc_32(&cmd->nc_nvme->n_inv_cq_err);
1169                 return (EINVAL);
1170 
1171         case NVME_CQE_SC_SPC_INV_QID:
1172                 /* Invalid Queue Identifier */
1173                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
1174                     cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_SQUEUE ||
1175                     cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE ||
1176                     cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
1177                 atomic_inc_32(&cmd->nc_nvme->n_inv_qid_err);
1178                 return (EINVAL);
1179 
1180         case NVME_CQE_SC_SPC_MAX_QSZ_EXC:
1181                 /* Max Queue Size Exceeded */
1182                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
1183                     cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
1184                 atomic_inc_32(&cmd->nc_nvme->n_max_qsz_exc);
1185                 return (EINVAL);
1186 
1187         case NVME_CQE_SC_SPC_ABRT_CMD_EXC:
1188                 /* Abort Command Limit Exceeded */
1189                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT);
1190                 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
1191                     "abort command limit exceeded in cmd %p", (void *)cmd);
1192                 return (0);
1193 
1194         case NVME_CQE_SC_SPC_ASYNC_EVREQ_EXC:
1195                 /* Async Event Request Limit Exceeded */
1196                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ASYNC_EVENT);
1197                 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
1198                     "async event request limit exceeded in cmd %p",
1199                     (void *)cmd);
1200                 return (0);
1201 
1202         case NVME_CQE_SC_SPC_INV_INT_VECT:
1203                 /* Invalid Interrupt Vector */
1204                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
1205                 atomic_inc_32(&cmd->nc_nvme->n_inv_int_vect);
1206                 return (EINVAL);
1207 
1208         case NVME_CQE_SC_SPC_INV_LOG_PAGE:
1209                 /* Invalid Log Page */
1210                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_GET_LOG_PAGE);
1211                 atomic_inc_32(&cmd->nc_nvme->n_inv_log_page);
1212                 return (EINVAL);
1213 
1214         case NVME_CQE_SC_SPC_INV_FORMAT:
1215                 /* Invalid Format */
1216                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_FORMAT);
1217                 atomic_inc_32(&cmd->nc_nvme->n_inv_format);
1218                 if (cmd->nc_xfer != NULL)
1219                         bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
1220                 return (EINVAL);
1221 
1222         case NVME_CQE_SC_SPC_INV_Q_DEL:
1223                 /* Invalid Queue Deletion */
1224                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
1225                 atomic_inc_32(&cmd->nc_nvme->n_inv_q_del);
1226                 return (EINVAL);
1227 
1228         case NVME_CQE_SC_SPC_NVM_CNFL_ATTR:
1229                 /* Conflicting Attributes */
1230                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_DSET_MGMT ||
1231                     cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
1232                     cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
1233                 atomic_inc_32(&cmd->nc_nvme->n_cnfl_attr);
1234                 if (cmd->nc_xfer != NULL)
1235                         bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
1236                 return (EINVAL);
1237 
1238         case NVME_CQE_SC_SPC_NVM_INV_PROT:
1239                 /* Invalid Protection Information */
1240                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_COMPARE ||
1241                     cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
1242                     cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
1243                 atomic_inc_32(&cmd->nc_nvme->n_inv_prot);
1244                 if (cmd->nc_xfer != NULL)
1245                         bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
1246                 return (EINVAL);
1247 
1248         case NVME_CQE_SC_SPC_NVM_READONLY:
1249                 /* Write to Read Only Range */
1250                 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
1251                 atomic_inc_32(&cmd->nc_nvme->n_readonly);
1252                 if (cmd->nc_xfer != NULL)
1253                         bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
1254                 return (EROFS);
1255 
1256         default:
1257                 return (nvme_check_unknown_cmd_status(cmd));
1258         }
1259 }
1260 
1261 static inline int
1262 nvme_check_cmd_status(nvme_cmd_t *cmd)
1263 {
1264         nvme_cqe_t *cqe = &cmd->nc_cqe;
1265 
1266         /*
1267          * Take a shortcut if the controller is dead, or if
1268          * command status indicates no error.
1269          */
1270         if (cmd->nc_nvme->n_dead)
1271                 return (EIO);
1272 
1273         if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
1274             cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS)
1275                 return (0);
1276 
1277         if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC)
1278                 return (nvme_check_generic_cmd_status(cmd));
1279         else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC)
1280                 return (nvme_check_specific_cmd_status(cmd));
1281         else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY)
1282                 return (nvme_check_integrity_cmd_status(cmd));
1283         else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR)
1284                 return (nvme_check_vendor_cmd_status(cmd));
1285 
1286         return (nvme_check_unknown_cmd_status(cmd));
1287 }
1288 
1289 static int
1290 nvme_abort_cmd(nvme_cmd_t *abort_cmd, uint_t sec)
1291 {
1292         nvme_t *nvme = abort_cmd->nc_nvme;
1293         nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1294         nvme_abort_cmd_t ac = { 0 };
1295         int ret = 0;
1296 
1297         sema_p(&nvme->n_abort_sema);
1298 
1299         ac.b.ac_cid = abort_cmd->nc_sqe.sqe_cid;
1300         ac.b.ac_sqid = abort_cmd->nc_sqid;
1301 
1302         cmd->nc_sqid = 0;
1303         cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT;
1304         cmd->nc_callback = nvme_wakeup_cmd;
1305         cmd->nc_sqe.sqe_cdw10 = ac.r;
1306 
1307         /*
1308          * Send the ABORT to the hardware. The ABORT command will return _after_
1309          * the aborted command has completed (aborted or otherwise), but since
1310          * we still hold the aborted command's mutex its callback hasn't been
1311          * processed yet.
1312          */
1313         nvme_admin_cmd(cmd, sec);
1314         sema_v(&nvme->n_abort_sema);
1315 
1316         if ((ret = nvme_check_cmd_status(cmd)) != 0) {
1317                 dev_err(nvme->n_dip, CE_WARN,
1318                     "!ABORT failed with sct = %x, sc = %x",
1319                     cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
1320                 atomic_inc_32(&nvme->n_abort_failed);
1321         } else {
1322                 dev_err(nvme->n_dip, CE_WARN,
1323                     "!ABORT of command %d/%d %ssuccessful",
1324                     abort_cmd->nc_sqe.sqe_cid, abort_cmd->nc_sqid,
1325                     cmd->nc_cqe.cqe_dw0 & 1 ? "un" : "");
1326                 if ((cmd->nc_cqe.cqe_dw0 & 1) == 0)
1327                         atomic_inc_32(&nvme->n_cmd_aborted);
1328         }
1329 
1330         nvme_free_cmd(cmd);
1331         return (ret);
1332 }
1333 
1334 /*
1335  * nvme_wait_cmd -- wait for command completion or timeout
1336  *
1337  * In case of a serious error or a timeout of the abort command the hardware
1338  * will be declared dead and FMA will be notified.
1339  */
1340 static void
1341 nvme_wait_cmd(nvme_cmd_t *cmd, uint_t sec)
1342 {
1343         clock_t timeout = ddi_get_lbolt() + drv_usectohz(sec * MICROSEC);
1344         nvme_t *nvme = cmd->nc_nvme;
1345         nvme_reg_csts_t csts;
1346         nvme_qpair_t *qp;
1347 
1348         ASSERT(mutex_owned(&cmd->nc_mutex));
1349 
1350         while (!cmd->nc_completed) {
1351                 if (cv_timedwait(&cmd->nc_cv, &cmd->nc_mutex, timeout) == -1)
1352                         break;
1353         }
1354 
1355         if (cmd->nc_completed)
1356                 return;
1357 
1358         /*
1359          * The command timed out.
1360          *
1361          * Check controller for fatal status, any errors associated with the
1362          * register or DMA handle, or for a double timeout (abort command timed
1363          * out). If necessary log a warning and call FMA.
1364          */
1365         csts.r = nvme_get32(nvme, NVME_REG_CSTS);
1366         dev_err(nvme->n_dip, CE_WARN, "!command %d/%d timeout, "
1367             "OPC = %x, CFS = %d", cmd->nc_sqe.sqe_cid, cmd->nc_sqid,
1368             cmd->nc_sqe.sqe_opc, csts.b.csts_cfs);
1369         atomic_inc_32(&nvme->n_cmd_timeout);
1370 
1371         if (csts.b.csts_cfs ||
1372             nvme_check_regs_hdl(nvme) ||
1373             nvme_check_dma_hdl(cmd->nc_dma) ||
1374             cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT) {
1375                 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1376                 nvme->n_dead = B_TRUE;
1377         } else if (nvme_abort_cmd(cmd, sec) == 0) {
1378                 /*
1379                  * If the abort succeeded the command should complete
1380                  * immediately with an appropriate status.
1381                  */
1382                 while (!cmd->nc_completed)
1383                         cv_wait(&cmd->nc_cv, &cmd->nc_mutex);
1384 
1385                 return;
1386         }
1387 
1388         qp = nvme->n_ioq[cmd->nc_sqid];
1389 
1390         mutex_enter(&qp->nq_mutex);
1391         (void) nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid);
1392         mutex_exit(&qp->nq_mutex);
1393 
1394         /*
1395          * As we don't know what the presumed dead hardware might still do with
1396          * the DMA memory, we'll put the command on the lost commands list if it
1397          * has any DMA memory.
1398          */
1399         if (cmd->nc_dma != NULL) {
1400                 mutex_enter(&nvme_lc_mutex);
1401                 list_insert_head(&nvme_lost_cmds, cmd);
1402                 mutex_exit(&nvme_lc_mutex);
1403         }
1404 }
1405 
1406 static void
1407 nvme_wakeup_cmd(void *arg)
1408 {
1409         nvme_cmd_t *cmd = arg;
1410 
1411         mutex_enter(&cmd->nc_mutex);
1412         cmd->nc_completed = B_TRUE;
1413         cv_signal(&cmd->nc_cv);
1414         mutex_exit(&cmd->nc_mutex);
1415 }
1416 
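     /*
      * nvme_async_event_task -- handle a completed async event request
      *
      * Runs in taskq context. Unless the request itself failed it is
      * immediately re-armed, and the reported event is then dispatched on
      * its type: the matching log page is fetched, error counters are
      * updated, and fatal conditions are reported to FMA.
      */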
1417 static void
1418 nvme_async_event_task(void *arg)
1419 {
1420         nvme_cmd_t *cmd = arg;
1421         nvme_t *nvme = cmd->nc_nvme;
1422         nvme_error_log_entry_t *error_log = NULL;
1423         nvme_health_log_t *health_log = NULL;
1424         size_t logsize = 0;
1425         nvme_async_event_t event;
1426 
1427         /*
1428          * Check for errors associated with the async request itself. The only
1429          * command-specific error is "async event limit exceeded", which
1430          * indicates a programming error in the driver and causes a panic in
1431          * nvme_check_cmd_status().
1432          *
1433          * Other possible errors are various scenarios where the async request
1434          * was aborted, or internal errors in the device. Internal errors are
1435          * reported to FMA, the command aborts need no special handling here.
1436          *
1437          * Finally, at least qemu's NVMe emulation does not support async
1438          * event requests and will fail them with NVME_CQE_SC_GEN_INV_OPC
1439          * and DNR set. In that case we stop posting async event requests.
1440          */
1441 
1442         if (nvme_check_cmd_status(cmd) != 0) {
1443                 dev_err(cmd->nc_nvme->n_dip, CE_WARN,
1444                     "!async event request returned failure, sct = %x, "
1445                     "sc = %x, dnr = %d, m = %d", cmd->nc_cqe.cqe_sf.sf_sct,
1446                     cmd->nc_cqe.cqe_sf.sf_sc, cmd->nc_cqe.cqe_sf.sf_dnr,
1447                     cmd->nc_cqe.cqe_sf.sf_m);
1448 
1449                 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
1450                     cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INTERNAL_ERR) {
1451                         cmd->nc_nvme->n_dead = B_TRUE;
1452                         ddi_fm_service_impact(cmd->nc_nvme->n_dip,
1453                             DDI_SERVICE_LOST);
1454                 }
1455 
1456                 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
1457                     cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_OPC &&
1458                     cmd->nc_cqe.cqe_sf.sf_dnr == 1) {
1459                         nvme->n_async_event_supported = B_FALSE;
1460                 }
1461 
1462                 nvme_free_cmd(cmd);
1463                 return;
1464         }
1465 
1467         event.r = cmd->nc_cqe.cqe_dw0;
1468 
1469         /* Clear CQE and re-submit the async request. */
1470         bzero(&cmd->nc_cqe, sizeof (nvme_cqe_t));
1471         nvme_submit_admin_cmd(nvme->n_adminq, cmd);
1472 
1473         switch (event.b.ae_type) {
1474         case NVME_ASYNC_TYPE_ERROR:
1475                 if (event.b.ae_logpage == NVME_LOGPAGE_ERROR) {
1476                         (void) nvme_get_logpage(nvme, (void **)&error_log,
1477                             &logsize, event.b.ae_logpage);
1478                 } else {
1479                         dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
1480                             "async event reply: %d", event.b.ae_logpage);
1481                         atomic_inc_32(&nvme->n_wrong_logpage);
1482                 }
1483 
1484                 switch (event.b.ae_info) {
1485                 case NVME_ASYNC_ERROR_INV_SQ:
1486                         dev_err(nvme->n_dip, CE_PANIC, "programming error: "
1487                             "invalid submission queue");
1488                         return;
1489 
1490                 case NVME_ASYNC_ERROR_INV_DBL:
1491                         dev_err(nvme->n_dip, CE_PANIC, "programming error: "
1492                             "invalid doorbell write value");
1493                         return;
1494 
1495                 case NVME_ASYNC_ERROR_DIAGFAIL:
1496                         dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure");
1497                         ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1498                         nvme->n_dead = B_TRUE;
1499                         atomic_inc_32(&nvme->n_diagfail_event);
1500                         break;
1501 
1502                 case NVME_ASYNC_ERROR_PERSISTENT:
1503                         dev_err(nvme->n_dip, CE_WARN, "!persistent internal "
1504                             "device error");
1505                         ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1506                         nvme->n_dead = B_TRUE;
1507                         atomic_inc_32(&nvme->n_persistent_event);
1508                         break;
1509 
1510                 case NVME_ASYNC_ERROR_TRANSIENT:
1511                         dev_err(nvme->n_dip, CE_WARN, "!transient internal "
1512                             "device error");
1513                         /* TODO: send ereport */
1514                         atomic_inc_32(&nvme->n_transient_event);
1515                         break;
1516 
1517                 case NVME_ASYNC_ERROR_FW_LOAD:
1518                         dev_err(nvme->n_dip, CE_WARN,
1519                             "!firmware image load error");
1520                         atomic_inc_32(&nvme->n_fw_load_event);
1521                         break;
1522                 }
1523                 break;
1524 
1525         case NVME_ASYNC_TYPE_HEALTH:
1526                 if (event.b.ae_logpage == NVME_LOGPAGE_HEALTH) {
1527                         (void) nvme_get_logpage(nvme, (void **)&health_log,
1528                             &logsize, event.b.ae_logpage, -1);
1529                 } else {
1530                         dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
1531                             "async event reply: %d", event.b.ae_logpage);
1532                         atomic_inc_32(&nvme->n_wrong_logpage);
1533                 }
1534 
1535                 switch (event.b.ae_info) {
1536                 case NVME_ASYNC_HEALTH_RELIABILITY:
1537                         dev_err(nvme->n_dip, CE_WARN,
1538                             "!device reliability compromised");
1539                         /* TODO: send ereport */
1540                         atomic_inc_32(&nvme->n_reliability_event);
1541                         break;
1542 
1543                 case NVME_ASYNC_HEALTH_TEMPERATURE:
1544                         dev_err(nvme->n_dip, CE_WARN,
1545                             "!temperature above threshold");
1546                         /* TODO: send ereport */
1547                         atomic_inc_32(&nvme->n_temperature_event);
1548                         break;
1549 
1550                 case NVME_ASYNC_HEALTH_SPARE:
1551                         dev_err(nvme->n_dip, CE_WARN,
1552                             "!spare space below threshold");
1553                         /* TODO: send ereport */
1554                         atomic_inc_32(&nvme->n_spare_event);
1555                         break;
1556                 }
1557                 break;
1558 
1559         case NVME_ASYNC_TYPE_VENDOR:
1560                 dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event "
1561                     "received, info = %x, logpage = %x", event.b.ae_info,
1562                     event.b.ae_logpage);
1563                 atomic_inc_32(&nvme->n_vendor_event);
1564                 break;
1565 
1566         default:
1567                 dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, "
1568                     "type = %x, info = %x, logpage = %x", event.b.ae_type,
1569                     event.b.ae_info, event.b.ae_logpage);
1570                 atomic_inc_32(&nvme->n_unknown_event);
1571                 break;
1572         }
1573 
1574         if (error_log)
1575                 kmem_free(error_log, logsize);
1576 
1577         if (health_log)
1578                 kmem_free(health_log, logsize);
1579 }
1580 
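     /*
      * nvme_admin_cmd -- synchronously execute an admin command
      *
      * Submits the command to the admin queue and waits up to sec seconds
      * for it to complete; timeouts are handled by nvme_wait_cmd().
      */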
1581 static void
1582 nvme_admin_cmd(nvme_cmd_t *cmd, int sec)
1583 {
1584         mutex_enter(&cmd->nc_mutex);
1585         nvme_submit_admin_cmd(cmd->nc_nvme->n_adminq, cmd);
1586         nvme_wait_cmd(cmd, sec);
1587         mutex_exit(&cmd->nc_mutex);
1588 }
1589 
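     /*
      * nvme_async_event -- post an async event request to the admin queue
      *
      * The request completes only when the controller has an event to
      * report; completion is handled by nvme_async_event_task().
      * nc_dontpanic is set as some devices reject these requests.
      */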
1590 static void
1591 nvme_async_event(nvme_t *nvme)
1592 {
1593         nvme_cmd_t *cmd;
1594 
1595         cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1596         cmd->nc_sqid = 0;
1597         cmd->nc_sqe.sqe_opc = NVME_OPC_ASYNC_EVENT;
1598         cmd->nc_callback = nvme_async_event_task;
1599         cmd->nc_dontpanic = B_TRUE;
1600 
1601         nvme_submit_admin_cmd(nvme->n_adminq, cmd);
1602 }
1603 
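     /*
      * nvme_format_nvm -- issue a FORMAT NVM admin command
      *
      * lbaf selects the LBA format; ms, pi and pil control metadata and
      * protection information; ses selects the secure erase settings.
      */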
1604 static int
1605 nvme_format_nvm(nvme_t *nvme, uint32_t nsid, uint8_t lbaf, boolean_t ms,
1606     uint8_t pi, boolean_t pil, uint8_t ses)
1607 {
1608         nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1609         nvme_format_nvm_t format_nvm = { 0 };
1610         int ret;
1611 
1612         format_nvm.b.fm_lbaf = lbaf & 0xf;
1613         format_nvm.b.fm_ms = ms ? 1 : 0;
1614         format_nvm.b.fm_pi = pi & 0x7;
1615         format_nvm.b.fm_pil = pil ? 1 : 0;
1616         format_nvm.b.fm_ses = ses & 0x7;
1617 
1618         cmd->nc_sqid = 0;
1619         cmd->nc_callback = nvme_wakeup_cmd;
1620         cmd->nc_sqe.sqe_nsid = nsid;
1621         cmd->nc_sqe.sqe_opc = NVME_OPC_NVM_FORMAT;
1622         cmd->nc_sqe.sqe_cdw10 = format_nvm.r;
1623 
1624         /*
1625          * Some devices like Samsung SM951 don't allow formatting of all
1626          * namespaces in one command. Handle that gracefully.
1627          */
1628         if (nsid == (uint32_t)-1)
1629                 cmd->nc_dontpanic = B_TRUE;
1630 
1631         nvme_admin_cmd(cmd, nvme_format_cmd_timeout);
1632 
1633         if ((ret = nvme_check_cmd_status(cmd)) != 0) {
1634                 dev_err(nvme->n_dip, CE_WARN,
1635                     "!FORMAT failed with sct = %x, sc = %x",
1636                     cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
1637         }
1638 
1639         nvme_free_cmd(cmd);
1640         return (ret);
1641 }
1642 
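     /*
      * nvme_get_logpage -- read a log page from the controller
      *
      * Sizes and allocates a DMA buffer for the requested page, issues GET
      * LOG PAGE, and returns a copy of the data in *buf, which the caller
      * must free with kmem_free(*buf, *bufsize). The variable argument
      * carries the namespace ID for the health log page.
      */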
1643 static int
1644 nvme_get_logpage(nvme_t *nvme, void **buf, size_t *bufsize, uint8_t logpage,
1645     ...)
1646 {
1647         nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1648         nvme_getlogpage_t getlogpage = { 0 };
1649         va_list ap;
1650         int ret;
1651 
1652         va_start(ap, logpage);
1653 
1654         cmd->nc_sqid = 0;
1655         cmd->nc_callback = nvme_wakeup_cmd;
1656         cmd->nc_sqe.sqe_opc = NVME_OPC_GET_LOG_PAGE;
1657 
1658         getlogpage.b.lp_lid = logpage;
1659 
1660         switch (logpage) {
1661         case NVME_LOGPAGE_ERROR:
1662                 cmd->nc_sqe.sqe_nsid = (uint32_t)-1;
1663                 /*
1664                  * The GET LOG PAGE command can use at most 2 pages to return
1665                  * data, PRP lists are not supported.
1666                  */
1667                 *bufsize = MIN(2 * nvme->n_pagesize,
1668                     nvme->n_error_log_len * sizeof (nvme_error_log_entry_t));
1669                 break;
1670 
1671         case NVME_LOGPAGE_HEALTH:
1672                 cmd->nc_sqe.sqe_nsid = va_arg(ap, uint32_t);
1673                 *bufsize = sizeof (nvme_health_log_t);
1674                 break;
1675 
1676         case NVME_LOGPAGE_FWSLOT:
1677                 cmd->nc_sqe.sqe_nsid = (uint32_t)-1;
1678                 *bufsize = sizeof (nvme_fwslot_log_t);
1679                 break;
1680 
1681         default:
1682                 dev_err(nvme->n_dip, CE_WARN, "!unknown log page requested: %d",
1683                     logpage);
1684                 atomic_inc_32(&nvme->n_unknown_logpage);
1685                 ret = EINVAL;
                     va_end(ap);
1686                 goto fail;
1687         }
1688 
1689         va_end(ap);
1690 
1691         getlogpage.b.lp_numd = *bufsize / sizeof (uint32_t) - 1;
1692 
1693         cmd->nc_sqe.sqe_cdw10 = getlogpage.r;
1694 
1695         /* The device transfers lp_numd + 1 dwords, i.e. the full *bufsize. */
             if (nvme_zalloc_dma(nvme, *bufsize, DDI_DMA_READ,
1696             &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
1697                 dev_err(nvme->n_dip, CE_WARN,
1698                     "!nvme_zalloc_dma failed for GET LOG PAGE");
1699                 ret = ENOMEM;
1700                 goto fail;
1701         }
1702 
1703         if (cmd->nc_dma->nd_ncookie > 2) {
1704                 dev_err(nvme->n_dip, CE_WARN,
1705                     "!too many DMA cookies for GET LOG PAGE");
1706                 atomic_inc_32(&nvme->n_too_many_cookies);
1707                 ret = ENOMEM;
1708                 goto fail;
1709         }
1710 
1711         cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress;
1712         if (cmd->nc_dma->nd_ncookie > 1) {
1713                 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
1714                     &cmd->nc_dma->nd_cookie);
1715                 cmd->nc_sqe.sqe_dptr.d_prp[1] =
1716                     cmd->nc_dma->nd_cookie.dmac_laddress;
1717         }
1718 
1719         nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
1720 
1721         if ((ret = nvme_check_cmd_status(cmd)) != 0) {
1722                 dev_err(nvme->n_dip, CE_WARN,
1723                     "!GET LOG PAGE failed with sct = %x, sc = %x",
1724                     cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
1725                 goto fail;
1726         }
1727 
1728         *buf = kmem_alloc(*bufsize, KM_SLEEP);
1729         bcopy(cmd->nc_dma->nd_memp, *buf, *bufsize);
1730 
1731 fail:
1732         nvme_free_cmd(cmd);
1733 
1734         return (ret);
1735 }
1736 
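     /*
      * nvme_identify -- issue an IDENTIFY admin command
      *
      * Returns the identify controller data structure for nsid == 0, or the
      * identify namespace data structure for a non-zero nsid. The caller
      * must free *buf with kmem_free(*buf, NVME_IDENTIFY_BUFSIZE).
      */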
1737 static int
1738 nvme_identify(nvme_t *nvme, uint32_t nsid, void **buf)
1739 {
1740         nvme_cmd_t *cmd;
1741         int ret;
1742 
1743         if (buf == NULL)
1744                 return (EINVAL);

             cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1745 
1746         cmd->nc_sqid = 0;
1747         cmd->nc_callback = nvme_wakeup_cmd;
1748         cmd->nc_sqe.sqe_opc = NVME_OPC_IDENTIFY;
1749         cmd->nc_sqe.sqe_nsid = nsid;
1750         cmd->nc_sqe.sqe_cdw10 = nsid ? NVME_IDENTIFY_NSID : NVME_IDENTIFY_CTRL;
1751 
1752         if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ,
1753             &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
1754                 dev_err(nvme->n_dip, CE_WARN,
1755                     "!nvme_zalloc_dma failed for IDENTIFY");
1756                 ret = ENOMEM;
1757                 goto fail;
1758         }
1759 
1760         if (cmd->nc_dma->nd_ncookie > 2) {
1761                 dev_err(nvme->n_dip, CE_WARN,
1762                     "!too many DMA cookies for IDENTIFY");
1763                 atomic_inc_32(&nvme->n_too_many_cookies);
1764                 ret = ENOMEM;
1765                 goto fail;
1766         }
1767 
1768         cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress;
1769         if (cmd->nc_dma->nd_ncookie > 1) {
1770                 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
1771                     &cmd->nc_dma->nd_cookie);
1772                 cmd->nc_sqe.sqe_dptr.d_prp[1] =
1773                     cmd->nc_dma->nd_cookie.dmac_laddress;
1774         }
1775 
1776         nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
1777 
1778         if ((ret = nvme_check_cmd_status(cmd)) != 0) {
1779                 dev_err(nvme->n_dip, CE_WARN,
1780                     "!IDENTIFY failed with sct = %x, sc = %x",
1781                     cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
1782                 goto fail;
1783         }
1784 
1785         *buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP);
1786         bcopy(cmd->nc_dma->nd_memp, *buf, NVME_IDENTIFY_BUFSIZE);
1787 
1788 fail:
1789         nvme_free_cmd(cmd);
1790 
1791         return (ret);
1792 }
1793 
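     /*
      * nvme_set_features -- issue a SET FEATURES admin command
      *
      * Only the write cache and number-of-queues features are supported.
      * Completion queue entry dword 0 is returned in *res.
      */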
1794 static int
1795 nvme_set_features(nvme_t *nvme, uint32_t nsid, uint8_t feature, uint32_t val,
1796     uint32_t *res)
1797 {
1798         _NOTE(ARGUNUSED(nsid));
1799         nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1800         int ret = EINVAL;
1801 
1802         ASSERT(res != NULL);
1803 
1804         cmd->nc_sqid = 0;
1805         cmd->nc_callback = nvme_wakeup_cmd;
1806         cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES;
1807         cmd->nc_sqe.sqe_cdw10 = feature;
1808         cmd->nc_sqe.sqe_cdw11 = val;
1809 
1810         switch (feature) {
1811         case NVME_FEAT_WRITE_CACHE:
1812                 if (!nvme->n_write_cache_present)
1813                         goto fail;
1814                 break;
1815 
1816         case NVME_FEAT_NQUEUES:
1817                 break;
1818 
1819         default:
1820                 goto fail;
1821         }
1822 
1823         nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
1824 
1825         if ((ret = nvme_check_cmd_status(cmd)) != 0) {
1826                 dev_err(nvme->n_dip, CE_WARN,
1827                     "!SET FEATURES %d failed with sct = %x, sc = %x",
1828                     feature, cmd->nc_cqe.cqe_sf.sf_sct,
1829                     cmd->nc_cqe.cqe_sf.sf_sc);
1830                 goto fail;
1831         }
1832 
1833         *res = cmd->nc_cqe.cqe_dw0;
1834 
1835 fail:
1836         nvme_free_cmd(cmd);
1837         return (ret);
1838 }
1839 
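     /*
      * nvme_get_features -- issue a GET FEATURES admin command
      *
      * Features that return extra data get a DMA buffer, a copy of which is
      * handed back in *buf/*bufsize. Optional features are probed with
      * nc_dontpanic set; if the controller rejects the command with
      * "Invalid Field" the corresponding n_*_supported flag is cleared.
      */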
1840 static int
1841 nvme_get_features(nvme_t *nvme, uint32_t nsid, uint8_t feature, uint32_t *res,
1842     void **buf, size_t *bufsize)
1843 {
1844         nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1845         int ret = EINVAL;
1846 
1847         ASSERT(res != NULL);
1848 
1849         if (bufsize != NULL)
1850                 *bufsize = 0;
1851 
1852         cmd->nc_sqid = 0;
1853         cmd->nc_callback = nvme_wakeup_cmd;
1854         cmd->nc_sqe.sqe_opc = NVME_OPC_GET_FEATURES;
1855         cmd->nc_sqe.sqe_cdw10 = feature;
1856         cmd->nc_sqe.sqe_cdw11 = *res;
1857 
1858         /*
1859          * For some of the optional features there doesn't seem to be a way
1860          * of detecting whether they are supported other than using them.
1861          * This causes an "Invalid Field in Command" error, normally considered
1862          * a programming error.  Set the nc_dontpanic flag to override the panic
1863          * in nvme_check_generic_cmd_status().
1864          */
1865         switch (feature) {
1866         case NVME_FEAT_ARBITRATION:
1867         case NVME_FEAT_POWER_MGMT:
1868         case NVME_FEAT_TEMPERATURE:
1869         case NVME_FEAT_ERROR:
1870         case NVME_FEAT_NQUEUES:
1871         case NVME_FEAT_INTR_COAL:
1872         case NVME_FEAT_INTR_VECT:
1873         case NVME_FEAT_WRITE_ATOM:
1874         case NVME_FEAT_ASYNC_EVENT:
1875                 break;
1876 
1877         case NVME_FEAT_WRITE_CACHE:
1878                 if (!nvme->n_write_cache_present)
1879                         goto fail;
1880                 break;
1881 
1882         case NVME_FEAT_LBA_RANGE:
1883                 if (!nvme->n_lba_range_supported)
1884                         goto fail;
1885 
1886                 cmd->nc_dontpanic = B_TRUE;
1887                 cmd->nc_sqe.sqe_nsid = nsid;
1888                 ASSERT(bufsize != NULL);
1889                 *bufsize = NVME_LBA_RANGE_BUFSIZE;
1890                 break;
1891 
1892         case NVME_FEAT_AUTO_PST:
1893                 if (!nvme->n_auto_pst_supported)
1894                         goto fail;
1895 
1896                 ASSERT(bufsize != NULL);
1897                 *bufsize = NVME_AUTO_PST_BUFSIZE;
1898                 break;
1899 
1900         case NVME_FEAT_PROGRESS:
1901                 if (!nvme->n_progress_supported)
1902                         goto fail;
1903 
1904                 cmd->nc_dontpanic = B_TRUE;
1905                 break;
1906 
1907         default:
1908                 goto fail;
1909         }
1910 
1911         if (bufsize != NULL && *bufsize != 0) {
1912                 if (nvme_zalloc_dma(nvme, *bufsize, DDI_DMA_READ,
1913                     &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
1914                         dev_err(nvme->n_dip, CE_WARN,
1915                             "!nvme_zalloc_dma failed for GET FEATURES");
1916                         ret = ENOMEM;
1917                         goto fail;
1918                 }
1919 
1920                 if (cmd->nc_dma->nd_ncookie > 2) {
1921                         dev_err(nvme->n_dip, CE_WARN,
1922                             "!too many DMA cookies for GET FEATURES");
1923                         atomic_inc_32(&nvme->n_too_many_cookies);
1924                         ret = ENOMEM;
1925                         goto fail;
1926                 }
1927 
1928                 cmd->nc_sqe.sqe_dptr.d_prp[0] =
1929                     cmd->nc_dma->nd_cookie.dmac_laddress;
1930                 if (cmd->nc_dma->nd_ncookie > 1) {
1931                         ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
1932                             &cmd->nc_dma->nd_cookie);
1933                         cmd->nc_sqe.sqe_dptr.d_prp[1] =
1934                             cmd->nc_dma->nd_cookie.dmac_laddress;
1935                 }
1936         }
1937 
1938         nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
1939 
1940         if ((ret = nvme_check_cmd_status(cmd)) != 0) {
1941                 boolean_t known = B_TRUE;
1942 
1943                 /* Check if this is an unsupported optional feature */
1944                 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
1945                     cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_FLD) {
1946                         switch (feature) {
1947                         case NVME_FEAT_LBA_RANGE:
1948                                 nvme->n_lba_range_supported = B_FALSE;
1949                                 break;
1950                         case NVME_FEAT_PROGRESS:
1951                                 nvme->n_progress_supported = B_FALSE;
1952                                 break;
1953                         default:
1954                                 known = B_FALSE;
1955                                 break;
1956                         }
1957                 } else {
1958                         known = B_FALSE;
1959                 }
1960 
1961                 /* Report the error otherwise */
1962                 if (!known) {
1963                         dev_err(nvme->n_dip, CE_WARN,
1964                             "!GET FEATURES %d failed with sct = %x, sc = %x",
1965                             feature, cmd->nc_cqe.cqe_sf.sf_sct,
1966                             cmd->nc_cqe.cqe_sf.sf_sc);
1967                 }
1968 
1969                 goto fail;
1970         }
1971 
1972         if (bufsize != NULL && *bufsize != 0) {
1973                 ASSERT(buf != NULL);
1974                 *buf = kmem_alloc(*bufsize, KM_SLEEP);
1975                 bcopy(cmd->nc_dma->nd_memp, *buf, *bufsize);
1976         }
1977 
1978         *res = cmd->nc_cqe.cqe_dw0;
1979 
1980 fail:
1981         nvme_free_cmd(cmd);
1982         return (ret);
1983 }
1984 
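     /*
      * nvme_write_cache_set -- enable or disable the volatile write cache
      */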
1985 static int
1986 nvme_write_cache_set(nvme_t *nvme, boolean_t enable)
1987 {
1988         nvme_write_cache_t nwc = { 0 };
1989 
1990         if (enable)
1991                 nwc.b.wc_wce = 1;
1992 
1993         return (nvme_set_features(nvme, 0, NVME_FEAT_WRITE_CACHE, nwc.r,
1994             &nwc.r));
1995 }
1996 
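     /*
      * nvme_set_nqueues -- negotiate the number of I/O queue pairs
      *
      * *nqueues holds the requested count on entry and the granted count on
      * return. The NQUEUES feature encodes 0-based values, hence the +/- 1
      * conversions around nvme_set_features().
      */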
1997 static int
1998 nvme_set_nqueues(nvme_t *nvme, uint16_t *nqueues)
1999 {
2000         nvme_nqueues_t nq = { 0 };
2001         int ret;
2002 
2003         nq.b.nq_nsq = nq.b.nq_ncq = *nqueues - 1;
2004 
2005         ret = nvme_set_features(nvme, 0, NVME_FEAT_NQUEUES, nq.r, &nq.r);
2006 
2007         if (ret == 0) {
2008                 /*
2009                  * Always use the same number of submission and completion
2010                  * queues, and never use more than the requested number of
2011                  * queues.
2012                  */
2013                 *nqueues = MIN(*nqueues, MIN(nq.b.nq_nsq, nq.b.nq_ncq) + 1);
2014         }
2015 
2016         return (ret);
2017 }
2018 
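     /*
      * nvme_create_io_qpair -- register an I/O queue pair with the hardware
      *
      * The completion queue must exist before the submission queue that
      * posts to it, so it is created first. Interrupt vectors are assigned
      * round-robin as idx % n_intr_cnt, matching the iteration in
      * nvme_intr().
      */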
2019 static int
2020 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx)
2021 {
2022         nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2023         nvme_create_queue_dw10_t dw10 = { 0 };
2024         nvme_create_cq_dw11_t c_dw11 = { 0 };
2025         nvme_create_sq_dw11_t s_dw11 = { 0 };
2026         int ret;
2027 
2028         dw10.b.q_qid = idx;
2029         dw10.b.q_qsize = qp->nq_nentry - 1;
2030 
2031         c_dw11.b.cq_pc = 1;
2032         c_dw11.b.cq_ien = 1;
2033         c_dw11.b.cq_iv = idx % nvme->n_intr_cnt;
2034 
2035         cmd->nc_sqid = 0;
2036         cmd->nc_callback = nvme_wakeup_cmd;
2037         cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE;
2038         cmd->nc_sqe.sqe_cdw10 = dw10.r;
2039         cmd->nc_sqe.sqe_cdw11 = c_dw11.r;
2040         cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_cqdma->nd_cookie.dmac_laddress;
2041 
2042         nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2043 
2044         if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2045                 dev_err(nvme->n_dip, CE_WARN,
2046                     "!CREATE CQUEUE failed with sct = %x, sc = %x",
2047                     cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
2048                 goto fail;
2049         }
2050 
2051         nvme_free_cmd(cmd);
2052 
2053         s_dw11.b.sq_pc = 1;
2054         s_dw11.b.sq_cqid = idx;
2055 
2056         cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2057         cmd->nc_sqid = 0;
2058         cmd->nc_callback = nvme_wakeup_cmd;
2059         cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_SQUEUE;
2060         cmd->nc_sqe.sqe_cdw10 = dw10.r;
2061         cmd->nc_sqe.sqe_cdw11 = s_dw11.r;
2062         cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_sqdma->nd_cookie.dmac_laddress;
2063 
2064         nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2065 
2066         if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2067                 dev_err(nvme->n_dip, CE_WARN,
2068                     "!CREATE SQUEUE failed with sct = %x, sc = %x",
2069                     cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
2070                 goto fail;
2071         }
2072 
2073 fail:
2074         nvme_free_cmd(cmd);
2075 
2076         return (ret);
2077 }
2078 
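     /*
      * nvme_reset -- disable the controller and wait until it is not ready
      *
      * Returns B_TRUE if CSTS.RDY cleared within the CAP.TO timeout (kept
      * in n_timeout, in 500ms units). With quiesce set the wait busy-spins,
      * as blocking is not allowed in quiesce context.
      */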
2079 static boolean_t
2080 nvme_reset(nvme_t *nvme, boolean_t quiesce)
2081 {
2082         nvme_reg_csts_t csts;
2083         int i;
2084 
2085         nvme_put32(nvme, NVME_REG_CC, 0);
2086 
2087         csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2088         if (csts.b.csts_rdy == 1) {
2089                 nvme_put32(nvme, NVME_REG_CC, 0);
2090                 for (i = 0; i != nvme->n_timeout * 10; i++) {
2091                         csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2092                         if (csts.b.csts_rdy == 0)
2093                                 break;
2094 
2095                         if (quiesce)
2096                                 drv_usecwait(50000);
2097                         else
2098                                 delay(drv_usectohz(50000));
2099                 }
2100         }
2101 
2102         nvme_put32(nvme, NVME_REG_AQA, 0);
2103         nvme_put32(nvme, NVME_REG_ASQ, 0);
2104         nvme_put32(nvme, NVME_REG_ACQ, 0);
2105 
2106         csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2107         return (csts.b.csts_rdy == 0 ? B_TRUE : B_FALSE);
2108 }
2109 
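     /*
      * nvme_shutdown -- request a controller shutdown
      *
      * Sets CC.SHN to the requested mode (normal or abrupt) and polls
      * CSTS.SHST for up to one second for shutdown completion.
      */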
2110 static void
2111 nvme_shutdown(nvme_t *nvme, int mode, boolean_t quiesce)
2112 {
2113         nvme_reg_cc_t cc;
2114         nvme_reg_csts_t csts;
2115         int i;
2116 
2117         ASSERT(mode == NVME_CC_SHN_NORMAL || mode == NVME_CC_SHN_ABRUPT);
2118 
2119         cc.r = nvme_get32(nvme, NVME_REG_CC);
2120         cc.b.cc_shn = mode & 0x3;
2121         nvme_put32(nvme, NVME_REG_CC, cc.r);
2122 
2123         for (i = 0; i != 10; i++) {
2124                 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2125                 if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE)
2126                         break;
2127 
2128                 if (quiesce)
2129                         drv_usecwait(100000);
2130                 else
2131                         delay(drv_usectohz(100000));
2132         }
2133 }
2134 
2136 static void
2137 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid)
2138 {
2139         /*
2140          * Section 7.7 of the spec describes how to get a unique ID for
2141          * the controller: the vendor ID, the model name and the serial
2142          * number shall be unique when combined.
2143          *
2144          * If a namespace has no EUI64 we use the above and add the hex
2145          * namespace ID to get a unique ID for the namespace.
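              *
              * As a hypothetical example: VID 0x1234, model "ACME X" and
              * serial "SN01" would yield "1234-ACME X-SN01-1" for namespace 1.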
2146          */
2147         char model[sizeof (nvme->n_idctl->id_model) + 1];
2148         char serial[sizeof (nvme->n_idctl->id_serial) + 1];
2149 
2150         bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
2151         bcopy(nvme->n_idctl->id_serial, serial,
2152             sizeof (nvme->n_idctl->id_serial));
2153 
2154         model[sizeof (nvme->n_idctl->id_model)] = '\0';
2155         serial[sizeof (nvme->n_idctl->id_serial)] = '\0';
2156 
2157         nvme->n_ns[nsid - 1].ns_devid = kmem_asprintf("%4X-%s-%s-%X",
2158             nvme->n_idctl->id_vid, model, serial, nsid);
2159 }
2160 
2161 static int
2162 nvme_init_ns(nvme_t *nvme, int nsid)
2163 {
2164         nvme_namespace_t *ns = &nvme->n_ns[nsid - 1];
2165         nvme_identify_nsid_t *idns;
2166         int last_rp;
2167 
2168         ns->ns_nvme = nvme;
2169 
2170         if (nvme_identify(nvme, nsid, (void **)&idns) != 0) {
2171                 dev_err(nvme->n_dip, CE_WARN,
2172                     "!failed to identify namespace %d", nsid);
2173                 return (DDI_FAILURE);
2174         }
2175 
2176         ns->ns_idns = idns;
2177         ns->ns_id = nsid;
2178         ns->ns_block_count = idns->id_nsize;
2179         ns->ns_block_size =
2180             1 << idns->id_lbaf[idns->id_flbas.lba_format].lbaf_lbads;
2181         ns->ns_best_block_size = ns->ns_block_size;
2182 
2183         /*
2184          * Get the EUI64 if present. Use it for devid and device node names.
2185          */
2186         if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1))
2187                 bcopy(idns->id_eui64, ns->ns_eui64, sizeof (ns->ns_eui64));
2188 
2189         /*LINTED: E_BAD_PTR_CAST_ALIGN*/
2190         if (*(uint64_t *)ns->ns_eui64 != 0) {
2191                 uint8_t *eui64 = ns->ns_eui64;
2192 
2193                 (void) snprintf(ns->ns_name, sizeof (ns->ns_name),
2194                     "%02x%02x%02x%02x%02x%02x%02x%02x",
2195                     eui64[0], eui64[1], eui64[2], eui64[3],
2196                     eui64[4], eui64[5], eui64[6], eui64[7]);
2197         } else {
2198                 (void) snprintf(ns->ns_name, sizeof (ns->ns_name), "%d",
2199                     ns->ns_id);
2200 
2201                 nvme_prepare_devid(nvme, ns->ns_id);
2202         }
2203 
2204         /*
2205          * Find the LBA format with no metadata and the best relative
2206          * performance. A value of 3 means "degraded", 0 is best.
2207          */
2208         last_rp = 3;
2209         for (int j = 0; j <= idns->id_nlbaf; j++) {
2210                 if (idns->id_lbaf[j].lbaf_lbads == 0)
2211                         break;
2212                 if (idns->id_lbaf[j].lbaf_ms != 0)
2213                         continue;
2214                 if (idns->id_lbaf[j].lbaf_rp >= last_rp)
2215                         continue;
2216                 last_rp = idns->id_lbaf[j].lbaf_rp;
2217                 ns->ns_best_block_size =
2218                     1 << idns->id_lbaf[j].lbaf_lbads;
2219         }
2220 
2221         if (ns->ns_best_block_size < nvme->n_min_block_size)
2222                 ns->ns_best_block_size = nvme->n_min_block_size;
2223 
2224         /*
2225          * We currently don't support namespaces that use either:
2226          * - thin provisioning
2227          * - protection information
2228          * - illegal block size (< 512)
2229          */
2230         if (idns->id_nsfeat.f_thin ||
2231             idns->id_dps.dp_pinfo) {
2232                 dev_err(nvme->n_dip, CE_WARN,
2233                     "!ignoring namespace %d, unsupported features: "
2234                     "thin = %d, pinfo = %d", nsid,
2235                     idns->id_nsfeat.f_thin, idns->id_dps.dp_pinfo);
2236                 ns->ns_ignore = B_TRUE;
2237         } else if (ns->ns_block_size < 512) {
2238                 dev_err(nvme->n_dip, CE_WARN,
2239                     "!ignoring namespace %d, unsupported block size %"PRIu64,
2240                     nsid, (uint64_t)ns->ns_block_size);
2241                 ns->ns_ignore = B_TRUE;
2242         } else {
2243                 ns->ns_ignore = B_FALSE;
2244         }
2245 
2246         return (DDI_SUCCESS);
2247 }
2248 
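     /*
      * nvme_init -- bring the controller to an operational state
      *
      * In order: check the controller version and capabilities, reset the
      * controller, create the admin queue pair and enable the controller,
      * post an async event request, identify the controller and all
      * namespaces, configure features such as the volatile write cache,
      * and finally negotiate and create the I/O queue pairs with MSI/MSI-X
      * interrupts where available.
      */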
2249 static int
2250 nvme_init(nvme_t *nvme)
2251 {
2252         nvme_reg_cc_t cc = { 0 };
2253         nvme_reg_aqa_t aqa = { 0 };
2254         nvme_reg_asq_t asq = { 0 };
2255         nvme_reg_acq_t acq = { 0 };
2256         nvme_reg_cap_t cap;
2257         nvme_reg_vs_t vs;
2258         nvme_reg_csts_t csts;
2259         int i = 0;
2260         uint16_t nqueues;
2261         char model[sizeof (nvme->n_idctl->id_model) + 1];
2262         char *vendor, *product;
2263 
2264         /* Check controller version */
2265         vs.r = nvme_get32(nvme, NVME_REG_VS);
2266         nvme->n_version.v_major = vs.b.vs_mjr;
2267         nvme->n_version.v_minor = vs.b.vs_mnr;
2268         dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d",
2269             nvme->n_version.v_major, nvme->n_version.v_minor);
2270 
2271         if (NVME_VERSION_HIGHER(&nvme->n_version,
2272             nvme_version_major, nvme_version_minor)) {
2273                 dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.%d",
2274                     nvme_version_major, nvme_version_minor);
2275                 if (nvme->n_strict_version)
2276                         goto fail;
2277         }
2278 
2279         /* retrieve controller configuration */
2280         cap.r = nvme_get64(nvme, NVME_REG_CAP);
2281 
2282         if ((cap.b.cap_css & NVME_CAP_CSS_NVM) == 0) {
2283                 dev_err(nvme->n_dip, CE_WARN,
2284                     "!NVM command set not supported by hardware");
2285                 goto fail;
2286         }
2287 
2288         nvme->n_nssr_supported = cap.b.cap_nssrs;
2289         nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd;
2290         nvme->n_timeout = cap.b.cap_to;
2291         nvme->n_arbitration_mechanisms = cap.b.cap_ams;
2292         nvme->n_cont_queues_reqd = cap.b.cap_cqr;
2293         nvme->n_max_queue_entries = cap.b.cap_mqes + 1;
2294 
2295         /*
2296          * The MPSMIN and MPSMAX fields in the CAP register use 0 to specify
2297          * the base page size of 4k (1<<12), so add 12 here to get the real
2298          * page size value.
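              *
              * For example, MPSMIN = 0 and MPSMAX = 4 allow page sizes from
              * 1 << 12 (4k) to 1 << 16 (64k); on a system with PAGESHIFT of
              * 12 the driver will use 4k pages.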
2299          */
2300         nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT),
2301             cap.b.cap_mpsmax + 12);
2302         nvme->n_pagesize = 1UL << (nvme->n_pageshift);
2303 
2304         /*
2305          * Set up Queue DMA to transfer at least 1 page-aligned page at a time.
2306          */
2307         nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize;
2308         nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
2309 
2310         /*
2311          * Set up PRP DMA to transfer 1 page-aligned page at a time.
2312          * Maxxfer may be increased once the controller limits are known.
2313          */
2314         nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize;
2315         nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
2316         nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize;
2317         nvme->n_prp_dma_attr.dma_attr_seg = nvme->n_pagesize - 1;
2318 
2319         /*
2320          * Reset controller if it's still in ready state.
2321          */
2322         if (nvme_reset(nvme, B_FALSE) == B_FALSE) {
2323                 dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller");
2324                 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
2325                 nvme->n_dead = B_TRUE;
2326                 goto fail;
2327         }
2328 
2329         /*
2330          * Create the admin queue pair.
2331          */
2332         if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0)
2333             != DDI_SUCCESS) {
2334                 dev_err(nvme->n_dip, CE_WARN,
2335                     "!unable to allocate admin qpair");
2336                 goto fail;
2337         }
2338         nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP);
2339         nvme->n_ioq[0] = nvme->n_adminq;
2340 
2341         nvme->n_progress |= NVME_ADMIN_QUEUE;
2342 
2343         (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
2344             "admin-queue-len", nvme->n_admin_queue_len);
2345 
2346         aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1;
2347         asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress;
2348         acq = nvme->n_adminq->nq_cqdma->nd_cookie.dmac_laddress;
2349 
2350         ASSERT((asq & (nvme->n_pagesize - 1)) == 0);
2351         ASSERT((acq & (nvme->n_pagesize - 1)) == 0);
2352 
2353         nvme_put32(nvme, NVME_REG_AQA, aqa.r);
2354         nvme_put64(nvme, NVME_REG_ASQ, asq);
2355         nvme_put64(nvme, NVME_REG_ACQ, acq);
2356 
2357         cc.b.cc_ams = 0;        /* use Round-Robin arbitration */
2358         cc.b.cc_css = 0;        /* use NVM command set */
2359         cc.b.cc_mps = nvme->n_pageshift - 12;
2360         cc.b.cc_shn = 0;        /* no shutdown in progress */
2361         cc.b.cc_en = 1;         /* enable controller */
2362         cc.b.cc_iosqes = 6;     /* submission queue entry is 2^6 bytes long */
2363         cc.b.cc_iocqes = 4;     /* completion queue entry is 2^4 bytes long */
2364 
2365         nvme_put32(nvme, NVME_REG_CC, cc.r);
2366 
2367         /*
2368          * Wait for the controller to become ready.
2369          */
2370         csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2371         if (csts.b.csts_rdy == 0) {
2372                 for (i = 0; i != nvme->n_timeout * 10; i++) {
2373                         delay(drv_usectohz(50000));
2374                         csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2375 
2376                         if (csts.b.csts_cfs == 1) {
2377                                 dev_err(nvme->n_dip, CE_WARN,
2378                                     "!controller fatal status at init");
2379                                 ddi_fm_service_impact(nvme->n_dip,
2380                                     DDI_SERVICE_LOST);
2381                                 nvme->n_dead = B_TRUE;
2382                                 goto fail;
2383                         }
2384 
2385                         if (csts.b.csts_rdy == 1)
2386                                 break;
2387                 }
2388         }
2389 
2390         if (csts.b.csts_rdy == 0) {
2391                 dev_err(nvme->n_dip, CE_WARN, "!controller not ready");
2392                 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
2393                 nvme->n_dead = B_TRUE;
2394                 goto fail;
2395         }
2396 
2397         /*
2398          * Assume an abort command limit of 1. We'll destroy and re-init
2399          * that later when we know the true abort command limit.
2400          */
2401         sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL);
2402 
2403         /*
2404          * Setup initial interrupt for admin queue.
2405          */
2406         if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 1)
2407             != DDI_SUCCESS) &&
2408             (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 1)
2409             != DDI_SUCCESS) &&
2410             (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1)
2411             != DDI_SUCCESS)) {
2412                 dev_err(nvme->n_dip, CE_WARN,
2413                     "!failed to setup initial interrupt");
2414                 goto fail;
2415         }
2416 
2417         /*
2418          * Post an asynchronous event command to catch errors.
2419          * We assume the asynchronous events are supported as required by
2420          * specification (Figure 40 in section 5 of NVMe 1.2).
2421          * However, since at least qemu does not follow the specification,
2422          * we need a mechanism to protect ourselves.
2423          */
2424         nvme->n_async_event_supported = B_TRUE;
2425         nvme_async_event(nvme);
2426 
2427         /*
2428          * Identify Controller
2429          */
2430         if (nvme_identify(nvme, 0, (void **)&nvme->n_idctl) != 0) {
2431                 dev_err(nvme->n_dip, CE_WARN,
2432                     "!failed to identify controller");
2433                 goto fail;
2434         }
2435 
2436         /*
2437          * Get Vendor & Product ID
2438          */
2439         bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
2440         model[sizeof (nvme->n_idctl->id_model)] = '\0';
2441         sata_split_model(model, &vendor, &product);
2442 
2443         if (vendor == NULL)
2444                 nvme->n_vendor = strdup("NVMe");
2445         else
2446                 nvme->n_vendor = strdup(vendor);
2447 
2448         nvme->n_product = strdup(product);
2449 
2450         /*
2451          * Get controller limits.
2452          */
2453         nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT,
2454             MIN(nvme->n_admin_queue_len / 10,
2455             MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit)));
2456 
2457         (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
2458             "async-event-limit", nvme->n_async_event_limit);
2459 
2460         nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1;
2461 
2462         /*
2463          * Reinitialize the semaphore with the true abort command limit
2464          * supported by the hardware. It's not necessary to disable interrupts
2465          * as only command aborts use the semaphore, and no commands are
2466          * executed or aborted while we're here.
2467          */
2468         sema_destroy(&nvme->n_abort_sema);
2469         sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL,
2470             SEMA_DRIVER, NULL);
2471 
2472         nvme->n_progress |= NVME_CTRL_LIMITS;
2473 
2474         if (nvme->n_idctl->id_mdts == 0)
2475                 nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536;
2476         else
2477                 nvme->n_max_data_transfer_size =
2478                     1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts);
2479 
2480         nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1;
2481 
2482         /*
2483          * Limit n_max_data_transfer_size to what we can handle in one PRP.
2484          * Chained PRPs are currently unsupported.
2485          *
2486          * This is a no-op on hardware which doesn't support a transfer size
2487          * big enough to require chained PRPs.
2488          */
2489         nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size,
2490             (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize));
2491 
2492         nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size;
2493 
2494         /*
2495          * Fail if the required minimum queue entry sizes are larger, or
2496          * the allowed maximum queue entry sizes smaller, than what we use.
2497          */
2498 
2499         if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) ||
2500             ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) ||
2501             ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) ||
2502             ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t)))
2503                 goto fail;
2504 
2505         /*
2506          * Check for the presence of a Volatile Write Cache. If present,
2507          * enable or disable based on the value of the property
2508          * volatile-write-cache-enable (default is enabled).
2509          */
2510         nvme->n_write_cache_present =
2511             nvme->n_idctl->id_vwc.vwc_present == 0 ? B_FALSE : B_TRUE;
2512 
2513         (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
2514             "volatile-write-cache-present",
2515             nvme->n_write_cache_present ? 1 : 0);
2516 
2517         if (!nvme->n_write_cache_present) {
2518                 nvme->n_write_cache_enabled = B_FALSE;
2519         } else if (nvme_write_cache_set(nvme, nvme->n_write_cache_enabled)
2520             != 0) {
2521                 dev_err(nvme->n_dip, CE_WARN,
2522                     "!failed to %sable volatile write cache",
2523                     nvme->n_write_cache_enabled ? "en" : "dis");
2524                 /*
2525                  * Assume the cache is (still) enabled.
2526                  */
2527                 nvme->n_write_cache_enabled = B_TRUE;
2528         }
2529 
2530         (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
2531             "volatile-write-cache-enable",
2532             nvme->n_write_cache_enabled ? 1 : 0);
2533 
2534         /*
2535          * Assume LBA Range Type feature is supported. If it isn't this
2536          * will be set to B_FALSE by nvme_get_features().
2537          */
2538         nvme->n_lba_range_supported = B_TRUE;
2539 
2540         /*
2541          * Check support for Autonomous Power State Transition.
2542          */
2543         if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1))
2544                 nvme->n_auto_pst_supported =
2545                     nvme->n_idctl->id_apsta.ap_sup == 0 ? B_FALSE : B_TRUE;
2546 
2547         /*
2548          * Assume Software Progress Marker feature is supported.  If it isn't
2549          * this will be set to B_FALSE by nvme_get_features().
2550          */
2551         nvme->n_progress_supported = B_TRUE;
2552 
2553         /*
2554          * Identify Namespaces
2555          */
2556         nvme->n_namespace_count = nvme->n_idctl->id_nn;
2557 
2558         if (nvme->n_namespace_count == 0) {
2559                 dev_err(nvme->n_dip, CE_WARN,
2560                     "!controllers without namespaces are not supported");
2561                 goto fail;
2562         }
2563 
2564         if (nvme->n_namespace_count > NVME_MINOR_MAX) {
2565                 dev_err(nvme->n_dip, CE_WARN,
2566                     "!too many namespaces: %d, limiting to %d",
2567                     nvme->n_namespace_count, NVME_MINOR_MAX);
2568                 nvme->n_namespace_count = NVME_MINOR_MAX;
2569         }
2570 
2571         nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) *
2572             nvme->n_namespace_count, KM_SLEEP);
2573 
2574         for (i = 0; i != nvme->n_namespace_count; i++) {
2575                 mutex_init(&nvme->n_ns[i].ns_minor.nm_mutex, NULL, MUTEX_DRIVER,
2576                     NULL);
2577                 if (nvme_init_ns(nvme, i + 1) != DDI_SUCCESS)
2578                         goto fail;
2579         }
2580 
2581         /*
2582          * Try to set up MSI/MSI-X interrupts.
2583          */
2584         if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX))
2585             != 0) {
2586                 nvme_release_interrupts(nvme);
2587 
2588                 nqueues = MIN(UINT16_MAX, ncpus);
2589 
2590                 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX,
2591                     nqueues) != DDI_SUCCESS) &&
2592                     (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI,
2593                     nqueues) != DDI_SUCCESS)) {
2594                         dev_err(nvme->n_dip, CE_WARN,
2595                             "!failed to setup MSI/MSI-X interrupts");
2596                         goto fail;
2597                 }
2598         }
2599 
2600         nqueues = nvme->n_intr_cnt;
2601 
2602         /*
2603          * Create I/O queue pairs.
2604          */
2605 
2606         if (nvme_set_nqueues(nvme, &nqueues) != 0) {
2607                 dev_err(nvme->n_dip, CE_WARN,
2608                     "!failed to set number of I/O queues to %d",
2609                     nvme->n_intr_cnt);
2610                 goto fail;
2611         }
2612 
2613         /*
2614          * Reallocate I/O queue array
2615          */
2616         kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *));
2617         nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) *
2618             (nqueues + 1), KM_SLEEP);
2619         nvme->n_ioq[0] = nvme->n_adminq;
2620 
2621         nvme->n_ioq_count = nqueues;
2622 
2623         /*
2624          * If we got fewer queues than we asked for, we might as well give
2625          * some of the interrupt vectors back to the system.
2626          */
2627         if (nvme->n_ioq_count < nvme->n_intr_cnt) {
2628                 nvme_release_interrupts(nvme);
2629 
2630                 if (nvme_setup_interrupts(nvme, nvme->n_intr_type,
2631                     nvme->n_ioq_count) != DDI_SUCCESS) {
2632                         dev_err(nvme->n_dip, CE_WARN,
2633                             "!failed to reduce number of interrupts");
2634                         goto fail;
2635                 }
2636         }
2637 
2638         /*
2639          * Alloc & register I/O queue pairs
2640          */
2641         nvme->n_io_queue_len =
2642             MIN(nvme->n_io_queue_len, nvme->n_max_queue_entries);
2643         (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-queue-len",
2644             nvme->n_io_queue_len);
2645 
2646         for (i = 1; i != nvme->n_ioq_count + 1; i++) {
2647                 if (nvme_alloc_qpair(nvme, nvme->n_io_queue_len,
2648                     &nvme->n_ioq[i], i) != DDI_SUCCESS) {
2649                         dev_err(nvme->n_dip, CE_WARN,
2650                             "!unable to allocate I/O qpair %d", i);
2651                         goto fail;
2652                 }
2653 
2654                 if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i) != 0) {
2655                         dev_err(nvme->n_dip, CE_WARN,
2656                             "!unable to create I/O qpair %d", i);
2657                         goto fail;
2658                 }
2659         }
2660 
2661         /*
2662          * Post more asynchronous event commands to reduce event reporting
2663          * latency as suggested by the spec.
2664          */
2665         if (nvme->n_async_event_supported) {
2666                 for (i = 1; i != nvme->n_async_event_limit; i++)
2667                         nvme_async_event(nvme);
2668         }
2669 
2670         return (DDI_SUCCESS);
2671 
2672 fail:
2673         (void) nvme_reset(nvme, B_FALSE);
2674         return (DDI_FAILURE);
2675 }
2676 
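     /*
      * nvme_intr -- common interrupt handler for all vectors
      *
      * arg2 carries the vector number. Completed commands are retrieved
      * from every queue assigned to this vector and dispatched to the
      * command taskq; the interrupt is claimed if any command was found.
      */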
2677 static uint_t
2678 nvme_intr(caddr_t arg1, caddr_t arg2)
2679 {
2680         /*LINTED: E_PTR_BAD_CAST_ALIGN*/
2681         nvme_t *nvme = (nvme_t *)arg1;
2682         int inum = (int)(uintptr_t)arg2;
2683         int ccnt = 0;
2684         int qnum;
2685         nvme_cmd_t *cmd;
2686 
2687         if (inum >= nvme->n_intr_cnt)
2688                 return (DDI_INTR_UNCLAIMED);
2689 
2690         if (nvme->n_dead)
2691                 return (nvme->n_intr_type == DDI_INTR_TYPE_FIXED ?
2692                     DDI_INTR_UNCLAIMED : DDI_INTR_CLAIMED);
2693 
2694         /*
2695          * The interrupt vector a queue uses is calculated as queue_idx %
2696          * intr_cnt in nvme_create_io_qpair(). Iterate through the queue array
2697          * in steps of n_intr_cnt to process all queues using this vector.
2698          */
2699         for (qnum = inum;
2700             qnum < nvme->n_ioq_count + 1 && nvme->n_ioq[qnum] != NULL;
2701             qnum += nvme->n_intr_cnt) {
2702                 while ((cmd = nvme_retrieve_cmd(nvme, nvme->n_ioq[qnum]))) {
2703                         taskq_dispatch_ent((taskq_t *)cmd->nc_nvme->n_cmd_taskq,
2704                             cmd->nc_callback, cmd, TQ_NOSLEEP, &cmd->nc_tqent);
2705                         ccnt++;
2706                 }
2707         }
2708 
2709         return (ccnt > 0 ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
2710 }
2711 
2712 static void
2713 nvme_release_interrupts(nvme_t *nvme)
2714 {
2715         int i;
2716 
2717         for (i = 0; i < nvme->n_intr_cnt; i++) {
2718                 if (nvme->n_inth[i] == NULL)
2719                         break;
2720 
2721                 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
2722                         (void) ddi_intr_block_disable(&nvme->n_inth[i], 1);
2723                 else
2724                         (void) ddi_intr_disable(nvme->n_inth[i]);
2725 
2726                 (void) ddi_intr_remove_handler(nvme->n_inth[i]);
2727                 (void) ddi_intr_free(nvme->n_inth[i]);
2728         }
2729 
2730         kmem_free(nvme->n_inth, nvme->n_inth_sz);
2731         nvme->n_inth = NULL;
2732         nvme->n_inth_sz = 0;
2733 
2734         nvme->n_progress &= ~NVME_INTERRUPTS;
2735 }
2736 
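     /*
      * nvme_setup_interrupts -- allocate and enable interrupt vectors
      *
      * Tries to allocate up to one vector of the given type per queue pair,
      * settling for fewer if that is all the system grants. On x86, MSI-X
      * is avoided when running under VMware.
      */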
2737 static int
2738 nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs)
2739 {
2740         int nintrs, navail, count;
2741         int ret;
2742         int i;
2743 
2744         if (nvme->n_intr_types == 0) {
2745                 ret = ddi_intr_get_supported_types(nvme->n_dip,
2746                     &nvme->n_intr_types);
2747                 if (ret != DDI_SUCCESS) {
2748                         dev_err(nvme->n_dip, CE_WARN,
2749                             "!%s: ddi_intr_get_supported_types failed",
2750                             __func__);
2751                         return (ret);
2752                 }
2753 #ifdef __x86
2754                 if (get_hwenv() == HW_VMWARE)
2755                         nvme->n_intr_types &= ~DDI_INTR_TYPE_MSIX;
2756 #endif
2757         }
2758 
2759         if ((nvme->n_intr_types & intr_type) == 0)
2760                 return (DDI_FAILURE);
2761 
2762         ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs);
2763         if (ret != DDI_SUCCESS) {
2764                 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed",
2765                     __func__);
2766                 return (ret);
2767         }
2768 
2769         ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail);
2770         if (ret != DDI_SUCCESS) {
2771                 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed",
2772                     __func__);
2773                 return (ret);
2774         }
2775 
2776         /* We want at most one interrupt per queue pair. */
2777         if (navail > nqpairs)
2778                 navail = nqpairs;
2779 
2780         nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail;
2781         nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP);
2782 
2783         ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail,
2784             &count, 0);
2785         if (ret != DDI_SUCCESS) {
2786                 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed",
2787                     __func__);
2788                 goto fail;
2789         }
2790 
2791         nvme->n_intr_cnt = count;
2792 
2793         ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri);
2794         if (ret != DDI_SUCCESS) {
2795                 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed",
2796                     __func__);
2797                 goto fail;
2798         }
2799 
2800         for (i = 0; i < count; i++) {
2801                 ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr,
2802                     (void *)nvme, (void *)(uintptr_t)i);
2803                 if (ret != DDI_SUCCESS) {
2804                         dev_err(nvme->n_dip, CE_WARN,
2805                             "!%s: ddi_intr_add_handler failed", __func__);
2806                         goto fail;
2807                 }
2808         }
2809 
2810         (void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap);
2811 
2812         for (i = 0; i < count; i++) {
2813                 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
2814                         ret = ddi_intr_block_enable(&nvme->n_inth[i], 1);
2815                 else
2816                         ret = ddi_intr_enable(nvme->n_inth[i]);
2817 
2818                 if (ret != DDI_SUCCESS) {
2819                         dev_err(nvme->n_dip, CE_WARN,
2820                             "!%s: enabling interrupt %d failed", __func__, i);
2821                         goto fail;
2822                 }
2823         }
2824 
2825         nvme->n_intr_type = intr_type;
2826 
2827         nvme->n_progress |= NVME_INTERRUPTS;
2828 
2829         return (DDI_SUCCESS);
2830 
2831 fail:
2832         nvme_release_interrupts(nvme);
2833 
2834         return (ret);
2835 }
2836 
2837 static int
2838 nvme_fm_errcb(dev_info_t *dip, ddi_fm_error_t *fm_error, const void *arg)
2839 {
2840         _NOTE(ARGUNUSED(arg));
2841 
2842         pci_ereport_post(dip, fm_error, NULL);
2843         return (fm_error->fme_status);
2844 }
2845 
2846 static int
2847 nvme_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2848 {
2849         nvme_t *nvme;
2850         int instance;
2851         int nregs;
2852         off_t regsize;
2853         int i;
2854         char name[32];
2855 
2856         if (cmd != DDI_ATTACH)
2857                 return (DDI_FAILURE);
2858 
2859         instance = ddi_get_instance(dip);
2860 
2861         if (ddi_soft_state_zalloc(nvme_state, instance) != DDI_SUCCESS)
2862                 return (DDI_FAILURE);
2863 
2864         nvme = ddi_get_soft_state(nvme_state, instance);
2865         ddi_set_driver_private(dip, nvme);
2866         nvme->n_dip = dip;
2867 
2868         mutex_init(&nvme->n_minor.nm_mutex, NULL, MUTEX_DRIVER, NULL);
2869 
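             /*
              * Fetch the driver tunables set on this device node (typically
              * via nvme.conf), falling back to the built-in defaults.
              */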
2870         nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2871             DDI_PROP_DONTPASS, "strict-version", 1) == 1 ? B_TRUE : B_FALSE;
2872         nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY,
2873             dip, DDI_PROP_DONTPASS, "ignore-unknown-vendor-status", 0) == 1 ?
2874             B_TRUE : B_FALSE;
2875         nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2876             DDI_PROP_DONTPASS, "admin-queue-len", NVME_DEFAULT_ADMIN_QUEUE_LEN);
2877         nvme->n_io_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2878             DDI_PROP_DONTPASS, "io-queue-len", NVME_DEFAULT_IO_QUEUE_LEN);
2879         nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2880             DDI_PROP_DONTPASS, "async-event-limit",
2881             NVME_DEFAULT_ASYNC_EVENT_LIMIT);
2882         nvme->n_write_cache_enabled = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2883             DDI_PROP_DONTPASS, "volatile-write-cache-enable", 1) != 0 ?
2884             B_TRUE : B_FALSE;
2885         nvme->n_min_block_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2886             DDI_PROP_DONTPASS, "min-phys-block-size",
2887             NVME_DEFAULT_MIN_BLOCK_SIZE);
2888 
2889         if (!ISP2(nvme->n_min_block_size) ||
2890             (nvme->n_min_block_size < NVME_DEFAULT_MIN_BLOCK_SIZE)) {
2891                 dev_err(dip, CE_WARN, "!min-phys-block-size %s, "
2892                     "using default %d", ISP2(nvme->n_min_block_size) ?
2893                     "too low" : "not a power of 2",
2894                     NVME_DEFAULT_MIN_BLOCK_SIZE);
2895                 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE;
2896         }
2897 
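             /*
              * Clamp the configured queue lengths to the bounds the driver
              * supports.
              */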
2898         if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN)
2899                 nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN;
2900         else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN)
2901                 nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN;
2902 
2903         if (nvme->n_io_queue_len < NVME_MIN_IO_QUEUE_LEN)
2904                 nvme->n_io_queue_len = NVME_MIN_IO_QUEUE_LEN;
2905 
2906         if (nvme->n_async_event_limit < 1)
2907                 nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT;
2908 
2909         nvme->n_reg_acc_attr = nvme_reg_acc_attr;
2910         nvme->n_queue_dma_attr = nvme_queue_dma_attr;
2911         nvme->n_prp_dma_attr = nvme_prp_dma_attr;
2912         nvme->n_sgl_dma_attr = nvme_sgl_dma_attr;
2913 
2914         /*
2915          * Setup FMA support.
2916          */
2917         nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip,
2918             DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
2919             DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
2920             DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
2921 
2922         ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc);
2923 
2924         if (nvme->n_fm_cap) {
2925                 if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE)
2926                         nvme->n_reg_acc_attr.devacc_attr_access =
2927                             DDI_FLAGERR_ACC;
2928 
2929                 if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) {
2930                         nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
2931                         nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
2932                 }
2933 
2934                 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
2935                     DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
2936                         pci_ereport_setup(dip);
2937 
2938                 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
2939                         ddi_fm_handler_register(dip, nvme_fm_errcb,
2940                             (void *)nvme);
2941         }
2942 
2943         nvme->n_progress |= NVME_FMA_INIT;
2944 
2945         /*
2946          * The spec defines several register sets. Only the controller
2947          * registers (set 1) are currently used.
2948          */
2949         if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE ||
2950             nregs < 2 ||
2951             ddi_dev_regsize(dip, 1, &regsize) == DDI_FAILURE)
2952                 goto fail;
2953 
2954         if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize,
2955             &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) {
2956                 dev_err(dip, CE_WARN, "!failed to map regset 1");
2957                 goto fail;
2958         }
2959 
2960         nvme->n_progress |= NVME_REGS_MAPPED;
2961 
2962         /*
2963          * Create taskq for command completion.
2964          */
2965         (void) snprintf(name, sizeof (name), "%s%d_cmd_taskq",
2966             ddi_driver_name(dip), ddi_get_instance(dip));
2967         nvme->n_cmd_taskq = ddi_taskq_create(dip, name, MIN(UINT16_MAX, ncpus),
2968             TASKQ_DEFAULTPRI, 0);
2969         if (nvme->n_cmd_taskq == NULL) {
2970                 dev_err(dip, CE_WARN, "!failed to create cmd taskq");
2971                 goto fail;
2972         }
2973 
2974         /*
2975          * Create PRP DMA cache
2976          */
2977         (void) snprintf(name, sizeof (name), "%s%d_prp_cache",
2978             ddi_driver_name(dip), ddi_get_instance(dip));
2979         nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t),
2980             0, nvme_prp_dma_constructor, nvme_prp_dma_destructor,
2981             NULL, (void *)nvme, NULL, 0);
2982 
2983         if (nvme_init(nvme) != DDI_SUCCESS)
2984                 goto fail;
2985 
2986         /*
2987          * Attach the blkdev driver for each namespace.
2988          */
2989         for (i = 0; i != nvme->n_namespace_count; i++) {
2990                 if (ddi_create_minor_node(nvme->n_dip, nvme->n_ns[i].ns_name,
2991                     S_IFCHR, NVME_MINOR(ddi_get_instance(nvme->n_dip), i + 1),
2992                     DDI_NT_NVME_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
2993                         dev_err(dip, CE_WARN,
2994                             "!failed to create minor node for namespace %d", i);
2995                         goto fail;
2996                 }
2997 
2998                 if (nvme->n_ns[i].ns_ignore)
2999                         continue;
3000 
3001                 nvme->n_ns[i].ns_bd_hdl = bd_alloc_handle(&nvme->n_ns[i],
3002                     &nvme_bd_ops, &nvme->n_prp_dma_attr, KM_SLEEP);
3003 
3004                 if (nvme->n_ns[i].ns_bd_hdl == NULL) {
3005                         dev_err(dip, CE_WARN,
3006                             "!failed to get blkdev handle for namespace %d", i);
3007                         goto fail;
3008                 }
3009 
3010                 if (bd_attach_handle(dip, nvme->n_ns[i].ns_bd_hdl)
3011                     != DDI_SUCCESS) {
3012                         dev_err(dip, CE_WARN,
3013                             "!failed to attach blkdev handle for namespace %d",
3014                             i);
3015                         goto fail;
3016                 }
3017         }
3018 
3019         if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
3020             NVME_MINOR(ddi_get_instance(dip), 0), DDI_NT_NVME_NEXUS, 0)
3021             != DDI_SUCCESS) {
3022                 dev_err(dip, CE_WARN, "!nvme_attach: "
3023                     "cannot create devctl minor node");
3024                 goto fail;
3025         }
3026 
3027         return (DDI_SUCCESS);
3028 
3029 fail:
3030         /* attach successful anyway so that FMA can retire the device */
3031         if (nvme->n_dead)
3032                 return (DDI_SUCCESS);
3033 
3034         (void) nvme_detach(dip, DDI_DETACH);
3035 
3036         return (DDI_FAILURE);
3037 }
3038 
3039 static int
3040 nvme_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
3041 {
3042         int instance, i;
3043         nvme_t *nvme;
3044 
3045         if (cmd != DDI_DETACH)
3046                 return (DDI_FAILURE);
3047 
3048         instance = ddi_get_instance(dip);
3049 
3050         nvme = ddi_get_soft_state(nvme_state, instance);
3051 
3052         if (nvme == NULL)
3053                 return (DDI_FAILURE);
3054 
3055         ddi_remove_minor_node(dip, "devctl");
3056         mutex_destroy(&nvme->n_minor.nm_mutex);
3057 
3058         if (nvme->n_ns) {
3059                 for (i = 0; i != nvme->n_namespace_count; i++) {
3060                         ddi_remove_minor_node(dip, nvme->n_ns[i].ns_name);
3061                         mutex_destroy(&nvme->n_ns[i].ns_minor.nm_mutex);
3062 
3063                         if (nvme->n_ns[i].ns_bd_hdl) {
3064                                 (void) bd_detach_handle(
3065                                     nvme->n_ns[i].ns_bd_hdl);
3066                                 bd_free_handle(nvme->n_ns[i].ns_bd_hdl);
3067                         }
3068 
3069                         if (nvme->n_ns[i].ns_idns)
3070                                 kmem_free(nvme->n_ns[i].ns_idns,
3071                                     sizeof (nvme_identify_nsid_t));
3072                         if (nvme->n_ns[i].ns_devid)
3073                                 strfree(nvme->n_ns[i].ns_devid);
3074                 }
3075 
3076                 kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) *
3077                     nvme->n_namespace_count);
3078         }
3079 
3080         if (nvme->n_progress & NVME_INTERRUPTS)
3081                 nvme_release_interrupts(nvme);
3082 
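             /*
              * Wait for the command completion taskq to drain before tearing
              * down the queue pairs its tasks may still reference.
              */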
3083         if (nvme->n_cmd_taskq)
3084                 ddi_taskq_wait(nvme->n_cmd_taskq);
3085 
3086         if (nvme->n_ioq_count > 0) {
3087                 for (i = 1; i != nvme->n_ioq_count + 1; i++) {
3088                         if (nvme->n_ioq[i] != NULL) {
3089                                 /* TODO: send destroy queue commands */
3090                                 nvme_free_qpair(nvme->n_ioq[i]);
3091                         }
3092                 }
3093 
3094                 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) *
3095                     (nvme->n_ioq_count + 1));
3096         }
3097 
3098         if (nvme->n_prp_cache != NULL) {
3099                 kmem_cache_destroy(nvme->n_prp_cache);
3100         }
3101 
3102         if (nvme->n_progress & NVME_REGS_MAPPED) {
3103                 nvme_shutdown(nvme, NVME_CC_SHN_NORMAL, B_FALSE);
3104                 (void) nvme_reset(nvme, B_FALSE);
3105         }
3106 
3107         if (nvme->n_cmd_taskq)
3108                 ddi_taskq_destroy(nvme->n_cmd_taskq);
3109 
3110         if (nvme->n_progress & NVME_CTRL_LIMITS)
3111                 sema_destroy(&nvme->n_abort_sema);
3112 
3113         if (nvme->n_progress & NVME_ADMIN_QUEUE)
3114                 nvme_free_qpair(nvme->n_adminq);
3115 
3116         if (nvme->n_idctl)
3117                 kmem_free(nvme->n_idctl, NVME_IDENTIFY_BUFSIZE);
3118 
3119         if (nvme->n_progress & NVME_REGS_MAPPED)
3120                 ddi_regs_map_free(&nvme->n_regh);
3121 
3122         if (nvme->n_progress & NVME_FMA_INIT) {
3123                 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
3124                         ddi_fm_handler_unregister(nvme->n_dip);
3125 
3126                 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
3127                     DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
3128                         pci_ereport_teardown(nvme->n_dip);
3129 
3130                 ddi_fm_fini(nvme->n_dip);
3131         }
3132 
3133         if (nvme->n_vendor != NULL)
3134                 strfree(nvme->n_vendor);
3135 
3136         if (nvme->n_product != NULL)
3137                 strfree(nvme->n_product);
3138 
3139         ddi_soft_state_free(nvme_state, instance);
3140 
3141         return (DDI_SUCCESS);
3142 }
3143 
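     /*
      * Quiesce the controller for fast reboot or panic. Only polled operation
      * is possible in this context, so shut the controller down abruptly and
      * reset it without taking locks or waiting for interrupts.
      */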
3144 static int
3145 nvme_quiesce(dev_info_t *dip)
3146 {
3147         int instance;
3148         nvme_t *nvme;
3149 
3150         instance = ddi_get_instance(dip);
3151 
3152         nvme = ddi_get_soft_state(nvme_state, instance);
3153 
3154         if (nvme == NULL)
3155                 return (DDI_FAILURE);
3156 
3157         nvme_shutdown(nvme, NVME_CC_SHN_ABRUPT, B_TRUE);
3158 
3159         (void) nvme_reset(nvme, B_TRUE);
3160 
3161         return (DDI_SUCCESS);
3162 }
3163 
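     /*
      * Fill the PRP entries of a command from the DMA cookies of a blkdev
      * transfer. Transfers covering one or two pages fit directly into the two
      * PRP entries of the submission queue entry; larger transfers use the
      * second entry as a pointer to a PRP list page holding one entry per
      * remaining cookie.
      */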
3164 static int
3165 nvme_fill_prp(nvme_cmd_t *cmd, bd_xfer_t *xfer)
3166 {
3167         nvme_t *nvme = cmd->nc_nvme;
3168         int nprp_page, nprp;
3169         uint64_t *prp;
3170 
3171         if (xfer->x_ndmac == 0)
3172                 return (DDI_FAILURE);
3173 
3174         cmd->nc_sqe.sqe_dptr.d_prp[0] = xfer->x_dmac.dmac_laddress;
3175         ddi_dma_nextcookie(xfer->x_dmah, &xfer->x_dmac);
3176 
3177         if (xfer->x_ndmac == 1) {
3178                 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0;
3179                 return (DDI_SUCCESS);
3180         } else if (xfer->x_ndmac == 2) {
3181                 cmd->nc_sqe.sqe_dptr.d_prp[1] = xfer->x_dmac.dmac_laddress;
3182                 return (DDI_SUCCESS);
3183         }
3184 
3185         xfer->x_ndmac--;
3186 
3187         nprp_page = nvme->n_pagesize / sizeof (uint64_t) - 1;
3188         ASSERT(nprp_page > 0);
3189         nprp = (xfer->x_ndmac + nprp_page - 1) / nprp_page;
3190 
3191         /*
3192          * We currently don't support chained PRPs and set up our DMA
3193          * attributes to reflect that. If we still get an I/O request
3194          * that needs a chained PRP something is very wrong.
3195          */
3196         VERIFY(nprp == 1);
3197 
3198         cmd->nc_dma = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP);
3199         bzero(cmd->nc_dma->nd_memp, cmd->nc_dma->nd_len);
3200 
3201         cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_dma->nd_cookie.dmac_laddress;
3202 
3203         /*LINTED: E_PTR_BAD_CAST_ALIGN*/
3204         for (prp = (uint64_t *)cmd->nc_dma->nd_memp;
3205             xfer->x_ndmac > 0;
3206             prp++, xfer->x_ndmac--) {
3207                 *prp = xfer->x_dmac.dmac_laddress;
3208                 ddi_dma_nextcookie(xfer->x_dmah, &xfer->x_dmac);
3209         }
3210 
3211         (void) ddi_dma_sync(cmd->nc_dma->nd_dmah, 0, cmd->nc_dma->nd_len,
3212             DDI_DMA_SYNC_FORDEV);
3213         return (DDI_SUCCESS);
3214 }
3215 
3216 static nvme_cmd_t *
3217 nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer)
3218 {
3219         nvme_t *nvme = ns->ns_nvme;
3220         nvme_cmd_t *cmd;
3221 
3222         /*
3223          * Blkdev only sets BD_XFER_POLL when dumping, so don't sleep.
3224          */
3225         cmd = nvme_alloc_cmd(nvme, (xfer->x_flags & BD_XFER_POLL) ?
3226             KM_NOSLEEP : KM_SLEEP);
3227 
3228         if (cmd == NULL)
3229                 return (NULL);
3230 
3231         cmd->nc_sqe.sqe_opc = opc;
3232         cmd->nc_callback = nvme_bd_xfer_done;
3233         cmd->nc_xfer = xfer;
3234 
3235         switch (opc) {
3236         case NVME_OPC_NVM_WRITE:
3237         case NVME_OPC_NVM_READ:
3238                 VERIFY(xfer->x_nblks <= 0x10000);
3239 
3240                 cmd->nc_sqe.sqe_nsid = ns->ns_id;
3241 
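                     /*
                      * The starting LBA is split across CDW10 (low 32 bits)
                      * and CDW11 (high 32 bits); CDW12 holds the 0-based
                      * block count.
                      */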
3242                 cmd->nc_sqe.sqe_cdw10 = xfer->x_blkno & 0xffffffffu;
3243                 cmd->nc_sqe.sqe_cdw11 = (xfer->x_blkno >> 32);
3244                 cmd->nc_sqe.sqe_cdw12 = (uint16_t)(xfer->x_nblks - 1);
3245 
3246                 if (nvme_fill_prp(cmd, xfer) != DDI_SUCCESS)
3247                         goto fail;
3248                 break;
3249 
3250         case NVME_OPC_NVM_FLUSH:
3251                 cmd->nc_sqe.sqe_nsid = ns->ns_id;
3252                 break;
3253 
3254         default:
3255                 goto fail;
3256         }
3257 
3258         return (cmd);
3259 
3260 fail:
3261         nvme_free_cmd(cmd);
3262         return (NULL);
3263 }
3264 
3265 static void
3266 nvme_bd_xfer_done(void *arg)
3267 {
3268         nvme_cmd_t *cmd = arg;
3269         bd_xfer_t *xfer = cmd->nc_xfer;
3270         int error = 0;
3271 
3272         error = nvme_check_cmd_status(cmd);
3273         nvme_free_cmd(cmd);
3274 
3275         bd_xfer_done(xfer, error);
3276 }
3277 
3278 static void
3279 nvme_bd_driveinfo(void *arg, bd_drive_t *drive)
3280 {
3281         nvme_namespace_t *ns = arg;
3282         nvme_t *nvme = ns->ns_nvme;
3283 
3284         /*
3285          * blkdev maintains one queue size per instance (namespace),
3286          * but all namespaces share the I/O queues.
3287          * TODO: need to figure out a sane default, or use per-NS I/O queues,
3288          * or change blkdev to handle EAGAIN
3289          */
3290         drive->d_qsize = nvme->n_ioq_count * nvme->n_io_queue_len /
3291             nvme->n_namespace_count;
3292 
3293         /*
3294          * d_maxxfer is not set, which means the value is taken from the DMA
3295          * attributes specified to bd_alloc_handle.
3296          */
3297 
3298         drive->d_removable = B_FALSE;
3299         drive->d_hotpluggable = B_FALSE;
3300 
3301         bcopy(ns->ns_eui64, drive->d_eui64, sizeof (drive->d_eui64));
3302         drive->d_target = ns->ns_id;
3303         drive->d_lun = 0;
3304 
3305         drive->d_model = nvme->n_idctl->id_model;
3306         drive->d_model_len = sizeof (nvme->n_idctl->id_model);
3307         drive->d_vendor = nvme->n_vendor;
3308         drive->d_vendor_len = strlen(nvme->n_vendor);
3309         drive->d_product = nvme->n_product;
3310         drive->d_product_len = strlen(nvme->n_product);
3311         drive->d_serial = nvme->n_idctl->id_serial;
3312         drive->d_serial_len = sizeof (nvme->n_idctl->id_serial);
3313         drive->d_revision = nvme->n_idctl->id_fwrev;
3314         drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev);
3315 }
3316 
3317 static int
3318 nvme_bd_mediainfo(void *arg, bd_media_t *media)
3319 {
3320         nvme_namespace_t *ns = arg;
3321 
3322         media->m_nblks = ns->ns_block_count;
3323         media->m_blksize = ns->ns_block_size;
3324         media->m_readonly = B_FALSE;
3325         media->m_solidstate = B_TRUE;
3326 
3327         media->m_pblksize = ns->ns_best_block_size;
3328 
3329         return (0);
3330 }
3331 
3332 static int
3333 nvme_bd_cmd(nvme_namespace_t *ns, bd_xfer_t *xfer, uint8_t opc)
3334 {
3335         nvme_t *nvme = ns->ns_nvme;
3336         nvme_cmd_t *cmd;
3337         nvme_qpair_t *ioq;
3338         boolean_t poll;
3339         int ret;
3340 
3341         if (nvme->n_dead)
3342                 return (EIO);
3343 
3344         cmd = nvme_create_nvm_cmd(ns, opc, xfer);
3345         if (cmd == NULL)
3346                 return (ENOMEM);
3347 
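             /*
              * Spread the load across the I/O queue pairs by hashing on the
              * submitting CPU. Queue ID 0 is the admin queue, so the I/O
              * queues start at ID 1.
              */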
3348         cmd->nc_sqid = (CPU->cpu_id % nvme->n_ioq_count) + 1;
3349         ASSERT(cmd->nc_sqid <= nvme->n_ioq_count);
3350         ioq = nvme->n_ioq[cmd->nc_sqid];
3351 
3352         /*
3353          * Get the polling flag before submitting the command. The command may
3354          * complete immediately after it was submitted, which means we must
3355          * treat both cmd and xfer as if they have been freed already.
3356          */
3357         poll = (xfer->x_flags & BD_XFER_POLL) != 0;
3358 
3359         ret = nvme_submit_io_cmd(ioq, cmd);
3360 
3361         if (ret != 0)
3362                 return (ret);
3363 
3364         if (!poll)
3365                 return (0);
3366 
3367         do {
3368                 cmd = nvme_retrieve_cmd(nvme, ioq);
3369                 if (cmd != NULL)
3370                         nvme_bd_xfer_done(cmd);
3371                 else
3372                         drv_usecwait(10);
3373         } while (ioq->nq_active_cmds != 0);
3374 
3375         return (0);
3376 }
3377 
3378 static int
3379 nvme_bd_read(void *arg, bd_xfer_t *xfer)
3380 {
3381         nvme_namespace_t *ns = arg;
3382 
3383         return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_READ));
3384 }
3385 
3386 static int
3387 nvme_bd_write(void *arg, bd_xfer_t *xfer)
3388 {
3389         nvme_namespace_t *ns = arg;
3390 
3391         return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_WRITE));
3392 }
3393 
3394 static int
3395 nvme_bd_sync(void *arg, bd_xfer_t *xfer)
3396 {
3397         nvme_namespace_t *ns = arg;
3398 
3399         if (ns->ns_nvme->n_dead)
3400                 return (EIO);
3401 
3402         /*
3403          * If the volatile write cache isn't present, FLUSH is unsupported
3404          * (ENOTSUP); if it is present but disabled, FLUSH is a no-op and
3405          * the transfer can be completed immediately.
3406          */
3406         if (!ns->ns_nvme->n_write_cache_present) {
3407                 bd_xfer_done(xfer, ENOTSUP);
3408                 return (0);
3409         }
3410 
3411         if (!ns->ns_nvme->n_write_cache_enabled) {
3412                 bd_xfer_done(xfer, 0);
3413                 return (0);
3414         }
3415 
3416         return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_FLUSH));
3417 }
3418 
3419 static int
3420 nvme_bd_devid(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
3421 {
3422         nvme_namespace_t *ns = arg;
3423 
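             /*
              * Prefer the namespace EUI64 as a WWN-based devid; fall back to
              * the synthetic devid string prepared at attach time for devices
              * that don't report an EUI64.
              */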
3424         /*LINTED: E_BAD_PTR_CAST_ALIGN*/
3425         if (*(uint64_t *)ns->ns_eui64 != 0) {
3426                 return (ddi_devid_init(devinfo, DEVID_SCSI3_WWN,
3427                     sizeof (ns->ns_eui64), ns->ns_eui64, devid));
3428         } else {
3429                 return (ddi_devid_init(devinfo, DEVID_ENCAP,
3430                     strlen(ns->ns_devid), ns->ns_devid, devid));
3431         }
3432 }
3433 
3434 static int
3435 nvme_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
3436 {
3437 #ifndef __lock_lint
3438         _NOTE(ARGUNUSED(cred_p));
3439 #endif
3440         minor_t minor = getminor(*devp);
3441         nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor));
3442         int nsid = NVME_MINOR_NSID(minor);
3443         nvme_minor_state_t *nm;
3444         int rv = 0;
3445 
3446         if (otyp != OTYP_CHR)
3447                 return (EINVAL);
3448 
3449         if (nvme == NULL)
3450                 return (ENXIO);
3451 
3452         if (nsid > nvme->n_namespace_count)
3453                 return (ENXIO);
3454 
3455         if (nvme->n_dead)
3456                 return (EIO);
3457 
3458         nm = nsid == 0 ? &nvme->n_minor : &nvme->n_ns[nsid - 1].ns_minor;
3459 
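             /*
              * Enforce exclusive-open semantics: an FEXCL open fails if the
              * minor is already open, and all opens fail while an exclusive
              * open is active.
              */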
3460         mutex_enter(&nm->nm_mutex);
3461         if (nm->nm_oexcl) {
3462                 rv = EBUSY;
3463                 goto out;
3464         }
3465 
3466         if (flag & FEXCL) {
3467                 if (nm->nm_ocnt != 0) {
3468                         rv = EBUSY;
3469                         goto out;
3470                 }
3471                 nm->nm_oexcl = B_TRUE;
3472         }
3473 
3474         nm->nm_ocnt++;
3475 
3476 out:
3477         mutex_exit(&nm->nm_mutex);
3478         return (rv);
3480 }
3481 
3482 static int
3483 nvme_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
3484 {
3485 #ifndef __lock_lint
3486         _NOTE(ARGUNUSED(cred_p));
3487         _NOTE(ARGUNUSED(flag));
3488 #endif
3489         minor_t minor = getminor(dev);
3490         nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor));
3491         int nsid = NVME_MINOR_NSID(minor);
3492         nvme_minor_state_t *nm;
3493 
3494         if (otyp != OTYP_CHR)
3495                 return (ENXIO);
3496 
3497         if (nvme == NULL)
3498                 return (ENXIO);
3499 
3500         if (nsid > nvme->n_namespace_count)
3501                 return (ENXIO);
3502 
3503         nm = nsid == 0 ? &nvme->n_minor : &nvme->n_ns[nsid - 1].ns_minor;
3504 
3505         mutex_enter(&nm->nm_mutex);
3506         if (nm->nm_oexcl)
3507                 nm->nm_oexcl = B_FALSE;
3508 
3509         ASSERT(nm->nm_ocnt > 0);
3510         nm->nm_ocnt--;
3511         mutex_exit(&nm->nm_mutex);
3512 
3513         return (0);
3514 }
3515 
3516 static int
3517 nvme_ioctl_identify(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
3518     cred_t *cred_p)
3519 {
3520         _NOTE(ARGUNUSED(cred_p));
3521         int rv = 0;
3522         void *idctl;
3523 
3524         if ((mode & FREAD) == 0)
3525                 return (EPERM);
3526 
3527         if (nioc->n_len < NVME_IDENTIFY_BUFSIZE)
3528                 return (EINVAL);
3529 
3530         if ((rv = nvme_identify(nvme, nsid, (void **)&idctl)) != 0)
3531                 return (rv);
3532 
3533         if (ddi_copyout(idctl, (void *)nioc->n_buf, NVME_IDENTIFY_BUFSIZE, mode)
3534             != 0)
3535                 rv = EFAULT;
3536 
3537         kmem_free(idctl, NVME_IDENTIFY_BUFSIZE);
3538 
3539         return (rv);
3540 }
3541 
3542 static int
3543 nvme_ioctl_capabilities(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
3544     int mode, cred_t *cred_p)
3545 {
3546         _NOTE(ARGUNUSED(nsid, cred_p));
3547         int rv = 0;
3548         nvme_reg_cap_t cap = { 0 };
3549         nvme_capabilities_t nc;
3550 
3551         if ((mode & FREAD) == 0)
3552                 return (EPERM);
3553 
3554         if (nioc->n_len < sizeof (nc))
3555                 return (EINVAL);
3556 
3557         cap.r = nvme_get64(nvme, NVME_REG_CAP);
3558 
3559         /*
3560          * The MPSMIN and MPSMAX fields in the CAP register use 0 to
3561          * specify the base page size of 4k (1<<12), so add 12 here to
3562          * get the real page size value.
3563          */
3564         nc.mpsmax = 1 << (12 + cap.b.cap_mpsmax);
3565         nc.mpsmin = 1 << (12 + cap.b.cap_mpsmin);
3566 
3567         if (ddi_copyout(&nc, (void *)nioc->n_buf, sizeof (nc), mode) != 0)
3568                 rv = EFAULT;
3569 
3570         return (rv);
3571 }
3572 
3573 static int
3574 nvme_ioctl_get_logpage(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
3575     int mode, cred_t *cred_p)
3576 {
3577         _NOTE(ARGUNUSED(cred_p));
3578         void *log = NULL;
3579         size_t bufsize = 0;
3580         int rv = 0;
3581 
3582         if ((mode & FREAD) == 0)
3583                 return (EPERM);
3584 
3585         switch (nioc->n_arg) {
3586         case NVME_LOGPAGE_ERROR:
3587                 if (nsid != 0)
3588                         return (EINVAL);
3589                 break;
3590         case NVME_LOGPAGE_HEALTH:
3591                 if (nsid != 0 && nvme->n_idctl->id_lpa.lp_smart == 0)
3592                         return (EINVAL);
3593 
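                     /*
                      * A health log covering the whole controller is requested
                      * using the broadcast namespace ID (0xffffffff).
                      */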
3594                 if (nsid == 0)
3595                         nsid = (uint32_t)-1;
3596 
3597                 break;
3598         case NVME_LOGPAGE_FWSLOT:
3599                 if (nsid != 0)
3600                         return (EINVAL);
3601                 break;
3602         default:
3603                 return (EINVAL);
3604         }
3605 
3606         if (nvme_get_logpage(nvme, &log, &bufsize, nioc->n_arg, nsid)
3607             != DDI_SUCCESS)
3608                 return (EIO);
3609 
3610         if (nioc->n_len < bufsize) {
3611                 kmem_free(log, bufsize);
3612                 return (EINVAL);
3613         }
3614 
3615         if (ddi_copyout(log, (void *)nioc->n_buf, bufsize, mode) != 0)
3616                 rv = EFAULT;
3617 
3618         nioc->n_len = bufsize;
3619         kmem_free(log, bufsize);
3620 
3621         return (rv);
3622 }
3623 
3624 static int
3625 nvme_ioctl_get_features(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
3626     int mode, cred_t *cred_p)
3627 {
3628         _NOTE(ARGUNUSED(cred_p));
3629         void *buf = NULL;
3630         size_t bufsize = 0;
3631         uint32_t res = 0;
3632         uint8_t feature;
3633         int rv = 0;
3634 
3635         if ((mode & FREAD) == 0)
3636                 return (EPERM);
3637 
3638         if ((nioc->n_arg >> 32) > 0xff)
3639                 return (EINVAL);
3640 
3641         feature = (uint8_t)(nioc->n_arg >> 32);
3642 
3643         switch (feature) {
3644         case NVME_FEAT_ARBITRATION:
3645         case NVME_FEAT_POWER_MGMT:
3646         case NVME_FEAT_TEMPERATURE:
3647         case NVME_FEAT_ERROR:
3648         case NVME_FEAT_NQUEUES:
3649         case NVME_FEAT_INTR_COAL:
3650         case NVME_FEAT_WRITE_ATOM:
3651         case NVME_FEAT_ASYNC_EVENT:
3652         case NVME_FEAT_PROGRESS:
3653                 if (nsid != 0)
3654                         return (EINVAL);
3655                 break;
3656 
3657         case NVME_FEAT_INTR_VECT:
3658                 if (nsid != 0)
3659                         return (EINVAL);
3660 
3661                 res = nioc->n_arg & 0xffffffffUL;
3662                 if (res >= nvme->n_intr_cnt)
3663                         return (EINVAL);
3664                 break;
3665 
3666         case NVME_FEAT_LBA_RANGE:
3667                 if (nvme->n_lba_range_supported == B_FALSE)
3668                         return (EINVAL);
3669 
3670                 if (nsid == 0 ||
3671                     nsid > nvme->n_namespace_count)
3672                         return (EINVAL);
3673 
3674                 break;
3675 
3676         case NVME_FEAT_WRITE_CACHE:
3677                 if (nsid != 0)
3678                         return (EINVAL);
3679 
3680                 if (!nvme->n_write_cache_present)
3681                         return (EINVAL);
3682 
3683                 break;
3684 
3685         case NVME_FEAT_AUTO_PST:
3686                 if (nsid != 0)
3687                         return (EINVAL);
3688 
3689                 if (!nvme->n_auto_pst_supported)
3690                         return (EINVAL);
3691 
3692                 break;
3693 
3694         default:
3695                 return (EINVAL);
3696         }
3697 
3698         rv = nvme_get_features(nvme, nsid, feature, &res, &buf, &bufsize);
3699         if (rv != 0)
3700                 return (rv);
3701 
3702         if (nioc->n_len < bufsize) {
3703                 kmem_free(buf, bufsize);
3704                 return (EINVAL);
3705         }
3706 
3707         if (buf && ddi_copyout(buf, (void *)nioc->n_buf, bufsize, mode) != 0)
3708                 rv = EFAULT;
3709 
3710         kmem_free(buf, bufsize);
3711         nioc->n_arg = res;
3712         nioc->n_len = bufsize;
3713 
3714         return (rv);
3715 }
3716 
3717 static int
3718 nvme_ioctl_intr_cnt(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
3719     cred_t *cred_p)
3720 {
3721         _NOTE(ARGUNUSED(nsid, cred_p));
3722 
3723         if ((mode & FREAD) == 0)
3724                 return (EPERM);
3725 
3726         nioc->n_arg = nvme->n_intr_cnt;
3727         return (0);
3728 }
3729 
3730 static int
3731 nvme_ioctl_version(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
3732     cred_t *cred_p)
3733 {
3734         _NOTE(ARGUNUSED(nsid, cred_p));
3735         int rv = 0;
3736 
3737         if ((mode & FREAD) == 0)
3738                 return (EPERM);
3739 
3740         if (nioc->n_len < sizeof (nvme->n_version))
3741                 return (ENOMEM);
3742 
3743         if (ddi_copyout(&nvme->n_version, (void *)nioc->n_buf,
3744             sizeof (nvme->n_version), mode) != 0)
3745                 rv = EFAULT;
3746 
3747         return (rv);
3748 }
3749 
3750 static int
3751 nvme_ioctl_format(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
3752     cred_t *cred_p)
3753 {
3755         nvme_format_nvm_t frmt = { 0 };
3756         int c_nsid = nsid != 0 ? nsid - 1 : 0;
3757 
3758         if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
3759                 return (EPERM);
3760 
3761         frmt.r = nioc->n_arg & 0xffffffff;
3762 
3763         /*
3764          * Check whether the FORMAT NVM command is supported.
3765          */
3766         if (nvme->n_idctl->id_oacs.oa_format == 0)
3767                 return (EINVAL);
3768 
3769         /*
3770          * Don't allow format or secure erase of individual namespace if that
3771          * would cause a format or secure erase of all namespaces.
3772          */
3773         if (nsid != 0 && nvme->n_idctl->id_fna.fn_format != 0)
3774                 return (EINVAL);
3775 
3776         if (nsid != 0 && frmt.b.fm_ses != NVME_FRMT_SES_NONE &&
3777             nvme->n_idctl->id_fna.fn_sec_erase != 0)
3778                 return (EINVAL);
3779 
3780         /*
3781          * Don't allow formatting with Protection Information.
3782          */
3783         if (frmt.b.fm_pi != 0 || frmt.b.fm_pil != 0 || frmt.b.fm_ms != 0)
3784                 return (EINVAL);
3785 
3786         /*
3787          * Don't allow formatting using an illegal LBA format, or any LBA format
3788          * that uses metadata.
3789          */
3790         if (frmt.b.fm_lbaf > nvme->n_ns[c_nsid].ns_idns->id_nlbaf ||
3791             nvme->n_ns[c_nsid].ns_idns->id_lbaf[frmt.b.fm_lbaf].lbaf_ms != 0)
3792                 return (EINVAL);
3793 
3794         /*
3795          * Don't allow formatting using an illegal Secure Erase setting.
3796          */
3797         if (frmt.b.fm_ses > NVME_FRMT_MAX_SES ||
3798             (frmt.b.fm_ses == NVME_FRMT_SES_CRYPTO &&
3799             nvme->n_idctl->id_fna.fn_crypt_erase == 0))
3800                 return (EINVAL);
3801 
3802         if (nsid == 0)
3803                 nsid = (uint32_t)-1;
3804 
3805         return (nvme_format_nvm(nvme, nsid, frmt.b.fm_lbaf, B_FALSE, 0, B_FALSE,
3806             frmt.b.fm_ses));
3807 }
3808 
3809 static int
3810 nvme_ioctl_detach(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
3811     cred_t *cred_p)
3812 {
3813         _NOTE(ARGUNUSED(nioc));
3814         int rv = 0;
3815 
3816         if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
3817                 return (EPERM);
3818 
3819         if (nsid == 0)
3820                 return (EINVAL);
3821 
3822         rv = bd_detach_handle(nvme->n_ns[nsid - 1].ns_bd_hdl);
3823         if (rv != DDI_SUCCESS)
3824                 rv = EBUSY;
3825 
3826         return (rv);
3827 }
3828 
3829 static int
3830 nvme_ioctl_attach(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
3831     cred_t *cred_p)
3832 {
3833         _NOTE(ARGUNUSED(nioc));
3834         nvme_identify_nsid_t *idns;
3835         int rv = 0;
3836 
3837         if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
3838                 return (EPERM);
3839 
3840         if (nsid == 0)
3841                 return (EINVAL);
3842 
3843         /*
3844          * Identify namespace again, free old identify data.
3845          */
3846         idns = nvme->n_ns[nsid - 1].ns_idns;
3847         if (nvme_init_ns(nvme, nsid) != DDI_SUCCESS)
3848                 return (EIO);
3849 
3850         kmem_free(idns, sizeof (nvme_identify_nsid_t));
3851 
3852         rv = bd_attach_handle(nvme->n_dip, nvme->n_ns[nsid - 1].ns_bd_hdl);
3853         if (rv != DDI_SUCCESS)
3854                 rv = EBUSY;
3855 
3856         return (rv);
3857 }
3858 
3859 static int
3860 nvme_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
3861     int *rval_p)
3862 {
3863 #ifndef __lock_lint
3864         _NOTE(ARGUNUSED(rval_p));
3865 #endif
3866         minor_t minor = getminor(dev);
3867         nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor));
3868         int nsid = NVME_MINOR_NSID(minor);
3869         int rv = 0;
3870         nvme_ioctl_t nioc;
3871 
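             /*
              * ioctl handler table, indexed by NVME_IOC_CMD(cmd). Index 0 is
              * unused so that valid commands start at 1.
              */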
3872         int (*nvme_ioctl[])(nvme_t *, int, nvme_ioctl_t *, int, cred_t *) = {
3873                 NULL,
3874                 nvme_ioctl_identify,
3875                 nvme_ioctl_identify,
3876                 nvme_ioctl_capabilities,
3877                 nvme_ioctl_get_logpage,
3878                 nvme_ioctl_get_features,
3879                 nvme_ioctl_intr_cnt,
3880                 nvme_ioctl_version,
3881                 nvme_ioctl_format,
3882                 nvme_ioctl_detach,
3883                 nvme_ioctl_attach
3884         };
3885 
3886         if (nvme == NULL)
3887                 return (ENXIO);
3888 
3889         if (nsid > nvme->n_namespace_count)
3890                 return (ENXIO);
3891 
3892         if (IS_DEVCTL(cmd))
3893                 return (ndi_devctl_ioctl(nvme->n_dip, cmd, arg, mode, 0));
3894 
3895 #ifdef _MULTI_DATAMODEL
3896         switch (ddi_model_convert_from(mode & FMODELS)) {
3897         case DDI_MODEL_ILP32: {
3898                 nvme_ioctl32_t nioc32;
3899                 if (ddi_copyin((void *)arg, &nioc32, sizeof (nvme_ioctl32_t),
3900                     mode) != 0)
3901                         return (EFAULT);
3902                 nioc.n_len = nioc32.n_len;
3903                 nioc.n_buf = nioc32.n_buf;
3904                 nioc.n_arg = nioc32.n_arg;
3905                 break;
3906         }
3907         case DDI_MODEL_NONE:
3908 #endif
3909                 if (ddi_copyin((void *)arg, &nioc, sizeof (nvme_ioctl_t), mode)
3910                     != 0)
3911                         return (EFAULT);
3912 #ifdef _MULTI_DATAMODEL
3913                 break;
3914         }
3915 #endif
3916 
3917         if (nvme->n_dead && cmd != NVME_IOC_DETACH)
3918                 return (EIO);
3919 
3921         if (cmd == NVME_IOC_IDENTIFY_CTRL) {
3922                 /*
3923                  * This makes NVME_IOC_IDENTIFY_CTRL work the same on devctl and
3924                  * attachment point nodes.
3925                  */
3926                 nsid = 0;
3927         } else if (cmd == NVME_IOC_IDENTIFY_NSID && nsid == 0) {
3928                 /*
3929                  * This makes NVME_IOC_IDENTIFY_NSID work on a devctl node;
3930                  * it will always return identify data for namespace 1.
3931                  */
3932                 nsid = 1;
3933         }
3934 
3935         if (IS_NVME_IOC(cmd) && nvme_ioctl[NVME_IOC_CMD(cmd)] != NULL)
3936                 rv = nvme_ioctl[NVME_IOC_CMD(cmd)](nvme, nsid, &nioc, mode,
3937                     cred_p);
3938         else
3939                 rv = EINVAL;
3940 
3941 #ifdef _MULTI_DATAMODEL
3942         switch (ddi_model_convert_from(mode & FMODELS)) {
3943         case DDI_MODEL_ILP32: {
3944                 nvme_ioctl32_t nioc32;
3945 
3946                 nioc32.n_len = (size32_t)nioc.n_len;
3947                 nioc32.n_buf = (uintptr32_t)nioc.n_buf;
3948                 nioc32.n_arg = nioc.n_arg;
3949 
3950                 if (ddi_copyout(&nioc32, (void *)arg, sizeof (nvme_ioctl32_t),
3951                     mode) != 0)
3952                         return (EFAULT);
3953                 break;
3954         }
3955         case DDI_MODEL_NONE:
3956 #endif
3957                 if (ddi_copyout(&nioc, (void *)arg, sizeof (nvme_ioctl_t), mode)
3958                     != 0)
3959                         return (EFAULT);
3960 #ifdef _MULTI_DATAMODEL
3961                 break;
3962         }
3963 #endif
3964 
3965         return (rv);
3966 }