7364 NVMe driver performance can be improved by caching nvme_dma_t structs for PRPL.
Reviewed by: Hans Rosenfeld <hans.rosenfeld@nexenta.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Garrett D'Amore <garrett@lucera.com>
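
The patch below splits the DMA setup and teardown out of nvme_zalloc_dma() and nvme_free_dma() into common helpers, adds a kmem cache (n_prp_cache) whose constructor and destructor keep a page-sized, pre-bound DMA buffer alive across allocations, and switches nvme_fill_prp() and nvme_free_cmd() over to that cache so PRP list pages no longer pay the full DDI allocate/bind/unbind cost on every I/O. The original code appears first, followed by the patched version.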

------ old version ------
 240 
 241 static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t);
 242 static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t);
 243 static inline uint64_t nvme_get64(nvme_t *, uintptr_t);
 244 static inline uint32_t nvme_get32(nvme_t *, uintptr_t);
 245 
 246 static boolean_t nvme_check_regs_hdl(nvme_t *);
 247 static boolean_t nvme_check_dma_hdl(nvme_dma_t *);
 248 
 249 static int nvme_fill_prp(nvme_cmd_t *, bd_xfer_t *);
 250 
 251 static void nvme_bd_xfer_done(void *);
 252 static void nvme_bd_driveinfo(void *, bd_drive_t *);
 253 static int nvme_bd_mediainfo(void *, bd_media_t *);
 254 static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t);
 255 static int nvme_bd_read(void *, bd_xfer_t *);
 256 static int nvme_bd_write(void *, bd_xfer_t *);
 257 static int nvme_bd_sync(void *, bd_xfer_t *);
 258 static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *);
 259 
 260 static void nvme_prepare_devid(nvme_t *, uint32_t);
 261 
 262 static void *nvme_state;
 263 static kmem_cache_t *nvme_cmd_cache;
 264 
 265 /*
 266  * DMA attributes for queue DMA memory
 267  *
 268  * Queue DMA memory must be page aligned. The maximum length of a queue is
 269  * 65536 entries, and an entry can be 64 bytes long.
 270  */
 271 static ddi_dma_attr_t nvme_queue_dma_attr = {
 272         .dma_attr_version       = DMA_ATTR_V0,
 273         .dma_attr_addr_lo       = 0,
 274         .dma_attr_addr_hi       = 0xffffffffffffffffULL,
 275         .dma_attr_count_max     = (UINT16_MAX + 1) * sizeof (nvme_sqe_t) - 1,
 276         .dma_attr_align         = 0x1000,
 277         .dma_attr_burstsizes    = 0x7ff,
 278         .dma_attr_minxfer       = 0x1000,
 279         .dma_attr_maxxfer       = (UINT16_MAX + 1) * sizeof (nvme_sqe_t),


 471         return (B_FALSE);
 472 }
 473 
 474 static boolean_t
 475 nvme_check_dma_hdl(nvme_dma_t *dma)
 476 {
 477         ddi_fm_error_t error;
 478 
 479         if (dma == NULL)
 480                 return (B_FALSE);
 481 
 482         ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION);
 483 
 484         if (error.fme_status != DDI_FM_OK)
 485                 return (B_TRUE);
 486 
 487         return (B_FALSE);
 488 }
 489 
 490 static void
 491 nvme_free_dma(nvme_dma_t *dma)
 492 {
 493         if (dma->nd_dmah != NULL)
 494                 (void) ddi_dma_unbind_handle(dma->nd_dmah);
 495         if (dma->nd_acch != NULL)
 496                 ddi_dma_mem_free(&dma->nd_acch);
 497         if (dma->nd_dmah != NULL)
 498                 ddi_dma_free_handle(&dma->nd_dmah);
 499         kmem_free(dma, sizeof (nvme_dma_t));
 500 }
 501 
 502 static int
 503 nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags,
 504     ddi_dma_attr_t *dma_attr, nvme_dma_t **ret)
 505 {
 506         nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP);
 507 
 508         if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL,
 509             &dma->nd_dmah) != DDI_SUCCESS) {
 510                 /*
 511                  * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and
 512                  * the only other possible error is DDI_DMA_BADATTR which
 513                  * indicates a driver bug which should cause a panic.
 514                  */
 515                 dev_err(nvme->n_dip, CE_PANIC,
 516                     "!failed to get DMA handle, check DMA attributes");
 517                 return (DDI_FAILURE);
 518         }
 519 
 520         /*
 521          * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified
 522          * or the flags are conflicting, which isn't the case here.
 523          */
 524         (void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr,
 525             DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp,
 526             &dma->nd_len, &dma->nd_acch);
 527 
 528         if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp,
 529             dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 530             &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) {
 531                 dev_err(nvme->n_dip, CE_WARN,
 532                     "!failed to bind DMA memory");
 533                 atomic_inc_32(&nvme->n_dma_bind_err);
 534                 *ret = NULL;
 535                 nvme_free_dma(dma);
 536                 return (DDI_FAILURE);
 537         }
 538 
 539         bzero(dma->nd_memp, dma->nd_len);
 540 
 541         *ret = dma;
 542         return (DDI_SUCCESS);
 543 }
 544 
 545 static int
 546 nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len,
 547     uint_t flags, nvme_dma_t **dma)
 548 {
 549         uint32_t len = nentry * qe_len;
 550         ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr;
 551 
 552         len = roundup(len, nvme->n_pagesize);
 553 
 554         q_dma_attr.dma_attr_minxfer = len;
 555 
 556         if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma)
 557             != DDI_SUCCESS) {
 558                 dev_err(nvme->n_dip, CE_WARN,
 559                     "!failed to get DMA memory for queue");
 560                 goto fail;
 561         }
 562 
 563         if ((*dma)->nd_ncookie != 1) {
 564                 dev_err(nvme->n_dip, CE_WARN,
 565                     "!got too many cookies for queue DMA");


 643         nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag);
 644 
 645         if (cmd == NULL)
 646                 return (cmd);
 647 
 648         bzero(cmd, sizeof (nvme_cmd_t));
 649 
 650         cmd->nc_nvme = nvme;
 651 
 652         mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER,
 653             DDI_INTR_PRI(nvme->n_intr_pri));
 654         cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL);
 655 
 656         return (cmd);
 657 }
 658 
 659 static void
 660 nvme_free_cmd(nvme_cmd_t *cmd)
 661 {
 662         if (cmd->nc_dma) {
 663                 nvme_free_dma(cmd->nc_dma);
 664                 cmd->nc_dma = NULL;
 665         }
 666 
 667         cv_destroy(&cmd->nc_cv);
 668         mutex_destroy(&cmd->nc_mutex);
 669 
 670         kmem_cache_free(nvme_cmd_cache, cmd);
 671 }
 672 
 673 static int
 674 nvme_submit_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
 675 {
 676         nvme_reg_sqtdbl_t tail = { 0 };
 677 
 678         mutex_enter(&qp->nq_mutex);
 679 
 680         if (qp->nq_active_cmds == qp->nq_nentry) {
 681                 mutex_exit(&qp->nq_mutex);
 682                 return (DDI_FAILURE);


2430         if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize,
2431             &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) {
2432                 dev_err(dip, CE_WARN, "!failed to map regset 1");
2433                 goto fail;
2434         }
2435 
2436         nvme->n_progress |= NVME_REGS_MAPPED;
2437 
2438         /*
2439          * Create taskq for command completion.
2440          */
2441         (void) snprintf(name, sizeof (name), "%s%d_cmd_taskq",
2442             ddi_driver_name(dip), ddi_get_instance(dip));
2443         nvme->n_cmd_taskq = ddi_taskq_create(dip, name, MIN(UINT16_MAX, ncpus),
2444             TASKQ_DEFAULTPRI, 0);
2445         if (nvme->n_cmd_taskq == NULL) {
2446                 dev_err(dip, CE_WARN, "!failed to create cmd taskq");
2447                 goto fail;
2448         }
2449 
2450 
2451         if (nvme_init(nvme) != DDI_SUCCESS)
2452                 goto fail;
2453 
2454         /*
2455          * Attach the blkdev driver for each namespace.
2456          */
2457         for (i = 0; i != nvme->n_namespace_count; i++) {
2458                 if (nvme->n_ns[i].ns_ignore)
2459                         continue;
2460 
2461                 nvme->n_ns[i].ns_bd_hdl = bd_alloc_handle(&nvme->n_ns[i],
2462                     &nvme_bd_ops, &nvme->n_prp_dma_attr, KM_SLEEP);
2463 
2464                 if (nvme->n_ns[i].ns_bd_hdl == NULL) {
2465                         dev_err(dip, CE_WARN,
2466                             "!failed to get blkdev handle for namespace %d", i);
2467                         goto fail;
2468                 }
2469 


2522         }
2523 
2524         if (nvme->n_progress & NVME_INTERRUPTS)
2525                 nvme_release_interrupts(nvme);
2526 
2527         if (nvme->n_cmd_taskq)
2528                 ddi_taskq_wait(nvme->n_cmd_taskq);
2529 
2530         if (nvme->n_ioq_count > 0) {
2531                 for (i = 1; i != nvme->n_ioq_count + 1; i++) {
2532                         if (nvme->n_ioq[i] != NULL) {
2533                                 /* TODO: send destroy queue commands */
2534                                 nvme_free_qpair(nvme->n_ioq[i]);
2535                         }
2536                 }
2537 
2538                 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) *
2539                     (nvme->n_ioq_count + 1));
2540         }
2541 
2542         if (nvme->n_progress & NVME_REGS_MAPPED) {
2543                 nvme_shutdown(nvme, NVME_CC_SHN_NORMAL, B_FALSE);
2544                 (void) nvme_reset(nvme, B_FALSE);
2545         }
2546 
2547         if (nvme->n_cmd_taskq)
2548                 ddi_taskq_destroy(nvme->n_cmd_taskq);
2549 
2550         if (nvme->n_progress & NVME_CTRL_LIMITS)
2551                 sema_destroy(&nvme->n_abort_sema);
2552 
2553         if (nvme->n_progress & NVME_ADMIN_QUEUE)
2554                 nvme_free_qpair(nvme->n_adminq);
2555 
2556         if (nvme->n_idctl)
2557                 kmem_free(nvme->n_idctl, sizeof (nvme_identify_ctrl_t));
2558 
2559         if (nvme->n_progress & NVME_REGS_MAPPED)
2560                 ddi_regs_map_free(&nvme->n_regh);
2561 


2618                 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0;
2619                 return (DDI_SUCCESS);
2620         } else if (xfer->x_ndmac == 2) {
2621                 cmd->nc_sqe.sqe_dptr.d_prp[1] = xfer->x_dmac.dmac_laddress;
2622                 return (DDI_SUCCESS);
2623         }
2624 
2625         xfer->x_ndmac--;
2626 
2627         nprp_page = nvme->n_pagesize / sizeof (uint64_t) - 1;
2628         ASSERT(nprp_page > 0);
2629         nprp = (xfer->x_ndmac + nprp_page - 1) / nprp_page;
2630 
2631         /*
2632          * We currently don't support chained PRPs and set up our DMA
2633          * attributes to reflect that. If we still get an I/O request
2634          * that needs a chained PRP something is very wrong.
2635          */
2636         VERIFY(nprp == 1);
2637 
2638         if (nvme_zalloc_dma(nvme, nvme->n_pagesize * nprp, DDI_DMA_READ,
2639             &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
2640                 dev_err(nvme->n_dip, CE_WARN, "!%s: nvme_zalloc_dma failed",
2641                     __func__);
2642                 return (DDI_FAILURE);
2643         }
2644 
2645         cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_dma->nd_cookie.dmac_laddress;
2646         ddi_dma_nextcookie(cmd->nc_dma->nd_dmah, &cmd->nc_dma->nd_cookie);
2647 
2648         /*LINTED: E_PTR_BAD_CAST_ALIGN*/
2649         for (prp = (uint64_t *)cmd->nc_dma->nd_memp;
2650             xfer->x_ndmac > 0;
2651             prp++, xfer->x_ndmac--) {
2652                 *prp = xfer->x_dmac.dmac_laddress;
2653                 ddi_dma_nextcookie(xfer->x_dmah, &xfer->x_dmac);
2654         }
2655 
2656         (void) ddi_dma_sync(cmd->nc_dma->nd_dmah, 0, cmd->nc_dma->nd_len,
2657             DDI_DMA_SYNC_FORDEV);
2658         return (DDI_SUCCESS);
2659 }
2660 
2661 static nvme_cmd_t *
2662 nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer)
2663 {
2664         nvme_t *nvme = ns->ns_nvme;
2665         nvme_cmd_t *cmd;
2666 

------ new version ------

 240 
 241 static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t);
 242 static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t);
 243 static inline uint64_t nvme_get64(nvme_t *, uintptr_t);
 244 static inline uint32_t nvme_get32(nvme_t *, uintptr_t);
 245 
 246 static boolean_t nvme_check_regs_hdl(nvme_t *);
 247 static boolean_t nvme_check_dma_hdl(nvme_dma_t *);
 248 
 249 static int nvme_fill_prp(nvme_cmd_t *, bd_xfer_t *);
 250 
 251 static void nvme_bd_xfer_done(void *);
 252 static void nvme_bd_driveinfo(void *, bd_drive_t *);
 253 static int nvme_bd_mediainfo(void *, bd_media_t *);
 254 static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t);
 255 static int nvme_bd_read(void *, bd_xfer_t *);
 256 static int nvme_bd_write(void *, bd_xfer_t *);
 257 static int nvme_bd_sync(void *, bd_xfer_t *);
 258 static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *);
 259 
 260 static int nvme_prp_dma_constructor(void *, void *, int);
 261 static void nvme_prp_dma_destructor(void *, void *);
 262 
 263 static void nvme_prepare_devid(nvme_t *, uint32_t);
 264 
 265 static void *nvme_state;
 266 static kmem_cache_t *nvme_cmd_cache;
 267 
 268 /*
 269  * DMA attributes for queue DMA memory
 270  *
 271  * Queue DMA memory must be page aligned. The maximum length of a queue is
 272  * 65536 entries, and an entry can be 64 bytes long.
 273  */
 274 static ddi_dma_attr_t nvme_queue_dma_attr = {
 275         .dma_attr_version       = DMA_ATTR_V0,
 276         .dma_attr_addr_lo       = 0,
 277         .dma_attr_addr_hi       = 0xffffffffffffffffULL,
 278         .dma_attr_count_max     = (UINT16_MAX + 1) * sizeof (nvme_sqe_t) - 1,
 279         .dma_attr_align         = 0x1000,
 280         .dma_attr_burstsizes    = 0x7ff,
 281         .dma_attr_minxfer       = 0x1000,
 282         .dma_attr_maxxfer       = (UINT16_MAX + 1) * sizeof (nvme_sqe_t),

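A quick check of the arithmetic in these attributes: 65536 entries of 64 bytes each is 4 MiB for dma_attr_maxxfer, and dma_attr_count_max is one byte less. A hypothetical standalone recap (QE_SIZE stands in for sizeof (nvme_sqe_t)):

    #include <assert.h>
    #include <stdint.h>

    #define QE_SIZE         64              /* assumed sizeof (nvme_sqe_t) */

    int
    main(void)
    {
            assert((UINT16_MAX + 1) * QE_SIZE == 4 * 1024 * 1024);
            assert((UINT16_MAX + 1) * QE_SIZE - 1 == 0x3fffff);
            return (0);
    }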

 474         return (B_FALSE);
 475 }
 476 
 477 static boolean_t
 478 nvme_check_dma_hdl(nvme_dma_t *dma)
 479 {
 480         ddi_fm_error_t error;
 481 
 482         if (dma == NULL)
 483                 return (B_FALSE);
 484 
 485         ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION);
 486 
 487         if (error.fme_status != DDI_FM_OK)
 488                 return (B_TRUE);
 489 
 490         return (B_FALSE);
 491 }
 492 
 493 static void
 494 nvme_free_dma_common(nvme_dma_t *dma)
 495 {
 496         if (dma->nd_dmah != NULL)
 497                 (void) ddi_dma_unbind_handle(dma->nd_dmah);
 498         if (dma->nd_acch != NULL)
 499                 ddi_dma_mem_free(&dma->nd_acch);
 500         if (dma->nd_dmah != NULL)
 501                 ddi_dma_free_handle(&dma->nd_dmah);
 502 }
 503 
 504 static void
 505 nvme_free_dma(nvme_dma_t *dma)
 506 {
 507         nvme_free_dma_common(dma);
 508         kmem_free(dma, sizeof (*dma));
 509 }
 510 
 511 static void
 512 nvme_prp_dma_destructor(void *buf, void *private)
 513 {
 514         nvme_dma_t *dma = (nvme_dma_t *)buf;
 515 
 516         nvme_free_dma_common(dma);
 517 }
 518 
 519 static int
 520 nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma,
 521     size_t len, uint_t flags, ddi_dma_attr_t *dma_attr)
 522 {
 523         if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL,
 524             &dma->nd_dmah) != DDI_SUCCESS) {
 525                 /*
 526                  * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and
 527                  * the only other possible error is DDI_DMA_BADATTR which
 528                  * indicates a driver bug which should cause a panic.
 529                  */
 530                 dev_err(nvme->n_dip, CE_PANIC,
 531                     "!failed to get DMA handle, check DMA attributes");
 532                 return (DDI_FAILURE);
 533         }
 534 
 535         /*
 536          * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified
 537          * or the flags are conflicting, which isn't the case here.
 538          */
 539         (void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr,
 540             DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp,
 541             &dma->nd_len, &dma->nd_acch);
 542 
 543         if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp,
 544             dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 545             &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) {
 546                 dev_err(nvme->n_dip, CE_WARN,
 547                     "!failed to bind DMA memory");
 548                 atomic_inc_32(&nvme->n_dma_bind_err);
 549                 nvme_free_dma_common(dma);
 550                 return (DDI_FAILURE);
 551         }
 552 
 553         return (DDI_SUCCESS);
 554 }
 555 
 556 static int
 557 nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags,
 558     ddi_dma_attr_t *dma_attr, nvme_dma_t **ret)
 559 {
 560         nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP);
 561 
 562         if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) !=
 563             DDI_SUCCESS) {
 564                 *ret = NULL;
 565                 kmem_free(dma, sizeof (nvme_dma_t));
 566                 return (DDI_FAILURE);
 567         }
 568 
 569         bzero(dma->nd_memp, dma->nd_len);
 570 
 571         *ret = dma;
 572         return (DDI_SUCCESS);
 573 }
 574 
 575 static int
 576 nvme_prp_dma_constructor(void *buf, void *private, int flags)
 577 {
 578         nvme_dma_t *dma = (nvme_dma_t *)buf;
 579         nvme_t *nvme = (nvme_t *)private;
 580 
 581         dma->nd_dmah = NULL;
 582         dma->nd_acch = NULL;
 583 
 584         if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize,
 585             DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) {
 586                 return (-1);
 587         }
 588 
 589         ASSERT(dma->nd_ncookie == 1);
 590 
 591         dma->nd_cached = B_TRUE;
 592 
 593         return (0);
 594 }
 595 
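Because the constructor runs only when the cache needs fresh buffers, a cached nvme_dma_t keeps its DMA handle, memory, and binding across kmem_cache_alloc()/kmem_cache_free() cycles; teardown happens in the destructor only when the cache itself gives the buffer up. A minimal userland sketch of the same pattern using libumem's umem_cache_create(), which mirrors the kernel kmem_cache_create() interface; buf_state_t and the malloc() standing in for the DMA setup are hypothetical:

    #include <umem.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct buf_state {
            void    *bs_page;       /* stands in for the bound DMA page */
    } buf_state_t;

    /* Runs when the cache needs a fresh buffer, not on every allocation. */
    static int
    buf_ctor(void *buf, void *private, int flags)
    {
            buf_state_t *bs = buf;

            bs->bs_page = malloc(4096);     /* one-time expensive setup */
            return (bs->bs_page == NULL ? -1 : 0);
    }

    /* Runs only when the cache finally releases the buffer. */
    static void
    buf_dtor(void *buf, void *private)
    {
            free(((buf_state_t *)buf)->bs_page);
    }

    int
    main(void)
    {
            umem_cache_t *cp = umem_cache_create("buf_cache",
                sizeof (buf_state_t), 0, buf_ctor, buf_dtor, NULL, NULL,
                NULL, 0);
            buf_state_t *bs = umem_cache_alloc(cp, UMEM_NOFAIL);

            memset(bs->bs_page, 0, 4096);   /* reuse the constructed page */
            umem_cache_free(cp, bs);        /* bs_page stays set up for reuse */
            umem_cache_destroy(cp);
            return (0);
    }

Build with cc sketch.c -lumem; in the driver, nvme_prp_dma_constructor() plays the role of buf_ctor, with the nvme_t passed through the private argument.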
 596 static int
 597 nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len,
 598     uint_t flags, nvme_dma_t **dma)
 599 {
 600         uint32_t len = nentry * qe_len;
 601         ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr;
 602 
 603         len = roundup(len, nvme->n_pagesize);
 604 
 605         q_dma_attr.dma_attr_minxfer = len;
 606 
 607         if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma)
 608             != DDI_SUCCESS) {
 609                 dev_err(nvme->n_dip, CE_WARN,
 610                     "!failed to get DMA memory for queue");
 611                 goto fail;
 612         }
 613 
 614         if ((*dma)->nd_ncookie != 1) {
 615                 dev_err(nvme->n_dip, CE_WARN,
 616                     "!got too many cookies for queue DMA");


 694         nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag);
 695 
 696         if (cmd == NULL)
 697                 return (cmd);
 698 
 699         bzero(cmd, sizeof (nvme_cmd_t));
 700 
 701         cmd->nc_nvme = nvme;
 702 
 703         mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER,
 704             DDI_INTR_PRI(nvme->n_intr_pri));
 705         cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL);
 706 
 707         return (cmd);
 708 }
 709 
 710 static void
 711 nvme_free_cmd(nvme_cmd_t *cmd)
 712 {
 713         if (cmd->nc_dma) {
 714                 if (cmd->nc_dma->nd_cached)
 715                         kmem_cache_free(cmd->nc_nvme->n_prp_cache,
 716                             cmd->nc_dma);
 717                 else
 718                         nvme_free_dma(cmd->nc_dma);
 719                 cmd->nc_dma = NULL;
 720         }
 721 
 722         cv_destroy(&cmd->nc_cv);
 723         mutex_destroy(&cmd->nc_mutex);
 724 
 725         kmem_cache_free(nvme_cmd_cache, cmd);
 726 }
 727 
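Note the split free path: a cached buffer goes back to n_prp_cache with its DMA binding intact (nvme_prp_dma_destructor() will only unbind it when the cache is destroyed), while buffers allocated with nvme_zalloc_dma() still take the full nvme_free_dma() teardown.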
 728 static int
 729 nvme_submit_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
 730 {
 731         nvme_reg_sqtdbl_t tail = { 0 };
 732 
 733         mutex_enter(&qp->nq_mutex);
 734 
 735         if (qp->nq_active_cmds == qp->nq_nentry) {
 736                 mutex_exit(&qp->nq_mutex);
 737                 return (DDI_FAILURE);


2485         if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize,
2486             &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) {
2487                 dev_err(dip, CE_WARN, "!failed to map regset 1");
2488                 goto fail;
2489         }
2490 
2491         nvme->n_progress |= NVME_REGS_MAPPED;
2492 
2493         /*
2494          * Create taskq for command completion.
2495          */
2496         (void) snprintf(name, sizeof (name), "%s%d_cmd_taskq",
2497             ddi_driver_name(dip), ddi_get_instance(dip));
2498         nvme->n_cmd_taskq = ddi_taskq_create(dip, name, MIN(UINT16_MAX, ncpus),
2499             TASKQ_DEFAULTPRI, 0);
2500         if (nvme->n_cmd_taskq == NULL) {
2501                 dev_err(dip, CE_WARN, "!failed to create cmd taskq");
2502                 goto fail;
2503         }
2504 
2505         /*
2506          * Create PRP DMA cache
2507          */
2508         (void) snprintf(name, sizeof (name), "%s%d_prp_cache",
2509             ddi_driver_name(dip), ddi_get_instance(dip));
2510         nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t),
2511             0, nvme_prp_dma_constructor, nvme_prp_dma_destructor,
2512             NULL, (void *)nvme, NULL, 0);
2513 
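The cache is created before nvme_init() so it exists once command submission starts, and the failure/detach path below destroys it only after ddi_taskq_wait() has drained outstanding completions; the nvme_t pointer passed as the private argument here is what nvme_prp_dma_constructor() receives.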
2514         if (nvme_init(nvme) != DDI_SUCCESS)
2515                 goto fail;
2516 
2517         /*
2518          * Attach the blkdev driver for each namespace.
2519          */
2520         for (i = 0; i != nvme->n_namespace_count; i++) {
2521                 if (nvme->n_ns[i].ns_ignore)
2522                         continue;
2523 
2524                 nvme->n_ns[i].ns_bd_hdl = bd_alloc_handle(&nvme->n_ns[i],
2525                     &nvme_bd_ops, &nvme->n_prp_dma_attr, KM_SLEEP);
2526 
2527                 if (nvme->n_ns[i].ns_bd_hdl == NULL) {
2528                         dev_err(dip, CE_WARN,
2529                             "!failed to get blkdev handle for namespace %d", i);
2530                         goto fail;
2531                 }
2532 


2585         }
2586 
2587         if (nvme->n_progress & NVME_INTERRUPTS)
2588                 nvme_release_interrupts(nvme);
2589 
2590         if (nvme->n_cmd_taskq)
2591                 ddi_taskq_wait(nvme->n_cmd_taskq);
2592 
2593         if (nvme->n_ioq_count > 0) {
2594                 for (i = 1; i != nvme->n_ioq_count + 1; i++) {
2595                         if (nvme->n_ioq[i] != NULL) {
2596                                 /* TODO: send destroy queue commands */
2597                                 nvme_free_qpair(nvme->n_ioq[i]);
2598                         }
2599                 }
2600 
2601                 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) *
2602                     (nvme->n_ioq_count + 1));
2603         }
2604 
2605         if (nvme->n_prp_cache != NULL) {
2606                 kmem_cache_destroy(nvme->n_prp_cache);
2607         }
2608 
2609         if (nvme->n_progress & NVME_REGS_MAPPED) {
2610                 nvme_shutdown(nvme, NVME_CC_SHN_NORMAL, B_FALSE);
2611                 (void) nvme_reset(nvme, B_FALSE);
2612         }
2613 
2614         if (nvme->n_cmd_taskq)
2615                 ddi_taskq_destroy(nvme->n_cmd_taskq);
2616 
2617         if (nvme->n_progress & NVME_CTRL_LIMITS)
2618                 sema_destroy(&nvme->n_abort_sema);
2619 
2620         if (nvme->n_progress & NVME_ADMIN_QUEUE)
2621                 nvme_free_qpair(nvme->n_adminq);
2622 
2623         if (nvme->n_idctl)
2624                 kmem_free(nvme->n_idctl, sizeof (nvme_identify_ctrl_t));
2625 
2626         if (nvme->n_progress & NVME_REGS_MAPPED)
2627                 ddi_regs_map_free(&nvme->n_regh);
2628 


2685                 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0;
2686                 return (DDI_SUCCESS);
2687         } else if (xfer->x_ndmac == 2) {
2688                 cmd->nc_sqe.sqe_dptr.d_prp[1] = xfer->x_dmac.dmac_laddress;
2689                 return (DDI_SUCCESS);
2690         }
2691 
2692         xfer->x_ndmac--;
2693 
2694         nprp_page = nvme->n_pagesize / sizeof (uint64_t) - 1;
2695         ASSERT(nprp_page > 0);
2696         nprp = (xfer->x_ndmac + nprp_page - 1) / nprp_page;
2697 
2698         /*
2699          * We currently don't support chained PRPs and set up our DMA
2700          * attributes to reflect that. If we still get an I/O request
2701          * that needs a chained PRP something is very wrong.
2702          */
2703         VERIFY(nprp == 1);
2704 
2705         cmd->nc_dma = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP);
2706         bzero(cmd->nc_dma->nd_memp, cmd->nc_dma->nd_len);
2707 
2708         cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_dma->nd_cookie.dmac_laddress;
2709 
2710         /*LINTED: E_PTR_BAD_CAST_ALIGN*/
2711         for (prp = (uint64_t *)cmd->nc_dma->nd_memp;
2712             xfer->x_ndmac > 0;
2713             prp++, xfer->x_ndmac--) {
2714                 *prp = xfer->x_dmac.dmac_laddress;
2715                 ddi_dma_nextcookie(xfer->x_dmah, &xfer->x_dmac);
2716         }
2717 
2718         (void) ddi_dma_sync(cmd->nc_dma->nd_dmah, 0, cmd->nc_dma->nd_len,
2719             DDI_DMA_SYNC_FORDEV);
2720         return (DDI_SUCCESS);
2721 }
2722 
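For reference, the sizing logic above: PRP1 takes the first cookie, each remaining cookie needs one PRP list entry, and a single list page holds n_pagesize / 8 - 1 usable entries, presumably reserving the last slot for a chain pointer that this driver never uses. A hypothetical standalone recap, assuming 4 KiB pages:

    #include <assert.h>
    #include <stdint.h>

    /*
     * Cookie 0 goes into PRP1; every remaining cookie needs a list
     * entry, and one list page holds pagesize / 8 - 1 usable entries.
     * (Transfers with one or two cookies never get here; they use
     * PRP2 directly.)
     */
    static uint32_t
    prp_list_pages(uint32_t ncookies, uint32_t pagesize)
    {
            uint32_t nprp_page = pagesize / sizeof (uint64_t) - 1;

            return ((ncookies - 1 + nprp_page - 1) / nprp_page);
    }

    int
    main(void)
    {
            /* 4 KiB pages: PRP1 + 511 list entries = 512 cookies max. */
            assert(prp_list_pages(512, 4096) == 1);
            assert(prp_list_pages(513, 4096) == 2);
            return (0);
    }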
2723 static nvme_cmd_t *
2724 nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer)
2725 {
2726         nvme_t *nvme = ns->ns_nvme;
2727         nvme_cmd_t *cmd;
2728