654 return (cmd);
655 }
656
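/*
 * Free a command: release any DMA memory attached to it, destroy its
 * synchronization primitives, and return it to the command cache.
 */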
657 static void
658 nvme_free_cmd(nvme_cmd_t *cmd)
659 {
660 if (cmd->nc_dma) {
661 nvme_free_dma(cmd->nc_dma);
662 cmd->nc_dma = NULL;
663 }
664
665 cv_destroy(&cmd->nc_cv);
666 mutex_destroy(&cmd->nc_mutex);
667
668 kmem_cache_free(nvme_cmd_cache, cmd);
669 }
670
671 static int
672 nvme_submit_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
673 {
674 nvme_reg_sqtdbl_t tail = { 0 };
675
676 mutex_enter(&qp->nq_mutex);
677
678 if (qp->nq_active_cmds == qp->nq_nentry) {
679 mutex_exit(&qp->nq_mutex);
680 return (DDI_FAILURE);
681 }
682
683 cmd->nc_completed = B_FALSE;
684
685 /*
686 * Try to insert the cmd into the active cmd array at the nq_next_cmd
687 * slot. If the slot is already occupied, advance to the next slot and
688 * try again. This can happen for long-running commands like async
689 * event requests.
690 */
691 while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
692 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
693 qp->nq_cmd[qp->nq_next_cmd] = cmd;
694
695 qp->nq_active_cmds++;
696
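/*
 * Use the array slot index as the command identifier (CID) so the
 * completion entry can be matched back to this command later.
 */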
697 cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;
698 bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t));
699 (void) ddi_dma_sync(qp->nq_sqdma->nd_dmah,
700 sizeof (nvme_sqe_t) * qp->nq_sqtail,
701 sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV);
702 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
703
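/*
 * Advance the submission queue tail and write it to the doorbell
 * register to notify the controller of the new entry.
 */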
704 tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry;
705 nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r);
706
707 mutex_exit(&qp->nq_mutex);
708 return (DDI_SUCCESS);
709 }
710
711 static nvme_cmd_t *
712 nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp)
713 {
714 nvme_reg_cqhdbl_t head = { 0 };
715
716 nvme_cqe_t *cqe;
717 nvme_cmd_t *cmd;
718
719 (void) ddi_dma_sync(qp->nq_cqdma->nd_dmah, 0,
720 sizeof (nvme_cqe_t) * qp->nq_nentry, DDI_DMA_SYNC_FORKERNEL);
721
722 cqe = &qp->nq_cq[qp->nq_cqhead];
723
724 /* Check phase tag of CQE. Hardware inverts it for new entries. */
725 if (cqe->cqe_sf.sf_p == qp->nq_phase)
726 return (NULL);
727
728 ASSERT(nvme->n_ioq[cqe->cqe_sqid] == qp);
729 ASSERT(cqe->cqe_cid < qp->nq_nentry);
730
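/*
 * The CID in the completion entry is the slot index assigned at
 * submission time; use it to look up the command and retire it.
 */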
731 mutex_enter(&qp->nq_mutex);
732 cmd = qp->nq_cmd[cqe->cqe_cid];
733 qp->nq_cmd[cqe->cqe_cid] = NULL;
734 qp->nq_active_cmds--;
1046 nvme_abort_cmd_cb(void *arg)
1047 {
1048 nvme_cmd_t *cmd = arg;
1049
1050 /*
1051 * Grab the command mutex. Once we have it, we hold the last reference
1052 * to the command and can safely free it.
1053 */
1054 mutex_enter(&cmd->nc_mutex);
1055 (void) nvme_check_cmd_status(cmd);
1056 mutex_exit(&cmd->nc_mutex);
1057
1058 nvme_free_cmd(cmd);
1059 }
1060
1061 static void
1062 nvme_abort_cmd(nvme_cmd_t *abort_cmd)
1063 {
1064 nvme_t *nvme = abort_cmd->nc_nvme;
1065 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1066 nvme_abort_cmd_t ac = { 0 };
1067
1068 sema_p(&nvme->n_abort_sema);
1069
1070 ac.b.ac_cid = abort_cmd->nc_sqe.sqe_cid;
1071 ac.b.ac_sqid = abort_cmd->nc_sqid;
1072
1073 /*
1074 * Drop the mutex of the aborted command. From this point on
1075 * we must assume that the abort callback has freed the command.
1076 */
1077 mutex_exit(&abort_cmd->nc_mutex);
1078
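/*
 * Build the ABORT admin command. CDW10 carries the CID and SQID of
 * the command to be aborted.
 */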
1079 cmd->nc_sqid = 0;
1080 cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT;
1081 cmd->nc_callback = nvme_wakeup_cmd;
1082 cmd->nc_sqe.sqe_cdw10 = ac.r;
1083
1084 /*
1085 * Send the ABORT to the hardware. The ABORT command will return _after_
1086 * the aborted command has completed (aborted or otherwise).
1539 if (nvme_check_cmd_status(cmd)) {
1540 dev_err(nvme->n_dip, CE_WARN,
1541 "!IDENTIFY failed with sct = %x, sc = %x",
1542 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
1543 goto fail;
1544 }
1545
1546 buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP);
1547 bcopy(cmd->nc_dma->nd_memp, buf, NVME_IDENTIFY_BUFSIZE);
1548
1549 fail:
1550 nvme_free_cmd(cmd);
1551
1552 return (buf);
1553 }
1554
1555 static int
1556 nvme_set_nqueues(nvme_t *nvme, uint16_t nqueues)
1557 {
1558 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1559 nvme_nqueue_t nq = { 0 };
1560
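/* Request the same number of submission and completion queues. */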
1561 nq.b.nq_nsq = nq.b.nq_ncq = nqueues;
1562
1563 cmd->nc_sqid = 0;
1564 cmd->nc_callback = nvme_wakeup_cmd;
1565 cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES;
1566 cmd->nc_sqe.sqe_cdw10 = NVME_FEAT_NQUEUES;
1567 cmd->nc_sqe.sqe_cdw11 = nq.r;
1568
1569 if (nvme_admin_cmd(cmd, NVME_ADMIN_CMD_TIMEOUT) != DDI_SUCCESS) {
1570 dev_err(nvme->n_dip, CE_WARN,
1571 "!nvme_admin_cmd failed for SET FEATURES (NQUEUES)");
1572 return (0);
1573 }
1574
1575 if (nvme_check_cmd_status(cmd)) {
1576 dev_err(nvme->n_dip, CE_WARN,
1577 "!SET FEATURES (NQUEUES) failed with sct = %x, sc = %x",
1578 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
1579 nvme_free_cmd(cmd);
1580 return (0);
1581 }
1582
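/*
 * The controller reports the number of queues actually allocated in
 * dword 0 of the completion entry.
 */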
1583 nq.r = cmd->nc_cqe.cqe_dw0;
1584 nvme_free_cmd(cmd);
1585
1586 /*
1587 * Always use the same number of submission and completion queues, and
1588 * never use more than the requested number of queues.
1589 */
1590 return (MIN(nqueues, MIN(nq.b.nq_nsq, nq.b.nq_ncq)));
1591 }
1592
1593 static int
1594 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx)
1595 {
1596 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1597 nvme_create_queue_dw10_t dw10 = { 0 };
1598 nvme_create_cq_dw11_t c_dw11 = { 0 };
1599 nvme_create_sq_dw11_t s_dw11 = { 0 };
1600
1601 dw10.b.q_qid = idx;
1602 dw10.b.q_qsize = qp->nq_nentry - 1;
1603
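/*
 * The completion queue is physically contiguous, has interrupts
 * enabled, and gets its interrupt vector assigned round-robin from
 * the available vectors.
 */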
1604 c_dw11.b.cq_pc = 1;
1605 c_dw11.b.cq_ien = 1;
1606 c_dw11.b.cq_iv = idx % nvme->n_intr_cnt;
1607
1608 cmd->nc_sqid = 0;
1609 cmd->nc_callback = nvme_wakeup_cmd;
1610 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE;
1611 cmd->nc_sqe.sqe_cdw10 = dw10.r;
1612 cmd->nc_sqe.sqe_cdw11 = c_dw11.r;
1613 cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_cqdma->nd_cookie.dmac_laddress;
1614
1615 if (nvme_admin_cmd(cmd, NVME_ADMIN_CMD_TIMEOUT) != DDI_SUCCESS) {
1616 dev_err(nvme->n_dip, CE_WARN,
1617 "!nvme_admin_cmd failed for CREATE CQUEUE");
1618 return (DDI_FAILURE);
1619 }
1719 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid)
1720 {
1721 char model[sizeof (nvme->n_idctl->id_model) + 1];
1722 char serial[sizeof (nvme->n_idctl->id_serial) + 1];
1723
1724 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
1725 bcopy(nvme->n_idctl->id_serial, serial,
1726 sizeof (nvme->n_idctl->id_serial));
1727
1728 model[sizeof (nvme->n_idctl->id_model)] = '\0';
1729 serial[sizeof (nvme->n_idctl->id_serial)] = '\0';
1730
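/*
 * The device ID combines the PCI vendor ID, the controller model and
 * serial strings, and the namespace ID.
 */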
1731 (void) snprintf(nvme->n_ns[nsid - 1].ns_devid,
1732 sizeof (nvme->n_ns[0].ns_devid), "%4X-%s-%s-%X",
1733 nvme->n_idctl->id_vid, model, serial, nsid);
1734 }
1735
1736 static int
1737 nvme_init(nvme_t *nvme)
1738 {
1739 nvme_reg_cc_t cc = { 0 };
1740 nvme_reg_aqa_t aqa = { 0 };
1741 nvme_reg_asq_t asq = { 0 };
1742 nvme_reg_acq_t acq = { 0 };
1743 nvme_reg_cap_t cap;
1744 nvme_reg_vs_t vs;
1745 nvme_reg_csts_t csts;
1746 int i = 0;
1747 int nqueues;
1748 char model[sizeof (nvme->n_idctl->id_model) + 1];
1749 char *vendor, *product;
1750
1751 /* Set up a fixed interrupt for the admin queue. */
1752 if (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1)
1753 != DDI_SUCCESS) {
1754 dev_err(nvme->n_dip, CE_WARN,
1755 "!failed to setup fixed interrupt");
1756 goto fail;
1757 }
1758
1759 /* Check controller version */
1760 vs.r = nvme_get32(nvme, NVME_REG_VS);