Print this page
8368 remove warlock leftovers from usr/src/uts


 811 }
 812 
 813 
 814 /*
 815  * tavor_ci_alloc_qp()
 816  *    Allocate a Queue Pair
 817  *    Context: Can be called only from user or kernel context.
 818  */
 819 static ibt_status_t
 820 tavor_ci_alloc_qp(ibc_hca_hdl_t hca, ibtl_qp_hdl_t ibt_qphdl,
 821     ibt_qp_type_t type, ibt_qp_alloc_attr_t *attr_p,
 822     ibt_chan_sizes_t *queue_sizes_p, ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
 823 {
 824         tavor_state_t           *state;
 825         tavor_qp_info_t         qpinfo;
 826         tavor_qp_options_t      op;
 827         int                     status;
 828 
 829         TAVOR_TNF_ENTER(tavor_ci_alloc_qp);
 830 
 831         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
 832         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))
 833 
 834         /* Check for valid HCA handle */
 835         if (hca == NULL) {
 836                 TNF_PROBE_0(tavor_ci_alloc_qp_invhca_fail,
 837                     TAVOR_TNF_ERROR, "");
 838                 TAVOR_TNF_EXIT(tavor_ci_alloc_qp);
 839                 return (IBT_HCA_HDL_INVALID);
 840         }
 841 
 842         /* Grab the Tavor softstate pointer */
 843         state = (tavor_state_t *)hca;
 844 
 845         /* Allocate the QP */
 846         qpinfo.qpi_attrp        = attr_p;
 847         qpinfo.qpi_type         = type;
 848         qpinfo.qpi_ibt_qphdl    = ibt_qphdl;
 849         qpinfo.qpi_queueszp     = queue_sizes_p;
 850         qpinfo.qpi_qpn          = qpn;
 851         op.qpo_wq_loc           = state->ts_cfg_profile->cp_qp_wq_inddr;
 852         status = tavor_qp_alloc(state, &qpinfo, TAVOR_NOSLEEP, &op);
 853         if (status != DDI_SUCCESS) {


 866 
 867 
 868 /*
 869  * tavor_ci_alloc_special_qp()
 870  *    Allocate a Special Queue Pair
 871  *    Context: Can be called only from user or kernel context.
 872  */
 873 static ibt_status_t
 874 tavor_ci_alloc_special_qp(ibc_hca_hdl_t hca, uint8_t port,
 875     ibtl_qp_hdl_t ibt_qphdl, ibt_sqp_type_t type,
 876     ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
 877     ibc_qp_hdl_t *qp_p)
 878 {
 879         tavor_state_t           *state;
 880         tavor_qp_info_t         qpinfo;
 881         tavor_qp_options_t      op;
 882         int                     status;
 883 
 884         TAVOR_TNF_ENTER(tavor_ci_alloc_special_qp);
 885 
 886         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
 887         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))
 888 
 889         /* Check for valid HCA handle */
 890         if (hca == NULL) {
 891                 TNF_PROBE_0(tavor_ci_alloc_special_qp_invhca_fail,
 892                     TAVOR_TNF_ERROR, "");
 893                 TAVOR_TNF_EXIT(tavor_ci_alloc_special_qp);
 894                 return (IBT_HCA_HDL_INVALID);
 895         }
 896 
 897         /* Grab the Tavor softstate pointer */
 898         state = (tavor_state_t *)hca;
 899 
 900         /* Allocate the Special QP */
 901         qpinfo.qpi_attrp        = attr_p;
 902         qpinfo.qpi_type         = type;
 903         qpinfo.qpi_port         = port;
 904         qpinfo.qpi_ibt_qphdl    = ibt_qphdl;
 905         qpinfo.qpi_queueszp     = queue_sizes_p;
 906         op.qpo_wq_loc           = state->ts_cfg_profile->cp_qp_wq_inddr;
 907         status = tavor_special_qp_alloc(state, &qpinfo, TAVOR_NOSLEEP, &op);
 908         if (status != DDI_SUCCESS) {


1458 
1459 /*
1460  * tavor_ci_register_mr()
1461  *    Prepare a virtually addressed Memory Region for use by an HCA
1462  *    Context: Can be called from interrupt or base context.
1463  */
1464 /* ARGSUSED */
1465 static ibt_status_t
1466 tavor_ci_register_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1467     ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
1468     ibt_mr_desc_t *mr_desc)
1469 {
1470         tavor_mr_options_t      op;
1471         tavor_state_t           *state;
1472         tavor_pdhdl_t           pdhdl;
1473         tavor_mrhdl_t           mrhdl;
1474         int                     status;
1475 
1476         TAVOR_TNF_ENTER(tavor_ci_register_mr);
1477 
1478         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1479 
1480         ASSERT(mr_attr != NULL);
1481         ASSERT(mr_p != NULL);
1482         ASSERT(mr_desc != NULL);
1483 
1484         /* Check for valid HCA handle */
1485         if (hca == NULL) {
1486                 TNF_PROBE_0(tavor_ci_register_mr_invhca_fail,
1487                     TAVOR_TNF_ERROR, "");
1488                 TAVOR_TNF_EXIT(tavor_ci_register_mr);
1489                 return (IBT_HCA_HDL_INVALID);
1490         }
1491 
1492         /* Check for valid PD handle pointer */
1493         if (pd == NULL) {
1494                 TNF_PROBE_0(tavor_ci_register_mr_invpdhdl_fail,
1495                     TAVOR_TNF_ERROR, "");
1496                 TAVOR_TNF_EXIT(tavor_ci_register_mr);
1497                 return (IBT_PD_HDL_INVALID);
1498         }
1499 


1508                     TAVOR_TNF_ERROR, "");
1509                 TAVOR_TNF_EXIT(tavor_ci_register_mr);
1510                 return (IBT_MR_ACCESS_REQ_INVALID);
1511         }
1512 
1513         /* Grab the Tavor softstate pointer and PD handle */
1514         state = (tavor_state_t *)hca;
1515         pdhdl = (tavor_pdhdl_t)pd;
1516 
1517         /* Register the memory region */
1518         op.mro_bind_type   = state->ts_cfg_profile->cp_iommu_bypass;
1519         op.mro_bind_dmahdl = NULL;
1520         op.mro_bind_override_addr = 0;
1521         status = tavor_mr_register(state, pdhdl, mr_attr, &mrhdl, &op);
1522         if (status != DDI_SUCCESS) {
1523                 TNF_PROBE_1(tavor_ci_register_mr_fail, TAVOR_TNF_ERROR, "",
1524                     tnf_uint, status, status);
1525                 TAVOR_TNF_EXIT(tavor_ci_register_mr);
1526                 return (status);
1527         }
1528         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
1529 
1530         /* Fill in the mr_desc structure */
1531         mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
1532         mr_desc->md_lkey  = mrhdl->mr_lkey;
1533         /* Only set RKey if remote access was requested */
1534         if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1535             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1536             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1537                 mr_desc->md_rkey = mrhdl->mr_rkey;
1538         }
1539 
1540         /*
1541          * If region is mapped for streaming (i.e. noncoherent), then set
1542          * sync is required
1543          */
1544         mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
1545             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1546 
1547         /* Return the Tavor MR handle */
1548         *mr_p = (ibc_mr_hdl_t)mrhdl;


1555 /*
1556  * tavor_ci_register_buf()
1557  *    Prepare a Memory Region specified by buf structure for use by an HCA
1558  *    Context: Can be called from interrupt or base context.
1559  */
1560 /* ARGSUSED */
1561 static ibt_status_t
1562 tavor_ci_register_buf(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1563     ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
1564     ibt_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
1565 {
1566         tavor_mr_options_t      op;
1567         tavor_state_t           *state;
1568         tavor_pdhdl_t           pdhdl;
1569         tavor_mrhdl_t           mrhdl;
1570         int                     status;
1571         ibt_mr_flags_t          flags = attrp->mr_flags;
1572 
1573         TAVOR_TNF_ENTER(tavor_ci_register_buf);
1574 
1575         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1576 
1577         ASSERT(mr_p != NULL);
1578         ASSERT(mr_desc != NULL);
1579 
1580         /* Check for valid HCA handle */
1581         if (hca == NULL) {
1582                 TNF_PROBE_0(tavor_ci_register_buf_invhca_fail,
1583                     TAVOR_TNF_ERROR, "");
1584                 TAVOR_TNF_EXIT(tavor_ci_register_buf);
1585                 return (IBT_HCA_HDL_INVALID);
1586         }
1587 
1588         /* Check for valid PD handle pointer */
1589         if (pd == NULL) {
1590                 TNF_PROBE_0(tavor_ci_register_buf_invpdhdl_fail,
1591                     TAVOR_TNF_ERROR, "");
1592                 TAVOR_TNF_EXIT(tavor_ci_register_buf);
1593                 return (IBT_PD_HDL_INVALID);
1594         }
1595 
1596         /*


1604                     TAVOR_TNF_ERROR, "");
1605                 TAVOR_TNF_EXIT(tavor_ci_register_buf);
1606                 return (IBT_MR_ACCESS_REQ_INVALID);
1607         }
1608 
1609         /* Grab the Tavor softstate pointer and PD handle */
1610         state = (tavor_state_t *)hca;
1611         pdhdl = (tavor_pdhdl_t)pd;
1612 
1613         /* Register the memory region */
1614         op.mro_bind_type   = state->ts_cfg_profile->cp_iommu_bypass;
1615         op.mro_bind_dmahdl = NULL;
1616         op.mro_bind_override_addr = 0;
1617         status = tavor_mr_register_buf(state, pdhdl, attrp, buf, &mrhdl, &op);
1618         if (status != DDI_SUCCESS) {
1619                 TNF_PROBE_1(tavor_ci_register_mr_fail, TAVOR_TNF_ERROR, "",
1620                     tnf_uint, status, status);
1621                 TAVOR_TNF_EXIT(tavor_ci_register_mr);
1622                 return (status);
1623         }
1624         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
1625 
1626         /* Fill in the mr_desc structure */
1627         mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
1628         mr_desc->md_lkey  = mrhdl->mr_lkey;
1629         /* Only set RKey if remote access was requested */
1630         if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1631             (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1632             (flags & IBT_MR_ENABLE_REMOTE_READ)) {
1633                 mr_desc->md_rkey = mrhdl->mr_rkey;
1634         }
1635 
1636         /*
1637          * If region is mapped for streaming (i.e. noncoherent), then set
1638          * sync is required
1639          */
1640         mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
1641             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1642 
1643         /* Return the Tavor MR handle */
1644         *mr_p = (ibc_mr_hdl_t)mrhdl;


1751 
1752 
1753 /*
1754  * tavor_ci_register_shared_mr()
1755  *    Create a shared memory region matching an existing Memory Region
1756  *    Context: Can be called from interrupt or base context.
1757  */
1758 /* ARGSUSED */
1759 static ibt_status_t
1760 tavor_ci_register_shared_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
1761     ibc_pd_hdl_t pd, ibt_smr_attr_t *mr_attr, void *ibtl_reserved,
1762     ibc_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
1763 {
1764         tavor_state_t           *state;
1765         tavor_pdhdl_t           pdhdl;
1766         tavor_mrhdl_t           mrhdl, mrhdl_new;
1767         int                     status;
1768 
1769         TAVOR_TNF_ENTER(tavor_ci_register_shared_mr);
1770 
1771         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1772 
1773         ASSERT(mr_attr != NULL);
1774         ASSERT(mr_p != NULL);
1775         ASSERT(mr_desc != NULL);
1776 
1777         /* Check for valid HCA handle */
1778         if (hca == NULL) {
1779                 TNF_PROBE_0(tavor_ci_register_shared_mr_invhca_fail,
1780                     TAVOR_TNF_ERROR, "");
1781                 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1782                 return (IBT_HCA_HDL_INVALID);
1783         }
1784 
1785         /* Check for valid PD handle pointer */
1786         if (pd == NULL) {
1787                 TNF_PROBE_0(tavor_ci_register_shared_mr_invpdhdl_fail,
1788                     TAVOR_TNF_ERROR, "");
1789                 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1790                 return (IBT_PD_HDL_INVALID);
1791         }
1792 


1807                 TNF_PROBE_0(tavor_ci_register_shared_mr_accflags_inv,
1808                     TAVOR_TNF_ERROR, "");
1809                 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1810                 return (IBT_MR_ACCESS_REQ_INVALID);
1811         }
1812 
1813         /* Grab the Tavor softstate pointer and handles */
1814         state = (tavor_state_t *)hca;
1815         pdhdl = (tavor_pdhdl_t)pd;
1816         mrhdl = (tavor_mrhdl_t)mr;
1817 
1818         /* Register the shared memory region */
1819         status = tavor_mr_register_shared(state, mrhdl, pdhdl, mr_attr,
1820             &mrhdl_new);
1821         if (status != DDI_SUCCESS) {
1822                 TNF_PROBE_1(tavor_ci_register_shared_mr_fail, TAVOR_TNF_ERROR,
1823                     "", tnf_uint, status, status);
1824                 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1825                 return (status);
1826         }
1827         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1828 
1829         /* Fill in the mr_desc structure */
1830         mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1831         mr_desc->md_lkey  = mrhdl_new->mr_lkey;
1832         /* Only set RKey if remote access was requested */
1833         if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1834             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1835             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1836                 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1837         }
1838 
1839         /*
1840          * If shared region is mapped for streaming (i.e. noncoherent), then
1841          * set sync is required
1842          */
1843         mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1844             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1845 
1846         /* Return the Tavor MR handle */
1847         *mr_p = (ibc_mr_hdl_t)mrhdl_new;


1853 
1854 /*
1855  * tavor_ci_reregister_mr()
1856  *    Modify the attributes of an existing Memory Region
1857  *    Context: Can be called from interrupt or base context.
1858  */
1859 /* ARGSUSED */
1860 static ibt_status_t
1861 tavor_ci_reregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
1862     ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_new,
1863     ibt_mr_desc_t *mr_desc)
1864 {
1865         tavor_mr_options_t      op;
1866         tavor_state_t           *state;
1867         tavor_pdhdl_t           pdhdl;
1868         tavor_mrhdl_t           mrhdl, mrhdl_new;
1869         int                     status;
1870 
1871         TAVOR_TNF_ENTER(tavor_ci_reregister_mr);
1872 
1873         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1874 
1875         ASSERT(mr_attr != NULL);
1876         ASSERT(mr_new != NULL);
1877         ASSERT(mr_desc != NULL);
1878 
1879         /* Check for valid HCA handle */
1880         if (hca == NULL) {
1881                 TNF_PROBE_0(tavor_ci_reregister_mr_hca_inv, TAVOR_TNF_ERROR,
1882                     "");
1883                 TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
1884                 return (IBT_HCA_HDL_INVALID);
1885         }
1886 
1887         /* Check for valid memory region handle */
1888         if (mr == NULL) {
1889                 TNF_PROBE_0(tavor_ci_reregister_mr_invmrhdl_fail,
1890                     TAVOR_TNF_ERROR, "");
1891                 TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
1892                 return (IBT_MR_HDL_INVALID);
1893         }
1894 
1895         /* Grab the Tavor softstate pointer, mrhdl, and pdhdl */
1896         state = (tavor_state_t *)hca;
1897         mrhdl = (tavor_mrhdl_t)mr;
1898         pdhdl = (tavor_pdhdl_t)pd;
1899 
1900         /* Reregister the memory region */
1901         op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
1902         status = tavor_mr_reregister(state, mrhdl, pdhdl, mr_attr,
1903             &mrhdl_new, &op);
1904         if (status != DDI_SUCCESS) {
1905                 TNF_PROBE_1(tavor_ci_reregister_mr_fail, TAVOR_TNF_ERROR, "",
1906                     tnf_uint, status, status);
1907                 TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
1908                 return (status);
1909         }
1910         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1911 
1912         /* Fill in the mr_desc structure */
1913         mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1914         mr_desc->md_lkey  = mrhdl_new->mr_lkey;
1915         /* Only set RKey if remote access was requested */
1916         if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1917             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1918             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1919                 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1920         }
1921 
1922         /*
1923          * If region is mapped for streaming (i.e. noncoherent), then set
1924          * sync is required
1925          */
1926         mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1927             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1928 
1929         /* Return the Tavor MR handle */
1930         *mr_new = (ibc_mr_hdl_t)mrhdl_new;


1937 /*
1938  * tavor_ci_reregister_buf()
1939  *    Modify the attributes of an existing Memory Region
1940  *    Context: Can be called from interrupt or base context.
1941  */
1942 /* ARGSUSED */
1943 static ibt_status_t
1944 tavor_ci_reregister_buf(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
1945     ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
1946     ibc_mr_hdl_t *mr_new, ibt_mr_desc_t *mr_desc)
1947 {
1948         tavor_mr_options_t      op;
1949         tavor_state_t           *state;
1950         tavor_pdhdl_t           pdhdl;
1951         tavor_mrhdl_t           mrhdl, mrhdl_new;
1952         int                     status;
1953         ibt_mr_flags_t          flags = attrp->mr_flags;
1954 
1955         TAVOR_TNF_ENTER(tavor_ci_reregister_buf);
1956 
1957         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1958 
1959         ASSERT(mr_new != NULL);
1960         ASSERT(mr_desc != NULL);
1961 
1962         /* Check for valid HCA handle */
1963         if (hca == NULL) {
1964                 TNF_PROBE_0(tavor_ci_reregister_buf_hca_inv, TAVOR_TNF_ERROR,
1965                     "");
1966                 TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
1967                 return (IBT_HCA_HDL_INVALID);
1968         }
1969 
1970         /* Check for valid memory region handle */
1971         if (mr == NULL) {
1972                 TNF_PROBE_0(tavor_ci_reregister_buf_invmrhdl_fail,
1973                     TAVOR_TNF_ERROR, "");
1974                 TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
1975                 return (IBT_MR_HDL_INVALID);
1976         }
1977 
1978         /* Grab the Tavor softstate pointer, mrhdl, and pdhdl */
1979         state = (tavor_state_t *)hca;
1980         mrhdl = (tavor_mrhdl_t)mr;
1981         pdhdl = (tavor_pdhdl_t)pd;
1982 
1983         /* Reregister the memory region */
1984         op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
1985         status = tavor_mr_reregister_buf(state, mrhdl, pdhdl, attrp, buf,
1986             &mrhdl_new, &op);
1987         if (status != DDI_SUCCESS) {
1988                 TNF_PROBE_1(tavor_ci_reregister_buf_fail, TAVOR_TNF_ERROR, "",
1989                     tnf_uint, status, status);
1990                 TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
1991                 return (status);
1992         }
1993         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1994 
1995         /* Fill in the mr_desc structure */
1996         mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1997         mr_desc->md_lkey  = mrhdl_new->mr_lkey;
1998         /* Only set RKey if remote access was requested */
1999         if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
2000             (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
2001             (flags & IBT_MR_ENABLE_REMOTE_READ)) {
2002                 mr_desc->md_rkey = mrhdl_new->mr_rkey;
2003         }
2004 
2005         /*
2006          * If region is mapped for streaming (i.e. noncoherent), then set
2007          * sync is required
2008          */
2009         mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
2010             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
2011 
2012         /* Return the Tavor MR handle */
2013         *mr_new = (ibc_mr_hdl_t)mrhdl_new;


2086         /* Check for valid PD handle pointer */
2087         if (pd == NULL) {
2088                 TNF_PROBE_0(tavor_ci_alloc_mw_invpdhdl_fail,
2089                     TAVOR_TNF_ERROR, "");
2090                 TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
2091                 return (IBT_PD_HDL_INVALID);
2092         }
2093 
2094         /* Grab the Tavor softstate pointer and PD handle */
2095         state = (tavor_state_t *)hca;
2096         pdhdl = (tavor_pdhdl_t)pd;
2097 
2098         /* Allocate the memory window */
2099         status = tavor_mw_alloc(state, pdhdl, flags, &mwhdl);
2100         if (status != DDI_SUCCESS) {
2101                 TNF_PROBE_1(tavor_ci_alloc_mw_fail, TAVOR_TNF_ERROR, "",
2102                     tnf_uint, status, status);
2103                 TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
2104                 return (status);
2105         }
2106         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mwhdl))
2107 
2108         /* Return the MW handle and RKey */
2109         *mw_p = (ibc_mw_hdl_t)mwhdl;
2110         *rkey_p = mwhdl->mr_rkey;
2111 
2112         TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
2113         return (IBT_SUCCESS);
2114 }
2115 
2116 
2117 /*
2118  * tavor_ci_free_mw()
2119  *    Free a Memory Window
2120  *    Context: Can be called from interrupt or base context.
2121  */
2122 static ibt_status_t
2123 tavor_ci_free_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw)
2124 {
2125         tavor_state_t           *state;
2126         tavor_mwhdl_t           mwhdl;


2199         mw_attr_p->mw_pd   = (ibc_pd_hdl_t)mwhdl->mr_pdhdl;
2200         mw_attr_p->mw_rkey = mwhdl->mr_rkey;
2201         mutex_exit(&mwhdl->mr_lock);
2202 
2203         TAVOR_TNF_EXIT(tavor_ci_query_mw);
2204         return (IBT_SUCCESS);
2205 }
2206 
2207 
2208 /* ARGSUSED */
2209 static ibt_status_t
2210 tavor_ci_register_dma_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2211     ibt_dmr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
2212     ibt_mr_desc_t *mr_desc)
2213 {
2214         tavor_state_t           *state;
2215         tavor_pdhdl_t           pdhdl;
2216         tavor_mrhdl_t           mrhdl;
2217         int                     status;
2218 
2219         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
2220 
2221         ASSERT(mr_attr != NULL);
2222         ASSERT(mr_p != NULL);
2223         ASSERT(mr_desc != NULL);
2224 
2225         /* Check for valid HCA handle */
2226         if (hca == NULL) {
2227                 return (IBT_HCA_HDL_INVALID);
2228         }
2229 
2230         /* Check for valid PD handle pointer */
2231         if (pd == NULL) {
2232                 return (IBT_PD_HDL_INVALID);
2233         }
2234 
2235         /*
2236          * Validate the access flags.  Both Remote Write and Remote Atomic
2237          * require the Local Write flag to be set
2238          */
2239         if (((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
2240             (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
2241             !(mr_attr->dmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
2242                 return (IBT_MR_ACCESS_REQ_INVALID);
2243         }
2244 
2245         /* Grab the Tavor softstate pointer and PD handle */
2246         state = (tavor_state_t *)hca;
2247         pdhdl = (tavor_pdhdl_t)pd;
2248 
2249         status = tavor_dma_mr_register(state, pdhdl, mr_attr, &mrhdl);
2250         if (status != DDI_SUCCESS) {
2251                 return (status);
2252         }
2253         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
2254 
2255         /* Fill in the mr_desc structure */
2256         mr_desc->md_vaddr = mr_attr->dmr_paddr;
2257         mr_desc->md_lkey  = mrhdl->mr_lkey;
2258         /* Only set RKey if remote access was requested */
2259         if ((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
2260             (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
2261             (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
2262                 mr_desc->md_rkey = mrhdl->mr_rkey;
2263         }
2264 
2265         /*
2266          * If region is mapped for streaming (i.e. noncoherent), then set
2267          * sync is required
2268          */
2269         mr_desc->md_sync_required = B_FALSE;
2270 
2271         /* Return the Hermon MR handle */
2272         *mr_p = (ibc_mr_hdl_t)mrhdl;
2273 


3056 {
3057         return (IBT_NOT_SUPPORTED);
3058 }
3059 
/*
 * tavor_ci_unmap_mem_area()
 *    Unmap the memory area
 *    Context: Can be called from interrupt or base context.
 *
 *    NOTE(review): this CI entry point is unimplemented for Tavor; it
 *    ignores both arguments and unconditionally reports the operation
 *    as unsupported (matching tavor_ci_map_mem_area() just above).
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_unmap_mem_area(ibc_hca_hdl_t hca, ibc_ma_hdl_t ma_hdl)
{
	return (IBT_NOT_SUPPORTED);
}
3071 
3072 struct ibc_mi_s {
3073         int                     imh_len;
3074         ddi_dma_handle_t        imh_dmahandle[1];
3075 };
3076 _NOTE(SCHEME_PROTECTS_DATA("safe sharing",
3077     ibc_mi_s::imh_len
3078     ibc_mi_s::imh_dmahandle))
3079 
3080 
3081 /*
3082  * tavor_ci_map_mem_iov()
3083  * Map the memory
3084  *    Context: Can be called from interrupt or base context.
3085  */
3086 /* ARGSUSED */
3087 static ibt_status_t
3088 tavor_ci_map_mem_iov(ibc_hca_hdl_t hca, ibt_iov_attr_t *iov_attr,
3089     ibt_all_wr_t *wr, ibc_mi_hdl_t *mi_hdl_p)
3090 {
3091         int                     status;
3092         int                     i, j, nds, max_nds;
3093         uint_t                  len;
3094         ibt_status_t            ibt_status;
3095         ddi_dma_handle_t        dmahdl;
3096         ddi_dma_cookie_t        dmacookie;
3097         ddi_dma_attr_t          dma_attr;
3098         uint_t                  cookie_cnt;
3099         ibc_mi_hdl_t            mi_hdl;
3100         ibt_lkey_t              rsvd_lkey;
3101         ibt_wr_ds_t             *sgl;
3102         tavor_state_t           *state;
3103         int                     kmflag;
3104         int                     (*callback)(caddr_t);
3105 
3106         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wr))
3107 
3108         if (mi_hdl_p == NULL)
3109                 return (IBT_MI_HDL_INVALID);
3110 
3111         /* Check for valid HCA handle */
3112         if (hca == NULL)
3113                 return (IBT_HCA_HDL_INVALID);
3114 
3115         /* Tavor does not allow the default "use reserved lkey" */
3116         if ((iov_attr->iov_flags & IBT_IOV_ALT_LKEY) == 0)
3117                 return (IBT_INVALID_PARAM);
3118 
3119         rsvd_lkey = iov_attr->iov_alt_lkey;
3120 
3121         state = (tavor_state_t *)hca;
3122         tavor_dma_attr_init(&dma_attr);
3123 #ifdef  __sparc
3124         if (state->ts_cfg_profile->cp_iommu_bypass == TAVOR_BINDMEM_BYPASS)
3125                 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
3126 #endif
3127 
3128         nds = 0;
3129         max_nds = iov_attr->iov_wr_nds;
3130         if (iov_attr->iov_lso_hdr_sz)
3131                 max_nds -= (iov_attr->iov_lso_hdr_sz + sizeof (uint32_t) +
3132                     0xf) >> 4;    /* 0xf is for rounding up to a multiple of 16 */
3133         if ((iov_attr->iov_flags & IBT_IOV_NOSLEEP) == 0) {
3134                 kmflag = KM_SLEEP;
3135                 callback = DDI_DMA_SLEEP;
3136         } else {
3137                 kmflag = KM_NOSLEEP;
3138                 callback = DDI_DMA_DONTWAIT;
3139         }
3140 
3141         if (iov_attr->iov_flags & IBT_IOV_BUF) {
3142                 mi_hdl = kmem_alloc(sizeof (*mi_hdl), kmflag);
3143                 if (mi_hdl == NULL)
3144                         return (IBT_INSUFF_RESOURCE);
3145                 sgl = wr->send.wr_sgl;
3146                 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))
3147 
3148                 status = ddi_dma_alloc_handle(state->ts_dip, &dma_attr,
3149                     callback, NULL, &dmahdl);
3150                 if (status != DDI_SUCCESS) {
3151                         kmem_free(mi_hdl, sizeof (*mi_hdl));
3152                         return (IBT_INSUFF_RESOURCE);
3153                 }
3154                 status = ddi_dma_buf_bind_handle(dmahdl, iov_attr->iov_buf,
3155                     DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
3156                     &dmacookie, &cookie_cnt);
3157                 if (status != DDI_DMA_MAPPED) {
3158                         ddi_dma_free_handle(&dmahdl);
3159                         kmem_free(mi_hdl, sizeof (*mi_hdl));
3160                         return (ibc_get_ci_failure(0));
3161                 }
3162                 while (cookie_cnt-- > 0) {
3163                         if (nds > max_nds) {
3164                                 status = ddi_dma_unbind_handle(dmahdl);
3165                                 ddi_dma_free_handle(&dmahdl);
3166                                 return (IBT_SGL_TOO_SMALL);
3167                         }
3168                         sgl[nds].ds_va = dmacookie.dmac_laddress;
3169                         sgl[nds].ds_key = rsvd_lkey;
3170                         sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
3171                         nds++;
3172                         if (cookie_cnt != 0)
3173                                 ddi_dma_nextcookie(dmahdl, &dmacookie);
3174                 }
3175                 wr->send.wr_nds = nds;
3176                 mi_hdl->imh_len = 1;
3177                 mi_hdl->imh_dmahandle[0] = dmahdl;
3178                 *mi_hdl_p = mi_hdl;
3179                 return (IBT_SUCCESS);
3180         }
3181 
3182         if (iov_attr->iov_flags & IBT_IOV_RECV)
3183                 sgl = wr->recv.wr_sgl;
3184         else
3185                 sgl = wr->send.wr_sgl;
3186         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))
3187 
3188         len = iov_attr->iov_list_len;
3189         for (i = 0, j = 0; j < len; j++) {
3190                 if (iov_attr->iov[j].iov_len == 0)
3191                         continue;
3192                 i++;
3193         }
3194         mi_hdl = kmem_alloc(sizeof (*mi_hdl) +
3195             (i - 1) * sizeof (ddi_dma_handle_t), kmflag);
3196         if (mi_hdl == NULL)
3197                 return (IBT_INSUFF_RESOURCE);
3198         mi_hdl->imh_len = i;
3199         for (i = 0, j = 0; j < len; j++) {
3200                 if (iov_attr->iov[j].iov_len == 0)
3201                         continue;
3202                 status = ddi_dma_alloc_handle(state->ts_dip, &dma_attr,
3203                     callback, NULL, &dmahdl);
3204                 if (status != DDI_SUCCESS) {
3205                         ibt_status = IBT_INSUFF_RESOURCE;
3206                         goto fail2;


3451 
3452         /* Check for valid HCA handle */
3453         if (hca == NULL) {
3454                 TNF_PROBE_0(tavor_ci_free_io_mem_invhca_fail,
3455                     TAVOR_TNF_ERROR, "");
3456                 TAVOR_TNF_EXIT(tavor_ci_free_io_mem);
3457                 return (IBT_HCA_HDL_INVALID);
3458         }
3459 
3460         /* Check for valid mem_alloc_hdl handle pointer */
3461         if (mem_alloc_hdl == NULL) {
3462                 TNF_PROBE_0(tavor_ci_free_io_mem_hdl_fail,
3463                     TAVOR_TNF_ERROR, "");
3464                 TAVOR_TNF_EXIT(tavor_ci_free_io_mem);
3465                 return (IBT_MEM_ALLOC_HDL_INVALID);
3466         }
3467 
3468         memhdl = (tavor_mem_alloc_hdl_t)mem_alloc_hdl;
3469 
3470         /* free the memory */
3471         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*memhdl))
3472         ddi_dma_mem_free(&memhdl->tavor_acc_hdl);
3473         ddi_dma_free_handle(&memhdl->tavor_dma_hdl);
3474 
3475         kmem_free(memhdl, sizeof (*memhdl));
3476         TAVOR_TNF_EXIT(tavor_dma_free);
3477         return (IBT_SUCCESS);
3478 }
3479 
3480 
3481 int
3482 tavor_mem_alloc(
3483         tavor_state_t *state,
3484         size_t size,
3485         ibt_mr_flags_t flags,
3486         caddr_t *kaddrp,
3487         tavor_mem_alloc_hdl_t *mem_hdl)
3488 {
3489         ddi_dma_handle_t        dma_hdl;
3490         ddi_dma_attr_t          dma_attr;
3491         ddi_acc_handle_t        acc_hdl;


3513             &state->ts_reg_accattr, DDI_DMA_CONSISTENT, ddi_cb,
3514             NULL,
3515             kaddrp, &real_len, &acc_hdl);
3516         if (status != DDI_SUCCESS) {
3517                 ddi_dma_free_handle(&dma_hdl);
3518                 TNF_PROBE_0(tavor_dma_alloc_memory_fail, TAVOR_TNF_ERROR, "");
3519                 TAVOR_TNF_EXIT(tavor_mem_alloc);
3520                 return (DDI_FAILURE);
3521         }
3522 
3523         /* Package the tavor_dma_info contents and return */
3524         *mem_hdl = kmem_alloc(sizeof (**mem_hdl),
3525             flags & IBT_MR_NOSLEEP ? KM_NOSLEEP : KM_SLEEP);
3526         if (*mem_hdl == NULL) {
3527                 ddi_dma_mem_free(&acc_hdl);
3528                 ddi_dma_free_handle(&dma_hdl);
3529                 TNF_PROBE_0(tavor_dma_alloc_memory_fail, TAVOR_TNF_ERROR, "");
3530                 TAVOR_TNF_EXIT(tavor_mem_alloc);
3531                 return (DDI_FAILURE);
3532         }
3533         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(**mem_hdl))
3534         (*mem_hdl)->tavor_dma_hdl = dma_hdl;
3535         (*mem_hdl)->tavor_acc_hdl = acc_hdl;
3536 
3537         TAVOR_TNF_EXIT(tavor_mem_alloc);
3538         return (DDI_SUCCESS);
3539 }


 811 }
 812 
 813 
 814 /*
 815  * tavor_ci_alloc_qp()
 816  *    Allocate a Queue Pair
 817  *    Context: Can be called only from user or kernel context.
 818  */
 819 static ibt_status_t
 820 tavor_ci_alloc_qp(ibc_hca_hdl_t hca, ibtl_qp_hdl_t ibt_qphdl,
 821     ibt_qp_type_t type, ibt_qp_alloc_attr_t *attr_p,
 822     ibt_chan_sizes_t *queue_sizes_p, ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
 823 {
 824         tavor_state_t           *state;
 825         tavor_qp_info_t         qpinfo;
 826         tavor_qp_options_t      op;
 827         int                     status;
 828 
 829         TAVOR_TNF_ENTER(tavor_ci_alloc_qp);
 830 



 831         /* Check for valid HCA handle */
 832         if (hca == NULL) {
 833                 TNF_PROBE_0(tavor_ci_alloc_qp_invhca_fail,
 834                     TAVOR_TNF_ERROR, "");
 835                 TAVOR_TNF_EXIT(tavor_ci_alloc_qp);
 836                 return (IBT_HCA_HDL_INVALID);
 837         }
 838 
 839         /* Grab the Tavor softstate pointer */
 840         state = (tavor_state_t *)hca;
 841 
 842         /* Allocate the QP */
 843         qpinfo.qpi_attrp        = attr_p;
 844         qpinfo.qpi_type         = type;
 845         qpinfo.qpi_ibt_qphdl    = ibt_qphdl;
 846         qpinfo.qpi_queueszp     = queue_sizes_p;
 847         qpinfo.qpi_qpn          = qpn;
 848         op.qpo_wq_loc           = state->ts_cfg_profile->cp_qp_wq_inddr;
 849         status = tavor_qp_alloc(state, &qpinfo, TAVOR_NOSLEEP, &op);
 850         if (status != DDI_SUCCESS) {


 863 
 864 
 865 /*
 866  * tavor_ci_alloc_special_qp()
 867  *    Allocate a Special Queue Pair
 868  *    Context: Can be called only from user or kernel context.
 869  */
 870 static ibt_status_t
 871 tavor_ci_alloc_special_qp(ibc_hca_hdl_t hca, uint8_t port,
 872     ibtl_qp_hdl_t ibt_qphdl, ibt_sqp_type_t type,
 873     ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
 874     ibc_qp_hdl_t *qp_p)
 875 {
 876         tavor_state_t           *state;
 877         tavor_qp_info_t         qpinfo;
 878         tavor_qp_options_t      op;
 879         int                     status;
 880 
 881         TAVOR_TNF_ENTER(tavor_ci_alloc_special_qp);
 882 



 883         /* Check for valid HCA handle */
 884         if (hca == NULL) {
 885                 TNF_PROBE_0(tavor_ci_alloc_special_qp_invhca_fail,
 886                     TAVOR_TNF_ERROR, "");
 887                 TAVOR_TNF_EXIT(tavor_ci_alloc_special_qp);
 888                 return (IBT_HCA_HDL_INVALID);
 889         }
 890 
 891         /* Grab the Tavor softstate pointer */
 892         state = (tavor_state_t *)hca;
 893 
 894         /* Allocate the Special QP */
 895         qpinfo.qpi_attrp        = attr_p;
 896         qpinfo.qpi_type         = type;
 897         qpinfo.qpi_port         = port;
 898         qpinfo.qpi_ibt_qphdl    = ibt_qphdl;
 899         qpinfo.qpi_queueszp     = queue_sizes_p;
 900         op.qpo_wq_loc           = state->ts_cfg_profile->cp_qp_wq_inddr;
 901         status = tavor_special_qp_alloc(state, &qpinfo, TAVOR_NOSLEEP, &op);
 902         if (status != DDI_SUCCESS) {


1452 
1453 /*
1454  * tavor_ci_register_mr()
1455  *    Prepare a virtually addressed Memory Region for use by an HCA
1456  *    Context: Can be called from interrupt or base context.
1457  */
1458 /* ARGSUSED */
1459 static ibt_status_t
1460 tavor_ci_register_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1461     ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
1462     ibt_mr_desc_t *mr_desc)
1463 {
1464         tavor_mr_options_t      op;
1465         tavor_state_t           *state;
1466         tavor_pdhdl_t           pdhdl;
1467         tavor_mrhdl_t           mrhdl;
1468         int                     status;
1469 
1470         TAVOR_TNF_ENTER(tavor_ci_register_mr);
1471 


1472         ASSERT(mr_attr != NULL);
1473         ASSERT(mr_p != NULL);
1474         ASSERT(mr_desc != NULL);
1475 
1476         /* Check for valid HCA handle */
1477         if (hca == NULL) {
1478                 TNF_PROBE_0(tavor_ci_register_mr_invhca_fail,
1479                     TAVOR_TNF_ERROR, "");
1480                 TAVOR_TNF_EXIT(tavor_ci_register_mr);
1481                 return (IBT_HCA_HDL_INVALID);
1482         }
1483 
1484         /* Check for valid PD handle pointer */
1485         if (pd == NULL) {
1486                 TNF_PROBE_0(tavor_ci_register_mr_invpdhdl_fail,
1487                     TAVOR_TNF_ERROR, "");
1488                 TAVOR_TNF_EXIT(tavor_ci_register_mr);
1489                 return (IBT_PD_HDL_INVALID);
1490         }
1491 


1500                     TAVOR_TNF_ERROR, "");
1501                 TAVOR_TNF_EXIT(tavor_ci_register_mr);
1502                 return (IBT_MR_ACCESS_REQ_INVALID);
1503         }
1504 
1505         /* Grab the Tavor softstate pointer and PD handle */
1506         state = (tavor_state_t *)hca;
1507         pdhdl = (tavor_pdhdl_t)pd;
1508 
1509         /* Register the memory region */
1510         op.mro_bind_type   = state->ts_cfg_profile->cp_iommu_bypass;
1511         op.mro_bind_dmahdl = NULL;
1512         op.mro_bind_override_addr = 0;
1513         status = tavor_mr_register(state, pdhdl, mr_attr, &mrhdl, &op);
1514         if (status != DDI_SUCCESS) {
1515                 TNF_PROBE_1(tavor_ci_register_mr_fail, TAVOR_TNF_ERROR, "",
1516                     tnf_uint, status, status);
1517                 TAVOR_TNF_EXIT(tavor_ci_register_mr);
1518                 return (status);
1519         }

1520 
1521         /* Fill in the mr_desc structure */
1522         mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
1523         mr_desc->md_lkey  = mrhdl->mr_lkey;
1524         /* Only set RKey if remote access was requested */
1525         if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1526             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1527             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1528                 mr_desc->md_rkey = mrhdl->mr_rkey;
1529         }
1530 
1531         /*
1532          * If region is mapped for streaming (i.e. noncoherent), then set
1533          * sync is required
1534          */
1535         mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
1536             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1537 
1538         /* Return the Tavor MR handle */
1539         *mr_p = (ibc_mr_hdl_t)mrhdl;


1546 /*
1547  * tavor_ci_register_buf()
1548  *    Prepare a Memory Region specified by buf structure for use by an HCA
1549  *    Context: Can be called from interrupt or base context.
1550  */
1551 /* ARGSUSED */
1552 static ibt_status_t
1553 tavor_ci_register_buf(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1554     ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
1555     ibt_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
1556 {
1557         tavor_mr_options_t      op;
1558         tavor_state_t           *state;
1559         tavor_pdhdl_t           pdhdl;
1560         tavor_mrhdl_t           mrhdl;
1561         int                     status;
1562         ibt_mr_flags_t          flags = attrp->mr_flags;
1563 
1564         TAVOR_TNF_ENTER(tavor_ci_register_buf);
1565 


1566         ASSERT(mr_p != NULL);
1567         ASSERT(mr_desc != NULL);
1568 
1569         /* Check for valid HCA handle */
1570         if (hca == NULL) {
1571                 TNF_PROBE_0(tavor_ci_register_buf_invhca_fail,
1572                     TAVOR_TNF_ERROR, "");
1573                 TAVOR_TNF_EXIT(tavor_ci_register_buf);
1574                 return (IBT_HCA_HDL_INVALID);
1575         }
1576 
1577         /* Check for valid PD handle pointer */
1578         if (pd == NULL) {
1579                 TNF_PROBE_0(tavor_ci_register_buf_invpdhdl_fail,
1580                     TAVOR_TNF_ERROR, "");
1581                 TAVOR_TNF_EXIT(tavor_ci_register_buf);
1582                 return (IBT_PD_HDL_INVALID);
1583         }
1584 
1585         /*


1593                     TAVOR_TNF_ERROR, "");
1594                 TAVOR_TNF_EXIT(tavor_ci_register_buf);
1595                 return (IBT_MR_ACCESS_REQ_INVALID);
1596         }
1597 
1598         /* Grab the Tavor softstate pointer and PD handle */
1599         state = (tavor_state_t *)hca;
1600         pdhdl = (tavor_pdhdl_t)pd;
1601 
1602         /* Register the memory region */
1603         op.mro_bind_type   = state->ts_cfg_profile->cp_iommu_bypass;
1604         op.mro_bind_dmahdl = NULL;
1605         op.mro_bind_override_addr = 0;
1606         status = tavor_mr_register_buf(state, pdhdl, attrp, buf, &mrhdl, &op);
1607         if (status != DDI_SUCCESS) {
1608                 TNF_PROBE_1(tavor_ci_register_mr_fail, TAVOR_TNF_ERROR, "",
1609                     tnf_uint, status, status);
1610                 TAVOR_TNF_EXIT(tavor_ci_register_mr);
1611                 return (status);
1612         }

1613 
1614         /* Fill in the mr_desc structure */
1615         mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
1616         mr_desc->md_lkey  = mrhdl->mr_lkey;
1617         /* Only set RKey if remote access was requested */
1618         if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1619             (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1620             (flags & IBT_MR_ENABLE_REMOTE_READ)) {
1621                 mr_desc->md_rkey = mrhdl->mr_rkey;
1622         }
1623 
1624         /*
1625          * If region is mapped for streaming (i.e. noncoherent), then set
1626          * sync is required
1627          */
1628         mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
1629             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1630 
1631         /* Return the Tavor MR handle */
1632         *mr_p = (ibc_mr_hdl_t)mrhdl;


1739 
1740 
1741 /*
1742  * tavor_ci_register_shared_mr()
1743  *    Create a shared memory region matching an existing Memory Region
1744  *    Context: Can be called from interrupt or base context.
1745  */
1746 /* ARGSUSED */
1747 static ibt_status_t
1748 tavor_ci_register_shared_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
1749     ibc_pd_hdl_t pd, ibt_smr_attr_t *mr_attr, void *ibtl_reserved,
1750     ibc_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
1751 {
1752         tavor_state_t           *state;
1753         tavor_pdhdl_t           pdhdl;
1754         tavor_mrhdl_t           mrhdl, mrhdl_new;
1755         int                     status;
1756 
1757         TAVOR_TNF_ENTER(tavor_ci_register_shared_mr);
1758 


1759         ASSERT(mr_attr != NULL);
1760         ASSERT(mr_p != NULL);
1761         ASSERT(mr_desc != NULL);
1762 
1763         /* Check for valid HCA handle */
1764         if (hca == NULL) {
1765                 TNF_PROBE_0(tavor_ci_register_shared_mr_invhca_fail,
1766                     TAVOR_TNF_ERROR, "");
1767                 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1768                 return (IBT_HCA_HDL_INVALID);
1769         }
1770 
1771         /* Check for valid PD handle pointer */
1772         if (pd == NULL) {
1773                 TNF_PROBE_0(tavor_ci_register_shared_mr_invpdhdl_fail,
1774                     TAVOR_TNF_ERROR, "");
1775                 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1776                 return (IBT_PD_HDL_INVALID);
1777         }
1778 


1793                 TNF_PROBE_0(tavor_ci_register_shared_mr_accflags_inv,
1794                     TAVOR_TNF_ERROR, "");
1795                 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1796                 return (IBT_MR_ACCESS_REQ_INVALID);
1797         }
1798 
1799         /* Grab the Tavor softstate pointer and handles */
1800         state = (tavor_state_t *)hca;
1801         pdhdl = (tavor_pdhdl_t)pd;
1802         mrhdl = (tavor_mrhdl_t)mr;
1803 
1804         /* Register the shared memory region */
1805         status = tavor_mr_register_shared(state, mrhdl, pdhdl, mr_attr,
1806             &mrhdl_new);
1807         if (status != DDI_SUCCESS) {
1808                 TNF_PROBE_1(tavor_ci_register_shared_mr_fail, TAVOR_TNF_ERROR,
1809                     "", tnf_uint, status, status);
1810                 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1811                 return (status);
1812         }

1813 
1814         /* Fill in the mr_desc structure */
1815         mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1816         mr_desc->md_lkey  = mrhdl_new->mr_lkey;
1817         /* Only set RKey if remote access was requested */
1818         if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1819             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1820             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1821                 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1822         }
1823 
1824         /*
1825          * If shared region is mapped for streaming (i.e. noncoherent), then
1826          * set sync is required
1827          */
1828         mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1829             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1830 
1831         /* Return the Tavor MR handle */
1832         *mr_p = (ibc_mr_hdl_t)mrhdl_new;


1838 
1839 /*
1840  * tavor_ci_reregister_mr()
1841  *    Modify the attributes of an existing Memory Region
1842  *    Context: Can be called from interrupt or base context.
1843  */
1844 /* ARGSUSED */
1845 static ibt_status_t
1846 tavor_ci_reregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
1847     ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_new,
1848     ibt_mr_desc_t *mr_desc)
1849 {
1850         tavor_mr_options_t      op;
1851         tavor_state_t           *state;
1852         tavor_pdhdl_t           pdhdl;
1853         tavor_mrhdl_t           mrhdl, mrhdl_new;
1854         int                     status;
1855 
1856         TAVOR_TNF_ENTER(tavor_ci_reregister_mr);
1857 


1858         ASSERT(mr_attr != NULL);
1859         ASSERT(mr_new != NULL);
1860         ASSERT(mr_desc != NULL);
1861 
1862         /* Check for valid HCA handle */
1863         if (hca == NULL) {
1864                 TNF_PROBE_0(tavor_ci_reregister_mr_hca_inv, TAVOR_TNF_ERROR,
1865                     "");
1866                 TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
1867                 return (IBT_HCA_HDL_INVALID);
1868         }
1869 
1870         /* Check for valid memory region handle */
1871         if (mr == NULL) {
1872                 TNF_PROBE_0(tavor_ci_reregister_mr_invmrhdl_fail,
1873                     TAVOR_TNF_ERROR, "");
1874                 TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
1875                 return (IBT_MR_HDL_INVALID);
1876         }
1877 
1878         /* Grab the Tavor softstate pointer, mrhdl, and pdhdl */
1879         state = (tavor_state_t *)hca;
1880         mrhdl = (tavor_mrhdl_t)mr;
1881         pdhdl = (tavor_pdhdl_t)pd;
1882 
1883         /* Reregister the memory region */
1884         op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
1885         status = tavor_mr_reregister(state, mrhdl, pdhdl, mr_attr,
1886             &mrhdl_new, &op);
1887         if (status != DDI_SUCCESS) {
1888                 TNF_PROBE_1(tavor_ci_reregister_mr_fail, TAVOR_TNF_ERROR, "",
1889                     tnf_uint, status, status);
1890                 TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
1891                 return (status);
1892         }

1893 
1894         /* Fill in the mr_desc structure */
1895         mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1896         mr_desc->md_lkey  = mrhdl_new->mr_lkey;
1897         /* Only set RKey if remote access was requested */
1898         if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1899             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1900             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1901                 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1902         }
1903 
1904         /*
1905          * If region is mapped for streaming (i.e. noncoherent), then set
1906          * sync is required
1907          */
1908         mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1909             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1910 
1911         /* Return the Tavor MR handle */
1912         *mr_new = (ibc_mr_hdl_t)mrhdl_new;


1919 /*
1920  * tavor_ci_reregister_buf()
1921  *    Modify the attributes of an existing Memory Region
1922  *    Context: Can be called from interrupt or base context.
1923  */
1924 /* ARGSUSED */
1925 static ibt_status_t
1926 tavor_ci_reregister_buf(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
1927     ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
1928     ibc_mr_hdl_t *mr_new, ibt_mr_desc_t *mr_desc)
1929 {
1930         tavor_mr_options_t      op;
1931         tavor_state_t           *state;
1932         tavor_pdhdl_t           pdhdl;
1933         tavor_mrhdl_t           mrhdl, mrhdl_new;
1934         int                     status;
1935         ibt_mr_flags_t          flags = attrp->mr_flags;
1936 
1937         TAVOR_TNF_ENTER(tavor_ci_reregister_buf);
1938 


1939         ASSERT(mr_new != NULL);
1940         ASSERT(mr_desc != NULL);
1941 
1942         /* Check for valid HCA handle */
1943         if (hca == NULL) {
1944                 TNF_PROBE_0(tavor_ci_reregister_buf_hca_inv, TAVOR_TNF_ERROR,
1945                     "");
1946                 TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
1947                 return (IBT_HCA_HDL_INVALID);
1948         }
1949 
1950         /* Check for valid memory region handle */
1951         if (mr == NULL) {
1952                 TNF_PROBE_0(tavor_ci_reregister_buf_invmrhdl_fail,
1953                     TAVOR_TNF_ERROR, "");
1954                 TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
1955                 return (IBT_MR_HDL_INVALID);
1956         }
1957 
1958         /* Grab the Tavor softstate pointer, mrhdl, and pdhdl */
1959         state = (tavor_state_t *)hca;
1960         mrhdl = (tavor_mrhdl_t)mr;
1961         pdhdl = (tavor_pdhdl_t)pd;
1962 
1963         /* Reregister the memory region */
1964         op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
1965         status = tavor_mr_reregister_buf(state, mrhdl, pdhdl, attrp, buf,
1966             &mrhdl_new, &op);
1967         if (status != DDI_SUCCESS) {
1968                 TNF_PROBE_1(tavor_ci_reregister_buf_fail, TAVOR_TNF_ERROR, "",
1969                     tnf_uint, status, status);
1970                 TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
1971                 return (status);
1972         }

1973 
1974         /* Fill in the mr_desc structure */
1975         mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1976         mr_desc->md_lkey  = mrhdl_new->mr_lkey;
1977         /* Only set RKey if remote access was requested */
1978         if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1979             (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1980             (flags & IBT_MR_ENABLE_REMOTE_READ)) {
1981                 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1982         }
1983 
1984         /*
1985          * If region is mapped for streaming (i.e. noncoherent), then set
1986          * sync is required
1987          */
1988         mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1989             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1990 
1991         /* Return the Tavor MR handle */
1992         *mr_new = (ibc_mr_hdl_t)mrhdl_new;


2065         /* Check for valid PD handle pointer */
2066         if (pd == NULL) {
2067                 TNF_PROBE_0(tavor_ci_alloc_mw_invpdhdl_fail,
2068                     TAVOR_TNF_ERROR, "");
2069                 TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
2070                 return (IBT_PD_HDL_INVALID);
2071         }
2072 
2073         /* Grab the Tavor softstate pointer and PD handle */
2074         state = (tavor_state_t *)hca;
2075         pdhdl = (tavor_pdhdl_t)pd;
2076 
2077         /* Allocate the memory window */
2078         status = tavor_mw_alloc(state, pdhdl, flags, &mwhdl);
2079         if (status != DDI_SUCCESS) {
2080                 TNF_PROBE_1(tavor_ci_alloc_mw_fail, TAVOR_TNF_ERROR, "",
2081                     tnf_uint, status, status);
2082                 TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
2083                 return (status);
2084         }

2085 
2086         /* Return the MW handle and RKey */
2087         *mw_p = (ibc_mw_hdl_t)mwhdl;
2088         *rkey_p = mwhdl->mr_rkey;
2089 
2090         TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
2091         return (IBT_SUCCESS);
2092 }
2093 
2094 
2095 /*
2096  * tavor_ci_free_mw()
2097  *    Free a Memory Window
2098  *    Context: Can be called from interrupt or base context.
2099  */
2100 static ibt_status_t
2101 tavor_ci_free_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw)
2102 {
2103         tavor_state_t           *state;
2104         tavor_mwhdl_t           mwhdl;


2177         mw_attr_p->mw_pd   = (ibc_pd_hdl_t)mwhdl->mr_pdhdl;
2178         mw_attr_p->mw_rkey = mwhdl->mr_rkey;
2179         mutex_exit(&mwhdl->mr_lock);
2180 
2181         TAVOR_TNF_EXIT(tavor_ci_query_mw);
2182         return (IBT_SUCCESS);
2183 }
2184 
2185 
2186 /* ARGSUSED */
2187 static ibt_status_t
2188 tavor_ci_register_dma_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2189     ibt_dmr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
2190     ibt_mr_desc_t *mr_desc)
2191 {
2192         tavor_state_t           *state;
2193         tavor_pdhdl_t           pdhdl;
2194         tavor_mrhdl_t           mrhdl;
2195         int                     status;
2196 


2197         ASSERT(mr_attr != NULL);
2198         ASSERT(mr_p != NULL);
2199         ASSERT(mr_desc != NULL);
2200 
2201         /* Check for valid HCA handle */
2202         if (hca == NULL) {
2203                 return (IBT_HCA_HDL_INVALID);
2204         }
2205 
2206         /* Check for valid PD handle pointer */
2207         if (pd == NULL) {
2208                 return (IBT_PD_HDL_INVALID);
2209         }
2210 
2211         /*
2212          * Validate the access flags.  Both Remote Write and Remote Atomic
2213          * require the Local Write flag to be set
2214          */
2215         if (((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
2216             (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
2217             !(mr_attr->dmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
2218                 return (IBT_MR_ACCESS_REQ_INVALID);
2219         }
2220 
2221         /* Grab the Tavor softstate pointer and PD handle */
2222         state = (tavor_state_t *)hca;
2223         pdhdl = (tavor_pdhdl_t)pd;
2224 
2225         status = tavor_dma_mr_register(state, pdhdl, mr_attr, &mrhdl);
2226         if (status != DDI_SUCCESS) {
2227                 return (status);
2228         }

2229 
2230         /* Fill in the mr_desc structure */
2231         mr_desc->md_vaddr = mr_attr->dmr_paddr;
2232         mr_desc->md_lkey  = mrhdl->mr_lkey;
2233         /* Only set RKey if remote access was requested */
2234         if ((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
2235             (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
2236             (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
2237                 mr_desc->md_rkey = mrhdl->mr_rkey;
2238         }
2239 
2240         /*
2241          * If region is mapped for streaming (i.e. noncoherent), then set
2242          * sync is required
2243          */
2244         mr_desc->md_sync_required = B_FALSE;
2245 
2246         /* Return the Tavor MR handle */
2247         *mr_p = (ibc_mr_hdl_t)mrhdl;
2248 


3031 {
3032         return (IBT_NOT_SUPPORTED);
3033 }
3034 
3035 /*
3036  * tavor_ci_unmap_mem_area()
3037  * Unmap the memory area
3038  *    Context: Can be called from interrupt or base context.
3039  */
3040 /* ARGSUSED */
3041 static ibt_status_t
3042 tavor_ci_unmap_mem_area(ibc_hca_hdl_t hca, ibc_ma_hdl_t ma_hdl)
3043 {
	/*
	 * Memory area unmapping is not implemented in this driver;
	 * both arguments are ignored (hence ARGSUSED above) and the
	 * operation is reported as unsupported to the IBTF caller.
	 */
3044         return (IBT_NOT_SUPPORTED);
3045 }
3046 
/*
 * Handle state for tavor_ci_map_mem_iov(): tracks the set of DDI DMA
 * handles created for one mapped iov list so they can be released later.
 */
3047 struct ibc_mi_s {
	/* Number of valid entries in imh_dmahandle[] */
3048         int                     imh_len;
	/*
	 * Variable-length trailing array: declared with one element and
	 * allocated oversized, i.e. kmem_alloc(sizeof (*mi_hdl) +
	 * (n - 1) * sizeof (ddi_dma_handle_t)).  Do not change to a
	 * flexible array member without fixing that size arithmetic.
	 */
3049         ddi_dma_handle_t        imh_dmahandle[1];
3050 };



3051 

3052 /*
3053  * tavor_ci_map_mem_iov()
3054  * Map the memory
3055  *    Context: Can be called from interrupt or base context.
3056  */
3057 /* ARGSUSED */
3058 static ibt_status_t
3059 tavor_ci_map_mem_iov(ibc_hca_hdl_t hca, ibt_iov_attr_t *iov_attr,
3060     ibt_all_wr_t *wr, ibc_mi_hdl_t *mi_hdl_p)
3061 {
3062         int                     status;
3063         int                     i, j, nds, max_nds;
3064         uint_t                  len;
3065         ibt_status_t            ibt_status;
3066         ddi_dma_handle_t        dmahdl;
3067         ddi_dma_cookie_t        dmacookie;
3068         ddi_dma_attr_t          dma_attr;
3069         uint_t                  cookie_cnt;
3070         ibc_mi_hdl_t            mi_hdl;
3071         ibt_lkey_t              rsvd_lkey;
3072         ibt_wr_ds_t             *sgl;
3073         tavor_state_t           *state;
3074         int                     kmflag;
3075         int                     (*callback)(caddr_t);
3076 


3077         if (mi_hdl_p == NULL)
3078                 return (IBT_MI_HDL_INVALID);
3079 
3080         /* Check for valid HCA handle */
3081         if (hca == NULL)
3082                 return (IBT_HCA_HDL_INVALID);
3083 
3084         /* Tavor does not allow the default "use reserved lkey" */
3085         if ((iov_attr->iov_flags & IBT_IOV_ALT_LKEY) == 0)
3086                 return (IBT_INVALID_PARAM);
3087 
3088         rsvd_lkey = iov_attr->iov_alt_lkey;
3089 
3090         state = (tavor_state_t *)hca;
3091         tavor_dma_attr_init(&dma_attr);
3092 #ifdef  __sparc
3093         if (state->ts_cfg_profile->cp_iommu_bypass == TAVOR_BINDMEM_BYPASS)
3094                 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
3095 #endif
3096 
3097         nds = 0;
3098         max_nds = iov_attr->iov_wr_nds;
3099         if (iov_attr->iov_lso_hdr_sz)
3100                 max_nds -= (iov_attr->iov_lso_hdr_sz + sizeof (uint32_t) +
3101                     0xf) >> 4;    /* 0xf is for rounding up to a multiple of 16 */
3102         if ((iov_attr->iov_flags & IBT_IOV_NOSLEEP) == 0) {
3103                 kmflag = KM_SLEEP;
3104                 callback = DDI_DMA_SLEEP;
3105         } else {
3106                 kmflag = KM_NOSLEEP;
3107                 callback = DDI_DMA_DONTWAIT;
3108         }
3109 
3110         if (iov_attr->iov_flags & IBT_IOV_BUF) {
3111                 mi_hdl = kmem_alloc(sizeof (*mi_hdl), kmflag);
3112                 if (mi_hdl == NULL)
3113                         return (IBT_INSUFF_RESOURCE);
3114                 sgl = wr->send.wr_sgl;

3115 
3116                 status = ddi_dma_alloc_handle(state->ts_dip, &dma_attr,
3117                     callback, NULL, &dmahdl);
3118                 if (status != DDI_SUCCESS) {
3119                         kmem_free(mi_hdl, sizeof (*mi_hdl));
3120                         return (IBT_INSUFF_RESOURCE);
3121                 }
3122                 status = ddi_dma_buf_bind_handle(dmahdl, iov_attr->iov_buf,
3123                     DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
3124                     &dmacookie, &cookie_cnt);
3125                 if (status != DDI_DMA_MAPPED) {
3126                         ddi_dma_free_handle(&dmahdl);
3127                         kmem_free(mi_hdl, sizeof (*mi_hdl));
3128                         return (ibc_get_ci_failure(0));
3129                 }
3130                 while (cookie_cnt-- > 0) {
3131                         if (nds > max_nds) {
3132                                 status = ddi_dma_unbind_handle(dmahdl);
3133                                 ddi_dma_free_handle(&dmahdl);
3134                                 return (IBT_SGL_TOO_SMALL);
3135                         }
3136                         sgl[nds].ds_va = dmacookie.dmac_laddress;
3137                         sgl[nds].ds_key = rsvd_lkey;
3138                         sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
3139                         nds++;
3140                         if (cookie_cnt != 0)
3141                                 ddi_dma_nextcookie(dmahdl, &dmacookie);
3142                 }
3143                 wr->send.wr_nds = nds;
3144                 mi_hdl->imh_len = 1;
3145                 mi_hdl->imh_dmahandle[0] = dmahdl;
3146                 *mi_hdl_p = mi_hdl;
3147                 return (IBT_SUCCESS);
3148         }
3149 
3150         if (iov_attr->iov_flags & IBT_IOV_RECV)
3151                 sgl = wr->recv.wr_sgl;
3152         else
3153                 sgl = wr->send.wr_sgl;

3154 
3155         len = iov_attr->iov_list_len;
3156         for (i = 0, j = 0; j < len; j++) {
3157                 if (iov_attr->iov[j].iov_len == 0)
3158                         continue;
3159                 i++;
3160         }
3161         mi_hdl = kmem_alloc(sizeof (*mi_hdl) +
3162             (i - 1) * sizeof (ddi_dma_handle_t), kmflag);
3163         if (mi_hdl == NULL)
3164                 return (IBT_INSUFF_RESOURCE);
3165         mi_hdl->imh_len = i;
3166         for (i = 0, j = 0; j < len; j++) {
3167                 if (iov_attr->iov[j].iov_len == 0)
3168                         continue;
3169                 status = ddi_dma_alloc_handle(state->ts_dip, &dma_attr,
3170                     callback, NULL, &dmahdl);
3171                 if (status != DDI_SUCCESS) {
3172                         ibt_status = IBT_INSUFF_RESOURCE;
3173                         goto fail2;


3418 
3419         /* Check for valid HCA handle */
3420         if (hca == NULL) {
3421                 TNF_PROBE_0(tavor_ci_free_io_mem_invhca_fail,
3422                     TAVOR_TNF_ERROR, "");
3423                 TAVOR_TNF_EXIT(tavor_ci_free_io_mem);
3424                 return (IBT_HCA_HDL_INVALID);
3425         }
3426 
3427         /* Check for valid mem_alloc_hdl handle pointer */
3428         if (mem_alloc_hdl == NULL) {
3429                 TNF_PROBE_0(tavor_ci_free_io_mem_hdl_fail,
3430                     TAVOR_TNF_ERROR, "");
3431                 TAVOR_TNF_EXIT(tavor_ci_free_io_mem);
3432                 return (IBT_MEM_ALLOC_HDL_INVALID);
3433         }
3434 
3435         memhdl = (tavor_mem_alloc_hdl_t)mem_alloc_hdl;
3436 
3437         /* free the memory */

3438         ddi_dma_mem_free(&memhdl->tavor_acc_hdl);
3439         ddi_dma_free_handle(&memhdl->tavor_dma_hdl);
3440 
3441         kmem_free(memhdl, sizeof (*memhdl));
3442         TAVOR_TNF_EXIT(tavor_dma_free);
3443         return (IBT_SUCCESS);
3444 }
3445 
3446 
3447 int
3448 tavor_mem_alloc(
3449         tavor_state_t *state,
3450         size_t size,
3451         ibt_mr_flags_t flags,
3452         caddr_t *kaddrp,
3453         tavor_mem_alloc_hdl_t *mem_hdl)
3454 {
3455         ddi_dma_handle_t        dma_hdl;
3456         ddi_dma_attr_t          dma_attr;
3457         ddi_acc_handle_t        acc_hdl;


3479             &state->ts_reg_accattr, DDI_DMA_CONSISTENT, ddi_cb,
3480             NULL,
3481             kaddrp, &real_len, &acc_hdl);
3482         if (status != DDI_SUCCESS) {
3483                 ddi_dma_free_handle(&dma_hdl);
3484                 TNF_PROBE_0(tavor_dma_alloc_memory_fail, TAVOR_TNF_ERROR, "");
3485                 TAVOR_TNF_EXIT(tavor_mem_alloc);
3486                 return (DDI_FAILURE);
3487         }
3488 
3489         /* Package the tavor_dma_info contents and return */
3490         *mem_hdl = kmem_alloc(sizeof (**mem_hdl),
3491             flags & IBT_MR_NOSLEEP ? KM_NOSLEEP : KM_SLEEP);
3492         if (*mem_hdl == NULL) {
3493                 ddi_dma_mem_free(&acc_hdl);
3494                 ddi_dma_free_handle(&dma_hdl);
3495                 TNF_PROBE_0(tavor_dma_alloc_memory_fail, TAVOR_TNF_ERROR, "");
3496                 TAVOR_TNF_EXIT(tavor_mem_alloc);
3497                 return (DDI_FAILURE);
3498         }

3499         (*mem_hdl)->tavor_dma_hdl = dma_hdl;
3500         (*mem_hdl)->tavor_acc_hdl = acc_hdl;
3501 
3502         TAVOR_TNF_EXIT(tavor_mem_alloc);
3503         return (DDI_SUCCESS);
3504 }