8368 remove warlock leftovers from usr/src/uts


 635         status = hermon_ah_modify(state, ahhdl, attr_p);
 636 
 637         return (status);
 638 }
 639 
 640 
 641 /*
 642  * hermon_ci_alloc_qp()
 643  *    Allocate a Queue Pair
 644  *    Context: Can be called only from user or kernel context.
 645  */
 646 static ibt_status_t
 647 hermon_ci_alloc_qp(ibc_hca_hdl_t hca, ibtl_qp_hdl_t ibt_qphdl,
 648     ibt_qp_type_t type, ibt_qp_alloc_attr_t *attr_p,
 649     ibt_chan_sizes_t *queue_sizes_p, ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
 650 {
 651         hermon_state_t          *state;
 652         hermon_qp_info_t        qpinfo;
 653         int                     status;
 654 
 655         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
 656         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))
 657 
 658         /* Grab the Hermon softstate pointer */
 659         state = (hermon_state_t *)hca;
 660 
 661         /* Allocate the QP */
 662         qpinfo.qpi_attrp        = attr_p;
 663         qpinfo.qpi_type         = type;
 664         qpinfo.qpi_ibt_qphdl    = ibt_qphdl;
 665         qpinfo.qpi_queueszp     = queue_sizes_p;
 666         qpinfo.qpi_qpn          = qpn;
 667         status = hermon_qp_alloc(state, &qpinfo, HERMON_NOSLEEP);
 668         if (status != DDI_SUCCESS) {
 669                 return (status);
 670         }
 671 
 672         /* Return the Hermon QP handle */
 673         *qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;
 674 
 675         return (IBT_SUCCESS);
 676 }
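
The _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS()) lines being removed throughout this
file were annotations for the retired warlock/lock-lint static analysis tools;
in an ordinary kernel build they expand to nothing, so deleting them cannot
change the generated code. A minimal sketch of why, assuming the conventional
sys/note.h definition (paraphrased here, not quoted from the header):

    #ifndef _NOTE
    #define _NOTE(s)        /* no-op unless an analysis tool defines it */
    #endif

    /* After preprocessing, the annotation vanishes entirely. */
    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))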
 677 
 678 
 679 /*
 680  * hermon_ci_alloc_special_qp()
 681  *    Allocate a Special Queue Pair
 682  *    Context: Can be called only from user or kernel context.
 683  */
 684 static ibt_status_t
 685 hermon_ci_alloc_special_qp(ibc_hca_hdl_t hca, uint8_t port,
 686     ibtl_qp_hdl_t ibt_qphdl, ibt_sqp_type_t type,
 687     ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
 688     ibc_qp_hdl_t *qp_p)
 689 {
 690         hermon_state_t          *state;
 691         hermon_qp_info_t        qpinfo;
 692         int                     status;
 693 
 694         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
 695         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))
 696 
 697         /* Grab the Hermon softstate pointer */
 698         state = (hermon_state_t *)hca;
 699 
 700         /* Allocate the Special QP */
 701         qpinfo.qpi_attrp        = attr_p;
 702         qpinfo.qpi_type         = type;
 703         qpinfo.qpi_port         = port;
 704         qpinfo.qpi_ibt_qphdl    = ibt_qphdl;
 705         qpinfo.qpi_queueszp     = queue_sizes_p;
 706         status = hermon_special_qp_alloc(state, &qpinfo, HERMON_NOSLEEP);
 707         if (status != DDI_SUCCESS) {
 708                 return (status);
 709         }
 710         /* Return the Hermon QP handle */
 711         *qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;
 712 
 713         return (IBT_SUCCESS);
 714 }
 715 
 716 /*
 717  * hermon_ci_alloc_qp_range()
 718  *    Allocate a range of Queue Pairs
 719  *    Context: Can be called only from user or kernel context.
 720  */
 721 /* ARGSUSED */
 722 static ibt_status_t
 723 hermon_ci_alloc_qp_range(ibc_hca_hdl_t hca, uint_t log2,
 724     ibtl_qp_hdl_t *ibtl_qp, ibt_qp_type_t type,
 725     ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
 726     ibc_cq_hdl_t *send_cq, ibc_cq_hdl_t *recv_cq,
 727     ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
 728 {
 729         hermon_state_t          *state;
 730         hermon_qp_info_t        qpinfo;
 731         int                     status;
 732 
 733         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
 734         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))
 735 
 736         /* Grab the Hermon softstate pointer */
 737         state = (hermon_state_t *)hca;
 738 
 739         /* Allocate the QP */
 740         qpinfo.qpi_attrp        = attr_p;
 741         qpinfo.qpi_type         = type;
 742         qpinfo.qpi_queueszp     = queue_sizes_p;
 743         qpinfo.qpi_qpn          = qpn;
 744         status = hermon_qp_alloc_range(state, log2, &qpinfo, ibtl_qp,
 745             send_cq, recv_cq, (hermon_qphdl_t *)qp_p, HERMON_NOSLEEP);
 746         return (status);
 747 }
 748 
 749 /*
 750  * hermon_ci_free_qp()
 751  *    Free a Queue Pair
 752  *    Context: Can be called only from user or kernel context.
 753  */
 754 static ibt_status_t
 755 hermon_ci_free_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,


 896 /*
 897  * hermon_ci_query_cq()
 898  *    Return the size of a Completion Queue
 899  *    Context: Can be called only from user or kernel context.
 900  */
 901 static ibt_status_t
 902 hermon_ci_query_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t *entries_p,
 903     uint_t *count_p, uint_t *usec_p, ibt_cq_handler_id_t *hid_p)
 904 {
 905         hermon_state_t  *state;
 906         hermon_cqhdl_t  cqhdl;
 907 
 908         /* Grab the CQ handle */
 909         state = (hermon_state_t *)hca;
 910         cqhdl = (hermon_cqhdl_t)cq;
 911 
 912         /* Query the current CQ size */
 913         *entries_p = cqhdl->cq_bufsz;
 914         *count_p = cqhdl->cq_intmod_count;
 915         *usec_p = cqhdl->cq_intmod_usec;
 916         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*cqhdl))
 917         *hid_p = HERMON_EQNUM_TO_HID(state, cqhdl->cq_eqnum);
 918 
 919         return (IBT_SUCCESS);
 920 }
 921 
 922 
 923 /*
 924  * hermon_ci_resize_cq()
 925  *    Change the size of a Completion Queue
 926  *    Context: Can be called only from user or kernel context.
 927  */
 928 static ibt_status_t
 929 hermon_ci_resize_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t size,
 930     uint_t *actual_size)
 931 {
 932         hermon_state_t          *state;
 933         hermon_cqhdl_t          cqhdl;
 934         int                     status;
 935 
 936         /* Grab the Hermon softstate pointer and CQ handle */


 998 hermon_ci_free_cq_sched(ibc_hca_hdl_t hca, ibc_sched_hdl_t sched_hdl)
 999 {
1000         int     status;
1001 
1002         status = hermon_cq_sched_free((hermon_state_t *)hca,
1003             (hermon_cq_sched_t *)sched_hdl);
1004         return (status);
1005 }
1006 
1007 static ibt_status_t
1008 hermon_ci_query_cq_handler_id(ibc_hca_hdl_t hca,
1009     ibt_cq_handler_id_t hid, ibt_cq_handler_attr_t *attrs)
1010 {
1011         hermon_state_t          *state;
1012 
1013         state = (hermon_state_t *)hca;
1014         if (!HERMON_HID_VALID(state, hid))
1015                 return (IBT_CQ_HID_INVALID);
1016         if (attrs == NULL)
1017                 return (IBT_INVALID_PARAM);
1018         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attrs))
1019         attrs->cha_ih = state->hs_intrmsi_hdl[hid - 1];
1020         attrs->cha_dip = state->hs_dip;
1021         return (IBT_SUCCESS);
1022 }
1023 
1024 /*
1025  * hermon_ci_alloc_eec()
1026  *    Allocate an End-to-End context
1027  *    Context: Can be called only from user or kernel context.
1028  */
1029 /* ARGSUSED */
1030 static ibt_status_t
1031 hermon_ci_alloc_eec(ibc_hca_hdl_t hca, ibc_eec_flags_t flags,
1032     ibt_eec_hdl_t ibt_eec, ibc_rdd_hdl_t rdd, ibc_eec_hdl_t *eec_p)
1033 {
1034         /*
1035          * This is an unsupported interface for the Hermon driver.  This
1036          * interface is necessary to support Reliable Datagram (RD)
1037          * operations.  Hermon does not support RD.
1038          */


1096 }
1097 
1098 
1099 /*
1100  * hermon_ci_register_mr()
1101  *    Prepare a virtually addressed Memory Region for use by an HCA
1102  *    Context: Can be called from interrupt or base context.
1103  */
1104 /* ARGSUSED */
1105 static ibt_status_t
1106 hermon_ci_register_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1107     ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
1108     ibt_mr_desc_t *mr_desc)
1109 {
1110         hermon_mr_options_t     op;
1111         hermon_state_t          *state;
1112         hermon_pdhdl_t          pdhdl;
1113         hermon_mrhdl_t          mrhdl;
1114         int                     status;
1115 
1116         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1117 
1118         ASSERT(mr_attr != NULL);
1119         ASSERT(mr_p != NULL);
1120         ASSERT(mr_desc != NULL);
1121 
1122         /*
1123          * Validate the access flags.  Both Remote Write and Remote Atomic
1124          * require the Local Write flag to be set
1125          */
1126         if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1127             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1128             !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1129                 return (IBT_MR_ACCESS_REQ_INVALID);
1130         }
1131 
1132         /* Grab the Hermon softstate pointer and PD handle */
1133         state = (hermon_state_t *)hca;
1134         pdhdl = (hermon_pdhdl_t)pd;
1135 
1136         /* Register the memory region */
1137         op.mro_bind_type   = state->hs_cfg_profile->cp_iommu_bypass;
1138         op.mro_bind_dmahdl = NULL;
1139         op.mro_bind_override_addr = 0;
1140         status = hermon_mr_register(state, pdhdl, mr_attr, &mrhdl,
1141             &op, HERMON_MPT_DMPT);
1142         if (status != DDI_SUCCESS) {
1143                 return (status);
1144         }
1145         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
1146 
1147         /* Fill in the mr_desc structure */
1148         mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
1149         mr_desc->md_lkey  = mrhdl->mr_lkey;
1150         /* Only set RKey if remote access was requested */
1151         if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1152             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1153             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1154                 mr_desc->md_rkey = mrhdl->mr_rkey;
1155         }
1156 
1157         /*
1158          * If region is mapped for streaming (i.e. noncoherent), then set
1159          * sync is required
1160          */
1161         mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
1162             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1163 
1164         /* Return the Hermon MR handle */
1165         *mr_p = (ibc_mr_hdl_t)mrhdl;
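
The access-flag test above recurs in every registration path in this file:
IBT_MR_ENABLE_REMOTE_WRITE and IBT_MR_ENABLE_REMOTE_ATOMIC each require
IBT_MR_ENABLE_LOCAL_WRITE. A hypothetical helper that states the invariant in
one place (hermon_mr_access_ok is illustrative only; the driver open-codes the
check in each function):

    /* Illustrative helper; not part of the driver. */
    static boolean_t
    hermon_mr_access_ok(ibt_mr_flags_t flags)
    {
            if (((flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
                (flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
                !(flags & IBT_MR_ENABLE_LOCAL_WRITE))
                    return (B_FALSE);   /* caller: IBT_MR_ACCESS_REQ_INVALID */
            return (B_TRUE);
    }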


1169 
1170 
1171 /*
1172  * hermon_ci_register_buf()
1173  *    Prepare a Memory Region specified by buf structure for use by an HCA
1174  *    Context: Can be called from interrupt or base context.
1175  */
1176 /* ARGSUSED */
1177 static ibt_status_t
1178 hermon_ci_register_buf(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1179     ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
1180     ibt_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
1181 {
1182         hermon_mr_options_t     op;
1183         hermon_state_t          *state;
1184         hermon_pdhdl_t          pdhdl;
1185         hermon_mrhdl_t          mrhdl;
1186         int                     status;
1187         ibt_mr_flags_t          flags = attrp->mr_flags;
1188 
1189         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1190 
1191         ASSERT(mr_p != NULL);
1192         ASSERT(mr_desc != NULL);
1193 
1194         /*
1195          * Validate the access flags.  Both Remote Write and Remote Atomic
1196          * require the Local Write flag to be set
1197          */
1198         if (((flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1199             (flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1200             !(flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1201                 return (IBT_MR_ACCESS_REQ_INVALID);
1202         }
1203 
1204         /* Grab the Hermon softstate pointer and PD handle */
1205         state = (hermon_state_t *)hca;
1206         pdhdl = (hermon_pdhdl_t)pd;
1207 
1208         /* Register the memory region */
1209         op.mro_bind_type   = state->hs_cfg_profile->cp_iommu_bypass;
1210         op.mro_bind_dmahdl = NULL;
1211         op.mro_bind_override_addr = 0;
1212         status = hermon_mr_register_buf(state, pdhdl, attrp, buf,
1213             &mrhdl, &op, HERMON_MPT_DMPT);
1214         if (status != DDI_SUCCESS) {
1215                 return (status);
1216         }
1217         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
1218 
1219         /* Fill in the mr_desc structure */
1220         mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
1221         mr_desc->md_lkey  = mrhdl->mr_lkey;
1222         /* Only set RKey if remote access was requested */
1223         if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1224             (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1225             (flags & IBT_MR_ENABLE_REMOTE_READ)) {
1226                 mr_desc->md_rkey = mrhdl->mr_rkey;
1227         }
1228 
1229         /*
1230          * If region is mapped for streaming (i.e. noncoherent), then set
1231          * sync is required
1232          */
1233         mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
1234             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1235 
1236         /* Return the Hermon MR handle */
1237         *mr_p = (ibc_mr_hdl_t)mrhdl;


1289         return (status);
1290 }
1291 
1292 
1293 /*
1294  * hermon_ci_register_shared_mr()
1295  *    Create a shared memory region matching an existing Memory Region
1296  *    Context: Can be called from interrupt or base context.
1297  */
1298 /* ARGSUSED */
1299 static ibt_status_t
1300 hermon_ci_register_shared_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
1301     ibc_pd_hdl_t pd, ibt_smr_attr_t *mr_attr, void *ibtl_reserved,
1302     ibc_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
1303 {
1304         hermon_state_t          *state;
1305         hermon_pdhdl_t          pdhdl;
1306         hermon_mrhdl_t          mrhdl, mrhdl_new;
1307         int                     status;
1308 
1309         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1310 
1311         ASSERT(mr_attr != NULL);
1312         ASSERT(mr_p != NULL);
1313         ASSERT(mr_desc != NULL);
1314 
1315         /*
1316          * Validate the access flags.  Both Remote Write and Remote Atomic
1317          * require the Local Write flag to be set
1318          */
1319         if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1320             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1321             !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1322                 return (IBT_MR_ACCESS_REQ_INVALID);
1323         }
1324 
1325         /* Grab the Hermon softstate pointer and handles */
1326         state = (hermon_state_t *)hca;
1327         pdhdl = (hermon_pdhdl_t)pd;
1328         mrhdl = (hermon_mrhdl_t)mr;
1329 
1330         /* Register the shared memory region */
1331         status = hermon_mr_register_shared(state, mrhdl, pdhdl, mr_attr,
1332             &mrhdl_new);
1333         if (status != DDI_SUCCESS) {
1334                 return (status);
1335         }
1336         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1337 
1338         /* Fill in the mr_desc structure */
1339         mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1340         mr_desc->md_lkey  = mrhdl_new->mr_lkey;
1341         /* Only set RKey if remote access was requested */
1342         if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1343             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1344             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1345                 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1346         }
1347 
1348         /*
1349          * If shared region is mapped for streaming (i.e. noncoherent), then
1350          * set sync is required
1351          */
1352         mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1353             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1354 
1355         /* Return the Hermon MR handle */
1356         *mr_p = (ibc_mr_hdl_t)mrhdl_new;


1359 }
1360 
1361 
1362 /*
1363  * hermon_ci_reregister_mr()
1364  *    Modify the attributes of an existing Memory Region
1365  *    Context: Can be called from interrupt or base context.
1366  */
1367 /* ARGSUSED */
1368 static ibt_status_t
1369 hermon_ci_reregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
1370     ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_new,
1371     ibt_mr_desc_t *mr_desc)
1372 {
1373         hermon_mr_options_t     op;
1374         hermon_state_t          *state;
1375         hermon_pdhdl_t          pdhdl;
1376         hermon_mrhdl_t          mrhdl, mrhdl_new;
1377         int                     status;
1378 
1379         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1380 
1381         ASSERT(mr_attr != NULL);
1382         ASSERT(mr_new != NULL);
1383         ASSERT(mr_desc != NULL);
1384 
1385         /* Grab the Hermon softstate pointer, mrhdl, and pdhdl */
1386         state = (hermon_state_t *)hca;
1387         mrhdl = (hermon_mrhdl_t)mr;
1388         pdhdl = (hermon_pdhdl_t)pd;
1389 
1390         /* Reregister the memory region */
1391         op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
1392         status = hermon_mr_reregister(state, mrhdl, pdhdl, mr_attr,
1393             &mrhdl_new, &op);
1394         if (status != DDI_SUCCESS) {
1395                 return (status);
1396         }
1397         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1398 
1399         /* Fill in the mr_desc structure */
1400         mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1401         mr_desc->md_lkey  = mrhdl_new->mr_lkey;
1402         /* Only set RKey if remote access was requested */
1403         if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1404             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1405             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1406                 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1407         }
1408 
1409         /*
1410          * If region is mapped for streaming (i.e. noncoherent), then set
1411          * sync is required
1412          */
1413         mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1414             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1415 
1416         /* Return the Hermon MR handle */
1417         *mr_new = (ibc_mr_hdl_t)mrhdl_new;


1421 
1422 
1423 /*
1424  * hermon_ci_reregister_buf()
1425  *    Modify the attributes of an existing Memory Region
1426  *    Context: Can be called from interrupt or base context.
1427  */
1428 /* ARGSUSED */
1429 static ibt_status_t
1430 hermon_ci_reregister_buf(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
1431     ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
1432     ibc_mr_hdl_t *mr_new, ibt_mr_desc_t *mr_desc)
1433 {
1434         hermon_mr_options_t     op;
1435         hermon_state_t          *state;
1436         hermon_pdhdl_t          pdhdl;
1437         hermon_mrhdl_t          mrhdl, mrhdl_new;
1438         int                     status;
1439         ibt_mr_flags_t          flags = attrp->mr_flags;
1440 
1441         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1442 
1443         ASSERT(mr_new != NULL);
1444         ASSERT(mr_desc != NULL);
1445 
1446         /* Grab the Hermon softstate pointer, mrhdl, and pdhdl */
1447         state = (hermon_state_t *)hca;
1448         mrhdl = (hermon_mrhdl_t)mr;
1449         pdhdl = (hermon_pdhdl_t)pd;
1450 
1451         /* Reregister the memory region */
1452         op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
1453         status = hermon_mr_reregister_buf(state, mrhdl, pdhdl, attrp, buf,
1454             &mrhdl_new, &op);
1455         if (status != DDI_SUCCESS) {
1456                 return (status);
1457         }
1458         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1459 
1460         /* Fill in the mr_desc structure */
1461         mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1462         mr_desc->md_lkey  = mrhdl_new->mr_lkey;
1463         /* Only set RKey if remote access was requested */
1464         if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1465             (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1466             (flags & IBT_MR_ENABLE_REMOTE_READ)) {
1467                 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1468         }
1469 
1470         /*
1471          * If region is mapped for streaming (i.e. noncoherent), then set
1472          * sync is required
1473          */
1474         mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1475             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1476 
1477         /* Return the Hermon MR handle */
1478         *mr_new = (ibc_mr_hdl_t)mrhdl_new;


1511 hermon_ci_alloc_mw(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, ibt_mw_flags_t flags,
1512     ibc_mw_hdl_t *mw_p, ibt_rkey_t *rkey_p)
1513 {
1514         hermon_state_t          *state;
1515         hermon_pdhdl_t          pdhdl;
1516         hermon_mwhdl_t          mwhdl;
1517         int                     status;
1518 
1519         ASSERT(mw_p != NULL);
1520         ASSERT(rkey_p != NULL);
1521 
1522         /* Grab the Hermon softstate pointer and PD handle */
1523         state = (hermon_state_t *)hca;
1524         pdhdl = (hermon_pdhdl_t)pd;
1525 
1526         /* Allocate the memory window */
1527         status = hermon_mw_alloc(state, pdhdl, flags, &mwhdl);
1528         if (status != DDI_SUCCESS) {
1529                 return (status);
1530         }
1531         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mwhdl))
1532 
1533         /* Return the MW handle and RKey */
1534         *mw_p = (ibc_mw_hdl_t)mwhdl;
1535         *rkey_p = mwhdl->mr_rkey;
1536 
1537         return (IBT_SUCCESS);
1538 }
1539 
1540 
1541 /*
1542  * hermon_ci_free_mw()
1543  *    Free a Memory Window
1544  *    Context: Can be called from interrupt or base context.
1545  */
1546 static ibt_status_t
1547 hermon_ci_free_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw)
1548 {
1549         hermon_state_t          *state;
1550         hermon_mwhdl_t          mwhdl;
1551         int                     status;


1584         return (IBT_SUCCESS);
1585 }
1586 
1587 
1588 /*
1589  * hermon_ci_register_dma_mr()
1590  *    Allocate a memory region that maps physical addresses.
1591  *    Context: Can be called only from user or kernel context.
1592  */
1593 /* ARGSUSED */
1594 static ibt_status_t
1595 hermon_ci_register_dma_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1596     ibt_dmr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
1597     ibt_mr_desc_t *mr_desc)
1598 {
1599         hermon_state_t          *state;
1600         hermon_pdhdl_t          pdhdl;
1601         hermon_mrhdl_t          mrhdl;
1602         int                     status;
1603 
1604         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1605 
1606         ASSERT(mr_attr != NULL);
1607         ASSERT(mr_p != NULL);
1608         ASSERT(mr_desc != NULL);
1609 
1610         /*
1611          * Validate the access flags.  Both Remote Write and Remote Atomic
1612          * require the Local Write flag to be set
1613          */
1614         if (((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1615             (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1616             !(mr_attr->dmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1617                 return (IBT_MR_ACCESS_REQ_INVALID);
1618         }
1619 
1620         /* Grab the Hermon softstate pointer and PD handle */
1621         state = (hermon_state_t *)hca;
1622         pdhdl = (hermon_pdhdl_t)pd;
1623 
1624         status = hermon_dma_mr_register(state, pdhdl, mr_attr, &mrhdl);
1625         if (status != DDI_SUCCESS) {
1626                 return (status);
1627         }
1628         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
1629 
1630         /* Fill in the mr_desc structure */
1631         mr_desc->md_vaddr = mr_attr->dmr_paddr;
1632         mr_desc->md_lkey  = mrhdl->mr_lkey;
1633         /* Only set RKey if remote access was requested */
1634         if ((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1635             (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1636             (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1637                 mr_desc->md_rkey = mrhdl->mr_rkey;
1638         }
1639 
1640         /*
1641          * If region is mapped for streaming (i.e. noncoherent), then set
1642          * sync is required
1643          */
1644         mr_desc->md_sync_required = B_FALSE;
1645 
1646         /* Return the Hermon MR handle */
1647         *mr_p = (ibc_mr_hdl_t)mrhdl;
1648 


2078         if (va_attrs->va_flags & IBT_VA_NOSLEEP) {
2079                 kmflag = KM_NOSLEEP;
2080                 callback = DDI_DMA_DONTWAIT;
2081         } else {
2082                 kmflag = KM_SLEEP;
2083                 callback = DDI_DMA_SLEEP;
2084         }
2085 
2086         ma_hdl = kmem_zalloc(sizeof (*ma_hdl), kmflag);
2087         if (ma_hdl == NULL) {
2088                 return (IBT_INSUFF_RESOURCE);
2089         }
2090 #ifdef  __sparc
2091         if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
2092                 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2093 
2094         if (hermon_kernel_data_ro == HERMON_RO_ENABLED)
2095                 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2096 #endif
2097 
2098         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ma_hdl))
2099         status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2100             callback, NULL, &ma_hdl->h_ma_dmahdl);
2101         if (status != DDI_SUCCESS) {
2102                 kmem_free(ma_hdl, sizeof (*ma_hdl));
2103                 return (IBT_INSUFF_RESOURCE);
2104         }
2105         status = ddi_dma_buf_bind_handle(ma_hdl->h_ma_dmahdl,
2106             va_attrs->va_buf, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2107             callback, NULL, &dmacookie, &cookie_cnt);
2108         if (status != DDI_DMA_MAPPED) {
 2110                 ibt_status = ibc_get_ci_failure(0);
2110                 goto marea_fail3;
2111         }
2112 
2113         ma_hdl->h_ma_real_len = list_len * sizeof (ibt_phys_addr_t);
2114         ma_hdl->h_ma_kaddr = kmem_zalloc(ma_hdl->h_ma_real_len, kmflag);
2115         if (ma_hdl->h_ma_kaddr == NULL) {
2116                 ibt_status = IBT_INSUFF_RESOURCE;
2117                 goto marea_fail4;
2118         }
2119 
2120         i = 0;
2121         len = 0;
2122         pagesize = PAGESIZE;
2123         kaddr = (uint64_t *)(void *)ma_hdl->h_ma_kaddr;
2124         while (cookie_cnt-- > 0) {
2125                 addr    = dmacookie.dmac_laddress;
2126                 len     += dmacookie.dmac_size;
2127                 endaddr = addr + (dmacookie.dmac_size - 1);
2128                 addr    = addr & ~(pagesize - 1);
2129                 while (addr <= endaddr) {
2130                         if (i >= list_len) {
 2131                                 ibt_status = IBT_PBL_TOO_SMALL;
2132                                 goto marea_fail5;
2133                         }
2134                         kaddr[i] = htonll(addr | HERMON_MTT_ENTRY_PRESENT);
2135                         i++;
2136                         addr += pagesize;
2137                         if (addr == 0) {
2138                                 static int do_once = 1;
2139                                 _NOTE(SCHEME_PROTECTS_DATA("safe sharing",
2140                                     do_once))
2141                                 if (do_once) {
2142                                         do_once = 0;
2143                                         cmn_err(CE_NOTE, "probable error in "
2144                                             "dma_cookie address: map_mem_area");
2145                                 }
2146                                 break;
2147                         }
2148                 }
2149                 if (cookie_cnt != 0)
2150                         ddi_dma_nextcookie(ma_hdl->h_ma_dmahdl, &dmacookie);
2151         }
2152 
2153         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*pmr))
2154         pmr->pmr_addr_list = (ibt_phys_addr_t *)(void *)ma_hdl->h_ma_kaddr;
2155         pmr->pmr_iova = va_attrs->va_vaddr;
2156         pmr->pmr_len = len;
2157         pmr->pmr_offset = va_attrs->va_vaddr & PAGEOFFSET;
 2158         pmr->pmr_buf_sz = PAGESHIFT; /* PRM says "Page Size", but... */
2159         pmr->pmr_num_buf = i;
2160         pmr->pmr_ma = ma_hdl;
2161 
2162         *ma_hdl_p = ma_hdl;
2163         return (IBT_SUCCESS);
2164 
2165 marea_fail5:
2166         kmem_free(ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len);
2167 marea_fail4:
2168         status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
2169 marea_fail3:
2170         ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
2171         kmem_free(ma_hdl, sizeof (*ma_hdl));
2172         *ma_hdl_p = NULL;
2173         return (ibt_status);
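
The cookie loop above rounds each DMA cookie's start address down to a page
boundary and writes one MTT entry per page the cookie touches. A worked
example, assuming 4 KB pages (the cookie values are illustrative): a cookie
with dmac_laddress 0x12345678 and dmac_size 0x3000 covers bytes 0x12345678
through 0x12348677, so four entries are written, one more than
dmac_size / PAGESIZE, because the cookie straddles page boundaries:

    /* Sketch of the per-cookie expansion; values are illustrative. */
    uint64_t addr    = 0x12345678ULL & ~(uint64_t)(PAGESIZE - 1); /* 0x12345000 */
    uint64_t endaddr = 0x12345678ULL + 0x3000 - 1;                /* 0x12348677 */
    while (addr <= endaddr) {
            /* emits 0x12345000, 0x12346000, 0x12347000, 0x12348000 */
            addr += PAGESIZE;
    }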


2220         hermon_dma_attr_init(state, &dma_attr);
2221 #ifdef  __sparc
2222         if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
2223                 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2224 
2225         if (hermon_kernel_data_ro == HERMON_RO_ENABLED)
2226                 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2227 #endif
2228         if (va_attrs->va_flags & IBT_VA_NOSLEEP) {
2229                 kmflag = KM_NOSLEEP;
2230                 callback = DDI_DMA_DONTWAIT;
2231         } else {
2232                 kmflag = KM_SLEEP;
2233                 callback = DDI_DMA_SLEEP;
2234         }
2235 
2236         ma_hdl = kmem_zalloc(sizeof (*ma_hdl), kmflag);
2237         if (ma_hdl == NULL) {
2238                 return (IBT_INSUFF_RESOURCE);
2239         }
2240         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ma_hdl))
2241 
2242         status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2243             callback, NULL, &ma_hdl->h_ma_dmahdl);
2244         if (status != DDI_SUCCESS) {
2245                 ibt_status = IBT_INSUFF_RESOURCE;
2246                 goto marea_fail0;
2247         }
2248         dma_attr.dma_attr_align = 64;   /* as per PRM */
2249         status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2250             callback, NULL, &ma_hdl->h_ma_list_hdl);
2251         if (status != DDI_SUCCESS) {
2252                 ibt_status = IBT_INSUFF_RESOURCE;
2253                 goto marea_fail1;
2254         }
2255         /*
2256          * Entries in the list in the last slot on each page cannot be used,
2257          * so 1 extra ibt_phys_addr_t is allocated per page.  We add 1 more
2258          * to deal with the possibility of a less than 1 page allocation
2259          * across a page boundary.
2260          */
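
Concretely (a sketch assuming a 4 KB HERMON_PAGESIZE and 8-byte
ibt_phys_addr_t entries; nentries and the other names below are illustrative,
not the driver's):

    /* Illustrative sizing arithmetic for the comment above. */
    size_t slots_per_page = HERMON_PAGESIZE / sizeof (ibt_phys_addr_t); /* 512 */
    size_t usable         = slots_per_page - 1; /* last slot per page unusable */
    size_t pages_needed   = (nentries + usable - 1) / usable;
    size_t total_entries  = nentries + pages_needed + 1; /* +1 per the comment */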


2315                                 ibt_status = IBT_PBL_TOO_SMALL;
2316                                 goto marea_fail5;
2317                         }
2318                         /* Deal with last entry on page. */
2319                         if (!((uintptr_t)&kaddr[i+j+1] & HERMON_PAGEOFFSET)) {
2320                                 if (kcookie.dmac_size > HERMON_PAGESIZE) {
2321                                         kcookie_paddr += HERMON_PAGESIZE;
2322                                         kcookie.dmac_size -= HERMON_PAGESIZE;
2323                                 } else {
2324                                         ddi_dma_nextcookie(khdl, &kcookie);
2325                                         kcookie_paddr = kcookie.dmac_laddress;
2326                                 }
2327                                 kaddr[i+j] = htonll(kcookie_paddr);
2328                                 j++;
2329                         }
2330                         kaddr[i+j] = htonll(addr | HERMON_MTT_ENTRY_PRESENT);
2331                         i++;
2332                         addr += pagesize;
2333                         if (addr == 0) {
2334                                 static int do_once = 1;
2335                                 _NOTE(SCHEME_PROTECTS_DATA("safe sharing",
2336                                     do_once))
2337                                 if (do_once) {
2338                                         do_once = 0;
2339                                         cmn_err(CE_NOTE, "probable error in "
2340                                             "dma_cookie address: map_mem_area");
2341                                 }
2342                                 break;
2343                         }
2344                 }
2345                 if (cookie_cnt != 0)
2346                         ddi_dma_nextcookie(ma_hdl->h_ma_dmahdl, &dmacookie);
2347         }
2348 
2349         pmr = &reg_req->wr;
2350         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*pmr))
2351         pmr->pmr_len = len;
2352         pmr->pmr_offset = va_attrs->va_vaddr & PAGEOFFSET;
2353         pmr->pmr_buf_sz = PAGESHIFT; /* PRM says "Page Size", but... */
2354         pmr->pmr_num_buf = i;
2355         pmr->pmr_addr_list = &ma_hdl->h_ma_list_cookie;
2356 
2357         *ma_hdl_p = ma_hdl;
2358         return (IBT_SUCCESS);
2359 
2360 marea_fail5:
2361         status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
2362         if (status != DDI_SUCCESS)
2363                 HERMON_WARNING(state, "failed to unbind DMA mapping");
2364 marea_fail4:
2365         status = ddi_dma_unbind_handle(ma_hdl->h_ma_list_hdl);
2366         if (status != DDI_SUCCESS)
2367                 HERMON_WARNING(state, "failed to unbind DMA mapping");
2368 marea_fail3:
2369         ddi_dma_mem_free(&ma_hdl->h_ma_list_acc_hdl);
2370 marea_fail2:


2397                 status = ddi_dma_unbind_handle(ma_hdl->h_ma_list_hdl);
2398                 if (status != DDI_SUCCESS)
2399                         HERMON_WARNING(state, "failed to unbind DMA mapping");
2400                 ddi_dma_mem_free(&ma_hdl->h_ma_list_acc_hdl);
2401                 ddi_dma_free_handle(&ma_hdl->h_ma_list_hdl);
2402         } else {
2403                 kmem_free(ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len);
2404         }
2405         status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
2406         if (status != DDI_SUCCESS)
2407                 HERMON_WARNING(state, "failed to unbind DMA mapping");
2408         ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
2409         kmem_free(ma_hdl, sizeof (*ma_hdl));
2410         return (IBT_SUCCESS);
2411 }
2412 
2413 struct ibc_mi_s {
2414         int                     imh_len;
2415         ddi_dma_handle_t        imh_dmahandle[1];
2416 };
2417 _NOTE(SCHEME_PROTECTS_DATA("safe sharing",
2418     ibc_mi_s::imh_len
2419     ibc_mi_s::imh_dmahandle))
2420 
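ibc_mi_s uses the one-element-array idiom: imh_dmahandle is declared with a
single slot, and callers over-allocate so it can hold imh_len handles, as
hermon_ci_map_mem_iov does below. A minimal illustration of the pattern (n is
hypothetical):

    /* Allocate room for n handles, not just the one declared slot. */
    ibc_mi_hdl_t mi_hdl = kmem_alloc(sizeof (*mi_hdl) +
        (n - 1) * sizeof (ddi_dma_handle_t), KM_SLEEP);
    mi_hdl->imh_len = n;
    /* imh_dmahandle[0] through imh_dmahandle[n - 1] are now valid slots. */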
2421 
2422 /*
2423  * hermon_ci_map_mem_iov()
2424  * Map the memory
2425  *    Context: Can be called from interrupt or base context.
2426  */
2427 /* ARGSUSED */
2428 static ibt_status_t
2429 hermon_ci_map_mem_iov(ibc_hca_hdl_t hca, ibt_iov_attr_t *iov_attr,
2430     ibt_all_wr_t *wr, ibc_mi_hdl_t *mi_hdl_p)
2431 {
2432         int                     status;
2433         int                     i, j, nds, max_nds;
2434         uint_t                  len;
2435         ibt_status_t            ibt_status;
2436         ddi_dma_handle_t        dmahdl;
2437         ddi_dma_cookie_t        dmacookie;
2438         ddi_dma_attr_t          dma_attr;
2439         uint_t                  cookie_cnt;
2440         ibc_mi_hdl_t            mi_hdl;
2441         ibt_lkey_t              rsvd_lkey;
2442         ibt_wr_ds_t             *sgl;
2443         hermon_state_t          *state;
2444         int                     kmflag;
2445         int                     (*callback)(caddr_t);
2446 
2447         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wr))
2448 
2449         state = (hermon_state_t *)hca;
2450         hermon_dma_attr_init(state, &dma_attr);
2451 #ifdef  __sparc
2452         if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
2453                 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2454 
2455         if (hermon_kernel_data_ro == HERMON_RO_ENABLED)
2456                 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2457 #endif
2458 
2459         nds = 0;
2460         max_nds = iov_attr->iov_wr_nds;
2461         if (iov_attr->iov_lso_hdr_sz)
2462                 max_nds -= (iov_attr->iov_lso_hdr_sz + sizeof (uint32_t) +
2463                     0xf) >> 4;    /* 0xf is for rounding up to a multiple of 16 */
2464         rsvd_lkey = (iov_attr->iov_flags & IBT_IOV_ALT_LKEY) ?
2465             iov_attr->iov_alt_lkey : state->hs_devlim.rsv_lkey;
2466         if ((iov_attr->iov_flags & IBT_IOV_NOSLEEP) == 0) {
2467                 kmflag = KM_SLEEP;
2468                 callback = DDI_DMA_SLEEP;
2469         } else {
2470                 kmflag = KM_NOSLEEP;
2471                 callback = DDI_DMA_DONTWAIT;
2472         }
2473 
2474         if (iov_attr->iov_flags & IBT_IOV_BUF) {
2475                 mi_hdl = kmem_alloc(sizeof (*mi_hdl), kmflag);
2476                 if (mi_hdl == NULL)
2477                         return (IBT_INSUFF_RESOURCE);
2478                 sgl = wr->send.wr_sgl;
2479                 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))
2480 
2481                 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2482                     callback, NULL, &dmahdl);
2483                 if (status != DDI_SUCCESS) {
2484                         kmem_free(mi_hdl, sizeof (*mi_hdl));
2485                         return (IBT_INSUFF_RESOURCE);
2486                 }
2487                 status = ddi_dma_buf_bind_handle(dmahdl, iov_attr->iov_buf,
2488                     DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
2489                     &dmacookie, &cookie_cnt);
2490                 if (status != DDI_DMA_MAPPED) {
2491                         ddi_dma_free_handle(&dmahdl);
2492                         kmem_free(mi_hdl, sizeof (*mi_hdl));
2493                         return (ibc_get_ci_failure(0));
2494                 }
2495                 while (cookie_cnt-- > 0) {
2496                         if (nds > max_nds) {
2497                                 status = ddi_dma_unbind_handle(dmahdl);
2498                                 if (status != DDI_SUCCESS)
2499                                         HERMON_WARNING(state, "failed to "
2500                                             "unbind DMA mapping");


2502                                 return (IBT_SGL_TOO_SMALL);
2503                         }
2504                         sgl[nds].ds_va = dmacookie.dmac_laddress;
2505                         sgl[nds].ds_key = rsvd_lkey;
2506                         sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
2507                         nds++;
2508                         if (cookie_cnt != 0)
2509                                 ddi_dma_nextcookie(dmahdl, &dmacookie);
2510                 }
2511                 wr->send.wr_nds = nds;
2512                 mi_hdl->imh_len = 1;
2513                 mi_hdl->imh_dmahandle[0] = dmahdl;
2514                 *mi_hdl_p = mi_hdl;
2515                 return (IBT_SUCCESS);
2516         }
2517 
2518         if (iov_attr->iov_flags & IBT_IOV_RECV)
2519                 sgl = wr->recv.wr_sgl;
2520         else
2521                 sgl = wr->send.wr_sgl;
2522         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))
2523 
2524         len = iov_attr->iov_list_len;
2525         for (i = 0, j = 0; j < len; j++) {
2526                 if (iov_attr->iov[j].iov_len == 0)
2527                         continue;
2528                 i++;
2529         }
2530         mi_hdl = kmem_alloc(sizeof (*mi_hdl) +
2531             (i - 1) * sizeof (ddi_dma_handle_t), kmflag);
2532         if (mi_hdl == NULL)
2533                 return (IBT_INSUFF_RESOURCE);
2534         mi_hdl->imh_len = i;
2535         for (i = 0, j = 0; j < len; j++) {
2536                 if (iov_attr->iov[j].iov_len == 0)
2537                         continue;
2538                 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2539                     callback, NULL, &dmahdl);
2540                 if (status != DDI_SUCCESS) {
2541                         ibt_status = IBT_INSUFF_RESOURCE;
2542                         goto fail2;


2610             (mi_hdl->imh_len - 1) * sizeof (ddi_dma_handle_t));
2611         return (IBT_SUCCESS);
2612 }
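
The iov_lso_hdr_sz adjustment near the top of this function reserves SGL slots
for an inline LSO header: the header size plus sizeof (uint32_t) is rounded up
to a multiple of 16, and one slot is deducted per 16-byte chunk. A worked
example (the 42-byte header size is illustrative):

    /* Illustrative arithmetic only. */
    uint_t lso_hdr_sz = 42;
    uint_t reserved = (lso_hdr_sz + sizeof (uint32_t) + 0xf) >> 4; /* == 3 */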
2613 
2614 /*
2615  * hermon_ci_alloc_lkey()
2616  * Allocate an empty memory region for use with FRWR.
2617  *    Context: Can be called from user or base context.
2618  */
2619 /* ARGSUSED */
2620 static ibt_status_t
2621 hermon_ci_alloc_lkey(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2622     ibt_lkey_flags_t flags, uint_t list_sz, ibc_mr_hdl_t *mr_p,
2623     ibt_pmr_desc_t *mem_desc_p)
2624 {
2625         hermon_state_t          *state;
2626         hermon_pdhdl_t          pdhdl;
2627         hermon_mrhdl_t          mrhdl;
2628         int                     status;
2629 
2630         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mem_desc_p))
2631 
2632         ASSERT(mr_p != NULL);
2633         ASSERT(mem_desc_p != NULL);
2634 
2635         state = (hermon_state_t *)hca;
2636         pdhdl = (hermon_pdhdl_t)pd;
2637 
2638         if (!(state->hs_ibtfinfo.hca_attr->hca_flags2 & IBT_HCA2_MEM_MGT_EXT))
2639                 return (IBT_NOT_SUPPORTED);
2640 
2641         status = hermon_mr_alloc_lkey(state, pdhdl, flags, list_sz, &mrhdl);
2642         if (status != DDI_SUCCESS) {
2643                 return (status);
2644         }
2645         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
2646 
2647         /* Fill in the mem_desc_p structure */
2648         mem_desc_p->pmd_iova = 0;
2649         mem_desc_p->pmd_phys_buf_list_sz = list_sz;
2650         mem_desc_p->pmd_lkey = mrhdl->mr_lkey;
2651         /* Only set RKey if remote access was requested */
2652         if (flags & IBT_KEY_REMOTE) {
2653                 mem_desc_p->pmd_rkey = mrhdl->mr_rkey;
2654         }
2655         mem_desc_p->pmd_sync_required = B_FALSE;
2656 
2657         /* Return the Hermon MR handle */
2658         *mr_p = (ibc_mr_hdl_t)mrhdl;
2659         return (IBT_SUCCESS);
2660 }
2661 
2662 /* Physical Register Memory Region */
2663 /*
2664  * hermon_ci_register_physical_mr()
2665  */


2786 
2787         ASSERT(mem_pattr != NULL);
2788         ASSERT(mr_p != NULL);
2789         ASSERT(mem_desc_p != NULL);
2790 
2791         /* Grab the Hermon softstate pointer */
2792         state = (hermon_state_t *)hca;
2793 
2794         fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;
2795 
2796         status = hermon_register_physical_fmr(state, fmrpoolhdl, mem_pattr,
2797             &mrhdl, mem_desc_p);
2798         if (status != DDI_SUCCESS) {
2799                 return (status);
2800         }
2801 
2802         /*
2803          * If region is mapped for streaming (i.e. noncoherent), then set
2804          * sync is required
2805          */
2806         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mem_desc_p))
2807         mem_desc_p->pmd_sync_required = (mrhdl->mr_bindinfo.bi_flags &
2808             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
2809         if (mem_desc_p->pmd_sync_required == B_TRUE) {
2810                 /* Fill in DMA handle for future sync operations */
2811                 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(mrhdl->mr_bindinfo))
2812                 mrhdl->mr_bindinfo.bi_dmahdl =
2813                     (ddi_dma_handle_t)mem_pattr->pmr_ma;
2814         }
2815 
2816         /* Return the Hermon MR handle */
2817         *mr_p = (ibc_mr_hdl_t)mrhdl;
2818 
2819         return (IBT_SUCCESS);
2820 }
2821 
2822 /*
2823  * hermon_ci_deregister_fmr()
2824  * Moves an FMR (specified by 'mr') to the deregistered state.
2825  *    Context: Can be called from base context only.
2826  */
2827 static ibt_status_t
2828 hermon_ci_deregister_fmr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
2829 {
2830         hermon_state_t          *state;
2831         hermon_mrhdl_t          mrhdl;




 635         status = hermon_ah_modify(state, ahhdl, attr_p);
 636 
 637         return (status);
 638 }
 639 
 640 
 641 /*
 642  * hermon_ci_alloc_qp()
 643  *    Allocate a Queue Pair
 644  *    Context: Can be called only from user or kernel context.
 645  */
 646 static ibt_status_t
 647 hermon_ci_alloc_qp(ibc_hca_hdl_t hca, ibtl_qp_hdl_t ibt_qphdl,
 648     ibt_qp_type_t type, ibt_qp_alloc_attr_t *attr_p,
 649     ibt_chan_sizes_t *queue_sizes_p, ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
 650 {
 651         hermon_state_t          *state;
 652         hermon_qp_info_t        qpinfo;
 653         int                     status;
 654 



 655         /* Grab the Hermon softstate pointer */
 656         state = (hermon_state_t *)hca;
 657 
 658         /* Allocate the QP */
 659         qpinfo.qpi_attrp        = attr_p;
 660         qpinfo.qpi_type         = type;
 661         qpinfo.qpi_ibt_qphdl    = ibt_qphdl;
 662         qpinfo.qpi_queueszp     = queue_sizes_p;
 663         qpinfo.qpi_qpn          = qpn;
 664         status = hermon_qp_alloc(state, &qpinfo, HERMON_NOSLEEP);
 665         if (status != DDI_SUCCESS) {
 666                 return (status);
 667         }
 668 
 669         /* Return the Hermon QP handle */
 670         *qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;
 671 
 672         return (IBT_SUCCESS);
 673 }
 674 
 675 
 676 /*
 677  * hermon_ci_alloc_special_qp()
 678  *    Allocate a Special Queue Pair
 679  *    Context: Can be called only from user or kernel context.
 680  */
 681 static ibt_status_t
 682 hermon_ci_alloc_special_qp(ibc_hca_hdl_t hca, uint8_t port,
 683     ibtl_qp_hdl_t ibt_qphdl, ibt_sqp_type_t type,
 684     ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
 685     ibc_qp_hdl_t *qp_p)
 686 {
 687         hermon_state_t          *state;
 688         hermon_qp_info_t        qpinfo;
 689         int                     status;
 690 



 691         /* Grab the Hermon softstate pointer */
 692         state = (hermon_state_t *)hca;
 693 
 694         /* Allocate the Special QP */
 695         qpinfo.qpi_attrp        = attr_p;
 696         qpinfo.qpi_type         = type;
 697         qpinfo.qpi_port         = port;
 698         qpinfo.qpi_ibt_qphdl    = ibt_qphdl;
 699         qpinfo.qpi_queueszp     = queue_sizes_p;
 700         status = hermon_special_qp_alloc(state, &qpinfo, HERMON_NOSLEEP);
 701         if (status != DDI_SUCCESS) {
 702                 return (status);
 703         }
 704         /* Return the Hermon QP handle */
 705         *qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;
 706 
 707         return (IBT_SUCCESS);
 708 }
 709 
 710 /*
 711  * hermon_ci_alloc_qp_range()
 712  *    Allocate a range of Queue Pairs
 713  *    Context: Can be called only from user or kernel context.
 714  */
 715 /* ARGSUSED */
 716 static ibt_status_t
 717 hermon_ci_alloc_qp_range(ibc_hca_hdl_t hca, uint_t log2,
 718     ibtl_qp_hdl_t *ibtl_qp, ibt_qp_type_t type,
 719     ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
 720     ibc_cq_hdl_t *send_cq, ibc_cq_hdl_t *recv_cq,
 721     ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
 722 {
 723         hermon_state_t          *state;
 724         hermon_qp_info_t        qpinfo;
 725         int                     status;
 726 



 727         /* Grab the Hermon softstate pointer */
 728         state = (hermon_state_t *)hca;
 729 
 730         /* Allocate the QP */
 731         qpinfo.qpi_attrp        = attr_p;
 732         qpinfo.qpi_type         = type;
 733         qpinfo.qpi_queueszp     = queue_sizes_p;
 734         qpinfo.qpi_qpn          = qpn;
 735         status = hermon_qp_alloc_range(state, log2, &qpinfo, ibtl_qp,
 736             send_cq, recv_cq, (hermon_qphdl_t *)qp_p, HERMON_NOSLEEP);
 737         return (status);
 738 }
 739 
 740 /*
 741  * hermon_ci_free_qp()
 742  *    Free a Queue Pair
 743  *    Context: Can be called only from user or kernel context.
 744  */
 745 static ibt_status_t
 746 hermon_ci_free_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,


 887 /*
 888  * hermon_ci_query_cq()
 889  *    Return the size of a Completion Queue
 890  *    Context: Can be called only from user or kernel context.
 891  */
 892 static ibt_status_t
 893 hermon_ci_query_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t *entries_p,
 894     uint_t *count_p, uint_t *usec_p, ibt_cq_handler_id_t *hid_p)
 895 {
 896         hermon_state_t  *state;
 897         hermon_cqhdl_t  cqhdl;
 898 
 899         /* Grab the CQ handle */
 900         state = (hermon_state_t *)hca;
 901         cqhdl = (hermon_cqhdl_t)cq;
 902 
 903         /* Query the current CQ size */
 904         *entries_p = cqhdl->cq_bufsz;
 905         *count_p = cqhdl->cq_intmod_count;
 906         *usec_p = cqhdl->cq_intmod_usec;

 907         *hid_p = HERMON_EQNUM_TO_HID(state, cqhdl->cq_eqnum);
 908 
 909         return (IBT_SUCCESS);
 910 }
 911 
 912 
 913 /*
 914  * hermon_ci_resize_cq()
 915  *    Change the size of a Completion Queue
 916  *    Context: Can be called only from user or kernel context.
 917  */
 918 static ibt_status_t
 919 hermon_ci_resize_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t size,
 920     uint_t *actual_size)
 921 {
 922         hermon_state_t          *state;
 923         hermon_cqhdl_t          cqhdl;
 924         int                     status;
 925 
 926         /* Grab the Hermon softstate pointer and CQ handle */


 988 hermon_ci_free_cq_sched(ibc_hca_hdl_t hca, ibc_sched_hdl_t sched_hdl)
 989 {
 990         int     status;
 991 
 992         status = hermon_cq_sched_free((hermon_state_t *)hca,
 993             (hermon_cq_sched_t *)sched_hdl);
 994         return (status);
 995 }
 996 
 997 static ibt_status_t
 998 hermon_ci_query_cq_handler_id(ibc_hca_hdl_t hca,
 999     ibt_cq_handler_id_t hid, ibt_cq_handler_attr_t *attrs)
1000 {
1001         hermon_state_t          *state;
1002 
1003         state = (hermon_state_t *)hca;
1004         if (!HERMON_HID_VALID(state, hid))
1005                 return (IBT_CQ_HID_INVALID);
1006         if (attrs == NULL)
1007                 return (IBT_INVALID_PARAM);

1008         attrs->cha_ih = state->hs_intrmsi_hdl[hid - 1];
1009         attrs->cha_dip = state->hs_dip;
1010         return (IBT_SUCCESS);
1011 }
1012 
1013 /*
1014  * hermon_ci_alloc_eec()
1015  *    Allocate an End-to-End context
1016  *    Context: Can be called only from user or kernel context.
1017  */
1018 /* ARGSUSED */
1019 static ibt_status_t
1020 hermon_ci_alloc_eec(ibc_hca_hdl_t hca, ibc_eec_flags_t flags,
1021     ibt_eec_hdl_t ibt_eec, ibc_rdd_hdl_t rdd, ibc_eec_hdl_t *eec_p)
1022 {
1023         /*
1024          * This is an unsupported interface for the Hermon driver.  This
1025          * interface is necessary to support Reliable Datagram (RD)
1026          * operations.  Hermon does not support RD.
1027          */


1085 }
1086 
1087 
1088 /*
1089  * hermon_ci_register_mr()
1090  *    Prepare a virtually addressed Memory Region for use by an HCA
1091  *    Context: Can be called from interrupt or base context.
1092  */
1093 /* ARGSUSED */
1094 static ibt_status_t
1095 hermon_ci_register_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1096     ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
1097     ibt_mr_desc_t *mr_desc)
1098 {
1099         hermon_mr_options_t     op;
1100         hermon_state_t          *state;
1101         hermon_pdhdl_t          pdhdl;
1102         hermon_mrhdl_t          mrhdl;
1103         int                     status;
1104 


1105         ASSERT(mr_attr != NULL);
1106         ASSERT(mr_p != NULL);
1107         ASSERT(mr_desc != NULL);
1108 
1109         /*
1110          * Validate the access flags.  Both Remote Write and Remote Atomic
1111          * require the Local Write flag to be set
1112          */
1113         if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1114             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1115             !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1116                 return (IBT_MR_ACCESS_REQ_INVALID);
1117         }
1118 
1119         /* Grab the Hermon softstate pointer and PD handle */
1120         state = (hermon_state_t *)hca;
1121         pdhdl = (hermon_pdhdl_t)pd;
1122 
1123         /* Register the memory region */
1124         op.mro_bind_type   = state->hs_cfg_profile->cp_iommu_bypass;
1125         op.mro_bind_dmahdl = NULL;
1126         op.mro_bind_override_addr = 0;
1127         status = hermon_mr_register(state, pdhdl, mr_attr, &mrhdl,
1128             &op, HERMON_MPT_DMPT);
1129         if (status != DDI_SUCCESS) {
1130                 return (status);
1131         }

1132 
1133         /* Fill in the mr_desc structure */
1134         mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
1135         mr_desc->md_lkey  = mrhdl->mr_lkey;
1136         /* Only set RKey if remote access was requested */
1137         if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1138             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1139             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1140                 mr_desc->md_rkey = mrhdl->mr_rkey;
1141         }
1142 
1143         /*
1144          * If region is mapped for streaming (i.e. noncoherent), then set
1145          * sync is required
1146          */
1147         mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
1148             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1149 
1150         /* Return the Hermon MR handle */
1151         *mr_p = (ibc_mr_hdl_t)mrhdl;


1155 
1156 
1157 /*
1158  * hermon_ci_register_buf()
1159  *    Prepare a Memory Region specified by buf structure for use by an HCA
1160  *    Context: Can be called from interrupt or base context.
1161  */
1162 /* ARGSUSED */
1163 static ibt_status_t
1164 hermon_ci_register_buf(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1165     ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
1166     ibt_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
1167 {
1168         hermon_mr_options_t     op;
1169         hermon_state_t          *state;
1170         hermon_pdhdl_t          pdhdl;
1171         hermon_mrhdl_t          mrhdl;
1172         int                     status;
1173         ibt_mr_flags_t          flags = attrp->mr_flags;
1174 


1175         ASSERT(mr_p != NULL);
1176         ASSERT(mr_desc != NULL);
1177 
1178         /*
1179          * Validate the access flags.  Both Remote Write and Remote Atomic
1180          * require the Local Write flag to be set
1181          */
1182         if (((flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1183             (flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1184             !(flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1185                 return (IBT_MR_ACCESS_REQ_INVALID);
1186         }
1187 
1188         /* Grab the Hermon softstate pointer and PD handle */
1189         state = (hermon_state_t *)hca;
1190         pdhdl = (hermon_pdhdl_t)pd;
1191 
1192         /* Register the memory region */
1193         op.mro_bind_type   = state->hs_cfg_profile->cp_iommu_bypass;
1194         op.mro_bind_dmahdl = NULL;
1195         op.mro_bind_override_addr = 0;
1196         status = hermon_mr_register_buf(state, pdhdl, attrp, buf,
1197             &mrhdl, &op, HERMON_MPT_DMPT);
1198         if (status != DDI_SUCCESS) {
1199                 return (status);
1200         }

1201 
1202         /* Fill in the mr_desc structure */
1203         mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
1204         mr_desc->md_lkey  = mrhdl->mr_lkey;
1205         /* Only set RKey if remote access was requested */
1206         if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1207             (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1208             (flags & IBT_MR_ENABLE_REMOTE_READ)) {
1209                 mr_desc->md_rkey = mrhdl->mr_rkey;
1210         }
1211 
1212         /*
1213          * If region is mapped for streaming (i.e. noncoherent), then set
1214          * sync is required
1215          */
1216         mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
1217             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1218 
1219         /* Return the Hermon MR handle */
1220         *mr_p = (ibc_mr_hdl_t)mrhdl;


1272         return (status);
1273 }
1274 
1275 
1276 /*
1277  * hermon_ci_register_shared_mr()
1278  *    Create a shared memory region matching an existing Memory Region
1279  *    Context: Can be called from interrupt or base context.
1280  */
1281 /* ARGSUSED */
1282 static ibt_status_t
1283 hermon_ci_register_shared_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
1284     ibc_pd_hdl_t pd, ibt_smr_attr_t *mr_attr, void *ibtl_reserved,
1285     ibc_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
1286 {
1287         hermon_state_t          *state;
1288         hermon_pdhdl_t          pdhdl;
1289         hermon_mrhdl_t          mrhdl, mrhdl_new;
1290         int                     status;
1291 


1292         ASSERT(mr_attr != NULL);
1293         ASSERT(mr_p != NULL);
1294         ASSERT(mr_desc != NULL);
1295 
1296         /*
1297          * Validate the access flags.  Both Remote Write and Remote Atomic
1298          * require the Local Write flag to be set
1299          */
1300         if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1301             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1302             !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1303                 return (IBT_MR_ACCESS_REQ_INVALID);
1304         }
1305 
1306         /* Grab the Hermon softstate pointer and handles */
1307         state = (hermon_state_t *)hca;
1308         pdhdl = (hermon_pdhdl_t)pd;
1309         mrhdl = (hermon_mrhdl_t)mr;
1310 
1311         /* Register the shared memory region */
1312         status = hermon_mr_register_shared(state, mrhdl, pdhdl, mr_attr,
1313             &mrhdl_new);
1314         if (status != DDI_SUCCESS) {
1315                 return (status);
1316         }
1317 
1318         /* Fill in the mr_desc structure */
1319         mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1320         mr_desc->md_lkey  = mrhdl_new->mr_lkey;
1321         /* Only set RKey if remote access was requested */
1322         if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1323             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1324             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1325                 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1326         }
1327 
1328         /*
1329          * If the shared region is mapped for streaming (i.e. noncoherent),
1330          * flag that explicit sync is required
1331          */
1332         mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1333             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1334 
1335         /* Return the Hermon MR handle */
1336         *mr_p = (ibc_mr_hdl_t)mrhdl_new;
1337 
1338         return (IBT_SUCCESS);
1339 }
1340 
1341 
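
/*
 * Illustrative sketch (not part of this file): md_sync_required, set by
 * each registration path above, tells the consumer whether DMA sync is
 * needed around HCA access.  This assumes the IBTF ibt_sync_mr() entry
 * point and the ibt_mr_sync_t fields (ms_handle, ms_vaddr, ms_len,
 * ms_flags) from ibtl; treat those exact names as assumptions here.
 */
static ibt_status_t
sync_before_cpu_read(ibt_hca_hdl_t hca, ibt_mr_hdl_t mr_hdl,
    ibt_mr_desc_t *mr_desc, ib_vaddr_t vaddr, ib_memlen_t len)
{
	ibt_mr_sync_t	ms;

	if (mr_desc->md_sync_required == B_FALSE)
		return (IBT_SUCCESS);		/* coherent mapping */

	ms.ms_handle = mr_hdl;
	ms.ms_vaddr = vaddr;
	ms.ms_len = len;
	ms.ms_flags = IBT_SYNC_READ;		/* CPU is about to read */
	return (ibt_sync_mr(hca, &ms, 1));
}
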
1342 /*
1343  * hermon_ci_reregister_mr()
1344  *    Modify the attributes of an existing Memory Region
1345  *    Context: Can be called from interrupt or base context.
1346  */
1347 /* ARGSUSED */
1348 static ibt_status_t
1349 hermon_ci_reregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
1350     ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_new,
1351     ibt_mr_desc_t *mr_desc)
1352 {
1353         hermon_mr_options_t     op;
1354         hermon_state_t          *state;
1355         hermon_pdhdl_t          pdhdl;
1356         hermon_mrhdl_t          mrhdl, mrhdl_new;
1357         int                     status;
1358 
1359         ASSERT(mr_attr != NULL);
1360         ASSERT(mr_new != NULL);
1361         ASSERT(mr_desc != NULL);
1362 
1363         /* Grab the Hermon softstate pointer, mrhdl, and pdhdl */
1364         state = (hermon_state_t *)hca;
1365         mrhdl = (hermon_mrhdl_t)mr;
1366         pdhdl = (hermon_pdhdl_t)pd;
1367 
1368         /* Reregister the memory region */
1369         op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
1370         status = hermon_mr_reregister(state, mrhdl, pdhdl, mr_attr,
1371             &mrhdl_new, &op);
1372         if (status != DDI_SUCCESS) {
1373                 return (status);
1374         }
1375 
1376         /* Fill in the mr_desc structure */
1377         mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1378         mr_desc->md_lkey  = mrhdl_new->mr_lkey;
1379         /* Only set RKey if remote access was requested */
1380         if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1381             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1382             (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1383                 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1384         }
1385 
1386         /*
1387          * If the region is mapped for streaming (i.e. noncoherent), flag
1388          * that explicit sync is required
1389          */
1390         mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1391             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1392 
1393         /* Return the Hermon MR handle */
1394         *mr_new = (ibc_mr_hdl_t)mrhdl_new;
1395 
1396         return (IBT_SUCCESS);
1397 }
1398 
1399 
1400 /*
1401  * hermon_ci_reregister_buf()
1402  *    Modify the attributes of an existing Memory Region
1403  *    Context: Can be called from interrupt or base context.
1404  */
1405 /* ARGSUSED */
1406 static ibt_status_t
1407 hermon_ci_reregister_buf(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
1408     ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
1409     ibc_mr_hdl_t *mr_new, ibt_mr_desc_t *mr_desc)
1410 {
1411         hermon_mr_options_t     op;
1412         hermon_state_t          *state;
1413         hermon_pdhdl_t          pdhdl;
1414         hermon_mrhdl_t          mrhdl, mrhdl_new;
1415         int                     status;
1416         ibt_mr_flags_t          flags = attrp->mr_flags;
1417 
1418         ASSERT(mr_new != NULL);
1419         ASSERT(mr_desc != NULL);
1420 
1421         /* Grab the Hermon softstate pointer, mrhdl, and pdhdl */
1422         state = (hermon_state_t *)hca;
1423         mrhdl = (hermon_mrhdl_t)mr;
1424         pdhdl = (hermon_pdhdl_t)pd;
1425 
1426         /* Reregister the memory region */
1427         op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
1428         status = hermon_mr_reregister_buf(state, mrhdl, pdhdl, attrp, buf,
1429             &mrhdl_new, &op);
1430         if (status != DDI_SUCCESS) {
1431                 return (status);
1432         }
1433 
1434         /* Fill in the mr_desc structure */
1435         mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1436         mr_desc->md_lkey  = mrhdl_new->mr_lkey;
1437         /* Only set RKey if remote access was requested */
1438         if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1439             (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1440             (flags & IBT_MR_ENABLE_REMOTE_READ)) {
1441                 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1442         }
1443 
1444         /*
1445          * If the region is mapped for streaming (i.e. noncoherent), flag
1446          * that explicit sync is required
1447          */
1448         mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1449             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1450 
1451         /* Return the Hermon MR handle */
1452         *mr_new = (ibc_mr_hdl_t)mrhdl_new;
1453 
1454         return (IBT_SUCCESS);
1455 }
1485 hermon_ci_alloc_mw(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, ibt_mw_flags_t flags,
1486     ibc_mw_hdl_t *mw_p, ibt_rkey_t *rkey_p)
1487 {
1488         hermon_state_t          *state;
1489         hermon_pdhdl_t          pdhdl;
1490         hermon_mwhdl_t          mwhdl;
1491         int                     status;
1492 
1493         ASSERT(mw_p != NULL);
1494         ASSERT(rkey_p != NULL);
1495 
1496         /* Grab the Hermon softstate pointer and PD handle */
1497         state = (hermon_state_t *)hca;
1498         pdhdl = (hermon_pdhdl_t)pd;
1499 
1500         /* Allocate the memory window */
1501         status = hermon_mw_alloc(state, pdhdl, flags, &mwhdl);
1502         if (status != DDI_SUCCESS) {
1503                 return (status);
1504         }
1505 
1506         /* Return the MW handle and RKey */
1507         *mw_p = (ibc_mw_hdl_t)mwhdl;
1508         *rkey_p = mwhdl->mr_rkey;
1509 
1510         return (IBT_SUCCESS);
1511 }
1512 
1513 
1514 /*
1515  * hermon_ci_free_mw()
1516  *    Free a Memory Window
1517  *    Context: Can be called from interrupt or base context.
1518  */
1519 static ibt_status_t
1520 hermon_ci_free_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw)
1521 {
1522         hermon_state_t          *state;
1523         hermon_mwhdl_t          mwhdl;
1524         int                     status;
1557         return (IBT_SUCCESS);
1558 }
1559 
1560 
1561 /*
1562  * hermon_ci_register_dma_mr()
1563  *    Allocate a memory region that maps physical addresses.
1564  *    Context: Can be called only from user or kernel context.
1565  */
1566 /* ARGSUSED */
1567 static ibt_status_t
1568 hermon_ci_register_dma_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1569     ibt_dmr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
1570     ibt_mr_desc_t *mr_desc)
1571 {
1572         hermon_state_t          *state;
1573         hermon_pdhdl_t          pdhdl;
1574         hermon_mrhdl_t          mrhdl;
1575         int                     status;
1576 
1577         ASSERT(mr_attr != NULL);
1578         ASSERT(mr_p != NULL);
1579         ASSERT(mr_desc != NULL);
1580 
1581         /*
1582          * Validate the access flags.  Both Remote Write and Remote Atomic
1583          * require the Local Write flag to be set
1584          */
1585         if (((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1586             (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1587             !(mr_attr->dmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1588                 return (IBT_MR_ACCESS_REQ_INVALID);
1589         }
1590 
1591         /* Grab the Hermon softstate pointer and PD handle */
1592         state = (hermon_state_t *)hca;
1593         pdhdl = (hermon_pdhdl_t)pd;
1594 
1595         status = hermon_dma_mr_register(state, pdhdl, mr_attr, &mrhdl);
1596         if (status != DDI_SUCCESS) {
1597                 return (status);
1598         }
1599 
1600         /* Fill in the mr_desc structure */
1601         mr_desc->md_vaddr = mr_attr->dmr_paddr;
1602         mr_desc->md_lkey  = mrhdl->mr_lkey;
1603         /* Only set RKey if remote access was requested */
1604         if ((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1605             (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1606             (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1607                 mr_desc->md_rkey = mrhdl->mr_rkey;
1608         }
1609 
1610         /*
1611          * DMA memory regions are never bound for streaming here, so no
1612          * explicit sync is required
1613          */
1614         mr_desc->md_sync_required = B_FALSE;
1615 
1616         /* Return the Hermon MR handle */
1617         *mr_p = (ibc_mr_hdl_t)mrhdl;
1618 
1619         return (IBT_SUCCESS);
1620 }
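
/*
 * Illustrative sketch (not part of this file): the ibt_dmr_attr_t
 * consumed by hermon_ci_register_dma_mr() carries the physical range
 * to map.  dmr_paddr and dmr_flags appear in the code above; dmr_len
 * is assumed from the IBTF definition, and all values are made up.
 */
static void
example_dmr_attr(ibt_dmr_attr_t *dmr)
{
	bzero(dmr, sizeof (*dmr));
	dmr->dmr_paddr = 0x40000000ULL;	/* made-up physical address */
	dmr->dmr_len = 0x10000;		/* 64 KB; field name assumed */
	dmr->dmr_flags = IBT_MR_ENABLE_LOCAL_WRITE |
	    IBT_MR_ENABLE_REMOTE_WRITE;	/* remote write needs local write */
}
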
2048         if (va_attrs->va_flags & IBT_VA_NOSLEEP) {
2049                 kmflag = KM_NOSLEEP;
2050                 callback = DDI_DMA_DONTWAIT;
2051         } else {
2052                 kmflag = KM_SLEEP;
2053                 callback = DDI_DMA_SLEEP;
2054         }
2055 
2056         ma_hdl = kmem_zalloc(sizeof (*ma_hdl), kmflag);
2057         if (ma_hdl == NULL) {
2058                 return (IBT_INSUFF_RESOURCE);
2059         }
2060 #ifdef  __sparc
2061         if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
2062                 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2063 
2064         if (hermon_kernel_data_ro == HERMON_RO_ENABLED)
2065                 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2066 #endif
2067 
2068         status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2069             callback, NULL, &ma_hdl->h_ma_dmahdl);
2070         if (status != DDI_SUCCESS) {
2071                 kmem_free(ma_hdl, sizeof (*ma_hdl));
2072                 return (IBT_INSUFF_RESOURCE);
2073         }
2074         status = ddi_dma_buf_bind_handle(ma_hdl->h_ma_dmahdl,
2075             va_attrs->va_buf, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2076             callback, NULL, &dmacookie, &cookie_cnt);
2077         if (status != DDI_DMA_MAPPED) {
2078                 ibt_status = ibc_get_ci_failure(0);
2079                 goto marea_fail3;
2080         }
2081 
2082         ma_hdl->h_ma_real_len = list_len * sizeof (ibt_phys_addr_t);
2083         ma_hdl->h_ma_kaddr = kmem_zalloc(ma_hdl->h_ma_real_len, kmflag);
2084         if (ma_hdl->h_ma_kaddr == NULL) {
2085                 ibt_status = IBT_INSUFF_RESOURCE;
2086                 goto marea_fail4;
2087         }
2088 
2089         i = 0;
2090         len = 0;
2091         pagesize = PAGESIZE;
2092         kaddr = (uint64_t *)(void *)ma_hdl->h_ma_kaddr;
2093         while (cookie_cnt-- > 0) {
2094                 addr    = dmacookie.dmac_laddress;
2095                 len     += dmacookie.dmac_size;
2096                 endaddr = addr + (dmacookie.dmac_size - 1);
2097                 addr    = addr & ~(pagesize - 1);
2098                 while (addr <= endaddr) {
2099                         if (i >= list_len) {
2100                                 ibt_status = IBT_PBL_TOO_SMALL;
2101                                 goto marea_fail5;
2102                         }
2103                         kaddr[i] = htonll(addr | HERMON_MTT_ENTRY_PRESENT);
2104                         i++;
2105                         addr += pagesize;
2106                         if (addr == 0) {
2107                                 static int do_once = 1;
2108                                 if (do_once) {
2109                                         do_once = 0;
2110                                         cmn_err(CE_NOTE, "probable error in "
2111                                             "dma_cookie address: map_mem_area");
2112                                 }
2113                                 break;
2114                         }
2115                 }
2116                 if (cookie_cnt != 0)
2117                         ddi_dma_nextcookie(ma_hdl->h_ma_dmahdl, &dmacookie);
2118         }
2119 
2120         pmr->pmr_addr_list = (ibt_phys_addr_t *)(void *)ma_hdl->h_ma_kaddr;
2121         pmr->pmr_iova = va_attrs->va_vaddr;
2122         pmr->pmr_len = len;
2123         pmr->pmr_offset = va_attrs->va_vaddr & PAGEOFFSET;
2124         pmr->pmr_buf_sz = PAGESHIFT; /* PRM says "Page Sice", but... */
2125         pmr->pmr_num_buf = i;
2126         pmr->pmr_ma = ma_hdl;
2127 
2128         *ma_hdl_p = ma_hdl;
2129         return (IBT_SUCCESS);
2130 
2131 marea_fail5:
2132         kmem_free(ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len);
2133 marea_fail4:
2134         status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
2135 marea_fail3:
2136         ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
2137         kmem_free(ma_hdl, sizeof (*ma_hdl));
2138         *ma_hdl_p = NULL;
2139         return (ibt_status);
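
/*
 * Illustrative sketch (not part of this file): the cookie walk above,
 * and its twin later in the file, rounds each DMA cookie down to a
 * page boundary and emits one 64-bit MTT entry per covered page,
 * tagged with HERMON_MTT_ENTRY_PRESENT and byte-swapped via htonll().
 * Below is a stand-alone, user-level version of just the address
 * arithmetic, with stand-ins for the DDI cookie type and the present
 * bit; the byte swap is omitted.
 */
#include <stdio.h>
#include <stdint.h>

#define	EX_PAGESIZE	4096ULL
#define	EX_MTT_PRESENT	1ULL	/* stands in for HERMON_MTT_ENTRY_PRESENT */

struct ex_cookie {		/* stands in for ddi_dma_cookie_t */
	uint64_t	laddress;
	uint64_t	size;
};

/* Emit one entry per page covered by the cookie; return entries written. */
static int
ex_cookie_to_mtt(const struct ex_cookie *c, uint64_t *kaddr, int list_len)
{
	uint64_t addr = c->laddress & ~(EX_PAGESIZE - 1);
	uint64_t endaddr = c->laddress + (c->size - 1);
	int i = 0;

	while (addr <= endaddr) {
		if (i >= list_len)
			return (-1);	/* IBT_PBL_TOO_SMALL analogue */
		kaddr[i++] = addr | EX_MTT_PRESENT;
		addr += EX_PAGESIZE;
		if (addr == 0)		/* wrapped: bad cookie address */
			break;
	}
	return (i);
}

int
main(void)
{
	struct ex_cookie c = { 0x1000f00ULL, 0x2200ULL };	/* spans 4 pages */
	uint64_t list[8];

	printf("%d entries\n", ex_cookie_to_mtt(&c, list, 8));	/* prints 4 */
	return (0);
}
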
2186         hermon_dma_attr_init(state, &dma_attr);
2187 #ifdef  __sparc
2188         if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
2189                 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2190 
2191         if (hermon_kernel_data_ro == HERMON_RO_ENABLED)
2192                 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2193 #endif
2194         if (va_attrs->va_flags & IBT_VA_NOSLEEP) {
2195                 kmflag = KM_NOSLEEP;
2196                 callback = DDI_DMA_DONTWAIT;
2197         } else {
2198                 kmflag = KM_SLEEP;
2199                 callback = DDI_DMA_SLEEP;
2200         }
2201 
2202         ma_hdl = kmem_zalloc(sizeof (*ma_hdl), kmflag);
2203         if (ma_hdl == NULL) {
2204                 return (IBT_INSUFF_RESOURCE);
2205         }
2206 
2207         status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2208             callback, NULL, &ma_hdl->h_ma_dmahdl);
2209         if (status != DDI_SUCCESS) {
2210                 ibt_status = IBT_INSUFF_RESOURCE;
2211                 goto marea_fail0;
2212         }
2213         dma_attr.dma_attr_align = 64;   /* as per PRM */
2214         status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2215             callback, NULL, &ma_hdl->h_ma_list_hdl);
2216         if (status != DDI_SUCCESS) {
2217                 ibt_status = IBT_INSUFF_RESOURCE;
2218                 goto marea_fail1;
2219         }
2220         /*
2221          * The last slot on each page of the list cannot be used, so one
2222          * extra ibt_phys_addr_t is allocated per page.  One more is added
2223          * to cover a less-than-one-page allocation that straddles a page
2224          * boundary.
2225          */
2280                                 ibt_status = IBT_PBL_TOO_SMALL;
2281                                 goto marea_fail5;
2282                         }
2283                         /* Deal with last entry on page. */
2284                         if (!((uintptr_t)&kaddr[i+j+1] & HERMON_PAGEOFFSET)) {
2285                                 if (kcookie.dmac_size > HERMON_PAGESIZE) {
2286                                         kcookie_paddr += HERMON_PAGESIZE;
2287                                         kcookie.dmac_size -= HERMON_PAGESIZE;
2288                                 } else {
2289                                         ddi_dma_nextcookie(khdl, &kcookie);
2290                                         kcookie_paddr = kcookie.dmac_laddress;
2291                                 }
2292                                 kaddr[i+j] = htonll(kcookie_paddr);
2293                                 j++;
2294                         }
2295                         kaddr[i+j] = htonll(addr | HERMON_MTT_ENTRY_PRESENT);
2296                         i++;
2297                         addr += pagesize;
2298                         if (addr == 0) {
2299                                 static int do_once = 1;
2300                                 if (do_once) {
2301                                         do_once = 0;
2302                                         cmn_err(CE_NOTE, "probable error in "
2303                                             "dma_cookie address: map_mem_area");
2304                                 }
2305                                 break;
2306                         }
2307                 }
2308                 if (cookie_cnt != 0)
2309                         ddi_dma_nextcookie(ma_hdl->h_ma_dmahdl, &dmacookie);
2310         }
2311 
2312         pmr = &reg_req->wr;
2313         pmr->pmr_len = len;
2314         pmr->pmr_offset = va_attrs->va_vaddr & PAGEOFFSET;
2315         pmr->pmr_buf_sz = PAGESHIFT; /* PRM says "Page Size", but... */
2316         pmr->pmr_num_buf = i;
2317         pmr->pmr_addr_list = &ma_hdl->h_ma_list_cookie;
2318 
2319         *ma_hdl_p = ma_hdl;
2320         return (IBT_SUCCESS);
2321 
2322 marea_fail5:
2323         status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
2324         if (status != DDI_SUCCESS)
2325                 HERMON_WARNING(state, "failed to unbind DMA mapping");
2326 marea_fail4:
2327         status = ddi_dma_unbind_handle(ma_hdl->h_ma_list_hdl);
2328         if (status != DDI_SUCCESS)
2329                 HERMON_WARNING(state, "failed to unbind DMA mapping");
2330 marea_fail3:
2331         ddi_dma_mem_free(&ma_hdl->h_ma_list_acc_hdl);
2332 marea_fail2:
2359                 status = ddi_dma_unbind_handle(ma_hdl->h_ma_list_hdl);
2360                 if (status != DDI_SUCCESS)
2361                         HERMON_WARNING(state, "failed to unbind DMA mapping");
2362                 ddi_dma_mem_free(&ma_hdl->h_ma_list_acc_hdl);
2363                 ddi_dma_free_handle(&ma_hdl->h_ma_list_hdl);
2364         } else {
2365                 kmem_free(ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len);
2366         }
2367         status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
2368         if (status != DDI_SUCCESS)
2369                 HERMON_WARNING(state, "failed to unbind DMA mapping");
2370         ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
2371         kmem_free(ma_hdl, sizeof (*ma_hdl));
2372         return (IBT_SUCCESS);
2373 }
2374 
2375 struct ibc_mi_s {
2376         int                     imh_len;
2377         ddi_dma_handle_t        imh_dmahandle[1];
2378 };
2379 
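
/*
 * Illustrative sketch (not part of this file): ibc_mi_s above ends in a
 * one-element array, the pre-C99 flexible-array idiom, which
 * hermon_ci_map_mem_iov() over-allocates with
 * kmem_alloc(sizeof (*mi_hdl) + (i - 1) * sizeof (ddi_dma_handle_t), ...).
 * The same pattern stand-alone; malloc() stands in for kmem_alloc(),
 * and n >= 1 is assumed.
 */
#include <stdlib.h>

struct ex_mi {			/* mirrors ibc_mi_s */
	int	len;
	void	*handle[1];	/* grows at allocation time */
};

static struct ex_mi *
ex_mi_alloc(int n)
{
	struct ex_mi *p;

	/* One handle is in the struct already, so allocate n - 1 extra. */
	p = malloc(sizeof (*p) + (size_t)(n - 1) * sizeof (void *));
	if (p != NULL)
		p->len = n;
	return (p);
}
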
2380 /*
2381  * hermon_ci_map_mem_iov()
2382  *    Map the memory described by an I/O vector
2383  *    Context: Can be called from interrupt or base context.
2384  */
2385 /* ARGSUSED */
2386 static ibt_status_t
2387 hermon_ci_map_mem_iov(ibc_hca_hdl_t hca, ibt_iov_attr_t *iov_attr,
2388     ibt_all_wr_t *wr, ibc_mi_hdl_t *mi_hdl_p)
2389 {
2390         int                     status;
2391         int                     i, j, nds, max_nds;
2392         uint_t                  len;
2393         ibt_status_t            ibt_status;
2394         ddi_dma_handle_t        dmahdl;
2395         ddi_dma_cookie_t        dmacookie;
2396         ddi_dma_attr_t          dma_attr;
2397         uint_t                  cookie_cnt;
2398         ibc_mi_hdl_t            mi_hdl;
2399         ibt_lkey_t              rsvd_lkey;
2400         ibt_wr_ds_t             *sgl;
2401         hermon_state_t          *state;
2402         int                     kmflag;
2403         int                     (*callback)(caddr_t);
2404 
2405         state = (hermon_state_t *)hca;
2406         hermon_dma_attr_init(state, &dma_attr);
2407 #ifdef  __sparc
2408         if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
2409                 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2410 
2411         if (hermon_kernel_data_ro == HERMON_RO_ENABLED)
2412                 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2413 #endif
2414 
2415         nds = 0;
2416         max_nds = iov_attr->iov_wr_nds;
2417         if (iov_attr->iov_lso_hdr_sz)
2418                 max_nds -= (iov_attr->iov_lso_hdr_sz + sizeof (uint32_t) +
2419                     0xf) >> 4;    /* 0xf is for rounding up to a multiple of 16 */
2420         rsvd_lkey = (iov_attr->iov_flags & IBT_IOV_ALT_LKEY) ?
2421             iov_attr->iov_alt_lkey : state->hs_devlim.rsv_lkey;
2422         if ((iov_attr->iov_flags & IBT_IOV_NOSLEEP) == 0) {
2423                 kmflag = KM_SLEEP;
2424                 callback = DDI_DMA_SLEEP;
2425         } else {
2426                 kmflag = KM_NOSLEEP;
2427                 callback = DDI_DMA_DONTWAIT;
2428         }
2429 
2430         if (iov_attr->iov_flags & IBT_IOV_BUF) {
2431                 mi_hdl = kmem_alloc(sizeof (*mi_hdl), kmflag);
2432                 if (mi_hdl == NULL)
2433                         return (IBT_INSUFF_RESOURCE);
2434                 sgl = wr->send.wr_sgl;
2435                 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2436                     callback, NULL, &dmahdl);
2437                 if (status != DDI_SUCCESS) {
2438                         kmem_free(mi_hdl, sizeof (*mi_hdl));
2439                         return (IBT_INSUFF_RESOURCE);
2440                 }
2441                 status = ddi_dma_buf_bind_handle(dmahdl, iov_attr->iov_buf,
2442                     DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
2443                     &dmacookie, &cookie_cnt);
2444                 if (status != DDI_DMA_MAPPED) {
2445                         ddi_dma_free_handle(&dmahdl);
2446                         kmem_free(mi_hdl, sizeof (*mi_hdl));
2447                         return (ibc_get_ci_failure(0));
2448                 }
2449                 while (cookie_cnt-- > 0) {
2450                         if (nds >= max_nds) {
2451                                 status = ddi_dma_unbind_handle(dmahdl);
2452                                 if (status != DDI_SUCCESS)
2453                                         HERMON_WARNING(state, "failed to "
2454                                             "unbind DMA mapping");
2456                                 return (IBT_SGL_TOO_SMALL);
2457                         }
2458                         sgl[nds].ds_va = dmacookie.dmac_laddress;
2459                         sgl[nds].ds_key = rsvd_lkey;
2460                         sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
2461                         nds++;
2462                         if (cookie_cnt != 0)
2463                                 ddi_dma_nextcookie(dmahdl, &dmacookie);
2464                 }
2465                 wr->send.wr_nds = nds;
2466                 mi_hdl->imh_len = 1;
2467                 mi_hdl->imh_dmahandle[0] = dmahdl;
2468                 *mi_hdl_p = mi_hdl;
2469                 return (IBT_SUCCESS);
2470         }
2471 
2472         if (iov_attr->iov_flags & IBT_IOV_RECV)
2473                 sgl = wr->recv.wr_sgl;
2474         else
2475                 sgl = wr->send.wr_sgl;
2476 
2477         len = iov_attr->iov_list_len;
2478         for (i = 0, j = 0; j < len; j++) {
2479                 if (iov_attr->iov[j].iov_len == 0)
2480                         continue;
2481                 i++;
2482         }
2483         mi_hdl = kmem_alloc(sizeof (*mi_hdl) +
2484             (i - 1) * sizeof (ddi_dma_handle_t), kmflag);
2485         if (mi_hdl == NULL)
2486                 return (IBT_INSUFF_RESOURCE);
2487         mi_hdl->imh_len = i;
2488         for (i = 0, j = 0; j < len; j++) {
2489                 if (iov_attr->iov[j].iov_len == 0)
2490                         continue;
2491                 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2492                     callback, NULL, &dmahdl);
2493                 if (status != DDI_SUCCESS) {
2494                         ibt_status = IBT_INSUFF_RESOURCE;
2495                         goto fail2;
2563             (mi_hdl->imh_len - 1) * sizeof (ddi_dma_handle_t));
2564         return (IBT_SUCCESS);
2565 }
2566 
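
/*
 * Illustrative sketch (not part of this file): near the top of
 * hermon_ci_map_mem_iov(), max_nds is reduced by
 * (iov_lso_hdr_sz + sizeof (uint32_t) + 0xf) >> 4, i.e. the LSO header
 * plus a 4-byte field, rounded up to 16-byte units -- presumably
 * because send WQEs are built from 16-byte segments.  The arithmetic
 * in isolation:
 */
#include <stdio.h>
#include <stdint.h>

/* Number of 16-byte WQE units consumed by an LSO header of hdr_sz bytes. */
static unsigned int
lso_wqe_units(unsigned int hdr_sz)
{
	return ((hdr_sz + (unsigned int)sizeof (uint32_t) + 0xf) >> 4);
}

int
main(void)
{
	/* 42-byte header: 42 + 4 = 46, rounds up to 48, i.e. 3 units */
	printf("%u\n", lso_wqe_units(42));
	return (0);
}
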
2567 /*
2568  * hermon_ci_alloc_lkey()
2569  *    Allocate an empty memory region for use with FRWR.
2570  *    Context: Can be called from user or base context.
2571  */
2572 /* ARGSUSED */
2573 static ibt_status_t
2574 hermon_ci_alloc_lkey(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2575     ibt_lkey_flags_t flags, uint_t list_sz, ibc_mr_hdl_t *mr_p,
2576     ibt_pmr_desc_t *mem_desc_p)
2577 {
2578         hermon_state_t          *state;
2579         hermon_pdhdl_t          pdhdl;
2580         hermon_mrhdl_t          mrhdl;
2581         int                     status;
2582 
2583         ASSERT(mr_p != NULL);
2584         ASSERT(mem_desc_p != NULL);
2585 
2586         state = (hermon_state_t *)hca;
2587         pdhdl = (hermon_pdhdl_t)pd;
2588 
2589         if (!(state->hs_ibtfinfo.hca_attr->hca_flags2 & IBT_HCA2_MEM_MGT_EXT))
2590                 return (IBT_NOT_SUPPORTED);
2591 
2592         status = hermon_mr_alloc_lkey(state, pdhdl, flags, list_sz, &mrhdl);
2593         if (status != DDI_SUCCESS) {
2594                 return (status);
2595         }
2596 
2597         /* Fill in the mem_desc_p structure */
2598         mem_desc_p->pmd_iova = 0;
2599         mem_desc_p->pmd_phys_buf_list_sz = list_sz;
2600         mem_desc_p->pmd_lkey = mrhdl->mr_lkey;
2601         /* Only set RKey if remote access was requested */
2602         if (flags & IBT_KEY_REMOTE) {
2603                 mem_desc_p->pmd_rkey = mrhdl->mr_rkey;
2604         }
2605         mem_desc_p->pmd_sync_required = B_FALSE;
2606 
2607         /* Return the Hermon MR handle */
2608         *mr_p = (ibc_mr_hdl_t)mrhdl;
2609         return (IBT_SUCCESS);
2610 }
2611 
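
/*
 * Illustrative sketch (not part of this file): hermon_ci_alloc_lkey()
 * gates on IBT_HCA2_MEM_MGT_EXT in hca_flags2, so an FRWR consumer
 * should test the same bit before calling the corresponding IBTF
 * wrapper.  The ibt_query_hca() signature is assumed from IBTF.
 */
static boolean_t
hca_supports_frwr(ibt_hca_hdl_t hca)
{
	ibt_hca_attr_t	attr;

	if (ibt_query_hca(hca, &attr) != IBT_SUCCESS)
		return (B_FALSE);
	return ((attr.hca_flags2 & IBT_HCA2_MEM_MGT_EXT) != 0);
}
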
2612 /* Physical Register Memory Region */
2613 /*
2614  * hermon_ci_register_physical_mr()
2615  */
2736 
2737         ASSERT(mem_pattr != NULL);
2738         ASSERT(mr_p != NULL);
2739         ASSERT(mem_desc_p != NULL);
2740 
2741         /* Grab the Hermon softstate pointer */
2742         state = (hermon_state_t *)hca;
2743 
2744         fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;
2745 
2746         status = hermon_register_physical_fmr(state, fmrpoolhdl, mem_pattr,
2747             &mrhdl, mem_desc_p);
2748         if (status != DDI_SUCCESS) {
2749                 return (status);
2750         }
2751 
2752         /*
2753          * If the region is mapped for streaming (i.e. noncoherent), flag
2754          * that explicit sync is required
2755          */
2756         mem_desc_p->pmd_sync_required = (mrhdl->mr_bindinfo.bi_flags &
2757             IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
2758         if (mem_desc_p->pmd_sync_required == B_TRUE) {
2759                 /* Fill in DMA handle for future sync operations */
2760                 mrhdl->mr_bindinfo.bi_dmahdl =
2761                     (ddi_dma_handle_t)mem_pattr->pmr_ma;
2762         }
2763 
2764         /* Return the Hermon MR handle */
2765         *mr_p = (ibc_mr_hdl_t)mrhdl;
2766 
2767         return (IBT_SUCCESS);
2768 }
2769 
2770 /*
2771  * hermon_ci_deregister_fmr()
2772  *    Move an FMR (specified by 'mr') to the deregistered state.
2773  *    Context: Can be called from base context only.
2774  */
2775 static ibt_status_t
2776 hermon_ci_deregister_fmr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
2777 {
2778         hermon_state_t          *state;
2779         hermon_mrhdl_t          mrhdl;