1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 24 */ 25 26 /* 27 * tavor_ci.c 28 * Tavor Channel Interface (CI) Routines 29 * 30 * Implements all the routines necessary to interface with the IBTF. 31 * Pointers to all of these functions are passed to the IBTF at attach() 32 * time in the ibc_operations_t structure. These functions include all 33 * of the necessary routines to implement the required InfiniBand "verbs" 34 * and additional IBTF-specific interfaces. 
35 */ 36 37 #include <sys/types.h> 38 #include <sys/conf.h> 39 #include <sys/ddi.h> 40 #include <sys/sunddi.h> 41 42 #include <sys/ib/adapters/tavor/tavor.h> 43 44 /* HCA and port related operations */ 45 static ibt_status_t tavor_ci_query_hca_ports(ibc_hca_hdl_t, uint8_t, 46 ibt_hca_portinfo_t *); 47 static ibt_status_t tavor_ci_modify_ports(ibc_hca_hdl_t, uint8_t, 48 ibt_port_modify_flags_t, uint8_t); 49 static ibt_status_t tavor_ci_modify_system_image(ibc_hca_hdl_t, ib_guid_t); 50 51 /* Protection Domains */ 52 static ibt_status_t tavor_ci_alloc_pd(ibc_hca_hdl_t, ibt_pd_flags_t, 53 ibc_pd_hdl_t *); 54 static ibt_status_t tavor_ci_free_pd(ibc_hca_hdl_t, ibc_pd_hdl_t); 55 56 /* Reliable Datagram Domains */ 57 static ibt_status_t tavor_ci_alloc_rdd(ibc_hca_hdl_t, ibc_rdd_flags_t, 58 ibc_rdd_hdl_t *); 59 static ibt_status_t tavor_ci_free_rdd(ibc_hca_hdl_t, ibc_rdd_hdl_t); 60 61 /* Address Handles */ 62 static ibt_status_t tavor_ci_alloc_ah(ibc_hca_hdl_t, ibt_ah_flags_t, 63 ibc_pd_hdl_t, ibt_adds_vect_t *, ibc_ah_hdl_t *); 64 static ibt_status_t tavor_ci_free_ah(ibc_hca_hdl_t, ibc_ah_hdl_t); 65 static ibt_status_t tavor_ci_query_ah(ibc_hca_hdl_t, ibc_ah_hdl_t, 66 ibc_pd_hdl_t *, ibt_adds_vect_t *); 67 static ibt_status_t tavor_ci_modify_ah(ibc_hca_hdl_t, ibc_ah_hdl_t, 68 ibt_adds_vect_t *); 69 70 /* Queue Pairs */ 71 static ibt_status_t tavor_ci_alloc_qp(ibc_hca_hdl_t, ibtl_qp_hdl_t, 72 ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *, ib_qpn_t *, 73 ibc_qp_hdl_t *); 74 static ibt_status_t tavor_ci_alloc_special_qp(ibc_hca_hdl_t, uint8_t, 75 ibtl_qp_hdl_t, ibt_sqp_type_t, ibt_qp_alloc_attr_t *, 76 ibt_chan_sizes_t *, ibc_qp_hdl_t *); 77 static ibt_status_t tavor_ci_alloc_qp_range(ibc_hca_hdl_t, uint_t, 78 ibtl_qp_hdl_t *, ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *, 79 ibc_cq_hdl_t *, ibc_cq_hdl_t *, ib_qpn_t *, ibc_qp_hdl_t *); 80 static ibt_status_t tavor_ci_free_qp(ibc_hca_hdl_t, ibc_qp_hdl_t, 81 ibc_free_qp_flags_t, ibc_qpn_hdl_t *); 82 
static ibt_status_t tavor_ci_release_qpn(ibc_hca_hdl_t, ibc_qpn_hdl_t); 83 static ibt_status_t tavor_ci_query_qp(ibc_hca_hdl_t, ibc_qp_hdl_t, 84 ibt_qp_query_attr_t *); 85 static ibt_status_t tavor_ci_modify_qp(ibc_hca_hdl_t, ibc_qp_hdl_t, 86 ibt_cep_modify_flags_t, ibt_qp_info_t *, ibt_queue_sizes_t *); 87 88 /* Completion Queues */ 89 static ibt_status_t tavor_ci_alloc_cq(ibc_hca_hdl_t, ibt_cq_hdl_t, 90 ibt_cq_attr_t *, ibc_cq_hdl_t *, uint_t *); 91 static ibt_status_t tavor_ci_free_cq(ibc_hca_hdl_t, ibc_cq_hdl_t); 92 static ibt_status_t tavor_ci_query_cq(ibc_hca_hdl_t, ibc_cq_hdl_t, uint_t *, 93 uint_t *, uint_t *, ibt_cq_handler_id_t *); 94 static ibt_status_t tavor_ci_resize_cq(ibc_hca_hdl_t, ibc_cq_hdl_t, 95 uint_t, uint_t *); 96 static ibt_status_t tavor_ci_modify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t, 97 uint_t, uint_t, ibt_cq_handler_id_t); 98 static ibt_status_t tavor_ci_alloc_cq_sched(ibc_hca_hdl_t, 99 ibt_cq_sched_attr_t *, ibc_sched_hdl_t *); 100 static ibt_status_t tavor_ci_free_cq_sched(ibc_hca_hdl_t, ibc_sched_hdl_t); 101 102 /* EE Contexts */ 103 static ibt_status_t tavor_ci_alloc_eec(ibc_hca_hdl_t, ibc_eec_flags_t, 104 ibt_eec_hdl_t, ibc_rdd_hdl_t, ibc_eec_hdl_t *); 105 static ibt_status_t tavor_ci_free_eec(ibc_hca_hdl_t, ibc_eec_hdl_t); 106 static ibt_status_t tavor_ci_query_eec(ibc_hca_hdl_t, ibc_eec_hdl_t, 107 ibt_eec_query_attr_t *); 108 static ibt_status_t tavor_ci_modify_eec(ibc_hca_hdl_t, ibc_eec_hdl_t, 109 ibt_cep_modify_flags_t, ibt_eec_info_t *); 110 111 /* Memory Registration */ 112 static ibt_status_t tavor_ci_register_mr(ibc_hca_hdl_t, ibc_pd_hdl_t, 113 ibt_mr_attr_t *, void *, ibc_mr_hdl_t *, ibt_mr_desc_t *); 114 static ibt_status_t tavor_ci_register_buf(ibc_hca_hdl_t, ibc_pd_hdl_t, 115 ibt_smr_attr_t *, struct buf *, void *, ibt_mr_hdl_t *, ibt_mr_desc_t *); 116 static ibt_status_t tavor_ci_register_shared_mr(ibc_hca_hdl_t, 117 ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_smr_attr_t *, void *, 118 ibc_mr_hdl_t *, ibt_mr_desc_t *); 119 static 
ibt_status_t tavor_ci_deregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t); 120 static ibt_status_t tavor_ci_query_mr(ibc_hca_hdl_t, ibc_mr_hdl_t, 121 ibt_mr_query_attr_t *); 122 static ibt_status_t tavor_ci_reregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t, 123 ibc_pd_hdl_t, ibt_mr_attr_t *, void *, ibc_mr_hdl_t *, 124 ibt_mr_desc_t *); 125 static ibt_status_t tavor_ci_reregister_buf(ibc_hca_hdl_t, ibc_mr_hdl_t, 126 ibc_pd_hdl_t, ibt_smr_attr_t *, struct buf *, void *, ibc_mr_hdl_t *, 127 ibt_mr_desc_t *); 128 static ibt_status_t tavor_ci_sync_mr(ibc_hca_hdl_t, ibt_mr_sync_t *, size_t); 129 static ibt_status_t tavor_ci_register_dma_mr(ibc_hca_hdl_t, ibc_pd_hdl_t, 130 ibt_dmr_attr_t *, void *, ibc_mr_hdl_t *, ibt_mr_desc_t *); 131 132 /* Memory Windows */ 133 static ibt_status_t tavor_ci_alloc_mw(ibc_hca_hdl_t, ibc_pd_hdl_t, 134 ibt_mw_flags_t, ibc_mw_hdl_t *, ibt_rkey_t *); 135 static ibt_status_t tavor_ci_free_mw(ibc_hca_hdl_t, ibc_mw_hdl_t); 136 static ibt_status_t tavor_ci_query_mw(ibc_hca_hdl_t, ibc_mw_hdl_t, 137 ibt_mw_query_attr_t *); 138 139 /* Multicast Groups */ 140 static ibt_status_t tavor_ci_attach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t, 141 ib_gid_t, ib_lid_t); 142 static ibt_status_t tavor_ci_detach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t, 143 ib_gid_t, ib_lid_t); 144 145 /* Work Request and Completion Processing */ 146 static ibt_status_t tavor_ci_post_send(ibc_hca_hdl_t, ibc_qp_hdl_t, 147 ibt_send_wr_t *, uint_t, uint_t *); 148 static ibt_status_t tavor_ci_post_recv(ibc_hca_hdl_t, ibc_qp_hdl_t, 149 ibt_recv_wr_t *, uint_t, uint_t *); 150 static ibt_status_t tavor_ci_poll_cq(ibc_hca_hdl_t, ibc_cq_hdl_t, 151 ibt_wc_t *, uint_t, uint_t *); 152 static ibt_status_t tavor_ci_notify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t, 153 ibt_cq_notify_flags_t); 154 155 /* CI Object Private Data */ 156 static ibt_status_t tavor_ci_ci_data_in(ibc_hca_hdl_t, ibt_ci_data_flags_t, 157 ibt_object_type_t, void *, void *, size_t); 158 159 /* CI Object Private Data */ 160 static ibt_status_t 
tavor_ci_ci_data_out(ibc_hca_hdl_t, ibt_ci_data_flags_t, 161 ibt_object_type_t, void *, void *, size_t); 162 163 /* Shared Receive Queues */ 164 static ibt_status_t tavor_ci_alloc_srq(ibc_hca_hdl_t, ibt_srq_flags_t, 165 ibt_srq_hdl_t, ibc_pd_hdl_t, ibt_srq_sizes_t *, ibc_srq_hdl_t *, 166 ibt_srq_sizes_t *); 167 static ibt_status_t tavor_ci_free_srq(ibc_hca_hdl_t, ibc_srq_hdl_t); 168 static ibt_status_t tavor_ci_query_srq(ibc_hca_hdl_t, ibc_srq_hdl_t, 169 ibc_pd_hdl_t *, ibt_srq_sizes_t *, uint_t *); 170 static ibt_status_t tavor_ci_modify_srq(ibc_hca_hdl_t, ibc_srq_hdl_t, 171 ibt_srq_modify_flags_t, uint_t, uint_t, uint_t *); 172 static ibt_status_t tavor_ci_post_srq(ibc_hca_hdl_t, ibc_srq_hdl_t, 173 ibt_recv_wr_t *, uint_t, uint_t *); 174 175 /* Address translation */ 176 static ibt_status_t tavor_ci_map_mem_area(ibc_hca_hdl_t, ibt_va_attr_t *, 177 void *, uint_t, ibt_reg_req_t *, ibc_ma_hdl_t *); 178 static ibt_status_t tavor_ci_unmap_mem_area(ibc_hca_hdl_t, ibc_ma_hdl_t); 179 static ibt_status_t tavor_ci_map_mem_iov(ibc_hca_hdl_t, ibt_iov_attr_t *, 180 ibt_all_wr_t *, ibc_mi_hdl_t *); 181 static ibt_status_t tavor_ci_unmap_mem_iov(ibc_hca_hdl_t, ibc_mi_hdl_t); 182 183 /* Allocate L_Key */ 184 static ibt_status_t tavor_ci_alloc_lkey(ibc_hca_hdl_t, ibc_pd_hdl_t, 185 ibt_lkey_flags_t, uint_t, ibc_mr_hdl_t *, ibt_pmr_desc_t *); 186 187 /* Physical Register Memory Region */ 188 static ibt_status_t tavor_ci_register_physical_mr(ibc_hca_hdl_t, ibc_pd_hdl_t, 189 ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *, ibt_pmr_desc_t *); 190 static ibt_status_t tavor_ci_reregister_physical_mr(ibc_hca_hdl_t, 191 ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *, 192 ibt_pmr_desc_t *); 193 194 /* Mellanox FMR */ 195 static ibt_status_t tavor_ci_create_fmr_pool(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, 196 ibt_fmr_pool_attr_t *fmr_params, ibc_fmr_pool_hdl_t *fmr_pool); 197 static ibt_status_t tavor_ci_destroy_fmr_pool(ibc_hca_hdl_t hca, 198 ibc_fmr_pool_hdl_t fmr_pool); 199 
static ibt_status_t tavor_ci_flush_fmr_pool(ibc_hca_hdl_t hca, 200 ibc_fmr_pool_hdl_t fmr_pool); 201 static ibt_status_t tavor_ci_register_physical_fmr(ibc_hca_hdl_t hca, 202 ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr, 203 void *ibtl_reserved, ibc_mr_hdl_t *mr_hdl_p, ibt_pmr_desc_t *mem_desc_p); 204 static ibt_status_t tavor_ci_deregister_fmr(ibc_hca_hdl_t hca, 205 ibc_mr_hdl_t mr); 206 207 static ibt_status_t tavor_ci_alloc_io_mem(ibc_hca_hdl_t, size_t, 208 ibt_mr_flags_t, caddr_t *, ibc_mem_alloc_hdl_t *); 209 static ibt_status_t tavor_ci_free_io_mem(ibc_hca_hdl_t, ibc_mem_alloc_hdl_t); 210 static int tavor_mem_alloc(tavor_state_t *, size_t, ibt_mr_flags_t, 211 caddr_t *, tavor_mem_alloc_hdl_t *); 212 213 static ibt_status_t tavor_ci_not_supported(); 214 215 /* 216 * This ibc_operations_t structure includes pointers to all the entry points 217 * provided by the Tavor driver. This structure is passed to the IBTF at 218 * driver attach time, using the ibc_attach() call. 219 */ 220 ibc_operations_t tavor_ibc_ops = { 221 /* HCA and port related operations */ 222 tavor_ci_query_hca_ports, 223 tavor_ci_modify_ports, 224 tavor_ci_modify_system_image, 225 226 /* Protection Domains */ 227 tavor_ci_alloc_pd, 228 tavor_ci_free_pd, 229 230 /* Reliable Datagram Domains */ 231 tavor_ci_alloc_rdd, 232 tavor_ci_free_rdd, 233 234 /* Address Handles */ 235 tavor_ci_alloc_ah, 236 tavor_ci_free_ah, 237 tavor_ci_query_ah, 238 tavor_ci_modify_ah, 239 240 /* Queue Pairs */ 241 tavor_ci_alloc_qp, 242 tavor_ci_alloc_special_qp, 243 tavor_ci_alloc_qp_range, 244 tavor_ci_free_qp, 245 tavor_ci_release_qpn, 246 tavor_ci_query_qp, 247 tavor_ci_modify_qp, 248 249 /* Completion Queues */ 250 tavor_ci_alloc_cq, 251 tavor_ci_free_cq, 252 tavor_ci_query_cq, 253 tavor_ci_resize_cq, 254 tavor_ci_modify_cq, 255 tavor_ci_alloc_cq_sched, 256 tavor_ci_free_cq_sched, 257 tavor_ci_not_supported, /* query_cq_handler_id */ 258 259 /* EE Contexts */ 260 tavor_ci_alloc_eec, 261 tavor_ci_free_eec, 
262 tavor_ci_query_eec, 263 tavor_ci_modify_eec, 264 265 /* Memory Registration */ 266 tavor_ci_register_mr, 267 tavor_ci_register_buf, 268 tavor_ci_register_shared_mr, 269 tavor_ci_deregister_mr, 270 tavor_ci_query_mr, 271 tavor_ci_reregister_mr, 272 tavor_ci_reregister_buf, 273 tavor_ci_sync_mr, 274 275 /* Memory Windows */ 276 tavor_ci_alloc_mw, 277 tavor_ci_free_mw, 278 tavor_ci_query_mw, 279 280 /* Multicast Groups */ 281 tavor_ci_attach_mcg, 282 tavor_ci_detach_mcg, 283 284 /* Work Request and Completion Processing */ 285 tavor_ci_post_send, 286 tavor_ci_post_recv, 287 tavor_ci_poll_cq, 288 tavor_ci_notify_cq, 289 290 /* CI Object Mapping Data */ 291 tavor_ci_ci_data_in, 292 tavor_ci_ci_data_out, 293 294 /* Shared Receive Queue */ 295 tavor_ci_alloc_srq, 296 tavor_ci_free_srq, 297 tavor_ci_query_srq, 298 tavor_ci_modify_srq, 299 tavor_ci_post_srq, 300 301 /* Address translation */ 302 tavor_ci_map_mem_area, 303 tavor_ci_unmap_mem_area, 304 tavor_ci_map_mem_iov, 305 tavor_ci_unmap_mem_iov, 306 307 /* Allocate L_key */ 308 tavor_ci_alloc_lkey, 309 310 /* Physical Register Memory Region */ 311 tavor_ci_register_physical_mr, 312 tavor_ci_reregister_physical_mr, 313 314 /* Mellanox FMR */ 315 tavor_ci_create_fmr_pool, 316 tavor_ci_destroy_fmr_pool, 317 tavor_ci_flush_fmr_pool, 318 tavor_ci_register_physical_fmr, 319 tavor_ci_deregister_fmr, 320 321 /* dmable memory */ 322 tavor_ci_alloc_io_mem, 323 tavor_ci_free_io_mem, 324 325 /* XRC not yet supported */ 326 tavor_ci_not_supported, /* ibc_alloc_xrc_domain */ 327 tavor_ci_not_supported, /* ibc_free_xrc_domain */ 328 tavor_ci_not_supported, /* ibc_alloc_xrc_srq */ 329 tavor_ci_not_supported, /* ibc_free_xrc_srq */ 330 tavor_ci_not_supported, /* ibc_query_xrc_srq */ 331 tavor_ci_not_supported, /* ibc_modify_xrc_srq */ 332 tavor_ci_not_supported, /* ibc_alloc_xrc_tgt_qp */ 333 tavor_ci_not_supported, /* ibc_free_xrc_tgt_qp */ 334 tavor_ci_not_supported, /* ibc_query_xrc_tgt_qp */ 335 tavor_ci_not_supported, /* 
ibc_modify_xrc_tgt_qp */ 336 337 /* Memory Region (physical) */ 338 tavor_ci_register_dma_mr, 339 340 /* Next enhancements */ 341 tavor_ci_not_supported, /* ibc_enhancement1 */ 342 tavor_ci_not_supported, /* ibc_enhancement2 */ 343 tavor_ci_not_supported, /* ibc_enhancement3 */ 344 tavor_ci_not_supported, /* ibc_enhancement4 */ 345 }; 346 347 /* 348 * Not yet implemented OPS 349 */ 350 /* ARGSUSED */ 351 static ibt_status_t 352 tavor_ci_not_supported() 353 { 354 return (IBT_NOT_SUPPORTED); 355 } 356 357 358 /* 359 * tavor_ci_query_hca_ports() 360 * Returns HCA port attributes for either one or all of the HCA's ports. 361 * Context: Can be called only from user or kernel context. 362 */ 363 static ibt_status_t 364 tavor_ci_query_hca_ports(ibc_hca_hdl_t hca, uint8_t query_port, 365 ibt_hca_portinfo_t *info_p) 366 { 367 tavor_state_t *state; 368 uint_t start, end, port; 369 int status, indx; 370 371 TAVOR_TNF_ENTER(tavor_ci_query_hca_ports); 372 373 /* Check for valid HCA handle */ 374 if (hca == NULL) { 375 TNF_PROBE_0(tavor_ci_query_hca_ports_invhca_fail, 376 TAVOR_TNF_ERROR, ""); 377 TAVOR_TNF_EXIT(tavor_ci_query_port); 378 return (IBT_HCA_HDL_INVALID); 379 } 380 381 /* Grab the Tavor softstate pointer */ 382 state = (tavor_state_t *)hca; 383 384 /* 385 * If the specified port is zero, then we are supposed to query all 386 * ports. Otherwise, we query only the port number specified. 387 * Setup the start and end port numbers as appropriate for the loop 388 * below. Note: The first Tavor port is port number one (1). 
389 */ 390 if (query_port == 0) { 391 start = 1; 392 end = start + (state->ts_cfg_profile->cp_num_ports - 1); 393 } else { 394 end = start = query_port; 395 } 396 397 /* Query the port(s) */ 398 for (port = start, indx = 0; port <= end; port++, indx++) { 399 status = tavor_port_query(state, port, &info_p[indx]); 400 if (status != DDI_SUCCESS) { 401 TNF_PROBE_1(tavor_port_query_fail, TAVOR_TNF_ERROR, 402 "", tnf_uint, status, status); 403 TAVOR_TNF_EXIT(tavor_ci_query_hca_ports); 404 return (status); 405 } 406 } 407 408 TAVOR_TNF_EXIT(tavor_ci_query_hca_ports); 409 return (IBT_SUCCESS); 410 } 411 412 413 /* 414 * tavor_ci_modify_ports() 415 * Modify HCA port attributes 416 * Context: Can be called only from user or kernel context. 417 */ 418 static ibt_status_t 419 tavor_ci_modify_ports(ibc_hca_hdl_t hca, uint8_t port, 420 ibt_port_modify_flags_t flags, uint8_t init_type) 421 { 422 tavor_state_t *state; 423 int status; 424 425 TAVOR_TNF_ENTER(tavor_ci_modify_ports); 426 427 /* Check for valid HCA handle */ 428 if (hca == NULL) { 429 TNF_PROBE_0(tavor_ci_modify_ports_invhca_fail, 430 TAVOR_TNF_ERROR, ""); 431 TAVOR_TNF_EXIT(tavor_ci_modify_ports); 432 return (IBT_HCA_HDL_INVALID); 433 } 434 435 /* Grab the Tavor softstate pointer */ 436 state = (tavor_state_t *)hca; 437 438 /* Modify the port(s) */ 439 status = tavor_port_modify(state, port, flags, init_type); 440 if (status != DDI_SUCCESS) { 441 TNF_PROBE_1(tavor_ci_modify_ports_fail, 442 TAVOR_TNF_ERROR, "", tnf_uint, status, status); 443 TAVOR_TNF_EXIT(tavor_ci_modify_ports); 444 return (status); 445 } 446 447 TAVOR_TNF_EXIT(tavor_ci_modify_ports); 448 return (IBT_SUCCESS); 449 } 450 451 /* 452 * tavor_ci_modify_system_image() 453 * Modify the System Image GUID 454 * Context: Can be called only from user or kernel context. 
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_modify_system_image(ibc_hca_hdl_t hca, ib_guid_t sys_guid)
{
	TAVOR_TNF_ENTER(tavor_ci_modify_system_image);

	/*
	 * This is an unsupported interface for the Tavor driver.  This
	 * interface is necessary to support modification of the System
	 * Image GUID.  Tavor is only capable of modifying this parameter
	 * once (during driver initialization).
	 */

	TAVOR_TNF_EXIT(tavor_ci_modify_system_image);
	return (IBT_NOT_SUPPORTED);
}

/*
 * tavor_ci_alloc_pd()
 *    Allocate a Protection Domain
 *    On success, returns the new PD through "pd_p" (as an opaque CI handle).
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_alloc_pd(ibc_hca_hdl_t hca, ibt_pd_flags_t flags, ibc_pd_hdl_t *pd_p)
{
	tavor_state_t	*state;
	tavor_pdhdl_t	pdhdl;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_alloc_pd);

	ASSERT(pd_p != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_alloc_pd_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_alloc_pd);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer */
	state = (tavor_state_t *)hca;

	/* Allocate the PD (TAVOR_NOSLEEP: does not block for resources) */
	status = tavor_pd_alloc(state, &pdhdl, TAVOR_NOSLEEP);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_alloc_pd_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_alloc_pd);
		return (status);
	}

	/* Return the Tavor PD handle */
	*pd_p = (ibc_pd_hdl_t)pdhdl;

	TAVOR_TNF_EXIT(tavor_ci_alloc_pd);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_free_pd()
 *    Free a Protection Domain
 *    Context: Can be called only from user or kernel context
 */
static ibt_status_t
tavor_ci_free_pd(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd)
{
	tavor_state_t	*state;
	tavor_pdhdl_t	pdhdl;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_free_pd);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_free_pd_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_free_pd);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		TNF_PROBE_0(tavor_ci_free_pd_invpdhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_free_pd);
		return (IBT_PD_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and PD handle */
	state = (tavor_state_t *)hca;
	pdhdl = (tavor_pdhdl_t)pd;

	/* Free the PD */
	status = tavor_pd_free(state, &pdhdl);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_free_pd_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_free_pd);
		return (status);
	}

	TAVOR_TNF_EXIT(tavor_ci_free_pd);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_alloc_rdd()
 *    Allocate a Reliable Datagram Domain
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_alloc_rdd(ibc_hca_hdl_t hca, ibc_rdd_flags_t flags,
    ibc_rdd_hdl_t *rdd_p)
{
	TAVOR_TNF_ENTER(tavor_ci_alloc_rdd);

	/*
	 * This is an unsupported interface for the Tavor driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Tavor does not support RD.
	 */

	TAVOR_TNF_EXIT(tavor_ci_alloc_rdd);
	return (IBT_NOT_SUPPORTED);
}


/*
 * tavor_ci_free_rdd()
 *    Free a Reliable Datagram Domain
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_free_rdd(ibc_hca_hdl_t hca, ibc_rdd_hdl_t rdd)
{
	TAVOR_TNF_ENTER(tavor_ci_free_rdd);

	/*
	 * This is an unsupported interface for the Tavor driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Tavor does not support RD.
	 */

	TAVOR_TNF_EXIT(tavor_ci_free_rdd);
	return (IBT_NOT_SUPPORTED);
}


/*
 * tavor_ci_alloc_ah()
 *    Allocate an Address Handle
 *    On success, returns the new AH through "ah_p" (as an opaque CI handle).
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_alloc_ah(ibc_hca_hdl_t hca, ibt_ah_flags_t flags, ibc_pd_hdl_t pd,
    ibt_adds_vect_t *attr_p, ibc_ah_hdl_t *ah_p)
{
	tavor_state_t	*state;
	tavor_ahhdl_t	ahhdl;
	tavor_pdhdl_t	pdhdl;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_alloc_ah);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_alloc_ah_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_alloc_ah);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		TNF_PROBE_0(tavor_ci_alloc_ah_invpdhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_alloc_ah);
		return (IBT_PD_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and PD handle */
	state = (tavor_state_t *)hca;
	pdhdl = (tavor_pdhdl_t)pd;

	/* Allocate the AH (TAVOR_NOSLEEP: does not block for resources) */
	status = tavor_ah_alloc(state, pdhdl, attr_p, &ahhdl, TAVOR_NOSLEEP);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_alloc_ah_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_alloc_ah);
		return (status);
	}

	/* Return the Tavor AH handle */
	*ah_p = (ibc_ah_hdl_t)ahhdl;

	TAVOR_TNF_EXIT(tavor_ci_alloc_ah);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_free_ah()
 *    Free an Address Handle
 *    Context: Can be called only from user or kernel context.
669 */ 670 static ibt_status_t 671 tavor_ci_free_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah) 672 { 673 tavor_state_t *state; 674 tavor_ahhdl_t ahhdl; 675 int status; 676 677 TAVOR_TNF_ENTER(tavor_ci_free_ah); 678 679 /* Check for valid HCA handle */ 680 if (hca == NULL) { 681 TNF_PROBE_0(tavor_ci_free_ah_invhca_fail, 682 TAVOR_TNF_ERROR, ""); 683 TAVOR_TNF_EXIT(tavor_ci_free_ah); 684 return (IBT_HCA_HDL_INVALID); 685 } 686 687 /* Check for valid address handle pointer */ 688 if (ah == NULL) { 689 TNF_PROBE_0(tavor_ci_free_ah_invahhdl_fail, 690 TAVOR_TNF_ERROR, ""); 691 TAVOR_TNF_EXIT(tavor_ci_free_ah); 692 return (IBT_AH_HDL_INVALID); 693 } 694 695 /* Grab the Tavor softstate pointer and AH handle */ 696 state = (tavor_state_t *)hca; 697 ahhdl = (tavor_ahhdl_t)ah; 698 699 /* Free the AH */ 700 status = tavor_ah_free(state, &ahhdl, TAVOR_NOSLEEP); 701 if (status != DDI_SUCCESS) { 702 TNF_PROBE_1(tavor_ci_free_ah_fail, TAVOR_TNF_ERROR, "", 703 tnf_uint, status, status); 704 TAVOR_TNF_EXIT(tavor_ci_free_ah); 705 return (status); 706 } 707 708 TAVOR_TNF_EXIT(tavor_ci_free_ah); 709 return (IBT_SUCCESS); 710 } 711 712 713 /* 714 * tavor_ci_query_ah() 715 * Return the Address Vector information for a specified Address Handle 716 * Context: Can be called from interrupt or base context. 
717 */ 718 static ibt_status_t 719 tavor_ci_query_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibc_pd_hdl_t *pd_p, 720 ibt_adds_vect_t *attr_p) 721 { 722 tavor_state_t *state; 723 tavor_ahhdl_t ahhdl; 724 tavor_pdhdl_t pdhdl; 725 int status; 726 727 TAVOR_TNF_ENTER(tavor_ci_query_ah); 728 729 /* Check for valid HCA handle */ 730 if (hca == NULL) { 731 TNF_PROBE_0(tavor_ci_query_ah_invhca_fail, 732 TAVOR_TNF_ERROR, ""); 733 TAVOR_TNF_EXIT(tavor_ci_query_ah); 734 return (IBT_HCA_HDL_INVALID); 735 } 736 737 /* Check for valid address handle pointer */ 738 if (ah == NULL) { 739 TNF_PROBE_0(tavor_ci_query_ah_invahhdl_fail, 740 TAVOR_TNF_ERROR, ""); 741 TAVOR_TNF_EXIT(tavor_ci_query_ah); 742 return (IBT_AH_HDL_INVALID); 743 } 744 745 /* Grab the Tavor softstate pointer and AH handle */ 746 state = (tavor_state_t *)hca; 747 ahhdl = (tavor_ahhdl_t)ah; 748 749 /* Query the AH */ 750 status = tavor_ah_query(state, ahhdl, &pdhdl, attr_p); 751 if (status != DDI_SUCCESS) { 752 TNF_PROBE_1(tavor_ci_query_ah_fail, TAVOR_TNF_ERROR, "", 753 tnf_uint, status, status); 754 TAVOR_TNF_EXIT(tavor_ci_query_ah); 755 return (status); 756 } 757 758 /* Return the Tavor PD handle */ 759 *pd_p = (ibc_pd_hdl_t)pdhdl; 760 761 TAVOR_TNF_EXIT(tavor_ci_query_ah); 762 return (IBT_SUCCESS); 763 } 764 765 766 /* 767 * tavor_ci_modify_ah() 768 * Modify the Address Vector information of a specified Address Handle 769 * Context: Can be called from interrupt or base context. 
 */
static ibt_status_t
tavor_ci_modify_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibt_adds_vect_t *attr_p)
{
	tavor_state_t	*state;
	tavor_ahhdl_t	ahhdl;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_modify_ah);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_modify_ah_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_modify_ah);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid address handle pointer */
	if (ah == NULL) {
		TNF_PROBE_0(tavor_ci_modify_ah_invahhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_modify_ah);
		return (IBT_AH_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and AH handle */
	state = (tavor_state_t *)hca;
	ahhdl = (tavor_ahhdl_t)ah;

	/* Modify the AH with the new address vector in attr_p */
	status = tavor_ah_modify(state, ahhdl, attr_p);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_modify_ah_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_modify_ah);
		return (status);
	}

	TAVOR_TNF_EXIT(tavor_ci_modify_ah);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_alloc_qp()
 *    Allocate a Queue Pair
 *    On success, returns the assigned QP number through "qpn" and the new
 *    QP through "qp_p" (as an opaque CI handle).
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
tavor_ci_alloc_qp(ibc_hca_hdl_t hca, ibtl_qp_hdl_t ibt_qphdl,
    ibt_qp_type_t type, ibt_qp_alloc_attr_t *attr_p,
    ibt_chan_sizes_t *queue_sizes_p, ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
{
	tavor_state_t	*state;
	tavor_qp_info_t	qpinfo;
	tavor_qp_options_t op;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_alloc_qp);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_alloc_qp_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_alloc_qp);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer */
	state = (tavor_state_t *)hca;

	/*
	 * Allocate the QP.  The request parameters are bundled into a
	 * tavor_qp_info_t; the work-queue location option comes from the
	 * configuration profile (DDR vs. system memory placement --
	 * see cp_qp_wq_inddr).
	 */
	qpinfo.qpi_attrp = attr_p;
	qpinfo.qpi_type = type;
	qpinfo.qpi_ibt_qphdl = ibt_qphdl;
	qpinfo.qpi_queueszp = queue_sizes_p;
	qpinfo.qpi_qpn = qpn;
	op.qpo_wq_loc = state->ts_cfg_profile->cp_qp_wq_inddr;
	status = tavor_qp_alloc(state, &qpinfo, TAVOR_NOSLEEP, &op);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_alloc_qp_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_alloc_qp);
		return (status);
	}

	/* Return the Tavor QP handle */
	*qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;

	TAVOR_TNF_EXIT(tavor_ci_alloc_qp);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_alloc_special_qp()
 *    Allocate a Special Queue Pair
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
tavor_ci_alloc_special_qp(ibc_hca_hdl_t hca, uint8_t port,
    ibtl_qp_hdl_t ibt_qphdl, ibt_sqp_type_t type,
    ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
    ibc_qp_hdl_t *qp_p)
{
	tavor_state_t	*state;
	tavor_qp_info_t	qpinfo;
	tavor_qp_options_t op;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_alloc_special_qp);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_alloc_special_qp_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_alloc_special_qp);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer */
	state = (tavor_state_t *)hca;

	/*
	 * Allocate the Special QP.  Like tavor_ci_alloc_qp() above, but
	 * also records the port number and takes a special QP type; the
	 * work-queue location option comes from the configuration profile.
	 */
	qpinfo.qpi_attrp = attr_p;
	qpinfo.qpi_type = type;
	qpinfo.qpi_port = port;
	qpinfo.qpi_ibt_qphdl = ibt_qphdl;
	qpinfo.qpi_queueszp = queue_sizes_p;
	op.qpo_wq_loc = state->ts_cfg_profile->cp_qp_wq_inddr;
	status = tavor_special_qp_alloc(state, &qpinfo, TAVOR_NOSLEEP, &op);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_alloc_special_qp_fail, TAVOR_TNF_ERROR,
		    "", tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_alloc_special_qp);
		return (status);
	}

	/* Return the Tavor QP handle */
	*qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;

	TAVOR_TNF_EXIT(tavor_ci_alloc_special_qp);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_alloc_qp_range()
 *    Allocate a range of Queue Pairs -- not supported by the Tavor
 *    driver; this entry point exists only to fill its ops-vector slot.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_alloc_qp_range(ibc_hca_hdl_t hca, uint_t log2,
    ibtl_qp_hdl_t *ibtl_qp_p, ibt_qp_type_t type,
    ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
    ibc_cq_hdl_t *send_cq_p, ibc_cq_hdl_t *recv_cq_p,
    ib_qpn_t *qpn_p, ibc_qp_hdl_t *qp_p)
{
	return (IBT_NOT_SUPPORTED);
}

/*
 * tavor_ci_free_qp()
 *    Free a Queue Pair
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
tavor_ci_free_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
    ibc_free_qp_flags_t free_qp_flags, ibc_qpn_hdl_t *qpnh_p)
{
	tavor_state_t	*state;
	tavor_qphdl_t	qphdl;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_free_qp);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_free_qp_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_free_qp);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid QP handle pointer */
	if (qp == NULL) {
		TNF_PROBE_0(tavor_ci_free_qp_invqphdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_free_qp);
		return (IBT_QP_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and QP handle */
	state = (tavor_state_t *)hca;
	qphdl = (tavor_qphdl_t)qp;

	/*
	 * Free the QP.  Depending on free_qp_flags, the QP number may be
	 * retained and handed back through qpnh_p (see
	 * tavor_ci_release_qpn() below).
	 */
	status = tavor_qp_free(state, &qphdl, free_qp_flags, qpnh_p,
	    TAVOR_NOSLEEP);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_free_qp_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_free_qp);
		return (status);
	}

	TAVOR_TNF_EXIT(tavor_ci_free_qp);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_release_qpn()
 *    Release a Queue Pair Number (QPN)
 *    Context: Can be called only from user or kernel context.
982 */ 983 static ibt_status_t 984 tavor_ci_release_qpn(ibc_hca_hdl_t hca, ibc_qpn_hdl_t qpnh) 985 { 986 tavor_state_t *state; 987 tavor_qpn_entry_t *entry; 988 989 TAVOR_TNF_ENTER(tavor_ci_release_qpn); 990 991 /* Check for valid HCA handle */ 992 if (hca == NULL) { 993 TNF_PROBE_0(tavor_ci_release_qpn_invhca_fail, 994 TAVOR_TNF_ERROR, ""); 995 TAVOR_TNF_EXIT(tavor_ci_release_qpn); 996 return (IBT_HCA_HDL_INVALID); 997 } 998 999 /* Check for valid QP handle pointer */ 1000 if (qpnh == NULL) { 1001 TNF_PROBE_0(tavor_ci_release_qpn_invqpnhdl_fail, 1002 TAVOR_TNF_ERROR, ""); 1003 TAVOR_TNF_EXIT(tavor_ci_release_qpn); 1004 return (IBT_QP_HDL_INVALID); 1005 } 1006 1007 /* Grab the Tavor softstate pointer and QP handle */ 1008 state = (tavor_state_t *)hca; 1009 entry = (tavor_qpn_entry_t *)qpnh; 1010 1011 /* Release the QP number */ 1012 tavor_qp_release_qpn(state, entry, TAVOR_QPN_RELEASE); 1013 1014 TAVOR_TNF_EXIT(tavor_ci_release_qpn); 1015 return (IBT_SUCCESS); 1016 } 1017 1018 1019 /* 1020 * tavor_ci_query_qp() 1021 * Query a Queue Pair 1022 * Context: Can be called from interrupt or base context. 
 */
static ibt_status_t
tavor_ci_query_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
    ibt_qp_query_attr_t *attr_p)
{
        tavor_state_t   *state;
        tavor_qphdl_t   qphdl;
        int             status;

        TAVOR_TNF_ENTER(tavor_ci_query_qp);

        /* Check for valid HCA handle */
        if (hca == NULL) {
                TNF_PROBE_0(tavor_ci_query_qp_invhca_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_query_qp);
                return (IBT_HCA_HDL_INVALID);
        }

        /* Check for valid QP handle */
        if (qp == NULL) {
                TNF_PROBE_0(tavor_ci_query_qp_invqphdl_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_query_qp);
                return (IBT_QP_HDL_INVALID);
        }

        /* Grab the Tavor softstate pointer and QP handle */
        state = (tavor_state_t *)hca;
        qphdl = (tavor_qphdl_t)qp;

        /*
         * Query the QP; tavor_qp_query() fills in 'attr_p' with the
         * current attributes on success.
         */
        status = tavor_qp_query(state, qphdl, attr_p);
        if (status != DDI_SUCCESS) {
                TNF_PROBE_1(tavor_ci_query_qp_fail, TAVOR_TNF_ERROR, "",
                    tnf_uint, status, status);
                TAVOR_TNF_EXIT(tavor_ci_query_qp);
                return (status);
        }

        TAVOR_TNF_EXIT(tavor_ci_query_qp);
        return (IBT_SUCCESS);
}


/*
 * tavor_ci_modify_qp()
 *    Modify a Queue Pair
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
tavor_ci_modify_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
    ibt_cep_modify_flags_t flags, ibt_qp_info_t *info_p,
    ibt_queue_sizes_t *actual_sz)
{
        tavor_state_t   *state;
        tavor_qphdl_t   qphdl;
        int             status;

        TAVOR_TNF_ENTER(tavor_ci_modify_qp);

        /* Check for valid HCA handle */
        if (hca == NULL) {
                TNF_PROBE_0(tavor_ci_modify_qp_invhca_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_modify_qp);
                return (IBT_HCA_HDL_INVALID);
        }

        /* Check for valid QP handle */
        if (qp == NULL) {
                TNF_PROBE_0(tavor_ci_modify_qp_invqphdl_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_modify_qp);
                return (IBT_QP_HDL_INVALID);
        }

        /* Grab the Tavor softstate pointer and QP handle */
        state = (tavor_state_t *)hca;
        qphdl = (tavor_qphdl_t)qp;

        /*
         * Modify the QP.  'flags' selects which attributes in
         * 'info_p' to apply; resulting queue sizes are returned via
         * 'actual_sz' by the helper.
         */
        status = tavor_qp_modify(state, qphdl, flags, info_p, actual_sz);
        if (status != DDI_SUCCESS) {
                TNF_PROBE_1(tavor_ci_modify_qp_fail, TAVOR_TNF_ERROR, "",
                    tnf_uint, status, status);
                TAVOR_TNF_EXIT(tavor_ci_modify_qp);
                return (status);
        }

        TAVOR_TNF_EXIT(tavor_ci_modify_qp);
        return (IBT_SUCCESS);
}


/*
 * tavor_ci_alloc_cq()
 *    Allocate a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_alloc_cq(ibc_hca_hdl_t hca, ibt_cq_hdl_t ibt_cqhdl,
    ibt_cq_attr_t *attr_p, ibc_cq_hdl_t *cq_p, uint_t *actual_size)
{
        tavor_state_t   *state;
        tavor_cqhdl_t   cqhdl;
        int             status;

        TAVOR_TNF_ENTER(tavor_ci_alloc_cq);

        /* Check for valid HCA handle */
        if (hca == NULL) {
                TNF_PROBE_0(tavor_ci_alloc_cq_invhca_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_alloc_cq);
                return (IBT_HCA_HDL_INVALID);
        }

        /* Grab the Tavor softstate pointer */
        state = (tavor_state_t *)hca;

        /*
         * Allocate the CQ.  The number of entries actually allocated
         * (possibly rounded up) is returned through 'actual_size';
         * TAVOR_NOSLEEP keeps the allocation from blocking.
         */
        status = tavor_cq_alloc(state, ibt_cqhdl, attr_p, actual_size,
            &cqhdl, TAVOR_NOSLEEP);
        if (status != DDI_SUCCESS) {
                TNF_PROBE_1(tavor_ci_alloc_cq_fail, TAVOR_TNF_ERROR, "",
                    tnf_uint, status, status);
                TAVOR_TNF_EXIT(tavor_ci_alloc_cq);
                return (status);
        }

        /* Return the Tavor CQ handle */
        *cq_p = (ibc_cq_hdl_t)cqhdl;

        TAVOR_TNF_EXIT(tavor_ci_alloc_cq);
        return (IBT_SUCCESS);
}


/*
 * tavor_ci_free_cq()
 *    Free a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
tavor_ci_free_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq)
{
        tavor_state_t   *state;
        tavor_cqhdl_t   cqhdl;
        int             status;

        TAVOR_TNF_ENTER(tavor_ci_free_cq);


        /* Check for valid HCA handle */
        if (hca == NULL) {
                TNF_PROBE_0(tavor_ci_free_cq_invhca_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_free_cq);
                return (IBT_HCA_HDL_INVALID);
        }

        /* Check for valid CQ handle pointer */
        if (cq == NULL) {
                TNF_PROBE_0(tavor_ci_free_cq_invcqhdl_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_free_cq);
                return (IBT_CQ_HDL_INVALID);
        }

        /* Grab the Tavor softstate pointer and CQ handle */
        state = (tavor_state_t *)hca;
        cqhdl = (tavor_cqhdl_t)cq;

        /*
         * Free the CQ.  TAVOR_NOSLEEP keeps the teardown from
         * blocking; any failure status is passed back to the IBTF.
         */
        status = tavor_cq_free(state, &cqhdl, TAVOR_NOSLEEP);
        if (status != DDI_SUCCESS) {
                TNF_PROBE_1(tavor_ci_free_cq_fail, TAVOR_TNF_ERROR, "",
                    tnf_uint, status, status);
                TAVOR_TNF_EXIT(tavor_ci_free_cq);
                return (status);
        }

        TAVOR_TNF_EXIT(tavor_ci_free_cq);
        return (IBT_SUCCESS);
}


/*
 * tavor_ci_query_cq()
 *    Return the size of a Completion Queue
 *    Context: Can be called only from user or kernel context.
1216 */ 1217 static ibt_status_t 1218 tavor_ci_query_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t *entries_p, 1219 uint_t *count_p, uint_t *usec_p, ibt_cq_handler_id_t *hid_p) 1220 { 1221 tavor_cqhdl_t cqhdl; 1222 1223 TAVOR_TNF_ENTER(tavor_ci_query_cq); 1224 1225 /* Check for valid HCA handle */ 1226 if (hca == NULL) { 1227 TNF_PROBE_0(tavor_ci_query_cq_invhca_fail, 1228 TAVOR_TNF_ERROR, ""); 1229 TAVOR_TNF_EXIT(tavor_ci_query_cq); 1230 return (IBT_HCA_HDL_INVALID); 1231 } 1232 1233 /* Check for valid CQ handle pointer */ 1234 if (cq == NULL) { 1235 TNF_PROBE_0(tavor_ci_query_cq_invcqhdl, 1236 TAVOR_TNF_ERROR, ""); 1237 TAVOR_TNF_EXIT(tavor_ci_query_cq); 1238 return (IBT_CQ_HDL_INVALID); 1239 } 1240 1241 /* Grab the CQ handle */ 1242 cqhdl = (tavor_cqhdl_t)cq; 1243 1244 /* Query the current CQ size */ 1245 *entries_p = cqhdl->cq_bufsz; 1246 1247 /* interrupt moderation is not supported */ 1248 *count_p = 0; 1249 *usec_p = 0; 1250 *hid_p = 0; 1251 1252 TAVOR_TNF_EXIT(tavor_ci_query_cq); 1253 return (IBT_SUCCESS); 1254 } 1255 1256 1257 /* 1258 * tavor_ci_resize_cq() 1259 * Change the size of a Completion Queue 1260 * Context: Can be called only from user or kernel context. 
 */
static ibt_status_t
tavor_ci_resize_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t size,
    uint_t *actual_size)
{
        tavor_state_t           *state;
        tavor_cqhdl_t           cqhdl;
        int                     status;

        TAVOR_TNF_ENTER(tavor_ci_resize_cq);

        /* Check for valid HCA handle */
        if (hca == NULL) {
                TNF_PROBE_0(tavor_ci_resize_cq_invhca_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_resize_cq);
                return (IBT_HCA_HDL_INVALID);
        }

        /* Check for valid CQ handle pointer */
        if (cq == NULL) {
                TNF_PROBE_0(tavor_ci_resize_cq_invcqhdl_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_resize_cq);
                return (IBT_CQ_HDL_INVALID);
        }

        /* Grab the Tavor softstate pointer and CQ handle */
        state = (tavor_state_t *)hca;
        cqhdl = (tavor_cqhdl_t)cq;

        /*
         * Resize the CQ.  The entry count actually allocated is
         * returned through 'actual_size'; TAVOR_NOSLEEP keeps the
         * operation from blocking.
         */
        status = tavor_cq_resize(state, cqhdl, size, actual_size,
            TAVOR_NOSLEEP);
        if (status != DDI_SUCCESS) {
                TNF_PROBE_1(tavor_ci_resize_cq_fail, TAVOR_TNF_ERROR, "",
                    tnf_uint, status, status);
                TAVOR_TNF_EXIT(tavor_ci_resize_cq);
                return (status);
        }

        TAVOR_TNF_EXIT(tavor_ci_resize_cq);
        return (IBT_SUCCESS);
}

/*
 * CQ interrupt moderation is not supported in tavor.
 */

/* ARGSUSED */
static ibt_status_t
tavor_ci_modify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq,
    uint_t count, uint_t usec, ibt_cq_handler_id_t hid)
{
        return (IBT_NOT_SUPPORTED);
}

/*
 * tavor_ci_alloc_cq_sched()
 *    Reserve a CQ scheduling class resource
 *    Context: Can be called only from user or kernel context.
1322 */ 1323 /* ARGSUSED */ 1324 static ibt_status_t 1325 tavor_ci_alloc_cq_sched(ibc_hca_hdl_t hca, ibt_cq_sched_attr_t *attr, 1326 ibc_sched_hdl_t *sched_hdl_p) 1327 { 1328 if (hca == NULL) { 1329 return (IBT_HCA_HDL_INVALID); 1330 } 1331 *sched_hdl_p = NULL; 1332 1333 /* 1334 * This is an unsupported interface for the Tavor driver. Tavor 1335 * does not support CQ scheduling classes. 1336 */ 1337 return (IBT_SUCCESS); 1338 } 1339 1340 1341 /* 1342 * tavor_ci_free_cq_sched() 1343 * Free a CQ scheduling class resource 1344 * Context: Can be called only from user or kernel context. 1345 */ 1346 /* ARGSUSED */ 1347 static ibt_status_t 1348 tavor_ci_free_cq_sched(ibc_hca_hdl_t hca, ibc_sched_hdl_t sched_hdl) 1349 { 1350 if (hca == NULL) { 1351 return (IBT_HCA_HDL_INVALID); 1352 } 1353 1354 /* 1355 * This is an unsupported interface for the Tavor driver. Tavor 1356 * does not support CQ scheduling classes. 1357 */ 1358 return (IBT_SUCCESS); 1359 } 1360 1361 1362 /* 1363 * tavor_ci_alloc_eec() 1364 * Allocate an End-to-End context 1365 * Context: Can be called only from user or kernel context. 1366 */ 1367 /* ARGSUSED */ 1368 static ibt_status_t 1369 tavor_ci_alloc_eec(ibc_hca_hdl_t hca, ibc_eec_flags_t flags, 1370 ibt_eec_hdl_t ibt_eec, ibc_rdd_hdl_t rdd, ibc_eec_hdl_t *eec_p) 1371 { 1372 TAVOR_TNF_ENTER(tavor_ci_alloc_eec); 1373 1374 /* 1375 * This is an unsupported interface for the Tavor driver. This 1376 * interface is necessary to support Reliable Datagram (RD) 1377 * operations. Tavor does not support RD. 1378 */ 1379 1380 TAVOR_TNF_EXIT(tavor_ci_alloc_eec); 1381 return (IBT_NOT_SUPPORTED); 1382 } 1383 1384 1385 /* 1386 * tavor_ci_free_eec() 1387 * Free an End-to-End context 1388 * Context: Can be called only from user or kernel context. 
1389 */ 1390 /* ARGSUSED */ 1391 static ibt_status_t 1392 tavor_ci_free_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec) 1393 { 1394 TAVOR_TNF_ENTER(tavor_ci_free_eec); 1395 1396 /* 1397 * This is an unsupported interface for the Tavor driver. This 1398 * interface is necessary to support Reliable Datagram (RD) 1399 * operations. Tavor does not support RD. 1400 */ 1401 1402 TAVOR_TNF_EXIT(tavor_ci_free_eec); 1403 return (IBT_NOT_SUPPORTED); 1404 } 1405 1406 1407 /* 1408 * tavor_ci_query_eec() 1409 * Query an End-to-End context 1410 * Context: Can be called from interrupt or base context. 1411 */ 1412 /* ARGSUSED */ 1413 static ibt_status_t 1414 tavor_ci_query_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec, 1415 ibt_eec_query_attr_t *attr_p) 1416 { 1417 TAVOR_TNF_ENTER(tavor_ci_query_eec); 1418 1419 /* 1420 * This is an unsupported interface for the Tavor driver. This 1421 * interface is necessary to support Reliable Datagram (RD) 1422 * operations. Tavor does not support RD. 1423 */ 1424 1425 TAVOR_TNF_EXIT(tavor_ci_query_eec); 1426 return (IBT_NOT_SUPPORTED); 1427 } 1428 1429 1430 /* 1431 * tavor_ci_modify_eec() 1432 * Modify an End-to-End context 1433 * Context: Can be called from interrupt or base context. 1434 */ 1435 /* ARGSUSED */ 1436 static ibt_status_t 1437 tavor_ci_modify_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec, 1438 ibt_cep_modify_flags_t flags, ibt_eec_info_t *info_p) 1439 { 1440 TAVOR_TNF_ENTER(tavor_ci_query_eec); 1441 1442 /* 1443 * This is an unsupported interface for the Tavor driver. This 1444 * interface is necessary to support Reliable Datagram (RD) 1445 * operations. Tavor does not support RD. 1446 */ 1447 1448 TAVOR_TNF_EXIT(tavor_ci_query_eec); 1449 return (IBT_NOT_SUPPORTED); 1450 } 1451 1452 1453 /* 1454 * tavor_ci_register_mr() 1455 * Prepare a virtually addressed Memory Region for use by an HCA 1456 * Context: Can be called from interrupt or base context. 
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_register_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
    ibt_mr_desc_t *mr_desc)
{
        tavor_mr_options_t      op;
        tavor_state_t           *state;
        tavor_pdhdl_t           pdhdl;
        tavor_mrhdl_t           mrhdl;
        int                     status;

        TAVOR_TNF_ENTER(tavor_ci_register_mr);

        ASSERT(mr_attr != NULL);
        ASSERT(mr_p != NULL);
        ASSERT(mr_desc != NULL);

        /* Check for valid HCA handle */
        if (hca == NULL) {
                TNF_PROBE_0(tavor_ci_register_mr_invhca_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_register_mr);
                return (IBT_HCA_HDL_INVALID);
        }

        /* Check for valid PD handle pointer */
        if (pd == NULL) {
                TNF_PROBE_0(tavor_ci_register_mr_invpdhdl_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_register_mr);
                return (IBT_PD_HDL_INVALID);
        }

        /*
         * Validate the access flags.  Both Remote Write and Remote Atomic
         * require the Local Write flag to be set
         */
        if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
            (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
            !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
                TNF_PROBE_0(tavor_ci_register_mr_inv_accflags_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_register_mr);
                return (IBT_MR_ACCESS_REQ_INVALID);
        }

        /* Grab the Tavor softstate pointer and PD handle */
        state = (tavor_state_t *)hca;
        pdhdl = (tavor_pdhdl_t)pd;

        /*
         * Register the memory region.  The bind type follows the
         * configured IOMMU-bypass profile; no pre-existing DMA handle
         * or address override is supplied on this path.
         */
        op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
        op.mro_bind_dmahdl = NULL;
        op.mro_bind_override_addr = 0;
        status = tavor_mr_register(state, pdhdl, mr_attr, &mrhdl, &op);
        if (status != DDI_SUCCESS) {
                TNF_PROBE_1(tavor_ci_register_mr_fail, TAVOR_TNF_ERROR, "",
                    tnf_uint, status, status);
                TAVOR_TNF_EXIT(tavor_ci_register_mr);
                return (status);
        }

        /* Fill in the mr_desc structure */
        mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
        mr_desc->md_lkey = mrhdl->mr_lkey;
        /* Only set RKey if remote access was requested */
        if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
            (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
            (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
                mr_desc->md_rkey = mrhdl->mr_rkey;
        }

        /*
         * If region is mapped for streaming (i.e. noncoherent), then set
         * sync is required
         */
        mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
            IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

        /* Return the Tavor MR handle */
        *mr_p = (ibc_mr_hdl_t)mrhdl;

        TAVOR_TNF_EXIT(tavor_ci_register_mr);
        return (IBT_SUCCESS);
}


/*
 * tavor_ci_register_buf()
 *    Prepare a Memory Region specified by buf structure for use by an HCA
 *    Context: Can be called from interrupt or base context.
1550 */ 1551 /* ARGSUSED */ 1552 static ibt_status_t 1553 tavor_ci_register_buf(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, 1554 ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved, 1555 ibt_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc) 1556 { 1557 tavor_mr_options_t op; 1558 tavor_state_t *state; 1559 tavor_pdhdl_t pdhdl; 1560 tavor_mrhdl_t mrhdl; 1561 int status; 1562 ibt_mr_flags_t flags = attrp->mr_flags; 1563 1564 TAVOR_TNF_ENTER(tavor_ci_register_buf); 1565 1566 ASSERT(mr_p != NULL); 1567 ASSERT(mr_desc != NULL); 1568 1569 /* Check for valid HCA handle */ 1570 if (hca == NULL) { 1571 TNF_PROBE_0(tavor_ci_register_buf_invhca_fail, 1572 TAVOR_TNF_ERROR, ""); 1573 TAVOR_TNF_EXIT(tavor_ci_register_buf); 1574 return (IBT_HCA_HDL_INVALID); 1575 } 1576 1577 /* Check for valid PD handle pointer */ 1578 if (pd == NULL) { 1579 TNF_PROBE_0(tavor_ci_register_buf_invpdhdl_fail, 1580 TAVOR_TNF_ERROR, ""); 1581 TAVOR_TNF_EXIT(tavor_ci_register_buf); 1582 return (IBT_PD_HDL_INVALID); 1583 } 1584 1585 /* 1586 * Validate the access flags. 
Both Remote Write and Remote Atomic 1587 * require the Local Write flag to be set 1588 */ 1589 if (((flags & IBT_MR_ENABLE_REMOTE_WRITE) || 1590 (flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) && 1591 !(flags & IBT_MR_ENABLE_LOCAL_WRITE)) { 1592 TNF_PROBE_0(tavor_ci_register_buf_accflags_inv, 1593 TAVOR_TNF_ERROR, ""); 1594 TAVOR_TNF_EXIT(tavor_ci_register_buf); 1595 return (IBT_MR_ACCESS_REQ_INVALID); 1596 } 1597 1598 /* Grab the Tavor softstate pointer and PD handle */ 1599 state = (tavor_state_t *)hca; 1600 pdhdl = (tavor_pdhdl_t)pd; 1601 1602 /* Register the memory region */ 1603 op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass; 1604 op.mro_bind_dmahdl = NULL; 1605 op.mro_bind_override_addr = 0; 1606 status = tavor_mr_register_buf(state, pdhdl, attrp, buf, &mrhdl, &op); 1607 if (status != DDI_SUCCESS) { 1608 TNF_PROBE_1(tavor_ci_register_mr_fail, TAVOR_TNF_ERROR, "", 1609 tnf_uint, status, status); 1610 TAVOR_TNF_EXIT(tavor_ci_register_mr); 1611 return (status); 1612 } 1613 1614 /* Fill in the mr_desc structure */ 1615 mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr; 1616 mr_desc->md_lkey = mrhdl->mr_lkey; 1617 /* Only set RKey if remote access was requested */ 1618 if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) || 1619 (flags & IBT_MR_ENABLE_REMOTE_WRITE) || 1620 (flags & IBT_MR_ENABLE_REMOTE_READ)) { 1621 mr_desc->md_rkey = mrhdl->mr_rkey; 1622 } 1623 1624 /* 1625 * If region is mapped for streaming (i.e. noncoherent), then set 1626 * sync is required 1627 */ 1628 mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags & 1629 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE; 1630 1631 /* Return the Tavor MR handle */ 1632 *mr_p = (ibc_mr_hdl_t)mrhdl; 1633 1634 TAVOR_TNF_EXIT(tavor_ci_register_buf); 1635 return (IBT_SUCCESS); 1636 } 1637 1638 1639 /* 1640 * tavor_ci_deregister_mr() 1641 * Deregister a Memory Region from an HCA translation table 1642 * Context: Can be called only from user or kernel context. 
 */
static ibt_status_t
tavor_ci_deregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
{
        tavor_state_t           *state;
        tavor_mrhdl_t           mrhdl;
        int                     status;

        TAVOR_TNF_ENTER(tavor_ci_deregister_mr);

        /* Check for valid HCA handle */
        if (hca == NULL) {
                TNF_PROBE_0(tavor_ci_deregister_mr_invhca_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_deregister_mr);
                return (IBT_HCA_HDL_INVALID);
        }

        /* Check for valid memory region handle */
        if (mr == NULL) {
                TNF_PROBE_0(tavor_ci_deregister_mr_invmrhdl_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_deregister_mr);
                return (IBT_MR_HDL_INVALID);
        }

        /* Grab the Tavor softstate pointer */
        state = (tavor_state_t *)hca;
        mrhdl = (tavor_mrhdl_t)mr;

        /*
         * Deregister the memory region.  TAVOR_MR_DEREG_ALL tears
         * down the whole registration; TAVOR_NOSLEEP keeps the call
         * from blocking.
         */
        status = tavor_mr_deregister(state, &mrhdl, TAVOR_MR_DEREG_ALL,
            TAVOR_NOSLEEP);
        if (status != DDI_SUCCESS) {
                TNF_PROBE_1(tavor_ci_deregister_mr_fail,
                    TAVOR_TNF_ERROR, "", tnf_uint, status, status);
                TAVOR_TNF_EXIT(tavor_ci_deregister_mr);
                return (status);
        }

        TAVOR_TNF_EXIT(tavor_ci_deregister_mr);
        return (IBT_SUCCESS);
}


/*
 * tavor_ci_query_mr()
 *    Retrieve information about a specified Memory Region
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
tavor_ci_query_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
    ibt_mr_query_attr_t *mr_attr)
{
        tavor_state_t           *state;
        tavor_mrhdl_t           mrhdl;
        int                     status;

        TAVOR_TNF_ENTER(tavor_ci_query_mr);

        ASSERT(mr_attr != NULL);

        /* Check for valid HCA handle */
        if (hca == NULL) {
                TNF_PROBE_0(tavor_ci_query_mr_invhca_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_query_mr);
                return (IBT_HCA_HDL_INVALID);
        }

        /* Check for MemRegion handle */
        if (mr == NULL) {
                TNF_PROBE_0(tavor_ci_query_mr_invmrhdl_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_query_mr);
                return (IBT_MR_HDL_INVALID);
        }

        /* Grab the Tavor softstate pointer and MR handle */
        state = (tavor_state_t *)hca;
        mrhdl = (tavor_mrhdl_t)mr;

        /*
         * Query the memory region; tavor_mr_query() fills 'mr_attr'
         * with the region's current attributes on success.
         */
        status = tavor_mr_query(state, mrhdl, mr_attr);
        if (status != DDI_SUCCESS) {
                TNF_PROBE_1(tavor_ci_query_mr_fail, TAVOR_TNF_ERROR, "",
                    tnf_uint, status, status);
                TAVOR_TNF_EXIT(tavor_ci_query_mr);
                return (status);
        }

        TAVOR_TNF_EXIT(tavor_ci_query_mr);
        return (IBT_SUCCESS);
}


/*
 * tavor_ci_register_shared_mr()
 *    Create a shared memory region matching an existing Memory Region
 *    Context: Can be called from interrupt or base context.
1745 */ 1746 /* ARGSUSED */ 1747 static ibt_status_t 1748 tavor_ci_register_shared_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, 1749 ibc_pd_hdl_t pd, ibt_smr_attr_t *mr_attr, void *ibtl_reserved, 1750 ibc_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc) 1751 { 1752 tavor_state_t *state; 1753 tavor_pdhdl_t pdhdl; 1754 tavor_mrhdl_t mrhdl, mrhdl_new; 1755 int status; 1756 1757 TAVOR_TNF_ENTER(tavor_ci_register_shared_mr); 1758 1759 ASSERT(mr_attr != NULL); 1760 ASSERT(mr_p != NULL); 1761 ASSERT(mr_desc != NULL); 1762 1763 /* Check for valid HCA handle */ 1764 if (hca == NULL) { 1765 TNF_PROBE_0(tavor_ci_register_shared_mr_invhca_fail, 1766 TAVOR_TNF_ERROR, ""); 1767 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr); 1768 return (IBT_HCA_HDL_INVALID); 1769 } 1770 1771 /* Check for valid PD handle pointer */ 1772 if (pd == NULL) { 1773 TNF_PROBE_0(tavor_ci_register_shared_mr_invpdhdl_fail, 1774 TAVOR_TNF_ERROR, ""); 1775 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr); 1776 return (IBT_PD_HDL_INVALID); 1777 } 1778 1779 /* Check for valid memory region handle */ 1780 if (mr == NULL) { 1781 TNF_PROBE_0(tavor_ci_register_shared_mr_invmrhdl_fail, 1782 TAVOR_TNF_ERROR, ""); 1783 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr); 1784 return (IBT_MR_HDL_INVALID); 1785 } 1786 /* 1787 * Validate the access flags. 
Both Remote Write and Remote Atomic 1788 * require the Local Write flag to be set 1789 */ 1790 if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) || 1791 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) && 1792 !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) { 1793 TNF_PROBE_0(tavor_ci_register_shared_mr_accflags_inv, 1794 TAVOR_TNF_ERROR, ""); 1795 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr); 1796 return (IBT_MR_ACCESS_REQ_INVALID); 1797 } 1798 1799 /* Grab the Tavor softstate pointer and handles */ 1800 state = (tavor_state_t *)hca; 1801 pdhdl = (tavor_pdhdl_t)pd; 1802 mrhdl = (tavor_mrhdl_t)mr; 1803 1804 /* Register the shared memory region */ 1805 status = tavor_mr_register_shared(state, mrhdl, pdhdl, mr_attr, 1806 &mrhdl_new); 1807 if (status != DDI_SUCCESS) { 1808 TNF_PROBE_1(tavor_ci_register_shared_mr_fail, TAVOR_TNF_ERROR, 1809 "", tnf_uint, status, status); 1810 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr); 1811 return (status); 1812 } 1813 1814 /* Fill in the mr_desc structure */ 1815 mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr; 1816 mr_desc->md_lkey = mrhdl_new->mr_lkey; 1817 /* Only set RKey if remote access was requested */ 1818 if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) || 1819 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) || 1820 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) { 1821 mr_desc->md_rkey = mrhdl_new->mr_rkey; 1822 } 1823 1824 /* 1825 * If shared region is mapped for streaming (i.e. noncoherent), then 1826 * set sync is required 1827 */ 1828 mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags & 1829 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE; 1830 1831 /* Return the Tavor MR handle */ 1832 *mr_p = (ibc_mr_hdl_t)mrhdl_new; 1833 1834 TAVOR_TNF_EXIT(tavor_ci_register_mr); 1835 return (IBT_SUCCESS); 1836 } 1837 1838 1839 /* 1840 * tavor_ci_reregister_mr() 1841 * Modify the attributes of an existing Memory Region 1842 * Context: Can be called from interrupt or base context. 
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_reregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
    ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_new,
    ibt_mr_desc_t *mr_desc)
{
        tavor_mr_options_t      op;
        tavor_state_t           *state;
        tavor_pdhdl_t           pdhdl;
        tavor_mrhdl_t           mrhdl, mrhdl_new;
        int                     status;

        TAVOR_TNF_ENTER(tavor_ci_reregister_mr);

        ASSERT(mr_attr != NULL);
        ASSERT(mr_new != NULL);
        ASSERT(mr_desc != NULL);

        /* Check for valid HCA handle */
        if (hca == NULL) {
                TNF_PROBE_0(tavor_ci_reregister_mr_hca_inv, TAVOR_TNF_ERROR,
                    "");
                TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
                return (IBT_HCA_HDL_INVALID);
        }

        /* Check for valid memory region handle */
        if (mr == NULL) {
                TNF_PROBE_0(tavor_ci_reregister_mr_invmrhdl_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
                return (IBT_MR_HDL_INVALID);
        }

        /* Grab the Tavor softstate pointer, mrhdl, and pdhdl */
        state = (tavor_state_t *)hca;
        mrhdl = (tavor_mrhdl_t)mr;
        pdhdl = (tavor_pdhdl_t)pd;

        /*
         * Reregister the memory region.  The bind type follows the
         * configured IOMMU-bypass profile; a new MR handle is
         * returned through 'mrhdl_new' on success.
         */
        op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
        status = tavor_mr_reregister(state, mrhdl, pdhdl, mr_attr,
            &mrhdl_new, &op);
        if (status != DDI_SUCCESS) {
                TNF_PROBE_1(tavor_ci_reregister_mr_fail, TAVOR_TNF_ERROR, "",
                    tnf_uint, status, status);
                TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
                return (status);
        }

        /* Fill in the mr_desc structure */
        mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
        mr_desc->md_lkey = mrhdl_new->mr_lkey;
        /* Only set RKey if remote access was requested */
        if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
            (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
            (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
                mr_desc->md_rkey = mrhdl_new->mr_rkey;
        }

        /*
         * If region is mapped for streaming (i.e. noncoherent), then set
         * sync is required
         */
        mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
            IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

        /* Return the Tavor MR handle */
        *mr_new = (ibc_mr_hdl_t)mrhdl_new;

        TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
        return (IBT_SUCCESS);
}


/*
 * tavor_ci_reregister_buf()
 *    Modify the attributes of an existing Memory Region
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_reregister_buf(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
    ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
    ibc_mr_hdl_t *mr_new, ibt_mr_desc_t *mr_desc)
{
        tavor_mr_options_t      op;
        tavor_state_t           *state;
        tavor_pdhdl_t           pdhdl;
        tavor_mrhdl_t           mrhdl, mrhdl_new;
        int                     status;
        ibt_mr_flags_t          flags = attrp->mr_flags;

        TAVOR_TNF_ENTER(tavor_ci_reregister_buf);

        ASSERT(mr_new != NULL);
        ASSERT(mr_desc != NULL);

        /* Check for valid HCA handle */
        if (hca == NULL) {
                TNF_PROBE_0(tavor_ci_reregister_buf_hca_inv, TAVOR_TNF_ERROR,
                    "");
                TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
                return (IBT_HCA_HDL_INVALID);
        }

        /* Check for valid memory region handle */
        if (mr == NULL) {
                TNF_PROBE_0(tavor_ci_reregister_buf_invmrhdl_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
                return (IBT_MR_HDL_INVALID);
        }

        /* Grab the Tavor softstate pointer, mrhdl, and pdhdl */
        state = (tavor_state_t *)hca;
        mrhdl = (tavor_mrhdl_t)mr;
        pdhdl = (tavor_pdhdl_t)pd;

        /*
         * Reregister the memory region using the supplied buf(9S)
         * structure; a new MR handle is returned through 'mrhdl_new'
         * on success.
         */
        op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
        status = tavor_mr_reregister_buf(state, mrhdl, pdhdl, attrp, buf,
            &mrhdl_new, &op);
        if (status != DDI_SUCCESS) {
                TNF_PROBE_1(tavor_ci_reregister_buf_fail, TAVOR_TNF_ERROR, "",
                    tnf_uint, status, status);
                TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
                return (status);
        }

        /* Fill in the mr_desc structure */
        mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
        mr_desc->md_lkey = mrhdl_new->mr_lkey;
        /* Only set RKey if remote access was requested */
        if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
            (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
            (flags & IBT_MR_ENABLE_REMOTE_READ)) {
                mr_desc->md_rkey = mrhdl_new->mr_rkey;
        }

        /*
         * If region is mapped for streaming (i.e. noncoherent), then set
         * sync is required
         */
        mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
            IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

        /* Return the Tavor MR handle */
        *mr_new = (ibc_mr_hdl_t)mrhdl_new;

        TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
        return (IBT_SUCCESS);
}

/*
 * tavor_ci_sync_mr()
 *    Synchronize access to a Memory Region
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
tavor_ci_sync_mr(ibc_hca_hdl_t hca, ibt_mr_sync_t *mr_segs, size_t num_segs)
{
        tavor_state_t           *state;
        int                     status;

        TAVOR_TNF_ENTER(tavor_ci_sync_mr);

        ASSERT(mr_segs != NULL);

        /* Check for valid HCA handle */
        if (hca == NULL) {
                TNF_PROBE_0(tavor_ci_sync_mr_invhca_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_sync_mr);
                return (IBT_HCA_HDL_INVALID);
        }

        /* Grab the Tavor softstate pointer */
        state = (tavor_state_t *)hca;

        /*
         * Sync the memory region.  'mr_segs' describes 'num_segs'
         * segments to synchronize; any failure status from
         * tavor_mr_sync() is passed straight back to the IBTF.
         */
        status = tavor_mr_sync(state, mr_segs, num_segs);
        if (status != DDI_SUCCESS) {
                TNF_PROBE_1(tavor_ci_sync_mr_fail, TAVOR_TNF_ERROR, "",
                    tnf_uint, status, status);
                TAVOR_TNF_EXIT(tavor_ci_sync_mr);
                return (status);
        }

        TAVOR_TNF_EXIT(tavor_ci_sync_mr);
        return (IBT_SUCCESS);
}


/*
 * tavor_ci_alloc_mw()
 *    Allocate a Memory Window
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
tavor_ci_alloc_mw(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, ibt_mw_flags_t flags,
    ibc_mw_hdl_t *mw_p, ibt_rkey_t *rkey_p)
{
        tavor_state_t           *state;
        tavor_pdhdl_t           pdhdl;
        tavor_mwhdl_t           mwhdl;
        int                     status;

        TAVOR_TNF_ENTER(tavor_ci_alloc_mw);

        ASSERT(mw_p != NULL);
        ASSERT(rkey_p != NULL);

        /* Check for valid HCA handle */
        if (hca == NULL) {
                TNF_PROBE_0(tavor_ci_alloc_mw_invhca_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
                return (IBT_HCA_HDL_INVALID);
        }

        /* Check for valid PD handle pointer */
        if (pd == NULL) {
                TNF_PROBE_0(tavor_ci_alloc_mw_invpdhdl_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
                return (IBT_PD_HDL_INVALID);
        }

        /* Grab the Tavor softstate pointer and PD handle */
        state = (tavor_state_t *)hca;
        pdhdl = (tavor_pdhdl_t)pd;

        /* Allocate the memory window */
        status = tavor_mw_alloc(state, pdhdl, flags, &mwhdl);
        if (status != DDI_SUCCESS) {
                TNF_PROBE_1(tavor_ci_alloc_mw_fail, TAVOR_TNF_ERROR, "",
                    tnf_uint, status, status);
                TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
                return (status);
        }

        /*
         * Return the MW handle and RKey.  The RKey comes from the
         * window's internal MR structure (mr_rkey).
         */
        *mw_p = (ibc_mw_hdl_t)mwhdl;
        *rkey_p = mwhdl->mr_rkey;

        TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
        return (IBT_SUCCESS);
}


/*
 * tavor_ci_free_mw()
 *    Free a Memory Window
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
tavor_ci_free_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw)
{
        tavor_state_t           *state;
        tavor_mwhdl_t           mwhdl;
        int                     status;

        TAVOR_TNF_ENTER(tavor_ci_free_mw);

        /* Check for valid HCA handle */
        if (hca == NULL) {
                TNF_PROBE_0(tavor_ci_free_mw_invhca_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_free_mw);
                return (IBT_HCA_HDL_INVALID);
        }

        /* Check for valid MW handle */
        if (mw == NULL) {
                TNF_PROBE_0(tavor_ci_free_mw_invmwhdl_fail,
                    TAVOR_TNF_ERROR, "");
                TAVOR_TNF_EXIT(tavor_ci_free_mw);
                return (IBT_MW_HDL_INVALID);
        }

        /* Grab the Tavor softstate pointer and MW handle */
        state = (tavor_state_t *)hca;
        mwhdl = (tavor_mwhdl_t)mw;

        /*
         * Free the memory window.  TAVOR_NOSLEEP keeps the teardown
         * from blocking; any failure status is passed back to the
         * IBTF caller.
         */
        status = tavor_mw_free(state, &mwhdl, TAVOR_NOSLEEP);
        if (status != DDI_SUCCESS) {
                TNF_PROBE_1(tavor_ci_free_mw_fail, TAVOR_TNF_ERROR, "",
                    tnf_uint, status, status);
                TAVOR_TNF_EXIT(tavor_ci_free_mw);
                return (status);
        }

        TAVOR_TNF_EXIT(tavor_ci_free_mw);
        return (IBT_SUCCESS);
}


/*
 * tavor_ci_query_mw()
 *    Return the attributes of the specified Memory Window
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
tavor_ci_query_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw,
    ibt_mw_query_attr_t *mw_attr_p)
{
	tavor_mwhdl_t		mwhdl;

	TAVOR_TNF_ENTER(tavor_ci_query_mw);

	ASSERT(mw_attr_p != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_query_mw_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_query_mw);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid MemWin handle */
	if (mw == NULL) {
		/* NOTE(review): "inc_mwhdl" looks like a typo for "inv_mwhdl" */
		TNF_PROBE_0(tavor_ci_query_mw_inc_mwhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_query_mw);
		return (IBT_MW_HDL_INVALID);
	}

	/*
	 * Query the memory window pointer and fill in the return values.
	 * The PD and RKey are read under mr_lock so the pair is consistent
	 * with respect to a concurrent bind/free.
	 */
	mwhdl = (tavor_mwhdl_t)mw;
	mutex_enter(&mwhdl->mr_lock);
	mw_attr_p->mw_pd = (ibc_pd_hdl_t)mwhdl->mr_pdhdl;
	mw_attr_p->mw_rkey = mwhdl->mr_rkey;
	mutex_exit(&mwhdl->mr_lock);

	TAVOR_TNF_EXIT(tavor_ci_query_mw);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_register_dma_mr()
 *    Register a physical (DMA-address) memory region.
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_register_dma_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_dmr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
    ibt_mr_desc_t *mr_desc)
{
	tavor_state_t		*state;
	tavor_pdhdl_t		pdhdl;
	tavor_mrhdl_t		mrhdl;
	int			status;

	ASSERT(mr_attr != NULL);
	ASSERT(mr_p != NULL);
	ASSERT(mr_desc != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		return (IBT_PD_HDL_INVALID);
	}

	/*
	 * Validate the access flags.  Both Remote Write and Remote Atomic
	 * require the Local Write flag to be set
	 */
	if (((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
	    !(mr_attr->dmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
		return (IBT_MR_ACCESS_REQ_INVALID);
	}

	/* Grab the Tavor softstate pointer and PD handle */
	state = (tavor_state_t *)hca;
	pdhdl = (tavor_pdhdl_t)pd;

	/* Register the region; failure status is returned unchanged */
	status = tavor_dma_mr_register(state, pdhdl, mr_attr, &mrhdl);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mr_attr->dmr_paddr;
	mr_desc->md_lkey = mrhdl->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl->mr_rkey;
	}

	/*
	 * If region is mapped for streaming (i.e. noncoherent), then set
	 * sync is required
	 */
	mr_desc->md_sync_required = B_FALSE;

	/* Return the Tavor MR handle (comment previously said "Hermon") */
	*mr_p = (ibc_mr_hdl_t)mrhdl;

	return (IBT_SUCCESS);
}


/*
 * tavor_ci_attach_mcg()
 *    Attach a Queue Pair to a Multicast Group
 *    Context: Can be called only from user or kernel context.
2257 */ 2258 static ibt_status_t 2259 tavor_ci_attach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid, 2260 ib_lid_t lid) 2261 { 2262 tavor_state_t *state; 2263 tavor_qphdl_t qphdl; 2264 int status; 2265 2266 TAVOR_TNF_ENTER(tavor_ci_attach_mcg); 2267 2268 /* Check for valid HCA handle */ 2269 if (hca == NULL) { 2270 TNF_PROBE_0(tavor_ci_attach_mcg_invhca_fail, 2271 TAVOR_TNF_ERROR, ""); 2272 TAVOR_TNF_EXIT(tavor_ci_attach_mcg); 2273 return (IBT_HCA_HDL_INVALID); 2274 } 2275 2276 /* Check for valid QP handle pointer */ 2277 if (qp == NULL) { 2278 TNF_PROBE_0(tavor_ci_attach_mcg_invqphdl_fail, 2279 TAVOR_TNF_ERROR, ""); 2280 TAVOR_TNF_EXIT(tavor_ci_attach_mcg); 2281 return (IBT_QP_HDL_INVALID); 2282 } 2283 2284 /* Grab the Tavor softstate pointer and QP handles */ 2285 state = (tavor_state_t *)hca; 2286 qphdl = (tavor_qphdl_t)qp; 2287 2288 /* Attach the QP to the multicast group */ 2289 status = tavor_mcg_attach(state, qphdl, gid, lid); 2290 if (status != DDI_SUCCESS) { 2291 TNF_PROBE_1(tavor_ci_attach_mcg_fail, TAVOR_TNF_ERROR, "", 2292 tnf_uint, status, status); 2293 TAVOR_TNF_EXIT(tavor_ci_attach_mcg); 2294 return (status); 2295 } 2296 2297 TAVOR_TNF_EXIT(tavor_ci_attach_mcg); 2298 return (IBT_SUCCESS); 2299 } 2300 2301 2302 /* 2303 * tavor_ci_detach_mcg() 2304 * Detach a Queue Pair to a Multicast Group 2305 * Context: Can be called only from user or kernel context. 
2306 */ 2307 static ibt_status_t 2308 tavor_ci_detach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid, 2309 ib_lid_t lid) 2310 { 2311 tavor_state_t *state; 2312 tavor_qphdl_t qphdl; 2313 int status; 2314 2315 TAVOR_TNF_ENTER(tavor_ci_attach_mcg); 2316 2317 /* Check for valid HCA handle */ 2318 if (hca == NULL) { 2319 TNF_PROBE_0(tavor_ci_detach_mcg_invhca_fail, 2320 TAVOR_TNF_ERROR, ""); 2321 TAVOR_TNF_EXIT(tavor_ci_detach_mcg); 2322 return (IBT_HCA_HDL_INVALID); 2323 } 2324 2325 /* Check for valid QP handle pointer */ 2326 if (qp == NULL) { 2327 TNF_PROBE_0(tavor_ci_detach_mcg_invqphdl_fail, 2328 TAVOR_TNF_ERROR, ""); 2329 TAVOR_TNF_EXIT(tavor_ci_detach_mcg); 2330 return (IBT_QP_HDL_INVALID); 2331 } 2332 2333 /* Grab the Tavor softstate pointer and QP handle */ 2334 state = (tavor_state_t *)hca; 2335 qphdl = (tavor_qphdl_t)qp; 2336 2337 /* Detach the QP from the multicast group */ 2338 status = tavor_mcg_detach(state, qphdl, gid, lid); 2339 if (status != DDI_SUCCESS) { 2340 TNF_PROBE_1(tavor_ci_detach_mcg_fail, TAVOR_TNF_ERROR, "", 2341 tnf_uint, status, status); 2342 TAVOR_TNF_EXIT(tavor_ci_detach_mcg); 2343 return (status); 2344 } 2345 2346 TAVOR_TNF_EXIT(tavor_ci_detach_mcg); 2347 return (IBT_SUCCESS); 2348 } 2349 2350 2351 /* 2352 * tavor_ci_post_send() 2353 * Post send work requests to the send queue on the specified QP 2354 * Context: Can be called from interrupt or base context. 
2355 */ 2356 static ibt_status_t 2357 tavor_ci_post_send(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_send_wr_t *wr_p, 2358 uint_t num_wr, uint_t *num_posted_p) 2359 { 2360 tavor_state_t *state; 2361 tavor_qphdl_t qphdl; 2362 int status; 2363 2364 TAVOR_TNF_ENTER(tavor_ci_post_send); 2365 2366 ASSERT(wr_p != NULL); 2367 ASSERT(num_wr != 0); 2368 2369 /* Check for valid HCA handle */ 2370 if (hca == NULL) { 2371 TNF_PROBE_0(tavor_ci_post_send_invhca_fail, 2372 TAVOR_TNF_ERROR, ""); 2373 TAVOR_TNF_EXIT(tavor_ci_post_send); 2374 return (IBT_HCA_HDL_INVALID); 2375 } 2376 2377 /* Check for valid QP handle pointer */ 2378 if (qp == NULL) { 2379 TNF_PROBE_0(tavor_ci_post_send_invqphdl_fail, 2380 TAVOR_TNF_ERROR, ""); 2381 TAVOR_TNF_EXIT(tavor_ci_post_send); 2382 return (IBT_QP_HDL_INVALID); 2383 } 2384 2385 /* Grab the Tavor softstate pointer and QP handle */ 2386 state = (tavor_state_t *)hca; 2387 qphdl = (tavor_qphdl_t)qp; 2388 2389 /* Post the send WQEs */ 2390 status = tavor_post_send(state, qphdl, wr_p, num_wr, num_posted_p); 2391 if (status != DDI_SUCCESS) { 2392 TNF_PROBE_1(tavor_ci_post_send_fail, TAVOR_TNF_ERROR, "", 2393 tnf_uint, status, status); 2394 TAVOR_TNF_EXIT(tavor_ci_post_send); 2395 return (status); 2396 } 2397 2398 TAVOR_TNF_EXIT(tavor_ci_post_send); 2399 return (IBT_SUCCESS); 2400 } 2401 2402 2403 /* 2404 * tavor_ci_post_recv() 2405 * Post receive work requests to the receive queue on the specified QP 2406 * Context: Can be called from interrupt or base context. 
2407 */ 2408 static ibt_status_t 2409 tavor_ci_post_recv(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_recv_wr_t *wr_p, 2410 uint_t num_wr, uint_t *num_posted_p) 2411 { 2412 tavor_state_t *state; 2413 tavor_qphdl_t qphdl; 2414 int status; 2415 2416 TAVOR_TNF_ENTER(tavor_ci_post_recv); 2417 2418 ASSERT(wr_p != NULL); 2419 ASSERT(num_wr != 0); 2420 2421 /* Check for valid HCA handle */ 2422 if (hca == NULL) { 2423 TNF_PROBE_0(tavor_ci_post_recv_invhca_fail, 2424 TAVOR_TNF_ERROR, ""); 2425 TAVOR_TNF_EXIT(tavor_ci_post_recv); 2426 return (IBT_HCA_HDL_INVALID); 2427 } 2428 2429 /* Check for valid QP handle pointer */ 2430 if (qp == NULL) { 2431 TNF_PROBE_0(tavor_ci_post_recv_invqphdl_fail, 2432 TAVOR_TNF_ERROR, ""); 2433 TAVOR_TNF_EXIT(tavor_ci_post_recv); 2434 return (IBT_QP_HDL_INVALID); 2435 } 2436 2437 /* Grab the Tavor softstate pointer and QP handle */ 2438 state = (tavor_state_t *)hca; 2439 qphdl = (tavor_qphdl_t)qp; 2440 2441 /* Post the receive WQEs */ 2442 status = tavor_post_recv(state, qphdl, wr_p, num_wr, num_posted_p); 2443 if (status != DDI_SUCCESS) { 2444 TNF_PROBE_1(tavor_ci_post_recv_fail, TAVOR_TNF_ERROR, "", 2445 tnf_uint, status, status); 2446 TAVOR_TNF_EXIT(tavor_ci_post_recv); 2447 return (status); 2448 } 2449 2450 TAVOR_TNF_EXIT(tavor_ci_post_recv); 2451 return (IBT_SUCCESS); 2452 } 2453 2454 2455 /* 2456 * tavor_ci_poll_cq() 2457 * Poll for a work request completion 2458 * Context: Can be called from interrupt or base context. 
2459 */ 2460 static ibt_status_t 2461 tavor_ci_poll_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, ibt_wc_t *wc_p, 2462 uint_t num_wc, uint_t *num_polled) 2463 { 2464 tavor_state_t *state; 2465 tavor_cqhdl_t cqhdl; 2466 uint_t polled; 2467 int status; 2468 2469 TAVOR_TNF_ENTER(tavor_ci_poll_cq); 2470 2471 ASSERT(wc_p != NULL); 2472 2473 /* Check for valid HCA handle */ 2474 if (hca == NULL) { 2475 TNF_PROBE_0(tavor_ci_poll_cq_invhca_fail, 2476 TAVOR_TNF_ERROR, ""); 2477 TAVOR_TNF_EXIT(tavor_ci_poll_cq); 2478 return (IBT_HCA_HDL_INVALID); 2479 } 2480 2481 /* Check for valid CQ handle pointer */ 2482 if (cq == NULL) { 2483 TNF_PROBE_0(tavor_ci_poll_cq_invcqhdl_fail, 2484 TAVOR_TNF_ERROR, ""); 2485 TAVOR_TNF_EXIT(tavor_ci_poll_cq); 2486 return (IBT_CQ_HDL_INVALID); 2487 } 2488 2489 /* Check for valid num_wc field */ 2490 if (num_wc == 0) { 2491 TNF_PROBE_0(tavor_ci_poll_cq_num_wc_fail, 2492 TAVOR_TNF_ERROR, ""); 2493 TAVOR_TNF_EXIT(tavor_ci_poll_cq); 2494 return (IBT_INVALID_PARAM); 2495 } 2496 2497 /* Grab the Tavor softstate pointer and CQ handle */ 2498 state = (tavor_state_t *)hca; 2499 cqhdl = (tavor_cqhdl_t)cq; 2500 2501 /* Poll for work request completions */ 2502 status = tavor_cq_poll(state, cqhdl, wc_p, num_wc, &polled); 2503 2504 /* First fill in "num_polled" argument (only when valid) */ 2505 if (num_polled) { 2506 *num_polled = polled; 2507 } 2508 2509 /* 2510 * Check the status code; 2511 * If empty, we return empty. 
2512 * If error, we print out an error and then return 2513 * If success (something was polled), we return success 2514 */ 2515 if (status != DDI_SUCCESS) { 2516 if (status != IBT_CQ_EMPTY) { 2517 TNF_PROBE_1(tavor_ci_poll_cq_fail, TAVOR_TNF_ERROR, "", 2518 tnf_uint, status, status); 2519 } 2520 TAVOR_TNF_EXIT(tavor_ci_poll_cq); 2521 return (status); 2522 } 2523 2524 TAVOR_TNF_EXIT(tavor_ci_poll_cq); 2525 return (IBT_SUCCESS); 2526 } 2527 2528 2529 /* 2530 * tavor_ci_notify_cq() 2531 * Enable notification events on the specified CQ 2532 * Context: Can be called from interrupt or base context. 2533 */ 2534 static ibt_status_t 2535 tavor_ci_notify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq_hdl, 2536 ibt_cq_notify_flags_t flags) 2537 { 2538 tavor_state_t *state; 2539 tavor_cqhdl_t cqhdl; 2540 int status; 2541 2542 TAVOR_TNF_ENTER(tavor_ci_notify_cq); 2543 2544 /* Check for valid HCA handle */ 2545 if (hca == NULL) { 2546 TNF_PROBE_0(tavor_ci_notify_cq_invhca_fail, 2547 TAVOR_TNF_ERROR, ""); 2548 TAVOR_TNF_EXIT(tavor_ci_notify_cq); 2549 return (IBT_HCA_HDL_INVALID); 2550 } 2551 2552 /* Check for valid CQ handle pointer */ 2553 if (cq_hdl == NULL) { 2554 TNF_PROBE_0(tavor_ci_notify_cq_invcqhdl_fail, 2555 TAVOR_TNF_ERROR, ""); 2556 TAVOR_TNF_EXIT(tavor_ci_notify_cq); 2557 return (IBT_CQ_HDL_INVALID); 2558 } 2559 2560 /* Grab the Tavor softstate pointer and CQ handle */ 2561 state = (tavor_state_t *)hca; 2562 cqhdl = (tavor_cqhdl_t)cq_hdl; 2563 2564 /* Enable the CQ notification */ 2565 status = tavor_cq_notify(state, cqhdl, flags); 2566 if (status != DDI_SUCCESS) { 2567 TNF_PROBE_1(tavor_ci_notify_cq_fail, TAVOR_TNF_ERROR, "", 2568 tnf_uint, status, status); 2569 TAVOR_TNF_EXIT(tavor_ci_notify_cq); 2570 return (status); 2571 } 2572 2573 TAVOR_TNF_EXIT(tavor_ci_notify_cq); 2574 return (IBT_SUCCESS); 2575 } 2576 2577 /* 2578 * tavor_ci_ci_data_in() 2579 * Exchange CI-specific data. 2580 * Context: Can be called only from user or kernel context. 
2581 */ 2582 static ibt_status_t 2583 tavor_ci_ci_data_in(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags, 2584 ibt_object_type_t object, void *ibc_object_handle, void *data_p, 2585 size_t data_sz) 2586 { 2587 tavor_state_t *state; 2588 int status; 2589 2590 TAVOR_TNF_ENTER(tavor_ci_ci_data_in); 2591 2592 /* Check for valid HCA handle */ 2593 if (hca == NULL) { 2594 TNF_PROBE_0(tavor_ci_ci_data_in_invhca_fail, 2595 TAVOR_TNF_ERROR, ""); 2596 TAVOR_TNF_EXIT(tavor_ci_ci_data_in); 2597 return (IBT_HCA_HDL_INVALID); 2598 } 2599 2600 /* Grab the Tavor softstate pointer */ 2601 state = (tavor_state_t *)hca; 2602 2603 /* Get the Tavor userland mapping information */ 2604 status = tavor_umap_ci_data_in(state, flags, object, 2605 ibc_object_handle, data_p, data_sz); 2606 if (status != DDI_SUCCESS) { 2607 TNF_PROBE_1(tavor_ci_ci_data_in_umap_fail, TAVOR_TNF_ERROR, 2608 "", tnf_uint, status, status); 2609 TAVOR_TNF_EXIT(tavor_ci_ci_data_in); 2610 return (status); 2611 } 2612 2613 TAVOR_TNF_EXIT(tavor_ci_ci_data_in); 2614 return (IBT_SUCCESS); 2615 } 2616 2617 /* 2618 * tavor_ci_ci_data_out() 2619 * Exchange CI-specific data. 2620 * Context: Can be called only from user or kernel context. 
2621 */ 2622 static ibt_status_t 2623 tavor_ci_ci_data_out(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags, 2624 ibt_object_type_t object, void *ibc_object_handle, void *data_p, 2625 size_t data_sz) 2626 { 2627 tavor_state_t *state; 2628 int status; 2629 2630 TAVOR_TNF_ENTER(tavor_ci_ci_data_out); 2631 2632 /* Check for valid HCA handle */ 2633 if (hca == NULL) { 2634 TNF_PROBE_0(tavor_ci_ci_data_out_invhca_fail, 2635 TAVOR_TNF_ERROR, ""); 2636 TAVOR_TNF_EXIT(tavor_ci_ci_data_out); 2637 return (IBT_HCA_HDL_INVALID); 2638 } 2639 2640 /* Grab the Tavor softstate pointer */ 2641 state = (tavor_state_t *)hca; 2642 2643 /* Get the Tavor userland mapping information */ 2644 status = tavor_umap_ci_data_out(state, flags, object, 2645 ibc_object_handle, data_p, data_sz); 2646 if (status != DDI_SUCCESS) { 2647 TNF_PROBE_1(tavor_ci_ci_data_out_umap_fail, TAVOR_TNF_ERROR, 2648 "", tnf_uint, status, status); 2649 TAVOR_TNF_EXIT(tavor_ci_ci_data_out); 2650 return (status); 2651 } 2652 2653 TAVOR_TNF_EXIT(tavor_ci_ci_data_out); 2654 return (IBT_SUCCESS); 2655 } 2656 2657 2658 /* 2659 * tavor_ci_alloc_srq() 2660 * Allocate a Shared Receive Queue (SRQ) 2661 * Context: Can be called only from user or kernel context 2662 */ 2663 static ibt_status_t 2664 tavor_ci_alloc_srq(ibc_hca_hdl_t hca, ibt_srq_flags_t flags, 2665 ibt_srq_hdl_t ibt_srq, ibc_pd_hdl_t pd, ibt_srq_sizes_t *sizes, 2666 ibc_srq_hdl_t *ibc_srq_p, ibt_srq_sizes_t *ret_sizes_p) 2667 { 2668 tavor_state_t *state; 2669 tavor_pdhdl_t pdhdl; 2670 tavor_srqhdl_t srqhdl; 2671 tavor_srq_info_t srqinfo; 2672 tavor_srq_options_t op; 2673 int status; 2674 2675 TAVOR_TNF_ENTER(tavor_ci_alloc_srq); 2676 2677 /* Check for valid HCA handle */ 2678 if (hca == NULL) { 2679 TNF_PROBE_0(tavor_ci_alloc_srq_invhca_fail, 2680 TAVOR_TNF_ERROR, ""); 2681 TAVOR_TNF_EXIT(tavor_alloc_srq); 2682 return (IBT_HCA_HDL_INVALID); 2683 } 2684 2685 state = (tavor_state_t *)hca; 2686 2687 /* Check if SRQ is even supported */ 2688 if 
(state->ts_cfg_profile->cp_srq_enable == 0) { 2689 TNF_PROBE_0(tavor_ci_alloc_srq_not_supported_fail, 2690 TAVOR_TNF_ERROR, ""); 2691 TAVOR_TNF_EXIT(tavor_ci_alloc_srq); 2692 return (IBT_NOT_SUPPORTED); 2693 } 2694 2695 /* Check for valid PD handle pointer */ 2696 if (pd == NULL) { 2697 TNF_PROBE_0(tavor_ci_alloc_srq_invpdhdl_fail, 2698 TAVOR_TNF_ERROR, ""); 2699 TAVOR_TNF_EXIT(tavor_ci_alloc_srq); 2700 return (IBT_PD_HDL_INVALID); 2701 } 2702 2703 pdhdl = (tavor_pdhdl_t)pd; 2704 2705 srqinfo.srqi_ibt_srqhdl = ibt_srq; 2706 srqinfo.srqi_pd = pdhdl; 2707 srqinfo.srqi_sizes = sizes; 2708 srqinfo.srqi_real_sizes = ret_sizes_p; 2709 srqinfo.srqi_srqhdl = &srqhdl; 2710 srqinfo.srqi_flags = flags; 2711 op.srqo_wq_loc = state->ts_cfg_profile->cp_srq_wq_inddr; 2712 status = tavor_srq_alloc(state, &srqinfo, TAVOR_NOSLEEP, &op); 2713 if (status != DDI_SUCCESS) { 2714 TAVOR_TNF_EXIT(tavor_ci_alloc_srq); 2715 return (status); 2716 } 2717 2718 *ibc_srq_p = (ibc_srq_hdl_t)srqhdl; 2719 2720 TAVOR_TNF_EXIT(tavor_ci_alloc_srq); 2721 return (IBT_SUCCESS); 2722 } 2723 2724 /* 2725 * tavor_ci_free_srq() 2726 * Free a Shared Receive Queue (SRQ) 2727 * Context: Can be called only from user or kernel context 2728 */ 2729 static ibt_status_t 2730 tavor_ci_free_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq) 2731 { 2732 tavor_state_t *state; 2733 tavor_srqhdl_t srqhdl; 2734 int status; 2735 2736 TAVOR_TNF_ENTER(tavor_ci_free_srq); 2737 2738 /* Check for valid HCA handle */ 2739 if (hca == NULL) { 2740 TNF_PROBE_0(tavor_ci_free_srq_invhca_fail, 2741 TAVOR_TNF_ERROR, ""); 2742 TAVOR_TNF_EXIT(tavor_ci_free_srq); 2743 return (IBT_HCA_HDL_INVALID); 2744 } 2745 2746 state = (tavor_state_t *)hca; 2747 2748 /* Check if SRQ is even supported */ 2749 if (state->ts_cfg_profile->cp_srq_enable == 0) { 2750 TNF_PROBE_0(tavor_ci_alloc_srq_not_supported_fail, 2751 TAVOR_TNF_ERROR, ""); 2752 TAVOR_TNF_EXIT(tavor_ci_free_srq); 2753 return (IBT_NOT_SUPPORTED); 2754 } 2755 2756 /* Check for valid SRQ handle pointer 
*/ 2757 if (srq == NULL) { 2758 TNF_PROBE_0(tavor_ci_free_srq_invsrqhdl_fail, 2759 TAVOR_TNF_ERROR, ""); 2760 TAVOR_TNF_EXIT(tavor_ci_free_srq); 2761 return (IBT_SRQ_HDL_INVALID); 2762 } 2763 2764 srqhdl = (tavor_srqhdl_t)srq; 2765 2766 /* Free the SRQ */ 2767 status = tavor_srq_free(state, &srqhdl, TAVOR_NOSLEEP); 2768 if (status != DDI_SUCCESS) { 2769 TNF_PROBE_1(tavor_ci_free_srq_fail, TAVOR_TNF_ERROR, "", 2770 tnf_uint, status, status); 2771 TAVOR_TNF_EXIT(tavor_ci_free_srq); 2772 return (status); 2773 } 2774 2775 TAVOR_TNF_EXIT(tavor_ci_free_srq); 2776 return (IBT_SUCCESS); 2777 } 2778 2779 /* 2780 * tavor_ci_query_srq() 2781 * Query properties of a Shared Receive Queue (SRQ) 2782 * Context: Can be called from interrupt or base context. 2783 */ 2784 static ibt_status_t 2785 tavor_ci_query_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq, ibc_pd_hdl_t *pd_p, 2786 ibt_srq_sizes_t *sizes_p, uint_t *limit_p) 2787 { 2788 tavor_state_t *state; 2789 tavor_srqhdl_t srqhdl; 2790 2791 TAVOR_TNF_ENTER(tavor_ci_query_srq); 2792 2793 /* Check for valid HCA handle */ 2794 if (hca == NULL) { 2795 TNF_PROBE_0(tavor_ci_query_srq_invhca_fail, 2796 TAVOR_TNF_ERROR, ""); 2797 TAVOR_TNF_EXIT(tavor_ci_query_srq); 2798 return (IBT_HCA_HDL_INVALID); 2799 } 2800 2801 state = (tavor_state_t *)hca; 2802 2803 /* Check if SRQ is even supported */ 2804 if (state->ts_cfg_profile->cp_srq_enable == 0) { 2805 TNF_PROBE_0(tavor_ci_query_srq_not_supported_fail, 2806 TAVOR_TNF_ERROR, ""); 2807 TAVOR_TNF_EXIT(tavor_ci_query_srq); 2808 return (IBT_NOT_SUPPORTED); 2809 } 2810 2811 /* Check for valid SRQ handle pointer */ 2812 if (srq == NULL) { 2813 TNF_PROBE_0(tavor_ci_query_srq_invsrqhdl_fail, 2814 TAVOR_TNF_ERROR, ""); 2815 TAVOR_TNF_EXIT(tavor_ci_query_srq); 2816 return (IBT_SRQ_HDL_INVALID); 2817 } 2818 2819 srqhdl = (tavor_srqhdl_t)srq; 2820 2821 mutex_enter(&srqhdl->srq_lock); 2822 if (srqhdl->srq_state == TAVOR_SRQ_STATE_ERROR) { 2823 mutex_exit(&srqhdl->srq_lock); 2824 
TNF_PROBE_0(tavor_ci_query_srq_error_state, 2825 TAVOR_TNF_ERROR, ""); 2826 TAVOR_TNF_EXIT(tavor_ci_query_srq); 2827 return (IBT_SRQ_ERROR_STATE); 2828 } 2829 2830 *pd_p = (ibc_pd_hdl_t)srqhdl->srq_pdhdl; 2831 sizes_p->srq_wr_sz = srqhdl->srq_real_sizes.srq_wr_sz; 2832 sizes_p->srq_sgl_sz = srqhdl->srq_real_sizes.srq_sgl_sz; 2833 mutex_exit(&srqhdl->srq_lock); 2834 *limit_p = 0; 2835 2836 TAVOR_TNF_EXIT(tavor_ci_query_srq); 2837 return (IBT_SUCCESS); 2838 } 2839 2840 /* 2841 * tavor_ci_modify_srq() 2842 * Modify properties of a Shared Receive Queue (SRQ) 2843 * Context: Can be called from interrupt or base context. 2844 */ 2845 /* ARGSUSED */ 2846 static ibt_status_t 2847 tavor_ci_modify_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq, 2848 ibt_srq_modify_flags_t flags, uint_t size, uint_t limit, uint_t *ret_size_p) 2849 { 2850 tavor_state_t *state; 2851 tavor_srqhdl_t srqhdl; 2852 uint_t resize_supported, cur_srq_size; 2853 int status; 2854 2855 TAVOR_TNF_ENTER(tavor_ci_modify_srq); 2856 2857 /* Check for valid HCA handle */ 2858 if (hca == NULL) { 2859 TNF_PROBE_0(tavor_ci_modify_srq_invhca_fail, 2860 TAVOR_TNF_ERROR, ""); 2861 TAVOR_TNF_EXIT(tavor_ci_modify_srq); 2862 return (IBT_HCA_HDL_INVALID); 2863 } 2864 2865 state = (tavor_state_t *)hca; 2866 2867 /* Check if SRQ is even supported */ 2868 if (state->ts_cfg_profile->cp_srq_enable == 0) { 2869 TNF_PROBE_0(tavor_ci_modify_srq_not_supported_fail, 2870 TAVOR_TNF_ERROR, ""); 2871 TAVOR_TNF_EXIT(tavor_ci_modify_srq); 2872 return (IBT_NOT_SUPPORTED); 2873 } 2874 2875 /* Check for valid SRQ handle pointer */ 2876 if (srq == NULL) { 2877 TNF_PROBE_0(tavor_ci_modify_srq_invcqhdl_fail, 2878 TAVOR_TNF_ERROR, ""); 2879 TAVOR_TNF_EXIT(tavor_ci_modify_srq); 2880 return (IBT_SRQ_HDL_INVALID); 2881 } 2882 2883 srqhdl = (tavor_srqhdl_t)srq; 2884 2885 /* 2886 * Check Error State of SRQ. 2887 * Also, while we are holding the lock we save away the current SRQ 2888 * size for later use. 
2889 */ 2890 mutex_enter(&srqhdl->srq_lock); 2891 cur_srq_size = srqhdl->srq_wq_bufsz; 2892 if (srqhdl->srq_state == TAVOR_SRQ_STATE_ERROR) { 2893 mutex_exit(&srqhdl->srq_lock); 2894 TNF_PROBE_0(tavor_ci_modify_srq_error_state, 2895 TAVOR_TNF_ERROR, ""); 2896 TAVOR_TNF_EXIT(tavor_ci_modify_srq); 2897 return (IBT_SRQ_ERROR_STATE); 2898 } 2899 mutex_exit(&srqhdl->srq_lock); 2900 2901 /* 2902 * Setting the limit watermark is not currently supported. This is a 2903 * tavor hardware (firmware) limitation. We return NOT_SUPPORTED here, 2904 * and have the limit code commented out for now. 2905 * 2906 * XXX If we enable the limit watermark support, we need to do checks 2907 * and set the 'srq->srq_wr_limit' here, instead of returning not 2908 * supported. The 'tavor_srq_modify' operation below is for resizing 2909 * the SRQ only, the limit work should be done here. If this is 2910 * changed to use the 'limit' field, the 'ARGSUSED' comment for this 2911 * function should also be removed at that time. 2912 */ 2913 if (flags & IBT_SRQ_SET_LIMIT) { 2914 TNF_PROBE_0(tavor_ci_modify_srq_limit_not_supported, 2915 TAVOR_TNF_ERROR, ""); 2916 TAVOR_TNF_EXIT(tavor_ci_modify_srq); 2917 return (IBT_NOT_SUPPORTED); 2918 } 2919 2920 /* 2921 * Check the SET_SIZE flag. If not set, we simply return success here. 2922 * However if it is set, we check if resize is supported and only then 2923 * do we continue on with our resize processing. 2924 */ 2925 if (!(flags & IBT_SRQ_SET_SIZE)) { 2926 TAVOR_TNF_EXIT(tavor_ci_modify_srq); 2927 return (IBT_SUCCESS); 2928 } 2929 2930 resize_supported = state->ts_ibtfinfo.hca_attr->hca_flags & 2931 IBT_HCA_RESIZE_SRQ; 2932 2933 if ((flags & IBT_SRQ_SET_SIZE) && !resize_supported) { 2934 TNF_PROBE_0(tavor_ci_modify_srq_resize_not_supp_fail, 2935 TAVOR_TNF_ERROR, ""); 2936 TAVOR_TNF_EXIT(tavor_ci_modify_srq); 2937 return (IBT_NOT_SUPPORTED); 2938 } 2939 2940 /* 2941 * We do not support resizing an SRQ to be smaller than it's current 2942 * size. 
If a smaller (or equal) size is requested, then we simply 2943 * return success, and do nothing. 2944 */ 2945 if (size <= cur_srq_size) { 2946 *ret_size_p = cur_srq_size; 2947 TAVOR_TNF_EXIT(tavor_ci_modify_srq); 2948 return (IBT_SUCCESS); 2949 } 2950 2951 status = tavor_srq_modify(state, srqhdl, size, ret_size_p, 2952 TAVOR_NOSLEEP); 2953 if (status != DDI_SUCCESS) { 2954 /* Set return value to current SRQ size */ 2955 *ret_size_p = cur_srq_size; 2956 TNF_PROBE_1(tavor_ci_modify_srq_fail, TAVOR_TNF_ERROR, "", 2957 tnf_uint, status, status); 2958 TAVOR_TNF_EXIT(tavor_ci_modify_srq); 2959 return (status); 2960 } 2961 2962 TAVOR_TNF_EXIT(tavor_ci_modify_srq); 2963 return (IBT_SUCCESS); 2964 } 2965 2966 /* 2967 * tavor_ci_post_srq() 2968 * Post a Work Request to the specified Shared Receive Queue (SRQ) 2969 * Context: Can be called from interrupt or base context. 2970 */ 2971 static ibt_status_t 2972 tavor_ci_post_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq, 2973 ibt_recv_wr_t *wr, uint_t num_wr, uint_t *num_posted_p) 2974 { 2975 tavor_state_t *state; 2976 tavor_srqhdl_t srqhdl; 2977 int status; 2978 2979 TAVOR_TNF_ENTER(tavor_ci_post_srq); 2980 2981 /* Check for valid HCA handle */ 2982 if (hca == NULL) { 2983 TNF_PROBE_0(tavor_ci_post_srq_invhca_fail, 2984 TAVOR_TNF_ERROR, ""); 2985 TAVOR_TNF_EXIT(tavor_ci_post_srq); 2986 return (IBT_HCA_HDL_INVALID); 2987 } 2988 2989 state = (tavor_state_t *)hca; 2990 2991 /* Check if SRQ is even supported */ 2992 if (state->ts_cfg_profile->cp_srq_enable == 0) { 2993 TNF_PROBE_0(tavor_ci_post_srq_not_supported_fail, 2994 TAVOR_TNF_ERROR, ""); 2995 TAVOR_TNF_EXIT(tavor_ci_post_srq); 2996 return (IBT_NOT_SUPPORTED); 2997 } 2998 2999 /* Check for valid SRQ handle pointer */ 3000 if (srq == NULL) { 3001 TNF_PROBE_0(tavor_ci_post_srq_invsrqhdl_fail, 3002 TAVOR_TNF_ERROR, ""); 3003 TAVOR_TNF_EXIT(tavor_ci_post_srq); 3004 return (IBT_SRQ_HDL_INVALID); 3005 } 3006 3007 srqhdl = (tavor_srqhdl_t)srq; 3008 3009 status = tavor_post_srq(state, 
srqhdl, wr, num_wr, num_posted_p); 3010 if (status != DDI_SUCCESS) { 3011 TNF_PROBE_1(tavor_ci_post_srq_fail, TAVOR_TNF_ERROR, "", 3012 tnf_uint, status, status); 3013 TAVOR_TNF_EXIT(tavor_ci_post_srq); 3014 return (status); 3015 } 3016 3017 TAVOR_TNF_EXIT(tavor_ci_post_srq); 3018 return (IBT_SUCCESS); 3019 } 3020 3021 /* Address translation */ 3022 /* 3023 * tavor_ci_map_mem_area() 3024 * Context: Can be called from interrupt or base context. 3025 */ 3026 /* ARGSUSED */ 3027 static ibt_status_t 3028 tavor_ci_map_mem_area(ibc_hca_hdl_t hca, ibt_va_attr_t *va_attrs, 3029 void *ibtl_reserved, uint_t list_len, ibt_reg_req_t *reg_req, 3030 ibc_ma_hdl_t *ibc_ma_hdl_p) 3031 { 3032 return (IBT_NOT_SUPPORTED); 3033 } 3034 3035 /* 3036 * tavor_ci_unmap_mem_area() 3037 * Unmap the memory area 3038 * Context: Can be called from interrupt or base context. 3039 */ 3040 /* ARGSUSED */ 3041 static ibt_status_t 3042 tavor_ci_unmap_mem_area(ibc_hca_hdl_t hca, ibc_ma_hdl_t ma_hdl) 3043 { 3044 return (IBT_NOT_SUPPORTED); 3045 } 3046 3047 struct ibc_mi_s { 3048 int imh_len; 3049 ddi_dma_handle_t imh_dmahandle[1]; 3050 }; 3051 3052 /* 3053 * tavor_ci_map_mem_iov() 3054 * Map the memory 3055 * Context: Can be called from interrupt or base context. 
3056 */ 3057 /* ARGSUSED */ 3058 static ibt_status_t 3059 tavor_ci_map_mem_iov(ibc_hca_hdl_t hca, ibt_iov_attr_t *iov_attr, 3060 ibt_all_wr_t *wr, ibc_mi_hdl_t *mi_hdl_p) 3061 { 3062 int status; 3063 int i, j, nds, max_nds; 3064 uint_t len; 3065 ibt_status_t ibt_status; 3066 ddi_dma_handle_t dmahdl; 3067 ddi_dma_cookie_t dmacookie; 3068 ddi_dma_attr_t dma_attr; 3069 uint_t cookie_cnt; 3070 ibc_mi_hdl_t mi_hdl; 3071 ibt_lkey_t rsvd_lkey; 3072 ibt_wr_ds_t *sgl; 3073 tavor_state_t *state; 3074 int kmflag; 3075 int (*callback)(caddr_t); 3076 3077 if (mi_hdl_p == NULL) 3078 return (IBT_MI_HDL_INVALID); 3079 3080 /* Check for valid HCA handle */ 3081 if (hca == NULL) 3082 return (IBT_HCA_HDL_INVALID); 3083 3084 /* Tavor does not allow the default "use reserved lkey" */ 3085 if ((iov_attr->iov_flags & IBT_IOV_ALT_LKEY) == 0) 3086 return (IBT_INVALID_PARAM); 3087 3088 rsvd_lkey = iov_attr->iov_alt_lkey; 3089 3090 state = (tavor_state_t *)hca; 3091 tavor_dma_attr_init(&dma_attr); 3092 #ifdef __sparc 3093 if (state->ts_cfg_profile->cp_iommu_bypass == TAVOR_BINDMEM_BYPASS) 3094 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL; 3095 #endif 3096 3097 nds = 0; 3098 max_nds = iov_attr->iov_wr_nds; 3099 if (iov_attr->iov_lso_hdr_sz) 3100 max_nds -= (iov_attr->iov_lso_hdr_sz + sizeof (uint32_t) + 3101 0xf) >> 4; /* 0xf is for rounding up to a multiple of 16 */ 3102 if ((iov_attr->iov_flags & IBT_IOV_NOSLEEP) == 0) { 3103 kmflag = KM_SLEEP; 3104 callback = DDI_DMA_SLEEP; 3105 } else { 3106 kmflag = KM_NOSLEEP; 3107 callback = DDI_DMA_DONTWAIT; 3108 } 3109 3110 if (iov_attr->iov_flags & IBT_IOV_BUF) { 3111 mi_hdl = kmem_alloc(sizeof (*mi_hdl), kmflag); 3112 if (mi_hdl == NULL) 3113 return (IBT_INSUFF_RESOURCE); 3114 sgl = wr->send.wr_sgl; 3115 3116 status = ddi_dma_alloc_handle(state->ts_dip, &dma_attr, 3117 callback, NULL, &dmahdl); 3118 if (status != DDI_SUCCESS) { 3119 kmem_free(mi_hdl, sizeof (*mi_hdl)); 3120 return (IBT_INSUFF_RESOURCE); 3121 } 3122 status = 
ddi_dma_buf_bind_handle(dmahdl, iov_attr->iov_buf, 3123 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL, 3124 &dmacookie, &cookie_cnt); 3125 if (status != DDI_DMA_MAPPED) { 3126 ddi_dma_free_handle(&dmahdl); 3127 kmem_free(mi_hdl, sizeof (*mi_hdl)); 3128 return (ibc_get_ci_failure(0)); 3129 } 3130 while (cookie_cnt-- > 0) { 3131 if (nds > max_nds) { 3132 status = ddi_dma_unbind_handle(dmahdl); 3133 ddi_dma_free_handle(&dmahdl); 3134 return (IBT_SGL_TOO_SMALL); 3135 } 3136 sgl[nds].ds_va = dmacookie.dmac_laddress; 3137 sgl[nds].ds_key = rsvd_lkey; 3138 sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size; 3139 nds++; 3140 if (cookie_cnt != 0) 3141 ddi_dma_nextcookie(dmahdl, &dmacookie); 3142 } 3143 wr->send.wr_nds = nds; 3144 mi_hdl->imh_len = 1; 3145 mi_hdl->imh_dmahandle[0] = dmahdl; 3146 *mi_hdl_p = mi_hdl; 3147 return (IBT_SUCCESS); 3148 } 3149 3150 if (iov_attr->iov_flags & IBT_IOV_RECV) 3151 sgl = wr->recv.wr_sgl; 3152 else 3153 sgl = wr->send.wr_sgl; 3154 3155 len = iov_attr->iov_list_len; 3156 for (i = 0, j = 0; j < len; j++) { 3157 if (iov_attr->iov[j].iov_len == 0) 3158 continue; 3159 i++; 3160 } 3161 mi_hdl = kmem_alloc(sizeof (*mi_hdl) + 3162 (i - 1) * sizeof (ddi_dma_handle_t), kmflag); 3163 if (mi_hdl == NULL) 3164 return (IBT_INSUFF_RESOURCE); 3165 mi_hdl->imh_len = i; 3166 for (i = 0, j = 0; j < len; j++) { 3167 if (iov_attr->iov[j].iov_len == 0) 3168 continue; 3169 status = ddi_dma_alloc_handle(state->ts_dip, &dma_attr, 3170 callback, NULL, &dmahdl); 3171 if (status != DDI_SUCCESS) { 3172 ibt_status = IBT_INSUFF_RESOURCE; 3173 goto fail2; 3174 } 3175 status = ddi_dma_addr_bind_handle(dmahdl, iov_attr->iov_as, 3176 iov_attr->iov[j].iov_addr, iov_attr->iov[j].iov_len, 3177 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL, 3178 &dmacookie, &cookie_cnt); 3179 if (status != DDI_DMA_MAPPED) { 3180 ibt_status = ibc_get_ci_failure(0); 3181 goto fail1; 3182 } 3183 if (nds + cookie_cnt > max_nds) { 3184 ibt_status = IBT_SGL_TOO_SMALL; 3185 goto fail2; 3186 } 
3187 while (cookie_cnt-- > 0) { 3188 sgl[nds].ds_va = dmacookie.dmac_laddress; 3189 sgl[nds].ds_key = rsvd_lkey; 3190 sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size; 3191 nds++; 3192 if (cookie_cnt != 0) 3193 ddi_dma_nextcookie(dmahdl, &dmacookie); 3194 } 3195 mi_hdl->imh_dmahandle[i] = dmahdl; 3196 i++; 3197 } 3198 3199 if (iov_attr->iov_flags & IBT_IOV_RECV) 3200 wr->recv.wr_nds = nds; 3201 else 3202 wr->send.wr_nds = nds; 3203 *mi_hdl_p = mi_hdl; 3204 return (IBT_SUCCESS); 3205 3206 fail1: 3207 ddi_dma_free_handle(&dmahdl); 3208 fail2: 3209 while (--i >= 0) { 3210 status = ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]); 3211 ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]); 3212 } 3213 kmem_free(mi_hdl, sizeof (*mi_hdl) + 3214 (len - 1) * sizeof (ddi_dma_handle_t)); 3215 *mi_hdl_p = NULL; 3216 return (ibt_status); 3217 } 3218 3219 /* 3220 * tavor_ci_unmap_mem_iov() 3221 * Unmap the memory 3222 * Context: Can be called from interrupt or base context. 3223 */ 3224 /* ARGSUSED */ 3225 static ibt_status_t 3226 tavor_ci_unmap_mem_iov(ibc_hca_hdl_t hca, ibc_mi_hdl_t mi_hdl) 3227 { 3228 int i; 3229 3230 /* Check for valid HCA handle */ 3231 if (hca == NULL) 3232 return (IBT_HCA_HDL_INVALID); 3233 3234 if (mi_hdl == NULL) 3235 return (IBT_MI_HDL_INVALID); 3236 3237 for (i = 0; i < mi_hdl->imh_len; i++) { 3238 (void) ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]); 3239 ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]); 3240 } 3241 kmem_free(mi_hdl, sizeof (*mi_hdl) + 3242 (mi_hdl->imh_len - 1) * sizeof (ddi_dma_handle_t)); 3243 return (IBT_SUCCESS); 3244 } 3245 3246 /* Allocate L_Key */ 3247 /* 3248 * tavor_ci_alloc_lkey() 3249 */ 3250 /* ARGSUSED */ 3251 static ibt_status_t 3252 tavor_ci_alloc_lkey(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, 3253 ibt_lkey_flags_t flags, uint_t phys_buf_list_sz, ibc_mr_hdl_t *mr_p, 3254 ibt_pmr_desc_t *mem_desc_p) 3255 { 3256 TAVOR_TNF_ENTER(tavor_ci_alloc_lkey); 3257 TAVOR_TNF_EXIT(tavor_ci_alloc_lkey); 3258 return (IBT_NOT_SUPPORTED); 3259 } 

/* Physical Register Memory Region */
/*
 * tavor_ci_register_physical_mr()
 *    Physical-buffer MR registration is not implemented by the Tavor
 *    driver; always returns IBT_NOT_SUPPORTED.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_register_physical_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
    ibt_pmr_desc_t *mem_desc_p)
{
	TAVOR_TNF_ENTER(tavor_ci_register_physical_mr);
	TAVOR_TNF_EXIT(tavor_ci_register_physical_mr);
	return (IBT_NOT_SUPPORTED);
}

/*
 * tavor_ci_reregister_physical_mr()
 *    Physical-buffer MR reregistration is not implemented by the Tavor
 *    driver; always returns IBT_NOT_SUPPORTED.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_reregister_physical_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
    ibc_pd_hdl_t pd, ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved,
    ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mr_desc_p)
{
	TAVOR_TNF_ENTER(tavor_ci_reregister_physical_mr);
	TAVOR_TNF_EXIT(tavor_ci_reregister_physical_mr);
	return (IBT_NOT_SUPPORTED);
}

/* Mellanox FMR Support */
/*
 * tavor_ci_create_fmr_pool()
 *    Creates a pool of memory regions suitable for FMR registration
 *    Context: Can be called from base context only
 *    FMR is not implemented on Tavor; always returns IBT_NOT_SUPPORTED.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_create_fmr_pool(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_fmr_pool_attr_t *params, ibc_fmr_pool_hdl_t *fmr_pool_p)
{
	return (IBT_NOT_SUPPORTED);
}

/*
 * tavor_ci_destroy_fmr_pool()
 *    Free all resources associated with an FMR pool.
 *    Context: Can be called from base context only.
 *    FMR is not implemented on Tavor; always returns IBT_NOT_SUPPORTED.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_destroy_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
{
	return (IBT_NOT_SUPPORTED);
}

/*
 * tavor_ci_flush_fmr_pool()
 *    Force a flush of the memory tables, cleaning up used FMR resources.
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_flush_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
{
	return (IBT_NOT_SUPPORTED);
}

/*
 * tavor_ci_register_physical_fmr()
 *    From the 'pool' of FMR regions passed in, performs register physical
 *    operation.
 *    Context: Can be called from interrupt or base context.
 *    FMR is not implemented on Tavor; always returns IBT_NOT_SUPPORTED.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_register_physical_fmr(ibc_hca_hdl_t hca,
    ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
    void *ibtl_reserved, ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mem_desc_p)
{
	return (IBT_NOT_SUPPORTED);
}

/*
 * tavor_ci_deregister_fmr()
 *    Moves an FMR (specified by 'mr') to the deregistered state.
 *    Context: Can be called from base context only.
 *    FMR is not implemented on Tavor; always returns IBT_NOT_SUPPORTED.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_deregister_fmr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
{
	return (IBT_NOT_SUPPORTED);
}

/*
 * tavor_ci_alloc_io_mem()
 *    Allocate DMA-able memory on behalf of the IBTF.
 *    Validates the HCA handle and the output handle pointer, then
 *    delegates the actual DDI allocation to tavor_mem_alloc().
 */
ibt_status_t
tavor_ci_alloc_io_mem(
	ibc_hca_hdl_t hca,
	size_t size,
	ibt_mr_flags_t mr_flag,
	caddr_t *kaddrp,
	ibc_mem_alloc_hdl_t *mem_alloc_hdl)
{
	tavor_state_t	*state;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_alloc_io_mem);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_alloc_io_mem_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_alloc_io_mem);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid mem_alloc_hdl handle pointer */
	if (mem_alloc_hdl == NULL) {
		TNF_PROBE_0(tavor_ci_alloc_io_mem_hdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_alloc_io_mem);
		return (IBT_MEM_ALLOC_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and mem handle */
	state = (tavor_state_t *)hca;

	/* Allocate the
AH */ 3393 status = tavor_mem_alloc(state, size, mr_flag, kaddrp, 3394 (tavor_mem_alloc_hdl_t *)mem_alloc_hdl); 3395 3396 if (status != DDI_SUCCESS) { 3397 TNF_PROBE_1(tavor_ci_alloc_ah_fail, TAVOR_TNF_ERROR, "", 3398 tnf_uint, status, status); 3399 TAVOR_TNF_EXIT(tavor_ci_alloc_io_mem); 3400 return (status); 3401 } 3402 3403 TAVOR_TNF_EXIT(tavor_ci_alloc_io_mem); 3404 return (IBT_SUCCESS); 3405 } 3406 3407 3408 /* 3409 * tavor_ci_free_io_mem() 3410 * free the memory 3411 */ 3412 ibt_status_t 3413 tavor_ci_free_io_mem(ibc_hca_hdl_t hca, ibc_mem_alloc_hdl_t mem_alloc_hdl) 3414 { 3415 tavor_mem_alloc_hdl_t memhdl; 3416 3417 TAVOR_TNF_ENTER(tavor_ci_free_io_mem); 3418 3419 /* Check for valid HCA handle */ 3420 if (hca == NULL) { 3421 TNF_PROBE_0(tavor_ci_free_io_mem_invhca_fail, 3422 TAVOR_TNF_ERROR, ""); 3423 TAVOR_TNF_EXIT(tavor_ci_free_io_mem); 3424 return (IBT_HCA_HDL_INVALID); 3425 } 3426 3427 /* Check for valid mem_alloc_hdl handle pointer */ 3428 if (mem_alloc_hdl == NULL) { 3429 TNF_PROBE_0(tavor_ci_free_io_mem_hdl_fail, 3430 TAVOR_TNF_ERROR, ""); 3431 TAVOR_TNF_EXIT(tavor_ci_free_io_mem); 3432 return (IBT_MEM_ALLOC_HDL_INVALID); 3433 } 3434 3435 memhdl = (tavor_mem_alloc_hdl_t)mem_alloc_hdl; 3436 3437 /* free the memory */ 3438 ddi_dma_mem_free(&memhdl->tavor_acc_hdl); 3439 ddi_dma_free_handle(&memhdl->tavor_dma_hdl); 3440 3441 kmem_free(memhdl, sizeof (*memhdl)); 3442 TAVOR_TNF_EXIT(tavor_dma_free); 3443 return (IBT_SUCCESS); 3444 } 3445 3446 3447 int 3448 tavor_mem_alloc( 3449 tavor_state_t *state, 3450 size_t size, 3451 ibt_mr_flags_t flags, 3452 caddr_t *kaddrp, 3453 tavor_mem_alloc_hdl_t *mem_hdl) 3454 { 3455 ddi_dma_handle_t dma_hdl; 3456 ddi_dma_attr_t dma_attr; 3457 ddi_acc_handle_t acc_hdl; 3458 size_t real_len; 3459 int status; 3460 int (*ddi_cb)(caddr_t); 3461 3462 TAVOR_TNF_ENTER(tavor_mem_alloc); 3463 3464 tavor_dma_attr_init(&dma_attr); 3465 3466 ddi_cb = (flags & IBT_MR_NOSLEEP) ? 
DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	/* Allocate a DMA handle */
	status = ddi_dma_alloc_handle(state->ts_dip, &dma_attr, ddi_cb,
	    NULL, &dma_hdl);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_0(tavor_dma_alloc_handle_fail, TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_mem_alloc);
		return (DDI_FAILURE);
	}

	/* Allocate DDI-consistent DMA memory under the same wait policy */
	status = ddi_dma_mem_alloc(dma_hdl, size,
	    &state->ts_reg_accattr, DDI_DMA_CONSISTENT, ddi_cb,
	    NULL,
	    kaddrp, &real_len, &acc_hdl);
	if (status != DDI_SUCCESS) {
		/* Unwind the DMA handle allocated above */
		ddi_dma_free_handle(&dma_hdl);
		TNF_PROBE_0(tavor_dma_alloc_memory_fail, TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_mem_alloc);
		return (DDI_FAILURE);
	}

	/* Package the tavor_dma_info contents and return */
	*mem_hdl = kmem_alloc(sizeof (**mem_hdl),
	    flags & IBT_MR_NOSLEEP ? KM_NOSLEEP : KM_SLEEP);
	/* KM_NOSLEEP may return NULL: unwind both DDI allocations */
	if (*mem_hdl == NULL) {
		ddi_dma_mem_free(&acc_hdl);
		ddi_dma_free_handle(&dma_hdl);
		TNF_PROBE_0(tavor_dma_alloc_memory_fail, TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_mem_alloc);
		return (DDI_FAILURE);
	}
	(*mem_hdl)->tavor_dma_hdl = dma_hdl;
	(*mem_hdl)->tavor_acc_hdl = acc_hdl;

	TAVOR_TNF_EXIT(tavor_mem_alloc);
	return (DDI_SUCCESS);
}