/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * hermon_ci.c
 *    Hermon Channel Interface (CI) Routines
 *
 *    Implements all the routines necessary to interface with the IBTF.
 *    Pointers to all of these functions are passed to the IBTF at attach()
 *    time in the ibc_operations_t structure. These functions include all
 *    of the necessary routines to implement the required InfiniBand "verbs"
 *    and additional IBTF-specific interfaces.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

#include <sys/ib/adapters/hermon/hermon.h>

extern uint32_t hermon_kernel_data_ro;
extern uint32_t hermon_user_data_ro;

/* HCA and port related operations */
static ibt_status_t hermon_ci_query_hca_ports(ibc_hca_hdl_t, uint8_t,
    ibt_hca_portinfo_t *);
static ibt_status_t hermon_ci_modify_ports(ibc_hca_hdl_t, uint8_t,
    ibt_port_modify_flags_t, uint8_t);
static ibt_status_t hermon_ci_modify_system_image(ibc_hca_hdl_t, ib_guid_t);

/* Protection Domains */
static ibt_status_t hermon_ci_alloc_pd(ibc_hca_hdl_t, ibt_pd_flags_t,
    ibc_pd_hdl_t *);
static ibt_status_t hermon_ci_free_pd(ibc_hca_hdl_t, ibc_pd_hdl_t);

/* Reliable Datagram Domains */
static ibt_status_t hermon_ci_alloc_rdd(ibc_hca_hdl_t, ibc_rdd_flags_t,
    ibc_rdd_hdl_t *);
static ibt_status_t hermon_ci_free_rdd(ibc_hca_hdl_t, ibc_rdd_hdl_t);

/* Address Handles */
static ibt_status_t hermon_ci_alloc_ah(ibc_hca_hdl_t, ibt_ah_flags_t,
    ibc_pd_hdl_t, ibt_adds_vect_t *, ibc_ah_hdl_t *);
static ibt_status_t hermon_ci_free_ah(ibc_hca_hdl_t, ibc_ah_hdl_t);
static ibt_status_t hermon_ci_query_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
    ibc_pd_hdl_t *, ibt_adds_vect_t *);
static ibt_status_t hermon_ci_modify_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
    ibt_adds_vect_t *);

/* Queue Pairs */
static ibt_status_t hermon_ci_alloc_qp(ibc_hca_hdl_t, ibtl_qp_hdl_t,
    ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *, ib_qpn_t *,
    ibc_qp_hdl_t *);
static ibt_status_t hermon_ci_alloc_special_qp(ibc_hca_hdl_t, uint8_t,
    ibtl_qp_hdl_t, ibt_sqp_type_t, ibt_qp_alloc_attr_t *,
    ibt_chan_sizes_t *, ibc_qp_hdl_t *);
static ibt_status_t hermon_ci_alloc_qp_range(ibc_hca_hdl_t, uint_t,
    ibtl_qp_hdl_t *, ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *,
    ibc_cq_hdl_t *, ibc_cq_hdl_t *, ib_qpn_t *, ibc_qp_hdl_t *);
static ibt_status_t hermon_ci_free_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibc_free_qp_flags_t, ibc_qpn_hdl_t *);
static ibt_status_t hermon_ci_release_qpn(ibc_hca_hdl_t, ibc_qpn_hdl_t);
static ibt_status_t hermon_ci_query_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibt_qp_query_attr_t *);
static ibt_status_t hermon_ci_modify_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibt_cep_modify_flags_t, ibt_qp_info_t *, ibt_queue_sizes_t *);

/* Completion Queues */
static ibt_status_t hermon_ci_alloc_cq(ibc_hca_hdl_t, ibt_cq_hdl_t,
    ibt_cq_attr_t *, ibc_cq_hdl_t *, uint_t *);
static ibt_status_t hermon_ci_free_cq(ibc_hca_hdl_t, ibc_cq_hdl_t);
static ibt_status_t hermon_ci_query_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    uint_t *, uint_t *, uint_t *, ibt_cq_handler_id_t *);
static ibt_status_t hermon_ci_resize_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    uint_t, uint_t *);
static ibt_status_t hermon_ci_modify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    uint_t, uint_t, ibt_cq_handler_id_t);
static ibt_status_t hermon_ci_alloc_cq_sched(ibc_hca_hdl_t,
    ibt_cq_sched_attr_t *, ibc_sched_hdl_t *);
static ibt_status_t hermon_ci_free_cq_sched(ibc_hca_hdl_t, ibc_sched_hdl_t);
static ibt_status_t hermon_ci_query_cq_handler_id(ibc_hca_hdl_t,
    ibt_cq_handler_id_t, ibt_cq_handler_attr_t *);

/* EE Contexts */
static ibt_status_t hermon_ci_alloc_eec(ibc_hca_hdl_t, ibc_eec_flags_t,
    ibt_eec_hdl_t, ibc_rdd_hdl_t, ibc_eec_hdl_t *);
static ibt_status_t hermon_ci_free_eec(ibc_hca_hdl_t, ibc_eec_hdl_t);
static ibt_status_t hermon_ci_query_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
    ibt_eec_query_attr_t *);
static ibt_status_t hermon_ci_modify_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
    ibt_cep_modify_flags_t, ibt_eec_info_t *);

/* Memory Registration */
static ibt_status_t hermon_ci_register_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_mr_attr_t *, void *, ibc_mr_hdl_t *, ibt_mr_desc_t *);
static ibt_status_t hermon_ci_register_buf(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_smr_attr_t *, struct buf *, void *, ibt_mr_hdl_t *, ibt_mr_desc_t *);
static ibt_status_t hermon_ci_register_shared_mr(ibc_hca_hdl_t,
    ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_smr_attr_t *, void *,
    ibc_mr_hdl_t *, ibt_mr_desc_t *);
static ibt_status_t hermon_ci_deregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t);
static ibt_status_t hermon_ci_query_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
    ibt_mr_query_attr_t *);
static ibt_status_t hermon_ci_reregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
    ibc_pd_hdl_t, ibt_mr_attr_t *, void *, ibc_mr_hdl_t *,
    ibt_mr_desc_t *);
static ibt_status_t hermon_ci_reregister_buf(ibc_hca_hdl_t, ibc_mr_hdl_t,
    ibc_pd_hdl_t, ibt_smr_attr_t *, struct buf *, void *, ibc_mr_hdl_t *,
    ibt_mr_desc_t *);
static ibt_status_t hermon_ci_sync_mr(ibc_hca_hdl_t, ibt_mr_sync_t *, size_t);
static ibt_status_t hermon_ci_register_dma_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_dmr_attr_t *, void *, ibc_mr_hdl_t *, ibt_mr_desc_t *);

/* Memory Windows */
static ibt_status_t hermon_ci_alloc_mw(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_mw_flags_t, ibc_mw_hdl_t *, ibt_rkey_t *);
static ibt_status_t hermon_ci_free_mw(ibc_hca_hdl_t, ibc_mw_hdl_t);
static ibt_status_t hermon_ci_query_mw(ibc_hca_hdl_t, ibc_mw_hdl_t,
    ibt_mw_query_attr_t *);

/* Multicast Groups */
static ibt_status_t hermon_ci_attach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ib_gid_t, ib_lid_t);
static ibt_status_t hermon_ci_detach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ib_gid_t, ib_lid_t);

/* Work Request and Completion Processing */
static ibt_status_t hermon_ci_post_send(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibt_send_wr_t *, uint_t, uint_t *);
static ibt_status_t hermon_ci_post_recv(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibt_recv_wr_t *, uint_t, uint_t *);
static ibt_status_t hermon_ci_poll_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    ibt_wc_t *, uint_t, uint_t *);
static ibt_status_t hermon_ci_notify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    ibt_cq_notify_flags_t);

/* CI Object Private Data */
static ibt_status_t hermon_ci_ci_data_in(ibc_hca_hdl_t, ibt_ci_data_flags_t,
    ibt_object_type_t, void *, void *, size_t);

/* CI Object Private Data */
static ibt_status_t hermon_ci_ci_data_out(ibc_hca_hdl_t, ibt_ci_data_flags_t,
    ibt_object_type_t, void *, void *, size_t);

/* Shared Receive Queues */
static ibt_status_t hermon_ci_alloc_srq(ibc_hca_hdl_t, ibt_srq_flags_t,
    ibt_srq_hdl_t, ibc_pd_hdl_t, ibt_srq_sizes_t *, ibc_srq_hdl_t *,
    ibt_srq_sizes_t *);
static ibt_status_t hermon_ci_free_srq(ibc_hca_hdl_t, ibc_srq_hdl_t);
static ibt_status_t hermon_ci_query_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
    ibc_pd_hdl_t *, ibt_srq_sizes_t *, uint_t *);
static ibt_status_t hermon_ci_modify_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
    ibt_srq_modify_flags_t, uint_t, uint_t, uint_t *);
static ibt_status_t hermon_ci_post_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
    ibt_recv_wr_t *, uint_t, uint_t *);

/* Address translation */
static ibt_status_t hermon_ci_map_mem_area(ibc_hca_hdl_t, ibt_va_attr_t *,
    void *, uint_t, ibt_reg_req_t *, ibc_ma_hdl_t *);
static ibt_status_t hermon_ci_unmap_mem_area(ibc_hca_hdl_t, ibc_ma_hdl_t);
static ibt_status_t hermon_ci_map_mem_iov(ibc_hca_hdl_t, ibt_iov_attr_t *,
    ibt_all_wr_t *, ibc_mi_hdl_t *);
static ibt_status_t hermon_ci_unmap_mem_iov(ibc_hca_hdl_t, ibc_mi_hdl_t);

/* Allocate L_Key */
static ibt_status_t hermon_ci_alloc_lkey(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_lkey_flags_t, uint_t, ibc_mr_hdl_t *, ibt_pmr_desc_t *);

/* Physical Register Memory Region */
static ibt_status_t hermon_ci_register_physical_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *, ibt_pmr_desc_t *);
static ibt_status_t hermon_ci_reregister_physical_mr(ibc_hca_hdl_t,
    ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *,
    ibt_pmr_desc_t *);

/* Mellanox FMR */
static ibt_status_t hermon_ci_create_fmr_pool(ibc_hca_hdl_t hca,
    ibc_pd_hdl_t pd, ibt_fmr_pool_attr_t *fmr_params,
    ibc_fmr_pool_hdl_t *fmr_pool);
static ibt_status_t hermon_ci_destroy_fmr_pool(ibc_hca_hdl_t hca,
    ibc_fmr_pool_hdl_t fmr_pool);
static ibt_status_t hermon_ci_flush_fmr_pool(ibc_hca_hdl_t hca,
    ibc_fmr_pool_hdl_t fmr_pool);
static ibt_status_t hermon_ci_register_physical_fmr(ibc_hca_hdl_t hca,
    ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
    void *ibtl_reserved, ibc_mr_hdl_t *mr_hdl_p, ibt_pmr_desc_t *mem_desc_p);
static ibt_status_t hermon_ci_deregister_fmr(ibc_hca_hdl_t hca,
    ibc_mr_hdl_t mr);

/* Memory Allocation/Deallocation */
static ibt_status_t hermon_ci_alloc_io_mem(ibc_hca_hdl_t hca, size_t size,
    ibt_mr_flags_t mr_flag, caddr_t *kaddrp,
    ibc_mem_alloc_hdl_t *mem_alloc_hdl_p);
static ibt_status_t hermon_ci_free_io_mem(ibc_hca_hdl_t hca,
    ibc_mem_alloc_hdl_t mem_alloc_hdl);
static ibt_status_t hermon_ci_not_supported();

/*
 * This ibc_operations_t structure includes pointers to all the entry points
 * provided by the Hermon driver. This structure is passed to the IBTF at
 * driver attach time, using the ibc_attach() call.
 */
ibc_operations_t hermon_ibc_ops = {
	/* HCA and port related operations */
	hermon_ci_query_hca_ports,
	hermon_ci_modify_ports,
	hermon_ci_modify_system_image,

	/* Protection Domains */
	hermon_ci_alloc_pd,
	hermon_ci_free_pd,

	/* Reliable Datagram Domains */
	hermon_ci_alloc_rdd,
	hermon_ci_free_rdd,

	/* Address Handles */
	hermon_ci_alloc_ah,
	hermon_ci_free_ah,
	hermon_ci_query_ah,
	hermon_ci_modify_ah,

	/* Queue Pairs */
	hermon_ci_alloc_qp,
	hermon_ci_alloc_special_qp,
	hermon_ci_alloc_qp_range,
	hermon_ci_free_qp,
	hermon_ci_release_qpn,
	hermon_ci_query_qp,
	hermon_ci_modify_qp,

	/* Completion Queues */
	hermon_ci_alloc_cq,
	hermon_ci_free_cq,
	hermon_ci_query_cq,
	hermon_ci_resize_cq,
	hermon_ci_modify_cq,
	hermon_ci_alloc_cq_sched,
	hermon_ci_free_cq_sched,
	hermon_ci_query_cq_handler_id,

	/* EE Contexts */
	hermon_ci_alloc_eec,
	hermon_ci_free_eec,
	hermon_ci_query_eec,
	hermon_ci_modify_eec,

	/* Memory Registration */
	hermon_ci_register_mr,
	hermon_ci_register_buf,
	hermon_ci_register_shared_mr,
	hermon_ci_deregister_mr,
	hermon_ci_query_mr,
	hermon_ci_reregister_mr,
	hermon_ci_reregister_buf,
	hermon_ci_sync_mr,

	/* Memory Windows */
	hermon_ci_alloc_mw,
	hermon_ci_free_mw,
	hermon_ci_query_mw,

	/* Multicast Groups */
	hermon_ci_attach_mcg,
	hermon_ci_detach_mcg,

	/* Work Request and Completion Processing */
	hermon_ci_post_send,
	hermon_ci_post_recv,
	hermon_ci_poll_cq,
	hermon_ci_notify_cq,

	/* CI Object Mapping Data */
	hermon_ci_ci_data_in,
	hermon_ci_ci_data_out,

	/* Shared Receive Queue */
	hermon_ci_alloc_srq,
	hermon_ci_free_srq,
	hermon_ci_query_srq,
	hermon_ci_modify_srq,
	hermon_ci_post_srq,

	/* Address translation */
	hermon_ci_map_mem_area,
	hermon_ci_unmap_mem_area,
	hermon_ci_map_mem_iov,
	hermon_ci_unmap_mem_iov,

	/* Allocate L_key */
	hermon_ci_alloc_lkey,

	/* Physical Register Memory Region */
	hermon_ci_register_physical_mr,
	hermon_ci_reregister_physical_mr,

	/* Mellanox FMR */
	hermon_ci_create_fmr_pool,
	hermon_ci_destroy_fmr_pool,
	hermon_ci_flush_fmr_pool,
	hermon_ci_register_physical_fmr,
	hermon_ci_deregister_fmr,

	/* Memory allocation */
	hermon_ci_alloc_io_mem,
	hermon_ci_free_io_mem,

	/* XRC not yet supported */
	hermon_ci_not_supported,	/* ibc_alloc_xrc_domain */
	hermon_ci_not_supported,	/* ibc_free_xrc_domain */
	hermon_ci_not_supported,	/* ibc_alloc_xrc_srq */
	hermon_ci_not_supported,	/* ibc_free_xrc_srq */
	hermon_ci_not_supported,	/* ibc_query_xrc_srq */
	hermon_ci_not_supported,	/* ibc_modify_xrc_srq */
	hermon_ci_not_supported,	/* ibc_alloc_xrc_tgt_qp */
	hermon_ci_not_supported,	/* ibc_free_xrc_tgt_qp */
	hermon_ci_not_supported,	/* ibc_query_xrc_tgt_qp */
	hermon_ci_not_supported,	/* ibc_modify_xrc_tgt_qp */

	/* Memory Region (physical) */
	hermon_ci_register_dma_mr,

	/* Next enhancements */
	hermon_ci_not_supported,	/* ibc_enhancement1 */
	hermon_ci_not_supported,	/* ibc_enhancement2 */
	hermon_ci_not_supported,	/* ibc_enhancement3 */
	hermon_ci_not_supported,	/* ibc_enhancement4 */
};
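
/*
 * Attach-time registration sketch. This is illustrative only: apart from
 * hermon_ibc_ops and the ibc_attach() call named in the comment above, the
 * type, field, and variable names below are assumptions, not a copy of the
 * driver's actual attach path:
 *
 *	ibc_hca_info_t	hca_info;		-- assumed type usage
 *
 *	hca_info.hca_ops = &hermon_ibc_ops;	-- assumed field name
 *	... fill in the remaining HCA info ...
 *	status = ibc_attach(&ibc_clnt_hdl, &hca_info);
 */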

/*
 * Not yet implemented OPS
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_not_supported()
{
	return (IBT_NOT_SUPPORTED);
}


/*
 * hermon_ci_query_hca_ports()
 *    Returns HCA port attributes for either one or all of the HCA's ports.
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_query_hca_ports(ibc_hca_hdl_t hca, uint8_t query_port,
    ibt_hca_portinfo_t *info_p)
{
	hermon_state_t	*state;
	uint_t		start, end, port;
	int		status, indx;

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/*
	 * If the specified port is zero, then we are supposed to query all
	 * ports. Otherwise, we query only the port number specified.
	 * Setup the start and end port numbers as appropriate for the loop
	 * below. Note: The first Hermon port is port number one (1).
	 */
	if (query_port == 0) {
		start = 1;
		end = start + (state->hs_cfg_profile->cp_num_ports - 1);
	} else {
		end = start = query_port;
	}
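
	/*
	 * For example, when query_port is zero on a two-port HCA, the loop
	 * below runs with start == 1 and end == 2 and fills in info_p[0]
	 * and info_p[1]; the caller must supply one info_p entry per port
	 * when querying all ports.
	 */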

	/* Query the port(s) */
	for (port = start, indx = 0; port <= end; port++, indx++) {
		status = hermon_port_query(state, port, &info_p[indx]);
		if (status != DDI_SUCCESS) {
			return (status);
		}
	}
	return (IBT_SUCCESS);
}


/*
 * hermon_ci_modify_ports()
 *    Modify HCA port attributes
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_modify_ports(ibc_hca_hdl_t hca, uint8_t port,
    ibt_port_modify_flags_t flags, uint8_t init_type)
{
	hermon_state_t	*state;
	int		status;

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/* Modify the port(s) */
	status = hermon_port_modify(state, port, flags, init_type);
	return (status);
}

/*
 * hermon_ci_modify_system_image()
 *    Modify the System Image GUID
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_modify_system_image(ibc_hca_hdl_t hca, ib_guid_t sys_guid)
{
	/*
	 * This is an unsupported interface for the Hermon driver. This
	 * interface is necessary to support modification of the System
	 * Image GUID. Hermon is only capable of modifying this parameter
	 * once (during driver initialization).
	 */
	return (IBT_NOT_SUPPORTED);
}

/*
 * hermon_ci_alloc_pd()
 *    Allocate a Protection Domain
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_pd(ibc_hca_hdl_t hca, ibt_pd_flags_t flags, ibc_pd_hdl_t *pd_p)
{
	hermon_state_t	*state;
	hermon_pdhdl_t	pdhdl;
	int		status;

	ASSERT(pd_p != NULL);

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/* Allocate the PD */
	status = hermon_pd_alloc(state, &pdhdl, HERMON_NOSLEEP);
	if (status != DDI_SUCCESS) {
		return (status);
	}
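
	/*
	 * Note: HERMON_NOSLEEP is used in these CI entry points so that
	 * resource allocation fails immediately with an error status rather
	 * than blocking when resources are not available.
	 */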

	/* Return the Hermon PD handle */
	*pd_p = (ibc_pd_hdl_t)pdhdl;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_free_pd()
 *    Free a Protection Domain
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_free_pd(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd)
{
	hermon_state_t	*state;
	hermon_pdhdl_t	pdhdl;
	int		status;

	/* Grab the Hermon softstate pointer and PD handle */
	state = (hermon_state_t *)hca;
	pdhdl = (hermon_pdhdl_t)pd;

	/* Free the PD */
	status = hermon_pd_free(state, &pdhdl);
	return (status);
}


/*
 * hermon_ci_alloc_rdd()
 *    Allocate a Reliable Datagram Domain
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_rdd(ibc_hca_hdl_t hca, ibc_rdd_flags_t flags,
    ibc_rdd_hdl_t *rdd_p)
{
	/*
	 * This is an unsupported interface for the Hermon driver. This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations. Hermon does not support RD.
	 */
	return (IBT_NOT_SUPPORTED);
}


/*
 * hermon_ci_free_rdd()
 *    Free a Reliable Datagram Domain
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_free_rdd(ibc_hca_hdl_t hca, ibc_rdd_hdl_t rdd)
{
	/*
	 * This is an unsupported interface for the Hermon driver. This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations. Hermon does not support RD.
	 */
	return (IBT_NOT_SUPPORTED);
}


/*
 * hermon_ci_alloc_ah()
 *    Allocate an Address Handle
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_ah(ibc_hca_hdl_t hca, ibt_ah_flags_t flags, ibc_pd_hdl_t pd,
    ibt_adds_vect_t *attr_p, ibc_ah_hdl_t *ah_p)
{
	hermon_state_t	*state;
	hermon_ahhdl_t	ahhdl;
	hermon_pdhdl_t	pdhdl;
	int		status;

	/* Grab the Hermon softstate pointer and PD handle */
	state = (hermon_state_t *)hca;
	pdhdl = (hermon_pdhdl_t)pd;

	/* Allocate the AH */
	status = hermon_ah_alloc(state, pdhdl, attr_p, &ahhdl, HERMON_NOSLEEP);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Return the Hermon AH handle */
	*ah_p = (ibc_ah_hdl_t)ahhdl;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_free_ah()
 *    Free an Address Handle
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_free_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah)
{
	hermon_state_t	*state;
	hermon_ahhdl_t	ahhdl;
	int		status;

	/* Grab the Hermon softstate pointer and AH handle */
	state = (hermon_state_t *)hca;
	ahhdl = (hermon_ahhdl_t)ah;

	/* Free the AH */
	status = hermon_ah_free(state, &ahhdl, HERMON_NOSLEEP);

	return (status);
}


/*
 * hermon_ci_query_ah()
 *    Return the Address Vector information for a specified Address Handle
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_query_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibc_pd_hdl_t *pd_p,
    ibt_adds_vect_t *attr_p)
{
	hermon_state_t	*state;
	hermon_ahhdl_t	ahhdl;
	hermon_pdhdl_t	pdhdl;
	int		status;

	/* Grab the Hermon softstate pointer and AH handle */
	state = (hermon_state_t *)hca;
	ahhdl = (hermon_ahhdl_t)ah;

	/* Query the AH */
	status = hermon_ah_query(state, ahhdl, &pdhdl, attr_p);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Return the Hermon PD handle */
	*pd_p = (ibc_pd_hdl_t)pdhdl;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_modify_ah()
 *    Modify the Address Vector information of a specified Address Handle
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_modify_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibt_adds_vect_t *attr_p)
{
	hermon_state_t	*state;
	hermon_ahhdl_t	ahhdl;
	int		status;

	/* Grab the Hermon softstate pointer and AH handle */
	state = (hermon_state_t *)hca;
	ahhdl = (hermon_ahhdl_t)ah;

	/* Modify the AH */
	status = hermon_ah_modify(state, ahhdl, attr_p);

	return (status);
}


/*
 * hermon_ci_alloc_qp()
 *    Allocate a Queue Pair
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_alloc_qp(ibc_hca_hdl_t hca, ibtl_qp_hdl_t ibt_qphdl,
    ibt_qp_type_t type, ibt_qp_alloc_attr_t *attr_p,
    ibt_chan_sizes_t *queue_sizes_p, ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
{
	hermon_state_t		*state;
	hermon_qp_info_t	qpinfo;
	int			status;

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/* Allocate the QP */
	qpinfo.qpi_attrp = attr_p;
	qpinfo.qpi_type = type;
	qpinfo.qpi_ibt_qphdl = ibt_qphdl;
	qpinfo.qpi_queueszp = queue_sizes_p;
	qpinfo.qpi_qpn = qpn;
	status = hermon_qp_alloc(state, &qpinfo, HERMON_NOSLEEP);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Return the Hermon QP handle */
	*qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_alloc_special_qp()
 *    Allocate a Special Queue Pair
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_alloc_special_qp(ibc_hca_hdl_t hca, uint8_t port,
    ibtl_qp_hdl_t ibt_qphdl, ibt_sqp_type_t type,
    ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
    ibc_qp_hdl_t *qp_p)
{
	hermon_state_t		*state;
	hermon_qp_info_t	qpinfo;
	int			status;

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/* Allocate the Special QP */
	qpinfo.qpi_attrp = attr_p;
	qpinfo.qpi_type = type;
	qpinfo.qpi_port = port;
	qpinfo.qpi_ibt_qphdl = ibt_qphdl;
	qpinfo.qpi_queueszp = queue_sizes_p;
	status = hermon_special_qp_alloc(state, &qpinfo, HERMON_NOSLEEP);
	if (status != DDI_SUCCESS) {
		return (status);
	}
	/* Return the Hermon QP handle */
	*qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;

	return (IBT_SUCCESS);
}

/*
 * hermon_ci_alloc_qp_range()
 *    Allocate a range of Queue Pairs
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_qp_range(ibc_hca_hdl_t hca, uint_t log2,
    ibtl_qp_hdl_t *ibtl_qp, ibt_qp_type_t type,
    ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
    ibc_cq_hdl_t *send_cq, ibc_cq_hdl_t *recv_cq,
    ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
{
	hermon_state_t		*state;
	hermon_qp_info_t	qpinfo;
	int			status;

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/* Allocate the QP */
	qpinfo.qpi_attrp = attr_p;
	qpinfo.qpi_type = type;
	qpinfo.qpi_queueszp = queue_sizes_p;
	qpinfo.qpi_qpn = qpn;
	status = hermon_qp_alloc_range(state, log2, &qpinfo, ibtl_qp,
	    send_cq, recv_cq, (hermon_qphdl_t *)qp_p, HERMON_NOSLEEP);
	return (status);
}

/*
 * hermon_ci_free_qp()
 *    Free a Queue Pair
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_free_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
    ibc_free_qp_flags_t free_qp_flags, ibc_qpn_hdl_t *qpnh_p)
{
	hermon_state_t	*state;
	hermon_qphdl_t	qphdl;
	int		status;

	/* Grab the Hermon softstate pointer and QP handle */
	state = (hermon_state_t *)hca;
	qphdl = (hermon_qphdl_t)qp;

	/* Free the QP */
	status = hermon_qp_free(state, &qphdl, free_qp_flags, qpnh_p,
	    HERMON_NOSLEEP);

	return (status);
}


/*
 * hermon_ci_release_qpn()
 *    Release a Queue Pair Number (QPN)
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_release_qpn(ibc_hca_hdl_t hca, ibc_qpn_hdl_t qpnh)
{
	hermon_state_t		*state;
	hermon_qpn_entry_t	*entry;

	/* Grab the Hermon softstate pointer and QP handle */
	state = (hermon_state_t *)hca;
	entry = (hermon_qpn_entry_t *)qpnh;

	/* Release the QP number */
	hermon_qp_release_qpn(state, entry, HERMON_QPN_RELEASE);

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_query_qp()
 *    Query a Queue Pair
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_query_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
    ibt_qp_query_attr_t *attr_p)
{
	hermon_state_t	*state;
	hermon_qphdl_t	qphdl;
	int		status;

	/* Grab the Hermon softstate pointer and QP handle */
	state = (hermon_state_t *)hca;
	qphdl = (hermon_qphdl_t)qp;

	/* Query the QP */
	status = hermon_qp_query(state, qphdl, attr_p);
	return (status);
}


/*
 * hermon_ci_modify_qp()
 *    Modify a Queue Pair
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_modify_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
    ibt_cep_modify_flags_t flags, ibt_qp_info_t *info_p,
    ibt_queue_sizes_t *actual_sz)
{
	hermon_state_t	*state;
	hermon_qphdl_t	qphdl;
	int		status;

	/* Grab the Hermon softstate pointer and QP handle */
	state = (hermon_state_t *)hca;
	qphdl = (hermon_qphdl_t)qp;

	/* Modify the QP */
	status = hermon_qp_modify(state, qphdl, flags, info_p, actual_sz);
	return (status);
}


/*
 * hermon_ci_alloc_cq()
 *    Allocate a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_cq(ibc_hca_hdl_t hca, ibt_cq_hdl_t ibt_cqhdl,
    ibt_cq_attr_t *attr_p, ibc_cq_hdl_t *cq_p, uint_t *actual_size)
{
	hermon_state_t	*state;
	hermon_cqhdl_t	cqhdl;
	int		status;

	state = (hermon_state_t *)hca;

	/* Allocate the CQ */
	status = hermon_cq_alloc(state, ibt_cqhdl, attr_p, actual_size,
	    &cqhdl, HERMON_NOSLEEP);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Return the Hermon CQ handle */
	*cq_p = (ibc_cq_hdl_t)cqhdl;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_free_cq()
 *    Free a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_free_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq)
{
	hermon_state_t	*state;
	hermon_cqhdl_t	cqhdl;
	int		status;

	/* Grab the Hermon softstate pointer and CQ handle */
	state = (hermon_state_t *)hca;
	cqhdl = (hermon_cqhdl_t)cq;

	/* Free the CQ */
	status = hermon_cq_free(state, &cqhdl, HERMON_NOSLEEP);
	return (status);
}


/*
 * hermon_ci_query_cq()
 *    Return the size of a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_query_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t *entries_p,
    uint_t *count_p, uint_t *usec_p, ibt_cq_handler_id_t *hid_p)
{
	hermon_state_t	*state;
	hermon_cqhdl_t	cqhdl;

	/* Grab the CQ handle */
	state = (hermon_state_t *)hca;
	cqhdl = (hermon_cqhdl_t)cq;

	/* Query the current CQ size */
	*entries_p = cqhdl->cq_bufsz;
	*count_p = cqhdl->cq_intmod_count;
	*usec_p = cqhdl->cq_intmod_usec;
	*hid_p = HERMON_EQNUM_TO_HID(state, cqhdl->cq_eqnum);

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_resize_cq()
 *    Change the size of a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_resize_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t size,
    uint_t *actual_size)
{
	hermon_state_t	*state;
	hermon_cqhdl_t	cqhdl;
	int		status;

	/* Grab the Hermon softstate pointer and CQ handle */
	state = (hermon_state_t *)hca;
	cqhdl = (hermon_cqhdl_t)cq;

	/* Resize the CQ */
	status = hermon_cq_resize(state, cqhdl, size, actual_size,
	    HERMON_NOSLEEP);
	if (status != DDI_SUCCESS) {
		return (status);
	}
	return (IBT_SUCCESS);
}

/*
 * hermon_ci_modify_cq()
 *    Change the interrupt moderation values of a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_modify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t count,
    uint_t usec, ibt_cq_handler_id_t hid)
{
	hermon_state_t	*state;
	hermon_cqhdl_t	cqhdl;
	int		status;

	/* Grab the Hermon softstate pointer and CQ handle */
	state = (hermon_state_t *)hca;
	cqhdl = (hermon_cqhdl_t)cq;

	/* Modify the CQ */
	status = hermon_cq_modify(state, cqhdl, count, usec, hid,
	    HERMON_NOSLEEP);
	return (status);
}


/*
 * hermon_ci_alloc_cq_sched()
 *    Reserve a CQ scheduling class resource
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_cq_sched(ibc_hca_hdl_t hca, ibt_cq_sched_attr_t *attr,
    ibc_sched_hdl_t *sched_hdl_p)
{
	int	status;

	status = hermon_cq_sched_alloc((hermon_state_t *)hca, attr,
	    (hermon_cq_sched_t **)sched_hdl_p);
	return (status);
}


/*
 * hermon_ci_free_cq_sched()
 *    Free a CQ scheduling class resource
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_free_cq_sched(ibc_hca_hdl_t hca, ibc_sched_hdl_t sched_hdl)
{
	int	status;

	status = hermon_cq_sched_free((hermon_state_t *)hca,
	    (hermon_cq_sched_t *)sched_hdl);
	return (status);
}

static ibt_status_t
hermon_ci_query_cq_handler_id(ibc_hca_hdl_t hca,
    ibt_cq_handler_id_t hid, ibt_cq_handler_attr_t *attrs)
{
	hermon_state_t	*state;

	state = (hermon_state_t *)hca;
	if (!HERMON_HID_VALID(state, hid))
		return (IBT_CQ_HID_INVALID);
	if (attrs == NULL)
		return (IBT_INVALID_PARAM);
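	/* hid is 1-based; handler id 'hid' maps to MSI handle index hid - 1 */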
	attrs->cha_ih = state->hs_intrmsi_hdl[hid - 1];
	attrs->cha_dip = state->hs_dip;
	return (IBT_SUCCESS);
}

/*
 * hermon_ci_alloc_eec()
 *    Allocate an End-to-End context
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_eec(ibc_hca_hdl_t hca, ibc_eec_flags_t flags,
    ibt_eec_hdl_t ibt_eec, ibc_rdd_hdl_t rdd, ibc_eec_hdl_t *eec_p)
{
	/*
	 * This is an unsupported interface for the Hermon driver. This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations. Hermon does not support RD.
	 */
	return (IBT_NOT_SUPPORTED);
}


/*
 * hermon_ci_free_eec()
 *    Free an End-to-End context
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_free_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec)
{
	/*
	 * This is an unsupported interface for the Hermon driver. This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations. Hermon does not support RD.
	 */
	return (IBT_NOT_SUPPORTED);
}


/*
 * hermon_ci_query_eec()
 *    Query an End-to-End context
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_query_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
    ibt_eec_query_attr_t *attr_p)
{
	/*
	 * This is an unsupported interface for the Hermon driver. This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations. Hermon does not support RD.
	 */
	return (IBT_NOT_SUPPORTED);
}


/*
 * hermon_ci_modify_eec()
 *    Modify an End-to-End context
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_modify_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
    ibt_cep_modify_flags_t flags, ibt_eec_info_t *info_p)
{
	/*
	 * This is an unsupported interface for the Hermon driver. This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations. Hermon does not support RD.
	 */
	return (IBT_NOT_SUPPORTED);
}


/*
 * hermon_ci_register_mr()
 *    Prepare a virtually addressed Memory Region for use by an HCA
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_register_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
    ibt_mr_desc_t *mr_desc)
{
	hermon_mr_options_t	op;
	hermon_state_t		*state;
	hermon_pdhdl_t		pdhdl;
	hermon_mrhdl_t		mrhdl;
	int			status;

	ASSERT(mr_attr != NULL);
	ASSERT(mr_p != NULL);
	ASSERT(mr_desc != NULL);

	/*
	 * Validate the access flags. Both Remote Write and Remote Atomic
	 * require the Local Write flag to be set
	 */
	if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
	    !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
		return (IBT_MR_ACCESS_REQ_INVALID);
	}
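
	/*
	 * For example, a request with IBT_MR_ENABLE_REMOTE_WRITE set but
	 * IBT_MR_ENABLE_LOCAL_WRITE clear fails the check above with
	 * IBT_MR_ACCESS_REQ_INVALID.
	 */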

	/* Grab the Hermon softstate pointer and PD handle */
	state = (hermon_state_t *)hca;
	pdhdl = (hermon_pdhdl_t)pd;

	/* Register the memory region */
	op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
	op.mro_bind_dmahdl = NULL;
	op.mro_bind_override_addr = 0;
	status = hermon_mr_register(state, pdhdl, mr_attr, &mrhdl,
	    &op, HERMON_MPT_DMPT);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
	mr_desc->md_lkey = mrhdl->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl->mr_rkey;
	}

	/*
	 * If region is mapped for streaming (i.e. noncoherent), then set
	 * sync is required
	 */
	mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
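
	/*
	 * A consumer that sees md_sync_required == B_TRUE is expected to
	 * bracket its DMA accesses with the sync entry point (see
	 * hermon_ci_sync_mr() below) before reading and after writing
	 * the region.
	 */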

	/* Return the Hermon MR handle */
	*mr_p = (ibc_mr_hdl_t)mrhdl;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_register_buf()
 *    Prepare a Memory Region specified by buf structure for use by an HCA
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_register_buf(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
    ibt_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
{
	hermon_mr_options_t	op;
	hermon_state_t		*state;
	hermon_pdhdl_t		pdhdl;
	hermon_mrhdl_t		mrhdl;
	int			status;
	ibt_mr_flags_t		flags = attrp->mr_flags;

	ASSERT(mr_p != NULL);
	ASSERT(mr_desc != NULL);

	/*
	 * Validate the access flags. Both Remote Write and Remote Atomic
	 * require the Local Write flag to be set
	 */
	if (((flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
	    !(flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
		return (IBT_MR_ACCESS_REQ_INVALID);
	}

	/* Grab the Hermon softstate pointer and PD handle */
	state = (hermon_state_t *)hca;
	pdhdl = (hermon_pdhdl_t)pd;

	/* Register the memory region */
	op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
	op.mro_bind_dmahdl = NULL;
	op.mro_bind_override_addr = 0;
	status = hermon_mr_register_buf(state, pdhdl, attrp, buf,
	    &mrhdl, &op, HERMON_MPT_DMPT);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
	mr_desc->md_lkey = mrhdl->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl->mr_rkey;
	}

	/*
	 * If region is mapped for streaming (i.e. noncoherent), then set
	 * sync is required
	 */
	mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

	/* Return the Hermon MR handle */
	*mr_p = (ibc_mr_hdl_t)mrhdl;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_deregister_mr()
 *    Deregister a Memory Region from an HCA translation table
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_deregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
{
	hermon_state_t	*state;
	hermon_mrhdl_t	mrhdl;
	int		status;

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;
	mrhdl = (hermon_mrhdl_t)mr;

	/*
	 * Deregister the memory region.
	 */
	status = hermon_mr_deregister(state, &mrhdl, HERMON_MR_DEREG_ALL,
	    HERMON_NOSLEEP);
	return (status);
}


/*
 * hermon_ci_query_mr()
 *    Retrieve information about a specified Memory Region
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_query_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
    ibt_mr_query_attr_t *mr_attr)
{
	hermon_state_t	*state;
	hermon_mrhdl_t	mrhdl;
	int		status;

	ASSERT(mr_attr != NULL);

	/* Grab the Hermon softstate pointer and MR handle */
	state = (hermon_state_t *)hca;
	mrhdl = (hermon_mrhdl_t)mr;

	/* Query the memory region */
	status = hermon_mr_query(state, mrhdl, mr_attr);
	return (status);
}


/*
 * hermon_ci_register_shared_mr()
 *    Create a shared memory region matching an existing Memory Region
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_register_shared_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
    ibc_pd_hdl_t pd, ibt_smr_attr_t *mr_attr, void *ibtl_reserved,
    ibc_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
{
	hermon_state_t	*state;
	hermon_pdhdl_t	pdhdl;
	hermon_mrhdl_t	mrhdl, mrhdl_new;
	int		status;

	ASSERT(mr_attr != NULL);
	ASSERT(mr_p != NULL);
	ASSERT(mr_desc != NULL);

	/*
	 * Validate the access flags. Both Remote Write and Remote Atomic
	 * require the Local Write flag to be set
	 */
	if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
	    !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
		return (IBT_MR_ACCESS_REQ_INVALID);
	}

	/* Grab the Hermon softstate pointer and handles */
	state = (hermon_state_t *)hca;
	pdhdl = (hermon_pdhdl_t)pd;
	mrhdl = (hermon_mrhdl_t)mr;

	/* Register the shared memory region */
	status = hermon_mr_register_shared(state, mrhdl, pdhdl, mr_attr,
	    &mrhdl_new);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
	mr_desc->md_lkey = mrhdl_new->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl_new->mr_rkey;
	}

	/*
	 * If shared region is mapped for streaming (i.e. noncoherent), then
	 * set sync is required
	 */
	mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

	/* Return the Hermon MR handle */
	*mr_p = (ibc_mr_hdl_t)mrhdl_new;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_reregister_mr()
 *    Modify the attributes of an existing Memory Region
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_reregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
    ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_new,
    ibt_mr_desc_t *mr_desc)
{
	hermon_mr_options_t	op;
	hermon_state_t		*state;
	hermon_pdhdl_t		pdhdl;
	hermon_mrhdl_t		mrhdl, mrhdl_new;
	int			status;

	ASSERT(mr_attr != NULL);
	ASSERT(mr_new != NULL);
	ASSERT(mr_desc != NULL);

	/* Grab the Hermon softstate pointer, mrhdl, and pdhdl */
	state = (hermon_state_t *)hca;
	mrhdl = (hermon_mrhdl_t)mr;
	pdhdl = (hermon_pdhdl_t)pd;

	/* Reregister the memory region */
	op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
	status = hermon_mr_reregister(state, mrhdl, pdhdl, mr_attr,
	    &mrhdl_new, &op);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
	mr_desc->md_lkey = mrhdl_new->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl_new->mr_rkey;
	}

	/*
	 * If region is mapped for streaming (i.e. noncoherent), then set
	 * sync is required
	 */
	mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

	/* Return the Hermon MR handle */
	*mr_new = (ibc_mr_hdl_t)mrhdl_new;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_reregister_buf()
 *    Modify the attributes of an existing Memory Region
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_reregister_buf(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
    ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
    ibc_mr_hdl_t *mr_new, ibt_mr_desc_t *mr_desc)
{
	hermon_mr_options_t	op;
	hermon_state_t		*state;
	hermon_pdhdl_t		pdhdl;
	hermon_mrhdl_t		mrhdl, mrhdl_new;
	int			status;
	ibt_mr_flags_t		flags = attrp->mr_flags;

	ASSERT(mr_new != NULL);
	ASSERT(mr_desc != NULL);

	/* Grab the Hermon softstate pointer, mrhdl, and pdhdl */
	state = (hermon_state_t *)hca;
	mrhdl = (hermon_mrhdl_t)mr;
	pdhdl = (hermon_pdhdl_t)pd;

	/* Reregister the memory region */
	op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
	status = hermon_mr_reregister_buf(state, mrhdl, pdhdl, attrp, buf,
	    &mrhdl_new, &op);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
	mr_desc->md_lkey = mrhdl_new->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl_new->mr_rkey;
	}

	/*
	 * If region is mapped for streaming (i.e. noncoherent), then set
	 * sync is required
	 */
	mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

	/* Return the Hermon MR handle */
	*mr_new = (ibc_mr_hdl_t)mrhdl_new;

	return (IBT_SUCCESS);
}

/*
 * hermon_ci_sync_mr()
 *    Synchronize access to a Memory Region
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_sync_mr(ibc_hca_hdl_t hca, ibt_mr_sync_t *mr_segs, size_t num_segs)
{
	hermon_state_t	*state;
	int		status;

	ASSERT(mr_segs != NULL);

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/* Sync the memory region */
	status = hermon_mr_sync(state, mr_segs, num_segs);
	return (status);
}


/*
 * hermon_ci_alloc_mw()
 *    Allocate a Memory Window
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_alloc_mw(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, ibt_mw_flags_t flags,
    ibc_mw_hdl_t *mw_p, ibt_rkey_t *rkey_p)
{
	hermon_state_t	*state;
	hermon_pdhdl_t	pdhdl;
	hermon_mwhdl_t	mwhdl;
	int		status;

	ASSERT(mw_p != NULL);
	ASSERT(rkey_p != NULL);

	/* Grab the Hermon softstate pointer and PD handle */
	state = (hermon_state_t *)hca;
	pdhdl = (hermon_pdhdl_t)pd;

	/* Allocate the memory window */
	status = hermon_mw_alloc(state, pdhdl, flags, &mwhdl);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Return the MW handle and RKey */
	*mw_p = (ibc_mw_hdl_t)mwhdl;
	*rkey_p = mwhdl->mr_rkey;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_free_mw()
 *    Free a Memory Window
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_free_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw)
{
	hermon_state_t	*state;
	hermon_mwhdl_t	mwhdl;
	int		status;

	/* Grab the Hermon softstate pointer and MW handle */
	state = (hermon_state_t *)hca;
	mwhdl = (hermon_mwhdl_t)mw;

	/* Free the memory window */
	status = hermon_mw_free(state, &mwhdl, HERMON_NOSLEEP);
	return (status);
}


/*
 * hermon_ci_query_mw()
 *    Return the attributes of the specified Memory Window
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_query_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw,
    ibt_mw_query_attr_t *mw_attr_p)
{
	hermon_mwhdl_t	mwhdl;

	ASSERT(mw_attr_p != NULL);

	/* Query the memory window pointer and fill in the return values */
	mwhdl = (hermon_mwhdl_t)mw;
	mutex_enter(&mwhdl->mr_lock);
	mw_attr_p->mw_pd = (ibc_pd_hdl_t)mwhdl->mr_pdhdl;
	mw_attr_p->mw_rkey = mwhdl->mr_rkey;
	mutex_exit(&mwhdl->mr_lock);

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_register_dma_mr()
 *    Allocate a memory region that maps physical addresses.
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_register_dma_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_dmr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
    ibt_mr_desc_t *mr_desc)
{
	hermon_state_t	*state;
	hermon_pdhdl_t	pdhdl;
	hermon_mrhdl_t	mrhdl;
	int		status;

	ASSERT(mr_attr != NULL);
	ASSERT(mr_p != NULL);
	ASSERT(mr_desc != NULL);

	/*
	 * Validate the access flags. Both Remote Write and Remote Atomic
	 * require the Local Write flag to be set
	 */
	if (((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
	    !(mr_attr->dmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
		return (IBT_MR_ACCESS_REQ_INVALID);
	}

	/* Grab the Hermon softstate pointer and PD handle */
	state = (hermon_state_t *)hca;
	pdhdl = (hermon_pdhdl_t)pd;

	status = hermon_dma_mr_register(state, pdhdl, mr_attr, &mrhdl);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mr_attr->dmr_paddr;
	mr_desc->md_lkey = mrhdl->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl->mr_rkey;
	}

	/* A DMA memory region never requires an explicit sync */
	mr_desc->md_sync_required = B_FALSE;

	/* Return the Hermon MR handle */
	*mr_p = (ibc_mr_hdl_t)mrhdl;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_attach_mcg()
 *    Attach a Queue Pair to a Multicast Group
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_attach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
    ib_lid_t lid)
{
	hermon_state_t	*state;
	hermon_qphdl_t	qphdl;
	int		status;

	/* Grab the Hermon softstate pointer and QP handles */
	state = (hermon_state_t *)hca;
	qphdl = (hermon_qphdl_t)qp;

	/* Attach the QP to the multicast group */
	status = hermon_mcg_attach(state, qphdl, gid, lid);
	return (status);
}


/*
 * hermon_ci_detach_mcg()
 *    Detach a Queue Pair from a Multicast Group
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_detach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
    ib_lid_t lid)
{
	hermon_state_t	*state;
	hermon_qphdl_t	qphdl;
	int		status;

	/* Grab the Hermon softstate pointer and QP handle */
	state = (hermon_state_t *)hca;
	qphdl = (hermon_qphdl_t)qp;

	/* Detach the QP from the multicast group */
	status = hermon_mcg_detach(state, qphdl, gid, lid);
	return (status);
}


/*
 * hermon_ci_post_send()
 *    Post send work requests to the send queue on the specified QP
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_post_send(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_send_wr_t *wr_p,
    uint_t num_wr, uint_t *num_posted_p)
{
	hermon_state_t	*state;
	hermon_qphdl_t	qphdl;
	int		status;

	ASSERT(wr_p != NULL);
	ASSERT(num_wr != 0);

	/* Grab the Hermon softstate pointer and QP handle */
	state = (hermon_state_t *)hca;
	qphdl = (hermon_qphdl_t)qp;

	/* Post the send WQEs */
	status = hermon_post_send(state, qphdl, wr_p, num_wr, num_posted_p);
	return (status);
}


/*
 * hermon_ci_post_recv()
 *    Post receive work requests to the receive queue on the specified QP
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_post_recv(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_recv_wr_t *wr_p,
    uint_t num_wr, uint_t *num_posted_p)
{
	hermon_state_t	*state;
	hermon_qphdl_t	qphdl;
	int		status;

	ASSERT(wr_p != NULL);
	ASSERT(num_wr != 0);

	state = (hermon_state_t *)hca;
	qphdl = (hermon_qphdl_t)qp;

	/* Post the receive WQEs */
	status = hermon_post_recv(state, qphdl, wr_p, num_wr, num_posted_p);
	return (status);
}


/*
 * hermon_ci_poll_cq()
 *    Poll for a work request completion
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_poll_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, ibt_wc_t *wc_p,
    uint_t num_wc, uint_t *num_polled)
{
	hermon_state_t	*state;
	hermon_cqhdl_t	cqhdl;
	int		status;

	ASSERT(wc_p != NULL);

	/* Check for valid num_wc field */
	if (num_wc == 0) {
		return (IBT_INVALID_PARAM);
	}

	/* Grab the Hermon softstate pointer and CQ handle */
	state = (hermon_state_t *)hca;
	cqhdl = (hermon_cqhdl_t)cq;

	/* Poll for work request completions */
	status = hermon_cq_poll(state, cqhdl, wc_p, num_wc, num_polled);
	return (status);
}


/*
 * hermon_ci_notify_cq()
 *    Enable notification events on the specified CQ
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_notify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq_hdl,
    ibt_cq_notify_flags_t flags)
{
	hermon_state_t	*state;
	hermon_cqhdl_t	cqhdl;
	int		status;

	/* Grab the Hermon softstate pointer and CQ handle */
	state = (hermon_state_t *)hca;
	cqhdl = (hermon_cqhdl_t)cq_hdl;

	/* Enable the CQ notification */
	status = hermon_cq_notify(state, cqhdl, flags);
	return (status);
}

/*
 * hermon_ci_ci_data_in()
 *    Exchange CI-specific data.
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_ci_data_in(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
    ibt_object_type_t object, void *ibc_object_handle, void *data_p,
    size_t data_sz)
{
	hermon_state_t	*state;
	int		status;

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/* Pass the CI-specific data to the Hermon userland mapping code */
	status = hermon_umap_ci_data_in(state, flags, object,
	    ibc_object_handle, data_p, data_sz);
	return (status);
}

/*
 * hermon_ci_ci_data_out()
 *    Exchange CI-specific data.
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_ci_data_out(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
    ibt_object_type_t object, void *ibc_object_handle, void *data_p,
    size_t data_sz)
{
	hermon_state_t	*state;
	int		status;

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/* Get the Hermon userland mapping information */
	status = hermon_umap_ci_data_out(state, flags, object,
	    ibc_object_handle, data_p, data_sz);
	return (status);
}


/*
 * hermon_ci_alloc_srq()
 *    Allocate a Shared Receive Queue (SRQ)
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_alloc_srq(ibc_hca_hdl_t hca, ibt_srq_flags_t flags,
    ibt_srq_hdl_t ibt_srq, ibc_pd_hdl_t pd, ibt_srq_sizes_t *sizes,
    ibc_srq_hdl_t *ibc_srq_p, ibt_srq_sizes_t *ret_sizes_p)
{
	hermon_state_t		*state;
	hermon_pdhdl_t		pdhdl;
	hermon_srqhdl_t		srqhdl;
	hermon_srq_info_t	srqinfo;
	int			status;

	state = (hermon_state_t *)hca;
	pdhdl = (hermon_pdhdl_t)pd;

	srqinfo.srqi_ibt_srqhdl = ibt_srq;
	srqinfo.srqi_pd = pdhdl;
	srqinfo.srqi_sizes = sizes;
	srqinfo.srqi_real_sizes = ret_sizes_p;
	srqinfo.srqi_srqhdl = &srqhdl;
	srqinfo.srqi_flags = flags;

	status = hermon_srq_alloc(state, &srqinfo, HERMON_NOSLEEP);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	*ibc_srq_p = (ibc_srq_hdl_t)srqhdl;

	return (IBT_SUCCESS);
}

/*
 * hermon_ci_free_srq()
 *    Free a Shared Receive Queue (SRQ)
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_free_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq)
{
	hermon_state_t	*state;
	hermon_srqhdl_t	srqhdl;
	int		status;

	state = (hermon_state_t *)hca;

	/* Check for valid SRQ handle pointer */
	if (srq == NULL) {
		return (IBT_SRQ_HDL_INVALID);
	}

	srqhdl = (hermon_srqhdl_t)srq;

	/* Free the SRQ */
	status = hermon_srq_free(state, &srqhdl, HERMON_NOSLEEP);
	return (status);
}

1879 /*
1880 * hermon_ci_query_srq()
1881 * Query properties of a Shared Receive Queue (SRQ)
1882 * Context: Can be called from interrupt or base context.
1883 */
1884 /* ARGSUSED */
1885 static ibt_status_t
1886 hermon_ci_query_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq, ibc_pd_hdl_t *pd_p,
1887 ibt_srq_sizes_t *sizes_p, uint_t *limit_p)
1888 {
1889 hermon_srqhdl_t srqhdl;
1890
1891 srqhdl = (hermon_srqhdl_t)srq;
1892
1893 mutex_enter(&srqhdl->srq_lock);
1894 if (srqhdl->srq_state == HERMON_SRQ_STATE_ERROR) {
1895 mutex_exit(&srqhdl->srq_lock);
1896 return (IBT_SRQ_ERROR_STATE);
1897 }
1898
1899 *pd_p = (ibc_pd_hdl_t)srqhdl->srq_pdhdl;
1900 sizes_p->srq_wr_sz = srqhdl->srq_real_sizes.srq_wr_sz - 1;
1901 sizes_p->srq_sgl_sz = srqhdl->srq_real_sizes.srq_sgl_sz;
1902 mutex_exit(&srqhdl->srq_lock);
1903 *limit_p = 0;
1904
1905 return (IBT_SUCCESS);
1906 }
1907
1908 /*
1909 * hermon_ci_modify_srq()
1910 * Modify properties of a Shared Receive Queue (SRQ)
1911 * Context: Can be called from interrupt or base context.
1912 */
1913 /* ARGSUSED */
1914 static ibt_status_t
1915 hermon_ci_modify_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
1916 ibt_srq_modify_flags_t flags, uint_t size, uint_t limit, uint_t *ret_size_p)
1917 {
1918 hermon_state_t *state;
1919 hermon_srqhdl_t srqhdl;
1920 uint_t resize_supported, cur_srq_size;
1921 int status;
1922
1923 state = (hermon_state_t *)hca;
1924 srqhdl = (hermon_srqhdl_t)srq;
1925
1926 /*
1927 * Check Error State of SRQ.
1928 * Also, while we are holding the lock we save away the current SRQ
1929 * size for later use.
1930 */
1931 mutex_enter(&srqhdl->srq_lock);
1932 cur_srq_size = srqhdl->srq_wq_bufsz;
1933 if (srqhdl->srq_state == HERMON_SRQ_STATE_ERROR) {
1934 mutex_exit(&srqhdl->srq_lock);
1935 return (IBT_SRQ_ERROR_STATE);
1936 }
1937 mutex_exit(&srqhdl->srq_lock);
1938
1939 /*
1940 * Setting the limit watermark is not currently supported. This is a
1941 * hermon hardware (firmware) limitation. We return NOT_SUPPORTED here,
1942 * and have the limit code commented out for now.
1943 *
1944 * XXX If we enable the limit watermark support, we need to do checks
1945 * and set the 'srq->srq_wr_limit' here, instead of returning not
1946 * supported. The 'hermon_srq_modify' operation below is for resizing
1947 * the SRQ only, the limit work should be done here. If this is
1948 * changed to use the 'limit' field, the 'ARGSUSED' comment for this
1949 * function should also be removed at that time.
1950 */
1951 if (flags & IBT_SRQ_SET_LIMIT) {
1952 return (IBT_NOT_SUPPORTED);
1953 }
1954
1955 /*
1956 * Check the SET_SIZE flag. If not set, we simply return success here.
1957 * However if it is set, we check if resize is supported and only then
1958 * do we continue on with our resize processing.
1959 */
1960 if (!(flags & IBT_SRQ_SET_SIZE)) {
1961 return (IBT_SUCCESS);
1962 }
1963
1964 resize_supported = state->hs_ibtfinfo.hca_attr->hca_flags &
1965 IBT_HCA_RESIZE_SRQ;
1966
	if (!resize_supported) {
1968 return (IBT_NOT_SUPPORTED);
1969 }
1970
	/*
	 * We do not support resizing an SRQ to be smaller than its current
	 * size.  If a smaller (or equal) size is requested, then we simply
	 * return success and do nothing.
	 */
1976 if (size <= cur_srq_size) {
1977 *ret_size_p = cur_srq_size;
1978 return (IBT_SUCCESS);
1979 }
1980
1981 status = hermon_srq_modify(state, srqhdl, size, ret_size_p,
1982 HERMON_NOSLEEP);
1983 if (status != DDI_SUCCESS) {
1984 /* Set return value to current SRQ size */
1985 *ret_size_p = cur_srq_size;
1986 return (status);
1987 }
1988
1989 return (IBT_SUCCESS);
1990 }
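/*
 * A caller-side sketch of the resize contract implemented above (the
 * handle and sizes are hypothetical, and the HCA is assumed to report
 * IBT_HCA_RESIZE_SRQ).  Only IBT_SRQ_SET_SIZE is honored, a request that
 * does not grow the SRQ succeeds as a no-op, and IBT_SRQ_SET_LIMIT
 * returns IBT_NOT_SUPPORTED:
 *
 *	uint_t real_size;
 *
 *	if (ibt_modify_srq(srq_hdl, IBT_SRQ_SET_SIZE, new_size, 0,
 *	    &real_size) == IBT_SUCCESS)
 *		... real_size now holds the (possibly unchanged) depth ...
 */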
1991
1992 /*
1993 * hermon_ci_post_srq()
1994 * Post a Work Request to the specified Shared Receive Queue (SRQ)
1995 * Context: Can be called from interrupt or base context.
1996 */
1997 static ibt_status_t
1998 hermon_ci_post_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
1999 ibt_recv_wr_t *wr, uint_t num_wr, uint_t *num_posted_p)
2000 {
2001 hermon_state_t *state;
2002 hermon_srqhdl_t srqhdl;
2003 int status;
2004
2005 state = (hermon_state_t *)hca;
2006 srqhdl = (hermon_srqhdl_t)srq;
2007
2008 status = hermon_post_srq(state, srqhdl, wr, num_wr, num_posted_p);
2009 return (status);
2010 }
2011
2012 /* Address translation */
2013
struct ibc_ma_s {
	int			h_ma_addr_list_len;
	void			*h_ma_addr_list;
	ddi_dma_handle_t	h_ma_dmahdl;	/* binds the memory area */
	ddi_dma_handle_t	h_ma_list_hdl;	/* binds the address list */
	ddi_acc_handle_t	h_ma_list_acc_hdl;
	size_t			h_ma_real_len;	/* size of the address list */
	caddr_t			h_ma_kaddr;	/* kaddr of the address list */
	ibt_phys_addr_t		h_ma_list_cookie;	/* list bus address */
};
2024
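/*
 * hermon_map_mem_area_fmr()
 *	Build the page address list for an FMR registration; helper for
 *	hermon_ci_map_mem_area() below.
 *	Context: Can be called from user or base context.
 */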
2025 static ibt_status_t
2026 hermon_map_mem_area_fmr(ibc_hca_hdl_t hca, ibt_va_attr_t *va_attrs,
2027 uint_t list_len, ibt_pmr_attr_t *pmr, ibc_ma_hdl_t *ma_hdl_p)
2028 {
2029 int status;
2030 ibt_status_t ibt_status;
2031 ibc_ma_hdl_t ma_hdl;
2032 ib_memlen_t len;
2033 ddi_dma_attr_t dma_attr;
2034 uint_t cookie_cnt;
2035 ddi_dma_cookie_t dmacookie;
2036 hermon_state_t *state;
2037 uint64_t *kaddr;
2038 uint64_t addr, endaddr, pagesize;
2039 int i, kmflag;
2040 int (*callback)(caddr_t);
2041
2042 if ((va_attrs->va_flags & IBT_VA_BUF) == 0) {
2043 return (IBT_NOT_SUPPORTED); /* XXX - not yet implemented */
2044 }
2045
2046 state = (hermon_state_t *)hca;
2047 hermon_dma_attr_init(state, &dma_attr);
2048 if (va_attrs->va_flags & IBT_VA_NOSLEEP) {
2049 kmflag = KM_NOSLEEP;
2050 callback = DDI_DMA_DONTWAIT;
2051 } else {
2052 kmflag = KM_SLEEP;
2053 callback = DDI_DMA_SLEEP;
2054 }
2055
2056 ma_hdl = kmem_zalloc(sizeof (*ma_hdl), kmflag);
2057 if (ma_hdl == NULL) {
2058 return (IBT_INSUFF_RESOURCE);
2059 }
2060 #ifdef __sparc
2061 if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
2062 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2063
2064 if (hermon_kernel_data_ro == HERMON_RO_ENABLED)
2065 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2066 #endif
2067
2068 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2069 callback, NULL, &ma_hdl->h_ma_dmahdl);
2070 if (status != DDI_SUCCESS) {
2071 kmem_free(ma_hdl, sizeof (*ma_hdl));
2072 return (IBT_INSUFF_RESOURCE);
2073 }
2074 status = ddi_dma_buf_bind_handle(ma_hdl->h_ma_dmahdl,
2075 va_attrs->va_buf, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2076 callback, NULL, &dmacookie, &cookie_cnt);
2077 if (status != DDI_DMA_MAPPED) {
		ibt_status = ibc_get_ci_failure(0);
2079 goto marea_fail3;
2080 }
2081
2082 ma_hdl->h_ma_real_len = list_len * sizeof (ibt_phys_addr_t);
2083 ma_hdl->h_ma_kaddr = kmem_zalloc(ma_hdl->h_ma_real_len, kmflag);
2084 if (ma_hdl->h_ma_kaddr == NULL) {
2085 ibt_status = IBT_INSUFF_RESOURCE;
2086 goto marea_fail4;
2087 }
2088
2089 i = 0;
2090 len = 0;
2091 pagesize = PAGESIZE;
2092 kaddr = (uint64_t *)(void *)ma_hdl->h_ma_kaddr;
2093 while (cookie_cnt-- > 0) {
2094 addr = dmacookie.dmac_laddress;
2095 len += dmacookie.dmac_size;
2096 endaddr = addr + (dmacookie.dmac_size - 1);
2097 addr = addr & ~(pagesize - 1);
2098 while (addr <= endaddr) {
2099 if (i >= list_len) {
				ibt_status = IBT_PBL_TOO_SMALL;
2101 goto marea_fail5;
2102 }
2103 kaddr[i] = htonll(addr | HERMON_MTT_ENTRY_PRESENT);
2104 i++;
2105 addr += pagesize;
2106 if (addr == 0) {
2107 static int do_once = 1;
2108 if (do_once) {
2109 do_once = 0;
2110 cmn_err(CE_NOTE, "probable error in "
2111 "dma_cookie address: map_mem_area");
2112 }
2113 break;
2114 }
2115 }
2116 if (cookie_cnt != 0)
2117 ddi_dma_nextcookie(ma_hdl->h_ma_dmahdl, &dmacookie);
2118 }
2119
2120 pmr->pmr_addr_list = (ibt_phys_addr_t *)(void *)ma_hdl->h_ma_kaddr;
2121 pmr->pmr_iova = va_attrs->va_vaddr;
2122 pmr->pmr_len = len;
2123 pmr->pmr_offset = va_attrs->va_vaddr & PAGEOFFSET;
	pmr->pmr_buf_sz = PAGESHIFT;	/* PRM says "Page Size", but... */
2125 pmr->pmr_num_buf = i;
2126 pmr->pmr_ma = ma_hdl;
2127
2128 *ma_hdl_p = ma_hdl;
2129 return (IBT_SUCCESS);
2130
2131 marea_fail5:
2132 kmem_free(ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len);
2133 marea_fail4:
2134 status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
2135 marea_fail3:
2136 ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
2137 kmem_free(ma_hdl, sizeof (*ma_hdl));
2138 *ma_hdl_p = NULL;
2139 return (ibt_status);
2140 }
2141
2142 /*
2143 * hermon_ci_map_mem_area()
2144 * Context: Can be called from user or base context.
2145 *
2146 * Creates the memory mapping suitable for a subsequent posting of an
2147 * FRWR work request. All the info about the memory area for the
2148 * FRWR work request (wr member of "union ibt_reg_req_u") is filled
2149 * such that the client only needs to point wr.rc.rcwr.reg_pmr to it,
2150 * and then fill in the additional information only it knows.
2151 *
2152 * Alternatively, creates the memory mapping for FMR.
2153 */
2154 /* ARGSUSED */
2155 static ibt_status_t
2156 hermon_ci_map_mem_area(ibc_hca_hdl_t hca, ibt_va_attr_t *va_attrs,
2157 void *ibtl_reserved, uint_t list_len, ibt_reg_req_t *reg_req,
2158 ibc_ma_hdl_t *ma_hdl_p)
2159 {
2160 ibt_status_t ibt_status;
2161 int status;
2162 ibc_ma_hdl_t ma_hdl;
2163 ibt_wr_reg_pmr_t *pmr;
2164 ib_memlen_t len;
2165 ddi_dma_attr_t dma_attr;
2166 ddi_dma_handle_t khdl;
2167 uint_t cookie_cnt;
2168 ddi_dma_cookie_t dmacookie, kcookie;
2169 hermon_state_t *state;
2170 uint64_t *kaddr;
2171 uint64_t addr, endaddr, pagesize, kcookie_paddr;
2172 int i, j, kmflag;
2173 int (*callback)(caddr_t);
2174
2175 if (va_attrs->va_flags & (IBT_VA_FMR | IBT_VA_REG_FN)) {
2176 /* delegate FMR and Physical Register to other function */
2177 return (hermon_map_mem_area_fmr(hca, va_attrs, list_len,
		    &reg_req->fn_arg, ma_hdl_p));
2179 }
2180
2181 /* FRWR */
2182
2183 state = (hermon_state_t *)hca;
2184 if (!(state->hs_ibtfinfo.hca_attr->hca_flags2 & IBT_HCA2_MEM_MGT_EXT))
2185 return (IBT_NOT_SUPPORTED);
2186 hermon_dma_attr_init(state, &dma_attr);
2187 #ifdef __sparc
2188 if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
2189 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2190
2191 if (hermon_kernel_data_ro == HERMON_RO_ENABLED)
2192 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2193 #endif
2194 if (va_attrs->va_flags & IBT_VA_NOSLEEP) {
2195 kmflag = KM_NOSLEEP;
2196 callback = DDI_DMA_DONTWAIT;
2197 } else {
2198 kmflag = KM_SLEEP;
2199 callback = DDI_DMA_SLEEP;
2200 }
2201
2202 ma_hdl = kmem_zalloc(sizeof (*ma_hdl), kmflag);
2203 if (ma_hdl == NULL) {
2204 return (IBT_INSUFF_RESOURCE);
2205 }
2206
2207 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2208 callback, NULL, &ma_hdl->h_ma_dmahdl);
2209 if (status != DDI_SUCCESS) {
2210 ibt_status = IBT_INSUFF_RESOURCE;
2211 goto marea_fail0;
2212 }
2213 dma_attr.dma_attr_align = 64; /* as per PRM */
2214 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2215 callback, NULL, &ma_hdl->h_ma_list_hdl);
2216 if (status != DDI_SUCCESS) {
2217 ibt_status = IBT_INSUFF_RESOURCE;
2218 goto marea_fail1;
2219 }
	/*
	 * The last slot on each page of the list cannot hold a data entry
	 * (it is used for the link to the next page of the list), so 1
	 * extra ibt_phys_addr_t is allocated per page.  We add 1 more to
	 * cover a less-than-one-page allocation that crosses a page
	 * boundary.
	 */
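	/*
	 * Worked example (assuming a 4KB HERMON_PAGESIZE and 8-byte
	 * ibt_phys_addr_t entries, i.e. 512 slots per list page): for
	 * list_len = 1024 this allocates (1024 + 1 + 1024/512) * 8 =
	 * 8216 bytes, covering the per-page link slots plus the one
	 * extra boundary entry.
	 */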
2226 status = ddi_dma_mem_alloc(ma_hdl->h_ma_list_hdl, (list_len + 1 +
2227 list_len / (HERMON_PAGESIZE / sizeof (ibt_phys_addr_t))) *
2228 sizeof (ibt_phys_addr_t),
2229 &state->hs_reg_accattr, DDI_DMA_CONSISTENT, callback, NULL,
2230 &ma_hdl->h_ma_kaddr, &ma_hdl->h_ma_real_len,
2231 &ma_hdl->h_ma_list_acc_hdl);
2232 if (status != DDI_SUCCESS) {
2233 ibt_status = IBT_INSUFF_RESOURCE;
2234 goto marea_fail2;
2235 }
2236 status = ddi_dma_addr_bind_handle(ma_hdl->h_ma_list_hdl, NULL,
2237 ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len, DDI_DMA_RDWR |
2238 DDI_DMA_CONSISTENT, callback, NULL,
2239 &kcookie, &cookie_cnt);
2240 if (status != DDI_SUCCESS) {
2241 ibt_status = IBT_INSUFF_RESOURCE;
2242 goto marea_fail3;
2243 }
2244 if ((kcookie.dmac_laddress & 0x3f) != 0) {
2245 cmn_err(CE_NOTE, "64-byte alignment assumption wrong");
2246 ibt_status = ibc_get_ci_failure(0);
2247 goto marea_fail4;
2248 }
2249 ma_hdl->h_ma_list_cookie.p_laddr = kcookie.dmac_laddress;
2250
2251 if (va_attrs->va_flags & IBT_VA_BUF) {
2252 status = ddi_dma_buf_bind_handle(ma_hdl->h_ma_dmahdl,
2253 va_attrs->va_buf, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2254 callback, NULL, &dmacookie, &cookie_cnt);
2255 } else {
2256 status = ddi_dma_addr_bind_handle(ma_hdl->h_ma_dmahdl,
2257 va_attrs->va_as, (caddr_t)(uintptr_t)va_attrs->va_vaddr,
2258 va_attrs->va_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2259 callback, NULL, &dmacookie, &cookie_cnt);
2260 }
2261 if (status != DDI_DMA_MAPPED) {
2262 ibt_status = ibc_get_ci_failure(0);
2263 goto marea_fail4;
2264 }
2265 i = 0; /* count the number of pbl entries */
2266 j = 0; /* count the number of links to next HERMON_PAGE */
2267 len = 0;
2268 pagesize = PAGESIZE;
2269 kaddr = (uint64_t *)(void *)ma_hdl->h_ma_kaddr;
2270 kcookie.dmac_size += kcookie.dmac_laddress & HERMON_PAGEOFFSET;
2271 kcookie_paddr = kcookie.dmac_laddress & HERMON_PAGEMASK;
2272 khdl = ma_hdl->h_ma_list_hdl;
2273 while (cookie_cnt-- > 0) {
2274 addr = dmacookie.dmac_laddress;
2275 len += dmacookie.dmac_size;
2276 endaddr = addr + (dmacookie.dmac_size - 1);
2277 addr = addr & ~(pagesize - 1);
2278 while (addr <= endaddr) {
2279 if (i >= list_len) {
2280 ibt_status = IBT_PBL_TOO_SMALL;
2281 goto marea_fail5;
2282 }
2283 /* Deal with last entry on page. */
2284 if (!((uintptr_t)&kaddr[i+j+1] & HERMON_PAGEOFFSET)) {
2285 if (kcookie.dmac_size > HERMON_PAGESIZE) {
2286 kcookie_paddr += HERMON_PAGESIZE;
2287 kcookie.dmac_size -= HERMON_PAGESIZE;
2288 } else {
2289 ddi_dma_nextcookie(khdl, &kcookie);
2290 kcookie_paddr = kcookie.dmac_laddress;
2291 }
2292 kaddr[i+j] = htonll(kcookie_paddr);
2293 j++;
2294 }
2295 kaddr[i+j] = htonll(addr | HERMON_MTT_ENTRY_PRESENT);
2296 i++;
2297 addr += pagesize;
2298 if (addr == 0) {
2299 static int do_once = 1;
2300 if (do_once) {
2301 do_once = 0;
2302 cmn_err(CE_NOTE, "probable error in "
2303 "dma_cookie address: map_mem_area");
2304 }
2305 break;
2306 }
2307 }
2308 if (cookie_cnt != 0)
2309 ddi_dma_nextcookie(ma_hdl->h_ma_dmahdl, &dmacookie);
2310 }
2311
	pmr = &reg_req->wr;
2313 pmr->pmr_len = len;
2314 pmr->pmr_offset = va_attrs->va_vaddr & PAGEOFFSET;
2315 pmr->pmr_buf_sz = PAGESHIFT; /* PRM says "Page Size", but... */
2316 pmr->pmr_num_buf = i;
2317 pmr->pmr_addr_list = &ma_hdl->h_ma_list_cookie;
2318
2319 *ma_hdl_p = ma_hdl;
2320 return (IBT_SUCCESS);
2321
2322 marea_fail5:
2323 status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
2324 if (status != DDI_SUCCESS)
2325 HERMON_WARNING(state, "failed to unbind DMA mapping");
2326 marea_fail4:
2327 status = ddi_dma_unbind_handle(ma_hdl->h_ma_list_hdl);
2328 if (status != DDI_SUCCESS)
2329 HERMON_WARNING(state, "failed to unbind DMA mapping");
2330 marea_fail3:
2331 ddi_dma_mem_free(&ma_hdl->h_ma_list_acc_hdl);
2332 marea_fail2:
2333 ddi_dma_free_handle(&ma_hdl->h_ma_list_hdl);
2334 marea_fail1:
2335 ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
2336 marea_fail0:
2337 kmem_free(ma_hdl, sizeof (*ma_hdl));
2338 *ma_hdl_p = NULL;
2339 return (ibt_status);
2340 }
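/*
 * A caller-side sketch of the FRWR flow described above (handle names
 * are hypothetical; the reg_pmr wiring follows the "union ibt_reg_req_u"
 * usage noted in the function header):
 *
 *	ibt_va_attr_t va_attrs;
 *	ibt_reg_req_t reg_req;
 *	ibt_ma_hdl_t ma_hdl;
 *	ibt_send_wr_t wr;
 *
 *	(void) ibt_map_mem_area(hca_hdl, &va_attrs, list_len, &reg_req,
 *	    &ma_hdl);
 *	wr.wr_opcode = IBT_WRC_FAST_REG_PMR;
 *	wr.wr.rc.rcwr.reg_pmr = &reg_req.wr;
 *	... the client fills in the keys and access flags only it knows,
 *	... posts wr, and unmaps ma_hdl when the region is torn down
 */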
2341
2342 /*
2343 * hermon_ci_unmap_mem_area()
2344 * Unmap the memory area
2345 * Context: Can be called from interrupt or base context.
2346 */
2347 /* ARGSUSED */
2348 static ibt_status_t
2349 hermon_ci_unmap_mem_area(ibc_hca_hdl_t hca, ibc_ma_hdl_t ma_hdl)
2350 {
2351 int status;
2352 hermon_state_t *state;
2353
2354 if (ma_hdl == NULL) {
2355 return (IBT_MA_HDL_INVALID);
2356 }
2357 state = (hermon_state_t *)hca;
2358 if (ma_hdl->h_ma_list_hdl != NULL) {
2359 status = ddi_dma_unbind_handle(ma_hdl->h_ma_list_hdl);
2360 if (status != DDI_SUCCESS)
2361 HERMON_WARNING(state, "failed to unbind DMA mapping");
2362 ddi_dma_mem_free(&ma_hdl->h_ma_list_acc_hdl);
2363 ddi_dma_free_handle(&ma_hdl->h_ma_list_hdl);
2364 } else {
2365 kmem_free(ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len);
2366 }
2367 status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
2368 if (status != DDI_SUCCESS)
2369 HERMON_WARNING(state, "failed to unbind DMA mapping");
2370 ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
2371 kmem_free(ma_hdl, sizeof (*ma_hdl));
2372 return (IBT_SUCCESS);
2373 }
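/*
 * Note that the two mapping flavors leave different state behind: the
 * FRWR path in hermon_ci_map_mem_area() binds the address list through
 * h_ma_list_hdl, while the FMR path only kmem_zalloc()s h_ma_kaddr.
 * The h_ma_list_hdl test above is what distinguishes them at unmap time.
 */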
2374
2375 struct ibc_mi_s {
2376 int imh_len;
2377 ddi_dma_handle_t imh_dmahandle[1];
2378 };
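/*
 * imh_dmahandle is a C89-style variable-length trailing array: to hold
 * "n" handles the structure is allocated as
 *
 *	kmem_alloc(sizeof (struct ibc_mi_s) +
 *	    (n - 1) * sizeof (ddi_dma_handle_t), kmflag);
 *
 * and every matching kmem_free() must pass the same size, recomputed
 * from imh_len.
 */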
2379
2380 /*
2381 * hermon_ci_map_mem_iov()
2382 * Map the memory
2383 * Context: Can be called from interrupt or base context.
2384 */
2385 /* ARGSUSED */
2386 static ibt_status_t
2387 hermon_ci_map_mem_iov(ibc_hca_hdl_t hca, ibt_iov_attr_t *iov_attr,
2388 ibt_all_wr_t *wr, ibc_mi_hdl_t *mi_hdl_p)
2389 {
2390 int status;
2391 int i, j, nds, max_nds;
2392 uint_t len;
2393 ibt_status_t ibt_status;
2394 ddi_dma_handle_t dmahdl;
2395 ddi_dma_cookie_t dmacookie;
2396 ddi_dma_attr_t dma_attr;
2397 uint_t cookie_cnt;
2398 ibc_mi_hdl_t mi_hdl;
2399 ibt_lkey_t rsvd_lkey;
2400 ibt_wr_ds_t *sgl;
2401 hermon_state_t *state;
2402 int kmflag;
2403 int (*callback)(caddr_t);
2404
2405 state = (hermon_state_t *)hca;
2406 hermon_dma_attr_init(state, &dma_attr);
2407 #ifdef __sparc
2408 if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
2409 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2410
2411 if (hermon_kernel_data_ro == HERMON_RO_ENABLED)
2412 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2413 #endif
2414
2415 nds = 0;
2416 max_nds = iov_attr->iov_wr_nds;
2417 if (iov_attr->iov_lso_hdr_sz)
2418 max_nds -= (iov_attr->iov_lso_hdr_sz + sizeof (uint32_t) +
2419 0xf) >> 4; /* 0xf is for rounding up to a multiple of 16 */
2420 rsvd_lkey = (iov_attr->iov_flags & IBT_IOV_ALT_LKEY) ?
2421 iov_attr->iov_alt_lkey : state->hs_devlim.rsv_lkey;
2422 if ((iov_attr->iov_flags & IBT_IOV_NOSLEEP) == 0) {
2423 kmflag = KM_SLEEP;
2424 callback = DDI_DMA_SLEEP;
2425 } else {
2426 kmflag = KM_NOSLEEP;
2427 callback = DDI_DMA_DONTWAIT;
2428 }
2429
2430 if (iov_attr->iov_flags & IBT_IOV_BUF) {
2431 mi_hdl = kmem_alloc(sizeof (*mi_hdl), kmflag);
2432 if (mi_hdl == NULL)
2433 return (IBT_INSUFF_RESOURCE);
2434 sgl = wr->send.wr_sgl;
2435 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2436 callback, NULL, &dmahdl);
2437 if (status != DDI_SUCCESS) {
2438 kmem_free(mi_hdl, sizeof (*mi_hdl));
2439 return (IBT_INSUFF_RESOURCE);
2440 }
2441 status = ddi_dma_buf_bind_handle(dmahdl, iov_attr->iov_buf,
2442 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
2443 &dmacookie, &cookie_cnt);
2444 if (status != DDI_DMA_MAPPED) {
2445 ddi_dma_free_handle(&dmahdl);
2446 kmem_free(mi_hdl, sizeof (*mi_hdl));
2447 return (ibc_get_ci_failure(0));
2448 }
2449 while (cookie_cnt-- > 0) {
			if (nds >= max_nds) {
				/* sgl[] is full; unwind before returning */
				status = ddi_dma_unbind_handle(dmahdl);
				if (status != DDI_SUCCESS)
					HERMON_WARNING(state, "failed to "
					    "unbind DMA mapping");
				ddi_dma_free_handle(&dmahdl);
				kmem_free(mi_hdl, sizeof (*mi_hdl));
				return (IBT_SGL_TOO_SMALL);
2457 }
2458 sgl[nds].ds_va = dmacookie.dmac_laddress;
2459 sgl[nds].ds_key = rsvd_lkey;
2460 sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
2461 nds++;
2462 if (cookie_cnt != 0)
2463 ddi_dma_nextcookie(dmahdl, &dmacookie);
2464 }
2465 wr->send.wr_nds = nds;
2466 mi_hdl->imh_len = 1;
2467 mi_hdl->imh_dmahandle[0] = dmahdl;
2468 *mi_hdl_p = mi_hdl;
2469 return (IBT_SUCCESS);
2470 }
2471
2472 if (iov_attr->iov_flags & IBT_IOV_RECV)
2473 sgl = wr->recv.wr_sgl;
2474 else
2475 sgl = wr->send.wr_sgl;
2476
2477 len = iov_attr->iov_list_len;
2478 for (i = 0, j = 0; j < len; j++) {
2479 if (iov_attr->iov[j].iov_len == 0)
2480 continue;
2481 i++;
2482 }
2483 mi_hdl = kmem_alloc(sizeof (*mi_hdl) +
2484 (i - 1) * sizeof (ddi_dma_handle_t), kmflag);
2485 if (mi_hdl == NULL)
2486 return (IBT_INSUFF_RESOURCE);
2487 mi_hdl->imh_len = i;
2488 for (i = 0, j = 0; j < len; j++) {
2489 if (iov_attr->iov[j].iov_len == 0)
2490 continue;
2491 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2492 callback, NULL, &dmahdl);
2493 if (status != DDI_SUCCESS) {
2494 ibt_status = IBT_INSUFF_RESOURCE;
2495 goto fail2;
2496 }
2497 status = ddi_dma_addr_bind_handle(dmahdl, iov_attr->iov_as,
2498 iov_attr->iov[j].iov_addr, iov_attr->iov[j].iov_len,
2499 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
2500 &dmacookie, &cookie_cnt);
2501 if (status != DDI_DMA_MAPPED) {
2502 ibt_status = ibc_get_ci_failure(0);
2503 goto fail1;
2504 }
		if (nds + cookie_cnt > max_nds) {
			ibt_status = IBT_SGL_TOO_SMALL;
			/* unbind the current handle; fail1 will free it */
			(void) ddi_dma_unbind_handle(dmahdl);
			goto fail1;
		}
2509 while (cookie_cnt-- > 0) {
2510 sgl[nds].ds_va = dmacookie.dmac_laddress;
2511 sgl[nds].ds_key = rsvd_lkey;
2512 sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
2513 nds++;
2514 if (cookie_cnt != 0)
2515 ddi_dma_nextcookie(dmahdl, &dmacookie);
2516 }
2517 mi_hdl->imh_dmahandle[i] = dmahdl;
2518 i++;
2519 }
2520
2521 if (iov_attr->iov_flags & IBT_IOV_RECV)
2522 wr->recv.wr_nds = nds;
2523 else
2524 wr->send.wr_nds = nds;
2525 *mi_hdl_p = mi_hdl;
2526 return (IBT_SUCCESS);
2527
2528 fail1:
2529 ddi_dma_free_handle(&dmahdl);
2530 fail2:
2531 while (--i >= 0) {
2532 status = ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]);
2533 if (status != DDI_SUCCESS)
2534 HERMON_WARNING(state, "failed to unbind DMA mapping");
2535 ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]);
2536 }
	kmem_free(mi_hdl, sizeof (*mi_hdl) +
	    (mi_hdl->imh_len - 1) * sizeof (ddi_dma_handle_t));
2539 *mi_hdl_p = NULL;
2540 return (ibt_status);
2541 }
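/*
 * A caller-side sketch (hypothetical names): the IBTF entry point is
 * ibt_map_mem_iov(), which fills the work request SGL in place, so the
 * caller posts that same work request and unmaps after completion:
 *
 *	(void) ibt_map_mem_iov(hca_hdl, &iov_attr, &all_wr, &mi_hdl);
 *	(void) ibt_post_send(chan_hdl, &all_wr.send, 1, NULL);
 *	... reap the completion, then ...
 *	(void) ibt_unmap_mem_iov(hca_hdl, mi_hdl);
 */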
2542
2543 /*
2544 * hermon_ci_unmap_mem_iov()
2545 * Unmap the memory
2546 * Context: Can be called from interrupt or base context.
2547 */
2548 static ibt_status_t
2549 hermon_ci_unmap_mem_iov(ibc_hca_hdl_t hca, ibc_mi_hdl_t mi_hdl)
2550 {
2551 int status, i;
2552 hermon_state_t *state;
2553
2554 state = (hermon_state_t *)hca;
2555
2556 for (i = mi_hdl->imh_len; --i >= 0; ) {
2557 status = ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]);
2558 if (status != DDI_SUCCESS)
2559 HERMON_WARNING(state, "failed to unbind DMA mapping");
2560 ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]);
2561 }
2562 kmem_free(mi_hdl, sizeof (*mi_hdl) +
2563 (mi_hdl->imh_len - 1) * sizeof (ddi_dma_handle_t));
2564 return (IBT_SUCCESS);
2565 }
2566
2567 /*
2568 * hermon_ci_alloc_lkey()
2569 * Allocate an empty memory region for use with FRWR.
2570 * Context: Can be called from user or base context.
2571 */
2572 /* ARGSUSED */
2573 static ibt_status_t
2574 hermon_ci_alloc_lkey(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2575 ibt_lkey_flags_t flags, uint_t list_sz, ibc_mr_hdl_t *mr_p,
2576 ibt_pmr_desc_t *mem_desc_p)
2577 {
2578 hermon_state_t *state;
2579 hermon_pdhdl_t pdhdl;
2580 hermon_mrhdl_t mrhdl;
2581 int status;
2582
2583 ASSERT(mr_p != NULL);
2584 ASSERT(mem_desc_p != NULL);
2585
2586 state = (hermon_state_t *)hca;
2587 pdhdl = (hermon_pdhdl_t)pd;
2588
2589 if (!(state->hs_ibtfinfo.hca_attr->hca_flags2 & IBT_HCA2_MEM_MGT_EXT))
2590 return (IBT_NOT_SUPPORTED);
2591
2592 status = hermon_mr_alloc_lkey(state, pdhdl, flags, list_sz, &mrhdl);
2593 if (status != DDI_SUCCESS) {
2594 return (status);
2595 }
2596
2597 /* Fill in the mem_desc_p structure */
2598 mem_desc_p->pmd_iova = 0;
2599 mem_desc_p->pmd_phys_buf_list_sz = list_sz;
2600 mem_desc_p->pmd_lkey = mrhdl->mr_lkey;
2601 /* Only set RKey if remote access was requested */
2602 if (flags & IBT_KEY_REMOTE) {
2603 mem_desc_p->pmd_rkey = mrhdl->mr_rkey;
2604 }
2605 mem_desc_p->pmd_sync_required = B_FALSE;
2606
2607 /* Return the Hermon MR handle */
2608 *mr_p = (ibc_mr_hdl_t)mrhdl;
2609 return (IBT_SUCCESS);
2610 }
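/*
 * The memory region returned above carries no mapping yet: the client
 * is expected to pair it with a page list built by
 * hermon_ci_map_mem_area() and post a fast-register work request that
 * names pmd_lkey (and pmd_rkey, when IBT_KEY_REMOTE was requested).
 * list_sz bounds the number of pages such a request may register.
 */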
2611
2612 /* Physical Register Memory Region */
2613 /*
2614 * hermon_ci_register_physical_mr()
2615 */
2616 /* ARGSUSED */
2617 static ibt_status_t
2618 hermon_ci_register_physical_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2619 ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
2620 ibt_pmr_desc_t *mem_desc_p)
2621 {
2622 return (IBT_NOT_SUPPORTED);
2623 }
2624
2625 /*
2626 * hermon_ci_reregister_physical_mr()
2627 */
2628 /* ARGSUSED */
2629 static ibt_status_t
2630 hermon_ci_reregister_physical_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
2631 ibc_pd_hdl_t pd, ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved,
2632 ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mr_desc_p)
2633 {
2634 return (IBT_NOT_SUPPORTED);
2635 }
2636
2637 /* Mellanox FMR Support */
2638 /*
2639 * hermon_ci_create_fmr_pool()
2640 * Creates a pool of memory regions suitable for FMR registration
2641 * Context: Can be called from base context only
2642 */
2643 static ibt_status_t
2644 hermon_ci_create_fmr_pool(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2645 ibt_fmr_pool_attr_t *params, ibc_fmr_pool_hdl_t *fmr_pool_p)
2646 {
2647 hermon_state_t *state;
2648 hermon_pdhdl_t pdhdl;
2649 hermon_fmrhdl_t fmrpoolhdl;
2650 int status;
2651
2652 state = (hermon_state_t *)hca;
2653
2654 /* Check for valid PD handle pointer */
2655 if (pd == NULL) {
2656 return (IBT_PD_HDL_INVALID);
2657 }
2658
2659 pdhdl = (hermon_pdhdl_t)pd;
2660
2661 /*
2662 * Validate the access flags. Both Remote Write and Remote Atomic
2663 * require the Local Write flag to be set
2664 */
2665 if (((params->fmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
2666 (params->fmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
2667 !(params->fmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
2668 return (IBT_MR_ACCESS_REQ_INVALID);
2669 }
2670
2671 status = hermon_create_fmr_pool(state, pdhdl, params, &fmrpoolhdl);
2672 if (status != DDI_SUCCESS) {
2673 return (status);
2674 }
2675
	/* Return the Hermon FMR pool handle */
2677 *fmr_pool_p = (ibc_fmr_pool_hdl_t)fmrpoolhdl;
2678
2679 return (IBT_SUCCESS);
2680 }
2681
2682 /*
2683 * hermon_ci_destroy_fmr_pool()
2684 * Free all resources associated with an FMR pool.
2685 * Context: Can be called from base context only.
2686 */
2687 static ibt_status_t
2688 hermon_ci_destroy_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
2689 {
2690 hermon_state_t *state;
2691 hermon_fmrhdl_t fmrpoolhdl;
2692 int status;
2693
2694 state = (hermon_state_t *)hca;
2695 fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;
2696
2697 status = hermon_destroy_fmr_pool(state, fmrpoolhdl);
2698 return (status);
2699 }
2700
2701 /*
2702 * hermon_ci_flush_fmr_pool()
2703 * Force a flush of the memory tables, cleaning up used FMR resources.
2704 * Context: Can be called from interrupt or base context.
2705 */
2706 static ibt_status_t
2707 hermon_ci_flush_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
2708 {
2709 hermon_state_t *state;
2710 hermon_fmrhdl_t fmrpoolhdl;
2711 int status;
2712
2713 state = (hermon_state_t *)hca;
2714
2715 fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;
2716 status = hermon_flush_fmr_pool(state, fmrpoolhdl);
2717 return (status);
2718 }
2719
2720 /*
2721 * hermon_ci_register_physical_fmr()
 * Performs a physical register operation using a region from the FMR
 * pool passed in.
2724 * Context: Can be called from interrupt or base context.
2725 */
2726 /* ARGSUSED */
2727 static ibt_status_t
2728 hermon_ci_register_physical_fmr(ibc_hca_hdl_t hca,
2729 ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
2730 void *ibtl_reserved, ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mem_desc_p)
2731 {
2732 hermon_state_t *state;
2733 hermon_mrhdl_t mrhdl;
2734 hermon_fmrhdl_t fmrpoolhdl;
2735 int status;
2736
2737 ASSERT(mem_pattr != NULL);
2738 ASSERT(mr_p != NULL);
2739 ASSERT(mem_desc_p != NULL);
2740
2741 /* Grab the Hermon softstate pointer */
2742 state = (hermon_state_t *)hca;
2743
2744 fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;
2745
2746 status = hermon_register_physical_fmr(state, fmrpoolhdl, mem_pattr,
2747 &mrhdl, mem_desc_p);
2748 if (status != DDI_SUCCESS) {
2749 return (status);
2750 }
2751
2752 /*
	 * If the region is mapped for streaming (i.e. noncoherent), then
	 * mark that a sync is required
2755 */
2756 mem_desc_p->pmd_sync_required = (mrhdl->mr_bindinfo.bi_flags &
2757 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
2758 if (mem_desc_p->pmd_sync_required == B_TRUE) {
2759 /* Fill in DMA handle for future sync operations */
2760 mrhdl->mr_bindinfo.bi_dmahdl =
2761 (ddi_dma_handle_t)mem_pattr->pmr_ma;
2762 }
2763
2764 /* Return the Hermon MR handle */
2765 *mr_p = (ibc_mr_hdl_t)mrhdl;
2766
2767 return (IBT_SUCCESS);
2768 }
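/*
 * When pmd_sync_required comes back B_TRUE the region was mapped
 * noncoherently, and the client is expected to bracket HCA access with
 * ibt_sync_mr() calls.  A minimal sketch (hypothetical handles), run
 * before the HCA reads memory the CPU has written:
 *
 *	ibt_mr_sync_t ms;
 *
 *	ms.ms_handle = mr_hdl;
 *	ms.ms_vaddr = va;
 *	ms.ms_len = len;
 *	ms.ms_flags = IBT_SYNC_READ;
 *	(void) ibt_sync_mr(hca_hdl, &ms, 1);
 */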
2769
2770 /*
2771 * hermon_ci_deregister_fmr()
2772 * Moves an FMR (specified by 'mr') to the deregistered state.
2773 * Context: Can be called from base context only.
2774 */
2775 static ibt_status_t
2776 hermon_ci_deregister_fmr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
2777 {
2778 hermon_state_t *state;
2779 hermon_mrhdl_t mrhdl;
2780 int status;
2781
2782 /* Grab the Hermon softstate pointer */
2783 state = (hermon_state_t *)hca;
2784 mrhdl = (hermon_mrhdl_t)mr;
2785
2786 /*
2787 * Deregister the memory region, either "unmap" the FMR or deregister
2788 * the normal memory region.
2789 */
2790 status = hermon_deregister_fmr(state, mrhdl);
2791 return (status);
2792 }
2793
2794 static int
2795 hermon_mem_alloc(hermon_state_t *state, size_t size, ibt_mr_flags_t flags,
2796 caddr_t *kaddrp, ibc_mem_alloc_hdl_t *mem_hdl)
2797 {
2798 ddi_dma_handle_t dma_hdl;
2799 ddi_dma_attr_t dma_attr;
2800 ddi_acc_handle_t acc_hdl;
2801 size_t real_len;
2802 int status;
2803 int (*ddi_cb)(caddr_t);
2804 ibc_mem_alloc_hdl_t mem_alloc_hdl;
2805
2806 hermon_dma_attr_init(state, &dma_attr);
2807
2808 ddi_cb = (flags & IBT_MR_NOSLEEP) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
2809
2810 /* Allocate a DMA handle */
2811 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr, ddi_cb,
2812 NULL, &dma_hdl);
2813 if (status != DDI_SUCCESS) {
2814 return (DDI_FAILURE);
2815 }
2816
2817 /* Allocate DMA memory */
2818 status = ddi_dma_mem_alloc(dma_hdl, size,
2819 &state->hs_reg_accattr, DDI_DMA_CONSISTENT, ddi_cb,
2820 NULL, kaddrp, &real_len, &acc_hdl);
2821 if (status != DDI_SUCCESS) {
2822 ddi_dma_free_handle(&dma_hdl);
2823 return (DDI_FAILURE);
2824 }
2825
	/* Package the DMA and access handles and return */
2827 mem_alloc_hdl = kmem_alloc(sizeof (**mem_hdl),
2828 (flags & IBT_MR_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP);
2829 if (mem_alloc_hdl == NULL) {
2830 ddi_dma_mem_free(&acc_hdl);
2831 ddi_dma_free_handle(&dma_hdl);
2832 return (DDI_FAILURE);
2833 }
2834 mem_alloc_hdl->ibc_dma_hdl = dma_hdl;
2835 mem_alloc_hdl->ibc_acc_hdl = acc_hdl;
2836
2837 *mem_hdl = mem_alloc_hdl;
2838
2839 return (DDI_SUCCESS);
2840 }
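/*
 * Note the asymmetry with hermon_ci_free_io_mem() below: no binding is
 * established here (presumably that happens later, when the memory is
 * registered), while the free path unbinds unconditionally and simply
 * ignores the failure returned for a handle that was never bound.
 */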
2841
/*
 * hermon_ci_alloc_io_mem()
 * Allocate DMA-able memory
 */
2847 static ibt_status_t
2848 hermon_ci_alloc_io_mem(ibc_hca_hdl_t hca, size_t size, ibt_mr_flags_t mr_flag,
2849 caddr_t *kaddrp, ibc_mem_alloc_hdl_t *mem_alloc_hdl_p)
2850 {
2851 hermon_state_t *state;
2852 int status;
2853
	/* Grab the Hermon softstate pointer */
2855 state = (hermon_state_t *)hca;
2856
2857 /* Allocate the memory and handles */
2858 status = hermon_mem_alloc(state, size, mr_flag, kaddrp,
2859 mem_alloc_hdl_p);
2860
2861 if (status != DDI_SUCCESS) {
2862 *mem_alloc_hdl_p = NULL;
2863 *kaddrp = NULL;
		return (IBT_INSUFF_RESOURCE);
2865 }
2866
2867 return (IBT_SUCCESS);
2868 }
2869
2870
2871 /*
2872 * hermon_ci_free_io_mem()
 * Unbind the handles and free the memory
2874 */
2875 /* ARGSUSED */
2876 static ibt_status_t
2877 hermon_ci_free_io_mem(ibc_hca_hdl_t hca, ibc_mem_alloc_hdl_t mem_alloc_hdl)
2878 {
2879 /* Unbind the handles and free the memory */
2880 (void) ddi_dma_unbind_handle(mem_alloc_hdl->ibc_dma_hdl);
2881 ddi_dma_mem_free(&mem_alloc_hdl->ibc_acc_hdl);
2882 ddi_dma_free_handle(&mem_alloc_hdl->ibc_dma_hdl);
2883 kmem_free(mem_alloc_hdl, sizeof (*mem_alloc_hdl));
2884
2885 return (IBT_SUCCESS);
2886 }