8368 remove warlock leftovers from usr/src/uts
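Context for this change: warlock was the static lock-analysis tool used in Solaris-era builds, and drivers embedded hints for it through _NOTE() annotations from sys/note.h; _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*obj)) told the analyzer that an object was not yet reachable by other threads, so unlocked access to it was safe. With warlock gone from the illumos build, these annotations are dead weight, and the deletions below (the lines marked with "-") remove them without changing any executable code.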
--- old/usr/src/uts/common/io/ib/adapters/hermon/hermon_ci.c
+++ new/usr/src/uts/common/io/ib/adapters/hermon/hermon_ci.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * hermon_ci.c
28 28 * Hermon Channel Interface (CI) Routines
29 29 *
30 30 * Implements all the routines necessary to interface with the IBTF.
31 31 * Pointers to all of these functions are passed to the IBTF at attach()
32 32 * time in the ibc_operations_t structure. These functions include all
33 33 * of the necessary routines to implement the required InfiniBand "verbs"
34 34 * and additional IBTF-specific interfaces.
35 35 */
36 36
37 37 #include <sys/types.h>
38 38 #include <sys/conf.h>
39 39 #include <sys/ddi.h>
40 40 #include <sys/sunddi.h>
41 41
42 42 #include <sys/ib/adapters/hermon/hermon.h>
43 43
44 44 extern uint32_t hermon_kernel_data_ro;
45 45 extern uint32_t hermon_user_data_ro;
46 46
47 47 /* HCA and port related operations */
48 48 static ibt_status_t hermon_ci_query_hca_ports(ibc_hca_hdl_t, uint8_t,
49 49 ibt_hca_portinfo_t *);
50 50 static ibt_status_t hermon_ci_modify_ports(ibc_hca_hdl_t, uint8_t,
51 51 ibt_port_modify_flags_t, uint8_t);
52 52 static ibt_status_t hermon_ci_modify_system_image(ibc_hca_hdl_t, ib_guid_t);
53 53
54 54 /* Protection Domains */
55 55 static ibt_status_t hermon_ci_alloc_pd(ibc_hca_hdl_t, ibt_pd_flags_t,
56 56 ibc_pd_hdl_t *);
57 57 static ibt_status_t hermon_ci_free_pd(ibc_hca_hdl_t, ibc_pd_hdl_t);
58 58
59 59 /* Reliable Datagram Domains */
60 60 static ibt_status_t hermon_ci_alloc_rdd(ibc_hca_hdl_t, ibc_rdd_flags_t,
61 61 ibc_rdd_hdl_t *);
62 62 static ibt_status_t hermon_ci_free_rdd(ibc_hca_hdl_t, ibc_rdd_hdl_t);
63 63
64 64 /* Address Handles */
65 65 static ibt_status_t hermon_ci_alloc_ah(ibc_hca_hdl_t, ibt_ah_flags_t,
66 66 ibc_pd_hdl_t, ibt_adds_vect_t *, ibc_ah_hdl_t *);
67 67 static ibt_status_t hermon_ci_free_ah(ibc_hca_hdl_t, ibc_ah_hdl_t);
68 68 static ibt_status_t hermon_ci_query_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
69 69 ibc_pd_hdl_t *, ibt_adds_vect_t *);
70 70 static ibt_status_t hermon_ci_modify_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
71 71 ibt_adds_vect_t *);
72 72
73 73 /* Queue Pairs */
74 74 static ibt_status_t hermon_ci_alloc_qp(ibc_hca_hdl_t, ibtl_qp_hdl_t,
75 75 ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *, ib_qpn_t *,
76 76 ibc_qp_hdl_t *);
77 77 static ibt_status_t hermon_ci_alloc_special_qp(ibc_hca_hdl_t, uint8_t,
78 78 ibtl_qp_hdl_t, ibt_sqp_type_t, ibt_qp_alloc_attr_t *,
79 79 ibt_chan_sizes_t *, ibc_qp_hdl_t *);
80 80 static ibt_status_t hermon_ci_alloc_qp_range(ibc_hca_hdl_t, uint_t,
81 81 ibtl_qp_hdl_t *, ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *,
82 82 ibc_cq_hdl_t *, ibc_cq_hdl_t *, ib_qpn_t *, ibc_qp_hdl_t *);
83 83 static ibt_status_t hermon_ci_free_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
84 84 ibc_free_qp_flags_t, ibc_qpn_hdl_t *);
85 85 static ibt_status_t hermon_ci_release_qpn(ibc_hca_hdl_t, ibc_qpn_hdl_t);
86 86 static ibt_status_t hermon_ci_query_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
87 87 ibt_qp_query_attr_t *);
88 88 static ibt_status_t hermon_ci_modify_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
89 89 ibt_cep_modify_flags_t, ibt_qp_info_t *, ibt_queue_sizes_t *);
90 90
91 91 /* Completion Queues */
92 92 static ibt_status_t hermon_ci_alloc_cq(ibc_hca_hdl_t, ibt_cq_hdl_t,
93 93 ibt_cq_attr_t *, ibc_cq_hdl_t *, uint_t *);
94 94 static ibt_status_t hermon_ci_free_cq(ibc_hca_hdl_t, ibc_cq_hdl_t);
95 95 static ibt_status_t hermon_ci_query_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
96 96 uint_t *, uint_t *, uint_t *, ibt_cq_handler_id_t *);
97 97 static ibt_status_t hermon_ci_resize_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
98 98 uint_t, uint_t *);
99 99 static ibt_status_t hermon_ci_modify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
100 100 uint_t, uint_t, ibt_cq_handler_id_t);
101 101 static ibt_status_t hermon_ci_alloc_cq_sched(ibc_hca_hdl_t,
102 102 ibt_cq_sched_attr_t *, ibc_sched_hdl_t *);
103 103 static ibt_status_t hermon_ci_free_cq_sched(ibc_hca_hdl_t, ibc_sched_hdl_t);
104 104 static ibt_status_t hermon_ci_query_cq_handler_id(ibc_hca_hdl_t,
105 105 ibt_cq_handler_id_t, ibt_cq_handler_attr_t *);
106 106
107 107 /* EE Contexts */
108 108 static ibt_status_t hermon_ci_alloc_eec(ibc_hca_hdl_t, ibc_eec_flags_t,
109 109 ibt_eec_hdl_t, ibc_rdd_hdl_t, ibc_eec_hdl_t *);
110 110 static ibt_status_t hermon_ci_free_eec(ibc_hca_hdl_t, ibc_eec_hdl_t);
111 111 static ibt_status_t hermon_ci_query_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
112 112 ibt_eec_query_attr_t *);
113 113 static ibt_status_t hermon_ci_modify_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
114 114 ibt_cep_modify_flags_t, ibt_eec_info_t *);
115 115
116 116 /* Memory Registration */
117 117 static ibt_status_t hermon_ci_register_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
118 118 ibt_mr_attr_t *, void *, ibc_mr_hdl_t *, ibt_mr_desc_t *);
119 119 static ibt_status_t hermon_ci_register_buf(ibc_hca_hdl_t, ibc_pd_hdl_t,
120 120 ibt_smr_attr_t *, struct buf *, void *, ibt_mr_hdl_t *, ibt_mr_desc_t *);
121 121 static ibt_status_t hermon_ci_register_shared_mr(ibc_hca_hdl_t,
122 122 ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_smr_attr_t *, void *,
123 123 ibc_mr_hdl_t *, ibt_mr_desc_t *);
124 124 static ibt_status_t hermon_ci_deregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t);
125 125 static ibt_status_t hermon_ci_query_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
126 126 ibt_mr_query_attr_t *);
127 127 static ibt_status_t hermon_ci_reregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
128 128 ibc_pd_hdl_t, ibt_mr_attr_t *, void *, ibc_mr_hdl_t *,
129 129 ibt_mr_desc_t *);
130 130 static ibt_status_t hermon_ci_reregister_buf(ibc_hca_hdl_t, ibc_mr_hdl_t,
131 131 ibc_pd_hdl_t, ibt_smr_attr_t *, struct buf *, void *, ibc_mr_hdl_t *,
132 132 ibt_mr_desc_t *);
133 133 static ibt_status_t hermon_ci_sync_mr(ibc_hca_hdl_t, ibt_mr_sync_t *, size_t);
134 134 static ibt_status_t hermon_ci_register_dma_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
135 135 ibt_dmr_attr_t *, void *, ibc_mr_hdl_t *, ibt_mr_desc_t *);
136 136
137 137 /* Memory Windows */
138 138 static ibt_status_t hermon_ci_alloc_mw(ibc_hca_hdl_t, ibc_pd_hdl_t,
139 139 ibt_mw_flags_t, ibc_mw_hdl_t *, ibt_rkey_t *);
140 140 static ibt_status_t hermon_ci_free_mw(ibc_hca_hdl_t, ibc_mw_hdl_t);
141 141 static ibt_status_t hermon_ci_query_mw(ibc_hca_hdl_t, ibc_mw_hdl_t,
142 142 ibt_mw_query_attr_t *);
143 143
144 144 /* Multicast Groups */
145 145 static ibt_status_t hermon_ci_attach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
146 146 ib_gid_t, ib_lid_t);
147 147 static ibt_status_t hermon_ci_detach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
148 148 ib_gid_t, ib_lid_t);
149 149
150 150 /* Work Request and Completion Processing */
151 151 static ibt_status_t hermon_ci_post_send(ibc_hca_hdl_t, ibc_qp_hdl_t,
152 152 ibt_send_wr_t *, uint_t, uint_t *);
153 153 static ibt_status_t hermon_ci_post_recv(ibc_hca_hdl_t, ibc_qp_hdl_t,
154 154 ibt_recv_wr_t *, uint_t, uint_t *);
155 155 static ibt_status_t hermon_ci_poll_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
156 156 ibt_wc_t *, uint_t, uint_t *);
157 157 static ibt_status_t hermon_ci_notify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
158 158 ibt_cq_notify_flags_t);
159 159
160 160 /* CI Object Private Data */
161 161 static ibt_status_t hermon_ci_ci_data_in(ibc_hca_hdl_t, ibt_ci_data_flags_t,
162 162 ibt_object_type_t, void *, void *, size_t);
163 163
164 164 /* CI Object Private Data */
165 165 static ibt_status_t hermon_ci_ci_data_out(ibc_hca_hdl_t, ibt_ci_data_flags_t,
166 166 ibt_object_type_t, void *, void *, size_t);
167 167
168 168 /* Shared Receive Queues */
169 169 static ibt_status_t hermon_ci_alloc_srq(ibc_hca_hdl_t, ibt_srq_flags_t,
170 170 ibt_srq_hdl_t, ibc_pd_hdl_t, ibt_srq_sizes_t *, ibc_srq_hdl_t *,
171 171 ibt_srq_sizes_t *);
172 172 static ibt_status_t hermon_ci_free_srq(ibc_hca_hdl_t, ibc_srq_hdl_t);
173 173 static ibt_status_t hermon_ci_query_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
174 174 ibc_pd_hdl_t *, ibt_srq_sizes_t *, uint_t *);
175 175 static ibt_status_t hermon_ci_modify_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
176 176 ibt_srq_modify_flags_t, uint_t, uint_t, uint_t *);
177 177 static ibt_status_t hermon_ci_post_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
178 178 ibt_recv_wr_t *, uint_t, uint_t *);
179 179
180 180 /* Address translation */
181 181 static ibt_status_t hermon_ci_map_mem_area(ibc_hca_hdl_t, ibt_va_attr_t *,
182 182 void *, uint_t, ibt_reg_req_t *, ibc_ma_hdl_t *);
183 183 static ibt_status_t hermon_ci_unmap_mem_area(ibc_hca_hdl_t, ibc_ma_hdl_t);
184 184 static ibt_status_t hermon_ci_map_mem_iov(ibc_hca_hdl_t, ibt_iov_attr_t *,
185 185 ibt_all_wr_t *, ibc_mi_hdl_t *);
186 186 static ibt_status_t hermon_ci_unmap_mem_iov(ibc_hca_hdl_t, ibc_mi_hdl_t);
187 187
188 188 /* Allocate L_Key */
189 189 static ibt_status_t hermon_ci_alloc_lkey(ibc_hca_hdl_t, ibc_pd_hdl_t,
190 190 ibt_lkey_flags_t, uint_t, ibc_mr_hdl_t *, ibt_pmr_desc_t *);
191 191
192 192 /* Physical Register Memory Region */
193 193 static ibt_status_t hermon_ci_register_physical_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
194 194 ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *, ibt_pmr_desc_t *);
195 195 static ibt_status_t hermon_ci_reregister_physical_mr(ibc_hca_hdl_t,
196 196 ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *,
197 197 ibt_pmr_desc_t *);
198 198
199 199 /* Mellanox FMR */
200 200 static ibt_status_t hermon_ci_create_fmr_pool(ibc_hca_hdl_t hca,
201 201 ibc_pd_hdl_t pd, ibt_fmr_pool_attr_t *fmr_params,
202 202 ibc_fmr_pool_hdl_t *fmr_pool);
203 203 static ibt_status_t hermon_ci_destroy_fmr_pool(ibc_hca_hdl_t hca,
204 204 ibc_fmr_pool_hdl_t fmr_pool);
205 205 static ibt_status_t hermon_ci_flush_fmr_pool(ibc_hca_hdl_t hca,
206 206 ibc_fmr_pool_hdl_t fmr_pool);
207 207 static ibt_status_t hermon_ci_register_physical_fmr(ibc_hca_hdl_t hca,
208 208 ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
209 209 void *ibtl_reserved, ibc_mr_hdl_t *mr_hdl_p, ibt_pmr_desc_t *mem_desc_p);
210 210 static ibt_status_t hermon_ci_deregister_fmr(ibc_hca_hdl_t hca,
211 211 ibc_mr_hdl_t mr);
212 212
213 213 /* Memory Allocation/Deallocation */
214 214 static ibt_status_t hermon_ci_alloc_io_mem(ibc_hca_hdl_t hca, size_t size,
215 215 ibt_mr_flags_t mr_flag, caddr_t *kaddrp,
216 216 ibc_mem_alloc_hdl_t *mem_alloc_hdl_p);
217 217 static ibt_status_t hermon_ci_free_io_mem(ibc_hca_hdl_t hca,
218 218 ibc_mem_alloc_hdl_t mem_alloc_hdl);
219 219 static ibt_status_t hermon_ci_not_supported();
220 220
221 221 /*
222 222 * This ibc_operations_t structure includes pointers to all the entry points
223 223 * provided by the Hermon driver. This structure is passed to the IBTF at
224 224 * driver attach time, using the ibc_attach() call.
225 225 */
226 226 ibc_operations_t hermon_ibc_ops = {
227 227 /* HCA and port related operations */
228 228 hermon_ci_query_hca_ports,
229 229 hermon_ci_modify_ports,
230 230 hermon_ci_modify_system_image,
231 231
232 232 /* Protection Domains */
233 233 hermon_ci_alloc_pd,
234 234 hermon_ci_free_pd,
235 235
236 236 /* Reliable Datagram Domains */
237 237 hermon_ci_alloc_rdd,
238 238 hermon_ci_free_rdd,
239 239
240 240 /* Address Handles */
241 241 hermon_ci_alloc_ah,
242 242 hermon_ci_free_ah,
243 243 hermon_ci_query_ah,
244 244 hermon_ci_modify_ah,
245 245
246 246 /* Queue Pairs */
247 247 hermon_ci_alloc_qp,
248 248 hermon_ci_alloc_special_qp,
249 249 hermon_ci_alloc_qp_range,
250 250 hermon_ci_free_qp,
251 251 hermon_ci_release_qpn,
252 252 hermon_ci_query_qp,
253 253 hermon_ci_modify_qp,
254 254
255 255 /* Completion Queues */
256 256 hermon_ci_alloc_cq,
257 257 hermon_ci_free_cq,
258 258 hermon_ci_query_cq,
259 259 hermon_ci_resize_cq,
260 260 hermon_ci_modify_cq,
261 261 hermon_ci_alloc_cq_sched,
262 262 hermon_ci_free_cq_sched,
263 263 hermon_ci_query_cq_handler_id,
264 264
265 265 /* EE Contexts */
266 266 hermon_ci_alloc_eec,
267 267 hermon_ci_free_eec,
268 268 hermon_ci_query_eec,
269 269 hermon_ci_modify_eec,
270 270
271 271 /* Memory Registration */
272 272 hermon_ci_register_mr,
273 273 hermon_ci_register_buf,
274 274 hermon_ci_register_shared_mr,
275 275 hermon_ci_deregister_mr,
276 276 hermon_ci_query_mr,
277 277 hermon_ci_reregister_mr,
278 278 hermon_ci_reregister_buf,
279 279 hermon_ci_sync_mr,
280 280
281 281 /* Memory Windows */
282 282 hermon_ci_alloc_mw,
283 283 hermon_ci_free_mw,
284 284 hermon_ci_query_mw,
285 285
286 286 /* Multicast Groups */
287 287 hermon_ci_attach_mcg,
288 288 hermon_ci_detach_mcg,
289 289
290 290 /* Work Request and Completion Processing */
291 291 hermon_ci_post_send,
292 292 hermon_ci_post_recv,
293 293 hermon_ci_poll_cq,
294 294 hermon_ci_notify_cq,
295 295
296 296 /* CI Object Mapping Data */
297 297 hermon_ci_ci_data_in,
298 298 hermon_ci_ci_data_out,
299 299
300 300 /* Shared Receive Queue */
301 301 hermon_ci_alloc_srq,
302 302 hermon_ci_free_srq,
303 303 hermon_ci_query_srq,
304 304 hermon_ci_modify_srq,
305 305 hermon_ci_post_srq,
306 306
307 307 /* Address translation */
308 308 hermon_ci_map_mem_area,
309 309 hermon_ci_unmap_mem_area,
310 310 hermon_ci_map_mem_iov,
311 311 hermon_ci_unmap_mem_iov,
312 312
313 313 /* Allocate L_key */
314 314 hermon_ci_alloc_lkey,
315 315
316 316 /* Physical Register Memory Region */
317 317 hermon_ci_register_physical_mr,
318 318 hermon_ci_reregister_physical_mr,
319 319
320 320 /* Mellanox FMR */
321 321 hermon_ci_create_fmr_pool,
322 322 hermon_ci_destroy_fmr_pool,
323 323 hermon_ci_flush_fmr_pool,
324 324 hermon_ci_register_physical_fmr,
325 325 hermon_ci_deregister_fmr,
326 326
327 327 /* Memory allocation */
328 328 hermon_ci_alloc_io_mem,
329 329 hermon_ci_free_io_mem,
330 330
331 331 /* XRC not yet supported */
332 332 hermon_ci_not_supported, /* ibc_alloc_xrc_domain */
333 333 hermon_ci_not_supported, /* ibc_free_xrc_domain */
334 334 hermon_ci_not_supported, /* ibc_alloc_xrc_srq */
335 335 hermon_ci_not_supported, /* ibc_free_xrc_srq */
336 336 hermon_ci_not_supported, /* ibc_query_xrc_srq */
337 337 hermon_ci_not_supported, /* ibc_modify_xrc_srq */
338 338 hermon_ci_not_supported, /* ibc_alloc_xrc_tgt_qp */
339 339 hermon_ci_not_supported, /* ibc_free_xrc_tgt_qp */
340 340 hermon_ci_not_supported, /* ibc_query_xrc_tgt_qp */
341 341 hermon_ci_not_supported, /* ibc_modify_xrc_tgt_qp */
342 342
343 343 /* Memory Region (physical) */
344 344 hermon_ci_register_dma_mr,
345 345
346 346 /* Next enhancements */
347 347 hermon_ci_not_supported, /* ibc_enhancement1 */
348 348 hermon_ci_not_supported, /* ibc_enhancement2 */
349 349 hermon_ci_not_supported, /* ibc_enhancement3 */
350 350 hermon_ci_not_supported, /* ibc_enhancement4 */
351 351 };
352 352
 353 353 /*
 354 354  * Entry points that are not yet implemented
 355 355  */
356 356 /* ARGSUSED */
357 357 static ibt_status_t
358 358 hermon_ci_not_supported()
359 359 {
360 360 return (IBT_NOT_SUPPORTED);
361 361 }
362 362
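hermon_ibc_ops and hermon_ci_not_supported() together form a classic C dispatch table with one shared stub for every unimplemented entry point; note the real driver declares the stub with an empty, unprototyped parameter list so a single function can stand in for many signatures. A self-contained illustration of the same pattern (ordinary user-level C; every ex_* name is invented for this sketch and is not part of the driver or the IBTF):

#include <stdio.h>

/* Hypothetical status values standing in for ibt_status_t codes. */
typedef enum { EX_SUCCESS = 0, EX_NOT_SUPPORTED = -1 } ex_status_t;

/* A two-entry dispatch table in the style of ibc_operations_t. */
typedef struct ex_operations {
	ex_status_t	(*op_alloc_pd)(void);
	ex_status_t	(*op_alloc_xrc_domain)(void);	/* unimplemented */
} ex_operations_t;

static ex_status_t
ex_alloc_pd(void)
{
	return (EX_SUCCESS);
}

/* One shared stub fills every unimplemented slot. */
static ex_status_t
ex_not_supported(void)
{
	return (EX_NOT_SUPPORTED);
}

static ex_operations_t ex_ops = {
	ex_alloc_pd,
	ex_not_supported,	/* op_alloc_xrc_domain */
};

int
main(void)
{
	printf("alloc_pd:         %d\n", ex_ops.op_alloc_pd());
	printf("alloc_xrc_domain: %d\n", ex_ops.op_alloc_xrc_domain());
	return (0);
}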
363 363
364 364 /*
365 365 * hermon_ci_query_hca_ports()
366 366 * Returns HCA port attributes for either one or all of the HCA's ports.
367 367 * Context: Can be called only from user or kernel context.
368 368 */
369 369 static ibt_status_t
370 370 hermon_ci_query_hca_ports(ibc_hca_hdl_t hca, uint8_t query_port,
371 371 ibt_hca_portinfo_t *info_p)
372 372 {
373 373 hermon_state_t *state;
374 374 uint_t start, end, port;
375 375 int status, indx;
376 376
377 377 /* Grab the Hermon softstate pointer */
378 378 state = (hermon_state_t *)hca;
379 379
380 380 /*
381 381 * If the specified port is zero, then we are supposed to query all
382 382 * ports. Otherwise, we query only the port number specified.
383 383 * Setup the start and end port numbers as appropriate for the loop
384 384 * below. Note: The first Hermon port is port number one (1).
385 385 */
386 386 if (query_port == 0) {
387 387 start = 1;
388 388 end = start + (state->hs_cfg_profile->cp_num_ports - 1);
389 389 } else {
390 390 end = start = query_port;
391 391 }
392 392
393 393 /* Query the port(s) */
394 394 for (port = start, indx = 0; port <= end; port++, indx++) {
395 395 status = hermon_port_query(state, port, &info_p[indx]);
396 396 if (status != DDI_SUCCESS) {
397 397 return (status);
398 398 }
399 399 }
400 400 return (IBT_SUCCESS);
401 401 }
402 402
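The port-numbering convention documented above (query_port == 0 means "query all ports"; the first Hermon port is 1) is subtle enough to deserve a standalone illustration. A minimal sketch of the same bounds computation, compilable as ordinary C, where num_ports stands in for state->hs_cfg_profile->cp_num_ports and the ex_ name is hypothetical:

#include <stdio.h>

static void
ex_port_bounds(unsigned int query_port, unsigned int num_ports,
    unsigned int *start, unsigned int *end)
{
	if (query_port == 0) {
		/* Query every port; ports are numbered from 1 */
		*start = 1;
		*end = *start + (num_ports - 1);
	} else {
		/* Query exactly the one port requested */
		*end = *start = query_port;
	}
}

int
main(void)
{
	unsigned int start, end;

	ex_port_bounds(0, 2, &start, &end);	/* all ports of a 2-port HCA */
	printf("all: ports %u..%u\n", start, end);	/* prints 1..2 */

	ex_port_bounds(2, 2, &start, &end);	/* just port 2 */
	printf("one: ports %u..%u\n", start, end);	/* prints 2..2 */
	return (0);
}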
403 403
404 404 /*
405 405 * hermon_ci_modify_ports()
406 406 * Modify HCA port attributes
407 407 * Context: Can be called only from user or kernel context.
408 408 */
409 409 static ibt_status_t
410 410 hermon_ci_modify_ports(ibc_hca_hdl_t hca, uint8_t port,
411 411 ibt_port_modify_flags_t flags, uint8_t init_type)
412 412 {
413 413 hermon_state_t *state;
414 414 int status;
415 415
416 416 /* Grab the Hermon softstate pointer */
417 417 state = (hermon_state_t *)hca;
418 418
419 419 /* Modify the port(s) */
420 420 status = hermon_port_modify(state, port, flags, init_type);
421 421 return (status);
422 422 }
423 423
424 424 /*
425 425 * hermon_ci_modify_system_image()
426 426 * Modify the System Image GUID
427 427 * Context: Can be called only from user or kernel context.
428 428 */
429 429 /* ARGSUSED */
430 430 static ibt_status_t
431 431 hermon_ci_modify_system_image(ibc_hca_hdl_t hca, ib_guid_t sys_guid)
432 432 {
433 433 /*
434 434 * This is an unsupported interface for the Hermon driver. This
435 435 * interface is necessary to support modification of the System
436 436 * Image GUID. Hermon is only capable of modifying this parameter
437 437 * once (during driver initialization).
438 438 */
439 439 return (IBT_NOT_SUPPORTED);
440 440 }
441 441
442 442 /*
443 443 * hermon_ci_alloc_pd()
444 444 * Allocate a Protection Domain
445 445 * Context: Can be called only from user or kernel context.
446 446 */
447 447 /* ARGSUSED */
448 448 static ibt_status_t
449 449 hermon_ci_alloc_pd(ibc_hca_hdl_t hca, ibt_pd_flags_t flags, ibc_pd_hdl_t *pd_p)
450 450 {
451 451 hermon_state_t *state;
452 452 hermon_pdhdl_t pdhdl;
453 453 int status;
454 454
455 455 ASSERT(pd_p != NULL);
456 456
457 457 /* Grab the Hermon softstate pointer */
458 458 state = (hermon_state_t *)hca;
459 459
460 460 /* Allocate the PD */
461 461 status = hermon_pd_alloc(state, &pdhdl, HERMON_NOSLEEP);
462 462 if (status != DDI_SUCCESS) {
463 463 return (status);
464 464 }
465 465
466 466 /* Return the Hermon PD handle */
467 467 *pd_p = (ibc_pd_hdl_t)pdhdl;
468 468
469 469 return (IBT_SUCCESS);
470 470 }
471 471
472 472
473 473 /*
474 474 * hermon_ci_free_pd()
475 475 * Free a Protection Domain
476 476 * Context: Can be called only from user or kernel context
477 477 */
478 478 static ibt_status_t
479 479 hermon_ci_free_pd(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd)
480 480 {
481 481 hermon_state_t *state;
482 482 hermon_pdhdl_t pdhdl;
483 483 int status;
484 484
485 485 /* Grab the Hermon softstate pointer and PD handle */
486 486 state = (hermon_state_t *)hca;
487 487 pdhdl = (hermon_pdhdl_t)pd;
488 488
489 489 /* Free the PD */
490 490 status = hermon_pd_free(state, &pdhdl);
491 491 return (status);
492 492 }
493 493
494 494
495 495 /*
496 496 * hermon_ci_alloc_rdd()
497 497 * Allocate a Reliable Datagram Domain
498 498 * Context: Can be called only from user or kernel context.
499 499 */
500 500 /* ARGSUSED */
501 501 static ibt_status_t
502 502 hermon_ci_alloc_rdd(ibc_hca_hdl_t hca, ibc_rdd_flags_t flags,
503 503 ibc_rdd_hdl_t *rdd_p)
504 504 {
505 505 /*
506 506 * This is an unsupported interface for the Hermon driver. This
507 507 * interface is necessary to support Reliable Datagram (RD)
508 508 * operations. Hermon does not support RD.
509 509 */
510 510 return (IBT_NOT_SUPPORTED);
511 511 }
512 512
513 513
514 514 /*
 515 515  * hermon_ci_free_rdd()
516 516 * Free a Reliable Datagram Domain
517 517 * Context: Can be called only from user or kernel context.
518 518 */
519 519 /* ARGSUSED */
520 520 static ibt_status_t
521 521 hermon_ci_free_rdd(ibc_hca_hdl_t hca, ibc_rdd_hdl_t rdd)
522 522 {
523 523 /*
524 524 * This is an unsupported interface for the Hermon driver. This
525 525 * interface is necessary to support Reliable Datagram (RD)
526 526 * operations. Hermon does not support RD.
527 527 */
528 528 return (IBT_NOT_SUPPORTED);
529 529 }
530 530
531 531
532 532 /*
533 533 * hermon_ci_alloc_ah()
534 534 * Allocate an Address Handle
535 535 * Context: Can be called only from user or kernel context.
536 536 */
537 537 /* ARGSUSED */
538 538 static ibt_status_t
539 539 hermon_ci_alloc_ah(ibc_hca_hdl_t hca, ibt_ah_flags_t flags, ibc_pd_hdl_t pd,
540 540 ibt_adds_vect_t *attr_p, ibc_ah_hdl_t *ah_p)
541 541 {
542 542 hermon_state_t *state;
543 543 hermon_ahhdl_t ahhdl;
544 544 hermon_pdhdl_t pdhdl;
545 545 int status;
546 546
547 547 /* Grab the Hermon softstate pointer and PD handle */
548 548 state = (hermon_state_t *)hca;
549 549 pdhdl = (hermon_pdhdl_t)pd;
550 550
551 551 /* Allocate the AH */
552 552 status = hermon_ah_alloc(state, pdhdl, attr_p, &ahhdl, HERMON_NOSLEEP);
553 553 if (status != DDI_SUCCESS) {
554 554 return (status);
555 555 }
556 556
557 557 /* Return the Hermon AH handle */
558 558 *ah_p = (ibc_ah_hdl_t)ahhdl;
559 559
560 560 return (IBT_SUCCESS);
561 561 }
562 562
563 563
564 564 /*
565 565 * hermon_ci_free_ah()
566 566 * Free an Address Handle
567 567 * Context: Can be called only from user or kernel context.
568 568 */
569 569 static ibt_status_t
570 570 hermon_ci_free_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah)
571 571 {
572 572 hermon_state_t *state;
573 573 hermon_ahhdl_t ahhdl;
574 574 int status;
575 575
576 576 /* Grab the Hermon softstate pointer and AH handle */
577 577 state = (hermon_state_t *)hca;
578 578 ahhdl = (hermon_ahhdl_t)ah;
579 579
580 580 /* Free the AH */
581 581 status = hermon_ah_free(state, &ahhdl, HERMON_NOSLEEP);
582 582
583 583 return (status);
584 584 }
585 585
586 586
587 587 /*
588 588 * hermon_ci_query_ah()
589 589 * Return the Address Vector information for a specified Address Handle
590 590 * Context: Can be called from interrupt or base context.
591 591 */
592 592 static ibt_status_t
593 593 hermon_ci_query_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibc_pd_hdl_t *pd_p,
594 594 ibt_adds_vect_t *attr_p)
595 595 {
596 596 hermon_state_t *state;
597 597 hermon_ahhdl_t ahhdl;
598 598 hermon_pdhdl_t pdhdl;
599 599 int status;
600 600
601 601 /* Grab the Hermon softstate pointer and AH handle */
602 602 state = (hermon_state_t *)hca;
603 603 ahhdl = (hermon_ahhdl_t)ah;
604 604
605 605 /* Query the AH */
606 606 status = hermon_ah_query(state, ahhdl, &pdhdl, attr_p);
607 607 if (status != DDI_SUCCESS) {
608 608 return (status);
609 609 }
610 610
611 611 /* Return the Hermon PD handle */
612 612 *pd_p = (ibc_pd_hdl_t)pdhdl;
613 613
614 614 return (IBT_SUCCESS);
615 615 }
616 616
617 617
618 618 /*
619 619 * hermon_ci_modify_ah()
620 620 * Modify the Address Vector information of a specified Address Handle
621 621 * Context: Can be called from interrupt or base context.
622 622 */
623 623 static ibt_status_t
624 624 hermon_ci_modify_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibt_adds_vect_t *attr_p)
625 625 {
626 626 hermon_state_t *state;
627 627 hermon_ahhdl_t ahhdl;
628 628 int status;
629 629
630 630 /* Grab the Hermon softstate pointer and AH handle */
631 631 state = (hermon_state_t *)hca;
632 632 ahhdl = (hermon_ahhdl_t)ah;
633 633
634 634 /* Modify the AH */
635 635 status = hermon_ah_modify(state, ahhdl, attr_p);
636 636
637 637 return (status);
638 638 }
639 639
640 640
641 641 /*
642 642 * hermon_ci_alloc_qp()
643 643 * Allocate a Queue Pair
644 644 * Context: Can be called only from user or kernel context.
[ 644 lines elided ]
645 645 */
646 646 static ibt_status_t
647 647 hermon_ci_alloc_qp(ibc_hca_hdl_t hca, ibtl_qp_hdl_t ibt_qphdl,
648 648 ibt_qp_type_t type, ibt_qp_alloc_attr_t *attr_p,
649 649 ibt_chan_sizes_t *queue_sizes_p, ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
650 650 {
651 651 hermon_state_t *state;
652 652 hermon_qp_info_t qpinfo;
653 653 int status;
654 654
655 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
656 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))
657 -
658 655 /* Grab the Hermon softstate pointer */
659 656 state = (hermon_state_t *)hca;
660 657
661 658 /* Allocate the QP */
662 659 qpinfo.qpi_attrp = attr_p;
663 660 qpinfo.qpi_type = type;
664 661 qpinfo.qpi_ibt_qphdl = ibt_qphdl;
665 662 qpinfo.qpi_queueszp = queue_sizes_p;
666 663 qpinfo.qpi_qpn = qpn;
667 664 status = hermon_qp_alloc(state, &qpinfo, HERMON_NOSLEEP);
668 665 if (status != DDI_SUCCESS) {
669 666 return (status);
670 667 }
671 668
672 669 /* Return the Hermon QP handle */
673 670 *qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;
674 671
675 672 return (IBT_SUCCESS);
676 673 }
677 674
678 675
679 676 /*
680 677 * hermon_ci_alloc_special_qp()
681 678 * Allocate a Special Queue Pair
682 679 * Context: Can be called only from user or kernel context.
683 680 */
[ 16 lines elided ]
684 681 static ibt_status_t
685 682 hermon_ci_alloc_special_qp(ibc_hca_hdl_t hca, uint8_t port,
686 683 ibtl_qp_hdl_t ibt_qphdl, ibt_sqp_type_t type,
687 684 ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
688 685 ibc_qp_hdl_t *qp_p)
689 686 {
690 687 hermon_state_t *state;
691 688 hermon_qp_info_t qpinfo;
692 689 int status;
693 690
694 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
695 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))
696 -
697 691 /* Grab the Hermon softstate pointer */
698 692 state = (hermon_state_t *)hca;
699 693
700 694 /* Allocate the Special QP */
701 695 qpinfo.qpi_attrp = attr_p;
702 696 qpinfo.qpi_type = type;
703 697 qpinfo.qpi_port = port;
704 698 qpinfo.qpi_ibt_qphdl = ibt_qphdl;
705 699 qpinfo.qpi_queueszp = queue_sizes_p;
706 700 status = hermon_special_qp_alloc(state, &qpinfo, HERMON_NOSLEEP);
707 701 if (status != DDI_SUCCESS) {
708 702 return (status);
709 703 }
710 704 /* Return the Hermon QP handle */
711 705 *qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;
712 706
713 707 return (IBT_SUCCESS);
714 708 }
715 709
716 710 /*
717 711 * hermon_ci_alloc_qp_range()
 718 712  * Allocate a range of Queue Pairs
719 713 * Context: Can be called only from user or kernel context.
720 714 */
721 715 /* ARGSUSED */
722 716 static ibt_status_t
[ 16 lines elided ]
723 717 hermon_ci_alloc_qp_range(ibc_hca_hdl_t hca, uint_t log2,
724 718 ibtl_qp_hdl_t *ibtl_qp, ibt_qp_type_t type,
725 719 ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
726 720 ibc_cq_hdl_t *send_cq, ibc_cq_hdl_t *recv_cq,
727 721 ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
728 722 {
729 723 hermon_state_t *state;
730 724 hermon_qp_info_t qpinfo;
731 725 int status;
732 726
733 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
734 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))
735 -
736 727 /* Grab the Hermon softstate pointer */
737 728 state = (hermon_state_t *)hca;
738 729
739 730 /* Allocate the QP */
740 731 qpinfo.qpi_attrp = attr_p;
741 732 qpinfo.qpi_type = type;
742 733 qpinfo.qpi_queueszp = queue_sizes_p;
743 734 qpinfo.qpi_qpn = qpn;
744 735 status = hermon_qp_alloc_range(state, log2, &qpinfo, ibtl_qp,
745 736 send_cq, recv_cq, (hermon_qphdl_t *)qp_p, HERMON_NOSLEEP);
746 737 return (status);
747 738 }
748 739
749 740 /*
750 741 * hermon_ci_free_qp()
751 742 * Free a Queue Pair
752 743 * Context: Can be called only from user or kernel context.
753 744 */
754 745 static ibt_status_t
755 746 hermon_ci_free_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
756 747 ibc_free_qp_flags_t free_qp_flags, ibc_qpn_hdl_t *qpnh_p)
757 748 {
758 749 hermon_state_t *state;
759 750 hermon_qphdl_t qphdl;
760 751 int status;
761 752
762 753 /* Grab the Hermon softstate pointer and QP handle */
763 754 state = (hermon_state_t *)hca;
764 755 qphdl = (hermon_qphdl_t)qp;
765 756
766 757 /* Free the QP */
767 758 status = hermon_qp_free(state, &qphdl, free_qp_flags, qpnh_p,
768 759 HERMON_NOSLEEP);
769 760
770 761 return (status);
771 762 }
772 763
773 764
774 765 /*
775 766 * hermon_ci_release_qpn()
776 767 * Release a Queue Pair Number (QPN)
777 768 * Context: Can be called only from user or kernel context.
778 769 */
779 770 static ibt_status_t
780 771 hermon_ci_release_qpn(ibc_hca_hdl_t hca, ibc_qpn_hdl_t qpnh)
781 772 {
782 773 hermon_state_t *state;
783 774 hermon_qpn_entry_t *entry;
784 775
785 776 /* Grab the Hermon softstate pointer and QP handle */
786 777 state = (hermon_state_t *)hca;
787 778 entry = (hermon_qpn_entry_t *)qpnh;
788 779
789 780 /* Release the QP number */
790 781 hermon_qp_release_qpn(state, entry, HERMON_QPN_RELEASE);
791 782
792 783 return (IBT_SUCCESS);
793 784 }
794 785
795 786
796 787 /*
797 788 * hermon_ci_query_qp()
798 789 * Query a Queue Pair
799 790 * Context: Can be called from interrupt or base context.
800 791 */
801 792 static ibt_status_t
802 793 hermon_ci_query_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
803 794 ibt_qp_query_attr_t *attr_p)
804 795 {
805 796 hermon_state_t *state;
806 797 hermon_qphdl_t qphdl;
807 798 int status;
808 799
809 800 /* Grab the Hermon softstate pointer and QP handle */
810 801 state = (hermon_state_t *)hca;
811 802 qphdl = (hermon_qphdl_t)qp;
812 803
813 804 /* Query the QP */
814 805 status = hermon_qp_query(state, qphdl, attr_p);
815 806 return (status);
816 807 }
817 808
818 809
819 810 /*
820 811 * hermon_ci_modify_qp()
821 812 * Modify a Queue Pair
822 813 * Context: Can be called from interrupt or base context.
823 814 */
824 815 static ibt_status_t
825 816 hermon_ci_modify_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
826 817 ibt_cep_modify_flags_t flags, ibt_qp_info_t *info_p,
827 818 ibt_queue_sizes_t *actual_sz)
828 819 {
829 820 hermon_state_t *state;
830 821 hermon_qphdl_t qphdl;
831 822 int status;
832 823
833 824 /* Grab the Hermon softstate pointer and QP handle */
834 825 state = (hermon_state_t *)hca;
835 826 qphdl = (hermon_qphdl_t)qp;
836 827
837 828 /* Modify the QP */
838 829 status = hermon_qp_modify(state, qphdl, flags, info_p, actual_sz);
839 830 return (status);
840 831 }
841 832
842 833
843 834 /*
844 835 * hermon_ci_alloc_cq()
845 836 * Allocate a Completion Queue
846 837 * Context: Can be called only from user or kernel context.
847 838 */
848 839 /* ARGSUSED */
849 840 static ibt_status_t
850 841 hermon_ci_alloc_cq(ibc_hca_hdl_t hca, ibt_cq_hdl_t ibt_cqhdl,
851 842 ibt_cq_attr_t *attr_p, ibc_cq_hdl_t *cq_p, uint_t *actual_size)
852 843 {
853 844 hermon_state_t *state;
854 845 hermon_cqhdl_t cqhdl;
855 846 int status;
856 847
857 848 state = (hermon_state_t *)hca;
858 849
859 850 /* Allocate the CQ */
860 851 status = hermon_cq_alloc(state, ibt_cqhdl, attr_p, actual_size,
861 852 &cqhdl, HERMON_NOSLEEP);
862 853 if (status != DDI_SUCCESS) {
863 854 return (status);
864 855 }
865 856
866 857 /* Return the Hermon CQ handle */
867 858 *cq_p = (ibc_cq_hdl_t)cqhdl;
868 859
869 860 return (IBT_SUCCESS);
870 861 }
871 862
872 863
873 864 /*
874 865 * hermon_ci_free_cq()
875 866 * Free a Completion Queue
876 867 * Context: Can be called only from user or kernel context.
877 868 */
878 869 static ibt_status_t
879 870 hermon_ci_free_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq)
880 871 {
881 872 hermon_state_t *state;
882 873 hermon_cqhdl_t cqhdl;
883 874 int status;
884 875
885 876 /* Grab the Hermon softstate pointer and CQ handle */
886 877 state = (hermon_state_t *)hca;
887 878 cqhdl = (hermon_cqhdl_t)cq;
888 879
889 880
890 881 /* Free the CQ */
891 882 status = hermon_cq_free(state, &cqhdl, HERMON_NOSLEEP);
892 883 return (status);
893 884 }
894 885
895 886
896 887 /*
897 888 * hermon_ci_query_cq()
898 889 * Return the size of a Completion Queue
899 890 * Context: Can be called only from user or kernel context.
900 891 */
901 892 static ibt_status_t
902 893 hermon_ci_query_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t *entries_p,
903 894 uint_t *count_p, uint_t *usec_p, ibt_cq_handler_id_t *hid_p)
904 895 {
905 896 hermon_state_t *state;
[ 160 lines elided ]
906 897 hermon_cqhdl_t cqhdl;
907 898
908 899 /* Grab the CQ handle */
909 900 state = (hermon_state_t *)hca;
910 901 cqhdl = (hermon_cqhdl_t)cq;
911 902
912 903 /* Query the current CQ size */
913 904 *entries_p = cqhdl->cq_bufsz;
914 905 *count_p = cqhdl->cq_intmod_count;
915 906 *usec_p = cqhdl->cq_intmod_usec;
916 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*cqhdl))
917 907 *hid_p = HERMON_EQNUM_TO_HID(state, cqhdl->cq_eqnum);
918 908
919 909 return (IBT_SUCCESS);
920 910 }
921 911
922 912
923 913 /*
924 914 * hermon_ci_resize_cq()
925 915 * Change the size of a Completion Queue
926 916 * Context: Can be called only from user or kernel context.
927 917 */
928 918 static ibt_status_t
929 919 hermon_ci_resize_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t size,
930 920 uint_t *actual_size)
931 921 {
932 922 hermon_state_t *state;
933 923 hermon_cqhdl_t cqhdl;
934 924 int status;
935 925
936 926 /* Grab the Hermon softstate pointer and CQ handle */
937 927 state = (hermon_state_t *)hca;
938 928 cqhdl = (hermon_cqhdl_t)cq;
939 929
940 930 /* Resize the CQ */
941 931 status = hermon_cq_resize(state, cqhdl, size, actual_size,
942 932 HERMON_NOSLEEP);
943 933 if (status != DDI_SUCCESS) {
944 934 return (status);
945 935 }
946 936 return (IBT_SUCCESS);
947 937 }
948 938
949 939 /*
950 940 * hermon_ci_modify_cq()
951 941 * Change the interrupt moderation values of a Completion Queue
952 942 * Context: Can be called only from user or kernel context.
953 943 */
954 944 static ibt_status_t
955 945 hermon_ci_modify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t count,
956 946 uint_t usec, ibt_cq_handler_id_t hid)
957 947 {
958 948 hermon_state_t *state;
959 949 hermon_cqhdl_t cqhdl;
960 950 int status;
961 951
962 952 /* Grab the Hermon softstate pointer and CQ handle */
963 953 state = (hermon_state_t *)hca;
964 954 cqhdl = (hermon_cqhdl_t)cq;
965 955
 966 956 	/* Modify the CQ */
967 957 status = hermon_cq_modify(state, cqhdl, count, usec, hid,
968 958 HERMON_NOSLEEP);
969 959 return (status);
970 960 }
971 961
972 962
973 963 /*
974 964 * hermon_ci_alloc_cq_sched()
975 965 * Reserve a CQ scheduling class resource
976 966 * Context: Can be called only from user or kernel context.
977 967 */
978 968 /* ARGSUSED */
979 969 static ibt_status_t
980 970 hermon_ci_alloc_cq_sched(ibc_hca_hdl_t hca, ibt_cq_sched_attr_t *attr,
981 971 ibc_sched_hdl_t *sched_hdl_p)
982 972 {
983 973 int status;
984 974
985 975 status = hermon_cq_sched_alloc((hermon_state_t *)hca, attr,
986 976 (hermon_cq_sched_t **)sched_hdl_p);
987 977 return (status);
988 978 }
989 979
990 980
991 981 /*
992 982 * hermon_ci_free_cq_sched()
993 983 * Free a CQ scheduling class resource
994 984 * Context: Can be called only from user or kernel context.
995 985 */
996 986 /* ARGSUSED */
997 987 static ibt_status_t
998 988 hermon_ci_free_cq_sched(ibc_hca_hdl_t hca, ibc_sched_hdl_t sched_hdl)
999 989 {
1000 990 int status;
1001 991
1002 992 status = hermon_cq_sched_free((hermon_state_t *)hca,
1003 993 (hermon_cq_sched_t *)sched_hdl);
1004 994 return (status);
1005 995 }
1006 996
1007 997 static ibt_status_t
[ 81 lines elided ]
1008 998 hermon_ci_query_cq_handler_id(ibc_hca_hdl_t hca,
1009 999 ibt_cq_handler_id_t hid, ibt_cq_handler_attr_t *attrs)
1010 1000 {
1011 1001 hermon_state_t *state;
1012 1002
1013 1003 state = (hermon_state_t *)hca;
1014 1004 if (!HERMON_HID_VALID(state, hid))
1015 1005 return (IBT_CQ_HID_INVALID);
1016 1006 if (attrs == NULL)
1017 1007 return (IBT_INVALID_PARAM);
1018 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attrs))
1019 1008 attrs->cha_ih = state->hs_intrmsi_hdl[hid - 1];
1020 1009 attrs->cha_dip = state->hs_dip;
1021 1010 return (IBT_SUCCESS);
1022 1011 }
1023 1012
1024 1013 /*
1025 1014 * hermon_ci_alloc_eec()
1026 1015 * Allocate an End-to-End context
1027 1016 * Context: Can be called only from user or kernel context.
1028 1017 */
1029 1018 /* ARGSUSED */
1030 1019 static ibt_status_t
1031 1020 hermon_ci_alloc_eec(ibc_hca_hdl_t hca, ibc_eec_flags_t flags,
1032 1021 ibt_eec_hdl_t ibt_eec, ibc_rdd_hdl_t rdd, ibc_eec_hdl_t *eec_p)
1033 1022 {
1034 1023 /*
1035 1024 * This is an unsupported interface for the Hermon driver. This
1036 1025 * interface is necessary to support Reliable Datagram (RD)
1037 1026 * operations. Hermon does not support RD.
1038 1027 */
1039 1028 return (IBT_NOT_SUPPORTED);
1040 1029 }
1041 1030
1042 1031
1043 1032 /*
1044 1033 * hermon_ci_free_eec()
1045 1034 * Free an End-to-End context
1046 1035 * Context: Can be called only from user or kernel context.
1047 1036 */
1048 1037 /* ARGSUSED */
1049 1038 static ibt_status_t
1050 1039 hermon_ci_free_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec)
1051 1040 {
1052 1041 /*
1053 1042 * This is an unsupported interface for the Hermon driver. This
1054 1043 * interface is necessary to support Reliable Datagram (RD)
1055 1044 * operations. Hermon does not support RD.
1056 1045 */
1057 1046 return (IBT_NOT_SUPPORTED);
1058 1047 }
1059 1048
1060 1049
1061 1050 /*
1062 1051 * hermon_ci_query_eec()
1063 1052 * Query an End-to-End context
1064 1053 * Context: Can be called from interrupt or base context.
1065 1054 */
1066 1055 /* ARGSUSED */
1067 1056 static ibt_status_t
1068 1057 hermon_ci_query_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
1069 1058 ibt_eec_query_attr_t *attr_p)
1070 1059 {
1071 1060 /*
1072 1061 * This is an unsupported interface for the Hermon driver. This
1073 1062 * interface is necessary to support Reliable Datagram (RD)
1074 1063 * operations. Hermon does not support RD.
1075 1064 */
1076 1065 return (IBT_NOT_SUPPORTED);
1077 1066 }
1078 1067
1079 1068
1080 1069 /*
1081 1070 * hermon_ci_modify_eec()
1082 1071 * Modify an End-to-End context
1083 1072 * Context: Can be called from interrupt or base context.
1084 1073 */
1085 1074 /* ARGSUSED */
1086 1075 static ibt_status_t
1087 1076 hermon_ci_modify_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
1088 1077 ibt_cep_modify_flags_t flags, ibt_eec_info_t *info_p)
1089 1078 {
1090 1079 /*
1091 1080 * This is an unsupported interface for the Hermon driver. This
1092 1081 * interface is necessary to support Reliable Datagram (RD)
1093 1082 * operations. Hermon does not support RD.
1094 1083 */
1095 1084 return (IBT_NOT_SUPPORTED);
1096 1085 }
1097 1086
1098 1087
1099 1088 /*
1100 1089 * hermon_ci_register_mr()
1101 1090 * Prepare a virtually addressed Memory Region for use by an HCA
1102 1091 * Context: Can be called from interrupt or base context.
1103 1092 */
1104 1093 /* ARGSUSED */
1105 1094 static ibt_status_t
[ 77 lines elided ]
1106 1095 hermon_ci_register_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1107 1096 ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
1108 1097 ibt_mr_desc_t *mr_desc)
1109 1098 {
1110 1099 hermon_mr_options_t op;
1111 1100 hermon_state_t *state;
1112 1101 hermon_pdhdl_t pdhdl;
1113 1102 hermon_mrhdl_t mrhdl;
1114 1103 int status;
1115 1104
1116 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1117 -
1118 1105 ASSERT(mr_attr != NULL);
1119 1106 ASSERT(mr_p != NULL);
1120 1107 ASSERT(mr_desc != NULL);
1121 1108
1122 1109 /*
1123 1110 * Validate the access flags. Both Remote Write and Remote Atomic
1124 1111 * require the Local Write flag to be set
1125 1112 */
1126 1113 if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1127 1114 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1128 1115 !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1129 1116 return (IBT_MR_ACCESS_REQ_INVALID);
1130 1117 }
1131 1118
1132 1119 /* Grab the Hermon softstate pointer and PD handle */
1133 1120 state = (hermon_state_t *)hca;
1134 1121 pdhdl = (hermon_pdhdl_t)pd;
[ 7 lines elided ]
1135 1122
1136 1123 /* Register the memory region */
1137 1124 op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
1138 1125 op.mro_bind_dmahdl = NULL;
1139 1126 op.mro_bind_override_addr = 0;
1140 1127 status = hermon_mr_register(state, pdhdl, mr_attr, &mrhdl,
1141 1128 &op, HERMON_MPT_DMPT);
1142 1129 if (status != DDI_SUCCESS) {
1143 1130 return (status);
1144 1131 }
1145 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
1146 1132
1147 1133 /* Fill in the mr_desc structure */
1148 1134 mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
1149 1135 mr_desc->md_lkey = mrhdl->mr_lkey;
1150 1136 /* Only set RKey if remote access was requested */
1151 1137 if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1152 1138 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1153 1139 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1154 1140 mr_desc->md_rkey = mrhdl->mr_rkey;
1155 1141 }
1156 1142
1157 1143 /*
 1158 1144 	 * If the region is mapped for streaming (i.e. noncoherent), then
 1159 1145 	 * flag that an explicit sync is required
1160 1146 */
1161 1147 mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
1162 1148 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1163 1149
1164 1150 /* Return the Hermon MR handle */
1165 1151 *mr_p = (ibc_mr_hdl_t)mrhdl;
1166 1152
1167 1153 return (IBT_SUCCESS);
1168 1154 }
1169 1155
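The Local Write rule checked at the top of hermon_ci_register_mr() recurs before every registration path in this file (register_buf, register_shared_mr, reregister_mr, reregister_buf, and register_dma_mr all repeat it): Remote Write and Remote Atomic are only legal together with Local Write. A self-contained sketch of just that predicate, with EX_MR_* bit values made up for illustration (the real flags are the IBT_MR_ENABLE_* values from the IBTF headers):

#include <stdio.h>

#define	EX_MR_LOCAL_WRITE	0x1
#define	EX_MR_REMOTE_WRITE	0x2
#define	EX_MR_REMOTE_ATOMIC	0x4

/* Returns 0 where the driver would return IBT_MR_ACCESS_REQ_INVALID. */
static int
ex_mr_flags_valid(unsigned int flags)
{
	if ((flags & (EX_MR_REMOTE_WRITE | EX_MR_REMOTE_ATOMIC)) &&
	    !(flags & EX_MR_LOCAL_WRITE))
		return (0);
	return (1);
}

int
main(void)
{
	/* Remote Write without Local Write: invalid (prints 0) */
	printf("%d\n", ex_mr_flags_valid(EX_MR_REMOTE_WRITE));
	/* Remote Write with Local Write: valid (prints 1) */
	printf("%d\n",
	    ex_mr_flags_valid(EX_MR_REMOTE_WRITE | EX_MR_LOCAL_WRITE));
	return (0);
}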
1170 1156
1171 1157 /*
1172 1158 * hermon_ci_register_buf()
1173 1159 * Prepare a Memory Region specified by buf structure for use by an HCA
1174 1160 * Context: Can be called from interrupt or base context.
1175 1161 */
1176 1162 /* ARGSUSED */
1177 1163 static ibt_status_t
1178 1164 hermon_ci_register_buf(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
[ 23 lines elided ]
1179 1165 ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
1180 1166 ibt_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
1181 1167 {
1182 1168 hermon_mr_options_t op;
1183 1169 hermon_state_t *state;
1184 1170 hermon_pdhdl_t pdhdl;
1185 1171 hermon_mrhdl_t mrhdl;
1186 1172 int status;
1187 1173 ibt_mr_flags_t flags = attrp->mr_flags;
1188 1174
1189 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1190 -
1191 1175 ASSERT(mr_p != NULL);
1192 1176 ASSERT(mr_desc != NULL);
1193 1177
1194 1178 /*
1195 1179 * Validate the access flags. Both Remote Write and Remote Atomic
1196 1180 * require the Local Write flag to be set
1197 1181 */
1198 1182 if (((flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1199 1183 (flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1200 1184 !(flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1201 1185 return (IBT_MR_ACCESS_REQ_INVALID);
1202 1186 }
1203 1187
1204 1188 /* Grab the Hermon softstate pointer and PD handle */
1205 1189 state = (hermon_state_t *)hca;
1206 1190 pdhdl = (hermon_pdhdl_t)pd;
[ 6 lines elided ]
1207 1191
1208 1192 /* Register the memory region */
1209 1193 op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
1210 1194 op.mro_bind_dmahdl = NULL;
1211 1195 op.mro_bind_override_addr = 0;
1212 1196 status = hermon_mr_register_buf(state, pdhdl, attrp, buf,
1213 1197 &mrhdl, &op, HERMON_MPT_DMPT);
1214 1198 if (status != DDI_SUCCESS) {
1215 1199 return (status);
1216 1200 }
1217 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
1218 1201
1219 1202 /* Fill in the mr_desc structure */
1220 1203 mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
1221 1204 mr_desc->md_lkey = mrhdl->mr_lkey;
1222 1205 /* Only set RKey if remote access was requested */
1223 1206 if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1224 1207 (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1225 1208 (flags & IBT_MR_ENABLE_REMOTE_READ)) {
1226 1209 mr_desc->md_rkey = mrhdl->mr_rkey;
1227 1210 }
1228 1211
1229 1212 /*
 1230 1213 	 * If the region is mapped for streaming (i.e. noncoherent), then
 1231 1214 	 * flag that an explicit sync is required
1232 1215 */
1233 1216 mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
1234 1217 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1235 1218
1236 1219 /* Return the Hermon MR handle */
1237 1220 *mr_p = (ibc_mr_hdl_t)mrhdl;
1238 1221
1239 1222 return (IBT_SUCCESS);
1240 1223 }
1241 1224
1242 1225
1243 1226 /*
1244 1227 * hermon_ci_deregister_mr()
1245 1228 * Deregister a Memory Region from an HCA translation table
1246 1229 * Context: Can be called only from user or kernel context.
1247 1230 */
1248 1231 static ibt_status_t
1249 1232 hermon_ci_deregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
1250 1233 {
1251 1234 hermon_state_t *state;
1252 1235 hermon_mrhdl_t mrhdl;
1253 1236 int status;
1254 1237
1255 1238 /* Grab the Hermon softstate pointer */
1256 1239 state = (hermon_state_t *)hca;
1257 1240 mrhdl = (hermon_mrhdl_t)mr;
1258 1241
1259 1242 /*
1260 1243 * Deregister the memory region.
1261 1244 */
1262 1245 status = hermon_mr_deregister(state, &mrhdl, HERMON_MR_DEREG_ALL,
1263 1246 HERMON_NOSLEEP);
1264 1247 return (status);
1265 1248 }
1266 1249
1267 1250
1268 1251 /*
1269 1252 * hermon_ci_query_mr()
1270 1253 * Retrieve information about a specified Memory Region
1271 1254 * Context: Can be called from interrupt or base context.
1272 1255 */
1273 1256 static ibt_status_t
1274 1257 hermon_ci_query_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
1275 1258 ibt_mr_query_attr_t *mr_attr)
1276 1259 {
1277 1260 hermon_state_t *state;
1278 1261 hermon_mrhdl_t mrhdl;
1279 1262 int status;
1280 1263
1281 1264 ASSERT(mr_attr != NULL);
1282 1265
1283 1266 /* Grab the Hermon softstate pointer and MR handle */
1284 1267 state = (hermon_state_t *)hca;
1285 1268 mrhdl = (hermon_mrhdl_t)mr;
1286 1269
1287 1270 /* Query the memory region */
1288 1271 status = hermon_mr_query(state, mrhdl, mr_attr);
1289 1272 return (status);
1290 1273 }
1291 1274
1292 1275
1293 1276 /*
1294 1277 * hermon_ci_register_shared_mr()
1295 1278 * Create a shared memory region matching an existing Memory Region
1296 1279 * Context: Can be called from interrupt or base context.
1297 1280 */
1298 1281 /* ARGSUSED */
[ 71 lines elided ]
1299 1282 static ibt_status_t
1300 1283 hermon_ci_register_shared_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
1301 1284 ibc_pd_hdl_t pd, ibt_smr_attr_t *mr_attr, void *ibtl_reserved,
1302 1285 ibc_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
1303 1286 {
1304 1287 hermon_state_t *state;
1305 1288 hermon_pdhdl_t pdhdl;
1306 1289 hermon_mrhdl_t mrhdl, mrhdl_new;
1307 1290 int status;
1308 1291
1309 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1310 -
1311 1292 ASSERT(mr_attr != NULL);
1312 1293 ASSERT(mr_p != NULL);
1313 1294 ASSERT(mr_desc != NULL);
1314 1295
1315 1296 /*
1316 1297 * Validate the access flags. Both Remote Write and Remote Atomic
1317 1298 * require the Local Write flag to be set
1318 1299 */
1319 1300 if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1320 1301 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1321 1302 !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1322 1303 return (IBT_MR_ACCESS_REQ_INVALID);
1323 1304 }
1324 1305
1325 1306 /* Grab the Hermon softstate pointer and handles */
[ 5 lines elided ]
1326 1307 state = (hermon_state_t *)hca;
1327 1308 pdhdl = (hermon_pdhdl_t)pd;
1328 1309 mrhdl = (hermon_mrhdl_t)mr;
1329 1310
1330 1311 /* Register the shared memory region */
1331 1312 status = hermon_mr_register_shared(state, mrhdl, pdhdl, mr_attr,
1332 1313 &mrhdl_new);
1333 1314 if (status != DDI_SUCCESS) {
1334 1315 return (status);
1335 1316 }
1336 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1337 1317
1338 1318 /* Fill in the mr_desc structure */
1339 1319 mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1340 1320 mr_desc->md_lkey = mrhdl_new->mr_lkey;
1341 1321 /* Only set RKey if remote access was requested */
1342 1322 if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1343 1323 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1344 1324 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1345 1325 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1346 1326 }
1347 1327
1348 1328 /*
 1349 1329 	 * If the shared region is mapped for streaming (i.e. noncoherent),
 1350 1330 	 * then flag that an explicit sync is required
1351 1331 */
1352 1332 mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1353 1333 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1354 1334
1355 1335 /* Return the Hermon MR handle */
1356 1336 *mr_p = (ibc_mr_hdl_t)mrhdl_new;
1357 1337
1358 1338 return (IBT_SUCCESS);
1359 1339 }
1360 1340
1361 1341
1362 1342 /*
1363 1343 * hermon_ci_reregister_mr()
1364 1344 * Modify the attributes of an existing Memory Region
1365 1345 * Context: Can be called from interrupt or base context.
1366 1346 */
1367 1347 /* ARGSUSED */
1368 1348 static ibt_status_t
[ 22 lines elided ]
1369 1349 hermon_ci_reregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
1370 1350 ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_new,
1371 1351 ibt_mr_desc_t *mr_desc)
1372 1352 {
1373 1353 hermon_mr_options_t op;
1374 1354 hermon_state_t *state;
1375 1355 hermon_pdhdl_t pdhdl;
1376 1356 hermon_mrhdl_t mrhdl, mrhdl_new;
1377 1357 int status;
1378 1358
1379 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1380 -
1381 1359 ASSERT(mr_attr != NULL);
1382 1360 ASSERT(mr_new != NULL);
1383 1361 ASSERT(mr_desc != NULL);
1384 1362
1385 1363 /* Grab the Hermon softstate pointer, mrhdl, and pdhdl */
1386 1364 state = (hermon_state_t *)hca;
1387 1365 mrhdl = (hermon_mrhdl_t)mr;
1388 1366 pdhdl = (hermon_pdhdl_t)pd;
1389 1367
1390 1368 /* Reregister the memory region */
1391 1369 op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
1392 1370 status = hermon_mr_reregister(state, mrhdl, pdhdl, mr_attr,
1393 1371 &mrhdl_new, &op);
1394 1372 if (status != DDI_SUCCESS) {
1395 1373 return (status);
1396 1374 }
1397 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1398 1375
1399 1376 /* Fill in the mr_desc structure */
1400 1377 mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1401 1378 mr_desc->md_lkey = mrhdl_new->mr_lkey;
1402 1379 /* Only set RKey if remote access was requested */
1403 1380 if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1404 1381 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1405 1382 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1406 1383 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1407 1384 }
1408 1385
1409 1386 /*
 1410 1387 	 * If the region is mapped for streaming (i.e. noncoherent), then
 1411 1388 	 * flag that an explicit sync is required
1412 1389 */
1413 1390 mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1414 1391 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1415 1392
1416 1393 /* Return the Hermon MR handle */
1417 1394 *mr_new = (ibc_mr_hdl_t)mrhdl_new;
1418 1395
1419 1396 return (IBT_SUCCESS);
1420 1397 }
1421 1398
1422 1399
1423 1400 /*
1424 1401 * hermon_ci_reregister_buf()
1425 1402 * Modify the attributes of an existing Memory Region
1426 1403 * Context: Can be called from interrupt or base context.
1427 1404 */
1428 1405 /* ARGSUSED */
1429 1406 static ibt_status_t
1430 1407 hermon_ci_reregister_buf(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
[ 23 lines elided ]
1431 1408 ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
1432 1409 ibc_mr_hdl_t *mr_new, ibt_mr_desc_t *mr_desc)
1433 1410 {
1434 1411 hermon_mr_options_t op;
1435 1412 hermon_state_t *state;
1436 1413 hermon_pdhdl_t pdhdl;
1437 1414 hermon_mrhdl_t mrhdl, mrhdl_new;
1438 1415 int status;
1439 1416 ibt_mr_flags_t flags = attrp->mr_flags;
1440 1417
1441 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1442 -
1443 1418 ASSERT(mr_new != NULL);
1444 1419 ASSERT(mr_desc != NULL);
1445 1420
1446 1421 /* Grab the Hermon softstate pointer, mrhdl, and pdhdl */
1447 1422 state = (hermon_state_t *)hca;
1448 1423 mrhdl = (hermon_mrhdl_t)mr;
1449 1424 pdhdl = (hermon_pdhdl_t)pd;
1450 1425
1451 1426 /* Reregister the memory region */
1452 1427 op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
1453 1428 status = hermon_mr_reregister_buf(state, mrhdl, pdhdl, attrp, buf,
1454 1429 &mrhdl_new, &op);
1455 1430 if (status != DDI_SUCCESS) {
1456 1431 return (status);
1457 1432 }
1458 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1459 1433
1460 1434 /* Fill in the mr_desc structure */
1461 1435 mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1462 1436 mr_desc->md_lkey = mrhdl_new->mr_lkey;
1463 1437 /* Only set RKey if remote access was requested */
1464 1438 if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1465 1439 (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1466 1440 (flags & IBT_MR_ENABLE_REMOTE_READ)) {
1467 1441 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1468 1442 }
1469 1443
1470 1444 /*
 1471 1445 	 * If the region is mapped for streaming (i.e. noncoherent), then
 1472 1446 	 * flag that an explicit sync is required
1473 1447 */
1474 1448 mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1475 1449 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1476 1450
1477 1451 /* Return the Hermon MR handle */
1478 1452 *mr_new = (ibc_mr_hdl_t)mrhdl_new;
1479 1453
1480 1454 return (IBT_SUCCESS);
1481 1455 }
1482 1456
1483 1457 /*
1484 1458 * hermon_ci_sync_mr()
1485 1459 * Synchronize access to a Memory Region
1486 1460 * Context: Can be called from interrupt or base context.
1487 1461 */
1488 1462 static ibt_status_t
1489 1463 hermon_ci_sync_mr(ibc_hca_hdl_t hca, ibt_mr_sync_t *mr_segs, size_t num_segs)
1490 1464 {
1491 1465 hermon_state_t *state;
1492 1466 int status;
1493 1467
1494 1468 ASSERT(mr_segs != NULL);
1495 1469
1496 1470 /* Grab the Hermon softstate pointer */
1497 1471 state = (hermon_state_t *)hca;
1498 1472
1499 1473 /* Sync the memory region */
1500 1474 status = hermon_mr_sync(state, mr_segs, num_segs);
1501 1475 return (status);
1502 1476 }
1503 1477
1504 1478
1505 1479 /*
1506 1480 * hermon_ci_alloc_mw()
1507 1481 * Allocate a Memory Window
1508 1482 * Context: Can be called from interrupt or base context.
1509 1483 */
1510 1484 static ibt_status_t
1511 1485 hermon_ci_alloc_mw(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, ibt_mw_flags_t flags,
1512 1486 ibc_mw_hdl_t *mw_p, ibt_rkey_t *rkey_p)
1513 1487 {
1514 1488 hermon_state_t *state;
1515 1489 hermon_pdhdl_t pdhdl;
1516 1490 hermon_mwhdl_t mwhdl;
1517 1491 int status;
1518 1492
1519 1493 ASSERT(mw_p != NULL);
1520 1494 ASSERT(rkey_p != NULL);
[ 52 lines elided ]
1521 1495
1522 1496 /* Grab the Hermon softstate pointer and PD handle */
1523 1497 state = (hermon_state_t *)hca;
1524 1498 pdhdl = (hermon_pdhdl_t)pd;
1525 1499
1526 1500 /* Allocate the memory window */
1527 1501 status = hermon_mw_alloc(state, pdhdl, flags, &mwhdl);
1528 1502 if (status != DDI_SUCCESS) {
1529 1503 return (status);
1530 1504 }
1531 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mwhdl))
1532 1505
1533 1506 /* Return the MW handle and RKey */
1534 1507 *mw_p = (ibc_mw_hdl_t)mwhdl;
1535 1508 *rkey_p = mwhdl->mr_rkey;
1536 1509
1537 1510 return (IBT_SUCCESS);
1538 1511 }
1539 1512
1540 1513
1541 1514 /*
1542 1515 * hermon_ci_free_mw()
1543 1516 * Free a Memory Window
1544 1517 * Context: Can be called from interrupt or base context.
1545 1518 */
1546 1519 static ibt_status_t
1547 1520 hermon_ci_free_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw)
1548 1521 {
1549 1522 hermon_state_t *state;
1550 1523 hermon_mwhdl_t mwhdl;
1551 1524 int status;
1552 1525
1553 1526 /* Grab the Hermon softstate pointer and MW handle */
1554 1527 state = (hermon_state_t *)hca;
1555 1528 mwhdl = (hermon_mwhdl_t)mw;
1556 1529
1557 1530 /* Free the memory window */
1558 1531 status = hermon_mw_free(state, &mwhdl, HERMON_NOSLEEP);
1559 1532 return (status);
1560 1533 }
1561 1534
1562 1535
1563 1536 /*
1564 1537 * hermon_ci_query_mw()
1565 1538 * Return the attributes of the specified Memory Window
1566 1539 * Context: Can be called from interrupt or base context.
1567 1540 */
1568 1541 /* ARGSUSED */
1569 1542 static ibt_status_t
1570 1543 hermon_ci_query_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw,
1571 1544 ibt_mw_query_attr_t *mw_attr_p)
1572 1545 {
1573 1546 hermon_mwhdl_t mwhdl;
1574 1547
1575 1548 ASSERT(mw_attr_p != NULL);
1576 1549
1577 1550 /* Query the memory window pointer and fill in the return values */
1578 1551 mwhdl = (hermon_mwhdl_t)mw;
1579 1552 mutex_enter(&mwhdl->mr_lock);
1580 1553 mw_attr_p->mw_pd = (ibc_pd_hdl_t)mwhdl->mr_pdhdl;
1581 1554 mw_attr_p->mw_rkey = mwhdl->mr_rkey;
1582 1555 mutex_exit(&mwhdl->mr_lock);
1583 1556
1584 1557 return (IBT_SUCCESS);
1585 1558 }
1586 1559
1587 1560
1588 1561 /*
1589 1562 * hermon_ci_register_dma_mr()
1590 1563 * Allocate a memory region that maps physical addresses.
1591 1564 * Context: Can be called only from user or kernel context.
1592 1565 */
1593 1566 /* ARGSUSED */
1594 1567 static ibt_status_t
1595 1568 hermon_ci_register_dma_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1596 1569 ibt_dmr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
1597 1570 ibt_mr_desc_t *mr_desc)
1598 1571 {
1599 1572 hermon_state_t *state;
1600 1573 hermon_pdhdl_t pdhdl;
1601 1574 hermon_mrhdl_t mrhdl;
1602 1575 int status;
1603 1576
1604 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1605 -
1606 1577 ASSERT(mr_attr != NULL);
1607 1578 ASSERT(mr_p != NULL);
1608 1579 ASSERT(mr_desc != NULL);
1609 1580
1610 1581 /*
1611 1582 * Validate the access flags. Both Remote Write and Remote Atomic
1612 1583 * require the Local Write flag to be set
1613 1584 */
1614 1585 if (((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1615 1586 (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1616 1587 !(mr_attr->dmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1617 1588 return (IBT_MR_ACCESS_REQ_INVALID);
1618 1589 }
1619 1590
1620 1591 /* Grab the Hermon softstate pointer and PD handle */
1621 1592 state = (hermon_state_t *)hca;
1622 1593 pdhdl = (hermon_pdhdl_t)pd;
1623 1594
1624 1595 status = hermon_dma_mr_register(state, pdhdl, mr_attr, &mrhdl);
1625 1596 if (status != DDI_SUCCESS) {
1626 1597 return (status);
1627 1598 }
1628 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
1629 1599
1630 1600 /* Fill in the mr_desc structure */
1631 1601 mr_desc->md_vaddr = mr_attr->dmr_paddr;
1632 1602 mr_desc->md_lkey = mrhdl->mr_lkey;
1633 1603 /* Only set RKey if remote access was requested */
1634 1604 if ((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1635 1605 (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1636 1606 (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1637 1607 mr_desc->md_rkey = mrhdl->mr_rkey;
1638 1608 }
1639 1609
1640 1610 /*
1641 1611 	 * If the region is mapped for streaming (i.e. noncoherent), then
1642 1612 	 * sync is required
1643 1613 */
1644 1614 mr_desc->md_sync_required = B_FALSE;
1645 1615
1646 1616 /* Return the Hermon MR handle */
1647 1617 *mr_p = (ibc_mr_hdl_t)mrhdl;
1648 1618
1649 1619 return (IBT_SUCCESS);
1650 1620 }
1651 1621
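The access-flag rule validated in hermon_ci_register_dma_mr() (and again in hermon_ci_create_fmr_pool() below) reflects the InfiniBand verbs requirement that enabling Remote Write or Remote Atomic access only makes sense if Local Write is also enabled. A minimal sketch of that check as a standalone predicate, using only the ibt_mr_flags_t bits seen above (the helper name is hypothetical):

	static boolean_t
	hermon_mr_access_flags_ok(ibt_mr_flags_t flags)
	{
		/* Remote Write and Remote Atomic both require Local Write */
		if ((flags & (IBT_MR_ENABLE_REMOTE_WRITE |
		    IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
		    !(flags & IBT_MR_ENABLE_LOCAL_WRITE))
			return (B_FALSE);
		return (B_TRUE);
	}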
1652 1622
1653 1623 /*
1654 1624 * hermon_ci_attach_mcg()
1655 1625 * Attach a Queue Pair to a Multicast Group
1656 1626 * Context: Can be called only from user or kernel context.
1657 1627 */
1658 1628 static ibt_status_t
1659 1629 hermon_ci_attach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
1660 1630 ib_lid_t lid)
1661 1631 {
1662 1632 hermon_state_t *state;
1663 1633 hermon_qphdl_t qphdl;
1664 1634 int status;
1665 1635
1666 1636 /* Grab the Hermon softstate pointer and QP handles */
1667 1637 state = (hermon_state_t *)hca;
1668 1638 qphdl = (hermon_qphdl_t)qp;
1669 1639
1670 1640 /* Attach the QP to the multicast group */
1671 1641 status = hermon_mcg_attach(state, qphdl, gid, lid);
1672 1642 return (status);
1673 1643 }
1674 1644
1675 1645
1676 1646 /*
1677 1647 * hermon_ci_detach_mcg()
1678 1648  *	Detach a Queue Pair from a Multicast Group
1679 1649 * Context: Can be called only from user or kernel context.
1680 1650 */
1681 1651 static ibt_status_t
1682 1652 hermon_ci_detach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
1683 1653 ib_lid_t lid)
1684 1654 {
1685 1655 hermon_state_t *state;
1686 1656 hermon_qphdl_t qphdl;
1687 1657 int status;
1688 1658
1689 1659 /* Grab the Hermon softstate pointer and QP handle */
1690 1660 state = (hermon_state_t *)hca;
1691 1661 qphdl = (hermon_qphdl_t)qp;
1692 1662
1693 1663 /* Detach the QP from the multicast group */
1694 1664 status = hermon_mcg_detach(state, qphdl, gid, lid);
1695 1665 return (status);
1696 1666 }
1697 1667
1698 1668
1699 1669 /*
1700 1670 * hermon_ci_post_send()
1701 1671 * Post send work requests to the send queue on the specified QP
1702 1672 * Context: Can be called from interrupt or base context.
1703 1673 */
1704 1674 static ibt_status_t
1705 1675 hermon_ci_post_send(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_send_wr_t *wr_p,
1706 1676 uint_t num_wr, uint_t *num_posted_p)
1707 1677 {
1708 1678 hermon_state_t *state;
1709 1679 hermon_qphdl_t qphdl;
1710 1680 int status;
1711 1681
1712 1682 ASSERT(wr_p != NULL);
1713 1683 ASSERT(num_wr != 0);
1714 1684
1715 1685 /* Grab the Hermon softstate pointer and QP handle */
1716 1686 state = (hermon_state_t *)hca;
1717 1687 qphdl = (hermon_qphdl_t)qp;
1718 1688
1719 1689 /* Post the send WQEs */
1720 1690 status = hermon_post_send(state, qphdl, wr_p, num_wr, num_posted_p);
1721 1691 return (status);
1722 1692 }
1723 1693
1724 1694
1725 1695 /*
1726 1696 * hermon_ci_post_recv()
1727 1697 * Post receive work requests to the receive queue on the specified QP
1728 1698 * Context: Can be called from interrupt or base context.
1729 1699 */
1730 1700 static ibt_status_t
1731 1701 hermon_ci_post_recv(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_recv_wr_t *wr_p,
1732 1702 uint_t num_wr, uint_t *num_posted_p)
1733 1703 {
1734 1704 hermon_state_t *state;
1735 1705 hermon_qphdl_t qphdl;
1736 1706 int status;
1737 1707
1738 1708 ASSERT(wr_p != NULL);
1739 1709 ASSERT(num_wr != 0);
1740 1710
1741 1711 state = (hermon_state_t *)hca;
1742 1712 qphdl = (hermon_qphdl_t)qp;
1743 1713
1744 1714 /* Post the receive WQEs */
1745 1715 status = hermon_post_recv(state, qphdl, wr_p, num_wr, num_posted_p);
1746 1716 return (status);
1747 1717 }
1748 1718
1749 1719
1750 1720 /*
1751 1721 * hermon_ci_poll_cq()
1752 1722 * Poll for a work request completion
1753 1723 * Context: Can be called from interrupt or base context.
1754 1724 */
1755 1725 static ibt_status_t
1756 1726 hermon_ci_poll_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, ibt_wc_t *wc_p,
1757 1727 uint_t num_wc, uint_t *num_polled)
1758 1728 {
1759 1729 hermon_state_t *state;
1760 1730 hermon_cqhdl_t cqhdl;
1761 1731 int status;
1762 1732
1763 1733 ASSERT(wc_p != NULL);
1764 1734
1765 1735 /* Check for valid num_wc field */
1766 1736 if (num_wc == 0) {
1767 1737 return (IBT_INVALID_PARAM);
1768 1738 }
1769 1739
1770 1740 /* Grab the Hermon softstate pointer and CQ handle */
1771 1741 state = (hermon_state_t *)hca;
1772 1742 cqhdl = (hermon_cqhdl_t)cq;
1773 1743
1774 1744 /* Poll for work request completions */
1775 1745 status = hermon_cq_poll(state, cqhdl, wc_p, num_wc, num_polled);
1776 1746 return (status);
1777 1747 }
1778 1748
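hermon_ci_poll_cq() is the CI backend for the IBTF polling entry point; a client typically drains the CQ in batches until it reports empty, then rearms notification via the notify entry point below. A hedged caller-side sketch against the standard ibtl interfaces (handle_wc() is a hypothetical consumer; a production consumer would poll once more after rearming to close the missed-event race):

	ibt_wc_t	wc[8];
	uint_t		polled, w;
	ibt_status_t	rc;

	/* Drain completions in batches until the CQ reports empty */
	while ((rc = ibt_poll_cq(cq_hdl, wc, 8, &polled)) == IBT_SUCCESS) {
		for (w = 0; w < polled; w++)
			handle_wc(&wc[w]);
	}
	if (rc == IBT_CQ_EMPTY)
		(void) ibt_enable_cq_notify(cq_hdl, IBT_NEXT_COMPLETION);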
1779 1749
1780 1750 /*
1781 1751 * hermon_ci_notify_cq()
1782 1752 * Enable notification events on the specified CQ
1783 1753 * Context: Can be called from interrupt or base context.
1784 1754 */
1785 1755 static ibt_status_t
1786 1756 hermon_ci_notify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq_hdl,
1787 1757 ibt_cq_notify_flags_t flags)
1788 1758 {
1789 1759 hermon_state_t *state;
1790 1760 hermon_cqhdl_t cqhdl;
1791 1761 int status;
1792 1762
1793 1763 /* Grab the Hermon softstate pointer and CQ handle */
1794 1764 state = (hermon_state_t *)hca;
1795 1765 cqhdl = (hermon_cqhdl_t)cq_hdl;
1796 1766
1797 1767 /* Enable the CQ notification */
1798 1768 status = hermon_cq_notify(state, cqhdl, flags);
1799 1769 return (status);
1800 1770 }
1801 1771
1802 1772 /*
1803 1773 * hermon_ci_ci_data_in()
1804 1774 * Exchange CI-specific data.
1805 1775 * Context: Can be called only from user or kernel context.
1806 1776 */
1807 1777 static ibt_status_t
1808 1778 hermon_ci_ci_data_in(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
1809 1779 ibt_object_type_t object, void *ibc_object_handle, void *data_p,
1810 1780 size_t data_sz)
1811 1781 {
1812 1782 hermon_state_t *state;
1813 1783 int status;
1814 1784
1815 1785 /* Grab the Hermon softstate pointer */
1816 1786 state = (hermon_state_t *)hca;
1817 1787
1818 1788 /* Get the Hermon userland mapping information */
1819 1789 status = hermon_umap_ci_data_in(state, flags, object,
1820 1790 ibc_object_handle, data_p, data_sz);
1821 1791 return (status);
1822 1792 }
1823 1793
1824 1794 /*
1825 1795 * hermon_ci_ci_data_out()
1826 1796 * Exchange CI-specific data.
1827 1797 * Context: Can be called only from user or kernel context.
1828 1798 */
1829 1799 static ibt_status_t
1830 1800 hermon_ci_ci_data_out(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
1831 1801 ibt_object_type_t object, void *ibc_object_handle, void *data_p,
1832 1802 size_t data_sz)
1833 1803 {
1834 1804 hermon_state_t *state;
1835 1805 int status;
1836 1806
1837 1807 /* Grab the Hermon softstate pointer */
1838 1808 state = (hermon_state_t *)hca;
1839 1809
1840 1810 /* Get the Hermon userland mapping information */
1841 1811 status = hermon_umap_ci_data_out(state, flags, object,
1842 1812 ibc_object_handle, data_p, data_sz);
1843 1813 return (status);
1844 1814 }
1845 1815
1846 1816
1847 1817 /*
1848 1818 * hermon_ci_alloc_srq()
1849 1819 * Allocate a Shared Receive Queue (SRQ)
1850 1820 * Context: Can be called only from user or kernel context
1851 1821 */
1852 1822 static ibt_status_t
1853 1823 hermon_ci_alloc_srq(ibc_hca_hdl_t hca, ibt_srq_flags_t flags,
1854 1824 ibt_srq_hdl_t ibt_srq, ibc_pd_hdl_t pd, ibt_srq_sizes_t *sizes,
1855 1825 ibc_srq_hdl_t *ibc_srq_p, ibt_srq_sizes_t *ret_sizes_p)
1856 1826 {
1857 1827 hermon_state_t *state;
1858 1828 hermon_pdhdl_t pdhdl;
1859 1829 hermon_srqhdl_t srqhdl;
1860 1830 hermon_srq_info_t srqinfo;
1861 1831 int status;
1862 1832
1863 1833 state = (hermon_state_t *)hca;
1864 1834 pdhdl = (hermon_pdhdl_t)pd;
1865 1835
1866 1836 srqinfo.srqi_ibt_srqhdl = ibt_srq;
1867 1837 srqinfo.srqi_pd = pdhdl;
1868 1838 srqinfo.srqi_sizes = sizes;
1869 1839 srqinfo.srqi_real_sizes = ret_sizes_p;
1870 1840 srqinfo.srqi_srqhdl = &srqhdl;
1871 1841 srqinfo.srqi_flags = flags;
1872 1842
1873 1843 status = hermon_srq_alloc(state, &srqinfo, HERMON_NOSLEEP);
1874 1844 if (status != DDI_SUCCESS) {
1875 1845 return (status);
1876 1846 }
1877 1847
1878 1848 *ibc_srq_p = (ibc_srq_hdl_t)srqhdl;
1879 1849
1880 1850 return (IBT_SUCCESS);
1881 1851 }
1882 1852
1883 1853 /*
1884 1854 * hermon_ci_free_srq()
1885 1855 * Free a Shared Receive Queue (SRQ)
1886 1856 * Context: Can be called only from user or kernel context
1887 1857 */
1888 1858 static ibt_status_t
1889 1859 hermon_ci_free_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq)
1890 1860 {
1891 1861 hermon_state_t *state;
1892 1862 hermon_srqhdl_t srqhdl;
1893 1863 int status;
1894 1864
1895 1865 state = (hermon_state_t *)hca;
1896 1866
1897 1867 /* Check for valid SRQ handle pointer */
1898 1868 if (srq == NULL) {
1899 1869 return (IBT_SRQ_HDL_INVALID);
1900 1870 }
1901 1871
1902 1872 srqhdl = (hermon_srqhdl_t)srq;
1903 1873
1904 1874 /* Free the SRQ */
1905 1875 status = hermon_srq_free(state, &srqhdl, HERMON_NOSLEEP);
1906 1876 return (status);
1907 1877 }
1908 1878
1909 1879 /*
1910 1880 * hermon_ci_query_srq()
1911 1881 * Query properties of a Shared Receive Queue (SRQ)
1912 1882 * Context: Can be called from interrupt or base context.
1913 1883 */
1914 1884 /* ARGSUSED */
1915 1885 static ibt_status_t
1916 1886 hermon_ci_query_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq, ibc_pd_hdl_t *pd_p,
1917 1887 ibt_srq_sizes_t *sizes_p, uint_t *limit_p)
1918 1888 {
1919 1889 hermon_srqhdl_t srqhdl;
1920 1890
1921 1891 srqhdl = (hermon_srqhdl_t)srq;
1922 1892
1923 1893 mutex_enter(&srqhdl->srq_lock);
1924 1894 if (srqhdl->srq_state == HERMON_SRQ_STATE_ERROR) {
1925 1895 mutex_exit(&srqhdl->srq_lock);
1926 1896 return (IBT_SRQ_ERROR_STATE);
1927 1897 }
1928 1898
1929 1899 *pd_p = (ibc_pd_hdl_t)srqhdl->srq_pdhdl;
1930 1900 sizes_p->srq_wr_sz = srqhdl->srq_real_sizes.srq_wr_sz - 1;
1931 1901 sizes_p->srq_sgl_sz = srqhdl->srq_real_sizes.srq_sgl_sz;
1932 1902 mutex_exit(&srqhdl->srq_lock);
1933 1903 *limit_p = 0;
1934 1904
1935 1905 return (IBT_SUCCESS);
1936 1906 }
1937 1907
1938 1908 /*
1939 1909 * hermon_ci_modify_srq()
1940 1910 * Modify properties of a Shared Receive Queue (SRQ)
1941 1911 * Context: Can be called from interrupt or base context.
1942 1912 */
1943 1913 /* ARGSUSED */
1944 1914 static ibt_status_t
1945 1915 hermon_ci_modify_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
1946 1916 ibt_srq_modify_flags_t flags, uint_t size, uint_t limit, uint_t *ret_size_p)
1947 1917 {
1948 1918 hermon_state_t *state;
1949 1919 hermon_srqhdl_t srqhdl;
1950 1920 uint_t resize_supported, cur_srq_size;
1951 1921 int status;
1952 1922
1953 1923 state = (hermon_state_t *)hca;
1954 1924 srqhdl = (hermon_srqhdl_t)srq;
1955 1925
1956 1926 /*
1957 1927 * Check Error State of SRQ.
1958 1928 * Also, while we are holding the lock we save away the current SRQ
1959 1929 * size for later use.
1960 1930 */
1961 1931 mutex_enter(&srqhdl->srq_lock);
1962 1932 cur_srq_size = srqhdl->srq_wq_bufsz;
1963 1933 if (srqhdl->srq_state == HERMON_SRQ_STATE_ERROR) {
1964 1934 mutex_exit(&srqhdl->srq_lock);
1965 1935 return (IBT_SRQ_ERROR_STATE);
1966 1936 }
1967 1937 mutex_exit(&srqhdl->srq_lock);
1968 1938
1969 1939 /*
1970 1940 * Setting the limit watermark is not currently supported. This is a
1971 1941 * hermon hardware (firmware) limitation. We return NOT_SUPPORTED here,
1972 1942 * and have the limit code commented out for now.
1973 1943 *
1974 1944 * XXX If we enable the limit watermark support, we need to do checks
1975 1945 * and set the 'srq->srq_wr_limit' here, instead of returning not
1976 1946 * supported. The 'hermon_srq_modify' operation below is for resizing
1977 1947 * the SRQ only, the limit work should be done here. If this is
1978 1948 * changed to use the 'limit' field, the 'ARGSUSED' comment for this
1979 1949 * function should also be removed at that time.
1980 1950 */
1981 1951 if (flags & IBT_SRQ_SET_LIMIT) {
1982 1952 return (IBT_NOT_SUPPORTED);
1983 1953 }
1984 1954
1985 1955 /*
1986 1956 * Check the SET_SIZE flag. If not set, we simply return success here.
1987 1957 * However if it is set, we check if resize is supported and only then
1988 1958 * do we continue on with our resize processing.
1989 1959 */
1990 1960 if (!(flags & IBT_SRQ_SET_SIZE)) {
1991 1961 return (IBT_SUCCESS);
1992 1962 }
1993 1963
1994 1964 resize_supported = state->hs_ibtfinfo.hca_attr->hca_flags &
1995 1965 IBT_HCA_RESIZE_SRQ;
1996 1966
1997 1967 if ((flags & IBT_SRQ_SET_SIZE) && !resize_supported) {
1998 1968 return (IBT_NOT_SUPPORTED);
1999 1969 }
2000 1970
2001 1971 /*
2002 1972 	 * We do not support resizing an SRQ to be smaller than its current
2003 1973 * size. If a smaller (or equal) size is requested, then we simply
2004 1974 * return success, and do nothing.
2005 1975 */
2006 1976 if (size <= cur_srq_size) {
2007 1977 *ret_size_p = cur_srq_size;
2008 1978 return (IBT_SUCCESS);
2009 1979 }
2010 1980
2011 1981 status = hermon_srq_modify(state, srqhdl, size, ret_size_p,
2012 1982 HERMON_NOSLEEP);
2013 1983 if (status != DDI_SUCCESS) {
2014 1984 /* Set return value to current SRQ size */
2015 1985 *ret_size_p = cur_srq_size;
2016 1986 return (status);
2017 1987 }
2018 1988
2019 1989 return (IBT_SUCCESS);
2020 1990 }
2021 1991
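Given the decision ladder in hermon_ci_modify_srq() (limit watermark unsupported, shrink requests treated as successful no-ops, actual growth delegated to hermon_srq_modify()), an IBTF client should treat the returned size as authoritative rather than assuming its request was honored exactly. A hedged caller-side sketch, assuming the standard ibt_modify_srq() entry point:

	uint_t		real_size;
	ibt_status_t	rc;

	/* Request growth; the HCA may round up (or silently ignore a shrink) */
	rc = ibt_modify_srq(srq_hdl, IBT_SRQ_SET_SIZE, new_size, 0, &real_size);
	if (rc == IBT_NOT_SUPPORTED) {
		/* HCA cannot resize SRQs, or a limit watermark was requested */
	} else if (rc == IBT_SUCCESS) {
		/* real_size is what the SRQ can actually hold now */
	}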
2022 1992 /*
2023 1993 * hermon_ci_post_srq()
2024 1994 * Post a Work Request to the specified Shared Receive Queue (SRQ)
2025 1995 * Context: Can be called from interrupt or base context.
2026 1996 */
2027 1997 static ibt_status_t
2028 1998 hermon_ci_post_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
2029 1999 ibt_recv_wr_t *wr, uint_t num_wr, uint_t *num_posted_p)
2030 2000 {
2031 2001 hermon_state_t *state;
2032 2002 hermon_srqhdl_t srqhdl;
2033 2003 int status;
2034 2004
2035 2005 state = (hermon_state_t *)hca;
2036 2006 srqhdl = (hermon_srqhdl_t)srq;
2037 2007
2038 2008 status = hermon_post_srq(state, srqhdl, wr, num_wr, num_posted_p);
2039 2009 return (status);
2040 2010 }
2041 2011
2042 2012 /* Address translation */
2043 2013
2044 2014 struct ibc_ma_s {
2045 2015 int h_ma_addr_list_len;
2046 2016 void *h_ma_addr_list;
2047 2017 ddi_dma_handle_t h_ma_dmahdl;
2048 2018 ddi_dma_handle_t h_ma_list_hdl;
2049 2019 ddi_acc_handle_t h_ma_list_acc_hdl;
2050 2020 size_t h_ma_real_len;
2051 2021 caddr_t h_ma_kaddr;
2052 2022 ibt_phys_addr_t h_ma_list_cookie;
2053 2023 };
2054 2024
2055 2025 static ibt_status_t
2056 2026 hermon_map_mem_area_fmr(ibc_hca_hdl_t hca, ibt_va_attr_t *va_attrs,
2057 2027 uint_t list_len, ibt_pmr_attr_t *pmr, ibc_ma_hdl_t *ma_hdl_p)
2058 2028 {
2059 2029 int status;
2060 2030 ibt_status_t ibt_status;
2061 2031 ibc_ma_hdl_t ma_hdl;
2062 2032 ib_memlen_t len;
2063 2033 ddi_dma_attr_t dma_attr;
2064 2034 uint_t cookie_cnt;
2065 2035 ddi_dma_cookie_t dmacookie;
2066 2036 hermon_state_t *state;
2067 2037 uint64_t *kaddr;
2068 2038 uint64_t addr, endaddr, pagesize;
2069 2039 int i, kmflag;
2070 2040 int (*callback)(caddr_t);
2071 2041
2072 2042 if ((va_attrs->va_flags & IBT_VA_BUF) == 0) {
2073 2043 return (IBT_NOT_SUPPORTED); /* XXX - not yet implemented */
2074 2044 }
2075 2045
2076 2046 state = (hermon_state_t *)hca;
2077 2047 hermon_dma_attr_init(state, &dma_attr);
2078 2048 if (va_attrs->va_flags & IBT_VA_NOSLEEP) {
2079 2049 kmflag = KM_NOSLEEP;
2080 2050 callback = DDI_DMA_DONTWAIT;
2081 2051 } else {
2082 2052 kmflag = KM_SLEEP;
2083 2053 callback = DDI_DMA_SLEEP;
2084 2054 }
2085 2055
2086 2056 ma_hdl = kmem_zalloc(sizeof (*ma_hdl), kmflag);
2087 2057 if (ma_hdl == NULL) {
2088 2058 return (IBT_INSUFF_RESOURCE);
2089 2059 }
2090 2060 #ifdef __sparc
2091 2061 if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
2092 2062 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2093 2063
2094 2064 if (hermon_kernel_data_ro == HERMON_RO_ENABLED)
2095 2065 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2096 2066 #endif
2097 2067
2098 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ma_hdl))
2099 2068 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2100 2069 callback, NULL, &ma_hdl->h_ma_dmahdl);
2101 2070 if (status != DDI_SUCCESS) {
2102 2071 kmem_free(ma_hdl, sizeof (*ma_hdl));
2103 2072 return (IBT_INSUFF_RESOURCE);
2104 2073 }
2105 2074 status = ddi_dma_buf_bind_handle(ma_hdl->h_ma_dmahdl,
2106 2075 va_attrs->va_buf, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2107 2076 callback, NULL, &dmacookie, &cookie_cnt);
2108 2077 if (status != DDI_DMA_MAPPED) {
2109 2078 status = ibc_get_ci_failure(0);
2110 2079 goto marea_fail3;
2111 2080 }
2112 2081
2113 2082 ma_hdl->h_ma_real_len = list_len * sizeof (ibt_phys_addr_t);
2114 2083 ma_hdl->h_ma_kaddr = kmem_zalloc(ma_hdl->h_ma_real_len, kmflag);
2115 2084 if (ma_hdl->h_ma_kaddr == NULL) {
2116 2085 ibt_status = IBT_INSUFF_RESOURCE;
2117 2086 goto marea_fail4;
2118 2087 }
2119 2088
2120 2089 i = 0;
2121 2090 len = 0;
2122 2091 pagesize = PAGESIZE;
2123 2092 kaddr = (uint64_t *)(void *)ma_hdl->h_ma_kaddr;
2124 2093 while (cookie_cnt-- > 0) {
2125 2094 addr = dmacookie.dmac_laddress;
2126 2095 len += dmacookie.dmac_size;
2127 2096 endaddr = addr + (dmacookie.dmac_size - 1);
2128 2097 addr = addr & ~(pagesize - 1);
2129 2098 while (addr <= endaddr) {
2130 2099 if (i >= list_len) {
2131 2100 status = IBT_PBL_TOO_SMALL;
2132 2101 goto marea_fail5;
2133 2102 }
2134 2103 kaddr[i] = htonll(addr | HERMON_MTT_ENTRY_PRESENT);
2135 2104 i++;
2136 2105 addr += pagesize;
2137 2106 if (addr == 0) {
2138 2107 static int do_once = 1;
2139 - _NOTE(SCHEME_PROTECTS_DATA("safe sharing",
2140 - do_once))
2141 2108 if (do_once) {
2142 2109 do_once = 0;
2143 2110 cmn_err(CE_NOTE, "probable error in "
2144 2111 "dma_cookie address: map_mem_area");
2145 2112 }
2146 2113 break;
2147 2114 }
2148 2115 }
2149 2116 if (cookie_cnt != 0)
2150 2117 ddi_dma_nextcookie(ma_hdl->h_ma_dmahdl, &dmacookie);
2151 2118 }
2152 2119
2153 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*pmr))
2154 2120 pmr->pmr_addr_list = (ibt_phys_addr_t *)(void *)ma_hdl->h_ma_kaddr;
2155 2121 pmr->pmr_iova = va_attrs->va_vaddr;
2156 2122 pmr->pmr_len = len;
2157 2123 pmr->pmr_offset = va_attrs->va_vaddr & PAGEOFFSET;
2158 2124 	pmr->pmr_buf_sz = PAGESHIFT;	/* PRM says "Page Size", but... */
2159 2125 pmr->pmr_num_buf = i;
2160 2126 pmr->pmr_ma = ma_hdl;
2161 2127
2162 2128 *ma_hdl_p = ma_hdl;
2163 2129 return (IBT_SUCCESS);
2164 2130
2165 2131 marea_fail5:
2166 2132 kmem_free(ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len);
2167 2133 marea_fail4:
2168 2134 status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
2169 2135 marea_fail3:
2170 2136 ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
2171 2137 kmem_free(ma_hdl, sizeof (*ma_hdl));
2172 2138 *ma_hdl_p = NULL;
2173 2139 return (ibt_status);
2174 2140 }
2175 2141
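The cookie walk in hermon_map_mem_area_fmr() rounds each DMA cookie's start address down to a page boundary and emits one page-list entry per page touched, so a cookie can contribute one more entry than its size alone suggests (e.g. with 4 KB pages, a 0x3000-byte cookie starting at 0x12345678 touches pages 0x12345000 through 0x12348000: four entries, not three). A sketch of the per-cookie entry count implied by that loop (hypothetical helper, assuming PAGESIZE is a power of two):

	static uint_t
	pages_spanned(uint64_t addr, uint64_t size)
	{
		uint64_t first = addr & ~((uint64_t)PAGESIZE - 1);
		uint64_t last = (addr + size - 1) & ~((uint64_t)PAGESIZE - 1);

		return ((uint_t)((last - first) / PAGESIZE) + 1);
	}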
2176 2142 /*
2177 2143 * hermon_ci_map_mem_area()
2178 2144 * Context: Can be called from user or base context.
2179 2145 *
2180 2146 * Creates the memory mapping suitable for a subsequent posting of an
2181 2147 * FRWR work request. All the info about the memory area for the
2182 2148 * FRWR work request (wr member of "union ibt_reg_req_u") is filled
2183 2149 * such that the client only needs to point wr.rc.rcwr.reg_pmr to it,
2184 2150 * and then fill in the additional information only it knows.
2185 2151 *
2186 2152 * Alternatively, creates the memory mapping for FMR.
2187 2153 */
2188 2154 /* ARGSUSED */
2189 2155 static ibt_status_t
2190 2156 hermon_ci_map_mem_area(ibc_hca_hdl_t hca, ibt_va_attr_t *va_attrs,
2191 2157 void *ibtl_reserved, uint_t list_len, ibt_reg_req_t *reg_req,
2192 2158 ibc_ma_hdl_t *ma_hdl_p)
2193 2159 {
2194 2160 ibt_status_t ibt_status;
2195 2161 int status;
2196 2162 ibc_ma_hdl_t ma_hdl;
2197 2163 ibt_wr_reg_pmr_t *pmr;
2198 2164 ib_memlen_t len;
2199 2165 ddi_dma_attr_t dma_attr;
2200 2166 ddi_dma_handle_t khdl;
2201 2167 uint_t cookie_cnt;
2202 2168 ddi_dma_cookie_t dmacookie, kcookie;
2203 2169 hermon_state_t *state;
2204 2170 uint64_t *kaddr;
2205 2171 uint64_t addr, endaddr, pagesize, kcookie_paddr;
2206 2172 int i, j, kmflag;
2207 2173 int (*callback)(caddr_t);
2208 2174
2209 2175 if (va_attrs->va_flags & (IBT_VA_FMR | IBT_VA_REG_FN)) {
2210 2176 /* delegate FMR and Physical Register to other function */
2211 2177 return (hermon_map_mem_area_fmr(hca, va_attrs, list_len,
2212 2178 		    &reg_req->fn_arg, ma_hdl_p));
2213 2179 }
2214 2180
2215 2181 /* FRWR */
2216 2182
2217 2183 state = (hermon_state_t *)hca;
2218 2184 if (!(state->hs_ibtfinfo.hca_attr->hca_flags2 & IBT_HCA2_MEM_MGT_EXT))
2219 2185 return (IBT_NOT_SUPPORTED);
2220 2186 hermon_dma_attr_init(state, &dma_attr);
2221 2187 #ifdef __sparc
2222 2188 if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
2223 2189 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2224 2190
2225 2191 if (hermon_kernel_data_ro == HERMON_RO_ENABLED)
2226 2192 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2227 2193 #endif
2228 2194 if (va_attrs->va_flags & IBT_VA_NOSLEEP) {
2229 2195 kmflag = KM_NOSLEEP;
2230 2196 callback = DDI_DMA_DONTWAIT;
2231 2197 } else {
2232 2198 kmflag = KM_SLEEP;
2233 2199 callback = DDI_DMA_SLEEP;
2234 2200 }
2235 2201
2236 2202 ma_hdl = kmem_zalloc(sizeof (*ma_hdl), kmflag);
2237 2203 if (ma_hdl == NULL) {
2238 2204 return (IBT_INSUFF_RESOURCE);
2239 2205 }
2240 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ma_hdl))
2241 2206
2242 2207 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2243 2208 callback, NULL, &ma_hdl->h_ma_dmahdl);
2244 2209 if (status != DDI_SUCCESS) {
2245 2210 ibt_status = IBT_INSUFF_RESOURCE;
2246 2211 goto marea_fail0;
2247 2212 }
2248 2213 dma_attr.dma_attr_align = 64; /* as per PRM */
2249 2214 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2250 2215 callback, NULL, &ma_hdl->h_ma_list_hdl);
2251 2216 if (status != DDI_SUCCESS) {
2252 2217 ibt_status = IBT_INSUFF_RESOURCE;
2253 2218 goto marea_fail1;
2254 2219 }
2255 2220 /*
2256 2221 * Entries in the list in the last slot on each page cannot be used,
2257 2222 * so 1 extra ibt_phys_addr_t is allocated per page. We add 1 more
2258 2223 	 * to deal with the possibility that an allocation of less than one
2259 2224 	 * page straddles a page boundary.
2260 2225 */
2261 2226 status = ddi_dma_mem_alloc(ma_hdl->h_ma_list_hdl, (list_len + 1 +
2262 2227 list_len / (HERMON_PAGESIZE / sizeof (ibt_phys_addr_t))) *
2263 2228 sizeof (ibt_phys_addr_t),
2264 2229 &state->hs_reg_accattr, DDI_DMA_CONSISTENT, callback, NULL,
2265 2230 &ma_hdl->h_ma_kaddr, &ma_hdl->h_ma_real_len,
2266 2231 &ma_hdl->h_ma_list_acc_hdl);
2267 2232 if (status != DDI_SUCCESS) {
2268 2233 ibt_status = IBT_INSUFF_RESOURCE;
2269 2234 goto marea_fail2;
2270 2235 }
2271 2236 status = ddi_dma_addr_bind_handle(ma_hdl->h_ma_list_hdl, NULL,
2272 2237 ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len, DDI_DMA_RDWR |
2273 2238 DDI_DMA_CONSISTENT, callback, NULL,
2274 2239 &kcookie, &cookie_cnt);
2275 2240 if (status != DDI_SUCCESS) {
2276 2241 ibt_status = IBT_INSUFF_RESOURCE;
2277 2242 goto marea_fail3;
2278 2243 }
2279 2244 if ((kcookie.dmac_laddress & 0x3f) != 0) {
2280 2245 cmn_err(CE_NOTE, "64-byte alignment assumption wrong");
2281 2246 ibt_status = ibc_get_ci_failure(0);
2282 2247 goto marea_fail4;
2283 2248 }
2284 2249 ma_hdl->h_ma_list_cookie.p_laddr = kcookie.dmac_laddress;
2285 2250
2286 2251 if (va_attrs->va_flags & IBT_VA_BUF) {
2287 2252 status = ddi_dma_buf_bind_handle(ma_hdl->h_ma_dmahdl,
2288 2253 va_attrs->va_buf, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2289 2254 callback, NULL, &dmacookie, &cookie_cnt);
2290 2255 } else {
2291 2256 status = ddi_dma_addr_bind_handle(ma_hdl->h_ma_dmahdl,
2292 2257 va_attrs->va_as, (caddr_t)(uintptr_t)va_attrs->va_vaddr,
2293 2258 va_attrs->va_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2294 2259 callback, NULL, &dmacookie, &cookie_cnt);
2295 2260 }
2296 2261 if (status != DDI_DMA_MAPPED) {
2297 2262 ibt_status = ibc_get_ci_failure(0);
2298 2263 goto marea_fail4;
2299 2264 }
2300 2265 i = 0; /* count the number of pbl entries */
2301 2266 j = 0; /* count the number of links to next HERMON_PAGE */
2302 2267 len = 0;
2303 2268 pagesize = PAGESIZE;
2304 2269 kaddr = (uint64_t *)(void *)ma_hdl->h_ma_kaddr;
2305 2270 kcookie.dmac_size += kcookie.dmac_laddress & HERMON_PAGEOFFSET;
2306 2271 kcookie_paddr = kcookie.dmac_laddress & HERMON_PAGEMASK;
2307 2272 khdl = ma_hdl->h_ma_list_hdl;
2308 2273 while (cookie_cnt-- > 0) {
2309 2274 addr = dmacookie.dmac_laddress;
2310 2275 len += dmacookie.dmac_size;
2311 2276 endaddr = addr + (dmacookie.dmac_size - 1);
2312 2277 addr = addr & ~(pagesize - 1);
2313 2278 while (addr <= endaddr) {
2314 2279 if (i >= list_len) {
2315 2280 ibt_status = IBT_PBL_TOO_SMALL;
2316 2281 goto marea_fail5;
2317 2282 }
2318 2283 /* Deal with last entry on page. */
2319 2284 if (!((uintptr_t)&kaddr[i+j+1] & HERMON_PAGEOFFSET)) {
2320 2285 if (kcookie.dmac_size > HERMON_PAGESIZE) {
2321 2286 kcookie_paddr += HERMON_PAGESIZE;
2322 2287 kcookie.dmac_size -= HERMON_PAGESIZE;
2323 2288 } else {
2324 2289 ddi_dma_nextcookie(khdl, &kcookie);
↓ open down ↓ |
74 lines elided |
↑ open up ↑ |
2325 2290 kcookie_paddr = kcookie.dmac_laddress;
2326 2291 }
2327 2292 kaddr[i+j] = htonll(kcookie_paddr);
2328 2293 j++;
2329 2294 }
2330 2295 kaddr[i+j] = htonll(addr | HERMON_MTT_ENTRY_PRESENT);
2331 2296 i++;
2332 2297 addr += pagesize;
2333 2298 if (addr == 0) {
2334 2299 static int do_once = 1;
2335 - _NOTE(SCHEME_PROTECTS_DATA("safe sharing",
2336 - do_once))
2337 2300 if (do_once) {
2338 2301 do_once = 0;
2339 2302 cmn_err(CE_NOTE, "probable error in "
2340 2303 "dma_cookie address: map_mem_area");
2341 2304 }
2342 2305 break;
2343 2306 }
2344 2307 }
2345 2308 if (cookie_cnt != 0)
2346 2309 ddi_dma_nextcookie(ma_hdl->h_ma_dmahdl, &dmacookie);
2347 2310 }
2348 2311
2349 2312 	pmr = &reg_req->wr;
2350 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*pmr))
2351 2313 pmr->pmr_len = len;
2352 2314 pmr->pmr_offset = va_attrs->va_vaddr & PAGEOFFSET;
2353 2315 pmr->pmr_buf_sz = PAGESHIFT; /* PRM says "Page Size", but... */
2354 2316 pmr->pmr_num_buf = i;
2355 2317 pmr->pmr_addr_list = &ma_hdl->h_ma_list_cookie;
2356 2318
2357 2319 *ma_hdl_p = ma_hdl;
2358 2320 return (IBT_SUCCESS);
2359 2321
2360 2322 marea_fail5:
2361 2323 status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
2362 2324 if (status != DDI_SUCCESS)
2363 2325 HERMON_WARNING(state, "failed to unbind DMA mapping");
2364 2326 marea_fail4:
2365 2327 status = ddi_dma_unbind_handle(ma_hdl->h_ma_list_hdl);
2366 2328 if (status != DDI_SUCCESS)
2367 2329 HERMON_WARNING(state, "failed to unbind DMA mapping");
2368 2330 marea_fail3:
2369 2331 ddi_dma_mem_free(&ma_hdl->h_ma_list_acc_hdl);
2370 2332 marea_fail2:
2371 2333 ddi_dma_free_handle(&ma_hdl->h_ma_list_hdl);
2372 2334 marea_fail1:
2373 2335 ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
2374 2336 marea_fail0:
2375 2337 kmem_free(ma_hdl, sizeof (*ma_hdl));
2376 2338 *ma_hdl_p = NULL;
2377 2339 return (ibt_status);
2378 2340 }
2379 2341
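The FRWR variant's list allocation accounts for Hermon's chained page lists: the last slot of each HERMON_PAGE holds the physical address of the next page rather than an MTT entry, hence the extra entries in the ddi_dma_mem_alloc() size above and the j-indexed link writes in the cookie loop. As a worked example, assuming HERMON_PAGESIZE is 4096 and sizeof (ibt_phys_addr_t) is 8 (so 512 slots per page): a list_len of 1024 allocates 1024 + 1 + 1024/512 = 1027 entries, i.e. 8216 bytes, covering the two chain links plus one spare for a sub-page allocation that straddles a page boundary.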
2380 2342 /*
2381 2343 * hermon_ci_unmap_mem_area()
2382 2344 * Unmap the memory area
2383 2345 * Context: Can be called from interrupt or base context.
2384 2346 */
2385 2347 /* ARGSUSED */
2386 2348 static ibt_status_t
2387 2349 hermon_ci_unmap_mem_area(ibc_hca_hdl_t hca, ibc_ma_hdl_t ma_hdl)
2388 2350 {
2389 2351 int status;
2390 2352 hermon_state_t *state;
2391 2353
2392 2354 if (ma_hdl == NULL) {
2393 2355 return (IBT_MA_HDL_INVALID);
2394 2356 }
2395 2357 state = (hermon_state_t *)hca;
2396 2358 if (ma_hdl->h_ma_list_hdl != NULL) {
2397 2359 status = ddi_dma_unbind_handle(ma_hdl->h_ma_list_hdl);
2398 2360 if (status != DDI_SUCCESS)
2399 2361 HERMON_WARNING(state, "failed to unbind DMA mapping");
2400 2362 ddi_dma_mem_free(&ma_hdl->h_ma_list_acc_hdl);
2401 2363 ddi_dma_free_handle(&ma_hdl->h_ma_list_hdl);
2402 2364 } else {
2403 2365 kmem_free(ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len);
2404 2366 }
2405 2367 status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
2406 2368 if (status != DDI_SUCCESS)
2407 2369 HERMON_WARNING(state, "failed to unbind DMA mapping");
2408 2370 ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
2409 2371 kmem_free(ma_hdl, sizeof (*ma_hdl));
2410 2372 return (IBT_SUCCESS);
2411 2373 }
2412 2374
2413 2375 struct ibc_mi_s {
2414 2376 int imh_len;
2415 2377 ddi_dma_handle_t imh_dmahandle[1];
2416 2378 };
2417 -_NOTE(SCHEME_PROTECTS_DATA("safe sharing",
2418 - ibc_mi_s::imh_len
2419 - ibc_mi_s::imh_dmahandle))
2420 2379
2421 -
2422 2380 /*
2423 2381 * hermon_ci_map_mem_iov()
2424 2382 * Map the memory
2425 2383 * Context: Can be called from interrupt or base context.
2426 2384 */
2427 2385 /* ARGSUSED */
2428 2386 static ibt_status_t
2429 2387 hermon_ci_map_mem_iov(ibc_hca_hdl_t hca, ibt_iov_attr_t *iov_attr,
2430 2388 ibt_all_wr_t *wr, ibc_mi_hdl_t *mi_hdl_p)
2431 2389 {
2432 2390 int status;
2433 2391 int i, j, nds, max_nds;
2434 2392 uint_t len;
2435 2393 ibt_status_t ibt_status;
2436 2394 ddi_dma_handle_t dmahdl;
2437 2395 ddi_dma_cookie_t dmacookie;
2438 2396 ddi_dma_attr_t dma_attr;
2439 2397 uint_t cookie_cnt;
2440 2398 ibc_mi_hdl_t mi_hdl;
2441 2399 ibt_lkey_t rsvd_lkey;
2442 2400 ibt_wr_ds_t *sgl;
2443 2401 hermon_state_t *state;
2444 2402 int kmflag;
2445 2403 int (*callback)(caddr_t);
2446 2404
2447 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wr))
2448 -
2449 2405 state = (hermon_state_t *)hca;
2450 2406 hermon_dma_attr_init(state, &dma_attr);
2451 2407 #ifdef __sparc
2452 2408 if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
2453 2409 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2454 2410
2455 2411 if (hermon_kernel_data_ro == HERMON_RO_ENABLED)
2456 2412 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2457 2413 #endif
2458 2414
2459 2415 nds = 0;
2460 2416 max_nds = iov_attr->iov_wr_nds;
2461 2417 if (iov_attr->iov_lso_hdr_sz)
2462 2418 max_nds -= (iov_attr->iov_lso_hdr_sz + sizeof (uint32_t) +
2463 2419 0xf) >> 4; /* 0xf is for rounding up to a multiple of 16 */
2464 2420 rsvd_lkey = (iov_attr->iov_flags & IBT_IOV_ALT_LKEY) ?
2465 2421 iov_attr->iov_alt_lkey : state->hs_devlim.rsv_lkey;
2466 2422 if ((iov_attr->iov_flags & IBT_IOV_NOSLEEP) == 0) {
2467 2423 kmflag = KM_SLEEP;
2468 2424 callback = DDI_DMA_SLEEP;
2469 2425 } else {
2470 2426 kmflag = KM_NOSLEEP;
2471 2427 callback = DDI_DMA_DONTWAIT;
2472 2428 }
2473 2429
2474 2430 if (iov_attr->iov_flags & IBT_IOV_BUF) {
2475 2431 mi_hdl = kmem_alloc(sizeof (*mi_hdl), kmflag);
2476 2432 if (mi_hdl == NULL)
2477 2433 return (IBT_INSUFF_RESOURCE);
2478 2434 sgl = wr->send.wr_sgl;
2479 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))
2480 -
2481 2435 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2482 2436 callback, NULL, &dmahdl);
2483 2437 if (status != DDI_SUCCESS) {
2484 2438 kmem_free(mi_hdl, sizeof (*mi_hdl));
2485 2439 return (IBT_INSUFF_RESOURCE);
2486 2440 }
2487 2441 status = ddi_dma_buf_bind_handle(dmahdl, iov_attr->iov_buf,
2488 2442 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
2489 2443 &dmacookie, &cookie_cnt);
2490 2444 if (status != DDI_DMA_MAPPED) {
2491 2445 ddi_dma_free_handle(&dmahdl);
2492 2446 kmem_free(mi_hdl, sizeof (*mi_hdl));
2493 2447 return (ibc_get_ci_failure(0));
2494 2448 }
2495 2449 while (cookie_cnt-- > 0) {
2496 2450 if (nds > max_nds) {
2497 2451 status = ddi_dma_unbind_handle(dmahdl);
2498 2452 if (status != DDI_SUCCESS)
2499 2453 HERMON_WARNING(state, "failed to "
2500 2454 "unbind DMA mapping");
2501 2455 ddi_dma_free_handle(&dmahdl);
2502 2456 return (IBT_SGL_TOO_SMALL);
2503 2457 }
2504 2458 sgl[nds].ds_va = dmacookie.dmac_laddress;
2505 2459 sgl[nds].ds_key = rsvd_lkey;
2506 2460 sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
2507 2461 nds++;
2508 2462 if (cookie_cnt != 0)
2509 2463 ddi_dma_nextcookie(dmahdl, &dmacookie);
2510 2464 }
2511 2465 wr->send.wr_nds = nds;
2512 2466 mi_hdl->imh_len = 1;
2513 2467 mi_hdl->imh_dmahandle[0] = dmahdl;
2514 2468 *mi_hdl_p = mi_hdl;
2515 2469 return (IBT_SUCCESS);
2516 2470 }
2517 2471
2518 2472 if (iov_attr->iov_flags & IBT_IOV_RECV)
2519 2473 sgl = wr->recv.wr_sgl;
2520 2474 else
2521 2475 sgl = wr->send.wr_sgl;
2522 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))
2523 2476
2524 2477 len = iov_attr->iov_list_len;
2525 2478 for (i = 0, j = 0; j < len; j++) {
2526 2479 if (iov_attr->iov[j].iov_len == 0)
2527 2480 continue;
2528 2481 i++;
2529 2482 }
2530 2483 mi_hdl = kmem_alloc(sizeof (*mi_hdl) +
2531 2484 (i - 1) * sizeof (ddi_dma_handle_t), kmflag);
2532 2485 if (mi_hdl == NULL)
2533 2486 return (IBT_INSUFF_RESOURCE);
2534 2487 mi_hdl->imh_len = i;
2535 2488 for (i = 0, j = 0; j < len; j++) {
2536 2489 if (iov_attr->iov[j].iov_len == 0)
2537 2490 continue;
2538 2491 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2539 2492 callback, NULL, &dmahdl);
2540 2493 if (status != DDI_SUCCESS) {
2541 2494 ibt_status = IBT_INSUFF_RESOURCE;
2542 2495 goto fail2;
2543 2496 }
2544 2497 status = ddi_dma_addr_bind_handle(dmahdl, iov_attr->iov_as,
2545 2498 iov_attr->iov[j].iov_addr, iov_attr->iov[j].iov_len,
2546 2499 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
2547 2500 &dmacookie, &cookie_cnt);
2548 2501 if (status != DDI_DMA_MAPPED) {
2549 2502 ibt_status = ibc_get_ci_failure(0);
2550 2503 goto fail1;
2551 2504 }
2552 2505 if (nds + cookie_cnt > max_nds) {
2553 2506 ibt_status = IBT_SGL_TOO_SMALL;
2554 2507 goto fail2;
2555 2508 }
2556 2509 while (cookie_cnt-- > 0) {
2557 2510 sgl[nds].ds_va = dmacookie.dmac_laddress;
2558 2511 sgl[nds].ds_key = rsvd_lkey;
2559 2512 sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
2560 2513 nds++;
2561 2514 if (cookie_cnt != 0)
2562 2515 ddi_dma_nextcookie(dmahdl, &dmacookie);
2563 2516 }
2564 2517 mi_hdl->imh_dmahandle[i] = dmahdl;
2565 2518 i++;
2566 2519 }
2567 2520
2568 2521 if (iov_attr->iov_flags & IBT_IOV_RECV)
2569 2522 wr->recv.wr_nds = nds;
2570 2523 else
2571 2524 wr->send.wr_nds = nds;
2572 2525 *mi_hdl_p = mi_hdl;
2573 2526 return (IBT_SUCCESS);
2574 2527
2575 2528 fail1:
2576 2529 ddi_dma_free_handle(&dmahdl);
2577 2530 fail2:
2578 2531 while (--i >= 0) {
2579 2532 status = ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]);
2580 2533 if (status != DDI_SUCCESS)
2581 2534 HERMON_WARNING(state, "failed to unbind DMA mapping");
2582 2535 ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]);
2583 2536 }
2584 2537 kmem_free(mi_hdl, sizeof (*mi_hdl) +
2585 2538 (len - 1) * sizeof (ddi_dma_handle_t));
2586 2539 *mi_hdl_p = NULL;
2587 2540 return (ibt_status);
2588 2541 }
2589 2542
2590 2543 /*
2591 2544 * hermon_ci_unmap_mem_iov()
2592 2545 * Unmap the memory
2593 2546 * Context: Can be called from interrupt or base context.
2594 2547 */
2595 2548 static ibt_status_t
2596 2549 hermon_ci_unmap_mem_iov(ibc_hca_hdl_t hca, ibc_mi_hdl_t mi_hdl)
2597 2550 {
2598 2551 int status, i;
2599 2552 hermon_state_t *state;
2600 2553
2601 2554 state = (hermon_state_t *)hca;
2602 2555
2603 2556 for (i = mi_hdl->imh_len; --i >= 0; ) {
2604 2557 status = ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]);
2605 2558 if (status != DDI_SUCCESS)
2606 2559 HERMON_WARNING(state, "failed to unbind DMA mapping");
2607 2560 ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]);
2608 2561 }
2609 2562 kmem_free(mi_hdl, sizeof (*mi_hdl) +
2610 2563 (mi_hdl->imh_len - 1) * sizeof (ddi_dma_handle_t));
2611 2564 return (IBT_SUCCESS);
2612 2565 }
2613 2566
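struct ibc_mi_s above uses the classic pre-C99 "struct hack": a one-element trailing array over-allocated to hold imh_len handles, which is why both the kmem_alloc() and kmem_free() calls size the object as sizeof (*mi_hdl) plus (n - 1) extra ddi_dma_handle_t slots. A minimal sketch of the idiom with hypothetical names:

	struct vec_s {
		int			v_len;
		ddi_dma_handle_t	v_hdl[1];	/* really v_len entries */
	};

	struct vec_s *vp = kmem_alloc(sizeof (*vp) +
	    (n - 1) * sizeof (ddi_dma_handle_t), KM_SLEEP);
	vp->v_len = n;
	/* ... use vp->v_hdl[0 .. n-1] ... */
	kmem_free(vp, sizeof (*vp) + (vp->v_len - 1) * sizeof (ddi_dma_handle_t));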
2614 2567 /*
2615 2568 * hermon_ci_alloc_lkey()
2616 2569 * Allocate an empty memory region for use with FRWR.
2617 2570 * Context: Can be called from user or base context.
2618 2571 */
2619 2572 /* ARGSUSED */
2620 2573 static ibt_status_t
2621 2574 hermon_ci_alloc_lkey(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2622 2575 ibt_lkey_flags_t flags, uint_t list_sz, ibc_mr_hdl_t *mr_p,
2623 2576 ibt_pmr_desc_t *mem_desc_p)
2624 2577 {
2625 2578 hermon_state_t *state;
2626 2579 hermon_pdhdl_t pdhdl;
2627 2580 hermon_mrhdl_t mrhdl;
2628 2581 int status;
2629 2582
2630 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mem_desc_p))
2631 -
2632 2583 ASSERT(mr_p != NULL);
2633 2584 ASSERT(mem_desc_p != NULL);
2634 2585
2635 2586 state = (hermon_state_t *)hca;
2636 2587 pdhdl = (hermon_pdhdl_t)pd;
2637 2588
2638 2589 if (!(state->hs_ibtfinfo.hca_attr->hca_flags2 & IBT_HCA2_MEM_MGT_EXT))
2639 2590 return (IBT_NOT_SUPPORTED);
2640 2591
2641 2592 status = hermon_mr_alloc_lkey(state, pdhdl, flags, list_sz, &mrhdl);
2642 2593 if (status != DDI_SUCCESS) {
2643 2594 return (status);
2644 2595 }
2645 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
2646 2596
2647 2597 /* Fill in the mem_desc_p structure */
2648 2598 mem_desc_p->pmd_iova = 0;
2649 2599 mem_desc_p->pmd_phys_buf_list_sz = list_sz;
2650 2600 mem_desc_p->pmd_lkey = mrhdl->mr_lkey;
2651 2601 /* Only set RKey if remote access was requested */
2652 2602 if (flags & IBT_KEY_REMOTE) {
2653 2603 mem_desc_p->pmd_rkey = mrhdl->mr_rkey;
2654 2604 }
2655 2605 mem_desc_p->pmd_sync_required = B_FALSE;
2656 2606
2657 2607 /* Return the Hermon MR handle */
2658 2608 *mr_p = (ibc_mr_hdl_t)mrhdl;
2659 2609 return (IBT_SUCCESS);
2660 2610 }
2661 2611
2662 2612 /* Physical Register Memory Region */
2663 2613 /*
2664 2614 * hermon_ci_register_physical_mr()
2665 2615 */
2666 2616 /* ARGSUSED */
2667 2617 static ibt_status_t
2668 2618 hermon_ci_register_physical_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2669 2619 ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
2670 2620 ibt_pmr_desc_t *mem_desc_p)
2671 2621 {
2672 2622 return (IBT_NOT_SUPPORTED);
2673 2623 }
2674 2624
2675 2625 /*
2676 2626 * hermon_ci_reregister_physical_mr()
2677 2627 */
2678 2628 /* ARGSUSED */
2679 2629 static ibt_status_t
2680 2630 hermon_ci_reregister_physical_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
2681 2631 ibc_pd_hdl_t pd, ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved,
2682 2632 ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mr_desc_p)
2683 2633 {
2684 2634 return (IBT_NOT_SUPPORTED);
2685 2635 }
2686 2636
2687 2637 /* Mellanox FMR Support */
2688 2638 /*
2689 2639 * hermon_ci_create_fmr_pool()
2690 2640 * Creates a pool of memory regions suitable for FMR registration
2691 2641 * Context: Can be called from base context only
2692 2642 */
2693 2643 static ibt_status_t
2694 2644 hermon_ci_create_fmr_pool(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2695 2645 ibt_fmr_pool_attr_t *params, ibc_fmr_pool_hdl_t *fmr_pool_p)
2696 2646 {
2697 2647 hermon_state_t *state;
2698 2648 hermon_pdhdl_t pdhdl;
2699 2649 hermon_fmrhdl_t fmrpoolhdl;
2700 2650 int status;
2701 2651
2702 2652 state = (hermon_state_t *)hca;
2703 2653
2704 2654 /* Check for valid PD handle pointer */
2705 2655 if (pd == NULL) {
2706 2656 return (IBT_PD_HDL_INVALID);
2707 2657 }
2708 2658
2709 2659 pdhdl = (hermon_pdhdl_t)pd;
2710 2660
2711 2661 /*
2712 2662 * Validate the access flags. Both Remote Write and Remote Atomic
2713 2663 * require the Local Write flag to be set
2714 2664 */
2715 2665 if (((params->fmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
2716 2666 (params->fmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
2717 2667 !(params->fmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
2718 2668 return (IBT_MR_ACCESS_REQ_INVALID);
2719 2669 }
2720 2670
2721 2671 status = hermon_create_fmr_pool(state, pdhdl, params, &fmrpoolhdl);
2722 2672 if (status != DDI_SUCCESS) {
2723 2673 return (status);
2724 2674 }
2725 2675
2726 2676 /* Set fmr_pool from hermon handle */
2727 2677 *fmr_pool_p = (ibc_fmr_pool_hdl_t)fmrpoolhdl;
2728 2678
2729 2679 return (IBT_SUCCESS);
2730 2680 }
2731 2681
2732 2682 /*
2733 2683 * hermon_ci_destroy_fmr_pool()
2734 2684 * Free all resources associated with an FMR pool.
2735 2685 * Context: Can be called from base context only.
2736 2686 */
2737 2687 static ibt_status_t
2738 2688 hermon_ci_destroy_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
2739 2689 {
2740 2690 hermon_state_t *state;
2741 2691 hermon_fmrhdl_t fmrpoolhdl;
2742 2692 int status;
2743 2693
2744 2694 state = (hermon_state_t *)hca;
2745 2695 fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;
2746 2696
2747 2697 status = hermon_destroy_fmr_pool(state, fmrpoolhdl);
2748 2698 return (status);
2749 2699 }
2750 2700
2751 2701 /*
2752 2702 * hermon_ci_flush_fmr_pool()
2753 2703 * Force a flush of the memory tables, cleaning up used FMR resources.
2754 2704 * Context: Can be called from interrupt or base context.
2755 2705 */
2756 2706 static ibt_status_t
2757 2707 hermon_ci_flush_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
2758 2708 {
2759 2709 hermon_state_t *state;
2760 2710 hermon_fmrhdl_t fmrpoolhdl;
2761 2711 int status;
2762 2712
2763 2713 state = (hermon_state_t *)hca;
2764 2714
2765 2715 fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;
2766 2716 status = hermon_flush_fmr_pool(state, fmrpoolhdl);
2767 2717 return (status);
2768 2718 }
2769 2719
2770 2720 /*
2771 2721 * hermon_ci_register_physical_fmr()
2772 2722 * From the 'pool' of FMR regions passed in, performs register physical
2773 2723 * operation.
2774 2724 * Context: Can be called from interrupt or base context.
2775 2725 */
2776 2726 /* ARGSUSED */
2777 2727 static ibt_status_t
2778 2728 hermon_ci_register_physical_fmr(ibc_hca_hdl_t hca,
2779 2729 ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
2780 2730 void *ibtl_reserved, ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mem_desc_p)
2781 2731 {
2782 2732 hermon_state_t *state;
2783 2733 hermon_mrhdl_t mrhdl;
2784 2734 hermon_fmrhdl_t fmrpoolhdl;
2785 2735 int status;
2786 2736
2787 2737 ASSERT(mem_pattr != NULL);
2788 2738 ASSERT(mr_p != NULL);
2789 2739 ASSERT(mem_desc_p != NULL);
2790 2740
2791 2741 /* Grab the Hermon softstate pointer */
2792 2742 state = (hermon_state_t *)hca;
2793 2743
2794 2744 fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;
2795 2745
2796 2746 status = hermon_register_physical_fmr(state, fmrpoolhdl, mem_pattr,
2797 2747 &mrhdl, mem_desc_p);
2798 2748 if (status != DDI_SUCCESS) {
2799 2749 return (status);
2800 2750 }
2801 2751
2802 2752 /*
2803 2753 	 * If the region is mapped for streaming (i.e. noncoherent), then
2804 2754 	 * sync is required
2805 2755 */
2806 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mem_desc_p))
2807 2756 mem_desc_p->pmd_sync_required = (mrhdl->mr_bindinfo.bi_flags &
2808 2757 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
2809 2758 if (mem_desc_p->pmd_sync_required == B_TRUE) {
2810 2759 /* Fill in DMA handle for future sync operations */
2811 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(mrhdl->mr_bindinfo))
2812 2760 mrhdl->mr_bindinfo.bi_dmahdl =
2813 2761 (ddi_dma_handle_t)mem_pattr->pmr_ma;
2814 2762 }
2815 2763
2816 2764 /* Return the Hermon MR handle */
2817 2765 *mr_p = (ibc_mr_hdl_t)mrhdl;
2818 2766
2819 2767 return (IBT_SUCCESS);
2820 2768 }
2821 2769
2822 2770 /*
2823 2771 * hermon_ci_deregister_fmr()
2824 2772 * Moves an FMR (specified by 'mr') to the deregistered state.
2825 2773 * Context: Can be called from base context only.
2826 2774 */
2827 2775 static ibt_status_t
2828 2776 hermon_ci_deregister_fmr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
2829 2777 {
2830 2778 hermon_state_t *state;
2831 2779 hermon_mrhdl_t mrhdl;
2832 2780 int status;
2833 2781
2834 2782 /* Grab the Hermon softstate pointer */
2835 2783 state = (hermon_state_t *)hca;
2836 2784 mrhdl = (hermon_mrhdl_t)mr;
2837 2785
2838 2786 /*
2839 2787 * Deregister the memory region, either "unmap" the FMR or deregister
2840 2788 * the normal memory region.
2841 2789 */
2842 2790 status = hermon_deregister_fmr(state, mrhdl);
2843 2791 return (status);
2844 2792 }
2845 2793
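The FMR entry points above form a pool lifecycle: create a pool against a PD, register physical regions out of it, deregister to return them to the pool, flush to reclaim dirty regions, and destroy when done. A hedged caller-side sketch using the corresponding ibtl interfaces (hca_hdl, pd_hdl, pool_attr, and pmr_attr are assumed caller state; error handling elided):

	ibt_fmr_pool_hdl_t	pool;
	ibt_mr_hdl_t		mr;
	ibt_pmr_desc_t		desc;

	(void) ibt_create_fmr_pool(hca_hdl, pd_hdl, &pool_attr, &pool);
	(void) ibt_register_physical_fmr(hca_hdl, pool, &pmr_attr, &mr, &desc);
	/* ... post work requests using desc.pmd_lkey / desc.pmd_rkey ... */
	(void) ibt_deregister_fmr(hca_hdl, mr);		/* back to the pool */
	(void) ibt_flush_fmr_pool(hca_hdl, pool);
	(void) ibt_destroy_fmr_pool(hca_hdl, pool);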
2846 2794 static int
2847 2795 hermon_mem_alloc(hermon_state_t *state, size_t size, ibt_mr_flags_t flags,
2848 2796 caddr_t *kaddrp, ibc_mem_alloc_hdl_t *mem_hdl)
2849 2797 {
2850 2798 ddi_dma_handle_t dma_hdl;
2851 2799 ddi_dma_attr_t dma_attr;
2852 2800 ddi_acc_handle_t acc_hdl;
2853 2801 size_t real_len;
2854 2802 int status;
2855 2803 int (*ddi_cb)(caddr_t);
2856 2804 ibc_mem_alloc_hdl_t mem_alloc_hdl;
2857 2805
2858 2806 hermon_dma_attr_init(state, &dma_attr);
2859 2807
2860 2808 ddi_cb = (flags & IBT_MR_NOSLEEP) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
2861 2809
2862 2810 /* Allocate a DMA handle */
2863 2811 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr, ddi_cb,
2864 2812 NULL, &dma_hdl);
2865 2813 if (status != DDI_SUCCESS) {
2866 2814 return (DDI_FAILURE);
2867 2815 }
2868 2816
2869 2817 /* Allocate DMA memory */
2870 2818 status = ddi_dma_mem_alloc(dma_hdl, size,
2871 2819 &state->hs_reg_accattr, DDI_DMA_CONSISTENT, ddi_cb,
2872 2820 NULL, kaddrp, &real_len, &acc_hdl);
2873 2821 if (status != DDI_SUCCESS) {
2874 2822 ddi_dma_free_handle(&dma_hdl);
2875 2823 return (DDI_FAILURE);
2876 2824 }
2877 2825
2878 2826 /* Package the hermon_dma_info contents and return */
2879 2827 mem_alloc_hdl = kmem_alloc(sizeof (**mem_hdl),
2880 2828 (flags & IBT_MR_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP);
2881 2829 if (mem_alloc_hdl == NULL) {
2882 2830 ddi_dma_mem_free(&acc_hdl);
2883 2831 ddi_dma_free_handle(&dma_hdl);
2884 2832 return (DDI_FAILURE);
2885 2833 }
2886 2834 mem_alloc_hdl->ibc_dma_hdl = dma_hdl;
2887 2835 mem_alloc_hdl->ibc_acc_hdl = acc_hdl;
2888 2836
2889 2837 *mem_hdl = mem_alloc_hdl;
2890 2838
2891 2839 return (DDI_SUCCESS);
2892 2840 }
2893 2841
2894 2842 /*
2895 2843 * hermon_ci_alloc_io_mem()
2896 2844 * Allocate dma-able memory
2897 2845 *
2898 2846 */
2899 2847 static ibt_status_t
2900 2848 hermon_ci_alloc_io_mem(ibc_hca_hdl_t hca, size_t size, ibt_mr_flags_t mr_flag,
2901 2849 caddr_t *kaddrp, ibc_mem_alloc_hdl_t *mem_alloc_hdl_p)
2902 2850 {
2903 2851 hermon_state_t *state;
2904 2852 int status;
2905 2853
2906 2854 /* Grab the Hermon softstate pointer and mem handle */
2907 2855 state = (hermon_state_t *)hca;
2908 2856
2909 2857 /* Allocate the memory and handles */
2910 2858 status = hermon_mem_alloc(state, size, mr_flag, kaddrp,
2911 2859 mem_alloc_hdl_p);
2912 2860
2913 2861 if (status != DDI_SUCCESS) {
2914 2862 *mem_alloc_hdl_p = NULL;
2915 2863 *kaddrp = NULL;
2916 2864 return (status);
2917 2865 }
2918 2866
2919 2867 return (IBT_SUCCESS);
2920 2868 }
2921 2869
2922 2870
2923 2871 /*
2924 2872 * hermon_ci_free_io_mem()
2925 2873  *	Unbind handle and free the memory
2926 2874 */
2927 2875 /* ARGSUSED */
2928 2876 static ibt_status_t
2929 2877 hermon_ci_free_io_mem(ibc_hca_hdl_t hca, ibc_mem_alloc_hdl_t mem_alloc_hdl)
2930 2878 {
2931 2879 /* Unbind the handles and free the memory */
2932 2880 (void) ddi_dma_unbind_handle(mem_alloc_hdl->ibc_dma_hdl);
2933 2881 ddi_dma_mem_free(&mem_alloc_hdl->ibc_acc_hdl);
2934 2882 ddi_dma_free_handle(&mem_alloc_hdl->ibc_dma_hdl);
2935 2883 kmem_free(mem_alloc_hdl, sizeof (*mem_alloc_hdl));
2936 2884
2937 2885 return (IBT_SUCCESS);
2938 2886 }