Print this page
8368 remove warlock leftovers from usr/src/uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/ib/adapters/tavor/tavor_ci.c
+++ new/usr/src/uts/common/io/ib/adapters/tavor/tavor_ci.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * tavor_ci.c
28 28 * Tavor Channel Interface (CI) Routines
29 29 *
30 30 * Implements all the routines necessary to interface with the IBTF.
31 31 * Pointers to all of these functions are passed to the IBTF at attach()
32 32 * time in the ibc_operations_t structure. These functions include all
33 33 * of the necessary routines to implement the required InfiniBand "verbs"
34 34 * and additional IBTF-specific interfaces.
35 35 */
36 36
37 37 #include <sys/types.h>
38 38 #include <sys/conf.h>
39 39 #include <sys/ddi.h>
40 40 #include <sys/sunddi.h>
41 41
42 42 #include <sys/ib/adapters/tavor/tavor.h>
43 43
44 44 /* HCA and port related operations */
45 45 static ibt_status_t tavor_ci_query_hca_ports(ibc_hca_hdl_t, uint8_t,
46 46 ibt_hca_portinfo_t *);
47 47 static ibt_status_t tavor_ci_modify_ports(ibc_hca_hdl_t, uint8_t,
48 48 ibt_port_modify_flags_t, uint8_t);
49 49 static ibt_status_t tavor_ci_modify_system_image(ibc_hca_hdl_t, ib_guid_t);
50 50
51 51 /* Protection Domains */
52 52 static ibt_status_t tavor_ci_alloc_pd(ibc_hca_hdl_t, ibt_pd_flags_t,
53 53 ibc_pd_hdl_t *);
54 54 static ibt_status_t tavor_ci_free_pd(ibc_hca_hdl_t, ibc_pd_hdl_t);
55 55
56 56 /* Reliable Datagram Domains */
57 57 static ibt_status_t tavor_ci_alloc_rdd(ibc_hca_hdl_t, ibc_rdd_flags_t,
58 58 ibc_rdd_hdl_t *);
59 59 static ibt_status_t tavor_ci_free_rdd(ibc_hca_hdl_t, ibc_rdd_hdl_t);
60 60
61 61 /* Address Handles */
62 62 static ibt_status_t tavor_ci_alloc_ah(ibc_hca_hdl_t, ibt_ah_flags_t,
63 63 ibc_pd_hdl_t, ibt_adds_vect_t *, ibc_ah_hdl_t *);
64 64 static ibt_status_t tavor_ci_free_ah(ibc_hca_hdl_t, ibc_ah_hdl_t);
65 65 static ibt_status_t tavor_ci_query_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
66 66 ibc_pd_hdl_t *, ibt_adds_vect_t *);
67 67 static ibt_status_t tavor_ci_modify_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
68 68 ibt_adds_vect_t *);
69 69
70 70 /* Queue Pairs */
71 71 static ibt_status_t tavor_ci_alloc_qp(ibc_hca_hdl_t, ibtl_qp_hdl_t,
72 72 ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *, ib_qpn_t *,
73 73 ibc_qp_hdl_t *);
74 74 static ibt_status_t tavor_ci_alloc_special_qp(ibc_hca_hdl_t, uint8_t,
75 75 ibtl_qp_hdl_t, ibt_sqp_type_t, ibt_qp_alloc_attr_t *,
76 76 ibt_chan_sizes_t *, ibc_qp_hdl_t *);
77 77 static ibt_status_t tavor_ci_alloc_qp_range(ibc_hca_hdl_t, uint_t,
78 78 ibtl_qp_hdl_t *, ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *,
79 79 ibc_cq_hdl_t *, ibc_cq_hdl_t *, ib_qpn_t *, ibc_qp_hdl_t *);
80 80 static ibt_status_t tavor_ci_free_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
81 81 ibc_free_qp_flags_t, ibc_qpn_hdl_t *);
82 82 static ibt_status_t tavor_ci_release_qpn(ibc_hca_hdl_t, ibc_qpn_hdl_t);
83 83 static ibt_status_t tavor_ci_query_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
84 84 ibt_qp_query_attr_t *);
85 85 static ibt_status_t tavor_ci_modify_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
86 86 ibt_cep_modify_flags_t, ibt_qp_info_t *, ibt_queue_sizes_t *);
87 87
88 88 /* Completion Queues */
89 89 static ibt_status_t tavor_ci_alloc_cq(ibc_hca_hdl_t, ibt_cq_hdl_t,
90 90 ibt_cq_attr_t *, ibc_cq_hdl_t *, uint_t *);
91 91 static ibt_status_t tavor_ci_free_cq(ibc_hca_hdl_t, ibc_cq_hdl_t);
92 92 static ibt_status_t tavor_ci_query_cq(ibc_hca_hdl_t, ibc_cq_hdl_t, uint_t *,
93 93 uint_t *, uint_t *, ibt_cq_handler_id_t *);
94 94 static ibt_status_t tavor_ci_resize_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
95 95 uint_t, uint_t *);
96 96 static ibt_status_t tavor_ci_modify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
97 97 uint_t, uint_t, ibt_cq_handler_id_t);
98 98 static ibt_status_t tavor_ci_alloc_cq_sched(ibc_hca_hdl_t,
99 99 ibt_cq_sched_attr_t *, ibc_sched_hdl_t *);
100 100 static ibt_status_t tavor_ci_free_cq_sched(ibc_hca_hdl_t, ibc_sched_hdl_t);
101 101
102 102 /* EE Contexts */
103 103 static ibt_status_t tavor_ci_alloc_eec(ibc_hca_hdl_t, ibc_eec_flags_t,
104 104 ibt_eec_hdl_t, ibc_rdd_hdl_t, ibc_eec_hdl_t *);
105 105 static ibt_status_t tavor_ci_free_eec(ibc_hca_hdl_t, ibc_eec_hdl_t);
106 106 static ibt_status_t tavor_ci_query_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
107 107 ibt_eec_query_attr_t *);
108 108 static ibt_status_t tavor_ci_modify_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
109 109 ibt_cep_modify_flags_t, ibt_eec_info_t *);
110 110
111 111 /* Memory Registration */
112 112 static ibt_status_t tavor_ci_register_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
113 113 ibt_mr_attr_t *, void *, ibc_mr_hdl_t *, ibt_mr_desc_t *);
114 114 static ibt_status_t tavor_ci_register_buf(ibc_hca_hdl_t, ibc_pd_hdl_t,
115 115 ibt_smr_attr_t *, struct buf *, void *, ibt_mr_hdl_t *, ibt_mr_desc_t *);
116 116 static ibt_status_t tavor_ci_register_shared_mr(ibc_hca_hdl_t,
117 117 ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_smr_attr_t *, void *,
118 118 ibc_mr_hdl_t *, ibt_mr_desc_t *);
119 119 static ibt_status_t tavor_ci_deregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t);
120 120 static ibt_status_t tavor_ci_query_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
121 121 ibt_mr_query_attr_t *);
122 122 static ibt_status_t tavor_ci_reregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
123 123 ibc_pd_hdl_t, ibt_mr_attr_t *, void *, ibc_mr_hdl_t *,
124 124 ibt_mr_desc_t *);
125 125 static ibt_status_t tavor_ci_reregister_buf(ibc_hca_hdl_t, ibc_mr_hdl_t,
126 126 ibc_pd_hdl_t, ibt_smr_attr_t *, struct buf *, void *, ibc_mr_hdl_t *,
127 127 ibt_mr_desc_t *);
128 128 static ibt_status_t tavor_ci_sync_mr(ibc_hca_hdl_t, ibt_mr_sync_t *, size_t);
129 129 static ibt_status_t tavor_ci_register_dma_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
130 130 ibt_dmr_attr_t *, void *, ibc_mr_hdl_t *, ibt_mr_desc_t *);
131 131
132 132 /* Memory Windows */
133 133 static ibt_status_t tavor_ci_alloc_mw(ibc_hca_hdl_t, ibc_pd_hdl_t,
134 134 ibt_mw_flags_t, ibc_mw_hdl_t *, ibt_rkey_t *);
135 135 static ibt_status_t tavor_ci_free_mw(ibc_hca_hdl_t, ibc_mw_hdl_t);
136 136 static ibt_status_t tavor_ci_query_mw(ibc_hca_hdl_t, ibc_mw_hdl_t,
137 137 ibt_mw_query_attr_t *);
138 138
139 139 /* Multicast Groups */
140 140 static ibt_status_t tavor_ci_attach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
141 141 ib_gid_t, ib_lid_t);
142 142 static ibt_status_t tavor_ci_detach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
143 143 ib_gid_t, ib_lid_t);
144 144
145 145 /* Work Request and Completion Processing */
146 146 static ibt_status_t tavor_ci_post_send(ibc_hca_hdl_t, ibc_qp_hdl_t,
147 147 ibt_send_wr_t *, uint_t, uint_t *);
148 148 static ibt_status_t tavor_ci_post_recv(ibc_hca_hdl_t, ibc_qp_hdl_t,
149 149 ibt_recv_wr_t *, uint_t, uint_t *);
150 150 static ibt_status_t tavor_ci_poll_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
151 151 ibt_wc_t *, uint_t, uint_t *);
152 152 static ibt_status_t tavor_ci_notify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
153 153 ibt_cq_notify_flags_t);
154 154
155 155 /* CI Object Private Data */
156 156 static ibt_status_t tavor_ci_ci_data_in(ibc_hca_hdl_t, ibt_ci_data_flags_t,
157 157 ibt_object_type_t, void *, void *, size_t);
158 158
159 159 /* CI Object Private Data */
160 160 static ibt_status_t tavor_ci_ci_data_out(ibc_hca_hdl_t, ibt_ci_data_flags_t,
161 161 ibt_object_type_t, void *, void *, size_t);
162 162
163 163 /* Shared Receive Queues */
164 164 static ibt_status_t tavor_ci_alloc_srq(ibc_hca_hdl_t, ibt_srq_flags_t,
165 165 ibt_srq_hdl_t, ibc_pd_hdl_t, ibt_srq_sizes_t *, ibc_srq_hdl_t *,
166 166 ibt_srq_sizes_t *);
167 167 static ibt_status_t tavor_ci_free_srq(ibc_hca_hdl_t, ibc_srq_hdl_t);
168 168 static ibt_status_t tavor_ci_query_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
169 169 ibc_pd_hdl_t *, ibt_srq_sizes_t *, uint_t *);
170 170 static ibt_status_t tavor_ci_modify_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
171 171 ibt_srq_modify_flags_t, uint_t, uint_t, uint_t *);
172 172 static ibt_status_t tavor_ci_post_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
173 173 ibt_recv_wr_t *, uint_t, uint_t *);
174 174
175 175 /* Address translation */
176 176 static ibt_status_t tavor_ci_map_mem_area(ibc_hca_hdl_t, ibt_va_attr_t *,
177 177 void *, uint_t, ibt_reg_req_t *, ibc_ma_hdl_t *);
178 178 static ibt_status_t tavor_ci_unmap_mem_area(ibc_hca_hdl_t, ibc_ma_hdl_t);
179 179 static ibt_status_t tavor_ci_map_mem_iov(ibc_hca_hdl_t, ibt_iov_attr_t *,
180 180 ibt_all_wr_t *, ibc_mi_hdl_t *);
181 181 static ibt_status_t tavor_ci_unmap_mem_iov(ibc_hca_hdl_t, ibc_mi_hdl_t);
182 182
183 183 /* Allocate L_Key */
184 184 static ibt_status_t tavor_ci_alloc_lkey(ibc_hca_hdl_t, ibc_pd_hdl_t,
185 185 ibt_lkey_flags_t, uint_t, ibc_mr_hdl_t *, ibt_pmr_desc_t *);
186 186
187 187 /* Physical Register Memory Region */
188 188 static ibt_status_t tavor_ci_register_physical_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
189 189 ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *, ibt_pmr_desc_t *);
190 190 static ibt_status_t tavor_ci_reregister_physical_mr(ibc_hca_hdl_t,
191 191 ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *,
192 192 ibt_pmr_desc_t *);
193 193
194 194 /* Mellanox FMR */
195 195 static ibt_status_t tavor_ci_create_fmr_pool(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
196 196 ibt_fmr_pool_attr_t *fmr_params, ibc_fmr_pool_hdl_t *fmr_pool);
197 197 static ibt_status_t tavor_ci_destroy_fmr_pool(ibc_hca_hdl_t hca,
198 198 ibc_fmr_pool_hdl_t fmr_pool);
199 199 static ibt_status_t tavor_ci_flush_fmr_pool(ibc_hca_hdl_t hca,
200 200 ibc_fmr_pool_hdl_t fmr_pool);
201 201 static ibt_status_t tavor_ci_register_physical_fmr(ibc_hca_hdl_t hca,
202 202 ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
203 203 void *ibtl_reserved, ibc_mr_hdl_t *mr_hdl_p, ibt_pmr_desc_t *mem_desc_p);
204 204 static ibt_status_t tavor_ci_deregister_fmr(ibc_hca_hdl_t hca,
205 205 ibc_mr_hdl_t mr);
206 206
207 207 static ibt_status_t tavor_ci_alloc_io_mem(ibc_hca_hdl_t, size_t,
208 208 ibt_mr_flags_t, caddr_t *, ibc_mem_alloc_hdl_t *);
209 209 static ibt_status_t tavor_ci_free_io_mem(ibc_hca_hdl_t, ibc_mem_alloc_hdl_t);
210 210 static int tavor_mem_alloc(tavor_state_t *, size_t, ibt_mr_flags_t,
211 211 caddr_t *, tavor_mem_alloc_hdl_t *);
212 212
213 213 static ibt_status_t tavor_ci_not_supported();
214 214
/*
 * This ibc_operations_t structure includes pointers to all the entry points
 * provided by the Tavor driver.  This structure is passed to the IBTF at
 * driver attach time, using the ibc_attach() call.
 *
 * NOTE: the order of the entries below is positional and must match the
 * member order of the IBTF's ibc_operations_t structure.  Entry points
 * that Tavor does not implement are filled in with the generic
 * tavor_ci_not_supported() stub (which simply returns IBT_NOT_SUPPORTED).
 */
ibc_operations_t	tavor_ibc_ops = {
	/* HCA and port related operations */
	tavor_ci_query_hca_ports,
	tavor_ci_modify_ports,
	tavor_ci_modify_system_image,

	/* Protection Domains */
	tavor_ci_alloc_pd,
	tavor_ci_free_pd,

	/* Reliable Datagram Domains */
	tavor_ci_alloc_rdd,
	tavor_ci_free_rdd,

	/* Address Handles */
	tavor_ci_alloc_ah,
	tavor_ci_free_ah,
	tavor_ci_query_ah,
	tavor_ci_modify_ah,

	/* Queue Pairs */
	tavor_ci_alloc_qp,
	tavor_ci_alloc_special_qp,
	tavor_ci_alloc_qp_range,
	tavor_ci_free_qp,
	tavor_ci_release_qpn,
	tavor_ci_query_qp,
	tavor_ci_modify_qp,

	/* Completion Queues */
	tavor_ci_alloc_cq,
	tavor_ci_free_cq,
	tavor_ci_query_cq,
	tavor_ci_resize_cq,
	tavor_ci_modify_cq,
	tavor_ci_alloc_cq_sched,
	tavor_ci_free_cq_sched,
	tavor_ci_not_supported,	/* query_cq_handler_id */

	/* EE Contexts */
	tavor_ci_alloc_eec,
	tavor_ci_free_eec,
	tavor_ci_query_eec,
	tavor_ci_modify_eec,

	/* Memory Registration */
	tavor_ci_register_mr,
	tavor_ci_register_buf,
	tavor_ci_register_shared_mr,
	tavor_ci_deregister_mr,
	tavor_ci_query_mr,
	tavor_ci_reregister_mr,
	tavor_ci_reregister_buf,
	tavor_ci_sync_mr,

	/* Memory Windows */
	tavor_ci_alloc_mw,
	tavor_ci_free_mw,
	tavor_ci_query_mw,

	/* Multicast Groups */
	tavor_ci_attach_mcg,
	tavor_ci_detach_mcg,

	/* Work Request and Completion Processing */
	tavor_ci_post_send,
	tavor_ci_post_recv,
	tavor_ci_poll_cq,
	tavor_ci_notify_cq,

	/* CI Object Mapping Data */
	tavor_ci_ci_data_in,
	tavor_ci_ci_data_out,

	/* Shared Receive Queue */
	tavor_ci_alloc_srq,
	tavor_ci_free_srq,
	tavor_ci_query_srq,
	tavor_ci_modify_srq,
	tavor_ci_post_srq,

	/* Address translation */
	tavor_ci_map_mem_area,
	tavor_ci_unmap_mem_area,
	tavor_ci_map_mem_iov,
	tavor_ci_unmap_mem_iov,

	/* Allocate L_key */
	tavor_ci_alloc_lkey,

	/* Physical Register Memory Region */
	tavor_ci_register_physical_mr,
	tavor_ci_reregister_physical_mr,

	/* Mellanox FMR */
	tavor_ci_create_fmr_pool,
	tavor_ci_destroy_fmr_pool,
	tavor_ci_flush_fmr_pool,
	tavor_ci_register_physical_fmr,
	tavor_ci_deregister_fmr,

	/* dmable memory */
	tavor_ci_alloc_io_mem,
	tavor_ci_free_io_mem,

	/* XRC not yet supported */
	tavor_ci_not_supported,	/* ibc_alloc_xrc_domain */
	tavor_ci_not_supported,	/* ibc_free_xrc_domain */
	tavor_ci_not_supported,	/* ibc_alloc_xrc_srq */
	tavor_ci_not_supported,	/* ibc_free_xrc_srq */
	tavor_ci_not_supported,	/* ibc_query_xrc_srq */
	tavor_ci_not_supported,	/* ibc_modify_xrc_srq */
	tavor_ci_not_supported,	/* ibc_alloc_xrc_tgt_qp */
	tavor_ci_not_supported,	/* ibc_free_xrc_tgt_qp */
	tavor_ci_not_supported,	/* ibc_query_xrc_tgt_qp */
	tavor_ci_not_supported,	/* ibc_modify_xrc_tgt_qp */

	/* Memory Region (physical) */
	tavor_ci_register_dma_mr,

	/* Next enhancements */
	tavor_ci_not_supported,	/* ibc_enhancement1 */
	tavor_ci_not_supported,	/* ibc_enhancement2 */
	tavor_ci_not_supported,	/* ibc_enhancement3 */
	tavor_ci_not_supported,	/* ibc_enhancement4 */
};
346 346
/*
 * tavor_ci_not_supported()
 *    Generic stub for IBTF entry points that the Tavor driver does not
 *    implement (XRC operations, future enhancement slots, etc.).
 *    Always returns IBT_NOT_SUPPORTED.  Declared with an empty parameter
 *    list so it can stand in for entry points of any signature in the
 *    tavor_ibc_ops table.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_not_supported()
{
	return (IBT_NOT_SUPPORTED);
}
356 356
357 357
358 358 /*
359 359 * tavor_ci_query_hca_ports()
360 360 * Returns HCA port attributes for either one or all of the HCA's ports.
361 361 * Context: Can be called only from user or kernel context.
362 362 */
363 363 static ibt_status_t
364 364 tavor_ci_query_hca_ports(ibc_hca_hdl_t hca, uint8_t query_port,
365 365 ibt_hca_portinfo_t *info_p)
366 366 {
367 367 tavor_state_t *state;
368 368 uint_t start, end, port;
369 369 int status, indx;
370 370
371 371 TAVOR_TNF_ENTER(tavor_ci_query_hca_ports);
372 372
373 373 /* Check for valid HCA handle */
374 374 if (hca == NULL) {
375 375 TNF_PROBE_0(tavor_ci_query_hca_ports_invhca_fail,
376 376 TAVOR_TNF_ERROR, "");
377 377 TAVOR_TNF_EXIT(tavor_ci_query_port);
378 378 return (IBT_HCA_HDL_INVALID);
379 379 }
380 380
381 381 /* Grab the Tavor softstate pointer */
382 382 state = (tavor_state_t *)hca;
383 383
384 384 /*
385 385 * If the specified port is zero, then we are supposed to query all
386 386 * ports. Otherwise, we query only the port number specified.
387 387 * Setup the start and end port numbers as appropriate for the loop
388 388 * below. Note: The first Tavor port is port number one (1).
389 389 */
390 390 if (query_port == 0) {
391 391 start = 1;
392 392 end = start + (state->ts_cfg_profile->cp_num_ports - 1);
393 393 } else {
394 394 end = start = query_port;
395 395 }
396 396
397 397 /* Query the port(s) */
398 398 for (port = start, indx = 0; port <= end; port++, indx++) {
399 399 status = tavor_port_query(state, port, &info_p[indx]);
400 400 if (status != DDI_SUCCESS) {
401 401 TNF_PROBE_1(tavor_port_query_fail, TAVOR_TNF_ERROR,
402 402 "", tnf_uint, status, status);
403 403 TAVOR_TNF_EXIT(tavor_ci_query_hca_ports);
404 404 return (status);
405 405 }
406 406 }
407 407
408 408 TAVOR_TNF_EXIT(tavor_ci_query_hca_ports);
409 409 return (IBT_SUCCESS);
410 410 }
411 411
412 412
413 413 /*
414 414 * tavor_ci_modify_ports()
415 415 * Modify HCA port attributes
416 416 * Context: Can be called only from user or kernel context.
417 417 */
418 418 static ibt_status_t
419 419 tavor_ci_modify_ports(ibc_hca_hdl_t hca, uint8_t port,
420 420 ibt_port_modify_flags_t flags, uint8_t init_type)
421 421 {
422 422 tavor_state_t *state;
423 423 int status;
424 424
425 425 TAVOR_TNF_ENTER(tavor_ci_modify_ports);
426 426
427 427 /* Check for valid HCA handle */
428 428 if (hca == NULL) {
429 429 TNF_PROBE_0(tavor_ci_modify_ports_invhca_fail,
430 430 TAVOR_TNF_ERROR, "");
431 431 TAVOR_TNF_EXIT(tavor_ci_modify_ports);
432 432 return (IBT_HCA_HDL_INVALID);
433 433 }
434 434
435 435 /* Grab the Tavor softstate pointer */
436 436 state = (tavor_state_t *)hca;
437 437
438 438 /* Modify the port(s) */
439 439 status = tavor_port_modify(state, port, flags, init_type);
440 440 if (status != DDI_SUCCESS) {
441 441 TNF_PROBE_1(tavor_ci_modify_ports_fail,
442 442 TAVOR_TNF_ERROR, "", tnf_uint, status, status);
443 443 TAVOR_TNF_EXIT(tavor_ci_modify_ports);
444 444 return (status);
445 445 }
446 446
447 447 TAVOR_TNF_EXIT(tavor_ci_modify_ports);
448 448 return (IBT_SUCCESS);
449 449 }
450 450
451 451 /*
452 452 * tavor_ci_modify_system_image()
453 453 * Modify the System Image GUID
454 454 * Context: Can be called only from user or kernel context.
455 455 */
456 456 /* ARGSUSED */
457 457 static ibt_status_t
458 458 tavor_ci_modify_system_image(ibc_hca_hdl_t hca, ib_guid_t sys_guid)
459 459 {
460 460 TAVOR_TNF_ENTER(tavor_ci_modify_system_image);
461 461
462 462 /*
463 463 * This is an unsupported interface for the Tavor driver. This
464 464 * interface is necessary to support modification of the System
465 465 * Image GUID. Tavor is only capable of modifying this parameter
466 466 * once (during driver initialization).
467 467 */
468 468
469 469 TAVOR_TNF_EXIT(tavor_ci_modify_system_image);
470 470 return (IBT_NOT_SUPPORTED);
471 471 }
472 472
473 473 /*
474 474 * tavor_ci_alloc_pd()
475 475 * Allocate a Protection Domain
476 476 * Context: Can be called only from user or kernel context.
477 477 */
478 478 /* ARGSUSED */
479 479 static ibt_status_t
480 480 tavor_ci_alloc_pd(ibc_hca_hdl_t hca, ibt_pd_flags_t flags, ibc_pd_hdl_t *pd_p)
481 481 {
482 482 tavor_state_t *state;
483 483 tavor_pdhdl_t pdhdl;
484 484 int status;
485 485
486 486 TAVOR_TNF_ENTER(tavor_ci_alloc_pd);
487 487
488 488 ASSERT(pd_p != NULL);
489 489
490 490 /* Check for valid HCA handle */
491 491 if (hca == NULL) {
492 492 TNF_PROBE_0(tavor_ci_alloc_pd_invhca_fail,
493 493 TAVOR_TNF_ERROR, "");
494 494 TAVOR_TNF_EXIT(tavor_ci_alloc_pd);
495 495 return (IBT_HCA_HDL_INVALID);
496 496 }
497 497
498 498 /* Grab the Tavor softstate pointer */
499 499 state = (tavor_state_t *)hca;
500 500
501 501 /* Allocate the PD */
502 502 status = tavor_pd_alloc(state, &pdhdl, TAVOR_NOSLEEP);
503 503 if (status != DDI_SUCCESS) {
504 504 TNF_PROBE_1(tavor_ci_alloc_pd_fail, TAVOR_TNF_ERROR, "",
505 505 tnf_uint, status, status);
506 506 TAVOR_TNF_EXIT(tavor_ci_alloc_pd);
507 507 return (status);
508 508 }
509 509
510 510 /* Return the Tavor PD handle */
511 511 *pd_p = (ibc_pd_hdl_t)pdhdl;
512 512
513 513 TAVOR_TNF_EXIT(tavor_ci_alloc_pd);
514 514 return (IBT_SUCCESS);
515 515 }
516 516
517 517
518 518 /*
519 519 * tavor_ci_free_pd()
520 520 * Free a Protection Domain
521 521 * Context: Can be called only from user or kernel context
522 522 */
523 523 static ibt_status_t
524 524 tavor_ci_free_pd(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd)
525 525 {
526 526 tavor_state_t *state;
527 527 tavor_pdhdl_t pdhdl;
528 528 int status;
529 529
530 530 TAVOR_TNF_ENTER(tavor_ci_free_pd);
531 531
532 532 /* Check for valid HCA handle */
533 533 if (hca == NULL) {
534 534 TNF_PROBE_0(tavor_ci_free_pd_invhca_fail,
535 535 TAVOR_TNF_ERROR, "");
536 536 TAVOR_TNF_EXIT(tavor_ci_free_pd);
537 537 return (IBT_HCA_HDL_INVALID);
538 538 }
539 539
540 540 /* Check for valid PD handle pointer */
541 541 if (pd == NULL) {
542 542 TNF_PROBE_0(tavor_ci_free_pd_invpdhdl_fail,
543 543 TAVOR_TNF_ERROR, "");
544 544 TAVOR_TNF_EXIT(tavor_ci_free_pd);
545 545 return (IBT_PD_HDL_INVALID);
546 546 }
547 547
548 548 /* Grab the Tavor softstate pointer and PD handle */
549 549 state = (tavor_state_t *)hca;
550 550 pdhdl = (tavor_pdhdl_t)pd;
551 551
552 552 /* Free the PD */
553 553 status = tavor_pd_free(state, &pdhdl);
554 554 if (status != DDI_SUCCESS) {
555 555 TNF_PROBE_1(tavor_ci_free_pd_fail, TAVOR_TNF_ERROR, "",
556 556 tnf_uint, status, status);
557 557 TAVOR_TNF_EXIT(tavor_ci_free_pd);
558 558 return (status);
559 559 }
560 560
561 561 TAVOR_TNF_EXIT(tavor_ci_free_pd);
562 562 return (IBT_SUCCESS);
563 563 }
564 564
565 565
566 566 /*
567 567 * tavor_ci_alloc_rdd()
568 568 * Allocate a Reliable Datagram Domain
569 569 * Context: Can be called only from user or kernel context.
570 570 */
571 571 /* ARGSUSED */
572 572 static ibt_status_t
573 573 tavor_ci_alloc_rdd(ibc_hca_hdl_t hca, ibc_rdd_flags_t flags,
574 574 ibc_rdd_hdl_t *rdd_p)
575 575 {
576 576 TAVOR_TNF_ENTER(tavor_ci_alloc_rdd);
577 577
578 578 /*
579 579 * This is an unsupported interface for the Tavor driver. This
580 580 * interface is necessary to support Reliable Datagram (RD)
581 581 * operations. Tavor does not support RD.
582 582 */
583 583
584 584 TAVOR_TNF_EXIT(tavor_ci_alloc_rdd);
585 585 return (IBT_NOT_SUPPORTED);
586 586 }
587 587
588 588
/*
 * tavor_ci_free_rdd()
 *    Free a Reliable Datagram Domain
 *    Context: Can be called only from user or kernel context.
 *    (Header comment previously said "tavor_free_rdd()", which does not
 *    match the actual function name.)
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_free_rdd(ibc_hca_hdl_t hca, ibc_rdd_hdl_t rdd)
{
	TAVOR_TNF_ENTER(tavor_ci_free_rdd);

	/*
	 * This is an unsupported interface for the Tavor driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Tavor does not support RD.
	 */

	TAVOR_TNF_EXIT(tavor_ci_free_rdd);
	return (IBT_NOT_SUPPORTED);
}
609 609
610 610
611 611 /*
612 612 * tavor_ci_alloc_ah()
613 613 * Allocate an Address Handle
614 614 * Context: Can be called only from user or kernel context.
615 615 */
616 616 /* ARGSUSED */
617 617 static ibt_status_t
618 618 tavor_ci_alloc_ah(ibc_hca_hdl_t hca, ibt_ah_flags_t flags, ibc_pd_hdl_t pd,
619 619 ibt_adds_vect_t *attr_p, ibc_ah_hdl_t *ah_p)
620 620 {
621 621 tavor_state_t *state;
622 622 tavor_ahhdl_t ahhdl;
623 623 tavor_pdhdl_t pdhdl;
624 624 int status;
625 625
626 626 TAVOR_TNF_ENTER(tavor_ci_alloc_ah);
627 627
628 628 /* Check for valid HCA handle */
629 629 if (hca == NULL) {
630 630 TNF_PROBE_0(tavor_ci_alloc_ah_invhca_fail,
631 631 TAVOR_TNF_ERROR, "");
632 632 TAVOR_TNF_EXIT(tavor_ci_alloc_ah);
633 633 return (IBT_HCA_HDL_INVALID);
634 634 }
635 635
636 636 /* Check for valid PD handle pointer */
637 637 if (pd == NULL) {
638 638 TNF_PROBE_0(tavor_ci_alloc_ah_invpdhdl_fail,
639 639 TAVOR_TNF_ERROR, "");
640 640 TAVOR_TNF_EXIT(tavor_ci_alloc_ah);
641 641 return (IBT_PD_HDL_INVALID);
642 642 }
643 643
644 644 /* Grab the Tavor softstate pointer and PD handle */
645 645 state = (tavor_state_t *)hca;
646 646 pdhdl = (tavor_pdhdl_t)pd;
647 647
648 648 /* Allocate the AH */
649 649 status = tavor_ah_alloc(state, pdhdl, attr_p, &ahhdl, TAVOR_NOSLEEP);
650 650 if (status != DDI_SUCCESS) {
651 651 TNF_PROBE_1(tavor_ci_alloc_ah_fail, TAVOR_TNF_ERROR, "",
652 652 tnf_uint, status, status);
653 653 TAVOR_TNF_EXIT(tavor_ci_alloc_ah);
654 654 return (status);
655 655 }
656 656
657 657 /* Return the Tavor AH handle */
658 658 *ah_p = (ibc_ah_hdl_t)ahhdl;
659 659
660 660 TAVOR_TNF_EXIT(tavor_ci_alloc_ah);
661 661 return (IBT_SUCCESS);
662 662 }
663 663
664 664
665 665 /*
666 666 * tavor_ci_free_ah()
667 667 * Free an Address Handle
668 668 * Context: Can be called only from user or kernel context.
669 669 */
670 670 static ibt_status_t
671 671 tavor_ci_free_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah)
672 672 {
673 673 tavor_state_t *state;
674 674 tavor_ahhdl_t ahhdl;
675 675 int status;
676 676
677 677 TAVOR_TNF_ENTER(tavor_ci_free_ah);
678 678
679 679 /* Check for valid HCA handle */
680 680 if (hca == NULL) {
681 681 TNF_PROBE_0(tavor_ci_free_ah_invhca_fail,
682 682 TAVOR_TNF_ERROR, "");
683 683 TAVOR_TNF_EXIT(tavor_ci_free_ah);
684 684 return (IBT_HCA_HDL_INVALID);
685 685 }
686 686
687 687 /* Check for valid address handle pointer */
688 688 if (ah == NULL) {
689 689 TNF_PROBE_0(tavor_ci_free_ah_invahhdl_fail,
690 690 TAVOR_TNF_ERROR, "");
691 691 TAVOR_TNF_EXIT(tavor_ci_free_ah);
692 692 return (IBT_AH_HDL_INVALID);
693 693 }
694 694
695 695 /* Grab the Tavor softstate pointer and AH handle */
696 696 state = (tavor_state_t *)hca;
697 697 ahhdl = (tavor_ahhdl_t)ah;
698 698
699 699 /* Free the AH */
700 700 status = tavor_ah_free(state, &ahhdl, TAVOR_NOSLEEP);
701 701 if (status != DDI_SUCCESS) {
702 702 TNF_PROBE_1(tavor_ci_free_ah_fail, TAVOR_TNF_ERROR, "",
703 703 tnf_uint, status, status);
704 704 TAVOR_TNF_EXIT(tavor_ci_free_ah);
705 705 return (status);
706 706 }
707 707
708 708 TAVOR_TNF_EXIT(tavor_ci_free_ah);
709 709 return (IBT_SUCCESS);
710 710 }
711 711
712 712
713 713 /*
714 714 * tavor_ci_query_ah()
715 715 * Return the Address Vector information for a specified Address Handle
716 716 * Context: Can be called from interrupt or base context.
717 717 */
718 718 static ibt_status_t
719 719 tavor_ci_query_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibc_pd_hdl_t *pd_p,
720 720 ibt_adds_vect_t *attr_p)
721 721 {
722 722 tavor_state_t *state;
723 723 tavor_ahhdl_t ahhdl;
724 724 tavor_pdhdl_t pdhdl;
725 725 int status;
726 726
727 727 TAVOR_TNF_ENTER(tavor_ci_query_ah);
728 728
729 729 /* Check for valid HCA handle */
730 730 if (hca == NULL) {
731 731 TNF_PROBE_0(tavor_ci_query_ah_invhca_fail,
732 732 TAVOR_TNF_ERROR, "");
733 733 TAVOR_TNF_EXIT(tavor_ci_query_ah);
734 734 return (IBT_HCA_HDL_INVALID);
735 735 }
736 736
737 737 /* Check for valid address handle pointer */
738 738 if (ah == NULL) {
739 739 TNF_PROBE_0(tavor_ci_query_ah_invahhdl_fail,
740 740 TAVOR_TNF_ERROR, "");
741 741 TAVOR_TNF_EXIT(tavor_ci_query_ah);
742 742 return (IBT_AH_HDL_INVALID);
743 743 }
744 744
745 745 /* Grab the Tavor softstate pointer and AH handle */
746 746 state = (tavor_state_t *)hca;
747 747 ahhdl = (tavor_ahhdl_t)ah;
748 748
749 749 /* Query the AH */
750 750 status = tavor_ah_query(state, ahhdl, &pdhdl, attr_p);
751 751 if (status != DDI_SUCCESS) {
752 752 TNF_PROBE_1(tavor_ci_query_ah_fail, TAVOR_TNF_ERROR, "",
753 753 tnf_uint, status, status);
754 754 TAVOR_TNF_EXIT(tavor_ci_query_ah);
755 755 return (status);
756 756 }
757 757
758 758 /* Return the Tavor PD handle */
759 759 *pd_p = (ibc_pd_hdl_t)pdhdl;
760 760
761 761 TAVOR_TNF_EXIT(tavor_ci_query_ah);
762 762 return (IBT_SUCCESS);
763 763 }
764 764
765 765
766 766 /*
767 767 * tavor_ci_modify_ah()
768 768 * Modify the Address Vector information of a specified Address Handle
769 769 * Context: Can be called from interrupt or base context.
770 770 */
771 771 static ibt_status_t
772 772 tavor_ci_modify_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibt_adds_vect_t *attr_p)
773 773 {
774 774 tavor_state_t *state;
775 775 tavor_ahhdl_t ahhdl;
776 776 int status;
777 777
778 778 TAVOR_TNF_ENTER(tavor_ci_modify_ah);
779 779
780 780 /* Check for valid HCA handle */
781 781 if (hca == NULL) {
782 782 TNF_PROBE_0(tavor_ci_modify_ah_invhca_fail,
783 783 TAVOR_TNF_ERROR, "");
784 784 TAVOR_TNF_EXIT(tavor_ci_modify_ah);
785 785 return (IBT_HCA_HDL_INVALID);
786 786 }
787 787
788 788 /* Check for valid address handle pointer */
789 789 if (ah == NULL) {
790 790 TNF_PROBE_0(tavor_ci_modify_ah_invahhdl_fail,
791 791 TAVOR_TNF_ERROR, "");
792 792 TAVOR_TNF_EXIT(tavor_ci_modify_ah);
793 793 return (IBT_AH_HDL_INVALID);
794 794 }
795 795
796 796 /* Grab the Tavor softstate pointer and AH handle */
797 797 state = (tavor_state_t *)hca;
798 798 ahhdl = (tavor_ahhdl_t)ah;
799 799
800 800 /* Modify the AH */
801 801 status = tavor_ah_modify(state, ahhdl, attr_p);
802 802 if (status != DDI_SUCCESS) {
803 803 TNF_PROBE_1(tavor_ci_modify_ah_fail, TAVOR_TNF_ERROR, "",
804 804 tnf_uint, status, status);
805 805 TAVOR_TNF_EXIT(tavor_ci_modify_ah);
806 806 return (status);
807 807 }
808 808
809 809 TAVOR_TNF_EXIT(tavor_ci_modify_ah);
810 810 return (IBT_SUCCESS);
811 811 }
812 812
813 813
814 814 /*
815 815 * tavor_ci_alloc_qp()
816 816 * Allocate a Queue Pair
817 817 * Context: Can be called only from user or kernel context.
818 818 */
819 819 static ibt_status_t
820 820 tavor_ci_alloc_qp(ibc_hca_hdl_t hca, ibtl_qp_hdl_t ibt_qphdl,
↓ open down ↓ |
820 lines elided |
↑ open up ↑ |
821 821 ibt_qp_type_t type, ibt_qp_alloc_attr_t *attr_p,
822 822 ibt_chan_sizes_t *queue_sizes_p, ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
823 823 {
824 824 tavor_state_t *state;
825 825 tavor_qp_info_t qpinfo;
826 826 tavor_qp_options_t op;
827 827 int status;
828 828
829 829 TAVOR_TNF_ENTER(tavor_ci_alloc_qp);
830 830
831 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
832 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))
833 -
834 831 /* Check for valid HCA handle */
835 832 if (hca == NULL) {
836 833 TNF_PROBE_0(tavor_ci_alloc_qp_invhca_fail,
837 834 TAVOR_TNF_ERROR, "");
838 835 TAVOR_TNF_EXIT(tavor_ci_alloc_qp);
839 836 return (IBT_HCA_HDL_INVALID);
840 837 }
841 838
842 839 /* Grab the Tavor softstate pointer */
843 840 state = (tavor_state_t *)hca;
844 841
845 842 /* Allocate the QP */
846 843 qpinfo.qpi_attrp = attr_p;
847 844 qpinfo.qpi_type = type;
848 845 qpinfo.qpi_ibt_qphdl = ibt_qphdl;
849 846 qpinfo.qpi_queueszp = queue_sizes_p;
850 847 qpinfo.qpi_qpn = qpn;
851 848 op.qpo_wq_loc = state->ts_cfg_profile->cp_qp_wq_inddr;
852 849 status = tavor_qp_alloc(state, &qpinfo, TAVOR_NOSLEEP, &op);
853 850 if (status != DDI_SUCCESS) {
854 851 TNF_PROBE_1(tavor_ci_alloc_qp_fail, TAVOR_TNF_ERROR, "",
855 852 tnf_uint, status, status);
856 853 TAVOR_TNF_EXIT(tavor_ci_alloc_qp);
857 854 return (status);
858 855 }
859 856
860 857 /* Return the Tavor QP handle */
861 858 *qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;
862 859
863 860 TAVOR_TNF_EXIT(tavor_ci_alloc_qp);
864 861 return (IBT_SUCCESS);
865 862 }
866 863
867 864
868 865 /*
869 866 * tavor_ci_alloc_special_qp()
870 867 * Allocate a Special Queue Pair
871 868 * Context: Can be called only from user or kernel context.
872 869 */
873 870 static ibt_status_t
874 871 tavor_ci_alloc_special_qp(ibc_hca_hdl_t hca, uint8_t port,
875 872 ibtl_qp_hdl_t ibt_qphdl, ibt_sqp_type_t type,
↓ open down ↓ |
32 lines elided |
↑ open up ↑ |
876 873 ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
877 874 ibc_qp_hdl_t *qp_p)
878 875 {
879 876 tavor_state_t *state;
880 877 tavor_qp_info_t qpinfo;
881 878 tavor_qp_options_t op;
882 879 int status;
883 880
884 881 TAVOR_TNF_ENTER(tavor_ci_alloc_special_qp);
885 882
886 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
887 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))
888 -
889 883 /* Check for valid HCA handle */
890 884 if (hca == NULL) {
891 885 TNF_PROBE_0(tavor_ci_alloc_special_qp_invhca_fail,
892 886 TAVOR_TNF_ERROR, "");
893 887 TAVOR_TNF_EXIT(tavor_ci_alloc_special_qp);
894 888 return (IBT_HCA_HDL_INVALID);
895 889 }
896 890
897 891 /* Grab the Tavor softstate pointer */
898 892 state = (tavor_state_t *)hca;
899 893
900 894 /* Allocate the Special QP */
901 895 qpinfo.qpi_attrp = attr_p;
902 896 qpinfo.qpi_type = type;
903 897 qpinfo.qpi_port = port;
904 898 qpinfo.qpi_ibt_qphdl = ibt_qphdl;
905 899 qpinfo.qpi_queueszp = queue_sizes_p;
906 900 op.qpo_wq_loc = state->ts_cfg_profile->cp_qp_wq_inddr;
907 901 status = tavor_special_qp_alloc(state, &qpinfo, TAVOR_NOSLEEP, &op);
908 902 if (status != DDI_SUCCESS) {
909 903 TNF_PROBE_1(tavor_ci_alloc_special_qp_fail, TAVOR_TNF_ERROR,
910 904 "", tnf_uint, status, status);
911 905 TAVOR_TNF_EXIT(tavor_ci_alloc_special_qp);
912 906 return (status);
913 907 }
914 908
915 909 /* Return the Tavor QP handle */
916 910 *qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;
917 911
918 912 TAVOR_TNF_EXIT(tavor_ci_alloc_special_qp);
919 913 return (IBT_SUCCESS);
920 914 }
921 915
922 916
/*
 * tavor_ci_alloc_qp_range()
 *    Allocate a range of Queue Pairs
 *    Context: Can be called only from user or kernel context.
 *    QP range allocation is not implemented by the Tavor driver; this
 *    entry point exists only to fill the ibc_operations_t slot and
 *    always reports IBT_NOT_SUPPORTED.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_alloc_qp_range(ibc_hca_hdl_t hca, uint_t log2,
    ibtl_qp_hdl_t *ibtl_qp_p, ibt_qp_type_t type,
    ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
    ibc_cq_hdl_t *send_cq_p, ibc_cq_hdl_t *recv_cq_p,
    ib_qpn_t *qpn_p, ibc_qp_hdl_t *qp_p)
{
	return (IBT_NOT_SUPPORTED);
}
933 927
934 928 /*
935 929 * tavor_ci_free_qp()
936 930 * Free a Queue Pair
937 931 * Context: Can be called only from user or kernel context.
938 932 */
939 933 static ibt_status_t
940 934 tavor_ci_free_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
941 935 ibc_free_qp_flags_t free_qp_flags, ibc_qpn_hdl_t *qpnh_p)
942 936 {
943 937 tavor_state_t *state;
944 938 tavor_qphdl_t qphdl;
945 939 int status;
946 940
947 941 TAVOR_TNF_ENTER(tavor_ci_free_qp);
948 942
949 943 /* Check for valid HCA handle */
950 944 if (hca == NULL) {
951 945 TNF_PROBE_0(tavor_ci_free_qp_invhca_fail,
952 946 TAVOR_TNF_ERROR, "");
953 947 TAVOR_TNF_EXIT(tavor_ci_free_qp);
954 948 return (IBT_HCA_HDL_INVALID);
955 949 }
956 950
957 951 /* Check for valid QP handle pointer */
958 952 if (qp == NULL) {
959 953 TNF_PROBE_0(tavor_ci_free_qp_invqphdl_fail,
960 954 TAVOR_TNF_ERROR, "");
961 955 TAVOR_TNF_EXIT(tavor_ci_free_qp);
962 956 return (IBT_QP_HDL_INVALID);
963 957 }
964 958
965 959 /* Grab the Tavor softstate pointer and QP handle */
966 960 state = (tavor_state_t *)hca;
967 961 qphdl = (tavor_qphdl_t)qp;
968 962
969 963 /* Free the QP */
970 964 status = tavor_qp_free(state, &qphdl, free_qp_flags, qpnh_p,
971 965 TAVOR_NOSLEEP);
972 966 if (status != DDI_SUCCESS) {
973 967 TNF_PROBE_1(tavor_ci_free_qp_fail, TAVOR_TNF_ERROR, "",
974 968 tnf_uint, status, status);
975 969 TAVOR_TNF_EXIT(tavor_ci_free_qp);
976 970 return (status);
977 971 }
978 972
979 973 TAVOR_TNF_EXIT(tavor_ci_free_qp);
980 974 return (IBT_SUCCESS);
981 975 }
982 976
983 977
984 978 /*
985 979 * tavor_ci_release_qpn()
986 980 * Release a Queue Pair Number (QPN)
987 981 * Context: Can be called only from user or kernel context.
988 982 */
989 983 static ibt_status_t
990 984 tavor_ci_release_qpn(ibc_hca_hdl_t hca, ibc_qpn_hdl_t qpnh)
991 985 {
992 986 tavor_state_t *state;
993 987 tavor_qpn_entry_t *entry;
994 988
995 989 TAVOR_TNF_ENTER(tavor_ci_release_qpn);
996 990
997 991 /* Check for valid HCA handle */
998 992 if (hca == NULL) {
999 993 TNF_PROBE_0(tavor_ci_release_qpn_invhca_fail,
1000 994 TAVOR_TNF_ERROR, "");
1001 995 TAVOR_TNF_EXIT(tavor_ci_release_qpn);
1002 996 return (IBT_HCA_HDL_INVALID);
1003 997 }
1004 998
1005 999 /* Check for valid QP handle pointer */
1006 1000 if (qpnh == NULL) {
1007 1001 TNF_PROBE_0(tavor_ci_release_qpn_invqpnhdl_fail,
1008 1002 TAVOR_TNF_ERROR, "");
1009 1003 TAVOR_TNF_EXIT(tavor_ci_release_qpn);
1010 1004 return (IBT_QP_HDL_INVALID);
1011 1005 }
1012 1006
1013 1007 /* Grab the Tavor softstate pointer and QP handle */
1014 1008 state = (tavor_state_t *)hca;
1015 1009 entry = (tavor_qpn_entry_t *)qpnh;
1016 1010
1017 1011 /* Release the QP number */
1018 1012 tavor_qp_release_qpn(state, entry, TAVOR_QPN_RELEASE);
1019 1013
1020 1014 TAVOR_TNF_EXIT(tavor_ci_release_qpn);
1021 1015 return (IBT_SUCCESS);
1022 1016 }
1023 1017
1024 1018
1025 1019 /*
1026 1020 * tavor_ci_query_qp()
1027 1021 * Query a Queue Pair
1028 1022 * Context: Can be called from interrupt or base context.
1029 1023 */
1030 1024 static ibt_status_t
1031 1025 tavor_ci_query_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
1032 1026 ibt_qp_query_attr_t *attr_p)
1033 1027 {
1034 1028 tavor_state_t *state;
1035 1029 tavor_qphdl_t qphdl;
1036 1030 int status;
1037 1031
1038 1032 TAVOR_TNF_ENTER(tavor_ci_query_qp);
1039 1033
1040 1034 /* Check for valid HCA handle */
1041 1035 if (hca == NULL) {
1042 1036 TNF_PROBE_0(tavor_ci_query_qp_invhca_fail,
1043 1037 TAVOR_TNF_ERROR, "");
1044 1038 TAVOR_TNF_EXIT(tavor_ci_query_qp);
1045 1039 return (IBT_HCA_HDL_INVALID);
1046 1040 }
1047 1041
1048 1042 /* Check for valid QP handle */
1049 1043 if (qp == NULL) {
1050 1044 TNF_PROBE_0(tavor_ci_query_qp_invqphdl_fail,
1051 1045 TAVOR_TNF_ERROR, "");
1052 1046 TAVOR_TNF_EXIT(tavor_ci_query_qp);
1053 1047 return (IBT_QP_HDL_INVALID);
1054 1048 }
1055 1049
1056 1050 /* Grab the Tavor softstate pointer and QP handle */
1057 1051 state = (tavor_state_t *)hca;
1058 1052 qphdl = (tavor_qphdl_t)qp;
1059 1053
1060 1054 /* Query the QP */
1061 1055 status = tavor_qp_query(state, qphdl, attr_p);
1062 1056 if (status != DDI_SUCCESS) {
1063 1057 TNF_PROBE_1(tavor_ci_query_qp_fail, TAVOR_TNF_ERROR, "",
1064 1058 tnf_uint, status, status);
1065 1059 TAVOR_TNF_EXIT(tavor_ci_query_qp);
1066 1060 return (status);
1067 1061 }
1068 1062
1069 1063 TAVOR_TNF_EXIT(tavor_ci_query_qp);
1070 1064 return (IBT_SUCCESS);
1071 1065 }
1072 1066
1073 1067
1074 1068 /*
1075 1069 * tavor_ci_modify_qp()
1076 1070 * Modify a Queue Pair
1077 1071 * Context: Can be called from interrupt or base context.
1078 1072 */
1079 1073 static ibt_status_t
1080 1074 tavor_ci_modify_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
1081 1075 ibt_cep_modify_flags_t flags, ibt_qp_info_t *info_p,
1082 1076 ibt_queue_sizes_t *actual_sz)
1083 1077 {
1084 1078 tavor_state_t *state;
1085 1079 tavor_qphdl_t qphdl;
1086 1080 int status;
1087 1081
1088 1082 TAVOR_TNF_ENTER(tavor_ci_modify_qp);
1089 1083
1090 1084 /* Check for valid HCA handle */
1091 1085 if (hca == NULL) {
1092 1086 TNF_PROBE_0(tavor_ci_modify_qp_invhca_fail,
1093 1087 TAVOR_TNF_ERROR, "");
1094 1088 TAVOR_TNF_EXIT(tavor_ci_modify_qp);
1095 1089 return (IBT_HCA_HDL_INVALID);
1096 1090 }
1097 1091
1098 1092 /* Check for valid QP handle */
1099 1093 if (qp == NULL) {
1100 1094 TNF_PROBE_0(tavor_ci_modify_qp_invqphdl_fail,
1101 1095 TAVOR_TNF_ERROR, "");
1102 1096 TAVOR_TNF_EXIT(tavor_ci_modify_qp);
1103 1097 return (IBT_QP_HDL_INVALID);
1104 1098 }
1105 1099
1106 1100 /* Grab the Tavor softstate pointer and QP handle */
1107 1101 state = (tavor_state_t *)hca;
1108 1102 qphdl = (tavor_qphdl_t)qp;
1109 1103
1110 1104 /* Modify the QP */
1111 1105 status = tavor_qp_modify(state, qphdl, flags, info_p, actual_sz);
1112 1106 if (status != DDI_SUCCESS) {
1113 1107 TNF_PROBE_1(tavor_ci_modify_qp_fail, TAVOR_TNF_ERROR, "",
1114 1108 tnf_uint, status, status);
1115 1109 TAVOR_TNF_EXIT(tavor_ci_modify_qp);
1116 1110 return (status);
1117 1111 }
1118 1112
1119 1113 TAVOR_TNF_EXIT(tavor_ci_modify_qp);
1120 1114 return (IBT_SUCCESS);
1121 1115 }
1122 1116
1123 1117
1124 1118 /*
1125 1119 * tavor_ci_alloc_cq()
1126 1120 * Allocate a Completion Queue
1127 1121 * Context: Can be called only from user or kernel context.
1128 1122 */
1129 1123 /* ARGSUSED */
1130 1124 static ibt_status_t
1131 1125 tavor_ci_alloc_cq(ibc_hca_hdl_t hca, ibt_cq_hdl_t ibt_cqhdl,
1132 1126 ibt_cq_attr_t *attr_p, ibc_cq_hdl_t *cq_p, uint_t *actual_size)
1133 1127 {
1134 1128 tavor_state_t *state;
1135 1129 tavor_cqhdl_t cqhdl;
1136 1130 int status;
1137 1131
1138 1132 TAVOR_TNF_ENTER(tavor_ci_alloc_cq);
1139 1133
1140 1134 /* Check for valid HCA handle */
1141 1135 if (hca == NULL) {
1142 1136 TNF_PROBE_0(tavor_ci_alloc_cq_invhca_fail,
1143 1137 TAVOR_TNF_ERROR, "");
1144 1138 TAVOR_TNF_EXIT(tavor_ci_alloc_cq);
1145 1139 return (IBT_HCA_HDL_INVALID);
1146 1140 }
1147 1141
1148 1142 /* Grab the Tavor softstate pointer */
1149 1143 state = (tavor_state_t *)hca;
1150 1144
1151 1145 /* Allocate the CQ */
1152 1146 status = tavor_cq_alloc(state, ibt_cqhdl, attr_p, actual_size,
1153 1147 &cqhdl, TAVOR_NOSLEEP);
1154 1148 if (status != DDI_SUCCESS) {
1155 1149 TNF_PROBE_1(tavor_ci_alloc_cq_fail, TAVOR_TNF_ERROR, "",
1156 1150 tnf_uint, status, status);
1157 1151 TAVOR_TNF_EXIT(tavor_ci_alloc_cq);
1158 1152 return (status);
1159 1153 }
1160 1154
1161 1155 /* Return the Tavor CQ handle */
1162 1156 *cq_p = (ibc_cq_hdl_t)cqhdl;
1163 1157
1164 1158 TAVOR_TNF_EXIT(tavor_ci_alloc_cq);
1165 1159 return (IBT_SUCCESS);
1166 1160 }
1167 1161
1168 1162
1169 1163 /*
1170 1164 * tavor_ci_free_cq()
1171 1165 * Free a Completion Queue
1172 1166 * Context: Can be called only from user or kernel context.
1173 1167 */
1174 1168 static ibt_status_t
1175 1169 tavor_ci_free_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq)
1176 1170 {
1177 1171 tavor_state_t *state;
1178 1172 tavor_cqhdl_t cqhdl;
1179 1173 int status;
1180 1174
1181 1175 TAVOR_TNF_ENTER(tavor_ci_free_cq);
1182 1176
1183 1177
1184 1178 /* Check for valid HCA handle */
1185 1179 if (hca == NULL) {
1186 1180 TNF_PROBE_0(tavor_ci_free_cq_invhca_fail,
1187 1181 TAVOR_TNF_ERROR, "");
1188 1182 TAVOR_TNF_EXIT(tavor_ci_free_cq);
1189 1183 return (IBT_HCA_HDL_INVALID);
1190 1184 }
1191 1185
1192 1186 /* Check for valid CQ handle pointer */
1193 1187 if (cq == NULL) {
1194 1188 TNF_PROBE_0(tavor_ci_free_cq_invcqhdl_fail,
1195 1189 TAVOR_TNF_ERROR, "");
1196 1190 TAVOR_TNF_EXIT(tavor_ci_free_cq);
1197 1191 return (IBT_CQ_HDL_INVALID);
1198 1192 }
1199 1193
1200 1194 /* Grab the Tavor softstate pointer and CQ handle */
1201 1195 state = (tavor_state_t *)hca;
1202 1196 cqhdl = (tavor_cqhdl_t)cq;
1203 1197
1204 1198 /* Free the CQ */
1205 1199 status = tavor_cq_free(state, &cqhdl, TAVOR_NOSLEEP);
1206 1200 if (status != DDI_SUCCESS) {
1207 1201 TNF_PROBE_1(tavor_ci_free_cq_fail, TAVOR_TNF_ERROR, "",
1208 1202 tnf_uint, status, status);
1209 1203 TAVOR_TNF_EXIT(tavor_ci_free_cq);
1210 1204 return (status);
1211 1205 }
1212 1206
1213 1207 TAVOR_TNF_EXIT(tavor_ci_free_cq);
1214 1208 return (IBT_SUCCESS);
1215 1209 }
1216 1210
1217 1211
1218 1212 /*
1219 1213 * tavor_ci_query_cq()
1220 1214 * Return the size of a Completion Queue
1221 1215 * Context: Can be called only from user or kernel context.
1222 1216 */
1223 1217 static ibt_status_t
1224 1218 tavor_ci_query_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t *entries_p,
1225 1219 uint_t *count_p, uint_t *usec_p, ibt_cq_handler_id_t *hid_p)
1226 1220 {
1227 1221 tavor_cqhdl_t cqhdl;
1228 1222
1229 1223 TAVOR_TNF_ENTER(tavor_ci_query_cq);
1230 1224
1231 1225 /* Check for valid HCA handle */
1232 1226 if (hca == NULL) {
1233 1227 TNF_PROBE_0(tavor_ci_query_cq_invhca_fail,
1234 1228 TAVOR_TNF_ERROR, "");
1235 1229 TAVOR_TNF_EXIT(tavor_ci_query_cq);
1236 1230 return (IBT_HCA_HDL_INVALID);
1237 1231 }
1238 1232
1239 1233 /* Check for valid CQ handle pointer */
1240 1234 if (cq == NULL) {
1241 1235 TNF_PROBE_0(tavor_ci_query_cq_invcqhdl,
1242 1236 TAVOR_TNF_ERROR, "");
1243 1237 TAVOR_TNF_EXIT(tavor_ci_query_cq);
1244 1238 return (IBT_CQ_HDL_INVALID);
1245 1239 }
1246 1240
1247 1241 /* Grab the CQ handle */
1248 1242 cqhdl = (tavor_cqhdl_t)cq;
1249 1243
1250 1244 /* Query the current CQ size */
1251 1245 *entries_p = cqhdl->cq_bufsz;
1252 1246
1253 1247 /* interrupt moderation is not supported */
1254 1248 *count_p = 0;
1255 1249 *usec_p = 0;
1256 1250 *hid_p = 0;
1257 1251
1258 1252 TAVOR_TNF_EXIT(tavor_ci_query_cq);
1259 1253 return (IBT_SUCCESS);
1260 1254 }
1261 1255
1262 1256
1263 1257 /*
1264 1258 * tavor_ci_resize_cq()
1265 1259 * Change the size of a Completion Queue
1266 1260 * Context: Can be called only from user or kernel context.
1267 1261 */
1268 1262 static ibt_status_t
1269 1263 tavor_ci_resize_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t size,
1270 1264 uint_t *actual_size)
1271 1265 {
1272 1266 tavor_state_t *state;
1273 1267 tavor_cqhdl_t cqhdl;
1274 1268 int status;
1275 1269
1276 1270 TAVOR_TNF_ENTER(tavor_ci_resize_cq);
1277 1271
1278 1272 /* Check for valid HCA handle */
1279 1273 if (hca == NULL) {
1280 1274 TNF_PROBE_0(tavor_ci_resize_cq_invhca_fail,
1281 1275 TAVOR_TNF_ERROR, "");
1282 1276 TAVOR_TNF_EXIT(tavor_ci_resize_cq);
1283 1277 return (IBT_HCA_HDL_INVALID);
1284 1278 }
1285 1279
1286 1280 /* Check for valid CQ handle pointer */
1287 1281 if (cq == NULL) {
1288 1282 TNF_PROBE_0(tavor_ci_resize_cq_invcqhdl_fail,
1289 1283 TAVOR_TNF_ERROR, "");
1290 1284 TAVOR_TNF_EXIT(tavor_ci_resize_cq);
1291 1285 return (IBT_CQ_HDL_INVALID);
1292 1286 }
1293 1287
1294 1288 /* Grab the Tavor softstate pointer and CQ handle */
1295 1289 state = (tavor_state_t *)hca;
1296 1290 cqhdl = (tavor_cqhdl_t)cq;
1297 1291
1298 1292 /* Resize the CQ */
1299 1293 status = tavor_cq_resize(state, cqhdl, size, actual_size,
1300 1294 TAVOR_NOSLEEP);
1301 1295 if (status != DDI_SUCCESS) {
1302 1296 TNF_PROBE_1(tavor_ci_resize_cq_fail, TAVOR_TNF_ERROR, "",
1303 1297 tnf_uint, status, status);
1304 1298 TAVOR_TNF_EXIT(tavor_ci_resize_cq);
1305 1299 return (status);
1306 1300 }
1307 1301
1308 1302 TAVOR_TNF_EXIT(tavor_ci_resize_cq);
1309 1303 return (IBT_SUCCESS);
1310 1304 }
1311 1305
1312 1306 /*
1313 1307 * CQ interrupt moderation is not supported in tavor.
1314 1308 */
1315 1309
1316 1310 /* ARGSUSED */
1317 1311 static ibt_status_t
1318 1312 tavor_ci_modify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq,
1319 1313 uint_t count, uint_t usec, ibt_cq_handler_id_t hid)
1320 1314 {
1321 1315 return (IBT_NOT_SUPPORTED);
1322 1316 }
1323 1317
1324 1318 /*
1325 1319 * tavor_ci_alloc_cq_sched()
1326 1320 * Reserve a CQ scheduling class resource
1327 1321 * Context: Can be called only from user or kernel context.
1328 1322 */
1329 1323 /* ARGSUSED */
1330 1324 static ibt_status_t
1331 1325 tavor_ci_alloc_cq_sched(ibc_hca_hdl_t hca, ibt_cq_sched_attr_t *attr,
1332 1326 ibc_sched_hdl_t *sched_hdl_p)
1333 1327 {
1334 1328 if (hca == NULL) {
1335 1329 return (IBT_HCA_HDL_INVALID);
1336 1330 }
1337 1331 *sched_hdl_p = NULL;
1338 1332
1339 1333 /*
1340 1334 * This is an unsupported interface for the Tavor driver. Tavor
1341 1335 * does not support CQ scheduling classes.
1342 1336 */
1343 1337 return (IBT_SUCCESS);
1344 1338 }
1345 1339
1346 1340
1347 1341 /*
1348 1342 * tavor_ci_free_cq_sched()
1349 1343 * Free a CQ scheduling class resource
1350 1344 * Context: Can be called only from user or kernel context.
1351 1345 */
1352 1346 /* ARGSUSED */
1353 1347 static ibt_status_t
1354 1348 tavor_ci_free_cq_sched(ibc_hca_hdl_t hca, ibc_sched_hdl_t sched_hdl)
1355 1349 {
1356 1350 if (hca == NULL) {
1357 1351 return (IBT_HCA_HDL_INVALID);
1358 1352 }
1359 1353
1360 1354 /*
1361 1355 * This is an unsupported interface for the Tavor driver. Tavor
1362 1356 * does not support CQ scheduling classes.
1363 1357 */
1364 1358 return (IBT_SUCCESS);
1365 1359 }
1366 1360
1367 1361
1368 1362 /*
1369 1363 * tavor_ci_alloc_eec()
1370 1364 * Allocate an End-to-End context
1371 1365 * Context: Can be called only from user or kernel context.
1372 1366 */
1373 1367 /* ARGSUSED */
1374 1368 static ibt_status_t
1375 1369 tavor_ci_alloc_eec(ibc_hca_hdl_t hca, ibc_eec_flags_t flags,
1376 1370 ibt_eec_hdl_t ibt_eec, ibc_rdd_hdl_t rdd, ibc_eec_hdl_t *eec_p)
1377 1371 {
1378 1372 TAVOR_TNF_ENTER(tavor_ci_alloc_eec);
1379 1373
1380 1374 /*
1381 1375 * This is an unsupported interface for the Tavor driver. This
1382 1376 * interface is necessary to support Reliable Datagram (RD)
1383 1377 * operations. Tavor does not support RD.
1384 1378 */
1385 1379
1386 1380 TAVOR_TNF_EXIT(tavor_ci_alloc_eec);
1387 1381 return (IBT_NOT_SUPPORTED);
1388 1382 }
1389 1383
1390 1384
1391 1385 /*
1392 1386 * tavor_ci_free_eec()
1393 1387 * Free an End-to-End context
1394 1388 * Context: Can be called only from user or kernel context.
1395 1389 */
1396 1390 /* ARGSUSED */
1397 1391 static ibt_status_t
1398 1392 tavor_ci_free_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec)
1399 1393 {
1400 1394 TAVOR_TNF_ENTER(tavor_ci_free_eec);
1401 1395
1402 1396 /*
1403 1397 * This is an unsupported interface for the Tavor driver. This
1404 1398 * interface is necessary to support Reliable Datagram (RD)
1405 1399 * operations. Tavor does not support RD.
1406 1400 */
1407 1401
1408 1402 TAVOR_TNF_EXIT(tavor_ci_free_eec);
1409 1403 return (IBT_NOT_SUPPORTED);
1410 1404 }
1411 1405
1412 1406
1413 1407 /*
1414 1408 * tavor_ci_query_eec()
1415 1409 * Query an End-to-End context
1416 1410 * Context: Can be called from interrupt or base context.
1417 1411 */
1418 1412 /* ARGSUSED */
1419 1413 static ibt_status_t
1420 1414 tavor_ci_query_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
1421 1415 ibt_eec_query_attr_t *attr_p)
1422 1416 {
1423 1417 TAVOR_TNF_ENTER(tavor_ci_query_eec);
1424 1418
1425 1419 /*
1426 1420 * This is an unsupported interface for the Tavor driver. This
1427 1421 * interface is necessary to support Reliable Datagram (RD)
1428 1422 * operations. Tavor does not support RD.
1429 1423 */
1430 1424
1431 1425 TAVOR_TNF_EXIT(tavor_ci_query_eec);
1432 1426 return (IBT_NOT_SUPPORTED);
1433 1427 }
1434 1428
1435 1429
1436 1430 /*
1437 1431 * tavor_ci_modify_eec()
1438 1432 * Modify an End-to-End context
1439 1433 * Context: Can be called from interrupt or base context.
1440 1434 */
1441 1435 /* ARGSUSED */
1442 1436 static ibt_status_t
1443 1437 tavor_ci_modify_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
1444 1438 ibt_cep_modify_flags_t flags, ibt_eec_info_t *info_p)
1445 1439 {
1446 1440 TAVOR_TNF_ENTER(tavor_ci_query_eec);
1447 1441
1448 1442 /*
1449 1443 * This is an unsupported interface for the Tavor driver. This
1450 1444 * interface is necessary to support Reliable Datagram (RD)
1451 1445 * operations. Tavor does not support RD.
1452 1446 */
1453 1447
1454 1448 TAVOR_TNF_EXIT(tavor_ci_query_eec);
1455 1449 return (IBT_NOT_SUPPORTED);
1456 1450 }
1457 1451
1458 1452
1459 1453 /*
1460 1454 * tavor_ci_register_mr()
1461 1455 * Prepare a virtually addressed Memory Region for use by an HCA
1462 1456 * Context: Can be called from interrupt or base context.
1463 1457 */
1464 1458 /* ARGSUSED */
1465 1459 static ibt_status_t
1466 1460 tavor_ci_register_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1467 1461 ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
↓ open down ↓ |
569 lines elided |
↑ open up ↑ |
1468 1462 ibt_mr_desc_t *mr_desc)
1469 1463 {
1470 1464 tavor_mr_options_t op;
1471 1465 tavor_state_t *state;
1472 1466 tavor_pdhdl_t pdhdl;
1473 1467 tavor_mrhdl_t mrhdl;
1474 1468 int status;
1475 1469
1476 1470 TAVOR_TNF_ENTER(tavor_ci_register_mr);
1477 1471
1478 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1479 -
1480 1472 ASSERT(mr_attr != NULL);
1481 1473 ASSERT(mr_p != NULL);
1482 1474 ASSERT(mr_desc != NULL);
1483 1475
1484 1476 /* Check for valid HCA handle */
1485 1477 if (hca == NULL) {
1486 1478 TNF_PROBE_0(tavor_ci_register_mr_invhca_fail,
1487 1479 TAVOR_TNF_ERROR, "");
1488 1480 TAVOR_TNF_EXIT(tavor_ci_register_mr);
1489 1481 return (IBT_HCA_HDL_INVALID);
1490 1482 }
1491 1483
1492 1484 /* Check for valid PD handle pointer */
1493 1485 if (pd == NULL) {
1494 1486 TNF_PROBE_0(tavor_ci_register_mr_invpdhdl_fail,
1495 1487 TAVOR_TNF_ERROR, "");
1496 1488 TAVOR_TNF_EXIT(tavor_ci_register_mr);
1497 1489 return (IBT_PD_HDL_INVALID);
1498 1490 }
1499 1491
1500 1492 /*
1501 1493 * Validate the access flags. Both Remote Write and Remote Atomic
1502 1494 * require the Local Write flag to be set
1503 1495 */
1504 1496 if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1505 1497 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1506 1498 !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1507 1499 TNF_PROBE_0(tavor_ci_register_mr_inv_accflags_fail,
1508 1500 TAVOR_TNF_ERROR, "");
1509 1501 TAVOR_TNF_EXIT(tavor_ci_register_mr);
1510 1502 return (IBT_MR_ACCESS_REQ_INVALID);
1511 1503 }
1512 1504
1513 1505 /* Grab the Tavor softstate pointer and PD handle */
1514 1506 state = (tavor_state_t *)hca;
1515 1507 pdhdl = (tavor_pdhdl_t)pd;
1516 1508
1517 1509 /* Register the memory region */
↓ open down ↓ |
28 lines elided |
↑ open up ↑ |
1518 1510 op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
1519 1511 op.mro_bind_dmahdl = NULL;
1520 1512 op.mro_bind_override_addr = 0;
1521 1513 status = tavor_mr_register(state, pdhdl, mr_attr, &mrhdl, &op);
1522 1514 if (status != DDI_SUCCESS) {
1523 1515 TNF_PROBE_1(tavor_ci_register_mr_fail, TAVOR_TNF_ERROR, "",
1524 1516 tnf_uint, status, status);
1525 1517 TAVOR_TNF_EXIT(tavor_ci_register_mr);
1526 1518 return (status);
1527 1519 }
1528 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
1529 1520
1530 1521 /* Fill in the mr_desc structure */
1531 1522 mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
1532 1523 mr_desc->md_lkey = mrhdl->mr_lkey;
1533 1524 /* Only set RKey if remote access was requested */
1534 1525 if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1535 1526 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1536 1527 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1537 1528 mr_desc->md_rkey = mrhdl->mr_rkey;
1538 1529 }
1539 1530
1540 1531 /*
1541 1532 * If region is mapped for streaming (i.e. noncoherent), then set
1542 1533 * sync is required
1543 1534 */
1544 1535 mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
1545 1536 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1546 1537
1547 1538 /* Return the Tavor MR handle */
1548 1539 *mr_p = (ibc_mr_hdl_t)mrhdl;
1549 1540
1550 1541 TAVOR_TNF_EXIT(tavor_ci_register_mr);
1551 1542 return (IBT_SUCCESS);
1552 1543 }
1553 1544
1554 1545
1555 1546 /*
1556 1547 * tavor_ci_register_buf()
1557 1548 * Prepare a Memory Region specified by buf structure for use by an HCA
1558 1549 * Context: Can be called from interrupt or base context.
1559 1550 */
1560 1551 /* ARGSUSED */
1561 1552 static ibt_status_t
1562 1553 tavor_ci_register_buf(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1563 1554 ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
1564 1555 ibt_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
↓ open down ↓ |
26 lines elided |
↑ open up ↑ |
1565 1556 {
1566 1557 tavor_mr_options_t op;
1567 1558 tavor_state_t *state;
1568 1559 tavor_pdhdl_t pdhdl;
1569 1560 tavor_mrhdl_t mrhdl;
1570 1561 int status;
1571 1562 ibt_mr_flags_t flags = attrp->mr_flags;
1572 1563
1573 1564 TAVOR_TNF_ENTER(tavor_ci_register_buf);
1574 1565
1575 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1576 -
1577 1566 ASSERT(mr_p != NULL);
1578 1567 ASSERT(mr_desc != NULL);
1579 1568
1580 1569 /* Check for valid HCA handle */
1581 1570 if (hca == NULL) {
1582 1571 TNF_PROBE_0(tavor_ci_register_buf_invhca_fail,
1583 1572 TAVOR_TNF_ERROR, "");
1584 1573 TAVOR_TNF_EXIT(tavor_ci_register_buf);
1585 1574 return (IBT_HCA_HDL_INVALID);
1586 1575 }
1587 1576
1588 1577 /* Check for valid PD handle pointer */
1589 1578 if (pd == NULL) {
1590 1579 TNF_PROBE_0(tavor_ci_register_buf_invpdhdl_fail,
1591 1580 TAVOR_TNF_ERROR, "");
1592 1581 TAVOR_TNF_EXIT(tavor_ci_register_buf);
1593 1582 return (IBT_PD_HDL_INVALID);
1594 1583 }
1595 1584
1596 1585 /*
1597 1586 * Validate the access flags. Both Remote Write and Remote Atomic
1598 1587 * require the Local Write flag to be set
1599 1588 */
1600 1589 if (((flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1601 1590 (flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1602 1591 !(flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1603 1592 TNF_PROBE_0(tavor_ci_register_buf_accflags_inv,
1604 1593 TAVOR_TNF_ERROR, "");
1605 1594 TAVOR_TNF_EXIT(tavor_ci_register_buf);
1606 1595 return (IBT_MR_ACCESS_REQ_INVALID);
1607 1596 }
1608 1597
1609 1598 /* Grab the Tavor softstate pointer and PD handle */
1610 1599 state = (tavor_state_t *)hca;
1611 1600 pdhdl = (tavor_pdhdl_t)pd;
1612 1601
1613 1602 /* Register the memory region */
↓ open down ↓ |
27 lines elided |
↑ open up ↑ |
1614 1603 op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
1615 1604 op.mro_bind_dmahdl = NULL;
1616 1605 op.mro_bind_override_addr = 0;
1617 1606 status = tavor_mr_register_buf(state, pdhdl, attrp, buf, &mrhdl, &op);
1618 1607 if (status != DDI_SUCCESS) {
1619 1608 TNF_PROBE_1(tavor_ci_register_mr_fail, TAVOR_TNF_ERROR, "",
1620 1609 tnf_uint, status, status);
1621 1610 TAVOR_TNF_EXIT(tavor_ci_register_mr);
1622 1611 return (status);
1623 1612 }
1624 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
1625 1613
1626 1614 /* Fill in the mr_desc structure */
1627 1615 mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
1628 1616 mr_desc->md_lkey = mrhdl->mr_lkey;
1629 1617 /* Only set RKey if remote access was requested */
1630 1618 if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1631 1619 (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1632 1620 (flags & IBT_MR_ENABLE_REMOTE_READ)) {
1633 1621 mr_desc->md_rkey = mrhdl->mr_rkey;
1634 1622 }
1635 1623
1636 1624 /*
1637 1625 * If region is mapped for streaming (i.e. noncoherent), then set
1638 1626 * sync is required
1639 1627 */
1640 1628 mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
1641 1629 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1642 1630
1643 1631 /* Return the Tavor MR handle */
1644 1632 *mr_p = (ibc_mr_hdl_t)mrhdl;
1645 1633
1646 1634 TAVOR_TNF_EXIT(tavor_ci_register_buf);
1647 1635 return (IBT_SUCCESS);
1648 1636 }
1649 1637
1650 1638
1651 1639 /*
1652 1640 * tavor_ci_deregister_mr()
1653 1641 * Deregister a Memory Region from an HCA translation table
1654 1642 * Context: Can be called only from user or kernel context.
1655 1643 */
1656 1644 static ibt_status_t
1657 1645 tavor_ci_deregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
1658 1646 {
1659 1647 tavor_state_t *state;
1660 1648 tavor_mrhdl_t mrhdl;
1661 1649 int status;
1662 1650
1663 1651 TAVOR_TNF_ENTER(tavor_ci_deregister_mr);
1664 1652
1665 1653 /* Check for valid HCA handle */
1666 1654 if (hca == NULL) {
1667 1655 TNF_PROBE_0(tavor_ci_deregister_mr_invhca_fail,
1668 1656 TAVOR_TNF_ERROR, "");
1669 1657 TAVOR_TNF_EXIT(tavor_ci_deregister_mr);
1670 1658 return (IBT_HCA_HDL_INVALID);
1671 1659 }
1672 1660
1673 1661 /* Check for valid memory region handle */
1674 1662 if (mr == NULL) {
1675 1663 TNF_PROBE_0(tavor_ci_deregister_mr_invmrhdl_fail,
1676 1664 TAVOR_TNF_ERROR, "");
1677 1665 TAVOR_TNF_EXIT(tavor_ci_deregister_mr);
1678 1666 return (IBT_MR_HDL_INVALID);
1679 1667 }
1680 1668
1681 1669 /* Grab the Tavor softstate pointer */
1682 1670 state = (tavor_state_t *)hca;
1683 1671 mrhdl = (tavor_mrhdl_t)mr;
1684 1672
1685 1673 /*
1686 1674 * Deregister the memory region.
1687 1675 */
1688 1676 status = tavor_mr_deregister(state, &mrhdl, TAVOR_MR_DEREG_ALL,
1689 1677 TAVOR_NOSLEEP);
1690 1678 if (status != DDI_SUCCESS) {
1691 1679 TNF_PROBE_1(tavor_ci_deregister_mr_fail,
1692 1680 TAVOR_TNF_ERROR, "", tnf_uint, status, status);
1693 1681 TAVOR_TNF_EXIT(tavor_ci_deregister_mr);
1694 1682 return (status);
1695 1683 }
1696 1684
1697 1685 TAVOR_TNF_EXIT(tavor_ci_deregister_mr);
1698 1686 return (IBT_SUCCESS);
1699 1687 }
1700 1688
1701 1689
1702 1690 /*
1703 1691 * tavor_ci_query_mr()
1704 1692 * Retrieve information about a specified Memory Region
1705 1693 * Context: Can be called from interrupt or base context.
1706 1694 */
1707 1695 static ibt_status_t
1708 1696 tavor_ci_query_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
1709 1697 ibt_mr_query_attr_t *mr_attr)
1710 1698 {
1711 1699 tavor_state_t *state;
1712 1700 tavor_mrhdl_t mrhdl;
1713 1701 int status;
1714 1702
1715 1703 TAVOR_TNF_ENTER(tavor_ci_query_mr);
1716 1704
1717 1705 ASSERT(mr_attr != NULL);
1718 1706
1719 1707 /* Check for valid HCA handle */
1720 1708 if (hca == NULL) {
1721 1709 TNF_PROBE_0(tavor_ci_query_mr_invhca_fail,
1722 1710 TAVOR_TNF_ERROR, "");
1723 1711 TAVOR_TNF_EXIT(tavor_ci_query_mr);
1724 1712 return (IBT_HCA_HDL_INVALID);
1725 1713 }
1726 1714
1727 1715 /* Check for MemRegion handle */
1728 1716 if (mr == NULL) {
1729 1717 TNF_PROBE_0(tavor_ci_query_mr_invmrhdl_fail,
1730 1718 TAVOR_TNF_ERROR, "");
1731 1719 TAVOR_TNF_EXIT(tavor_ci_query_mr);
1732 1720 return (IBT_MR_HDL_INVALID);
1733 1721 }
1734 1722
1735 1723 /* Grab the Tavor softstate pointer and MR handle */
1736 1724 state = (tavor_state_t *)hca;
1737 1725 mrhdl = (tavor_mrhdl_t)mr;
1738 1726
1739 1727 /* Query the memory region */
1740 1728 status = tavor_mr_query(state, mrhdl, mr_attr);
1741 1729 if (status != DDI_SUCCESS) {
1742 1730 TNF_PROBE_1(tavor_ci_query_mr_fail, TAVOR_TNF_ERROR, "",
1743 1731 tnf_uint, status, status);
1744 1732 TAVOR_TNF_EXIT(tavor_ci_query_mr);
1745 1733 return (status);
1746 1734 }
1747 1735
1748 1736 TAVOR_TNF_EXIT(tavor_ci_query_mr);
1749 1737 return (IBT_SUCCESS);
1750 1738 }
1751 1739
1752 1740
1753 1741 /*
1754 1742 * tavor_ci_register_shared_mr()
1755 1743 * Create a shared memory region matching an existing Memory Region
1756 1744 * Context: Can be called from interrupt or base context.
1757 1745 */
1758 1746 /* ARGSUSED */
1759 1747 static ibt_status_t
1760 1748 tavor_ci_register_shared_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
↓ open down ↓ |
126 lines elided |
↑ open up ↑ |
1761 1749 ibc_pd_hdl_t pd, ibt_smr_attr_t *mr_attr, void *ibtl_reserved,
1762 1750 ibc_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
1763 1751 {
1764 1752 tavor_state_t *state;
1765 1753 tavor_pdhdl_t pdhdl;
1766 1754 tavor_mrhdl_t mrhdl, mrhdl_new;
1767 1755 int status;
1768 1756
1769 1757 TAVOR_TNF_ENTER(tavor_ci_register_shared_mr);
1770 1758
1771 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1772 -
1773 1759 ASSERT(mr_attr != NULL);
1774 1760 ASSERT(mr_p != NULL);
1775 1761 ASSERT(mr_desc != NULL);
1776 1762
1777 1763 /* Check for valid HCA handle */
1778 1764 if (hca == NULL) {
1779 1765 TNF_PROBE_0(tavor_ci_register_shared_mr_invhca_fail,
1780 1766 TAVOR_TNF_ERROR, "");
1781 1767 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1782 1768 return (IBT_HCA_HDL_INVALID);
1783 1769 }
1784 1770
1785 1771 /* Check for valid PD handle pointer */
1786 1772 if (pd == NULL) {
1787 1773 TNF_PROBE_0(tavor_ci_register_shared_mr_invpdhdl_fail,
1788 1774 TAVOR_TNF_ERROR, "");
1789 1775 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1790 1776 return (IBT_PD_HDL_INVALID);
1791 1777 }
1792 1778
1793 1779 /* Check for valid memory region handle */
1794 1780 if (mr == NULL) {
1795 1781 TNF_PROBE_0(tavor_ci_register_shared_mr_invmrhdl_fail,
1796 1782 TAVOR_TNF_ERROR, "");
1797 1783 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1798 1784 return (IBT_MR_HDL_INVALID);
1799 1785 }
1800 1786 /*
1801 1787 * Validate the access flags. Both Remote Write and Remote Atomic
1802 1788 * require the Local Write flag to be set
1803 1789 */
1804 1790 if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1805 1791 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1806 1792 !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1807 1793 TNF_PROBE_0(tavor_ci_register_shared_mr_accflags_inv,
1808 1794 TAVOR_TNF_ERROR, "");
1809 1795 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1810 1796 return (IBT_MR_ACCESS_REQ_INVALID);
1811 1797 }
1812 1798
1813 1799 /* Grab the Tavor softstate pointer and handles */
1814 1800 state = (tavor_state_t *)hca;
1815 1801 pdhdl = (tavor_pdhdl_t)pd;
1816 1802 mrhdl = (tavor_mrhdl_t)mr;
↓ open down ↓ |
34 lines elided |
↑ open up ↑ |
1817 1803
1818 1804 /* Register the shared memory region */
1819 1805 status = tavor_mr_register_shared(state, mrhdl, pdhdl, mr_attr,
1820 1806 &mrhdl_new);
1821 1807 if (status != DDI_SUCCESS) {
1822 1808 TNF_PROBE_1(tavor_ci_register_shared_mr_fail, TAVOR_TNF_ERROR,
1823 1809 "", tnf_uint, status, status);
1824 1810 TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1825 1811 return (status);
1826 1812 }
1827 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1828 1813
1829 1814 /* Fill in the mr_desc structure */
1830 1815 mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1831 1816 mr_desc->md_lkey = mrhdl_new->mr_lkey;
1832 1817 /* Only set RKey if remote access was requested */
1833 1818 if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1834 1819 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1835 1820 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1836 1821 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1837 1822 }
1838 1823
1839 1824 /*
1840 1825 * If shared region is mapped for streaming (i.e. noncoherent), then
1841 1826 * set sync is required
1842 1827 */
1843 1828 mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1844 1829 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1845 1830
1846 1831 /* Return the Tavor MR handle */
1847 1832 *mr_p = (ibc_mr_hdl_t)mrhdl_new;
1848 1833
1849 1834 TAVOR_TNF_EXIT(tavor_ci_register_mr);
1850 1835 return (IBT_SUCCESS);
1851 1836 }
1852 1837
1853 1838
1854 1839 /*
1855 1840 * tavor_ci_reregister_mr()
1856 1841 * Modify the attributes of an existing Memory Region
1857 1842 * Context: Can be called from interrupt or base context.
1858 1843 */
1859 1844 /* ARGSUSED */
1860 1845 static ibt_status_t
1861 1846 tavor_ci_reregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
1862 1847 ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_new,
↓ open down ↓ |
25 lines elided |
↑ open up ↑ |
1863 1848 ibt_mr_desc_t *mr_desc)
1864 1849 {
1865 1850 tavor_mr_options_t op;
1866 1851 tavor_state_t *state;
1867 1852 tavor_pdhdl_t pdhdl;
1868 1853 tavor_mrhdl_t mrhdl, mrhdl_new;
1869 1854 int status;
1870 1855
1871 1856 TAVOR_TNF_ENTER(tavor_ci_reregister_mr);
1872 1857
1873 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1874 -
1875 1858 ASSERT(mr_attr != NULL);
1876 1859 ASSERT(mr_new != NULL);
1877 1860 ASSERT(mr_desc != NULL);
1878 1861
1879 1862 /* Check for valid HCA handle */
1880 1863 if (hca == NULL) {
1881 1864 TNF_PROBE_0(tavor_ci_reregister_mr_hca_inv, TAVOR_TNF_ERROR,
1882 1865 "");
1883 1866 TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
1884 1867 return (IBT_HCA_HDL_INVALID);
1885 1868 }
1886 1869
1887 1870 /* Check for valid memory region handle */
1888 1871 if (mr == NULL) {
1889 1872 TNF_PROBE_0(tavor_ci_reregister_mr_invmrhdl_fail,
1890 1873 TAVOR_TNF_ERROR, "");
1891 1874 TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
1892 1875 return (IBT_MR_HDL_INVALID);
1893 1876 }
1894 1877
1895 1878 /* Grab the Tavor softstate pointer, mrhdl, and pdhdl */
1896 1879 state = (tavor_state_t *)hca;
1897 1880 mrhdl = (tavor_mrhdl_t)mr;
1898 1881 pdhdl = (tavor_pdhdl_t)pd;
1899 1882
↓ open down ↓ |
15 lines elided |
↑ open up ↑ |
1900 1883 /* Reregister the memory region */
1901 1884 op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
1902 1885 status = tavor_mr_reregister(state, mrhdl, pdhdl, mr_attr,
1903 1886 &mrhdl_new, &op);
1904 1887 if (status != DDI_SUCCESS) {
1905 1888 TNF_PROBE_1(tavor_ci_reregister_mr_fail, TAVOR_TNF_ERROR, "",
1906 1889 tnf_uint, status, status);
1907 1890 TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
1908 1891 return (status);
1909 1892 }
1910 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1911 1893
1912 1894 /* Fill in the mr_desc structure */
1913 1895 mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1914 1896 mr_desc->md_lkey = mrhdl_new->mr_lkey;
1915 1897 /* Only set RKey if remote access was requested */
1916 1898 if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1917 1899 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1918 1900 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1919 1901 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1920 1902 }
1921 1903
1922 1904 /*
1923 1905 * If region is mapped for streaming (i.e. noncoherent), then set
1924 1906 * sync is required
1925 1907 */
1926 1908 mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1927 1909 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1928 1910
1929 1911 /* Return the Tavor MR handle */
1930 1912 *mr_new = (ibc_mr_hdl_t)mrhdl_new;
1931 1913
1932 1914 TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
1933 1915 return (IBT_SUCCESS);
1934 1916 }
1935 1917
1936 1918
1937 1919 /*
1938 1920 * tavor_ci_reregister_buf()
1939 1921 * Modify the attributes of an existing Memory Region
1940 1922 * Context: Can be called from interrupt or base context.
1941 1923 */
1942 1924 /* ARGSUSED */
1943 1925 static ibt_status_t
1944 1926 tavor_ci_reregister_buf(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
1945 1927 ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
1946 1928 ibc_mr_hdl_t *mr_new, ibt_mr_desc_t *mr_desc)
↓ open down ↓ |
26 lines elided |
↑ open up ↑ |
1947 1929 {
1948 1930 tavor_mr_options_t op;
1949 1931 tavor_state_t *state;
1950 1932 tavor_pdhdl_t pdhdl;
1951 1933 tavor_mrhdl_t mrhdl, mrhdl_new;
1952 1934 int status;
1953 1935 ibt_mr_flags_t flags = attrp->mr_flags;
1954 1936
1955 1937 TAVOR_TNF_ENTER(tavor_ci_reregister_buf);
1956 1938
1957 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1958 -
1959 1939 ASSERT(mr_new != NULL);
1960 1940 ASSERT(mr_desc != NULL);
1961 1941
1962 1942 /* Check for valid HCA handle */
1963 1943 if (hca == NULL) {
1964 1944 TNF_PROBE_0(tavor_ci_reregister_buf_hca_inv, TAVOR_TNF_ERROR,
1965 1945 "");
1966 1946 TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
1967 1947 return (IBT_HCA_HDL_INVALID);
1968 1948 }
1969 1949
1970 1950 /* Check for valid memory region handle */
1971 1951 if (mr == NULL) {
1972 1952 TNF_PROBE_0(tavor_ci_reregister_buf_invmrhdl_fail,
1973 1953 TAVOR_TNF_ERROR, "");
1974 1954 TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
1975 1955 return (IBT_MR_HDL_INVALID);
1976 1956 }
1977 1957
1978 1958 /* Grab the Tavor softstate pointer, mrhdl, and pdhdl */
1979 1959 state = (tavor_state_t *)hca;
1980 1960 mrhdl = (tavor_mrhdl_t)mr;
1981 1961 pdhdl = (tavor_pdhdl_t)pd;
1982 1962
↓ open down ↓ |
14 lines elided |
↑ open up ↑ |
1983 1963 /* Reregister the memory region */
1984 1964 op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
1985 1965 status = tavor_mr_reregister_buf(state, mrhdl, pdhdl, attrp, buf,
1986 1966 &mrhdl_new, &op);
1987 1967 if (status != DDI_SUCCESS) {
1988 1968 TNF_PROBE_1(tavor_ci_reregister_buf_fail, TAVOR_TNF_ERROR, "",
1989 1969 tnf_uint, status, status);
1990 1970 TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
1991 1971 return (status);
1992 1972 }
1993 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1994 1973
1995 1974 /* Fill in the mr_desc structure */
1996 1975 mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1997 1976 mr_desc->md_lkey = mrhdl_new->mr_lkey;
1998 1977 /* Only set RKey if remote access was requested */
1999 1978 if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
2000 1979 (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
2001 1980 (flags & IBT_MR_ENABLE_REMOTE_READ)) {
2002 1981 mr_desc->md_rkey = mrhdl_new->mr_rkey;
2003 1982 }
2004 1983
2005 1984 /*
2006 1985 * If region is mapped for streaming (i.e. noncoherent), then set
2007 1986 * sync is required
2008 1987 */
2009 1988 mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
2010 1989 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
2011 1990
2012 1991 /* Return the Tavor MR handle */
2013 1992 *mr_new = (ibc_mr_hdl_t)mrhdl_new;
2014 1993
2015 1994 TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
2016 1995 return (IBT_SUCCESS);
2017 1996 }
2018 1997
2019 1998 /*
2020 1999 * tavor_ci_sync_mr()
2021 2000 * Synchronize access to a Memory Region
2022 2001 * Context: Can be called from interrupt or base context.
2023 2002 */
2024 2003 static ibt_status_t
2025 2004 tavor_ci_sync_mr(ibc_hca_hdl_t hca, ibt_mr_sync_t *mr_segs, size_t num_segs)
2026 2005 {
2027 2006 tavor_state_t *state;
2028 2007 int status;
2029 2008
2030 2009 TAVOR_TNF_ENTER(tavor_ci_sync_mr);
2031 2010
2032 2011 ASSERT(mr_segs != NULL);
2033 2012
2034 2013 /* Check for valid HCA handle */
2035 2014 if (hca == NULL) {
2036 2015 TNF_PROBE_0(tavor_ci_sync_mr_invhca_fail,
2037 2016 TAVOR_TNF_ERROR, "");
2038 2017 TAVOR_TNF_EXIT(tavor_ci_sync_mr);
2039 2018 return (IBT_HCA_HDL_INVALID);
2040 2019 }
2041 2020
2042 2021 /* Grab the Tavor softstate pointer */
2043 2022 state = (tavor_state_t *)hca;
2044 2023
2045 2024 /* Sync the memory region */
2046 2025 status = tavor_mr_sync(state, mr_segs, num_segs);
2047 2026 if (status != DDI_SUCCESS) {
2048 2027 TNF_PROBE_1(tavor_ci_sync_mr_fail, TAVOR_TNF_ERROR, "",
2049 2028 tnf_uint, status, status);
2050 2029 TAVOR_TNF_EXIT(tavor_ci_sync_mr);
2051 2030 return (status);
2052 2031 }
2053 2032
2054 2033 TAVOR_TNF_EXIT(tavor_ci_sync_mr);
2055 2034 return (IBT_SUCCESS);
2056 2035 }
2057 2036
2058 2037
2059 2038 /*
2060 2039 * tavor_ci_alloc_mw()
2061 2040 * Allocate a Memory Window
2062 2041 * Context: Can be called from interrupt or base context.
2063 2042 */
2064 2043 static ibt_status_t
2065 2044 tavor_ci_alloc_mw(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, ibt_mw_flags_t flags,
2066 2045 ibc_mw_hdl_t *mw_p, ibt_rkey_t *rkey_p)
2067 2046 {
2068 2047 tavor_state_t *state;
2069 2048 tavor_pdhdl_t pdhdl;
2070 2049 tavor_mwhdl_t mwhdl;
2071 2050 int status;
2072 2051
2073 2052 TAVOR_TNF_ENTER(tavor_ci_alloc_mw);
2074 2053
2075 2054 ASSERT(mw_p != NULL);
2076 2055 ASSERT(rkey_p != NULL);
2077 2056
2078 2057 /* Check for valid HCA handle */
2079 2058 if (hca == NULL) {
2080 2059 TNF_PROBE_0(tavor_ci_alloc_mw_invhca_fail,
2081 2060 TAVOR_TNF_ERROR, "");
2082 2061 TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
2083 2062 return (IBT_HCA_HDL_INVALID);
2084 2063 }
2085 2064
2086 2065 /* Check for valid PD handle pointer */
2087 2066 if (pd == NULL) {
2088 2067 TNF_PROBE_0(tavor_ci_alloc_mw_invpdhdl_fail,
2089 2068 TAVOR_TNF_ERROR, "");
2090 2069 TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
2091 2070 return (IBT_PD_HDL_INVALID);
2092 2071 }
2093 2072
2094 2073 /* Grab the Tavor softstate pointer and PD handle */
2095 2074 state = (tavor_state_t *)hca;
↓ open down ↓ |
92 lines elided |
↑ open up ↑ |
2096 2075 pdhdl = (tavor_pdhdl_t)pd;
2097 2076
2098 2077 /* Allocate the memory window */
2099 2078 status = tavor_mw_alloc(state, pdhdl, flags, &mwhdl);
2100 2079 if (status != DDI_SUCCESS) {
2101 2080 TNF_PROBE_1(tavor_ci_alloc_mw_fail, TAVOR_TNF_ERROR, "",
2102 2081 tnf_uint, status, status);
2103 2082 TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
2104 2083 return (status);
2105 2084 }
2106 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mwhdl))
2107 2085
2108 2086 /* Return the MW handle and RKey */
2109 2087 *mw_p = (ibc_mw_hdl_t)mwhdl;
2110 2088 *rkey_p = mwhdl->mr_rkey;
2111 2089
2112 2090 TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
2113 2091 return (IBT_SUCCESS);
2114 2092 }
2115 2093
2116 2094
2117 2095 /*
2118 2096 * tavor_ci_free_mw()
2119 2097 * Free a Memory Window
2120 2098 * Context: Can be called from interrupt or base context.
2121 2099 */
2122 2100 static ibt_status_t
2123 2101 tavor_ci_free_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw)
2124 2102 {
2125 2103 tavor_state_t *state;
2126 2104 tavor_mwhdl_t mwhdl;
2127 2105 int status;
2128 2106
2129 2107 TAVOR_TNF_ENTER(tavor_ci_free_mw);
2130 2108
2131 2109 /* Check for valid HCA handle */
2132 2110 if (hca == NULL) {
2133 2111 TNF_PROBE_0(tavor_ci_free_mw_invhca_fail,
2134 2112 TAVOR_TNF_ERROR, "");
2135 2113 TAVOR_TNF_EXIT(tavor_ci_free_mw);
2136 2114 return (IBT_HCA_HDL_INVALID);
2137 2115 }
2138 2116
2139 2117 /* Check for valid MW handle */
2140 2118 if (mw == NULL) {
2141 2119 TNF_PROBE_0(tavor_ci_free_mw_invmwhdl_fail,
2142 2120 TAVOR_TNF_ERROR, "");
2143 2121 TAVOR_TNF_EXIT(tavor_ci_free_mw);
2144 2122 return (IBT_MW_HDL_INVALID);
2145 2123 }
2146 2124
2147 2125 /* Grab the Tavor softstate pointer and MW handle */
2148 2126 state = (tavor_state_t *)hca;
2149 2127 mwhdl = (tavor_mwhdl_t)mw;
2150 2128
2151 2129 /* Free the memory window */
2152 2130 status = tavor_mw_free(state, &mwhdl, TAVOR_NOSLEEP);
2153 2131 if (status != DDI_SUCCESS) {
2154 2132 TNF_PROBE_1(tavor_ci_free_mw_fail, TAVOR_TNF_ERROR, "",
2155 2133 tnf_uint, status, status);
2156 2134 TAVOR_TNF_EXIT(tavor_ci_free_mw);
2157 2135 return (status);
2158 2136 }
2159 2137
2160 2138 TAVOR_TNF_EXIT(tavor_ci_free_mw);
2161 2139 return (IBT_SUCCESS);
2162 2140 }
2163 2141
2164 2142
2165 2143 /*
2166 2144 * tavor_ci_query_mw()
2167 2145 * Return the attributes of the specified Memory Window
2168 2146 * Context: Can be called from interrupt or base context.
2169 2147 */
2170 2148 static ibt_status_t
2171 2149 tavor_ci_query_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw,
2172 2150 ibt_mw_query_attr_t *mw_attr_p)
2173 2151 {
2174 2152 tavor_mwhdl_t mwhdl;
2175 2153
2176 2154 TAVOR_TNF_ENTER(tavor_ci_query_mw);
2177 2155
2178 2156 ASSERT(mw_attr_p != NULL);
2179 2157
2180 2158 /* Check for valid HCA handle */
2181 2159 if (hca == NULL) {
2182 2160 TNF_PROBE_0(tavor_ci_query_mw_invhca_fail,
2183 2161 TAVOR_TNF_ERROR, "");
2184 2162 TAVOR_TNF_EXIT(tavor_ci_query_mw);
2185 2163 return (IBT_HCA_HDL_INVALID);
2186 2164 }
2187 2165
2188 2166 /* Check for valid MemWin handle */
2189 2167 if (mw == NULL) {
2190 2168 TNF_PROBE_0(tavor_ci_query_mw_inc_mwhdl_fail,
2191 2169 TAVOR_TNF_ERROR, "");
2192 2170 TAVOR_TNF_EXIT(tavor_ci_query_mw);
2193 2171 return (IBT_MW_HDL_INVALID);
2194 2172 }
2195 2173
2196 2174 /* Query the memory window pointer and fill in the return values */
2197 2175 mwhdl = (tavor_mwhdl_t)mw;
2198 2176 mutex_enter(&mwhdl->mr_lock);
2199 2177 mw_attr_p->mw_pd = (ibc_pd_hdl_t)mwhdl->mr_pdhdl;
2200 2178 mw_attr_p->mw_rkey = mwhdl->mr_rkey;
2201 2179 mutex_exit(&mwhdl->mr_lock);
2202 2180
2203 2181 TAVOR_TNF_EXIT(tavor_ci_query_mw);
2204 2182 return (IBT_SUCCESS);
2205 2183 }
2206 2184
2207 2185
2208 2186 /* ARGSUSED */
↓ open down ↓ |
92 lines elided |
↑ open up ↑ |
2209 2187 static ibt_status_t
2210 2188 tavor_ci_register_dma_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2211 2189 ibt_dmr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
2212 2190 ibt_mr_desc_t *mr_desc)
2213 2191 {
2214 2192 tavor_state_t *state;
2215 2193 tavor_pdhdl_t pdhdl;
2216 2194 tavor_mrhdl_t mrhdl;
2217 2195 int status;
2218 2196
2219 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
2220 -
2221 2197 ASSERT(mr_attr != NULL);
2222 2198 ASSERT(mr_p != NULL);
2223 2199 ASSERT(mr_desc != NULL);
2224 2200
2225 2201 /* Check for valid HCA handle */
2226 2202 if (hca == NULL) {
2227 2203 return (IBT_HCA_HDL_INVALID);
2228 2204 }
2229 2205
2230 2206 /* Check for valid PD handle pointer */
2231 2207 if (pd == NULL) {
2232 2208 return (IBT_PD_HDL_INVALID);
2233 2209 }
2234 2210
2235 2211 /*
2236 2212 * Validate the access flags. Both Remote Write and Remote Atomic
2237 2213 * require the Local Write flag to be set
2238 2214 */
2239 2215 if (((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
2240 2216 (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
2241 2217 !(mr_attr->dmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
2242 2218 return (IBT_MR_ACCESS_REQ_INVALID);
↓ open down ↓ |
12 lines elided |
↑ open up ↑ |
2243 2219 }
2244 2220
2245 2221 /* Grab the Tavor softstate pointer and PD handle */
2246 2222 state = (tavor_state_t *)hca;
2247 2223 pdhdl = (tavor_pdhdl_t)pd;
2248 2224
2249 2225 status = tavor_dma_mr_register(state, pdhdl, mr_attr, &mrhdl);
2250 2226 if (status != DDI_SUCCESS) {
2251 2227 return (status);
2252 2228 }
2253 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
2254 2229
2255 2230 /* Fill in the mr_desc structure */
2256 2231 mr_desc->md_vaddr = mr_attr->dmr_paddr;
2257 2232 mr_desc->md_lkey = mrhdl->mr_lkey;
2258 2233 /* Only set RKey if remote access was requested */
2259 2234 if ((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
2260 2235 (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
2261 2236 (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
2262 2237 mr_desc->md_rkey = mrhdl->mr_rkey;
2263 2238 }
2264 2239
2265 2240 /*
2266 2241 * If region is mapped for streaming (i.e. noncoherent), then set
2267 2242 * sync is required
2268 2243 */
2269 2244 mr_desc->md_sync_required = B_FALSE;
2270 2245
2271 2246 /* Return the Hermon MR handle */
2272 2247 *mr_p = (ibc_mr_hdl_t)mrhdl;
2273 2248
2274 2249 return (IBT_SUCCESS);
2275 2250 }
2276 2251
2277 2252
2278 2253 /*
2279 2254 * tavor_ci_attach_mcg()
2280 2255 * Attach a Queue Pair to a Multicast Group
2281 2256 * Context: Can be called only from user or kernel context.
2282 2257 */
2283 2258 static ibt_status_t
2284 2259 tavor_ci_attach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
2285 2260 ib_lid_t lid)
2286 2261 {
2287 2262 tavor_state_t *state;
2288 2263 tavor_qphdl_t qphdl;
2289 2264 int status;
2290 2265
2291 2266 TAVOR_TNF_ENTER(tavor_ci_attach_mcg);
2292 2267
2293 2268 /* Check for valid HCA handle */
2294 2269 if (hca == NULL) {
2295 2270 TNF_PROBE_0(tavor_ci_attach_mcg_invhca_fail,
2296 2271 TAVOR_TNF_ERROR, "");
2297 2272 TAVOR_TNF_EXIT(tavor_ci_attach_mcg);
2298 2273 return (IBT_HCA_HDL_INVALID);
2299 2274 }
2300 2275
2301 2276 /* Check for valid QP handle pointer */
2302 2277 if (qp == NULL) {
2303 2278 TNF_PROBE_0(tavor_ci_attach_mcg_invqphdl_fail,
2304 2279 TAVOR_TNF_ERROR, "");
2305 2280 TAVOR_TNF_EXIT(tavor_ci_attach_mcg);
2306 2281 return (IBT_QP_HDL_INVALID);
2307 2282 }
2308 2283
2309 2284 /* Grab the Tavor softstate pointer and QP handles */
2310 2285 state = (tavor_state_t *)hca;
2311 2286 qphdl = (tavor_qphdl_t)qp;
2312 2287
2313 2288 /* Attach the QP to the multicast group */
2314 2289 status = tavor_mcg_attach(state, qphdl, gid, lid);
2315 2290 if (status != DDI_SUCCESS) {
2316 2291 TNF_PROBE_1(tavor_ci_attach_mcg_fail, TAVOR_TNF_ERROR, "",
2317 2292 tnf_uint, status, status);
2318 2293 TAVOR_TNF_EXIT(tavor_ci_attach_mcg);
2319 2294 return (status);
2320 2295 }
2321 2296
2322 2297 TAVOR_TNF_EXIT(tavor_ci_attach_mcg);
2323 2298 return (IBT_SUCCESS);
2324 2299 }
2325 2300
2326 2301
2327 2302 /*
2328 2303 * tavor_ci_detach_mcg()
2329 2304 * Detach a Queue Pair to a Multicast Group
2330 2305 * Context: Can be called only from user or kernel context.
2331 2306 */
2332 2307 static ibt_status_t
2333 2308 tavor_ci_detach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
2334 2309 ib_lid_t lid)
2335 2310 {
2336 2311 tavor_state_t *state;
2337 2312 tavor_qphdl_t qphdl;
2338 2313 int status;
2339 2314
2340 2315 TAVOR_TNF_ENTER(tavor_ci_attach_mcg);
2341 2316
2342 2317 /* Check for valid HCA handle */
2343 2318 if (hca == NULL) {
2344 2319 TNF_PROBE_0(tavor_ci_detach_mcg_invhca_fail,
2345 2320 TAVOR_TNF_ERROR, "");
2346 2321 TAVOR_TNF_EXIT(tavor_ci_detach_mcg);
2347 2322 return (IBT_HCA_HDL_INVALID);
2348 2323 }
2349 2324
2350 2325 /* Check for valid QP handle pointer */
2351 2326 if (qp == NULL) {
2352 2327 TNF_PROBE_0(tavor_ci_detach_mcg_invqphdl_fail,
2353 2328 TAVOR_TNF_ERROR, "");
2354 2329 TAVOR_TNF_EXIT(tavor_ci_detach_mcg);
2355 2330 return (IBT_QP_HDL_INVALID);
2356 2331 }
2357 2332
2358 2333 /* Grab the Tavor softstate pointer and QP handle */
2359 2334 state = (tavor_state_t *)hca;
2360 2335 qphdl = (tavor_qphdl_t)qp;
2361 2336
2362 2337 /* Detach the QP from the multicast group */
2363 2338 status = tavor_mcg_detach(state, qphdl, gid, lid);
2364 2339 if (status != DDI_SUCCESS) {
2365 2340 TNF_PROBE_1(tavor_ci_detach_mcg_fail, TAVOR_TNF_ERROR, "",
2366 2341 tnf_uint, status, status);
2367 2342 TAVOR_TNF_EXIT(tavor_ci_detach_mcg);
2368 2343 return (status);
2369 2344 }
2370 2345
2371 2346 TAVOR_TNF_EXIT(tavor_ci_detach_mcg);
2372 2347 return (IBT_SUCCESS);
2373 2348 }
2374 2349
2375 2350
2376 2351 /*
2377 2352 * tavor_ci_post_send()
2378 2353 * Post send work requests to the send queue on the specified QP
2379 2354 * Context: Can be called from interrupt or base context.
2380 2355 */
2381 2356 static ibt_status_t
2382 2357 tavor_ci_post_send(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_send_wr_t *wr_p,
2383 2358 uint_t num_wr, uint_t *num_posted_p)
2384 2359 {
2385 2360 tavor_state_t *state;
2386 2361 tavor_qphdl_t qphdl;
2387 2362 int status;
2388 2363
2389 2364 TAVOR_TNF_ENTER(tavor_ci_post_send);
2390 2365
2391 2366 ASSERT(wr_p != NULL);
2392 2367 ASSERT(num_wr != 0);
2393 2368
2394 2369 /* Check for valid HCA handle */
2395 2370 if (hca == NULL) {
2396 2371 TNF_PROBE_0(tavor_ci_post_send_invhca_fail,
2397 2372 TAVOR_TNF_ERROR, "");
2398 2373 TAVOR_TNF_EXIT(tavor_ci_post_send);
2399 2374 return (IBT_HCA_HDL_INVALID);
2400 2375 }
2401 2376
2402 2377 /* Check for valid QP handle pointer */
2403 2378 if (qp == NULL) {
2404 2379 TNF_PROBE_0(tavor_ci_post_send_invqphdl_fail,
2405 2380 TAVOR_TNF_ERROR, "");
2406 2381 TAVOR_TNF_EXIT(tavor_ci_post_send);
2407 2382 return (IBT_QP_HDL_INVALID);
2408 2383 }
2409 2384
2410 2385 /* Grab the Tavor softstate pointer and QP handle */
2411 2386 state = (tavor_state_t *)hca;
2412 2387 qphdl = (tavor_qphdl_t)qp;
2413 2388
2414 2389 /* Post the send WQEs */
2415 2390 status = tavor_post_send(state, qphdl, wr_p, num_wr, num_posted_p);
2416 2391 if (status != DDI_SUCCESS) {
2417 2392 TNF_PROBE_1(tavor_ci_post_send_fail, TAVOR_TNF_ERROR, "",
2418 2393 tnf_uint, status, status);
2419 2394 TAVOR_TNF_EXIT(tavor_ci_post_send);
2420 2395 return (status);
2421 2396 }
2422 2397
2423 2398 TAVOR_TNF_EXIT(tavor_ci_post_send);
2424 2399 return (IBT_SUCCESS);
2425 2400 }
2426 2401
2427 2402
2428 2403 /*
2429 2404 * tavor_ci_post_recv()
2430 2405 * Post receive work requests to the receive queue on the specified QP
2431 2406 * Context: Can be called from interrupt or base context.
2432 2407 */
2433 2408 static ibt_status_t
2434 2409 tavor_ci_post_recv(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_recv_wr_t *wr_p,
2435 2410 uint_t num_wr, uint_t *num_posted_p)
2436 2411 {
2437 2412 tavor_state_t *state;
2438 2413 tavor_qphdl_t qphdl;
2439 2414 int status;
2440 2415
2441 2416 TAVOR_TNF_ENTER(tavor_ci_post_recv);
2442 2417
2443 2418 ASSERT(wr_p != NULL);
2444 2419 ASSERT(num_wr != 0);
2445 2420
2446 2421 /* Check for valid HCA handle */
2447 2422 if (hca == NULL) {
2448 2423 TNF_PROBE_0(tavor_ci_post_recv_invhca_fail,
2449 2424 TAVOR_TNF_ERROR, "");
2450 2425 TAVOR_TNF_EXIT(tavor_ci_post_recv);
2451 2426 return (IBT_HCA_HDL_INVALID);
2452 2427 }
2453 2428
2454 2429 /* Check for valid QP handle pointer */
2455 2430 if (qp == NULL) {
2456 2431 TNF_PROBE_0(tavor_ci_post_recv_invqphdl_fail,
2457 2432 TAVOR_TNF_ERROR, "");
2458 2433 TAVOR_TNF_EXIT(tavor_ci_post_recv);
2459 2434 return (IBT_QP_HDL_INVALID);
2460 2435 }
2461 2436
2462 2437 /* Grab the Tavor softstate pointer and QP handle */
2463 2438 state = (tavor_state_t *)hca;
2464 2439 qphdl = (tavor_qphdl_t)qp;
2465 2440
2466 2441 /* Post the receive WQEs */
2467 2442 status = tavor_post_recv(state, qphdl, wr_p, num_wr, num_posted_p);
2468 2443 if (status != DDI_SUCCESS) {
2469 2444 TNF_PROBE_1(tavor_ci_post_recv_fail, TAVOR_TNF_ERROR, "",
2470 2445 tnf_uint, status, status);
2471 2446 TAVOR_TNF_EXIT(tavor_ci_post_recv);
2472 2447 return (status);
2473 2448 }
2474 2449
2475 2450 TAVOR_TNF_EXIT(tavor_ci_post_recv);
2476 2451 return (IBT_SUCCESS);
2477 2452 }
2478 2453
2479 2454
2480 2455 /*
2481 2456 * tavor_ci_poll_cq()
2482 2457 * Poll for a work request completion
2483 2458 * Context: Can be called from interrupt or base context.
2484 2459 */
2485 2460 static ibt_status_t
2486 2461 tavor_ci_poll_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, ibt_wc_t *wc_p,
2487 2462 uint_t num_wc, uint_t *num_polled)
2488 2463 {
2489 2464 tavor_state_t *state;
2490 2465 tavor_cqhdl_t cqhdl;
2491 2466 uint_t polled;
2492 2467 int status;
2493 2468
2494 2469 TAVOR_TNF_ENTER(tavor_ci_poll_cq);
2495 2470
2496 2471 ASSERT(wc_p != NULL);
2497 2472
2498 2473 /* Check for valid HCA handle */
2499 2474 if (hca == NULL) {
2500 2475 TNF_PROBE_0(tavor_ci_poll_cq_invhca_fail,
2501 2476 TAVOR_TNF_ERROR, "");
2502 2477 TAVOR_TNF_EXIT(tavor_ci_poll_cq);
2503 2478 return (IBT_HCA_HDL_INVALID);
2504 2479 }
2505 2480
2506 2481 /* Check for valid CQ handle pointer */
2507 2482 if (cq == NULL) {
2508 2483 TNF_PROBE_0(tavor_ci_poll_cq_invcqhdl_fail,
2509 2484 TAVOR_TNF_ERROR, "");
2510 2485 TAVOR_TNF_EXIT(tavor_ci_poll_cq);
2511 2486 return (IBT_CQ_HDL_INVALID);
2512 2487 }
2513 2488
2514 2489 /* Check for valid num_wc field */
2515 2490 if (num_wc == 0) {
2516 2491 TNF_PROBE_0(tavor_ci_poll_cq_num_wc_fail,
2517 2492 TAVOR_TNF_ERROR, "");
2518 2493 TAVOR_TNF_EXIT(tavor_ci_poll_cq);
2519 2494 return (IBT_INVALID_PARAM);
2520 2495 }
2521 2496
2522 2497 /* Grab the Tavor softstate pointer and CQ handle */
2523 2498 state = (tavor_state_t *)hca;
2524 2499 cqhdl = (tavor_cqhdl_t)cq;
2525 2500
2526 2501 /* Poll for work request completions */
2527 2502 status = tavor_cq_poll(state, cqhdl, wc_p, num_wc, &polled);
2528 2503
2529 2504 /* First fill in "num_polled" argument (only when valid) */
2530 2505 if (num_polled) {
2531 2506 *num_polled = polled;
2532 2507 }
2533 2508
2534 2509 /*
2535 2510 * Check the status code;
2536 2511 * If empty, we return empty.
2537 2512 * If error, we print out an error and then return
2538 2513 * If success (something was polled), we return success
2539 2514 */
2540 2515 if (status != DDI_SUCCESS) {
2541 2516 if (status != IBT_CQ_EMPTY) {
2542 2517 TNF_PROBE_1(tavor_ci_poll_cq_fail, TAVOR_TNF_ERROR, "",
2543 2518 tnf_uint, status, status);
2544 2519 }
2545 2520 TAVOR_TNF_EXIT(tavor_ci_poll_cq);
2546 2521 return (status);
2547 2522 }
2548 2523
2549 2524 TAVOR_TNF_EXIT(tavor_ci_poll_cq);
2550 2525 return (IBT_SUCCESS);
2551 2526 }
2552 2527
2553 2528
2554 2529 /*
2555 2530 * tavor_ci_notify_cq()
2556 2531 * Enable notification events on the specified CQ
2557 2532 * Context: Can be called from interrupt or base context.
2558 2533 */
2559 2534 static ibt_status_t
2560 2535 tavor_ci_notify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq_hdl,
2561 2536 ibt_cq_notify_flags_t flags)
2562 2537 {
2563 2538 tavor_state_t *state;
2564 2539 tavor_cqhdl_t cqhdl;
2565 2540 int status;
2566 2541
2567 2542 TAVOR_TNF_ENTER(tavor_ci_notify_cq);
2568 2543
2569 2544 /* Check for valid HCA handle */
2570 2545 if (hca == NULL) {
2571 2546 TNF_PROBE_0(tavor_ci_notify_cq_invhca_fail,
2572 2547 TAVOR_TNF_ERROR, "");
2573 2548 TAVOR_TNF_EXIT(tavor_ci_notify_cq);
2574 2549 return (IBT_HCA_HDL_INVALID);
2575 2550 }
2576 2551
2577 2552 /* Check for valid CQ handle pointer */
2578 2553 if (cq_hdl == NULL) {
2579 2554 TNF_PROBE_0(tavor_ci_notify_cq_invcqhdl_fail,
2580 2555 TAVOR_TNF_ERROR, "");
2581 2556 TAVOR_TNF_EXIT(tavor_ci_notify_cq);
2582 2557 return (IBT_CQ_HDL_INVALID);
2583 2558 }
2584 2559
2585 2560 /* Grab the Tavor softstate pointer and CQ handle */
2586 2561 state = (tavor_state_t *)hca;
2587 2562 cqhdl = (tavor_cqhdl_t)cq_hdl;
2588 2563
2589 2564 /* Enable the CQ notification */
2590 2565 status = tavor_cq_notify(state, cqhdl, flags);
2591 2566 if (status != DDI_SUCCESS) {
2592 2567 TNF_PROBE_1(tavor_ci_notify_cq_fail, TAVOR_TNF_ERROR, "",
2593 2568 tnf_uint, status, status);
2594 2569 TAVOR_TNF_EXIT(tavor_ci_notify_cq);
2595 2570 return (status);
2596 2571 }
2597 2572
2598 2573 TAVOR_TNF_EXIT(tavor_ci_notify_cq);
2599 2574 return (IBT_SUCCESS);
2600 2575 }
2601 2576
2602 2577 /*
2603 2578 * tavor_ci_ci_data_in()
2604 2579 * Exchange CI-specific data.
2605 2580 * Context: Can be called only from user or kernel context.
2606 2581 */
2607 2582 static ibt_status_t
2608 2583 tavor_ci_ci_data_in(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
2609 2584 ibt_object_type_t object, void *ibc_object_handle, void *data_p,
2610 2585 size_t data_sz)
2611 2586 {
2612 2587 tavor_state_t *state;
2613 2588 int status;
2614 2589
2615 2590 TAVOR_TNF_ENTER(tavor_ci_ci_data_in);
2616 2591
2617 2592 /* Check for valid HCA handle */
2618 2593 if (hca == NULL) {
2619 2594 TNF_PROBE_0(tavor_ci_ci_data_in_invhca_fail,
2620 2595 TAVOR_TNF_ERROR, "");
2621 2596 TAVOR_TNF_EXIT(tavor_ci_ci_data_in);
2622 2597 return (IBT_HCA_HDL_INVALID);
2623 2598 }
2624 2599
2625 2600 /* Grab the Tavor softstate pointer */
2626 2601 state = (tavor_state_t *)hca;
2627 2602
2628 2603 /* Get the Tavor userland mapping information */
2629 2604 status = tavor_umap_ci_data_in(state, flags, object,
2630 2605 ibc_object_handle, data_p, data_sz);
2631 2606 if (status != DDI_SUCCESS) {
2632 2607 TNF_PROBE_1(tavor_ci_ci_data_in_umap_fail, TAVOR_TNF_ERROR,
2633 2608 "", tnf_uint, status, status);
2634 2609 TAVOR_TNF_EXIT(tavor_ci_ci_data_in);
2635 2610 return (status);
2636 2611 }
2637 2612
2638 2613 TAVOR_TNF_EXIT(tavor_ci_ci_data_in);
2639 2614 return (IBT_SUCCESS);
2640 2615 }
2641 2616
2642 2617 /*
2643 2618 * tavor_ci_ci_data_out()
2644 2619 * Exchange CI-specific data.
2645 2620 * Context: Can be called only from user or kernel context.
2646 2621 */
2647 2622 static ibt_status_t
2648 2623 tavor_ci_ci_data_out(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
2649 2624 ibt_object_type_t object, void *ibc_object_handle, void *data_p,
2650 2625 size_t data_sz)
2651 2626 {
2652 2627 tavor_state_t *state;
2653 2628 int status;
2654 2629
2655 2630 TAVOR_TNF_ENTER(tavor_ci_ci_data_out);
2656 2631
2657 2632 /* Check for valid HCA handle */
2658 2633 if (hca == NULL) {
2659 2634 TNF_PROBE_0(tavor_ci_ci_data_out_invhca_fail,
2660 2635 TAVOR_TNF_ERROR, "");
2661 2636 TAVOR_TNF_EXIT(tavor_ci_ci_data_out);
2662 2637 return (IBT_HCA_HDL_INVALID);
2663 2638 }
2664 2639
2665 2640 /* Grab the Tavor softstate pointer */
2666 2641 state = (tavor_state_t *)hca;
2667 2642
2668 2643 /* Get the Tavor userland mapping information */
2669 2644 status = tavor_umap_ci_data_out(state, flags, object,
2670 2645 ibc_object_handle, data_p, data_sz);
2671 2646 if (status != DDI_SUCCESS) {
2672 2647 TNF_PROBE_1(tavor_ci_ci_data_out_umap_fail, TAVOR_TNF_ERROR,
2673 2648 "", tnf_uint, status, status);
2674 2649 TAVOR_TNF_EXIT(tavor_ci_ci_data_out);
2675 2650 return (status);
2676 2651 }
2677 2652
2678 2653 TAVOR_TNF_EXIT(tavor_ci_ci_data_out);
2679 2654 return (IBT_SUCCESS);
2680 2655 }
2681 2656
2682 2657
2683 2658 /*
2684 2659 * tavor_ci_alloc_srq()
2685 2660 * Allocate a Shared Receive Queue (SRQ)
2686 2661 * Context: Can be called only from user or kernel context
2687 2662 */
2688 2663 static ibt_status_t
2689 2664 tavor_ci_alloc_srq(ibc_hca_hdl_t hca, ibt_srq_flags_t flags,
2690 2665 ibt_srq_hdl_t ibt_srq, ibc_pd_hdl_t pd, ibt_srq_sizes_t *sizes,
2691 2666 ibc_srq_hdl_t *ibc_srq_p, ibt_srq_sizes_t *ret_sizes_p)
2692 2667 {
2693 2668 tavor_state_t *state;
2694 2669 tavor_pdhdl_t pdhdl;
2695 2670 tavor_srqhdl_t srqhdl;
2696 2671 tavor_srq_info_t srqinfo;
2697 2672 tavor_srq_options_t op;
2698 2673 int status;
2699 2674
2700 2675 TAVOR_TNF_ENTER(tavor_ci_alloc_srq);
2701 2676
2702 2677 /* Check for valid HCA handle */
2703 2678 if (hca == NULL) {
2704 2679 TNF_PROBE_0(tavor_ci_alloc_srq_invhca_fail,
2705 2680 TAVOR_TNF_ERROR, "");
2706 2681 TAVOR_TNF_EXIT(tavor_alloc_srq);
2707 2682 return (IBT_HCA_HDL_INVALID);
2708 2683 }
2709 2684
2710 2685 state = (tavor_state_t *)hca;
2711 2686
2712 2687 /* Check if SRQ is even supported */
2713 2688 if (state->ts_cfg_profile->cp_srq_enable == 0) {
2714 2689 TNF_PROBE_0(tavor_ci_alloc_srq_not_supported_fail,
2715 2690 TAVOR_TNF_ERROR, "");
2716 2691 TAVOR_TNF_EXIT(tavor_ci_alloc_srq);
2717 2692 return (IBT_NOT_SUPPORTED);
2718 2693 }
2719 2694
2720 2695 /* Check for valid PD handle pointer */
2721 2696 if (pd == NULL) {
2722 2697 TNF_PROBE_0(tavor_ci_alloc_srq_invpdhdl_fail,
2723 2698 TAVOR_TNF_ERROR, "");
2724 2699 TAVOR_TNF_EXIT(tavor_ci_alloc_srq);
2725 2700 return (IBT_PD_HDL_INVALID);
2726 2701 }
2727 2702
2728 2703 pdhdl = (tavor_pdhdl_t)pd;
2729 2704
2730 2705 srqinfo.srqi_ibt_srqhdl = ibt_srq;
2731 2706 srqinfo.srqi_pd = pdhdl;
2732 2707 srqinfo.srqi_sizes = sizes;
2733 2708 srqinfo.srqi_real_sizes = ret_sizes_p;
2734 2709 srqinfo.srqi_srqhdl = &srqhdl;
2735 2710 srqinfo.srqi_flags = flags;
2736 2711 op.srqo_wq_loc = state->ts_cfg_profile->cp_srq_wq_inddr;
2737 2712 status = tavor_srq_alloc(state, &srqinfo, TAVOR_NOSLEEP, &op);
2738 2713 if (status != DDI_SUCCESS) {
2739 2714 TAVOR_TNF_EXIT(tavor_ci_alloc_srq);
2740 2715 return (status);
2741 2716 }
2742 2717
2743 2718 *ibc_srq_p = (ibc_srq_hdl_t)srqhdl;
2744 2719
2745 2720 TAVOR_TNF_EXIT(tavor_ci_alloc_srq);
2746 2721 return (IBT_SUCCESS);
2747 2722 }
2748 2723
2749 2724 /*
2750 2725 * tavor_ci_free_srq()
2751 2726 * Free a Shared Receive Queue (SRQ)
2752 2727 * Context: Can be called only from user or kernel context
2753 2728 */
2754 2729 static ibt_status_t
2755 2730 tavor_ci_free_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq)
2756 2731 {
2757 2732 tavor_state_t *state;
2758 2733 tavor_srqhdl_t srqhdl;
2759 2734 int status;
2760 2735
2761 2736 TAVOR_TNF_ENTER(tavor_ci_free_srq);
2762 2737
2763 2738 /* Check for valid HCA handle */
2764 2739 if (hca == NULL) {
2765 2740 TNF_PROBE_0(tavor_ci_free_srq_invhca_fail,
2766 2741 TAVOR_TNF_ERROR, "");
2767 2742 TAVOR_TNF_EXIT(tavor_ci_free_srq);
2768 2743 return (IBT_HCA_HDL_INVALID);
2769 2744 }
2770 2745
2771 2746 state = (tavor_state_t *)hca;
2772 2747
2773 2748 /* Check if SRQ is even supported */
2774 2749 if (state->ts_cfg_profile->cp_srq_enable == 0) {
2775 2750 TNF_PROBE_0(tavor_ci_alloc_srq_not_supported_fail,
2776 2751 TAVOR_TNF_ERROR, "");
2777 2752 TAVOR_TNF_EXIT(tavor_ci_free_srq);
2778 2753 return (IBT_NOT_SUPPORTED);
2779 2754 }
2780 2755
2781 2756 /* Check for valid SRQ handle pointer */
2782 2757 if (srq == NULL) {
2783 2758 TNF_PROBE_0(tavor_ci_free_srq_invsrqhdl_fail,
2784 2759 TAVOR_TNF_ERROR, "");
2785 2760 TAVOR_TNF_EXIT(tavor_ci_free_srq);
2786 2761 return (IBT_SRQ_HDL_INVALID);
2787 2762 }
2788 2763
2789 2764 srqhdl = (tavor_srqhdl_t)srq;
2790 2765
2791 2766 /* Free the SRQ */
2792 2767 status = tavor_srq_free(state, &srqhdl, TAVOR_NOSLEEP);
2793 2768 if (status != DDI_SUCCESS) {
2794 2769 TNF_PROBE_1(tavor_ci_free_srq_fail, TAVOR_TNF_ERROR, "",
2795 2770 tnf_uint, status, status);
2796 2771 TAVOR_TNF_EXIT(tavor_ci_free_srq);
2797 2772 return (status);
2798 2773 }
2799 2774
2800 2775 TAVOR_TNF_EXIT(tavor_ci_free_srq);
2801 2776 return (IBT_SUCCESS);
2802 2777 }
2803 2778
/*
 * tavor_ci_query_srq()
 *    Query properties of a Shared Receive Queue (SRQ)
 *    Context: Can be called from interrupt or base context.
 *
 *    Returns the SRQ's protection domain via pd_p and its real (allocated)
 *    WR/SGL sizes via sizes_p.  The limit watermark is not implemented on
 *    Tavor, so *limit_p is always reported as zero.
 */
static ibt_status_t
tavor_ci_query_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq, ibc_pd_hdl_t *pd_p,
    ibt_srq_sizes_t *sizes_p, uint_t *limit_p)
{
	tavor_state_t	*state;
	tavor_srqhdl_t	srqhdl;

	TAVOR_TNF_ENTER(tavor_ci_query_srq);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_query_srq_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_query_srq);
		return (IBT_HCA_HDL_INVALID);
	}

	state = (tavor_state_t *)hca;

	/* Check if SRQ is even supported (config-profile controlled) */
	if (state->ts_cfg_profile->cp_srq_enable == 0) {
		TNF_PROBE_0(tavor_ci_query_srq_not_supported_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_query_srq);
		return (IBT_NOT_SUPPORTED);
	}

	/* Check for valid SRQ handle pointer */
	if (srq == NULL) {
		TNF_PROBE_0(tavor_ci_query_srq_invsrqhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_query_srq);
		return (IBT_SRQ_HDL_INVALID);
	}

	srqhdl = (tavor_srqhdl_t)srq;

	/*
	 * Read the PD and real sizes under srq_lock so that they are
	 * consistent with the error-state check; an SRQ in the ERROR state
	 * cannot be queried.
	 */
	mutex_enter(&srqhdl->srq_lock);
	if (srqhdl->srq_state == TAVOR_SRQ_STATE_ERROR) {
		mutex_exit(&srqhdl->srq_lock);
		TNF_PROBE_0(tavor_ci_query_srq_error_state,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_query_srq);
		return (IBT_SRQ_ERROR_STATE);
	}

	*pd_p = (ibc_pd_hdl_t)srqhdl->srq_pdhdl;
	sizes_p->srq_wr_sz = srqhdl->srq_real_sizes.srq_wr_sz;
	sizes_p->srq_sgl_sz = srqhdl->srq_real_sizes.srq_sgl_sz;
	mutex_exit(&srqhdl->srq_lock);
	/* Limit watermark unsupported on Tavor; always zero */
	*limit_p = 0;

	TAVOR_TNF_EXIT(tavor_ci_query_srq);
	return (IBT_SUCCESS);
}
2864 2839
2865 2840 /*
2866 2841 * tavor_ci_modify_srq()
2867 2842 * Modify properties of a Shared Receive Queue (SRQ)
2868 2843 * Context: Can be called from interrupt or base context.
2869 2844 */
2870 2845 /* ARGSUSED */
2871 2846 static ibt_status_t
2872 2847 tavor_ci_modify_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
2873 2848 ibt_srq_modify_flags_t flags, uint_t size, uint_t limit, uint_t *ret_size_p)
2874 2849 {
2875 2850 tavor_state_t *state;
2876 2851 tavor_srqhdl_t srqhdl;
2877 2852 uint_t resize_supported, cur_srq_size;
2878 2853 int status;
2879 2854
2880 2855 TAVOR_TNF_ENTER(tavor_ci_modify_srq);
2881 2856
2882 2857 /* Check for valid HCA handle */
2883 2858 if (hca == NULL) {
2884 2859 TNF_PROBE_0(tavor_ci_modify_srq_invhca_fail,
2885 2860 TAVOR_TNF_ERROR, "");
2886 2861 TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2887 2862 return (IBT_HCA_HDL_INVALID);
2888 2863 }
2889 2864
2890 2865 state = (tavor_state_t *)hca;
2891 2866
2892 2867 /* Check if SRQ is even supported */
2893 2868 if (state->ts_cfg_profile->cp_srq_enable == 0) {
2894 2869 TNF_PROBE_0(tavor_ci_modify_srq_not_supported_fail,
2895 2870 TAVOR_TNF_ERROR, "");
2896 2871 TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2897 2872 return (IBT_NOT_SUPPORTED);
2898 2873 }
2899 2874
2900 2875 /* Check for valid SRQ handle pointer */
2901 2876 if (srq == NULL) {
2902 2877 TNF_PROBE_0(tavor_ci_modify_srq_invcqhdl_fail,
2903 2878 TAVOR_TNF_ERROR, "");
2904 2879 TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2905 2880 return (IBT_SRQ_HDL_INVALID);
2906 2881 }
2907 2882
2908 2883 srqhdl = (tavor_srqhdl_t)srq;
2909 2884
2910 2885 /*
2911 2886 * Check Error State of SRQ.
2912 2887 * Also, while we are holding the lock we save away the current SRQ
2913 2888 * size for later use.
2914 2889 */
2915 2890 mutex_enter(&srqhdl->srq_lock);
2916 2891 cur_srq_size = srqhdl->srq_wq_bufsz;
2917 2892 if (srqhdl->srq_state == TAVOR_SRQ_STATE_ERROR) {
2918 2893 mutex_exit(&srqhdl->srq_lock);
2919 2894 TNF_PROBE_0(tavor_ci_modify_srq_error_state,
2920 2895 TAVOR_TNF_ERROR, "");
2921 2896 TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2922 2897 return (IBT_SRQ_ERROR_STATE);
2923 2898 }
2924 2899 mutex_exit(&srqhdl->srq_lock);
2925 2900
2926 2901 /*
2927 2902 * Setting the limit watermark is not currently supported. This is a
2928 2903 * tavor hardware (firmware) limitation. We return NOT_SUPPORTED here,
2929 2904 * and have the limit code commented out for now.
2930 2905 *
2931 2906 * XXX If we enable the limit watermark support, we need to do checks
2932 2907 * and set the 'srq->srq_wr_limit' here, instead of returning not
2933 2908 * supported. The 'tavor_srq_modify' operation below is for resizing
2934 2909 * the SRQ only, the limit work should be done here. If this is
2935 2910 * changed to use the 'limit' field, the 'ARGSUSED' comment for this
2936 2911 * function should also be removed at that time.
2937 2912 */
2938 2913 if (flags & IBT_SRQ_SET_LIMIT) {
2939 2914 TNF_PROBE_0(tavor_ci_modify_srq_limit_not_supported,
2940 2915 TAVOR_TNF_ERROR, "");
2941 2916 TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2942 2917 return (IBT_NOT_SUPPORTED);
2943 2918 }
2944 2919
2945 2920 /*
2946 2921 * Check the SET_SIZE flag. If not set, we simply return success here.
2947 2922 * However if it is set, we check if resize is supported and only then
2948 2923 * do we continue on with our resize processing.
2949 2924 */
2950 2925 if (!(flags & IBT_SRQ_SET_SIZE)) {
2951 2926 TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2952 2927 return (IBT_SUCCESS);
2953 2928 }
2954 2929
2955 2930 resize_supported = state->ts_ibtfinfo.hca_attr->hca_flags &
2956 2931 IBT_HCA_RESIZE_SRQ;
2957 2932
2958 2933 if ((flags & IBT_SRQ_SET_SIZE) && !resize_supported) {
2959 2934 TNF_PROBE_0(tavor_ci_modify_srq_resize_not_supp_fail,
2960 2935 TAVOR_TNF_ERROR, "");
2961 2936 TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2962 2937 return (IBT_NOT_SUPPORTED);
2963 2938 }
2964 2939
2965 2940 /*
2966 2941 * We do not support resizing an SRQ to be smaller than it's current
2967 2942 * size. If a smaller (or equal) size is requested, then we simply
2968 2943 * return success, and do nothing.
2969 2944 */
2970 2945 if (size <= cur_srq_size) {
2971 2946 *ret_size_p = cur_srq_size;
2972 2947 TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2973 2948 return (IBT_SUCCESS);
2974 2949 }
2975 2950
2976 2951 status = tavor_srq_modify(state, srqhdl, size, ret_size_p,
2977 2952 TAVOR_NOSLEEP);
2978 2953 if (status != DDI_SUCCESS) {
2979 2954 /* Set return value to current SRQ size */
2980 2955 *ret_size_p = cur_srq_size;
2981 2956 TNF_PROBE_1(tavor_ci_modify_srq_fail, TAVOR_TNF_ERROR, "",
2982 2957 tnf_uint, status, status);
2983 2958 TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2984 2959 return (status);
2985 2960 }
2986 2961
2987 2962 TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2988 2963 return (IBT_SUCCESS);
2989 2964 }
2990 2965
/*
 * tavor_ci_post_srq()
 *    Post a Work Request to the specified Shared Receive Queue (SRQ)
 *    Context: Can be called from interrupt or base context.
 *
 *    Posts up to num_wr receive work requests from 'wr'; the number
 *    actually posted is returned via num_posted_p by tavor_post_srq().
 */
static ibt_status_t
tavor_ci_post_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
    ibt_recv_wr_t *wr, uint_t num_wr, uint_t *num_posted_p)
{
	tavor_state_t	*state;
	tavor_srqhdl_t	srqhdl;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_post_srq);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_post_srq_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_post_srq);
		return (IBT_HCA_HDL_INVALID);
	}

	state = (tavor_state_t *)hca;

	/* Check if SRQ is even supported (config-profile controlled) */
	if (state->ts_cfg_profile->cp_srq_enable == 0) {
		TNF_PROBE_0(tavor_ci_post_srq_not_supported_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_post_srq);
		return (IBT_NOT_SUPPORTED);
	}

	/* Check for valid SRQ handle pointer */
	if (srq == NULL) {
		TNF_PROBE_0(tavor_ci_post_srq_invsrqhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_post_srq);
		return (IBT_SRQ_HDL_INVALID);
	}

	srqhdl = (tavor_srqhdl_t)srq;

	/* Hand the WR chain to the common SRQ posting code */
	status = tavor_post_srq(state, srqhdl, wr, num_wr, num_posted_p);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_post_srq_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_post_srq);
		return (status);
	}

	TAVOR_TNF_EXIT(tavor_ci_post_srq);
	return (IBT_SUCCESS);
}
3045 3020
/* Address translation */
/*
 * tavor_ci_map_mem_area()
 *    Context: Can be called from interrupt or base context.
 *
 *    Not implemented for Tavor; always returns IBT_NOT_SUPPORTED.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_map_mem_area(ibc_hca_hdl_t hca, ibt_va_attr_t *va_attrs,
    void *ibtl_reserved, uint_t list_len, ibt_reg_req_t *reg_req,
    ibc_ma_hdl_t *ibc_ma_hdl_p)
{
	return (IBT_NOT_SUPPORTED);
}
3059 3034
/*
 * tavor_ci_unmap_mem_area()
 *    Unmap the memory area
 *    Context: Can be called from interrupt or base context.
 *
 *    Not implemented for Tavor; always returns IBT_NOT_SUPPORTED.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_unmap_mem_area(ibc_hca_hdl_t hca, ibc_ma_hdl_t ma_hdl)
{
	return (IBT_NOT_SUPPORTED);
}
3071 3046
/*
 * Memory-IOV mapping handle returned by tavor_ci_map_mem_iov() and torn
 * down by tavor_ci_unmap_mem_iov().  imh_dmahandle is a variable-length
 * trailing array: imh_len DMA handles, one per bound (non-empty) iov
 * entry; allocations size it as sizeof (*mi_hdl) + (n - 1) handles.
 */
struct ibc_mi_s {
	int			imh_len;
	ddi_dma_handle_t	imh_dmahandle[1];
};
3079 3051
3080 -
3081 3052 /*
3082 3053 * tavor_ci_map_mem_iov()
3083 3054 * Map the memory
3084 3055 * Context: Can be called from interrupt or base context.
3085 3056 */
3086 3057 /* ARGSUSED */
3087 3058 static ibt_status_t
3088 3059 tavor_ci_map_mem_iov(ibc_hca_hdl_t hca, ibt_iov_attr_t *iov_attr,
3089 3060 ibt_all_wr_t *wr, ibc_mi_hdl_t *mi_hdl_p)
3090 3061 {
3091 3062 int status;
3092 3063 int i, j, nds, max_nds;
3093 3064 uint_t len;
3094 3065 ibt_status_t ibt_status;
3095 3066 ddi_dma_handle_t dmahdl;
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
3096 3067 ddi_dma_cookie_t dmacookie;
3097 3068 ddi_dma_attr_t dma_attr;
3098 3069 uint_t cookie_cnt;
3099 3070 ibc_mi_hdl_t mi_hdl;
3100 3071 ibt_lkey_t rsvd_lkey;
3101 3072 ibt_wr_ds_t *sgl;
3102 3073 tavor_state_t *state;
3103 3074 int kmflag;
3104 3075 int (*callback)(caddr_t);
3105 3076
3106 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wr))
3107 -
3108 3077 if (mi_hdl_p == NULL)
3109 3078 return (IBT_MI_HDL_INVALID);
3110 3079
3111 3080 /* Check for valid HCA handle */
3112 3081 if (hca == NULL)
3113 3082 return (IBT_HCA_HDL_INVALID);
3114 3083
3115 3084 /* Tavor does not allow the default "use reserved lkey" */
3116 3085 if ((iov_attr->iov_flags & IBT_IOV_ALT_LKEY) == 0)
3117 3086 return (IBT_INVALID_PARAM);
3118 3087
3119 3088 rsvd_lkey = iov_attr->iov_alt_lkey;
3120 3089
3121 3090 state = (tavor_state_t *)hca;
3122 3091 tavor_dma_attr_init(&dma_attr);
3123 3092 #ifdef __sparc
3124 3093 if (state->ts_cfg_profile->cp_iommu_bypass == TAVOR_BINDMEM_BYPASS)
3125 3094 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
3126 3095 #endif
3127 3096
3128 3097 nds = 0;
3129 3098 max_nds = iov_attr->iov_wr_nds;
3130 3099 if (iov_attr->iov_lso_hdr_sz)
3131 3100 max_nds -= (iov_attr->iov_lso_hdr_sz + sizeof (uint32_t) +
3132 3101 0xf) >> 4; /* 0xf is for rounding up to a multiple of 16 */
3133 3102 if ((iov_attr->iov_flags & IBT_IOV_NOSLEEP) == 0) {
3134 3103 kmflag = KM_SLEEP;
3135 3104 callback = DDI_DMA_SLEEP;
↓ open down ↓ |
18 lines elided |
↑ open up ↑ |
3136 3105 } else {
3137 3106 kmflag = KM_NOSLEEP;
3138 3107 callback = DDI_DMA_DONTWAIT;
3139 3108 }
3140 3109
3141 3110 if (iov_attr->iov_flags & IBT_IOV_BUF) {
3142 3111 mi_hdl = kmem_alloc(sizeof (*mi_hdl), kmflag);
3143 3112 if (mi_hdl == NULL)
3144 3113 return (IBT_INSUFF_RESOURCE);
3145 3114 sgl = wr->send.wr_sgl;
3146 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))
3147 3115
3148 3116 status = ddi_dma_alloc_handle(state->ts_dip, &dma_attr,
3149 3117 callback, NULL, &dmahdl);
3150 3118 if (status != DDI_SUCCESS) {
3151 3119 kmem_free(mi_hdl, sizeof (*mi_hdl));
3152 3120 return (IBT_INSUFF_RESOURCE);
3153 3121 }
3154 3122 status = ddi_dma_buf_bind_handle(dmahdl, iov_attr->iov_buf,
3155 3123 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
3156 3124 &dmacookie, &cookie_cnt);
3157 3125 if (status != DDI_DMA_MAPPED) {
3158 3126 ddi_dma_free_handle(&dmahdl);
3159 3127 kmem_free(mi_hdl, sizeof (*mi_hdl));
3160 3128 return (ibc_get_ci_failure(0));
3161 3129 }
3162 3130 while (cookie_cnt-- > 0) {
3163 3131 if (nds > max_nds) {
3164 3132 status = ddi_dma_unbind_handle(dmahdl);
3165 3133 ddi_dma_free_handle(&dmahdl);
3166 3134 return (IBT_SGL_TOO_SMALL);
3167 3135 }
3168 3136 sgl[nds].ds_va = dmacookie.dmac_laddress;
3169 3137 sgl[nds].ds_key = rsvd_lkey;
3170 3138 sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
3171 3139 nds++;
3172 3140 if (cookie_cnt != 0)
3173 3141 ddi_dma_nextcookie(dmahdl, &dmacookie);
3174 3142 }
3175 3143 wr->send.wr_nds = nds;
↓ open down ↓ |
19 lines elided |
↑ open up ↑ |
3176 3144 mi_hdl->imh_len = 1;
3177 3145 mi_hdl->imh_dmahandle[0] = dmahdl;
3178 3146 *mi_hdl_p = mi_hdl;
3179 3147 return (IBT_SUCCESS);
3180 3148 }
3181 3149
3182 3150 if (iov_attr->iov_flags & IBT_IOV_RECV)
3183 3151 sgl = wr->recv.wr_sgl;
3184 3152 else
3185 3153 sgl = wr->send.wr_sgl;
3186 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))
3187 3154
3188 3155 len = iov_attr->iov_list_len;
3189 3156 for (i = 0, j = 0; j < len; j++) {
3190 3157 if (iov_attr->iov[j].iov_len == 0)
3191 3158 continue;
3192 3159 i++;
3193 3160 }
3194 3161 mi_hdl = kmem_alloc(sizeof (*mi_hdl) +
3195 3162 (i - 1) * sizeof (ddi_dma_handle_t), kmflag);
3196 3163 if (mi_hdl == NULL)
3197 3164 return (IBT_INSUFF_RESOURCE);
3198 3165 mi_hdl->imh_len = i;
3199 3166 for (i = 0, j = 0; j < len; j++) {
3200 3167 if (iov_attr->iov[j].iov_len == 0)
3201 3168 continue;
3202 3169 status = ddi_dma_alloc_handle(state->ts_dip, &dma_attr,
3203 3170 callback, NULL, &dmahdl);
3204 3171 if (status != DDI_SUCCESS) {
3205 3172 ibt_status = IBT_INSUFF_RESOURCE;
3206 3173 goto fail2;
3207 3174 }
3208 3175 status = ddi_dma_addr_bind_handle(dmahdl, iov_attr->iov_as,
3209 3176 iov_attr->iov[j].iov_addr, iov_attr->iov[j].iov_len,
3210 3177 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
3211 3178 &dmacookie, &cookie_cnt);
3212 3179 if (status != DDI_DMA_MAPPED) {
3213 3180 ibt_status = ibc_get_ci_failure(0);
3214 3181 goto fail1;
3215 3182 }
3216 3183 if (nds + cookie_cnt > max_nds) {
3217 3184 ibt_status = IBT_SGL_TOO_SMALL;
3218 3185 goto fail2;
3219 3186 }
3220 3187 while (cookie_cnt-- > 0) {
3221 3188 sgl[nds].ds_va = dmacookie.dmac_laddress;
3222 3189 sgl[nds].ds_key = rsvd_lkey;
3223 3190 sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
3224 3191 nds++;
3225 3192 if (cookie_cnt != 0)
3226 3193 ddi_dma_nextcookie(dmahdl, &dmacookie);
3227 3194 }
3228 3195 mi_hdl->imh_dmahandle[i] = dmahdl;
3229 3196 i++;
3230 3197 }
3231 3198
3232 3199 if (iov_attr->iov_flags & IBT_IOV_RECV)
3233 3200 wr->recv.wr_nds = nds;
3234 3201 else
3235 3202 wr->send.wr_nds = nds;
3236 3203 *mi_hdl_p = mi_hdl;
3237 3204 return (IBT_SUCCESS);
3238 3205
3239 3206 fail1:
3240 3207 ddi_dma_free_handle(&dmahdl);
3241 3208 fail2:
3242 3209 while (--i >= 0) {
3243 3210 status = ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]);
3244 3211 ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]);
3245 3212 }
3246 3213 kmem_free(mi_hdl, sizeof (*mi_hdl) +
3247 3214 (len - 1) * sizeof (ddi_dma_handle_t));
3248 3215 *mi_hdl_p = NULL;
3249 3216 return (ibt_status);
3250 3217 }
3251 3218
3252 3219 /*
3253 3220 * tavor_ci_unmap_mem_iov()
3254 3221 * Unmap the memory
3255 3222 * Context: Can be called from interrupt or base context.
3256 3223 */
3257 3224 /* ARGSUSED */
3258 3225 static ibt_status_t
3259 3226 tavor_ci_unmap_mem_iov(ibc_hca_hdl_t hca, ibc_mi_hdl_t mi_hdl)
3260 3227 {
3261 3228 int i;
3262 3229
3263 3230 /* Check for valid HCA handle */
3264 3231 if (hca == NULL)
3265 3232 return (IBT_HCA_HDL_INVALID);
3266 3233
3267 3234 if (mi_hdl == NULL)
3268 3235 return (IBT_MI_HDL_INVALID);
3269 3236
3270 3237 for (i = 0; i < mi_hdl->imh_len; i++) {
3271 3238 (void) ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]);
3272 3239 ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]);
3273 3240 }
3274 3241 kmem_free(mi_hdl, sizeof (*mi_hdl) +
3275 3242 (mi_hdl->imh_len - 1) * sizeof (ddi_dma_handle_t));
3276 3243 return (IBT_SUCCESS);
3277 3244 }
3278 3245
/* Allocate L_Key */
/*
 * tavor_ci_alloc_lkey()
 *    Not implemented for Tavor; always returns IBT_NOT_SUPPORTED.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_alloc_lkey(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_lkey_flags_t flags, uint_t phys_buf_list_sz, ibc_mr_hdl_t *mr_p,
    ibt_pmr_desc_t *mem_desc_p)
{
	TAVOR_TNF_ENTER(tavor_ci_alloc_lkey);
	TAVOR_TNF_EXIT(tavor_ci_alloc_lkey);
	return (IBT_NOT_SUPPORTED);
}
3293 3260
/* Physical Register Memory Region */
/*
 * tavor_ci_register_physical_mr()
 *    Not implemented for Tavor; always returns IBT_NOT_SUPPORTED.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_register_physical_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
    ibt_pmr_desc_t *mem_desc_p)
{
	TAVOR_TNF_ENTER(tavor_ci_register_physical_mr);
	TAVOR_TNF_EXIT(tavor_ci_register_physical_mr);
	return (IBT_NOT_SUPPORTED);
}
3308 3275
/*
 * tavor_ci_reregister_physical_mr()
 *    Not implemented for Tavor; always returns IBT_NOT_SUPPORTED.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_reregister_physical_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
    ibc_pd_hdl_t pd, ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved,
    ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mr_desc_p)
{
	TAVOR_TNF_ENTER(tavor_ci_reregister_physical_mr);
	TAVOR_TNF_EXIT(tavor_ci_reregister_physical_mr);
	return (IBT_NOT_SUPPORTED);
}
3322 3289
/* Mellanox FMR Support */
/*
 * tavor_ci_create_fmr_pool()
 *    Creates a pool of memory regions suitable for FMR registration
 *    Context: Can be called from base context only
 *
 *    Not implemented for Tavor; always returns IBT_NOT_SUPPORTED.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_create_fmr_pool(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_fmr_pool_attr_t *params, ibc_fmr_pool_hdl_t *fmr_pool_p)
{
	return (IBT_NOT_SUPPORTED);
}
3336 3303
/*
 * tavor_ci_destroy_fmr_pool()
 *    Free all resources associated with an FMR pool.
 *    Context: Can be called from base context only.
 *
 *    Not implemented for Tavor; always returns IBT_NOT_SUPPORTED.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_destroy_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
{
	return (IBT_NOT_SUPPORTED);
}
3348 3315
/*
 * tavor_ci_flush_fmr_pool()
 *    Force a flush of the memory tables, cleaning up used FMR resources.
 *    Context: Can be called from interrupt or base context.
 *
 *    Not implemented for Tavor; always returns IBT_NOT_SUPPORTED.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_flush_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
{
	return (IBT_NOT_SUPPORTED);
}
3360 3327
/*
 * tavor_ci_register_physical_fmr()
 *    From the 'pool' of FMR regions passed in, performs register physical
 *    operation.
 *    Context: Can be called from interrupt or base context.
 *
 *    Not implemented for Tavor; always returns IBT_NOT_SUPPORTED.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_register_physical_fmr(ibc_hca_hdl_t hca,
    ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
    void *ibtl_reserved, ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mem_desc_p)
{
	return (IBT_NOT_SUPPORTED);
}
3375 3342
/*
 * tavor_ci_deregister_fmr()
 *    Moves an FMR (specified by 'mr') to the deregistered state.
 *    Context: Can be called from base context only.
 *
 *    Not implemented for Tavor; always returns IBT_NOT_SUPPORTED.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_deregister_fmr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
{
	return (IBT_NOT_SUPPORTED);
}
3387 3354
/*
 * tavor_ci_alloc_io_mem()
 *    Allocate dmable memory
 *
 *    Allocates DMA-able memory of 'size' bytes via tavor_mem_alloc();
 *    the kernel virtual address is returned through kaddrp and the
 *    allocation handle through mem_alloc_hdl.
 */
ibt_status_t
tavor_ci_alloc_io_mem(
	ibc_hca_hdl_t hca,
	size_t size,
	ibt_mr_flags_t mr_flag,
	caddr_t *kaddrp,
	ibc_mem_alloc_hdl_t *mem_alloc_hdl)
{
	tavor_state_t	*state;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_alloc_io_mem);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_alloc_io_mem_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_alloc_io_mem);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid mem_alloc_hdl handle pointer */
	if (mem_alloc_hdl == NULL) {
		TNF_PROBE_0(tavor_ci_alloc_io_mem_hdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_alloc_io_mem);
		return (IBT_MEM_ALLOC_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and mem handle */
	state = (tavor_state_t *)hca;

	/*
	 * Allocate the DMA-able memory (the old "Allocate the AH" comment
	 * was a copy-paste leftover; no address handle is involved here).
	 */
	status = tavor_mem_alloc(state, size, mr_flag, kaddrp,
	    (tavor_mem_alloc_hdl_t *)mem_alloc_hdl);

	if (status != DDI_SUCCESS) {
		/*
		 * NOTE(review): the probe name "tavor_ci_alloc_ah_fail"
		 * also looks like an AH copy-paste leftover — probe names
		 * are externally visible, so it is left untouched here.
		 */
		TNF_PROBE_1(tavor_ci_alloc_ah_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_alloc_io_mem);
		return (status);
	}

	TAVOR_TNF_EXIT(tavor_ci_alloc_io_mem);
	return (IBT_SUCCESS);
}
3439 3406
3440 3407
3441 3408 /*
3442 3409 * tavor_ci_free_io_mem()
3443 3410 * free the memory
3444 3411 */
3445 3412 ibt_status_t
3446 3413 tavor_ci_free_io_mem(ibc_hca_hdl_t hca, ibc_mem_alloc_hdl_t mem_alloc_hdl)
3447 3414 {
3448 3415 tavor_mem_alloc_hdl_t memhdl;
3449 3416
3450 3417 TAVOR_TNF_ENTER(tavor_ci_free_io_mem);
3451 3418
3452 3419 /* Check for valid HCA handle */
3453 3420 if (hca == NULL) {
3454 3421 TNF_PROBE_0(tavor_ci_free_io_mem_invhca_fail,
3455 3422 TAVOR_TNF_ERROR, "");
3456 3423 TAVOR_TNF_EXIT(tavor_ci_free_io_mem);
3457 3424 return (IBT_HCA_HDL_INVALID);
3458 3425 }
3459 3426
3460 3427 /* Check for valid mem_alloc_hdl handle pointer */
↓ open down ↓ |
264 lines elided |
↑ open up ↑ |
3461 3428 if (mem_alloc_hdl == NULL) {
3462 3429 TNF_PROBE_0(tavor_ci_free_io_mem_hdl_fail,
3463 3430 TAVOR_TNF_ERROR, "");
3464 3431 TAVOR_TNF_EXIT(tavor_ci_free_io_mem);
3465 3432 return (IBT_MEM_ALLOC_HDL_INVALID);
3466 3433 }
3467 3434
3468 3435 memhdl = (tavor_mem_alloc_hdl_t)mem_alloc_hdl;
3469 3436
3470 3437 /* free the memory */
3471 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*memhdl))
3472 3438 ddi_dma_mem_free(&memhdl->tavor_acc_hdl);
3473 3439 ddi_dma_free_handle(&memhdl->tavor_dma_hdl);
3474 3440
3475 3441 kmem_free(memhdl, sizeof (*memhdl));
3476 3442 TAVOR_TNF_EXIT(tavor_dma_free);
3477 3443 return (IBT_SUCCESS);
3478 3444 }
3479 3445
3480 3446
/*
 * tavor_mem_alloc()
 *    Allocate DMA-able memory for tavor_ci_alloc_io_mem().
 *
 *    state    - Tavor softstate (supplies the dip and access attributes)
 *    size     - number of bytes to allocate
 *    flags    - IBT_MR_NOSLEEP selects no-sleep DDI/kmem allocation
 *    kaddrp   - out: kernel virtual address of the allocated memory
 *    mem_hdl  - out: wrapper holding the DMA and access handles
 *
 *    Returns DDI_SUCCESS, or DDI_FAILURE after undoing any partial
 *    allocation.
 */
int
tavor_mem_alloc(
	tavor_state_t *state,
	size_t size,
	ibt_mr_flags_t flags,
	caddr_t *kaddrp,
	tavor_mem_alloc_hdl_t *mem_hdl)
{
	ddi_dma_handle_t	dma_hdl;
	ddi_dma_attr_t		dma_attr;
	ddi_acc_handle_t	acc_hdl;
	size_t			real_len;
	int			status;
	int			(*ddi_cb)(caddr_t);

	TAVOR_TNF_ENTER(tavor_mem_alloc);

	tavor_dma_attr_init(&dma_attr);

	/* Map the IBT sleep flag onto the DDI wait callback */
	ddi_cb = (flags & IBT_MR_NOSLEEP) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	/* Allocate a DMA handle */
	status = ddi_dma_alloc_handle(state->ts_dip, &dma_attr, ddi_cb,
	    NULL, &dma_hdl);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_0(tavor_dma_alloc_handle_fail, TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_mem_alloc);
		return (DDI_FAILURE);
	}

	/* Allocate DMA memory */
	status = ddi_dma_mem_alloc(dma_hdl, size,
	    &state->ts_reg_accattr, DDI_DMA_CONSISTENT, ddi_cb,
	    NULL,
	    kaddrp, &real_len, &acc_hdl);
	if (status != DDI_SUCCESS) {
		/* Undo the handle allocation before failing */
		ddi_dma_free_handle(&dma_hdl);
		TNF_PROBE_0(tavor_dma_alloc_memory_fail, TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_mem_alloc);
		return (DDI_FAILURE);
	}

	/* Package the tavor_dma_info contents and return */
	/* NULL check only matters for KM_NOSLEEP; KM_SLEEP cannot fail */
	*mem_hdl = kmem_alloc(sizeof (**mem_hdl),
	    flags & IBT_MR_NOSLEEP ? KM_NOSLEEP : KM_SLEEP);
	if (*mem_hdl == NULL) {
		ddi_dma_mem_free(&acc_hdl);
		ddi_dma_free_handle(&dma_hdl);
		TNF_PROBE_0(tavor_dma_alloc_memory_fail, TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_mem_alloc);
		return (DDI_FAILURE);
	}
	(*mem_hdl)->tavor_dma_hdl = dma_hdl;
	(*mem_hdl)->tavor_acc_hdl = acc_hdl;

	TAVOR_TNF_EXIT(tavor_mem_alloc);
	return (DDI_SUCCESS);
}
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX