1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * sol_cma is a part of sol_ofs misc module. This file
28 * provides interfaces for supporting the communication
29 * management API defined in "rdma_cm.h". In-Kernel
30 * consumers of the "rdma_cm.h" API should link sol_ofs
31 * misc module using :
32 * -N misc/sol_ofs
33 * Solaris uCMA (sol_ucma) driver is the current consumer for
34 * sol_cma.
35 */
36
37 /* Standard driver includes */
38 #include <sys/types.h>
39 #include <sys/modctl.h>
40 #include <sys/errno.h>
41 #include <sys/stat.h>
42 #include <sys/ddi.h>
43 #include <sys/sunddi.h>
44 #include <sys/modctl.h>
45
46 #include <sys/ib/clients/of/ofed_kernel.h>
47 #include <sys/ib/clients/of/rdma/ib_addr.h>
48
49 #include <sys/ib/clients/of/sol_ofs/sol_cma.h>
50 #include <sys/ib/clients/of/sol_ofs/sol_kverb_impl.h>
51
/* Modload support: sol_ofs is a misc module (no dev_ops), linked by consumers. */
static struct modlmisc sol_ofs_modmisc	= {
	&mod_miscops,
	"Solaris OFS Misc module"
};

struct modlinkage sol_ofs_modlinkage = {
	MODREV_1,
	{ (void *)&sol_ofs_modmisc, NULL }
};

/* IBTF client handle registered in _init(); freed in _fini(). */
static ib_client_t	*sol_cma_ib_client;
sol_cma_glbl_listen_t	sol_cma_glbl_listen;
/* AVL tree of global listeners; must be empty before _fini() can succeed. */
avl_tree_t		sol_cma_glbl_listen_tree;

static void		sol_cma_add_dev(struct ib_device *);
static void		sol_cma_rem_dev(struct ib_device *);

/* Global list of cma_device_t entries, protected by sol_cma_dev_mutex. */
static llist_head_t	sol_cma_dev_list = LLIST_HEAD_INIT(sol_cma_dev_list);
kmutex_t		sol_cma_dev_mutex;
kmutex_t		sol_cma_glob_mutex;

/* Debug-log prefixes used with the SOL_OFS_DPRINTF_* macros. */
char	*sol_rdmacm_dbg_str = "sol_rdmacm";
char	*sol_ofs_dbg_str = "sol_ofs_mod";
76
77 /*
78 * Local functions defines.
79 */
80 int sol_cma_req_cmid_cmp(const void *p1, const void *p2);
81 int sol_cma_cmid_cmp(const void *p1, const void *p2);
82 int sol_cma_svc_cmp(const void *, const void *);
83
84 static struct rdma_cm_id *cma_alloc_chan(rdma_cm_event_handler,
85 void *, enum rdma_port_space);
86 static void cma_set_chan_state(sol_cma_chan_t *, cma_chan_state_t);
87 static int cma_cas_chan_state(sol_cma_chan_t *, cma_chan_state_t,
88 cma_chan_state_t);
89 static void cma_free_listen_list(struct rdma_cm_id *);
90 static void cma_destroy_id(struct rdma_cm_id *);
91 static void cma_handle_nomore_events(sol_cma_chan_t *);
92
93 extern void sol_ofs_dprintf_init();
94 extern void sol_ofs_dprintf_fini();
95
96 cma_chan_state_t cma_get_chan_state(sol_cma_chan_t *);
97 extern int ibcma_init_root_chan(sol_cma_chan_t *, sol_cma_glbl_listen_t *);
98 extern int ibcma_fini_root_chan(sol_cma_chan_t *);
99 extern void ibcma_copy_srv_hdl(sol_cma_chan_t *, sol_cma_glbl_listen_t *);
100 extern int ibcma_fini_ep_chan(sol_cma_chan_t *);
101 extern uint64_t ibcma_init_root_sid(sol_cma_chan_t *);
102 extern void rdma_ib_destroy_id(struct rdma_cm_id *);
103 extern int rdma_ib_bind_addr(struct rdma_cm_id *, struct sockaddr *);
104 extern int rdma_ib_resolve_addr(struct rdma_cm_id *, struct sockaddr *,
105 struct sockaddr *, int);
106 extern int rdma_ib_resolve_route(struct rdma_cm_id *, int);
107 extern int rdma_ib_init_qp_attr(struct rdma_cm_id *, struct ib_qp_attr *,
108 int *);
109 extern int rdma_ib_connect(struct rdma_cm_id *, struct rdma_conn_param *);
110 extern int rdma_ib_listen(struct rdma_cm_id *, int);
111 extern int rdma_ib_accept(struct rdma_cm_id *, struct rdma_conn_param *);
112 extern int rdma_ib_reject(struct rdma_cm_id *, const void *, uint8_t);
113 extern int rdma_ib_disconnect(struct rdma_cm_id *);
114 extern int rdma_ib_join_multicast(struct rdma_cm_id *, struct sockaddr *,
115 void *);
116 extern void rdma_ib_leave_multicast(struct rdma_cm_id *, struct sockaddr *);
117
118 int
119 _init(void)
120 {
121 int err;
122
123 sol_ofs_dprintf_init();
124 SOL_OFS_DPRINTF_L5(sol_ofs_dbg_str, "_init()");
125
126 mutex_init(&sol_cma_glob_mutex, NULL, MUTEX_DRIVER, NULL);
127 mutex_init(&sol_cma_dev_mutex, NULL, MUTEX_DRIVER, NULL);
128 avl_create(&sol_cma_glbl_listen_tree,
129 sol_cma_svc_cmp, sizeof (sol_cma_glbl_listen_t),
130 offsetof(sol_cma_glbl_listen_t, cma_listen_node));
131
132 sol_cma_ib_client = kmem_zalloc(sizeof (ib_client_t), KM_NOSLEEP);
133 if (!sol_cma_ib_client) {
134 SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str,
135 "_init() - mem alloc failed");
136 avl_destroy(&sol_cma_glbl_listen_tree);
137 mutex_destroy(&sol_cma_dev_mutex);
138 mutex_destroy(&sol_cma_glob_mutex);
139 sol_ofs_dprintf_fini();
140 return (ENOMEM);
141 }
142
143 sol_cma_ib_client->name = "sol_ofs";
144 sol_cma_ib_client->add = sol_cma_add_dev;
145 sol_cma_ib_client->remove = sol_cma_rem_dev;
146 sol_cma_ib_client->dip = NULL;
147
148 if ((err = ib_register_client(sol_cma_ib_client)) != 0) {
149 SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str,
150 "_init() ib_register_client() failed with err %d",
151 err);
152 kmem_free(sol_cma_ib_client, sizeof (ib_client_t));
153 avl_destroy(&sol_cma_glbl_listen_tree);
154 mutex_destroy(&sol_cma_dev_mutex);
155 mutex_destroy(&sol_cma_glob_mutex);
156 sol_ofs_dprintf_fini();
157 return (err);
158 }
159
160 if ((err = mod_install(&sol_ofs_modlinkage)) != 0) {
161 SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str,
162 "_init() - mod_install() failed");
163 ib_unregister_client(sol_cma_ib_client);
164 kmem_free(sol_cma_ib_client, sizeof (ib_client_t));
165 avl_destroy(&sol_cma_glbl_listen_tree);
166 mutex_destroy(&sol_cma_dev_mutex);
167 mutex_destroy(&sol_cma_glob_mutex);
168 sol_ofs_dprintf_fini();
169 return (err);
170 }
171
172 SOL_OFS_DPRINTF_L5(sol_ofs_dbg_str, "_init() - ret");
173 return (err);
174 }
175
/*
 * _fini - module unload entry point.
 *
 * Refuses unload (EBUSY) while any global listen CMIDs remain, then
 * removes the module and tears down the state created in _init() in
 * reverse order.  Returns 0 on success or an errno value on failure.
 */
int
_fini(void)
{
	int		err;

	SOL_OFS_DPRINTF_L5(sol_ofs_dbg_str, "_fini()");

	/*
	 * NOTE(review): the tree is inspected without holding
	 * sol_cma_glob_mutex; presumably no new listeners can appear
	 * once unload has begun — confirm against callers.
	 */
	if (avl_numnodes(&sol_cma_glbl_listen_tree)) {
		SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str, "_fini - "
		    "listen CMIDs still active");
		return (EBUSY);
	}
	if ((err = mod_remove(&sol_ofs_modlinkage)) != 0) {
		SOL_OFS_DPRINTF_L3(sol_ofs_dbg_str,
		    "_fini: mod_remove failed");
		return (err);
	}

	/* mod_remove() succeeded: no more consumers, safe to tear down. */
	ib_unregister_client(sol_cma_ib_client);
	kmem_free(sol_cma_ib_client, sizeof (ib_client_t));
	avl_destroy(&sol_cma_glbl_listen_tree);
	mutex_destroy(&sol_cma_dev_mutex);
	mutex_destroy(&sol_cma_glob_mutex);
	SOL_OFS_DPRINTF_L5(sol_ofs_dbg_str, "_fini() - ret");
	sol_ofs_dprintf_fini();
	return (err);
}
203
/* _info - module information entry point; reports the modlinkage. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&sol_ofs_modlinkage, modinfop));
}
209
/*
 * Per-HCA bookkeeping, one entry per ib_device on sol_cma_dev_list.
 * cma_mutex protects all fields below it in this structure.
 */
typedef struct cma_device {
	kmutex_t		cma_mutex;
	/* Ptr in the global sol_cma_dev_list */
	llist_head_t		cma_list;
	/* List of listeners for this device */
	genlist_t		cma_epchan_list;
	struct ib_device	*cma_device;
	/* Count of CMIDs holding this device; freed only when it drops to 0. */
	uint_t			cma_ref_count;
	enum {
		SOL_CMA_DEV_ADDED,
		SOL_CMA_DEV_REM_IN_PROGRESS
	} cma_dev_state;
} cma_device_t;
223
224 static void
225 sol_cma_add_dev(struct ib_device *dev)
226 {
227 cma_device_t *new_device;
228
229 new_device = kmem_zalloc(sizeof (cma_device_t), KM_NOSLEEP);
230 if (!new_device) {
231 SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str, "sol_cma_add_dev() "
232 "alloc failed!!");
233 return;
234 }
235 mutex_init(&new_device->cma_mutex, NULL, MUTEX_DRIVER, NULL);
236 llist_head_init(&new_device->cma_list, new_device);
237 init_genlist(&new_device->cma_epchan_list);
238 new_device->cma_device = dev;
239
240 ib_set_client_data(dev, sol_cma_ib_client, new_device);
241
242 mutex_enter(&sol_cma_dev_mutex);
243 llist_add_tail(&new_device->cma_list, &sol_cma_dev_list);
244 mutex_exit(&sol_cma_dev_mutex);
245 }
246
247 static void
248 sol_cma_rem_dev(struct ib_device *dev)
249 {
250 cma_device_t *rem_device;
251 genlist_entry_t *entry;
252
253 SOL_OFS_DPRINTF_L5(sol_ofs_dbg_str, "sol_rem_dev(%p)", dev);
254
255 rem_device = (cma_device_t *)ib_get_client_data(dev, sol_cma_ib_client);
256 if (!rem_device) {
257 SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str, "sol_cma_rem_dev() "
258 "NULL cma_dev!!");
259 return;
260 }
261
262 mutex_enter(&rem_device->cma_mutex);
263 rem_device->cma_dev_state = SOL_CMA_DEV_REM_IN_PROGRESS;
264 if (rem_device->cma_ref_count) {
265 mutex_exit(&rem_device->cma_mutex);
266 SOL_OFS_DPRINTF_L3(sol_ofs_dbg_str, "sol_cma_rem_dev() "
267 "BUSY cma_dev!!");
268 return;
269 }
270 entry = remove_genlist_head(&rem_device->cma_epchan_list);
271 while (entry) {
272 sol_cma_chan_t *ep_chanp;
273
274 ep_chanp = (sol_cma_chan_t *)entry->data;
275 if (ibcma_fini_ep_chan(ep_chanp) == 0) {
276 genlist_entry_t *entry1;
277 sol_cma_chan_t *root_chanp;
278
279 ASSERT(ep_chanp->chan_listenp);
280 entry1 = ep_chanp->chan_listenp->listen_ep_root_entry;
281 root_chanp = (sol_cma_chan_t *)ep_chanp->listen_root;
282 root_chanp->chan_listenp->listen_eps--;
283 delete_genlist(&root_chanp->chan_listenp->listen_list,
284 entry1);
285
286 kmem_free(ep_chanp, sizeof (sol_cma_chan_t));
287 kmem_free(entry, sizeof (genlist_entry_t));
288 }
289
290 entry = remove_genlist_head(&rem_device->cma_epchan_list);
291 }
292 mutex_exit(&rem_device->cma_mutex);
293
294 mutex_enter(&sol_cma_dev_mutex);
295 llist_del(&rem_device->cma_list);
296 mutex_exit(&sol_cma_dev_mutex);
297
298 kmem_free(rem_device, sizeof (cma_device_t));
299 }
300
301 struct ib_device *
302 sol_cma_acquire_device(ib_guid_t hca_guid)
303 {
304 llist_head_t *entry;
305 cma_device_t *cma_devp;
306
307 mutex_enter(&sol_cma_dev_mutex);
308 list_for_each(entry, &sol_cma_dev_list) {
309 cma_devp = (cma_device_t *)entry->ptr;
310
311 if (cma_devp->cma_device->node_guid != hca_guid)
312 continue;
313
314 mutex_enter(&cma_devp->cma_mutex);
315 if (cma_devp->cma_dev_state == SOL_CMA_DEV_REM_IN_PROGRESS) {
316 SOL_OFS_DPRINTF_L3(sol_ofs_dbg_str,
317 "sol_cma_acquire_dev() - Device getting removed!!");
318 mutex_exit(&cma_devp->cma_mutex);
319 mutex_exit(&sol_cma_dev_mutex);
320 return (NULL);
321 }
322 cma_devp->cma_ref_count++;
323 mutex_exit(&cma_devp->cma_mutex);
324 mutex_exit(&sol_cma_dev_mutex);
325 return (cma_devp->cma_device);
326
327 }
328 mutex_exit(&sol_cma_dev_mutex);
329 return (NULL);
330 }
331
332 static void
333 sol_cma_release_device(struct rdma_cm_id *id)
334 {
335 ib_device_t *device = id->device;
336 llist_head_t *entry;
337 cma_device_t *cma_devp;
338
339 mutex_enter(&sol_cma_dev_mutex);
340 list_for_each(entry, &sol_cma_dev_list) {
341 cma_devp = (cma_device_t *)entry->ptr;
342
343 if (cma_devp->cma_device != device)
344 continue;
345
346 mutex_enter(&cma_devp->cma_mutex);
347 cma_devp->cma_ref_count--;
348 if (cma_devp->cma_dev_state == SOL_CMA_DEV_REM_IN_PROGRESS &&
349 cma_devp->cma_ref_count == 0) {
350 SOL_OFS_DPRINTF_L3(sol_ofs_dbg_str,
351 "sol_cma_release_dev() - Device free removed!!");
352 mutex_exit(&cma_devp->cma_mutex);
353 llist_del(&cma_devp->cma_list);
354 kmem_free(cma_devp, sizeof (cma_device_t));
355 mutex_exit(&sol_cma_dev_mutex);
356 return;
357 }
358 mutex_exit(&cma_devp->cma_mutex);
359 }
360 mutex_exit(&sol_cma_dev_mutex);
361 }
362
363 void
364 sol_cma_add_hca_list(sol_cma_chan_t *ep_chanp, ib_guid_t hca_guid)
365 {
366 llist_head_t *entry;
367 cma_device_t *cma_devp;
368
369 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "add_hca_list(%p, %llx)",
370 ep_chanp, hca_guid);
371 mutex_enter(&sol_cma_dev_mutex);
372 list_for_each(entry, &sol_cma_dev_list) {
373 cma_devp = (cma_device_t *)entry->ptr;
374
375 if ((cma_devp->cma_device)->node_guid != hca_guid)
376 continue;
377
378 mutex_enter(&cma_devp->cma_mutex);
379 ep_chanp->chan_listenp->listen_ep_dev_entry =
380 add_genlist(&cma_devp->cma_epchan_list,
381 (uintptr_t)ep_chanp, NULL);
382 ep_chanp->chan_listenp->listen_ep_device = cma_devp->cma_device;
383 mutex_exit(&cma_devp->cma_mutex);
384 mutex_exit(&sol_cma_dev_mutex);
385 return;
386 }
387 mutex_exit(&sol_cma_dev_mutex);
388 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str, "add_hca_list(%p, %llx): "
389 "No matching HCA in list!!", ep_chanp, hca_guid);
390 }
391
392 /*
393 * rdma_cm.h API functions.
394 */
395 struct rdma_cm_id *
396 rdma_create_id(rdma_cm_event_handler evt_hdlr, void *context,
397 enum rdma_port_space ps)
398 {
399 struct rdma_cm_id *rdma_idp;
400
401 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_create_id(%p, %p, %x)",
402 evt_hdlr, context, ps);
403
404 if (ps != RDMA_PS_TCP && ps != RDMA_PS_UDP && ps != RDMA_PS_IPOIB) {
405 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
406 "rdma_create_id: unsupported protocol %x", ps);
407 return (NULL);
408 }
409
410 rdma_idp = cma_alloc_chan(evt_hdlr, context, ps);
411 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
412 "rdma_create_id : ret %p", rdma_idp);
413
414 return (rdma_idp);
415 }
416
417 void
418 rdma_map_id2clnthdl(struct rdma_cm_id *rdma_idp, void *ib_client_hdl,
419 void *iw_client_hdl)
420 {
421 sol_cma_chan_t *chanp = (sol_cma_chan_t *)rdma_idp;
422
423 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
424 "rdma_map_id2clnthdl(%p, %p, %p)",
425 rdma_idp, ib_client_hdl, iw_client_hdl);
426 ASSERT(ib_client_hdl != NULL || iw_client_hdl != NULL);
427 chanp->chan_ib_client_hdl = ib_client_hdl;
428 chanp->chan_iw_client_hdl = iw_client_hdl;
429 }
430
431 void
432 rdma_map_id2qphdl(struct rdma_cm_id *rdma_idp, void *qp_hdl)
433 {
434 sol_cma_chan_t *chanp = (sol_cma_chan_t *)rdma_idp;
435
436 ASSERT(rdma_idp);
437 ASSERT(qp_hdl);
438 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_mapid2qphdl(%p, %p)",
439 rdma_idp, qp_hdl);
440 chanp->chan_qp_hdl = qp_hdl;
441 }
442
443
/*
 * rdma_destroy_id - rdma_cm.h entry point: destroy a CMID.
 *
 * Marks the CMID destroyed, waits for any in-progress API call or
 * event delivery to drain, releases the HCA reference, and then
 * handles the active-side / listen-side cases:
 *   - a root listen CMID with pending (un-notified) REQ CMIDs rejects
 *     and tears them down first;
 *   - a still-connected CMID is disconnected via rdma_disconnect() and
 *     either freed here or marked DESTROY_PENDING so the DISCONNECT
 *     event handler frees it;
 *   - an idle CMID is freed immediately via cma_destroy_id().
 * The function may block on chan_destroy_cv until the teardown
 * completes.  Lock order throughout is root chan_mutex before the
 * CMID's own chan_mutex; the CMID mutex is dropped around calls that
 * may themselves take channel mutexes (rdma_disconnect, cma_destroy_id).
 */
void
rdma_destroy_id(struct rdma_cm_id *rdma_idp)
{
	sol_cma_chan_t	*chanp, *root_chanp;
	cma_chan_state_t	state;
	int		rc, is_root_cmid, do_wait, is_passive;

	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id(%p)", rdma_idp);

	if (!rdma_idp)
		return;

	is_root_cmid = do_wait = is_passive = 0;

	chanp = (sol_cma_chan_t *)rdma_idp;
	root_chanp = (sol_cma_chan_t *)chanp->listen_root;
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id(%p), %p",
	    rdma_idp, root_chanp);

	mutex_enter(&chanp->chan_mutex);
	chanp->chan_cmid_destroy_state |= SOL_CMA_CALLER_CMID_DESTROYED;

	/*
	 * Wait in destroy of CMID when rdma_resolve_addr() / rdma_listen()
	 * rdma_resolve_route() API is in progress.
	 */
	while (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_API_PROGRESS)
		cv_wait(&chanp->chan_destroy_cv, &chanp->chan_mutex);

	/* Wait while an event is being notified to the consumer. */
	while (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_EVENT_PROGRESS)
		cv_wait(&chanp->chan_destroy_cv, &chanp->chan_mutex);

	if (rdma_idp->device)
		sol_cma_release_device(rdma_idp);

	/* Classify the CMID: root listener, passive (REQ), or active. */
	if (chanp->chan_listenp && chanp->chan_listenp->listen_is_root)
		is_root_cmid = 1;
	if (root_chanp == NULL && is_root_cmid == 0)
		is_passive = 1;

	/*
	 * Skip Active side handling for passive CMIDs and listen CMID
	 * for which REQ CMIDs have not been created.
	 */
	if (is_passive || (is_root_cmid && chanp->chan_req_state !=
	    REQ_CMID_QUEUED)) {
		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id: "
		    "Skipping passive %p, %x, %x", chanp->chan_listenp,
		    is_root_cmid, chanp->chan_req_state);
		goto skip_passive_handling;
	}

	/*
	 * destroy_id() called for listening CMID and there are REQ
	 * CMIDs not yet notified. Reject such CMIDs and decrement
	 * the count.
	 */
	if (is_root_cmid && chanp->chan_req_cnt) {
		sol_cma_chan_t	*req_cmid_chan, *next_chan;

		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id: "
		    "not notified handling");
		for (req_cmid_chan = (sol_cma_chan_t *)avl_first(
		    &chanp->chan_req_avl_tree); req_cmid_chan &&
		    chanp->chan_req_cnt; req_cmid_chan = next_chan) {
			/* Capture the next node before removing this one. */
			next_chan = AVL_NEXT(
			    &chanp->chan_req_avl_tree, req_cmid_chan);
			if (req_cmid_chan->chan_req_state ==
			    REQ_CMID_NOTIFIED) {
				avl_remove(&chanp->chan_req_avl_tree,
				    req_cmid_chan);
				chanp->chan_req_cnt--;
				chanp->chan_req_total_cnt--;
				/*
				 * Drop our mutex while disconnecting the
				 * REQ CMID — rdma_disconnect() takes the
				 * REQ CMID's own lock.
				 */
				mutex_exit(&chanp->chan_mutex);
				mutex_enter(&req_cmid_chan->chan_mutex);
				req_cmid_chan->chan_req_state =
				    REQ_CMID_SERVER_NONE;
				if (rdma_idp->ps == RDMA_PS_TCP)
					cma_set_chan_state(req_cmid_chan,
					    SOL_CMA_CHAN_DESTROY_PENDING);
				mutex_exit(&req_cmid_chan->chan_mutex);
				(void) rdma_disconnect(
				    (struct rdma_cm_id *)req_cmid_chan);
				mutex_enter(&chanp->chan_mutex);
				if (rdma_idp->ps == RDMA_PS_TCP) {
					/*
					 * TCP: the DISCONNECT event path
					 * finishes the free; just link the
					 * REQ CMID back to this root.
					 */
					mutex_enter(
					    &req_cmid_chan->chan_mutex);
					req_cmid_chan->listen_root =
					    rdma_idp;
					mutex_exit(
					    &req_cmid_chan->chan_mutex);
				} else {
					/* UDP/IPoIB: free it right here. */
					mutex_destroy(
					    &req_cmid_chan->chan_mutex);
					cv_destroy(
					    &req_cmid_chan->chan_destroy_cv);
					kmem_free(req_cmid_chan,
					    sizeof (sol_cma_chan_t));
				}
			}
		}
	}

	/*
	 * destroy_id() called for :
	 *	listening CMID and all REQ CMIDs destroy_id() called
	 *	REQ CMID and 1 more REQ CMID not yet destroyed.
	 * wait till the CMID is completely destroyed.
	 */
	if (is_root_cmid && chanp->chan_req_total_cnt == 0) {
		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id: "
		    "root idp waiting");
		cma_set_chan_state(chanp, SOL_CMA_CHAN_DESTROY_WAIT);
		cv_wait(&chanp->chan_destroy_cv, &chanp->chan_mutex);
	}
	mutex_exit(&chanp->chan_mutex);

	/* Re-take locks in root-before-child order. */
	if (root_chanp)
		mutex_enter(&root_chanp->chan_mutex);
	mutex_enter(&chanp->chan_mutex);
#ifdef DEBUG
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id: "
	    "root_idp %p, cnt %x, state %x", root_chanp,
	    root_chanp ? root_chanp->chan_req_total_cnt : 0,
	    root_chanp ? cma_get_chan_state(root_chanp) : 0);
#endif

	/*
	 * If this is the root's last REQ CMID and the root itself is
	 * pending destroy, this call must wait for the full teardown.
	 */
	if (root_chanp && root_chanp->chan_req_total_cnt == 1 &&
	    cma_get_chan_state(root_chanp) == SOL_CMA_CHAN_DESTROY_PENDING)
		do_wait = 1;
	if (root_chanp)
		mutex_exit(&root_chanp->chan_mutex);

skip_passive_handling :
	state = cma_get_chan_state(chanp);
	if (is_root_cmid == 0 && state != SOL_CMA_CHAN_DISCONNECT &&
	    SOL_CMAID_CONNECTED(chanp)) {
		/*
		 * A connected CM ID has not been disconnected.
		 * Call rdma_disconnect() to disconnect it.
		 */
		mutex_exit(&chanp->chan_mutex);
		rc = rdma_disconnect(rdma_idp);
		if (rc) {
			SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
			    "rdma_destroy_id(%p)- disconnect failed!!",
			    rdma_idp);
			return;
		}
		mutex_enter(&chanp->chan_mutex);
		if (root_chanp && chanp->listen_root == NULL)
			chanp->listen_root = (struct rdma_cm_id *)root_chanp;
		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
		    "rdma_destroy_id(chanp %p, connect %x, ps %x)",
		    chanp, chanp->chan_connect_flag, rdma_idp->ps);
		if (SOL_CMAID_CONNECTED(chanp)) {
			if (do_wait) {
				cma_set_chan_state(chanp,
				    SOL_CMA_CHAN_DESTROY_WAIT);
				cv_wait(&chanp->chan_destroy_cv,
				    &chanp->chan_mutex);
				mutex_exit(&chanp->chan_mutex);
				cma_destroy_id(rdma_idp);
			} else {
				cma_set_chan_state(chanp,
				    SOL_CMA_CHAN_DESTROY_PENDING);
				mutex_exit(&chanp->chan_mutex);
			}
		} else {
			/*
			 * No more callbacks are expected for this CMID.
			 * Free this CMID.
			 */
			mutex_exit(&chanp->chan_mutex);
			cma_destroy_id(rdma_idp);
		}
	} else if (is_root_cmid == 0 && state ==
	    SOL_CMA_CHAN_DISCONNECT && SOL_CMAID_CONNECTED(chanp)) {
		/*
		 * CM ID was connected and a disconnect is in progress.
		 * Free of this CM ID is done on the DISCONNECT
		 * notification for this CMID.
		 */
		cma_set_chan_state(chanp, SOL_CMA_CHAN_DESTROY_PENDING);
		mutex_exit(&chanp->chan_mutex);
	} else if (state != SOL_CMA_CHAN_DESTROY_PENDING) {
		/* CM ID, not connected, just free it. */
		mutex_exit(&chanp->chan_mutex);
		cma_destroy_id(rdma_idp);
	} else
		mutex_exit(&chanp->chan_mutex);

	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id: ret");
}
639
640 /*
641 * State transitions for Address resolution :
642 * Active Side (Client) :
643 * 1. CREATE_ID-->BIND_ADDR-->RESOLVE_ADDR-->RESOLVE_ROUTE
644 *
645 * Passive Side (Server) :
646 * 2. CREATE_ID-->RESOLVE_ADDR-->RESOLVE_ROUTE
647 * IF_ADDR_ANY can be passed as local address in RESOLVE_ADDR
648 */
/*
 * rdma_bind_addr - rdma_cm.h entry point: bind a CMID to a local
 * address.  Valid only in state IDLE (moves the channel to BOUND).
 * The address is first offered to the IB transport and, when compiled
 * with IWARP_SUPPORT, to iWARP next; if neither accepts it the channel
 * state is rolled back to IDLE and EINVAL is returned.
 */
int
rdma_bind_addr(struct rdma_cm_id *idp, struct sockaddr *addr)
{
	sol_cma_chan_t		*chanp;
	struct rdma_addr	*addrp;
	int			ret;

	ASSERT(idp);
	ASSERT(addr);
	chanp = (sol_cma_chan_t *)idp;
	addrp = &(idp->route.addr);
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_bind_addr(%p, %p)",
	    idp, addr);

	mutex_enter(&chanp->chan_mutex);
	/* Atomically require IDLE and transition to BOUND. */
	ret = cma_cas_chan_state(chanp, SOL_CMA_CHAN_IDLE, SOL_CMA_CHAN_BOUND);
	if (ret) {
		mutex_exit(&chanp->chan_mutex);
		return (ret);
	}
	/* Copy the local address to rdma_id structure */
	bcopy((void *)addr, (void *)&(addrp->src_addr),
	    sizeof (struct sockaddr));
	mutex_exit(&chanp->chan_mutex);

	/*
	 * First call rdma_ib_bind_addr() to bind this address.
	 * Next call rdma_iw_bind_addr() to bind this address.
	 * For IF_ADDR_ANY, IB address is given priority over
	 * iWARP.
	 */
	if (chanp->chan_ib_client_hdl == NULL) {
		ofs_client_t	*ofs_clnt;

		/* Default to the sol_ofs module's own IBTF handle. */
		ofs_clnt = (ofs_client_t *)sol_cma_ib_client->clnt_hdl;
		chanp->chan_ib_client_hdl = ofs_clnt->ibt_hdl;
	}
	if (chanp->chan_ib_client_hdl && rdma_ib_bind_addr(idp, addr) == 0) {
		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
		    "rdma_bind_addr: ret IB @");
		return (0);
#ifdef	IWARP_SUPPORT
	} else if (chanp->chan_iw_client_hdl && rdma_iw_bind_addr(idp, addr)
	    == 0) {
		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
		    "rdma_bind_addr: ret iWARP @");
		return (0);
#endif	/* IWARP_SUPPORT */
	}

	/* Neither transport accepted the address; roll back to IDLE. */
	mutex_enter(&chanp->chan_mutex);
	cma_set_chan_state(chanp, SOL_CMA_CHAN_IDLE);
	mutex_exit(&chanp->chan_mutex);
	SOL_OFS_DPRINTF_L4(sol_rdmacm_dbg_str, "rdma_bind_addr: ret failure!");
	return (EINVAL);
}
705
706 int
707 rdma_resolve_addr(struct rdma_cm_id *idp, struct sockaddr *src_addr,
708 struct sockaddr *dst_addr, int timeout_ms)
709 {
710 sol_cma_chan_t *chanp;
711 struct rdma_addr *addrp;
712 cma_chan_state_t state;
713
714 ASSERT(idp);
715 chanp = (sol_cma_chan_t *)idp;
716 addrp = &(idp->route.addr);
717 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_resolve_addr(%p, %p, "
718 "%p, %x)", idp, src_addr, dst_addr, timeout_ms);
719
720 mutex_enter(&chanp->chan_mutex);
721 state = cma_get_chan_state(chanp);
722 if (state != SOL_CMA_CHAN_IDLE && state != SOL_CMA_CHAN_BOUND) {
723 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
724 "rdma_resolve_addr : invalid chan state %x", state);
725 mutex_exit(&chanp->chan_mutex);
726 return (EINVAL);
727 }
728 if (chanp->chan_cmid_destroy_state &
729 SOL_CMA_CALLER_CMID_DESTROYED) {
730 SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
731 "rdma_resolve_addr : CMID %p, destroy called", chanp);
732 mutex_exit(&chanp->chan_mutex);
733 return (EINVAL);
734 }
735 chanp->chan_cmid_destroy_state |= SOL_CMA_CALLER_API_PROGRESS;
736
737 if (chanp->chan_xport_type == SOL_CMA_XPORT_NONE) {
738 bcopy((void *)src_addr, (void *)&(addrp->src_addr),
739 sizeof (struct sockaddr));
740 }
741 bcopy((void *)dst_addr, (void *)&(addrp->dst_addr),
742 sizeof (struct sockaddr));
743 mutex_exit(&chanp->chan_mutex);
744
745 /*
746 * First resolve this as an @ corresponding to IB fabric
747 * if this fails, resolve this as an @ corresponding to iWARP
748 */
749 if (chanp->chan_ib_client_hdl == NULL) {
750 ofs_client_t *ofs_clnt;
751
752 ofs_clnt = (ofs_client_t *)sol_cma_ib_client->clnt_hdl;
753 chanp->chan_ib_client_hdl = ofs_clnt->ibt_hdl;
754 }
755 if (chanp->chan_ib_client_hdl && rdma_ib_resolve_addr(idp, src_addr,
756 dst_addr, timeout_ms) == 0) {
757 SOL_OFS_DPRINTF_L4(sol_rdmacm_dbg_str,
758 "rdma_resolve_addr: ret IB @");
759 #ifdef IWARP_SUPPORT
760 } else if (chanp->chan_iw_client_hdl && rdma_iw_resolve_addr(idp,
761 src_addr, dst_addr, timeout_ms) == 0) {
762 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
763 "rdma_resolve_addr: ret iWARP @");
764 #endif /* IWARP_SUPPORT */
765 } else {
766 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
767 "rdma_resolve_addr: Invalid @");
768 return (EINVAL);
769 }
770 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_resolve_addr: ret 0");
771 return (0);
772 }
773
/* Forward declaration: synchronous CM event delivery (defined below). */
static void cma_generate_event_sync(struct rdma_cm_id *,
    enum rdma_cm_event_type, int, struct rdma_conn_param *,
    struct rdma_ud_param *);
777
/*
 * Completion callback for rdma_resolve_addr(): delivers
 * RDMA_CM_EVENT_ADDR_RESOLVED (rc == 0, channel moves to ADDR_RESLVD)
 * or RDMA_CM_EVENT_ADDR_ERROR to the consumer synchronously, then
 * clears the API-in-progress flag and wakes any rdma_destroy_id()
 * waiting on this CMID.  If destroy was already requested, the event
 * is suppressed and the waiter is woken immediately.
 */
void
cma_resolve_addr_callback(sol_cma_chan_t *chanp, int rc)
{
	enum rdma_cm_event_type	event;

	mutex_enter(&chanp->chan_mutex);
	if (chanp->chan_cmid_destroy_state &
	    SOL_CMA_CALLER_CMID_DESTROYED) {
		SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
		    "cma_resolve_addr : CMID %p, destroy called", chanp);
		chanp->chan_cmid_destroy_state &=
		    ~SOL_CMA_CALLER_API_PROGRESS;
		cv_broadcast(&chanp->chan_destroy_cv);
		mutex_exit(&chanp->chan_mutex);
		return;
	}
	if (rc == 0) {
		cma_set_chan_state(chanp, SOL_CMA_CHAN_ADDR_RESLVD);
		event = RDMA_CM_EVENT_ADDR_RESOLVED;
	} else
		event = RDMA_CM_EVENT_ADDR_ERROR;

	/*
	 * Generate RDMA_CM_EVENT_ADDR_RESOLVED event
	 * This will result in RDMA_USER_CM_CMD_RESOLVE_ROUTE in
	 * userland.
	 */
	/* EVENT_PROGRESS keeps destroy from racing the consumer callback. */
	chanp->chan_cmid_destroy_state |= SOL_CMA_CALLER_EVENT_PROGRESS;
	mutex_exit(&chanp->chan_mutex);
	cma_generate_event_sync((struct rdma_cm_id *)chanp, event, 0,
	    NULL, NULL);

	mutex_enter(&chanp->chan_mutex);
	chanp->chan_cmid_destroy_state &= ~SOL_CMA_CALLER_API_PROGRESS;
	if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED)
		cv_broadcast(&chanp->chan_destroy_cv);
	mutex_exit(&chanp->chan_mutex);
}
816
/*
 * rdma_resolve_route - rdma_cm.h entry point: resolve the route to the
 * destination.  Valid only in state ADDR_RESLVD (moves the channel to
 * ROUTE_RESLVD).  Route information was already gathered during
 * address resolution, so the ROUTE_RESOLVED event is generated
 * directly from here; timeout_ms is only logged.  Returns 0 on
 * success, EINVAL on bad state or a destroyed CMID.
 */
int
rdma_resolve_route(struct rdma_cm_id *idp, int timeout_ms)
{
	sol_cma_chan_t		*chanp;

	ASSERT(idp);
	chanp = (sol_cma_chan_t *)idp;
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "resolve_route(%p, %x)", idp,
	    timeout_ms);

	mutex_enter(&chanp->chan_mutex);
	if (cma_cas_chan_state(chanp, SOL_CMA_CHAN_ADDR_RESLVD,
	    SOL_CMA_CHAN_ROUTE_RESLVD) != 0) {
		mutex_exit(&chanp->chan_mutex);
		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
		    "resolve_route: Invalid state");
		return (EINVAL);
	}
	if (chanp->chan_cmid_destroy_state &
	    SOL_CMA_CALLER_CMID_DESTROYED) {
		SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
		    "rdma_resolve_route : CMID %p, destroy called", chanp);
		mutex_exit(&chanp->chan_mutex);
		return (EINVAL);
	}
	/* Block rdma_destroy_id() while the event is generated. */
	chanp->chan_cmid_destroy_state |= SOL_CMA_CALLER_API_PROGRESS;
	mutex_exit(&chanp->chan_mutex);

	/*
	 * Generate RDMA_CM_EVENT_ROUTE_RESOLVED event
	 * This will result in RDMA_USER_CM_CMD_RESOLVE_ROUTE in
	 * userland
	 */
	cma_generate_event(idp, RDMA_CM_EVENT_ROUTE_RESOLVED, 0,
	    NULL, NULL);

	mutex_enter(&chanp->chan_mutex);
	chanp->chan_cmid_destroy_state &= ~SOL_CMA_CALLER_API_PROGRESS;
	if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED)
		cv_broadcast(&chanp->chan_destroy_cv);
	mutex_exit(&chanp->chan_mutex);

	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "resolve_route: ret 0");
	return (0);
}
862
863 /*
864 * Connect or Listen request should be send after Route is resolved
865 *
866 * Active Side (Client) :
867 * 1. (State ROUTE_RESOLVED)-->CONNECT-->ACCEPT/REJECT-->DISCONNECT
868 * -->DESTROY_ID-->close(9E)
869 * 2. Same as (1), DESTROY_ID without DISCONNECT
870 * 3. Same as (1), close(9e) without DESTROY_ID.
871 *
872 * Passive Side (Server) :
873 * 4. (State ROUTE_RESOLVED)-->LISTEN->DISCONNECT
874 * -->DESTROY_ID-->close(9E)
875 * 5. Same as (4), DESTROY_ID without DISCONNECT
876 * 6. Same as (4), close(9e) without DESTROY_ID.
877 */
/*
 * rdma_connect - rdma_cm.h entry point: initiate a connection on an
 * active-side CMID.  Requires a known transport and state
 * ROUTE_RESLVD (moves the channel to CONNECT), then dispatches to the
 * IB (or, with IWARP_SUPPORT, iWARP) transport connect routine.
 * Returns 0 on success or an errno value.
 *
 * NOTE(review): the transport connect is invoked with chan_mutex
 * held — presumably rdma_ib_connect() never blocks on this CMID's
 * callbacks; confirm before restructuring.
 */
int
rdma_connect(struct rdma_cm_id *idp, struct rdma_conn_param *conn_param)
{
	sol_cma_chan_t		*chanp;
	int			ret = EINVAL;

	ASSERT(idp);
	chanp = (sol_cma_chan_t *)idp;
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_connect(%p, %p)", idp,
	    conn_param);

	mutex_enter(&chanp->chan_mutex);
	if (chanp->chan_xport_type == SOL_CMA_XPORT_NONE) {
		mutex_exit(&chanp->chan_mutex);
		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
		    "rdma_connect, Invalid Xport");
		return (EINVAL);
	}
	if (cma_cas_chan_state(chanp, SOL_CMA_CHAN_ROUTE_RESLVD,
	    SOL_CMA_CHAN_CONNECT)) {
		mutex_exit(&chanp->chan_mutex);
		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
		    "rdma_connect, Invalid state");
		return (EINVAL);
	}

	if (chanp->chan_xport_type == SOL_CMA_XPORT_IB) {
		ret = rdma_ib_connect(idp, conn_param);
#ifdef	IWARP_SUPPORT
	} else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP) {
		ret = rdma_iw_connect(idp, conn_param);
#endif	/* IWARP_SUPPORT */
	}
	mutex_exit(&chanp->chan_mutex);

	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_connect: ret %x", ret);
	return (ret);
}
916
/* Helpers to set up / tear down root listen CMID state (defined below). */
static int cma_init_listen_root(sol_cma_chan_t *);
static void cma_fini_listen_root(sol_cma_chan_t *);
919
920 int
921 rdma_listen(struct rdma_cm_id *idp, int bklog)
922 {
923 sol_cma_chan_t *chanp;
924 int ret = 0;
925 genlist_entry_t *entry;
926 cma_chan_state_t state;
927
928 ASSERT(idp);
929 chanp = (sol_cma_chan_t *)idp;
930 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_listen(%p, %x)",
931 idp, bklog);
932
933 mutex_enter(&chanp->chan_mutex);
934 state = cma_get_chan_state(chanp);
935 if (state == SOL_CMA_CHAN_IDLE) {
936 mutex_exit(&chanp->chan_mutex);
937 return (EINVAL);
938 }
939 cma_set_chan_state(chanp, SOL_CMA_CHAN_LISTEN);
940
941 if (chanp->chan_cmid_destroy_state &
942 SOL_CMA_CALLER_CMID_DESTROYED) {
943 SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
944 "rdma_listen : CMID %p, destroy called", chanp);
945 mutex_exit(&chanp->chan_mutex);
946 return (EINVAL);
947 }
948 chanp->chan_cmid_destroy_state |= SOL_CMA_CALLER_API_PROGRESS;
949
950 ASSERT(chanp->chan_listenp == NULL);
951
952 chanp->chan_listenp = kmem_zalloc(sizeof (sol_cma_listen_info_t),
953 KM_SLEEP);
954 init_genlist(&(CHAN_LISTEN_LIST(chanp)));
955 (chanp->chan_listenp)->listen_is_root = 1;
956 ret = cma_init_listen_root(chanp);
957 if (ret) {
958 chanp->chan_listenp = NULL;
959 mutex_exit(&chanp->chan_mutex);
960 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str, "rdma_listen: "
961 "cma_init_listen_root: failed");
962 kmem_free(chanp->chan_listenp,
963 sizeof (sol_cma_listen_info_t));
964 return (EINVAL);
965 }
966
967 if (chanp->chan_xport_type == SOL_CMA_XPORT_NONE) {
968 ibcma_append_listen_list(idp);
969 #ifdef IWARP_SUPPORT
970 iwcma_append_listen_list(idp);
971 #endif
972 } else if (chanp->chan_xport_type == SOL_CMA_XPORT_IB) {
973 ibcma_append_listen_list(idp);
974 #ifdef IWARP_SUPPORT
975 } else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP) {
976 iwcma_append_listen_list(idp);
977 #endif /* IWARP_SUPPORT */
978 }
979
980 if (genlist_empty(&(CHAN_LISTEN_LIST(chanp)))) {
981 cma_fini_listen_root(chanp);
982 kmem_free((void *)chanp->chan_listenp,
983 sizeof (sol_cma_listen_info_t));
984 chanp->chan_listenp = NULL;
985 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str, "rdma_listen: "
986 "No listeners");
987 mutex_exit(&chanp->chan_mutex);
988 return (0);
989 }
990
991 if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED) {
992 chanp->chan_cmid_destroy_state &=
993 ~SOL_CMA_CALLER_API_PROGRESS;
994 cv_broadcast(&chanp->chan_destroy_cv);
995 }
996
997 genlist_for_each(entry, &(CHAN_LISTEN_LIST(chanp))) {
998 struct rdma_cm_id *ep_idp;
999 sol_cma_chan_t *ep_chanp;
1000
1001 ep_idp = (struct rdma_cm_id *)entry->data;
1002 ep_chanp = (sol_cma_chan_t *)ep_idp;
1003 if (ep_chanp->chan_xport_type == SOL_CMA_XPORT_IB)
1004 ret = rdma_ib_listen(ep_idp, bklog);
1005 #ifdef IWARP_SUPPORT
1006 if (ep_chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
1007 ret = rdma_iw_listen(ep_idp, bklog);
1008 #endif
1009 if (ret)
1010 break;
1011 }
1012
1013 chanp->chan_cmid_destroy_state &= ~SOL_CMA_CALLER_API_PROGRESS;
1014 if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED)
1015 cv_broadcast(&chanp->chan_destroy_cv);
1016 mutex_exit(&chanp->chan_mutex);
1017
1018 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_listen: ret %x", ret);
1019 return (ret);
1020 }
1021
/*
 * rdma_accept - Accept an incoming connection request on a CMID.
 *
 *	idp		- CMID on which the CONNECT REQUEST event was
 *			  delivered (server side).
 *	conn_param	- connection parameters, passed through to the
 *			  transport-specific accept routine.
 *
 * Returns 0 on success; EINVAL on bad channel state or if the request
 * CMID was already removed (e.g. because of a timeout); otherwise the
 * transport-specific accept's return value.
 */
int
rdma_accept(struct rdma_cm_id *idp, struct rdma_conn_param *conn_param)
{
	struct rdma_cm_id	*root_idp;
	sol_cma_chan_t		*root_chanp, *chanp;
	int			ret = EINVAL;

	ASSERT(idp);
	chanp = (sol_cma_chan_t *)idp;
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_accept(%p, %p)",
	    idp, conn_param);

	/*
	 * Valid only from the LISTEN or CONNECT state; both CAS
	 * attempts failing means the channel is in neither.
	 */
	mutex_enter(&chanp->chan_mutex);
	if (cma_cas_chan_state(chanp, SOL_CMA_CHAN_LISTEN,
	    SOL_CMA_CHAN_ACCEPT) && cma_cas_chan_state(chanp,
	    SOL_CMA_CHAN_CONNECT, SOL_CMA_CHAN_ACCEPT)) {
		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
		    "rdma_accept, Invalid state");
		mutex_exit(&chanp->chan_mutex);
		return (EINVAL);
	}
	mutex_exit(&chanp->chan_mutex);

	root_idp = CHAN_LISTEN_ROOT(chanp);
	root_chanp = (sol_cma_chan_t *)root_idp;
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "accept: root_idp %p",
	    root_idp);

	/* For TCP, delete from REQ AVL & insert to ACPT AVL */
	if (root_idp && root_idp->ps == RDMA_PS_TCP) {
		void		*find_ret;
		avl_index_t	where;

		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "accept: root_idp %p"
		    "REQ AVL remove %p", root_chanp, idp);
		/* Lock order: root (listen) CMID first, then this CMID */
		mutex_enter(&root_chanp->chan_mutex);
		mutex_enter(&chanp->chan_mutex);

		/*
		 * This CMID has been deleted, maybe because of timeout.
		 * Return EINVAL.
		 */
		if (chanp->chan_req_state != REQ_CMID_NOTIFIED) {
			mutex_exit(&chanp->chan_mutex);
			mutex_exit(&root_chanp->chan_mutex);
			SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
			    "accept: root_idp %p chanp %p, not in REQ "
			    "AVL tree", root_chanp, chanp);
			return (EINVAL);
		}
		ASSERT(cma_get_req_idp(root_idp, chanp->chan_session_id));
		avl_remove(&root_chanp->chan_req_avl_tree, idp);


		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
		    "Add to ACPT AVL of %p IDP, idp %p, qp_hdl %p",
		    root_idp, idp, chanp->chan_qp_hdl);
		/* ACPT AVL is keyed by chan_qp_hdl (see sol_cma_cmid_cmp) */
		find_ret = avl_find(&root_chanp->chan_acpt_avl_tree,
		    (void *)chanp->chan_qp_hdl, &where);
		if (find_ret) {
			chanp->chan_req_state = REQ_CMID_SERVER_NONE;
			mutex_exit(&chanp->chan_mutex);
			mutex_exit(&root_chanp->chan_mutex);
			SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
			    "DUPLICATE ENTRY in ACPT AVL : root %p, "
			    "idp %p, qp_hdl %p",
			    root_idp, idp, chanp->chan_qp_hdl);
			return (EINVAL);
		}
		avl_insert(&root_chanp->chan_acpt_avl_tree,
		    (void *)idp, where);
		chanp->chan_req_state = REQ_CMID_ACCEPTED;
		mutex_exit(&chanp->chan_mutex);
		mutex_exit(&root_chanp->chan_mutex);
	}

	if (root_idp && IS_UDP_CMID(root_idp)) {
		cma_chan_state_t	chan_state;

		/*
		 * Accepting the connect request, no more events for this
		 * connection.
		 */
		cma_handle_nomore_events(chanp);
		mutex_enter(&chanp->chan_mutex);
		chan_state = cma_get_chan_state(chanp);
		mutex_exit(&chanp->chan_mutex);
		/* If rdma_destroy_id() was called, destroy CMID */
		if (chan_state == SOL_CMA_CHAN_DESTROY_PENDING) {
			cma_destroy_id((struct rdma_cm_id *)chanp);
			return (EINVAL);
		}
	}

	/* Dispatch to the transport-specific accept */
	if (chanp->chan_xport_type == SOL_CMA_XPORT_IB)
		ret = rdma_ib_accept(idp, conn_param);
#ifdef	IWARP_SUPPORT
	if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
		ret = rdma_iw_accept(idp, conn_param);
#endif	/* IWARP_SUPPORT */

	/*
	 * Transport accept failed: for TCP, move the CMID from the ACPT
	 * AVL tree back to the REQ AVL tree so it can still be rejected
	 * or cleaned up.
	 *
	 * NOTE(review): the re-insert below searches the REQ AVL tree by
	 * chan_qp_hdl, but sol_cma_req_cmid_cmp() keys that tree on
	 * chan_session_id - confirm the two values coincide here.
	 */
	if (ret && root_idp && idp->ps == RDMA_PS_TCP) {
		void		*find_ret;
		avl_index_t	where;

		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
		    "Delete from REQ AVL of %p IDP, idp %p",
		    root_idp, idp);
		mutex_enter(&root_chanp->chan_mutex);
		mutex_enter(&chanp->chan_mutex);
		if (chanp->chan_req_state == REQ_CMID_ACCEPTED) {
			ASSERT(cma_get_acpt_idp(root_idp,
			    chanp->chan_qp_hdl));
			avl_remove(&root_chanp->chan_acpt_avl_tree,
			    idp);
			find_ret = avl_find(&root_chanp->chan_req_avl_tree,
			    (void *)chanp->chan_qp_hdl, &where);
			if (find_ret) {
				chanp->chan_req_state = REQ_CMID_SERVER_NONE;
				mutex_exit(&chanp->chan_mutex);
				mutex_exit(&root_chanp->chan_mutex);
				SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
				    "DUPLICATE ENTRY in REQ AVL : root %p, "
				    "idp %p, session_id %p",
				    root_idp, idp, chanp->chan_session_id);
				return (EINVAL);
			}
			avl_insert(&root_chanp->chan_req_avl_tree, idp, where);
			chanp->chan_req_state = REQ_CMID_NOTIFIED;
		}
		mutex_exit(&chanp->chan_mutex);
		mutex_exit(&root_chanp->chan_mutex);
	}

	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_accept: ret %x", ret);
	return (ret);
}
1159
1160 int
1161 rdma_notify(struct rdma_cm_id *idp, enum ib_event_type evt)
1162 {
1163 sol_cma_chan_t *chanp;
1164
1165 ASSERT(idp);
1166 chanp = (sol_cma_chan_t *)idp;
1167 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_notify(%p, %x)", idp, evt);
1168
1169 mutex_enter(&chanp->chan_mutex);
1170 if (cma_cas_chan_state(chanp, SOL_CMA_CHAN_ROUTE_RESLVD,
1171 SOL_CMA_CHAN_EVENT_NOTIFIED)) {
1172 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1173 "rdma_notify, Invalid state");
1174 mutex_exit(&chanp->chan_mutex);
1175 return (EINVAL);
1176 }
1177 mutex_exit(&chanp->chan_mutex);
1178
1179 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_notify: ret 0");
1180 return (0);
1181 }
1182
1183 int
1184 rdma_reject(struct rdma_cm_id *idp, const void *priv_data,
1185 uint8_t priv_data_len)
1186 {
1187 struct rdma_cm_id *root_idp;
1188 sol_cma_chan_t *root_chanp, *chanp;
1189 int ret = EINVAL;
1190
1191 ASSERT(idp);
1192 chanp = (sol_cma_chan_t *)idp;
1193 root_idp = CHAN_LISTEN_ROOT(chanp);
1194 root_chanp = (sol_cma_chan_t *)root_idp;
1195 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_reject(%p, %p)", idp,
1196 priv_data, priv_data_len);
1197
1198 mutex_enter(&chanp->chan_mutex);
1199 if (cma_cas_chan_state(chanp, SOL_CMA_CHAN_LISTEN,
1200 SOL_CMA_CHAN_REJECT)) {
1201 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1202 "rdma_accept, Invalid state");
1203 mutex_exit(&chanp->chan_mutex);
1204 return (EINVAL);
1205 }
1206 mutex_exit(&chanp->chan_mutex);
1207
1208 if (root_idp) {
1209 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "reject: root_idp %p"
1210 "REQ AVL remove %p", root_chanp, idp);
1211
1212 /*
1213 * Remove from REQ AVL tree. If this CMID has been deleted,
1214 * it maybe because of timeout. Return EINVAL.
1215 */
1216 mutex_enter(&root_chanp->chan_mutex);
1217 mutex_enter(&chanp->chan_mutex);
1218 if (chanp->chan_req_state != REQ_CMID_NOTIFIED &&
1219 chanp->chan_req_state != REQ_CMID_QUEUED) {
1220 mutex_exit(&chanp->chan_mutex);
1221 mutex_exit(&root_chanp->chan_mutex);
1222 SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
1223 "reject: root_idp %p chanp %p, not in REQ "
1224 "AVL tree", root_chanp, chanp);
1225 return (EINVAL);
1226 }
1227 ASSERT(cma_get_req_idp(root_idp, chanp->chan_session_id));
1228 avl_remove(&root_chanp->chan_req_avl_tree, idp);
1229 chanp->chan_req_state = REQ_CMID_SERVER_NONE;
1230 mutex_exit(&chanp->chan_mutex);
1231 mutex_exit(&root_chanp->chan_mutex);
1232 }
1233
1234 if (chanp->chan_xport_type == SOL_CMA_XPORT_IB)
1235 ret = rdma_ib_reject(idp, priv_data, priv_data_len);
1236 #ifdef IWARP_SUPPORT
1237 if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
1238 ret = rdma_iw_reject(idp, priv_data, priv_data_len);
1239 #endif /* IWARP_SUPPORT */
1240
1241
1242 if (!ret && root_idp) {
1243 cma_chan_state_t chan_state;
1244
1245 /*
1246 * Rejecting connect request, no more events for this
1247 * connection.
1248 */
1249 cma_handle_nomore_events(chanp);
1250 mutex_enter(&chanp->chan_mutex);
1251 chan_state = cma_get_chan_state(chanp);
1252 mutex_exit(&chanp->chan_mutex);
1253 /* If rdma_destroy_id() was called, destroy CMID */
1254 if (chan_state == SOL_CMA_CHAN_DESTROY_PENDING)
1255 cma_destroy_id((struct rdma_cm_id *)chanp);
1256 } else if (ret && root_idp) {
1257 avl_index_t where;
1258
1259 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1260 "reject fail: Add to Req AVL of %p IDP, idp %p,"
1261 "session_id %p", root_idp, idp,
1262 chanp->chan_session_id);
1263 mutex_enter(&root_chanp->chan_mutex);
1264 mutex_enter(&chanp->chan_mutex);
1265 if (chanp->chan_req_state == REQ_CMID_SERVER_NONE) {
1266 if (avl_find(&root_chanp->chan_req_avl_tree,
1267 (void *)chanp->chan_session_id, &where)) {
1268 mutex_exit(&chanp->chan_mutex);
1269 mutex_exit(&root_chanp->chan_mutex);
1270 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1271 "DUPLICATE ENTRY in REQ AVL : root %p, "
1272 "idp %p, session_id %p",
1273 root_idp, idp, chanp->chan_session_id);
1274 return (EINVAL);
1275 }
1276 avl_insert(&root_chanp->chan_req_avl_tree,
1277 (void *)idp, where);
1278 chanp->chan_req_state = REQ_CMID_NOTIFIED;
1279 }
1280 mutex_exit(&chanp->chan_mutex);
1281 mutex_exit(&root_chanp->chan_mutex);
1282 }
1283
1284 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_reject: ret %x", ret);
1285 return (ret);
1286 }
1287
/*
 * rdma_disconnect - Tear down an established connection.
 *
 * Returns 0 if idp is NULL; EINVAL if the channel is not connected;
 * otherwise the transport-specific disconnect's return value. If the
 * transport disconnect fails, the previous channel state is restored.
 */
int
rdma_disconnect(struct rdma_cm_id *idp)
{
	sol_cma_chan_t		*chanp;
	int			ret = EINVAL;
	cma_chan_state_t	state;

	/* Cast before the NULL check is safe - no dereference here */
	chanp = (sol_cma_chan_t *)idp;
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_disconnect(%p)", idp);

	if (!idp)
		return (0);

	mutex_enter(&chanp->chan_mutex);
	if (!(SOL_CMAID_CONNECTED(chanp))) {
		SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
		    "rdma_disconnect(%p) - Not connected!!", idp);
		mutex_exit(&chanp->chan_mutex);
		return (EINVAL);
	}
	/* Remember the current state so it can be restored on failure */
	state = cma_get_chan_state(chanp);
	cma_set_chan_state(chanp, SOL_CMA_CHAN_DISCONNECT);
	mutex_exit(&chanp->chan_mutex);

	/*
	 * chan_xport_type is read without chan_mutex here, matching the
	 * other entry points in this file (presumably stable once set -
	 * confirm).
	 */
	if (chanp->chan_xport_type == SOL_CMA_XPORT_IB) {
		ret = rdma_ib_disconnect(idp);
#ifdef	IWARP_SUPPORT
	} else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP) {
		ret = rdma_iw_disconnect(idp);
#endif	/* IWARP_SUPPORT */
	}

	if (ret) {
		/* Transport disconnect failed; roll back the state change */
		mutex_enter(&chanp->chan_mutex);
		cma_set_chan_state(chanp, state);
		mutex_exit(&chanp->chan_mutex);
		return (ret);
	}

	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_disconnect: ret %x", ret);
	return (ret);
}
1330
1331 int
1332 rdma_init_qp_attr(struct rdma_cm_id *idp, struct ib_qp_attr *qpattr,
1333 int *qp_attr_mask)
1334 {
1335 sol_cma_chan_t *chanp;
1336 int ret = EINVAL;
1337
1338 ASSERT(idp);
1339 chanp = (sol_cma_chan_t *)idp;
1340 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_init_qp_attr(%p, %p, %p)",
1341 idp, qpattr, qp_attr_mask);
1342
1343 if (chanp->chan_xport_type == SOL_CMA_XPORT_IB) {
1344 ret = rdma_ib_init_qp_attr(idp, qpattr, qp_attr_mask);
1345 #ifdef IWARP_SUPPORT
1346 } else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
1347 ret = rdma_iw_init_qp_attr(idp, qpattr, qp_attr_mask);
1348 #endif /* IWARP_SUPPORT */
1349 } else {
1350 ret = EINVAL;
1351 }
1352
1353 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1354 "rdma_init_qp_attr: ret %x", ret);
1355
1356 return (ret);
1357 }
1358
1359 int
1360 rdma_join_multicast(struct rdma_cm_id *idp, struct sockaddr *addr,
1361 void *context)
1362 {
1363 sol_cma_chan_t *chanp;
1364 int ret = ENODEV;
1365 cma_chan_state_t state;
1366
1367 ASSERT(idp);
1368 chanp = (sol_cma_chan_t *)idp;
1369 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1370 "rdma_join_multicast(%p, %p, %p)",
1371 idp, addr, context);
1372
1373 mutex_enter(&chanp->chan_mutex);
1374 state = cma_get_chan_state(chanp);
1375 if (state != SOL_CMA_CHAN_BOUND &&
1376 state != SOL_CMA_CHAN_ROUTE_RESLVD &&
1377 state != SOL_CMA_CHAN_ADDR_RESLVD) {
1378 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1379 "rdma_join_multicast, Invalid state");
1380 mutex_exit(&chanp->chan_mutex);
1381 return (EINVAL);
1382 }
1383
1384 if (chanp->chan_xport_type == SOL_CMA_XPORT_IB)
1385 ret = rdma_ib_join_multicast(idp, addr, context);
1386 #ifdef IWARP_SUPPORT
1387 /* No support for Multicast on iWARP */
1388 else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
1389 ret = ENOTSUP;
1390 #endif /* IWARP_SUPPORT */
1391 mutex_exit(&chanp->chan_mutex);
1392
1393 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1394 "rdma_join_multicast: ret %x", ret);
1395 return (ret);
1396 }
1397
1398 void
1399 rdma_leave_multicast(struct rdma_cm_id *idp, struct sockaddr *addr)
1400 {
1401 sol_cma_chan_t *chanp;
1402 cma_chan_state_t state;
1403
1404 ASSERT(idp);
1405 chanp = (sol_cma_chan_t *)idp;
1406 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_leave_multicast(%p, %p)",
1407 idp, addr);
1408
1409 mutex_enter(&chanp->chan_mutex);
1410 state = cma_get_chan_state(chanp);
1411 if (state != SOL_CMA_CHAN_BOUND &&
1412 state != SOL_CMA_CHAN_ROUTE_RESLVD &&
1413 state != SOL_CMA_CHAN_ADDR_RESLVD) {
1414 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1415 "rdma_leave_multicast, Invalid state");
1416 mutex_exit(&chanp->chan_mutex);
1417 return;
1418 }
1419
1420 if (chanp->chan_xport_type == SOL_CMA_XPORT_IB)
1421 rdma_ib_leave_multicast(idp, addr);
1422 #ifdef IWARP_SUPPORT
1423 /* No support for Multicast on iWARP */
1424 else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
1425 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1426 "rdma_leave_multicast, iWARP");
1427 #endif /* IWARP_SUPPORT */
1428 mutex_exit(&chanp->chan_mutex);
1429
1430 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_join_multicast: ret");
1431 }
1432
/*
 * Functions to compare two rdma_cm_id *, used by AVL tree
 * routines.
 */
1437 int
1438 sol_cma_req_cmid_cmp(const void *p1, const void *p2)
1439 {
1440 sol_cma_chan_t *chanp;
1441
1442 chanp = (sol_cma_chan_t *)p2;
1443 if (chanp->chan_session_id > p1)
1444 return (+1);
1445 else if (chanp->chan_session_id < p1)
1446 return (-1);
1447 else
1448 return (0);
1449 }
1450
1451 int
1452 sol_cma_cmid_cmp(const void *p1, const void *p2)
1453 {
1454 sol_cma_chan_t *chanp;
1455
1456 chanp = (sol_cma_chan_t *)p2;
1457 if (chanp->chan_qp_hdl > p1)
1458 return (+1);
1459 else if (chanp->chan_qp_hdl < p1)
1460 return (-1);
1461 else
1462 return (0);
1463 }
1464
1465 /*
1466 * Function to compare two sol_cma_glbl_listen_t *, used by
1467 * AVL tree routines.
1468 */
1469 int
1470 sol_cma_svc_cmp(const void *p1, const void *p2)
1471 {
1472 sol_cma_glbl_listen_t *listenp;
1473 uint64_t sid;
1474
1475 sid = *(uint64_t *)p1;
1476 listenp = (sol_cma_glbl_listen_t *)p2;
1477 if (listenp->cma_listen_chan_sid > sid)
1478 return (+1);
1479 else if (listenp->cma_listen_chan_sid < sid)
1480 return (-1);
1481 else
1482 return (0);
1483 }
1484
/*
 * cma_init_listen_root - Set up the global listen bookkeeping for a
 * root listening CMID.
 *
 * Searches the global tree (sol_cma_glbl_listen_tree) for an entry
 * with this channel's service ID (SID):
 *	- found, same IB client handle: link this channel into the
 *	  existing entry and reuse its service handle;
 *	- found, different client handle: fail with EINVAL;
 *	- not found: allocate a new global entry, initialize the root
 *	  channel via ibcma_init_root_chan(), and insert the entry.
 *
 * Returns 0 on success, EINVAL or ibcma_init_root_chan()'s error on
 * failure. All tree manipulation is done under sol_cma_glob_mutex.
 */
static int
cma_init_listen_root(sol_cma_chan_t *chanp)
{
	sol_cma_glbl_listen_t	*cma_listenp;
	sol_cma_listen_info_t	*chan_listenp;
	int			rc = 0;
	avl_index_t		where = 0;
	uint64_t		listen_sid;

	ASSERT(chanp);
	ASSERT(chanp->chan_listenp);
	chan_listenp = chanp->chan_listenp;

	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
	    "cma_init_listen_root(%p)", chanp);

	/*
	 * First search for matching global listen_info for this SID.
	 * If found with the same client handle, reuse the service
	 * handle, if matching SID is found with different client
	 * handle, return EINVAL.
	 */
	listen_sid = ibcma_init_root_sid(chanp);
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
	    "cma_init_listen_root: search SID 0x%llx",
	    listen_sid);

	mutex_enter(&sol_cma_glob_mutex);
	cma_listenp = avl_find(&sol_cma_glbl_listen_tree,
	    (void *) &listen_sid, &where);
	if (cma_listenp && cma_listenp->cma_listen_clnt_hdl ==
	    chanp->chan_ib_client_hdl) {
		/* Same SID, same client: share the existing entry */
		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
		    "cma_init_listen_root: matching listenp %p SID 0x%llx",
		    cma_listenp, listen_sid);
		chan_listenp->listen_entry = add_genlist(
		    &cma_listenp->cma_listen_chan_list,
		    (uintptr_t)chanp, NULL);
		chan_listenp->chan_glbl_listen_info = cma_listenp;
		ibcma_copy_srv_hdl(chanp, cma_listenp);
		mutex_exit(&sol_cma_glob_mutex);
		return (0);
	} else if (cma_listenp) {
		/* Same SID registered by a different client: reject */
		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
		    "cma_init_listen_root: listenp %p, SID 0x%llx match, "
		    "client hdl prev %p, new %p mismatch",
		    cma_listenp, listen_sid,
		    cma_listenp->cma_listen_clnt_hdl,
		    chanp->chan_ib_client_hdl);
		mutex_exit(&sol_cma_glob_mutex);
		return (EINVAL);
	}

	/* No entry for this SID yet - create and populate a new one */
	cma_listenp = kmem_zalloc(sizeof (sol_cma_glbl_listen_t), KM_SLEEP);
	init_genlist(&cma_listenp->cma_listen_chan_list);
	chan_listenp->listen_entry = add_genlist(
	    &cma_listenp->cma_listen_chan_list, (uintptr_t)chanp, NULL);
	chan_listenp->chan_glbl_listen_info = cma_listenp;
	cma_listenp->cma_listen_clnt_hdl = chanp->chan_ib_client_hdl;
	cma_listenp->cma_listen_chan_sid = listen_sid;

	rc = ibcma_init_root_chan(chanp, cma_listenp);
	if (rc) {
		mutex_exit(&sol_cma_glob_mutex);
		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
		    "cma_init_listen_root: ibcma_init_root_chan failed!!");
		/*
		 * NOTE(review): after the kmem_free below,
		 * chan_listenp->listen_entry and chan_glbl_listen_info
		 * are left pointing at freed memory - confirm callers
		 * never use these fields after a failure return.
		 */
		delete_genlist(&cma_listenp->cma_listen_chan_list,
		    chan_listenp->listen_entry);
		kmem_free(cma_listenp, sizeof (sol_cma_glbl_listen_t));
		return (rc);
	}
	avl_insert(&sol_cma_glbl_listen_tree, cma_listenp, where);
	mutex_exit(&sol_cma_glob_mutex);
	return (0);
}
1560
/*
 * cma_fini_listen_root - Undo cma_init_listen_root() for one channel.
 *
 * Unlinks the channel from its global listen entry; when the channel
 * list of that entry becomes empty and the transport-specific teardown
 * (ibcma_fini_root_chan()) succeeds, the entry is removed from the
 * global tree and freed. Runs under sol_cma_glob_mutex.
 */
static void
cma_fini_listen_root(sol_cma_chan_t *chanp)
{
	sol_cma_glbl_listen_t	*cma_listenp;
	sol_cma_listen_info_t	*chan_listenp;

	ASSERT(chanp);
	ASSERT(chanp->chan_listenp);
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "cma_fini_listen_root(%p)",
	    chanp);
	chan_listenp = chanp->chan_listenp;
	cma_listenp = chan_listenp->chan_glbl_listen_info;
	ASSERT(cma_listenp);
	mutex_enter(&sol_cma_glob_mutex);
	delete_genlist(&cma_listenp->cma_listen_chan_list,
	    chan_listenp->listen_entry);
	/* Last channel gone: retire the global entry if teardown succeeds */
	if (genlist_empty(&cma_listenp->cma_listen_chan_list)) {
		if (ibcma_fini_root_chan(chanp) == 0) {
			avl_remove(&sol_cma_glbl_listen_tree,
			    cma_listenp);
			kmem_free(cma_listenp,
			    sizeof (sol_cma_glbl_listen_t));
		} else
			/* Entry intentionally kept alive on failure */
			SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
			    "cma_fini_listen_root: "
			    "ibcma_fini_root_chan failed");
	}

	mutex_exit(&sol_cma_glob_mutex);
}
1591
/*
 * Argument bundle used to hand off CM event delivery from the caller's
 * context to a system taskq thread (see cma_generate_event() and
 * cma_generate_event_thr()).
 */
typedef struct cma_event_async_arg {
	struct rdma_cm_id	*idp;		/* CMID the event is for */
	enum rdma_cm_event_type	event;		/* CM event to deliver */
	int			status;		/* event status code */
	union {					/* in-struct copy of params */
		struct rdma_conn_param	conn;
		struct rdma_ud_param	param;
	} un;
	struct rdma_conn_param	*conn_param;	/* NULL or &un.conn */
	struct rdma_ud_param	*ud_paramp;	/* NULL or &un.param */
} cma_event_async_arg_t;

static void cma_generate_event_sync(struct rdma_cm_id *,
    enum rdma_cm_event_type, int, struct rdma_conn_param *,
    struct rdma_ud_param *);
1607
1608 void
1609 cma_generate_event_thr(void *arg)
1610 {
1611 cma_event_async_arg_t *event_arg = (cma_event_async_arg_t *)arg;
1612
1613 cma_generate_event_sync(event_arg->idp, event_arg->event,
1614 event_arg->status, event_arg->conn_param,
1615 event_arg->ud_paramp);
1616
1617 if (event_arg->conn_param && event_arg->conn_param->private_data_len)
1618 kmem_free((void *)event_arg->conn_param->private_data,
1619 event_arg->conn_param->private_data_len);
1620 if (event_arg->ud_paramp && event_arg->ud_paramp->private_data_len)
1621 kmem_free((void *)event_arg->ud_paramp->private_data,
1622 event_arg->ud_paramp->private_data_len);
1623 kmem_free(arg, sizeof (cma_event_async_arg_t));
1624 }
1625
/*
 * cma_generate_event - Queue delivery of a CM event to the consumer on
 * a system taskq thread. Private data (if any) is deep-copied so the
 * caller's buffers may be released immediately on return.
 *
 * If the CMID is already marked destroyed, the event is handled
 * synchronously (consumer notification skipped) instead of queued.
 */
void
cma_generate_event(struct rdma_cm_id *idp, enum rdma_cm_event_type event,
    int status, struct rdma_conn_param *conn_param,
    struct rdma_ud_param *ud_paramp)
{
	cma_event_async_arg_t	*event_arg;
	sol_cma_chan_t		*chanp = (sol_cma_chan_t *)idp;

	/*
	 * Set SOL_CMA_CALLER_EVENT_PROGRESS to indicate event
	 * notification is in progress, so that races between
	 * rdma_destroy_id() and event notification is taken care.
	 *
	 * If rdma_destroy_id() has been called for this CMID, call
	 * cma_generate_event_sync() which skips notification to the
	 * consumer and handles the event.
	 */
	mutex_enter(&chanp->chan_mutex);
	chanp->chan_cmid_destroy_state |= SOL_CMA_CALLER_EVENT_PROGRESS;
	if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED) {
		mutex_exit(&chanp->chan_mutex);
		cma_generate_event_sync(idp, event, status, conn_param,
		    ud_paramp);
		return;
	}
	mutex_exit(&chanp->chan_mutex);

	/* Build the argument bundle; freed by cma_generate_event_thr() */
	event_arg = kmem_zalloc(sizeof (cma_event_async_arg_t), KM_SLEEP);
	event_arg->idp = idp;
	event_arg->event = event;
	event_arg->status = status;
	event_arg->conn_param = NULL;
	event_arg->ud_paramp = NULL;
	if (conn_param && conn_param->private_data_len) {
		/* Deep-copy conn params and their private data */
		bcopy(conn_param, &(event_arg->un.conn),
		    sizeof (struct rdma_conn_param));
		event_arg->conn_param = &(event_arg->un.conn);
		event_arg->conn_param->private_data = kmem_zalloc(
		    conn_param->private_data_len, KM_SLEEP);
		bcopy(conn_param->private_data,
		    (void *)event_arg->conn_param->private_data,
		    conn_param->private_data_len);
	} else if (conn_param && conn_param->private_data_len == 0) {
		/*
		 * NOTE(review): the struct is copied into un.conn but
		 * event_arg->conn_param is left NULL, so the taskq
		 * thread delivers the event without conn params in
		 * this case - confirm this is intended.
		 */
		bcopy(conn_param, &(event_arg->un.conn),
		    sizeof (struct rdma_conn_param));
	} else if (ud_paramp) {
		bcopy(ud_paramp, &(event_arg->un.param),
		    sizeof (struct rdma_ud_param));
		event_arg->ud_paramp = &(event_arg->un.param);
		if (ud_paramp->private_data_len) {
			/* Deep-copy the UD private data */
			event_arg->ud_paramp->private_data = kmem_zalloc(
			    ud_paramp->private_data_len, KM_SLEEP);
			bcopy(ud_paramp->private_data,
			    (void *)event_arg->ud_paramp->private_data,
			    ud_paramp->private_data_len);
		} else if (ud_paramp->private_data) {
			/* len == 0: alias the pointer, never freed here */
			event_arg->ud_paramp->private_data =
			    ud_paramp->private_data;
		}
	}

	if (taskq_dispatch(system_taskq, cma_generate_event_thr,
	    (void *)event_arg, TQ_SLEEP) == 0) {
		/*
		 * Dispatch failed: clear the in-progress flag and wake
		 * any rdma_destroy_id() waiter.
		 */
		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
		    "generate_event_async: taskq_dispatch() failed!!");
		mutex_enter(&chanp->chan_mutex);
		chanp->chan_cmid_destroy_state &=
		    ~SOL_CMA_CALLER_EVENT_PROGRESS;
		if (chanp->chan_cmid_destroy_state &
		    SOL_CMA_CALLER_CMID_DESTROYED)
			cv_broadcast(&chanp->chan_destroy_cv);
		mutex_exit(&chanp->chan_mutex);
	}
}
1700
/*
 * cma_generate_event_sync - Deliver a CM event to the consumer's event
 * handler and perform the sol_ofs-specific post-processing.
 *
 * If the CMID has been marked destroyed, the consumer callback is
 * skipped and only the internal bookkeeping (ofs_consume_event) runs.
 * On consumer failure the CMID is disconnected/cleaned up depending on
 * the event type (see the comment at the failure branch below).
 */
static void
cma_generate_event_sync(struct rdma_cm_id *idp, enum rdma_cm_event_type event,
    int status, struct rdma_conn_param *conn_param,
    struct rdma_ud_param *ud_paramp)
{
	struct rdma_cm_event	cm_event;
	sol_cma_chan_t		*chanp = (sol_cma_chan_t *)idp;
	struct rdma_cm_id	*root_idp = NULL;
	sol_cma_chan_t		*root_chanp;
	int			ret;
	cma_chan_state_t	chan_state;

	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "generate_event_sync(%p, %x, "
	    "%x, %p, %p", idp, event, status, conn_param, ud_paramp);

	/* Build the event structure handed to the consumer */
	bzero(&cm_event, sizeof (cm_event));
	cm_event.event = event;
	cm_event.status = status;
	if (conn_param)
		bcopy((void *)conn_param, (void *)(&(cm_event.param.conn)),
		    sizeof (struct rdma_conn_param));
	else if (ud_paramp)
		bcopy((void *)ud_paramp, (void *)(&(cm_event.param.ud)),
		    sizeof (struct rdma_ud_param));

	/*
	 * If the consumer has destroyed the context for this CMID -
	 * do not notify, skip to handling the sol_ofs specific
	 * handling of the event.
	 */
	mutex_enter(&chanp->chan_mutex);
	if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED) {
		mutex_exit(&chanp->chan_mutex);
		goto ofs_consume_event;
	}
	mutex_exit(&chanp->chan_mutex);

	root_idp = CHAN_LISTEN_ROOT(chanp);
	root_chanp = (sol_cma_chan_t *)root_idp;
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "gen_event: root_idp %p",
	    root_idp);

	if (event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		/*
		 * Update chan_req_state for the REQ CMID. Decrement
		 * count of REQ CMIDs not notifed to consumer.
		 */
		ASSERT(root_idp);
		mutex_enter(&root_chanp->chan_mutex);
		root_chanp->chan_req_cnt--;
#ifdef	DEBUG
		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
		    "Dec req_cnt of %p IDP, idp %p, req_cnt %x",
		    root_idp, idp, root_chanp->chan_req_cnt);
#endif
		mutex_exit(&root_chanp->chan_mutex);
	}

	/* Pass the event to the client */
	ret = (idp->event_handler) (idp, &cm_event);

	if (ret) {
		/*
		 * If the consumer returned failure :
		 *	CONNECT_REQUEST :
		 *	1. rdma_disconnect() to disconnect connection.
		 *	2. wakeup destroy, if destroy has been called
		 *	   for this CMID
		 *	3. Destroy CMID if rdma_destroy has not been
		 *	   called.
		 *	DISCONNECTED :
		 *	1. call cma_handle_nomore_events() to cleanup
		 *	Other Events :
		 *	1. Client is expected to destroy the CMID.
		 */
		if (event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			SOL_OFS_DPRINTF_L4(sol_rdmacm_dbg_str,
			    "cma_generate_event_async: consumer failed %d "
			    "event", event);
			if (rdma_disconnect(idp)) {
				SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
				    "generate_event_async: rdma_disconnect "
				    "failed");
			}
			mutex_enter(&chanp->chan_mutex);
			ASSERT(SOL_IS_SERVER_CMID(chanp));
			chanp->chan_req_state = REQ_CMID_SERVER_NONE;
			chanp->chan_cmid_destroy_state &=
			    ~SOL_CMA_CALLER_EVENT_PROGRESS;
			/* Wake the destroyer, or destroy the CMID ourselves */
			if (chanp->chan_cmid_destroy_state &
			    SOL_CMA_CALLER_CMID_DESTROYED) {
				cv_broadcast(&chanp->chan_destroy_cv);
				mutex_exit(&chanp->chan_mutex);
			} else {
				mutex_exit(&chanp->chan_mutex);
				rdma_destroy_id(idp);
			}
		} else if (event == RDMA_CM_EVENT_DISCONNECTED) {
			SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
			    "generate_event_async: consumer failed %d event",
			    event);
			cma_handle_nomore_events(chanp);
			mutex_enter(&chanp->chan_mutex);
			chan_state = cma_get_chan_state(chanp);
			chanp->chan_cmid_destroy_state &=
			    ~SOL_CMA_CALLER_EVENT_PROGRESS;
			if (chanp->chan_cmid_destroy_state &
			    SOL_CMA_CALLER_CMID_DESTROYED) {
				cv_broadcast(&chanp->chan_destroy_cv);
				mutex_exit(&chanp->chan_mutex);
			} else if (chan_state == SOL_CMA_CHAN_DESTROY_PENDING) {
				/* rdma_destroy_id() called: destroy CMID */
				mutex_exit(&chanp->chan_mutex);
				cma_destroy_id((struct rdma_cm_id *)chanp);
			} else
				mutex_exit(&chanp->chan_mutex);
		} else {
			/* Consumer owns the CMID; just log the failure */
			SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
			    "generate_event_async: consumer failed %d event",
			    event);
		}

		return;
	}
ofs_consume_event:
	if (event == RDMA_CM_EVENT_DISCONNECTED) {
		cma_chan_state_t	chan_state;

		cma_handle_nomore_events(chanp);
		mutex_enter(&chanp->chan_mutex);
		chan_state = cma_get_chan_state(chanp);
		chanp->chan_cmid_destroy_state &=
		    ~SOL_CMA_CALLER_EVENT_PROGRESS;
		if (chanp->chan_cmid_destroy_state &
		    SOL_CMA_CALLER_CMID_DESTROYED) {
			cv_broadcast(&chanp->chan_destroy_cv);
			mutex_exit(&chanp->chan_mutex);
		} else if (chan_state == SOL_CMA_CHAN_DESTROY_PENDING) {
			/* If rdma_destroy_id() was called, destroy CMID */
			mutex_exit(&chanp->chan_mutex);
			cma_destroy_id((struct rdma_cm_id *)chanp);
		} else
			mutex_exit(&chanp->chan_mutex);
		return;
	} else if (IS_UDP_CMID(idp) && event == RDMA_CM_EVENT_UNREACHABLE) {
		/*
		 * If rdma_destroy_id() was called, destroy CMID
		 * If not chan_connect_flag/ chan_req_state has already been
		 * set to indicate that it can be deleted.
		 */
		mutex_enter(&chanp->chan_mutex);
		chan_state = cma_get_chan_state(chanp);
		chanp->chan_cmid_destroy_state &=
		    ~SOL_CMA_CALLER_EVENT_PROGRESS;
		if (chanp->chan_cmid_destroy_state &
		    SOL_CMA_CALLER_CMID_DESTROYED) {
			cv_broadcast(&chanp->chan_destroy_cv);
			mutex_exit(&chanp->chan_mutex);
		} else if (chan_state == SOL_CMA_CHAN_DESTROY_PENDING) {
			mutex_exit(&chanp->chan_mutex);
			cma_destroy_id(idp);
		} else
			mutex_exit(&chanp->chan_mutex);
		return;
	}

	/* Default path: clear in-progress flag and wake any destroyer */
	mutex_enter(&chanp->chan_mutex);
	chanp->chan_cmid_destroy_state &= ~SOL_CMA_CALLER_EVENT_PROGRESS;
	if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED)
		cv_broadcast(&chanp->chan_destroy_cv);
	mutex_exit(&chanp->chan_mutex);
}
1873
1874 /* Local Static functions */
1875 static struct rdma_cm_id *
1876 cma_alloc_chan(rdma_cm_event_handler evt_hdlr, void *context,
1877 enum rdma_port_space ps)
1878 {
1879 struct rdma_cm_id *rdma_idp;
1880 sol_cma_chan_t *chanp;
1881
1882 chanp = kmem_zalloc(sizeof (sol_cma_chan_t), KM_SLEEP);
1883 mutex_init(&chanp->chan_mutex, NULL, MUTEX_DRIVER, NULL);
1884 cv_init(&chanp->chan_destroy_cv, NULL, CV_DRIVER, NULL);
1885 rdma_idp = &(chanp->chan_rdma_cm);
1886 rdma_idp->context = context;
1887 rdma_idp->ps = ps;
1888 rdma_idp->event_handler = evt_hdlr;
1889 mutex_enter(&chanp->chan_mutex);
1890 cma_set_chan_state(chanp, SOL_CMA_CHAN_IDLE);
1891 avl_create(&chanp->chan_req_avl_tree, sol_cma_req_cmid_cmp,
1892 sizeof (sol_cma_chan_t),
1893 offsetof(sol_cma_chan_t, chan_req_avl_node));
1894 avl_create(&chanp->chan_acpt_avl_tree, sol_cma_cmid_cmp,
1895 sizeof (sol_cma_chan_t),
1896 offsetof(sol_cma_chan_t, chan_acpt_avl_node));
1897 mutex_exit(&chanp->chan_mutex);
1898
1899 return (rdma_idp);
1900 }
1901
1902 /* Change the state of sol_cma_chan_t */
1903 static void
1904 cma_set_chan_state(sol_cma_chan_t *chanp, cma_chan_state_t newstate)
1905 {
1906 ASSERT(MUTEX_HELD(&chanp->chan_mutex));
1907 chanp->chan_state = newstate;
1908 }
1909
1910 cma_chan_state_t
1911 cma_get_chan_state(sol_cma_chan_t *chanp)
1912 {
1913 ASSERT(MUTEX_HELD(&chanp->chan_mutex));
1914 return (chanp->chan_state);
1915 }
1916
1917 /* Check & Swap the state of sol_ucma_chan_t */
1918 static int
1919 cma_cas_chan_state(sol_cma_chan_t *chanp, cma_chan_state_t prevstate,
1920 cma_chan_state_t newstate)
1921 {
1922 int ret = 0;
1923
1924 ASSERT(MUTEX_HELD(&chanp->chan_mutex));
1925 if (chanp->chan_state != prevstate)
1926 ret = -1;
1927 else
1928 chanp->chan_state = newstate;
1929
1930 return (ret);
1931 }
1932
/*
 * cma_free_listen_list - Drain and free a root CMID's list of per-HCA
 * endpoint listen channels.
 *
 * Each endpoint channel is torn down via ibcma_fini_ep_chan(); on
 * success it is unlinked from its cma_device's endpoint list, its
 * device reference released, and its memory freed.
 */
static void
cma_free_listen_list(struct rdma_cm_id *idp)
{
	genlist_entry_t	*entry;
	sol_cma_chan_t	*chanp = (sol_cma_chan_t *)idp;

	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "cma_free_listen_list(%p)", idp);
	/* Pop entries one at a time, taking chan_mutex only for the pop */
	mutex_enter(&chanp->chan_mutex);
	entry = remove_genlist_head(&(CHAN_LISTEN_LIST(chanp)));
	mutex_exit(&chanp->chan_mutex);
	while (entry) {
		sol_cma_chan_t	*ep_chanp;

		ep_chanp = (sol_cma_chan_t *)entry->data;
		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "fini_ep_chan: %p",
		    ep_chanp);
		/*
		 * NOTE(review): when ibcma_fini_ep_chan() fails, neither
		 * ep_chanp nor the popped genlist entry is freed here -
		 * confirm whether that leak is intentional (e.g. cleanup
		 * deferred elsewhere).
		 */
		if (ibcma_fini_ep_chan(ep_chanp) == 0) {
			genlist_entry_t	*entry1;
			struct ib_device *device;
			cma_device_t	*cma_device;

			ASSERT(ep_chanp->chan_listenp);
			mutex_enter(&ep_chanp->chan_mutex);
			entry1 = ep_chanp->chan_listenp->listen_ep_dev_entry;
			device = ep_chanp->chan_listenp->listen_ep_device;
			ASSERT(device);
			cma_device = device->data;
			/* Unlink from the device's endpoint channel list */
			delete_genlist(&cma_device->cma_epchan_list,
			    entry1);
			sol_cma_release_device(
			    (struct rdma_cm_id *)ep_chanp);
			mutex_exit(&ep_chanp->chan_mutex);
			if (ep_chanp->chan_listenp)
				kmem_free(ep_chanp->chan_listenp,
				    sizeof (sol_cma_listen_info_t));

			mutex_destroy(&ep_chanp->chan_mutex);
			cv_destroy(&ep_chanp->chan_destroy_cv);
			kmem_free(ep_chanp, sizeof (sol_cma_chan_t));
			kmem_free(entry, sizeof (genlist_entry_t));
		}

		mutex_enter(&chanp->chan_mutex);
		entry = remove_genlist_head(&(CHAN_LISTEN_LIST(chanp)));
		mutex_exit(&chanp->chan_mutex);
	}
}
1980
1981 /*
1982 * Destroy a listening CMID when :
1983 * a. All CONNECTION REQUEST recieved have been rejected
1984 * or closed.
1985 * b. No CONNECTION REQUEST recieved.
1986 * Do not destroy a listening CMID when :
1987 * a. CONNECTION REQUEST has been recieved and not been
1988 * accepted from the passive / server side.
1989 * b. CONNECTION REQUEST has been recieved and has been
1990 * accepted from the passive server side.
1991 * Mark the listening CMID as destroy pending.
1992 *
1993 * For CMIDs created for rdma_connect() or created for a
1994 * CONNECT request, destroy the CMID only when :
1995 * CONNECTION has been closed or rejected.
1996 *
1997 * Mark the CMID as destroy pending.
1998 *
1999 * When a connection is rejected or closed :
2000 * Check if flag indicates - destroy pending,
2001 * cma_destroy_id() is called, this also does
2002 *
2003 * If there is a listening CMID assosiated with it,
2004 * call cma_destroy_if(listen_cmid);
2005 */
/*
 * Destroy a CMID, or defer the destroy if connection activity is
 * still outstanding. When any REQ-received or established child
 * CMIDs remain, the channel is only marked DESTROY_PENDING and the
 * actual teardown happens when the last child goes away. Otherwise
 * the transport-specific state, listener list, and the channel
 * itself are freed; a parent listening CMID whose own destroy was
 * pending may be destroyed recursively.
 */
void
cma_destroy_id(struct rdma_cm_id *idp)
{
	sol_cma_chan_t	*chanp = (sol_cma_chan_t *)idp;
	cma_chan_state_t	state;
	ulong_t		acpt_nodes, req_nodes;

	mutex_enter(&chanp->chan_mutex);
	acpt_nodes = avl_numnodes(&chanp->chan_acpt_avl_tree);
	req_nodes = avl_numnodes(&chanp->chan_req_avl_tree);
	state = cma_get_chan_state(chanp);
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "cma_destroy_id(%p)- "
	    "est CMIDs %ld, req CMID %ld, listen_root %p, state %x, %x",
	    idp, acpt_nodes, req_nodes, chanp->listen_root,
	    state, chanp->chan_req_state);

	/*
	 * If there are either REQ recieved or Established CMIDs just return.
	 * rdma_destroy() for these CMIDs can be called by client later.
	 */
	if (acpt_nodes || req_nodes) {
		cma_set_chan_state(chanp, SOL_CMA_CHAN_DESTROY_PENDING);
		mutex_exit(&chanp->chan_mutex);
		return;
	}
	cma_set_chan_state(chanp, SOL_CMA_CHAN_DESTROYING);
	avl_destroy(&chanp->chan_req_avl_tree);
	avl_destroy(&chanp->chan_acpt_avl_tree);

	mutex_exit(&chanp->chan_mutex);
	/* Free any path records cached from route resolution. */
	if (idp->route.path_rec) {
		kmem_free(idp->route.path_rec,
		    sizeof (struct ib_sa_path_rec) * idp->route.num_paths);
		idp->route.path_rec = NULL;
	}

	/* Transport-specific teardown (IB, and iWARP when built in). */
	switch (chanp->chan_xport_type) {
	case SOL_CMA_XPORT_NONE :
		break;
	case SOL_CMA_XPORT_IB :
		rdma_ib_destroy_id(idp);
		break;
#ifdef	IWARP_SUPPORT
	case SOL_CMA_XPORT_IWARP :
		rdma_iw_destroy_id(idp);
		break;
#endif	/* IWARP_SUPPORT */
	default :
		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
		    "cma_destroy_id: Unsupported xport type %x",
		    chanp->chan_xport_type);
		break;
	}

	/*
	 * Flush out & Free all listeners wrt to this ID.
	 * No locking is required as this code is executed after
	 * all REQ CMIDs have been destroyed. listen_list
	 * will therefore not be modified during this loop.
	 */
	if (chanp->chan_listenp) {
		cma_free_listen_list(idp);
		cma_fini_listen_root(chanp);
		kmem_free((void *)chanp->chan_listenp,
		    sizeof (sol_cma_listen_info_t));
		chanp->chan_listenp = NULL;
	}

	/*
	 * If this CMID was spawned from a listening root, the root may
	 * itself be waiting for its last child to go away; finish (or
	 * unblock) the root's destroy when nothing is outstanding.
	 */
	if (chanp->listen_root) {
		struct rdma_cm_id	*root_idp;
		sol_cma_chan_t		*root_chanp;

		root_idp = chanp->listen_root;
		root_chanp = (sol_cma_chan_t *)root_idp;
		mutex_enter(&root_chanp->chan_mutex);
		state = cma_get_chan_state(root_chanp);
		acpt_nodes = avl_numnodes(&root_chanp->chan_acpt_avl_tree);
		req_nodes = avl_numnodes(&root_chanp->chan_req_avl_tree);
		mutex_exit(&root_chanp->chan_mutex);
		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "cma_destroy_id(%p)-"
		    " root idp %p, state %x, acpt_nodes %ld, req_nodes %ld",
		    idp, root_idp, state, acpt_nodes, req_nodes);

		if (state == SOL_CMA_CHAN_DESTROY_PENDING &&
		    req_nodes == 0UL && acpt_nodes == 0UL) {
			/* Root destroy was deferred; do it now. */
			mutex_enter(&root_chanp->chan_mutex);
			root_chanp->chan_req_state = REQ_CMID_SERVER_NONE;
			mutex_exit(&root_chanp->chan_mutex);
			cma_destroy_id(root_idp);
		} else if (state == SOL_CMA_CHAN_DESTROY_WAIT &&
		    req_nodes == 0UL && acpt_nodes == 0UL) {
			/* Wake the thread blocked in the root's destroy. */
			mutex_enter(&root_chanp->chan_mutex);
			cma_set_chan_state(root_chanp,
			    SOL_CMA_CHAN_DESTROY_PENDING);
			root_chanp->chan_req_state = REQ_CMID_SERVER_NONE;
			cv_broadcast(&root_chanp->chan_destroy_cv);
			mutex_exit(&root_chanp->chan_mutex);
		}
	}

	mutex_destroy(&chanp->chan_mutex);
	cv_destroy(&chanp->chan_destroy_cv);
	kmem_free(chanp, sizeof (sol_cma_chan_t));
}
2110
2111 /*
2112 * Server TCP disconnect for an established channel.
2113 * If destroy_id() has been called for the listening
2114 * CMID and there are no more CMIDs with pending
2115 * events corresponding to the listening CMID, free
2116 * the listening CMID.
2117 *
2118 */
static void
cma_handle_nomore_events(sol_cma_chan_t *chanp)
{
	struct rdma_cm_id	*idp, *root_idp;
	sol_cma_chan_t		*root_chanp;
	cma_chan_state_t	state;
	ulong_t			req_nodes, acpt_nodes;

	idp = (struct rdma_cm_id *)chanp;
	root_idp = CHAN_LISTEN_ROOT(chanp);
	root_chanp = (sol_cma_chan_t *)root_idp;
	/* Nothing to do for a CMID that has no listening root. */
	if (!root_chanp)
		return;

	/* Lock order: root first, then the child channel. */
	mutex_enter(&root_chanp->chan_mutex);
	mutex_enter(&chanp->chan_mutex);
	CHAN_LISTEN_ROOT(chanp) = NULL;
	root_chanp->chan_req_total_cnt--;

	/*
	 * Removal of CMID from the AVL trees should already have been done
	 * by now. Below code mainly as a safety net.
	 */
	if (chanp->chan_req_state == REQ_CMID_ACCEPTED) {
		ASSERT(chanp->chan_qp_hdl);
		ASSERT(cma_get_acpt_idp(root_idp,
		    chanp->chan_qp_hdl));
		avl_remove(&root_chanp->chan_acpt_avl_tree, idp);
		chanp->chan_req_state = REQ_CMID_SERVER_NONE;
	}
	if (REQ_CMID_IN_REQ_AVL_TREE(chanp)) {
		ASSERT(chanp->chan_session_id);
		ASSERT(cma_get_req_idp(root_idp,
		    chanp->chan_session_id));
		avl_remove(&root_chanp->chan_req_avl_tree, idp);
		chanp->chan_req_state = REQ_CMID_SERVER_NONE;
	}

	/*
	 * Snapshot the root's state and outstanding-CMID counts under
	 * the lock; if its destroy was deferred and nothing remains,
	 * finish destroying the root now (after dropping both locks).
	 */
	state = cma_get_chan_state(root_chanp);
	req_nodes = avl_numnodes(&root_chanp->chan_req_avl_tree);
	acpt_nodes = avl_numnodes(&root_chanp->chan_acpt_avl_tree);
	mutex_exit(&chanp->chan_mutex);
	mutex_exit(&root_chanp->chan_mutex);
	if (state == SOL_CMA_CHAN_DESTROY_PENDING && req_nodes == 0UL &&
	    acpt_nodes == 0UL)
		cma_destroy_id(root_idp);
}
2166
2167 extern int ib_modify_qp(struct ib_qp *, struct ib_qp_attr *, int);
2168 extern int rdma_init_qp_attr(struct rdma_cm_id *, struct ib_qp_attr *,
2169 int *);
2170
2171 static int
2172 cma_init_ud_qp(sol_cma_chan_t *chanp, struct ib_qp *qp)
2173 {
2174 struct ib_qp_attr qp_attr;
2175 int qp_attr_mask, ret;
2176
2177 qp_attr.qp_state = IB_QPS_INIT;
2178 ret = rdma_init_qp_attr(&chanp->chan_rdma_cm, &qp_attr, &qp_attr_mask);
2179 if (ret)
2180 return (ret);
2181
2182 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
2183 if (ret)
2184 return (ret);
2185
2186 qp_attr.qp_state = IB_QPS_RTR;
2187 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
2188 if (ret)
2189 return (ret);
2190
2191 qp_attr.qp_state = IB_QPS_RTS;
2192 qp_attr.sq_psn = 0;
2193 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
2194
2195 return (ret);
2196 }
2197
2198 static int
2199 cma_init_conn_qp(sol_cma_chan_t *chanp, struct ib_qp *qp)
2200 {
2201 struct ib_qp_attr qp_attr;
2202 int qp_attr_mask, ret;
2203
2204 qp_attr.qp_state = IB_QPS_INIT;
2205 ret = rdma_init_qp_attr(&chanp->chan_rdma_cm, &qp_attr, &qp_attr_mask);
2206 if (ret)
2207 return (ret);
2208
2209 return (ib_modify_qp(qp, &qp_attr, qp_attr_mask));
2210 }
2211
2212 static inline int
2213 cma_is_ud_ps(enum rdma_port_space ps)
2214 {
2215 return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
2216 }
2217
2218 int
2219 rdma_create_qp(struct rdma_cm_id *idp, struct ib_pd *pd,
2220 struct ib_qp_init_attr *qp_init_attr)
2221 {
2222 sol_cma_chan_t *chanp;
2223 struct ib_qp *qp;
2224 int ret;
2225 ofs_client_t *dev_ofs_client;
2226
2227 ASSERT(idp);
2228 chanp = (sol_cma_chan_t *)idp;
2229 if (idp->device->node_guid != pd->device->node_guid)
2230 return (-EINVAL);
2231
2232 dev_ofs_client = (ofs_client_t *)pd->device->clnt_hdl;
2233 rdma_map_id2clnthdl(idp, dev_ofs_client->ibt_hdl, NULL);
2234
2235 qp = ib_create_qp(pd, qp_init_attr);
2236 if ((uintptr_t)qp >= (uintptr_t)-0xFFF) {
2237 return ((intptr_t)qp);
2238 }
2239 rdma_map_id2qphdl(idp, (void *)qp->ibt_qp);
2240
2241 if (cma_is_ud_ps(idp->ps)) {
2242 ret = cma_init_ud_qp(chanp, qp);
2243 } else {
2244 ret = cma_init_conn_qp(chanp, qp);
2245 }
2246
2247 if (ret) {
2248 goto err;
2249 }
2250
2251 idp->qp = qp;
2252 chanp->chan_qp_num = qp->qp_num;
2253 chanp->chan_is_srq = (qp->srq != NULL);
2254 return (0);
2255 err:
2256 (void) ib_destroy_qp(qp);
2257 return (ret);
2258 }
2259
2260 void
2261 rdma_destroy_qp(struct rdma_cm_id *idp)
2262 {
2263 ASSERT(idp);
2264 (void) ib_destroy_qp(idp->qp);
2265 idp->qp = NULL;
2266 }