Print this page
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/ib/clients/of/sol_ofs/sol_cma.c
+++ new/usr/src/uts/common/io/ib/clients/of/sol_ofs/sol_cma.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * sol_cma is a part of sol_ofs misc module. This file
28 28 * provides interfaces for supporting the communication
29 29 * management API defined in "rdma_cm.h". In-Kernel
30 30 * consumers of the "rdma_cm.h" API should link sol_ofs
31 31 * misc module using :
32 32 * -N misc/sol_ofs
33 33 * Solaris uCMA (sol_ucma) driver is the current consumer for
34 34 * sol_cma.
35 35 */
36 36
37 37 /* Standard driver includes */
38 38 #include <sys/types.h>
39 39 #include <sys/modctl.h>
40 40 #include <sys/errno.h>
41 41 #include <sys/stat.h>
42 42 #include <sys/ddi.h>
43 43 #include <sys/sunddi.h>
44 44 #include <sys/modctl.h>
45 45
46 46 #include <sys/ib/clients/of/ofed_kernel.h>
47 47 #include <sys/ib/clients/of/rdma/ib_addr.h>
48 48
49 49 #include <sys/ib/clients/of/sol_ofs/sol_cma.h>
↓ open down ↓ |
49 lines elided |
↑ open up ↑ |
50 50 #include <sys/ib/clients/of/sol_ofs/sol_kverb_impl.h>
51 51
52 52 /* Modload support */
53 53 static struct modlmisc sol_ofs_modmisc = {
54 54 &mod_miscops,
55 55 "Solaris OFS Misc module"
56 56 };
57 57
58 58 struct modlinkage sol_ofs_modlinkage = {
59 59 MODREV_1,
60 - (void *)&sol_ofs_modmisc,
61 - NULL
60 + { (void *)&sol_ofs_modmisc, NULL }
62 61 };
63 62
64 63 static ib_client_t *sol_cma_ib_client;
65 64 sol_cma_glbl_listen_t sol_cma_glbl_listen;
66 65 avl_tree_t sol_cma_glbl_listen_tree;
67 66
68 67 static void sol_cma_add_dev(struct ib_device *);
69 68 static void sol_cma_rem_dev(struct ib_device *);
70 69
71 70 static llist_head_t sol_cma_dev_list = LLIST_HEAD_INIT(sol_cma_dev_list);
72 71 kmutex_t sol_cma_dev_mutex;
73 72 kmutex_t sol_cma_glob_mutex;
74 73
75 74 char *sol_rdmacm_dbg_str = "sol_rdmacm";
76 75 char *sol_ofs_dbg_str = "sol_ofs_mod";
77 76
78 77 /*
79 78 * Local functions defines.
80 79 */
81 80 int sol_cma_req_cmid_cmp(const void *p1, const void *p2);
82 81 int sol_cma_cmid_cmp(const void *p1, const void *p2);
83 82 int sol_cma_svc_cmp(const void *, const void *);
84 83
85 84 static struct rdma_cm_id *cma_alloc_chan(rdma_cm_event_handler,
86 85 void *, enum rdma_port_space);
87 86 static void cma_set_chan_state(sol_cma_chan_t *, cma_chan_state_t);
88 87 static int cma_cas_chan_state(sol_cma_chan_t *, cma_chan_state_t,
89 88 cma_chan_state_t);
90 89 static void cma_free_listen_list(struct rdma_cm_id *);
91 90 static void cma_destroy_id(struct rdma_cm_id *);
92 91 static void cma_handle_nomore_events(sol_cma_chan_t *);
93 92
94 93 extern void sol_ofs_dprintf_init();
95 94 extern void sol_ofs_dprintf_fini();
96 95
97 96 cma_chan_state_t cma_get_chan_state(sol_cma_chan_t *);
98 97 extern int ibcma_init_root_chan(sol_cma_chan_t *, sol_cma_glbl_listen_t *);
99 98 extern int ibcma_fini_root_chan(sol_cma_chan_t *);
100 99 extern void ibcma_copy_srv_hdl(sol_cma_chan_t *, sol_cma_glbl_listen_t *);
101 100 extern int ibcma_fini_ep_chan(sol_cma_chan_t *);
102 101 extern uint64_t ibcma_init_root_sid(sol_cma_chan_t *);
103 102 extern void rdma_ib_destroy_id(struct rdma_cm_id *);
104 103 extern int rdma_ib_bind_addr(struct rdma_cm_id *, struct sockaddr *);
105 104 extern int rdma_ib_resolve_addr(struct rdma_cm_id *, struct sockaddr *,
106 105 struct sockaddr *, int);
107 106 extern int rdma_ib_resolve_route(struct rdma_cm_id *, int);
108 107 extern int rdma_ib_init_qp_attr(struct rdma_cm_id *, struct ib_qp_attr *,
109 108 int *);
110 109 extern int rdma_ib_connect(struct rdma_cm_id *, struct rdma_conn_param *);
111 110 extern int rdma_ib_listen(struct rdma_cm_id *, int);
112 111 extern int rdma_ib_accept(struct rdma_cm_id *, struct rdma_conn_param *);
113 112 extern int rdma_ib_reject(struct rdma_cm_id *, const void *, uint8_t);
114 113 extern int rdma_ib_disconnect(struct rdma_cm_id *);
115 114 extern int rdma_ib_join_multicast(struct rdma_cm_id *, struct sockaddr *,
116 115 void *);
117 116 extern void rdma_ib_leave_multicast(struct rdma_cm_id *, struct sockaddr *);
118 117
119 118 int
120 119 _init(void)
121 120 {
122 121 int err;
123 122
124 123 sol_ofs_dprintf_init();
125 124 SOL_OFS_DPRINTF_L5(sol_ofs_dbg_str, "_init()");
126 125
127 126 mutex_init(&sol_cma_glob_mutex, NULL, MUTEX_DRIVER, NULL);
128 127 mutex_init(&sol_cma_dev_mutex, NULL, MUTEX_DRIVER, NULL);
129 128 avl_create(&sol_cma_glbl_listen_tree,
130 129 sol_cma_svc_cmp, sizeof (sol_cma_glbl_listen_t),
131 130 offsetof(sol_cma_glbl_listen_t, cma_listen_node));
132 131
133 132 sol_cma_ib_client = kmem_zalloc(sizeof (ib_client_t), KM_NOSLEEP);
134 133 if (!sol_cma_ib_client) {
135 134 SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str,
136 135 "_init() - mem alloc failed");
137 136 avl_destroy(&sol_cma_glbl_listen_tree);
138 137 mutex_destroy(&sol_cma_dev_mutex);
139 138 mutex_destroy(&sol_cma_glob_mutex);
140 139 sol_ofs_dprintf_fini();
141 140 return (ENOMEM);
142 141 }
143 142
144 143 sol_cma_ib_client->name = "sol_ofs";
145 144 sol_cma_ib_client->add = sol_cma_add_dev;
146 145 sol_cma_ib_client->remove = sol_cma_rem_dev;
147 146 sol_cma_ib_client->dip = NULL;
148 147
149 148 if ((err = ib_register_client(sol_cma_ib_client)) != 0) {
150 149 SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str,
151 150 "_init() ib_register_client() failed with err %d",
152 151 err);
153 152 kmem_free(sol_cma_ib_client, sizeof (ib_client_t));
154 153 avl_destroy(&sol_cma_glbl_listen_tree);
155 154 mutex_destroy(&sol_cma_dev_mutex);
156 155 mutex_destroy(&sol_cma_glob_mutex);
157 156 sol_ofs_dprintf_fini();
158 157 return (err);
159 158 }
160 159
161 160 if ((err = mod_install(&sol_ofs_modlinkage)) != 0) {
162 161 SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str,
163 162 "_init() - mod_install() failed");
164 163 ib_unregister_client(sol_cma_ib_client);
165 164 kmem_free(sol_cma_ib_client, sizeof (ib_client_t));
166 165 avl_destroy(&sol_cma_glbl_listen_tree);
167 166 mutex_destroy(&sol_cma_dev_mutex);
168 167 mutex_destroy(&sol_cma_glob_mutex);
169 168 sol_ofs_dprintf_fini();
170 169 return (err);
171 170 }
172 171
173 172 SOL_OFS_DPRINTF_L5(sol_ofs_dbg_str, "_init() - ret");
174 173 return (err);
175 174 }
176 175
177 176 int
178 177 _fini(void)
179 178 {
180 179 int err;
181 180
182 181 SOL_OFS_DPRINTF_L5(sol_ofs_dbg_str, "_fini()");
183 182
184 183 if (avl_numnodes(&sol_cma_glbl_listen_tree)) {
185 184 SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str, "_fini - "
186 185 "listen CMIDs still active");
187 186 return (EBUSY);
188 187 }
189 188 if ((err = mod_remove(&sol_ofs_modlinkage)) != 0) {
190 189 SOL_OFS_DPRINTF_L3(sol_ofs_dbg_str,
191 190 "_fini: mod_remove failed");
192 191 return (err);
193 192 }
194 193
195 194 ib_unregister_client(sol_cma_ib_client);
196 195 kmem_free(sol_cma_ib_client, sizeof (ib_client_t));
197 196 avl_destroy(&sol_cma_glbl_listen_tree);
198 197 mutex_destroy(&sol_cma_dev_mutex);
199 198 mutex_destroy(&sol_cma_glob_mutex);
200 199 SOL_OFS_DPRINTF_L5(sol_ofs_dbg_str, "_fini() - ret");
201 200 sol_ofs_dprintf_fini();
202 201 return (err);
203 202 }
204 203
205 204 int
206 205 _info(struct modinfo *modinfop)
207 206 {
208 207 return (mod_info(&sol_ofs_modlinkage, modinfop));
209 208 }
210 209
211 210 typedef struct cma_device {
212 211 kmutex_t cma_mutex;
213 212 /* Ptr in the global sol_cma_dev_list */
214 213 llist_head_t cma_list;
215 214 /* List of listeners for this device */
216 215 genlist_t cma_epchan_list;
217 216 struct ib_device *cma_device;
218 217 uint_t cma_ref_count;
219 218 enum {
220 219 SOL_CMA_DEV_ADDED,
221 220 SOL_CMA_DEV_REM_IN_PROGRESS
222 221 } cma_dev_state;
223 222 } cma_device_t;
224 223
225 224 static void
226 225 sol_cma_add_dev(struct ib_device *dev)
227 226 {
228 227 cma_device_t *new_device;
229 228
230 229 new_device = kmem_zalloc(sizeof (cma_device_t), KM_NOSLEEP);
231 230 if (!new_device) {
232 231 SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str, "sol_cma_add_dev() "
233 232 "alloc failed!!");
234 233 return;
235 234 }
236 235 mutex_init(&new_device->cma_mutex, NULL, MUTEX_DRIVER, NULL);
237 236 llist_head_init(&new_device->cma_list, new_device);
238 237 init_genlist(&new_device->cma_epchan_list);
239 238 new_device->cma_device = dev;
240 239
241 240 ib_set_client_data(dev, sol_cma_ib_client, new_device);
242 241
243 242 mutex_enter(&sol_cma_dev_mutex);
244 243 llist_add_tail(&new_device->cma_list, &sol_cma_dev_list);
245 244 mutex_exit(&sol_cma_dev_mutex);
246 245 }
247 246
248 247 static void
249 248 sol_cma_rem_dev(struct ib_device *dev)
250 249 {
251 250 cma_device_t *rem_device;
252 251 genlist_entry_t *entry;
253 252
254 253 SOL_OFS_DPRINTF_L5(sol_ofs_dbg_str, "sol_rem_dev(%p)", dev);
255 254
256 255 rem_device = (cma_device_t *)ib_get_client_data(dev, sol_cma_ib_client);
257 256 if (!rem_device) {
258 257 SOL_OFS_DPRINTF_L2(sol_ofs_dbg_str, "sol_cma_rem_dev() "
259 258 "NULL cma_dev!!");
260 259 return;
261 260 }
262 261
263 262 mutex_enter(&rem_device->cma_mutex);
264 263 rem_device->cma_dev_state = SOL_CMA_DEV_REM_IN_PROGRESS;
265 264 if (rem_device->cma_ref_count) {
266 265 mutex_exit(&rem_device->cma_mutex);
267 266 SOL_OFS_DPRINTF_L3(sol_ofs_dbg_str, "sol_cma_rem_dev() "
268 267 "BUSY cma_dev!!");
269 268 return;
270 269 }
271 270 entry = remove_genlist_head(&rem_device->cma_epchan_list);
272 271 while (entry) {
273 272 sol_cma_chan_t *ep_chanp;
274 273
275 274 ep_chanp = (sol_cma_chan_t *)entry->data;
276 275 if (ibcma_fini_ep_chan(ep_chanp) == 0) {
277 276 genlist_entry_t *entry1;
278 277 sol_cma_chan_t *root_chanp;
279 278
280 279 ASSERT(ep_chanp->chan_listenp);
281 280 entry1 = ep_chanp->chan_listenp->listen_ep_root_entry;
282 281 root_chanp = (sol_cma_chan_t *)ep_chanp->listen_root;
283 282 root_chanp->chan_listenp->listen_eps--;
284 283 delete_genlist(&root_chanp->chan_listenp->listen_list,
285 284 entry1);
286 285
287 286 kmem_free(ep_chanp, sizeof (sol_cma_chan_t));
288 287 kmem_free(entry, sizeof (genlist_entry_t));
289 288 }
290 289
291 290 entry = remove_genlist_head(&rem_device->cma_epchan_list);
292 291 }
293 292 mutex_exit(&rem_device->cma_mutex);
294 293
295 294 mutex_enter(&sol_cma_dev_mutex);
296 295 llist_del(&rem_device->cma_list);
297 296 mutex_exit(&sol_cma_dev_mutex);
298 297
299 298 kmem_free(rem_device, sizeof (cma_device_t));
300 299 }
301 300
302 301 struct ib_device *
303 302 sol_cma_acquire_device(ib_guid_t hca_guid)
304 303 {
305 304 llist_head_t *entry;
306 305 cma_device_t *cma_devp;
307 306
308 307 mutex_enter(&sol_cma_dev_mutex);
309 308 list_for_each(entry, &sol_cma_dev_list) {
310 309 cma_devp = (cma_device_t *)entry->ptr;
311 310
312 311 if (cma_devp->cma_device->node_guid != hca_guid)
313 312 continue;
314 313
315 314 mutex_enter(&cma_devp->cma_mutex);
316 315 if (cma_devp->cma_dev_state == SOL_CMA_DEV_REM_IN_PROGRESS) {
317 316 SOL_OFS_DPRINTF_L3(sol_ofs_dbg_str,
318 317 "sol_cma_acquire_dev() - Device getting removed!!");
319 318 mutex_exit(&cma_devp->cma_mutex);
320 319 mutex_exit(&sol_cma_dev_mutex);
321 320 return (NULL);
322 321 }
323 322 cma_devp->cma_ref_count++;
324 323 mutex_exit(&cma_devp->cma_mutex);
325 324 mutex_exit(&sol_cma_dev_mutex);
326 325 return (cma_devp->cma_device);
327 326
328 327 }
329 328 mutex_exit(&sol_cma_dev_mutex);
330 329 return (NULL);
331 330 }
332 331
333 332 static void
334 333 sol_cma_release_device(struct rdma_cm_id *id)
335 334 {
336 335 ib_device_t *device = id->device;
337 336 llist_head_t *entry;
338 337 cma_device_t *cma_devp;
339 338
340 339 mutex_enter(&sol_cma_dev_mutex);
341 340 list_for_each(entry, &sol_cma_dev_list) {
342 341 cma_devp = (cma_device_t *)entry->ptr;
343 342
344 343 if (cma_devp->cma_device != device)
345 344 continue;
346 345
347 346 mutex_enter(&cma_devp->cma_mutex);
348 347 cma_devp->cma_ref_count--;
349 348 if (cma_devp->cma_dev_state == SOL_CMA_DEV_REM_IN_PROGRESS &&
350 349 cma_devp->cma_ref_count == 0) {
351 350 SOL_OFS_DPRINTF_L3(sol_ofs_dbg_str,
352 351 "sol_cma_release_dev() - Device free removed!!");
353 352 mutex_exit(&cma_devp->cma_mutex);
354 353 llist_del(&cma_devp->cma_list);
355 354 kmem_free(cma_devp, sizeof (cma_device_t));
356 355 mutex_exit(&sol_cma_dev_mutex);
357 356 return;
358 357 }
359 358 mutex_exit(&cma_devp->cma_mutex);
360 359 }
361 360 mutex_exit(&sol_cma_dev_mutex);
362 361 }
363 362
364 363 void
365 364 sol_cma_add_hca_list(sol_cma_chan_t *ep_chanp, ib_guid_t hca_guid)
366 365 {
367 366 llist_head_t *entry;
368 367 cma_device_t *cma_devp;
369 368
370 369 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "add_hca_list(%p, %llx)",
371 370 ep_chanp, hca_guid);
372 371 mutex_enter(&sol_cma_dev_mutex);
373 372 list_for_each(entry, &sol_cma_dev_list) {
374 373 cma_devp = (cma_device_t *)entry->ptr;
375 374
376 375 if ((cma_devp->cma_device)->node_guid != hca_guid)
377 376 continue;
378 377
379 378 mutex_enter(&cma_devp->cma_mutex);
380 379 ep_chanp->chan_listenp->listen_ep_dev_entry =
381 380 add_genlist(&cma_devp->cma_epchan_list,
382 381 (uintptr_t)ep_chanp, NULL);
383 382 ep_chanp->chan_listenp->listen_ep_device = cma_devp->cma_device;
384 383 mutex_exit(&cma_devp->cma_mutex);
385 384 mutex_exit(&sol_cma_dev_mutex);
386 385 return;
387 386 }
388 387 mutex_exit(&sol_cma_dev_mutex);
389 388 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str, "add_hca_list(%p, %llx): "
390 389 "No matching HCA in list!!", ep_chanp, hca_guid);
391 390 }
392 391
393 392 /*
394 393 * rdma_cm.h API functions.
395 394 */
396 395 struct rdma_cm_id *
397 396 rdma_create_id(rdma_cm_event_handler evt_hdlr, void *context,
398 397 enum rdma_port_space ps)
399 398 {
400 399 struct rdma_cm_id *rdma_idp;
401 400
402 401 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_create_id(%p, %p, %x)",
403 402 evt_hdlr, context, ps);
404 403
405 404 if (ps != RDMA_PS_TCP && ps != RDMA_PS_UDP && ps != RDMA_PS_IPOIB) {
406 405 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
407 406 "rdma_create_id: unsupported protocol %x", ps);
408 407 return (NULL);
409 408 }
410 409
411 410 rdma_idp = cma_alloc_chan(evt_hdlr, context, ps);
412 411 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
413 412 "rdma_create_id : ret %p", rdma_idp);
414 413
415 414 return (rdma_idp);
416 415 }
417 416
418 417 void
419 418 rdma_map_id2clnthdl(struct rdma_cm_id *rdma_idp, void *ib_client_hdl,
420 419 void *iw_client_hdl)
421 420 {
422 421 sol_cma_chan_t *chanp = (sol_cma_chan_t *)rdma_idp;
423 422
424 423 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
425 424 "rdma_map_id2clnthdl(%p, %p, %p)",
426 425 rdma_idp, ib_client_hdl, iw_client_hdl);
427 426 ASSERT(ib_client_hdl != NULL || iw_client_hdl != NULL);
428 427 chanp->chan_ib_client_hdl = ib_client_hdl;
429 428 chanp->chan_iw_client_hdl = iw_client_hdl;
430 429 }
431 430
432 431 void
433 432 rdma_map_id2qphdl(struct rdma_cm_id *rdma_idp, void *qp_hdl)
434 433 {
435 434 sol_cma_chan_t *chanp = (sol_cma_chan_t *)rdma_idp;
436 435
437 436 ASSERT(rdma_idp);
438 437 ASSERT(qp_hdl);
439 438 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_mapid2qphdl(%p, %p)",
440 439 rdma_idp, qp_hdl);
441 440 chanp->chan_qp_hdl = qp_hdl;
442 441 }
443 442
444 443
445 444 void
446 445 rdma_destroy_id(struct rdma_cm_id *rdma_idp)
447 446 {
448 447 sol_cma_chan_t *chanp, *root_chanp;
449 448 cma_chan_state_t state;
450 449 int rc, is_root_cmid, do_wait, is_passive;
451 450
452 451 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id(%p)", rdma_idp);
453 452
454 453 if (!rdma_idp)
455 454 return;
456 455
457 456 is_root_cmid = do_wait = is_passive = 0;
458 457
459 458 chanp = (sol_cma_chan_t *)rdma_idp;
460 459 root_chanp = (sol_cma_chan_t *)chanp->listen_root;
461 460 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id(%p), %p",
462 461 rdma_idp, root_chanp);
463 462
464 463 mutex_enter(&chanp->chan_mutex);
465 464 chanp->chan_cmid_destroy_state |= SOL_CMA_CALLER_CMID_DESTROYED;
466 465
467 466 /*
468 467 * Wait in destroy of CMID when rdma_resolve_addr() / rdma_listen()
 469  468 	 * Wait in destroy of CMID when rdma_resolve_addr() / rdma_listen() /
470 469 */
471 470 while (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_API_PROGRESS)
472 471 cv_wait(&chanp->chan_destroy_cv, &chanp->chan_mutex);
473 472
 474  473 	/* Wait if an event is being notified to the consumer */
475 474 while (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_EVENT_PROGRESS)
476 475 cv_wait(&chanp->chan_destroy_cv, &chanp->chan_mutex);
477 476
478 477 if (rdma_idp->device)
479 478 sol_cma_release_device(rdma_idp);
480 479
481 480 if (chanp->chan_listenp && chanp->chan_listenp->listen_is_root)
482 481 is_root_cmid = 1;
483 482 if (root_chanp == NULL && is_root_cmid == 0)
484 483 is_passive = 1;
485 484
486 485 /*
487 486 * Skip Active side handling for passive CMIDs and listen CMID
488 487 * for which REQ CMIDs have not been created.
489 488 */
490 489 if (is_passive || (is_root_cmid && chanp->chan_req_state !=
491 490 REQ_CMID_QUEUED)) {
492 491 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id: "
493 492 "Skipping passive %p, %x, %x", chanp->chan_listenp,
494 493 is_root_cmid, chanp->chan_req_state);
495 494 goto skip_passive_handling;
496 495 }
497 496
498 497 /*
499 498 * destroy_id() called for listening CMID and there are REQ
500 499 * CMIDs not yet notified. Reject such CMIDs and decrement
501 500 * the count.
502 501 */
503 502 if (is_root_cmid && chanp->chan_req_cnt) {
504 503 sol_cma_chan_t *req_cmid_chan, *next_chan;
505 504
506 505 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id: "
507 506 "not notified handling");
508 507 for (req_cmid_chan = (sol_cma_chan_t *)avl_first(
509 508 &chanp->chan_req_avl_tree); req_cmid_chan &&
510 509 chanp->chan_req_cnt; req_cmid_chan = next_chan) {
511 510 next_chan = AVL_NEXT(
512 511 &chanp->chan_req_avl_tree, req_cmid_chan);
513 512 if (req_cmid_chan->chan_req_state ==
514 513 REQ_CMID_NOTIFIED) {
515 514 avl_remove(&chanp->chan_req_avl_tree,
516 515 req_cmid_chan);
517 516 chanp->chan_req_cnt--;
518 517 chanp->chan_req_total_cnt--;
519 518 mutex_exit(&chanp->chan_mutex);
520 519 mutex_enter(&req_cmid_chan->chan_mutex);
521 520 req_cmid_chan->chan_req_state =
522 521 REQ_CMID_SERVER_NONE;
523 522 if (rdma_idp->ps == RDMA_PS_TCP)
524 523 cma_set_chan_state(req_cmid_chan,
525 524 SOL_CMA_CHAN_DESTROY_PENDING);
526 525 mutex_exit(&req_cmid_chan->chan_mutex);
527 526 (void) rdma_disconnect(
528 527 (struct rdma_cm_id *)req_cmid_chan);
529 528 mutex_enter(&chanp->chan_mutex);
530 529 if (rdma_idp->ps == RDMA_PS_TCP) {
531 530 mutex_enter(
532 531 &req_cmid_chan->chan_mutex);
533 532 req_cmid_chan->listen_root =
534 533 rdma_idp;
535 534 mutex_exit(
536 535 &req_cmid_chan->chan_mutex);
537 536 } else {
538 537 mutex_destroy(
539 538 &req_cmid_chan->chan_mutex);
540 539 cv_destroy(
541 540 &req_cmid_chan->chan_destroy_cv);
542 541 kmem_free(req_cmid_chan,
543 542 sizeof (sol_cma_chan_t));
544 543 }
545 544 }
546 545 }
547 546 }
548 547
549 548 /*
550 549 * destroy_id() called for :
551 550 * listening CMID and all REQ CMIDs destroy_id() called
552 551 * REQ CMID and 1 more REQ CMID not yet destroyed.
553 552 * wait till the CMID is completly destroyed.
554 553 */
555 554 if (is_root_cmid && chanp->chan_req_total_cnt == 0) {
556 555 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id: "
557 556 "root idp waiting");
558 557 cma_set_chan_state(chanp, SOL_CMA_CHAN_DESTROY_WAIT);
559 558 cv_wait(&chanp->chan_destroy_cv, &chanp->chan_mutex);
560 559 }
561 560 mutex_exit(&chanp->chan_mutex);
562 561
563 562 if (root_chanp)
564 563 mutex_enter(&root_chanp->chan_mutex);
565 564 mutex_enter(&chanp->chan_mutex);
566 565 #ifdef DEBUG
567 566 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id: "
568 567 "root_idp %p, cnt %x, state %x", root_chanp,
569 568 root_chanp ? root_chanp->chan_req_total_cnt : 0,
570 569 root_chanp ? cma_get_chan_state(root_chanp) : 0);
571 570 #endif
572 571
573 572 if (root_chanp && root_chanp->chan_req_total_cnt == 1 &&
574 573 cma_get_chan_state(root_chanp) == SOL_CMA_CHAN_DESTROY_PENDING)
575 574 do_wait = 1;
576 575 if (root_chanp)
577 576 mutex_exit(&root_chanp->chan_mutex);
578 577
579 578 skip_passive_handling :
580 579 state = cma_get_chan_state(chanp);
581 580 if (is_root_cmid == 0 && state != SOL_CMA_CHAN_DISCONNECT &&
582 581 SOL_CMAID_CONNECTED(chanp)) {
583 582 /*
584 583 * A connected CM ID has not been disconnected.
585 584 * Call rdma_disconnect() to disconnect it.
586 585 */
587 586 mutex_exit(&chanp->chan_mutex);
588 587 rc = rdma_disconnect(rdma_idp);
589 588 if (rc) {
590 589 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
591 590 "rdma_destroy_id(%p)- disconnect failed!!",
592 591 rdma_idp);
593 592 return;
594 593 }
595 594 mutex_enter(&chanp->chan_mutex);
596 595 if (root_chanp && chanp->listen_root == NULL)
597 596 chanp->listen_root = (struct rdma_cm_id *)root_chanp;
598 597 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
599 598 "rdma_destroy_id(chanp %p, connect %x, ps %x)",
600 599 chanp, chanp->chan_connect_flag, rdma_idp->ps);
601 600 if (SOL_CMAID_CONNECTED(chanp)) {
602 601 if (do_wait) {
603 602 cma_set_chan_state(chanp,
604 603 SOL_CMA_CHAN_DESTROY_WAIT);
605 604 cv_wait(&chanp->chan_destroy_cv,
606 605 &chanp->chan_mutex);
607 606 mutex_exit(&chanp->chan_mutex);
608 607 cma_destroy_id(rdma_idp);
609 608 } else {
610 609 cma_set_chan_state(chanp,
611 610 SOL_CMA_CHAN_DESTROY_PENDING);
612 611 mutex_exit(&chanp->chan_mutex);
613 612 }
614 613 } else {
615 614 /*
616 615 * No more callbacks are expected for this CMID.
617 616 * Free this CMID.
618 617 */
619 618 mutex_exit(&chanp->chan_mutex);
620 619 cma_destroy_id(rdma_idp);
621 620 }
622 621 } else if (is_root_cmid == 0 && state ==
623 622 SOL_CMA_CHAN_DISCONNECT && SOL_CMAID_CONNECTED(chanp)) {
624 623 /*
 625  624 	 * CM ID was connected and disconnect is in progress.
626 625 * Free of this CM ID is done for the DISCONNECT
627 626 * notification for this CMID.
628 627 */
629 628 cma_set_chan_state(chanp, SOL_CMA_CHAN_DESTROY_PENDING);
630 629 mutex_exit(&chanp->chan_mutex);
631 630 } else if (state != SOL_CMA_CHAN_DESTROY_PENDING) {
632 631 /* CM ID, not connected, just free it. */
633 632 mutex_exit(&chanp->chan_mutex);
634 633 cma_destroy_id(rdma_idp);
635 634 } else
636 635 mutex_exit(&chanp->chan_mutex);
637 636
638 637 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_destroy_id: ret");
639 638 }
640 639
641 640 /*
642 641 * State transitions for Address resolution :
643 642 * Active Side (Client) :
644 643 * 1. CREATE_ID-->BIND_ADDR-->RESOLVE_ADDR-->RESOLVE_ROUTE
645 644 *
646 645 * Passive Side (Server) :
647 646 * 2. CREATE_ID-->RESOLVE_ADDR-->RESOLVE_ROUTE
648 647 * IF_ADDR_ANY can be passed as local address in RESOLVE_ADDR
649 648 */
650 649 int
651 650 rdma_bind_addr(struct rdma_cm_id *idp, struct sockaddr *addr)
652 651 {
653 652 sol_cma_chan_t *chanp;
654 653 struct rdma_addr *addrp;
655 654 int ret;
656 655
657 656 ASSERT(idp);
658 657 ASSERT(addr);
659 658 chanp = (sol_cma_chan_t *)idp;
660 659 addrp = &(idp->route.addr);
661 660 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_bind_addr(%p, %p)",
662 661 idp, addr);
663 662
664 663 mutex_enter(&chanp->chan_mutex);
665 664 ret = cma_cas_chan_state(chanp, SOL_CMA_CHAN_IDLE, SOL_CMA_CHAN_BOUND);
666 665 if (ret) {
667 666 mutex_exit(&chanp->chan_mutex);
668 667 return (ret);
669 668 }
670 669 /* Copy the local address to rdma_id structure */
671 670 bcopy((void *)addr, (void *)&(addrp->src_addr),
672 671 sizeof (struct sockaddr));
673 672 mutex_exit(&chanp->chan_mutex);
674 673
675 674 /*
676 675 * First call rdma_ib_bind_addr() to bind this address.
677 676 * Next call rdma_iw_bind_addr() to bind this address.
678 677 * For IF_ADDR_ANY, IB address is given priority over
679 678 * iWARP.
680 679 */
681 680 if (chanp->chan_ib_client_hdl == NULL) {
682 681 ofs_client_t *ofs_clnt;
683 682
684 683 ofs_clnt = (ofs_client_t *)sol_cma_ib_client->clnt_hdl;
685 684 chanp->chan_ib_client_hdl = ofs_clnt->ibt_hdl;
686 685 }
687 686 if (chanp->chan_ib_client_hdl && rdma_ib_bind_addr(idp, addr) == 0) {
688 687 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
689 688 "rdma_bind_addr: ret IB @");
690 689 return (0);
691 690 #ifdef IWARP_SUPPORT
692 691 } else if (chanp->chan_iw_client_hdl && rdma_iw_bind_addr(idp, addr)
693 692 == 0) {
694 693 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
695 694 "rdma_bind_addr: ret iWARP @");
696 695 return (0);
697 696 #endif /* IWARP_SUPPORT */
698 697 }
699 698
700 699 mutex_enter(&chanp->chan_mutex);
701 700 cma_set_chan_state(chanp, SOL_CMA_CHAN_IDLE);
702 701 mutex_exit(&chanp->chan_mutex);
703 702 SOL_OFS_DPRINTF_L4(sol_rdmacm_dbg_str, "rdma_bind_addr: ret failure!");
704 703 return (EINVAL);
705 704 }
706 705
707 706 int
708 707 rdma_resolve_addr(struct rdma_cm_id *idp, struct sockaddr *src_addr,
709 708 struct sockaddr *dst_addr, int timeout_ms)
710 709 {
711 710 sol_cma_chan_t *chanp;
712 711 struct rdma_addr *addrp;
713 712 cma_chan_state_t state;
714 713
715 714 ASSERT(idp);
716 715 chanp = (sol_cma_chan_t *)idp;
717 716 addrp = &(idp->route.addr);
718 717 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_resolve_addr(%p, %p, "
719 718 "%p, %x)", idp, src_addr, dst_addr, timeout_ms);
720 719
721 720 mutex_enter(&chanp->chan_mutex);
722 721 state = cma_get_chan_state(chanp);
723 722 if (state != SOL_CMA_CHAN_IDLE && state != SOL_CMA_CHAN_BOUND) {
724 723 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
725 724 "rdma_resolve_addr : invalid chan state %x", state);
726 725 mutex_exit(&chanp->chan_mutex);
727 726 return (EINVAL);
728 727 }
729 728 if (chanp->chan_cmid_destroy_state &
730 729 SOL_CMA_CALLER_CMID_DESTROYED) {
731 730 SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
732 731 "rdma_resolve_addr : CMID %p, destroy called", chanp);
733 732 mutex_exit(&chanp->chan_mutex);
734 733 return (EINVAL);
735 734 }
736 735 chanp->chan_cmid_destroy_state |= SOL_CMA_CALLER_API_PROGRESS;
737 736
738 737 if (chanp->chan_xport_type == SOL_CMA_XPORT_NONE) {
739 738 bcopy((void *)src_addr, (void *)&(addrp->src_addr),
740 739 sizeof (struct sockaddr));
741 740 }
742 741 bcopy((void *)dst_addr, (void *)&(addrp->dst_addr),
743 742 sizeof (struct sockaddr));
744 743 mutex_exit(&chanp->chan_mutex);
745 744
746 745 /*
747 746 * First resolve this as an @ corresponding to IB fabric
748 747 * if this fails, resolve this as an @ corresponding to iWARP
749 748 */
750 749 if (chanp->chan_ib_client_hdl == NULL) {
751 750 ofs_client_t *ofs_clnt;
752 751
753 752 ofs_clnt = (ofs_client_t *)sol_cma_ib_client->clnt_hdl;
754 753 chanp->chan_ib_client_hdl = ofs_clnt->ibt_hdl;
755 754 }
756 755 if (chanp->chan_ib_client_hdl && rdma_ib_resolve_addr(idp, src_addr,
757 756 dst_addr, timeout_ms) == 0) {
758 757 SOL_OFS_DPRINTF_L4(sol_rdmacm_dbg_str,
759 758 "rdma_resolve_addr: ret IB @");
760 759 #ifdef IWARP_SUPPORT
761 760 } else if (chanp->chan_iw_client_hdl && rdma_iw_resolve_addr(idp,
762 761 src_addr, dst_addr, timeout_ms) == 0) {
763 762 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
764 763 "rdma_resolve_addr: ret iWARP @");
765 764 #endif /* IWARP_SUPPORT */
766 765 } else {
767 766 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
768 767 "rdma_resolve_addr: Invalid @");
769 768 return (EINVAL);
770 769 }
771 770 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_resolve_addr: ret 0");
772 771 return (0);
773 772 }
774 773
775 774 static void cma_generate_event_sync(struct rdma_cm_id *,
776 775 enum rdma_cm_event_type, int, struct rdma_conn_param *,
777 776 struct rdma_ud_param *);
778 777
779 778 void
780 779 cma_resolve_addr_callback(sol_cma_chan_t *chanp, int rc)
781 780 {
782 781 enum rdma_cm_event_type event;
783 782
784 783 mutex_enter(&chanp->chan_mutex);
785 784 if (chanp->chan_cmid_destroy_state &
786 785 SOL_CMA_CALLER_CMID_DESTROYED) {
787 786 SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
788 787 "cma_resolve_addr : CMID %p, destroy called", chanp);
789 788 chanp->chan_cmid_destroy_state &=
790 789 ~SOL_CMA_CALLER_API_PROGRESS;
791 790 cv_broadcast(&chanp->chan_destroy_cv);
792 791 mutex_exit(&chanp->chan_mutex);
793 792 return;
794 793 }
795 794 if (rc == 0) {
796 795 cma_set_chan_state(chanp, SOL_CMA_CHAN_ADDR_RESLVD);
797 796 event = RDMA_CM_EVENT_ADDR_RESOLVED;
798 797 } else
799 798 event = RDMA_CM_EVENT_ADDR_ERROR;
800 799
801 800 /*
802 801 * Generate RDMA_CM_EVENT_ADDR_RESOLVED event
803 802 * This will result in RDMA_USER_CM_CMD_RESOLVE_ROUTE in
804 803 * userland.
805 804 */
806 805 chanp->chan_cmid_destroy_state |= SOL_CMA_CALLER_EVENT_PROGRESS;
807 806 mutex_exit(&chanp->chan_mutex);
808 807 cma_generate_event_sync((struct rdma_cm_id *)chanp, event, 0,
809 808 NULL, NULL);
810 809
811 810 mutex_enter(&chanp->chan_mutex);
812 811 chanp->chan_cmid_destroy_state &= ~SOL_CMA_CALLER_API_PROGRESS;
813 812 if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED)
814 813 cv_broadcast(&chanp->chan_destroy_cv);
815 814 mutex_exit(&chanp->chan_mutex);
816 815 }
817 816
818 817 int
819 818 rdma_resolve_route(struct rdma_cm_id *idp, int timeout_ms)
820 819 {
821 820 sol_cma_chan_t *chanp;
822 821
823 822 ASSERT(idp);
824 823 chanp = (sol_cma_chan_t *)idp;
825 824 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "resolve_route(%p, %x)", idp,
826 825 timeout_ms);
827 826
828 827 mutex_enter(&chanp->chan_mutex);
829 828 if (cma_cas_chan_state(chanp, SOL_CMA_CHAN_ADDR_RESLVD,
830 829 SOL_CMA_CHAN_ROUTE_RESLVD) != 0) {
831 830 mutex_exit(&chanp->chan_mutex);
832 831 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
833 832 "resolve_route: Invalid state");
834 833 return (EINVAL);
835 834 }
836 835 if (chanp->chan_cmid_destroy_state &
837 836 SOL_CMA_CALLER_CMID_DESTROYED) {
838 837 SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
839 838 "rdma_resolve_route : CMID %p, destroy called", chanp);
840 839 mutex_exit(&chanp->chan_mutex);
841 840 return (EINVAL);
842 841 }
843 842 chanp->chan_cmid_destroy_state |= SOL_CMA_CALLER_API_PROGRESS;
844 843 mutex_exit(&chanp->chan_mutex);
845 844
846 845 /*
847 846 * Generate RDMA_CM_EVENT_ROUTE_RESOLVED event
848 847 * This will result in RDMA_USER_CM_CMD_RESOLVE_ROUTE in
849 848 * userland
850 849 */
851 850 cma_generate_event(idp, RDMA_CM_EVENT_ROUTE_RESOLVED, 0,
852 851 NULL, NULL);
853 852
854 853 mutex_enter(&chanp->chan_mutex);
855 854 chanp->chan_cmid_destroy_state &= ~SOL_CMA_CALLER_API_PROGRESS;
856 855 if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED)
857 856 cv_broadcast(&chanp->chan_destroy_cv);
858 857 mutex_exit(&chanp->chan_mutex);
859 858
860 859 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "resolve_route: ret 0");
861 860 return (0);
862 861 }
863 862
864 863 /*
 865  864 	 * Connect or Listen request should be sent after Route is resolved
866 865 *
867 866 * Active Side (Client) :
868 867 * 1. (State ROUTE_RESOLVED)-->CONNECT-->ACCEPT/REJECT-->DISCONNECT
869 868 * -->DESTROY_ID-->close(9E)
870 869 * 2. Same as (1), DESTROY_ID without DISCONNECT
871 870 * 3. Same as (1), close(9e) without DESTROY_ID.
872 871 *
873 872 * Passive Side (Server) :
874 873 * 4. (State ROUTE_RESOLVED)-->LISTEN->DISCONNECT
875 874 * -->DESTROY_ID-->close(9E)
876 875 * 5. Same as (4), DESTROY_ID without DISCONNECT
877 876 * 6. Same as (4), close(9e) without DESTROY_ID.
878 877 */
879 878 int
880 879 rdma_connect(struct rdma_cm_id *idp, struct rdma_conn_param *conn_param)
881 880 {
882 881 sol_cma_chan_t *chanp;
883 882 int ret = EINVAL;
884 883
885 884 ASSERT(idp);
886 885 chanp = (sol_cma_chan_t *)idp;
887 886 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_connect(%p, %p)", idp,
888 887 conn_param);
889 888
890 889 mutex_enter(&chanp->chan_mutex);
891 890 if (chanp->chan_xport_type == SOL_CMA_XPORT_NONE) {
892 891 mutex_exit(&chanp->chan_mutex);
893 892 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
894 893 "rdma_connect, Invalid Xport");
895 894 return (EINVAL);
896 895 }
897 896 if (cma_cas_chan_state(chanp, SOL_CMA_CHAN_ROUTE_RESLVD,
898 897 SOL_CMA_CHAN_CONNECT)) {
899 898 mutex_exit(&chanp->chan_mutex);
900 899 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
901 900 "rdma_connect, Invalid state");
902 901 return (EINVAL);
903 902 }
904 903
905 904 if (chanp->chan_xport_type == SOL_CMA_XPORT_IB) {
906 905 ret = rdma_ib_connect(idp, conn_param);
907 906 #ifdef IWARP_SUPPORT
908 907 } else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP) {
909 908 ret = rdma_iw_connect(idp, conn_param);
910 909 #endif /* IWARP_SUPPORT */
911 910 }
912 911 mutex_exit(&chanp->chan_mutex);
913 912
914 913 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_connect: ret %x", ret);
915 914 return (ret);
916 915 }
917 916
918 917 static int cma_init_listen_root(sol_cma_chan_t *);
919 918 static void cma_fini_listen_root(sol_cma_chan_t *);
920 919
921 920 int
922 921 rdma_listen(struct rdma_cm_id *idp, int bklog)
923 922 {
924 923 sol_cma_chan_t *chanp;
925 924 int ret = 0;
926 925 genlist_entry_t *entry;
927 926 cma_chan_state_t state;
928 927
929 928 ASSERT(idp);
930 929 chanp = (sol_cma_chan_t *)idp;
931 930 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_listen(%p, %x)",
932 931 idp, bklog);
933 932
934 933 mutex_enter(&chanp->chan_mutex);
935 934 state = cma_get_chan_state(chanp);
936 935 if (state == SOL_CMA_CHAN_IDLE) {
937 936 mutex_exit(&chanp->chan_mutex);
938 937 return (EINVAL);
939 938 }
940 939 cma_set_chan_state(chanp, SOL_CMA_CHAN_LISTEN);
941 940
942 941 if (chanp->chan_cmid_destroy_state &
943 942 SOL_CMA_CALLER_CMID_DESTROYED) {
944 943 SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
945 944 "rdma_listen : CMID %p, destroy called", chanp);
946 945 mutex_exit(&chanp->chan_mutex);
947 946 return (EINVAL);
948 947 }
949 948 chanp->chan_cmid_destroy_state |= SOL_CMA_CALLER_API_PROGRESS;
950 949
951 950 ASSERT(chanp->chan_listenp == NULL);
952 951
953 952 chanp->chan_listenp = kmem_zalloc(sizeof (sol_cma_listen_info_t),
954 953 KM_SLEEP);
955 954 init_genlist(&(CHAN_LISTEN_LIST(chanp)));
956 955 (chanp->chan_listenp)->listen_is_root = 1;
957 956 ret = cma_init_listen_root(chanp);
958 957 if (ret) {
959 958 chanp->chan_listenp = NULL;
960 959 mutex_exit(&chanp->chan_mutex);
961 960 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str, "rdma_listen: "
962 961 "cma_init_listen_root: failed");
963 962 kmem_free(chanp->chan_listenp,
964 963 sizeof (sol_cma_listen_info_t));
965 964 return (EINVAL);
966 965 }
967 966
968 967 if (chanp->chan_xport_type == SOL_CMA_XPORT_NONE) {
969 968 ibcma_append_listen_list(idp);
970 969 #ifdef IWARP_SUPPORT
971 970 iwcma_append_listen_list(idp);
972 971 #endif
973 972 } else if (chanp->chan_xport_type == SOL_CMA_XPORT_IB) {
974 973 ibcma_append_listen_list(idp);
975 974 #ifdef IWARP_SUPPORT
976 975 } else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP) {
977 976 iwcma_append_listen_list(idp);
978 977 #endif /* IWARP_SUPPORT */
979 978 }
980 979
981 980 if (genlist_empty(&(CHAN_LISTEN_LIST(chanp)))) {
982 981 cma_fini_listen_root(chanp);
983 982 kmem_free((void *)chanp->chan_listenp,
984 983 sizeof (sol_cma_listen_info_t));
985 984 chanp->chan_listenp = NULL;
986 985 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str, "rdma_listen: "
987 986 "No listeners");
988 987 mutex_exit(&chanp->chan_mutex);
989 988 return (0);
990 989 }
991 990
992 991 if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED) {
993 992 chanp->chan_cmid_destroy_state &=
994 993 ~SOL_CMA_CALLER_API_PROGRESS;
995 994 cv_broadcast(&chanp->chan_destroy_cv);
996 995 }
997 996
998 997 genlist_for_each(entry, &(CHAN_LISTEN_LIST(chanp))) {
999 998 struct rdma_cm_id *ep_idp;
1000 999 sol_cma_chan_t *ep_chanp;
1001 1000
1002 1001 ep_idp = (struct rdma_cm_id *)entry->data;
1003 1002 ep_chanp = (sol_cma_chan_t *)ep_idp;
1004 1003 if (ep_chanp->chan_xport_type == SOL_CMA_XPORT_IB)
1005 1004 ret = rdma_ib_listen(ep_idp, bklog);
1006 1005 #ifdef IWARP_SUPPORT
1007 1006 if (ep_chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
1008 1007 ret = rdma_iw_listen(ep_idp, bklog);
1009 1008 #endif
1010 1009 if (ret)
1011 1010 break;
1012 1011 }
1013 1012
1014 1013 chanp->chan_cmid_destroy_state &= ~SOL_CMA_CALLER_API_PROGRESS;
1015 1014 if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED)
1016 1015 cv_broadcast(&chanp->chan_destroy_cv);
1017 1016 mutex_exit(&chanp->chan_mutex);
1018 1017
1019 1018 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_listen: ret %x", ret);
1020 1019 return (ret);
1021 1020 }
1022 1021
1023 1022 int
1024 1023 rdma_accept(struct rdma_cm_id *idp, struct rdma_conn_param *conn_param)
1025 1024 {
1026 1025 struct rdma_cm_id *root_idp;
1027 1026 sol_cma_chan_t *root_chanp, *chanp;
1028 1027 int ret = EINVAL;
1029 1028
1030 1029 ASSERT(idp);
1031 1030 chanp = (sol_cma_chan_t *)idp;
1032 1031 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_accept(%p, %p)",
1033 1032 idp, conn_param);
1034 1033
1035 1034 mutex_enter(&chanp->chan_mutex);
1036 1035 if (cma_cas_chan_state(chanp, SOL_CMA_CHAN_LISTEN,
1037 1036 SOL_CMA_CHAN_ACCEPT) && cma_cas_chan_state(chanp,
1038 1037 SOL_CMA_CHAN_CONNECT, SOL_CMA_CHAN_ACCEPT)) {
1039 1038 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1040 1039 "rdma_accept, Invalid state");
1041 1040 mutex_exit(&chanp->chan_mutex);
1042 1041 return (EINVAL);
1043 1042 }
1044 1043 mutex_exit(&chanp->chan_mutex);
1045 1044
1046 1045 root_idp = CHAN_LISTEN_ROOT(chanp);
1047 1046 root_chanp = (sol_cma_chan_t *)root_idp;
1048 1047 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "accept: root_idp %p",
1049 1048 root_idp);
1050 1049
1051 1050 /* For TCP, delete from REQ AVL & insert to ACPT AVL */
1052 1051 if (root_idp && root_idp->ps == RDMA_PS_TCP) {
1053 1052 void *find_ret;
1054 1053 avl_index_t where;
1055 1054
1056 1055 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "accept: root_idp %p"
1057 1056 "REQ AVL remove %p", root_chanp, idp);
1058 1057 mutex_enter(&root_chanp->chan_mutex);
1059 1058 mutex_enter(&chanp->chan_mutex);
1060 1059
1061 1060 /*
1062 1061 * This CMID has been deleted, maybe because of timeout.
1063 1062 * Return EINVAL.
1064 1063 */
1065 1064 if (chanp->chan_req_state != REQ_CMID_NOTIFIED) {
1066 1065 mutex_exit(&chanp->chan_mutex);
1067 1066 mutex_exit(&root_chanp->chan_mutex);
1068 1067 SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
1069 1068 "accept: root_idp %p chanp %p, not in REQ "
1070 1069 "AVL tree", root_chanp, chanp);
1071 1070 return (EINVAL);
1072 1071 }
1073 1072 ASSERT(cma_get_req_idp(root_idp, chanp->chan_session_id));
1074 1073 avl_remove(&root_chanp->chan_req_avl_tree, idp);
1075 1074
1076 1075
1077 1076 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1078 1077 "Add to ACPT AVL of %p IDP, idp %p, qp_hdl %p",
1079 1078 root_idp, idp, chanp->chan_qp_hdl);
1080 1079 find_ret = avl_find(&root_chanp->chan_acpt_avl_tree,
1081 1080 (void *)chanp->chan_qp_hdl, &where);
1082 1081 if (find_ret) {
1083 1082 chanp->chan_req_state = REQ_CMID_SERVER_NONE;
1084 1083 mutex_exit(&chanp->chan_mutex);
1085 1084 mutex_exit(&root_chanp->chan_mutex);
1086 1085 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1087 1086 "DUPLICATE ENTRY in ACPT AVL : root %p, "
1088 1087 "idp %p, qp_hdl %p",
1089 1088 root_idp, idp, chanp->chan_qp_hdl);
1090 1089 return (EINVAL);
1091 1090 }
1092 1091 avl_insert(&root_chanp->chan_acpt_avl_tree,
1093 1092 (void *)idp, where);
1094 1093 chanp->chan_req_state = REQ_CMID_ACCEPTED;
1095 1094 mutex_exit(&chanp->chan_mutex);
1096 1095 mutex_exit(&root_chanp->chan_mutex);
1097 1096 }
1098 1097
1099 1098 if (root_idp && IS_UDP_CMID(root_idp)) {
1100 1099 cma_chan_state_t chan_state;
1101 1100
1102 1101 /*
1103 1102 * Accepting the connect request, no more events for this
1104 1103 * connection.
1105 1104 */
1106 1105 cma_handle_nomore_events(chanp);
1107 1106 mutex_enter(&chanp->chan_mutex);
1108 1107 chan_state = cma_get_chan_state(chanp);
1109 1108 mutex_exit(&chanp->chan_mutex);
1110 1109 /* If rdma_destroy_id() was called, destroy CMID */
1111 1110 if (chan_state == SOL_CMA_CHAN_DESTROY_PENDING) {
1112 1111 cma_destroy_id((struct rdma_cm_id *)chanp);
1113 1112 return (EINVAL);
1114 1113 }
1115 1114 }
1116 1115
1117 1116 if (chanp->chan_xport_type == SOL_CMA_XPORT_IB)
1118 1117 ret = rdma_ib_accept(idp, conn_param);
1119 1118 #ifdef IWARP_SUPPORT
1120 1119 if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
1121 1120 ret = rdma_iw_accept(idp, conn_param);
1122 1121 #endif /* IWARP_SUPPORT */
1123 1122
1124 1123 if (ret && root_idp && idp->ps == RDMA_PS_TCP) {
1125 1124 void *find_ret;
1126 1125 avl_index_t where;
1127 1126
1128 1127 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1129 1128 "Delete from REQ AVL of %p IDP, idp %p",
1130 1129 root_idp, idp);
1131 1130 mutex_enter(&root_chanp->chan_mutex);
1132 1131 mutex_enter(&chanp->chan_mutex);
1133 1132 if (chanp->chan_req_state == REQ_CMID_ACCEPTED) {
1134 1133 ASSERT(cma_get_acpt_idp(root_idp,
1135 1134 chanp->chan_qp_hdl));
1136 1135 avl_remove(&root_chanp->chan_acpt_avl_tree,
1137 1136 idp);
1138 1137 find_ret = avl_find(&root_chanp->chan_req_avl_tree,
1139 1138 (void *)chanp->chan_qp_hdl, &where);
1140 1139 if (find_ret) {
1141 1140 chanp->chan_req_state = REQ_CMID_SERVER_NONE;
1142 1141 mutex_exit(&chanp->chan_mutex);
1143 1142 mutex_exit(&root_chanp->chan_mutex);
1144 1143 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1145 1144 "DUPLICATE ENTRY in REQ AVL : root %p, "
1146 1145 "idp %p, session_id %p",
1147 1146 root_idp, idp, chanp->chan_session_id);
1148 1147 return (EINVAL);
1149 1148 }
1150 1149 avl_insert(&root_chanp->chan_req_avl_tree, idp, where);
1151 1150 chanp->chan_req_state = REQ_CMID_NOTIFIED;
1152 1151 }
1153 1152 mutex_exit(&chanp->chan_mutex);
1154 1153 mutex_exit(&root_chanp->chan_mutex);
1155 1154 }
1156 1155
1157 1156 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_accept: ret %x", ret);
1158 1157 return (ret);
1159 1158 }
1160 1159
1161 1160 int
1162 1161 rdma_notify(struct rdma_cm_id *idp, enum ib_event_type evt)
1163 1162 {
1164 1163 sol_cma_chan_t *chanp;
1165 1164
1166 1165 ASSERT(idp);
1167 1166 chanp = (sol_cma_chan_t *)idp;
1168 1167 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_notify(%p, %x)", idp, evt);
1169 1168
1170 1169 mutex_enter(&chanp->chan_mutex);
1171 1170 if (cma_cas_chan_state(chanp, SOL_CMA_CHAN_ROUTE_RESLVD,
1172 1171 SOL_CMA_CHAN_EVENT_NOTIFIED)) {
1173 1172 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1174 1173 "rdma_notify, Invalid state");
1175 1174 mutex_exit(&chanp->chan_mutex);
1176 1175 return (EINVAL);
1177 1176 }
1178 1177 mutex_exit(&chanp->chan_mutex);
1179 1178
1180 1179 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_notify: ret 0");
1181 1180 return (0);
1182 1181 }
1183 1182
1184 1183 int
1185 1184 rdma_reject(struct rdma_cm_id *idp, const void *priv_data,
1186 1185 uint8_t priv_data_len)
1187 1186 {
1188 1187 struct rdma_cm_id *root_idp;
1189 1188 sol_cma_chan_t *root_chanp, *chanp;
1190 1189 int ret = EINVAL;
1191 1190
1192 1191 ASSERT(idp);
1193 1192 chanp = (sol_cma_chan_t *)idp;
1194 1193 root_idp = CHAN_LISTEN_ROOT(chanp);
1195 1194 root_chanp = (sol_cma_chan_t *)root_idp;
1196 1195 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_reject(%p, %p)", idp,
1197 1196 priv_data, priv_data_len);
1198 1197
1199 1198 mutex_enter(&chanp->chan_mutex);
1200 1199 if (cma_cas_chan_state(chanp, SOL_CMA_CHAN_LISTEN,
1201 1200 SOL_CMA_CHAN_REJECT)) {
1202 1201 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1203 1202 "rdma_accept, Invalid state");
1204 1203 mutex_exit(&chanp->chan_mutex);
1205 1204 return (EINVAL);
1206 1205 }
1207 1206 mutex_exit(&chanp->chan_mutex);
1208 1207
1209 1208 if (root_idp) {
1210 1209 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "reject: root_idp %p"
1211 1210 "REQ AVL remove %p", root_chanp, idp);
1212 1211
1213 1212 /*
1214 1213 * Remove from REQ AVL tree. If this CMID has been deleted,
1215 1214 * it maybe because of timeout. Return EINVAL.
1216 1215 */
1217 1216 mutex_enter(&root_chanp->chan_mutex);
1218 1217 mutex_enter(&chanp->chan_mutex);
1219 1218 if (chanp->chan_req_state != REQ_CMID_NOTIFIED &&
1220 1219 chanp->chan_req_state != REQ_CMID_QUEUED) {
1221 1220 mutex_exit(&chanp->chan_mutex);
1222 1221 mutex_exit(&root_chanp->chan_mutex);
1223 1222 SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
1224 1223 "reject: root_idp %p chanp %p, not in REQ "
1225 1224 "AVL tree", root_chanp, chanp);
1226 1225 return (EINVAL);
1227 1226 }
1228 1227 ASSERT(cma_get_req_idp(root_idp, chanp->chan_session_id));
1229 1228 avl_remove(&root_chanp->chan_req_avl_tree, idp);
1230 1229 chanp->chan_req_state = REQ_CMID_SERVER_NONE;
1231 1230 mutex_exit(&chanp->chan_mutex);
1232 1231 mutex_exit(&root_chanp->chan_mutex);
1233 1232 }
1234 1233
1235 1234 if (chanp->chan_xport_type == SOL_CMA_XPORT_IB)
1236 1235 ret = rdma_ib_reject(idp, priv_data, priv_data_len);
1237 1236 #ifdef IWARP_SUPPORT
1238 1237 if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
1239 1238 ret = rdma_iw_reject(idp, priv_data, priv_data_len);
1240 1239 #endif /* IWARP_SUPPORT */
1241 1240
1242 1241
1243 1242 if (!ret && root_idp) {
1244 1243 cma_chan_state_t chan_state;
1245 1244
1246 1245 /*
1247 1246 * Rejecting connect request, no more events for this
1248 1247 * connection.
1249 1248 */
1250 1249 cma_handle_nomore_events(chanp);
1251 1250 mutex_enter(&chanp->chan_mutex);
1252 1251 chan_state = cma_get_chan_state(chanp);
1253 1252 mutex_exit(&chanp->chan_mutex);
1254 1253 /* If rdma_destroy_id() was called, destroy CMID */
1255 1254 if (chan_state == SOL_CMA_CHAN_DESTROY_PENDING)
1256 1255 cma_destroy_id((struct rdma_cm_id *)chanp);
1257 1256 } else if (ret && root_idp) {
1258 1257 avl_index_t where;
1259 1258
1260 1259 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1261 1260 "reject fail: Add to Req AVL of %p IDP, idp %p,"
1262 1261 "session_id %p", root_idp, idp,
1263 1262 chanp->chan_session_id);
1264 1263 mutex_enter(&root_chanp->chan_mutex);
1265 1264 mutex_enter(&chanp->chan_mutex);
1266 1265 if (chanp->chan_req_state == REQ_CMID_SERVER_NONE) {
1267 1266 if (avl_find(&root_chanp->chan_req_avl_tree,
1268 1267 (void *)chanp->chan_session_id, &where)) {
1269 1268 mutex_exit(&chanp->chan_mutex);
1270 1269 mutex_exit(&root_chanp->chan_mutex);
1271 1270 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1272 1271 "DUPLICATE ENTRY in REQ AVL : root %p, "
1273 1272 "idp %p, session_id %p",
1274 1273 root_idp, idp, chanp->chan_session_id);
1275 1274 return (EINVAL);
1276 1275 }
1277 1276 avl_insert(&root_chanp->chan_req_avl_tree,
1278 1277 (void *)idp, where);
1279 1278 chanp->chan_req_state = REQ_CMID_NOTIFIED;
1280 1279 }
1281 1280 mutex_exit(&chanp->chan_mutex);
1282 1281 mutex_exit(&root_chanp->chan_mutex);
1283 1282 }
1284 1283
1285 1284 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_reject: ret %x", ret);
1286 1285 return (ret);
1287 1286 }
1288 1287
1289 1288 int
1290 1289 rdma_disconnect(struct rdma_cm_id *idp)
1291 1290 {
1292 1291 sol_cma_chan_t *chanp;
1293 1292 int ret = EINVAL;
1294 1293 cma_chan_state_t state;
1295 1294
1296 1295 chanp = (sol_cma_chan_t *)idp;
1297 1296 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_disconnect(%p)", idp);
1298 1297
1299 1298 if (!idp)
1300 1299 return (0);
1301 1300
1302 1301 mutex_enter(&chanp->chan_mutex);
1303 1302 if (!(SOL_CMAID_CONNECTED(chanp))) {
1304 1303 SOL_OFS_DPRINTF_L3(sol_rdmacm_dbg_str,
1305 1304 "rdma_disconnect(%p) - Not connected!!", idp);
1306 1305 mutex_exit(&chanp->chan_mutex);
1307 1306 return (EINVAL);
1308 1307 }
1309 1308 state = cma_get_chan_state(chanp);
1310 1309 cma_set_chan_state(chanp, SOL_CMA_CHAN_DISCONNECT);
1311 1310 mutex_exit(&chanp->chan_mutex);
1312 1311
1313 1312 if (chanp->chan_xport_type == SOL_CMA_XPORT_IB) {
1314 1313 ret = rdma_ib_disconnect(idp);
1315 1314 #ifdef IWARP_SUPPORT
1316 1315 } else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP) {
1317 1316 ret = rdma_iw_disconnect(idp);
1318 1317 #endif /* IWARP_SUPPORT */
1319 1318 }
1320 1319
1321 1320 if (ret) {
1322 1321 mutex_enter(&chanp->chan_mutex);
1323 1322 cma_set_chan_state(chanp, state);
1324 1323 mutex_exit(&chanp->chan_mutex);
1325 1324 return (ret);
1326 1325 }
1327 1326
1328 1327 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_disconnect: ret %x", ret);
1329 1328 return (ret);
1330 1329 }
1331 1330
1332 1331 int
1333 1332 rdma_init_qp_attr(struct rdma_cm_id *idp, struct ib_qp_attr *qpattr,
1334 1333 int *qp_attr_mask)
1335 1334 {
1336 1335 sol_cma_chan_t *chanp;
1337 1336 int ret = EINVAL;
1338 1337
1339 1338 ASSERT(idp);
1340 1339 chanp = (sol_cma_chan_t *)idp;
1341 1340 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_init_qp_attr(%p, %p, %p)",
1342 1341 idp, qpattr, qp_attr_mask);
1343 1342
1344 1343 if (chanp->chan_xport_type == SOL_CMA_XPORT_IB) {
1345 1344 ret = rdma_ib_init_qp_attr(idp, qpattr, qp_attr_mask);
1346 1345 #ifdef IWARP_SUPPORT
1347 1346 } else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
1348 1347 ret = rdma_iw_init_qp_attr(idp, qpattr, qp_attr_mask);
1349 1348 #endif /* IWARP_SUPPORT */
1350 1349 } else {
1351 1350 ret = EINVAL;
1352 1351 }
1353 1352
1354 1353 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1355 1354 "rdma_init_qp_attr: ret %x", ret);
1356 1355
1357 1356 return (ret);
1358 1357 }
1359 1358
1360 1359 int
1361 1360 rdma_join_multicast(struct rdma_cm_id *idp, struct sockaddr *addr,
1362 1361 void *context)
1363 1362 {
1364 1363 sol_cma_chan_t *chanp;
1365 1364 int ret = ENODEV;
1366 1365 cma_chan_state_t state;
1367 1366
1368 1367 ASSERT(idp);
1369 1368 chanp = (sol_cma_chan_t *)idp;
1370 1369 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1371 1370 "rdma_join_multicast(%p, %p, %p)",
1372 1371 idp, addr, context);
1373 1372
1374 1373 mutex_enter(&chanp->chan_mutex);
1375 1374 state = cma_get_chan_state(chanp);
1376 1375 if (state != SOL_CMA_CHAN_BOUND &&
1377 1376 state != SOL_CMA_CHAN_ROUTE_RESLVD &&
1378 1377 state != SOL_CMA_CHAN_ADDR_RESLVD) {
1379 1378 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1380 1379 "rdma_join_multicast, Invalid state");
1381 1380 mutex_exit(&chanp->chan_mutex);
1382 1381 return (EINVAL);
1383 1382 }
1384 1383
1385 1384 if (chanp->chan_xport_type == SOL_CMA_XPORT_IB)
1386 1385 ret = rdma_ib_join_multicast(idp, addr, context);
1387 1386 #ifdef IWARP_SUPPORT
1388 1387 /* No support for Multicast on iWARP */
1389 1388 else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
1390 1389 ret = ENOTSUP;
1391 1390 #endif /* IWARP_SUPPORT */
1392 1391 mutex_exit(&chanp->chan_mutex);
1393 1392
1394 1393 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1395 1394 "rdma_join_multicast: ret %x", ret);
1396 1395 return (ret);
1397 1396 }
1398 1397
1399 1398 void
1400 1399 rdma_leave_multicast(struct rdma_cm_id *idp, struct sockaddr *addr)
1401 1400 {
1402 1401 sol_cma_chan_t *chanp;
1403 1402 cma_chan_state_t state;
1404 1403
1405 1404 ASSERT(idp);
1406 1405 chanp = (sol_cma_chan_t *)idp;
1407 1406 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_leave_multicast(%p, %p)",
1408 1407 idp, addr);
1409 1408
1410 1409 mutex_enter(&chanp->chan_mutex);
1411 1410 state = cma_get_chan_state(chanp);
1412 1411 if (state != SOL_CMA_CHAN_BOUND &&
1413 1412 state != SOL_CMA_CHAN_ROUTE_RESLVD &&
1414 1413 state != SOL_CMA_CHAN_ADDR_RESLVD) {
1415 1414 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1416 1415 "rdma_leave_multicast, Invalid state");
1417 1416 mutex_exit(&chanp->chan_mutex);
1418 1417 return;
1419 1418 }
1420 1419
1421 1420 if (chanp->chan_xport_type == SOL_CMA_XPORT_IB)
1422 1421 rdma_ib_leave_multicast(idp, addr);
1423 1422 #ifdef IWARP_SUPPORT
1424 1423 /* No support for Multicast on iWARP */
1425 1424 else if (chanp->chan_xport_type == SOL_CMA_XPORT_IWARP)
1426 1425 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1427 1426 "rdma_leave_multicast, iWARP");
1428 1427 #endif /* IWARP_SUPPORT */
1429 1428 mutex_exit(&chanp->chan_mutex);
1430 1429
1431 1430 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "rdma_join_multicast: ret");
1432 1431 }
1433 1432
1434 1433 /*
1435 1434 * Functions to compare to rdma_cm_id *, used by AVL tree
1436 1435 * routines.
1437 1436 */
1438 1437 int
1439 1438 sol_cma_req_cmid_cmp(const void *p1, const void *p2)
1440 1439 {
1441 1440 sol_cma_chan_t *chanp;
1442 1441
1443 1442 chanp = (sol_cma_chan_t *)p2;
1444 1443 if (chanp->chan_session_id > p1)
1445 1444 return (+1);
1446 1445 else if (chanp->chan_session_id < p1)
1447 1446 return (-1);
1448 1447 else
1449 1448 return (0);
1450 1449 }
1451 1450
1452 1451 int
1453 1452 sol_cma_cmid_cmp(const void *p1, const void *p2)
1454 1453 {
1455 1454 sol_cma_chan_t *chanp;
1456 1455
1457 1456 chanp = (sol_cma_chan_t *)p2;
1458 1457 if (chanp->chan_qp_hdl > p1)
1459 1458 return (+1);
1460 1459 else if (chanp->chan_qp_hdl < p1)
1461 1460 return (-1);
1462 1461 else
1463 1462 return (0);
1464 1463 }
1465 1464
1466 1465 /*
1467 1466 * Function to compare two sol_cma_glbl_listen_t *, used by
1468 1467 * AVL tree routines.
1469 1468 */
1470 1469 int
1471 1470 sol_cma_svc_cmp(const void *p1, const void *p2)
1472 1471 {
1473 1472 sol_cma_glbl_listen_t *listenp;
1474 1473 uint64_t sid;
1475 1474
1476 1475 sid = *(uint64_t *)p1;
1477 1476 listenp = (sol_cma_glbl_listen_t *)p2;
1478 1477 if (listenp->cma_listen_chan_sid > sid)
1479 1478 return (+1);
1480 1479 else if (listenp->cma_listen_chan_sid < sid)
1481 1480 return (-1);
1482 1481 else
1483 1482 return (0);
1484 1483 }
1485 1484
1486 1485 static int
1487 1486 cma_init_listen_root(sol_cma_chan_t *chanp)
1488 1487 {
1489 1488 sol_cma_glbl_listen_t *cma_listenp;
1490 1489 sol_cma_listen_info_t *chan_listenp;
1491 1490 int rc = 0;
1492 1491 avl_index_t where = 0;
1493 1492 uint64_t listen_sid;
1494 1493
1495 1494 ASSERT(chanp);
1496 1495 ASSERT(chanp->chan_listenp);
1497 1496 chan_listenp = chanp->chan_listenp;
1498 1497
1499 1498 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1500 1499 "cma_init_listen_root(%p)", chanp);
1501 1500
1502 1501 /*
1503 1502 * First search for matching global listen_info for this SID.
1504 1503 * If found with the same client handle, reuse the service
1505 1504 * handle, if matching SID is found with different client
1506 1505 * handle, return EINVAL.
1507 1506 */
1508 1507 listen_sid = ibcma_init_root_sid(chanp);
1509 1508 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1510 1509 "cma_init_listen_root: search SID 0x%llx",
1511 1510 listen_sid);
1512 1511
1513 1512 mutex_enter(&sol_cma_glob_mutex);
1514 1513 cma_listenp = avl_find(&sol_cma_glbl_listen_tree,
1515 1514 (void *) &listen_sid, &where);
1516 1515 if (cma_listenp && cma_listenp->cma_listen_clnt_hdl ==
1517 1516 chanp->chan_ib_client_hdl) {
1518 1517 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1519 1518 "cma_init_listen_root: matching listenp %p SID 0x%llx",
1520 1519 cma_listenp, listen_sid);
1521 1520 chan_listenp->listen_entry = add_genlist(
1522 1521 &cma_listenp->cma_listen_chan_list,
1523 1522 (uintptr_t)chanp, NULL);
1524 1523 chan_listenp->chan_glbl_listen_info = cma_listenp;
1525 1524 ibcma_copy_srv_hdl(chanp, cma_listenp);
1526 1525 mutex_exit(&sol_cma_glob_mutex);
1527 1526 return (0);
1528 1527 } else if (cma_listenp) {
1529 1528 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1530 1529 "cma_init_listen_root: listenp %p, SID 0x%llx match, "
1531 1530 "client hdl prev %p, new %p mismatch",
1532 1531 cma_listenp, listen_sid,
1533 1532 cma_listenp->cma_listen_clnt_hdl,
1534 1533 chanp->chan_ib_client_hdl);
1535 1534 mutex_exit(&sol_cma_glob_mutex);
1536 1535 return (EINVAL);
1537 1536 }
1538 1537
1539 1538 cma_listenp = kmem_zalloc(sizeof (sol_cma_glbl_listen_t), KM_SLEEP);
1540 1539 init_genlist(&cma_listenp->cma_listen_chan_list);
1541 1540 chan_listenp->listen_entry = add_genlist(
1542 1541 &cma_listenp->cma_listen_chan_list, (uintptr_t)chanp, NULL);
1543 1542 chan_listenp->chan_glbl_listen_info = cma_listenp;
1544 1543 cma_listenp->cma_listen_clnt_hdl = chanp->chan_ib_client_hdl;
1545 1544 cma_listenp->cma_listen_chan_sid = listen_sid;
1546 1545
1547 1546 rc = ibcma_init_root_chan(chanp, cma_listenp);
1548 1547 if (rc) {
1549 1548 mutex_exit(&sol_cma_glob_mutex);
1550 1549 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1551 1550 "cma_init_listen_root: ibcma_init_root_chan failed!!");
1552 1551 delete_genlist(&cma_listenp->cma_listen_chan_list,
1553 1552 chan_listenp->listen_entry);
1554 1553 kmem_free(cma_listenp, sizeof (sol_cma_glbl_listen_t));
1555 1554 return (rc);
1556 1555 }
1557 1556 avl_insert(&sol_cma_glbl_listen_tree, cma_listenp, where);
1558 1557 mutex_exit(&sol_cma_glob_mutex);
1559 1558 return (0);
1560 1559 }
1561 1560
1562 1561 static void
1563 1562 cma_fini_listen_root(sol_cma_chan_t *chanp)
1564 1563 {
1565 1564 sol_cma_glbl_listen_t *cma_listenp;
1566 1565 sol_cma_listen_info_t *chan_listenp;
1567 1566
1568 1567 ASSERT(chanp);
1569 1568 ASSERT(chanp->chan_listenp);
1570 1569 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "cma_fini_listen_root(%p)",
1571 1570 chanp);
1572 1571 chan_listenp = chanp->chan_listenp;
1573 1572 cma_listenp = chan_listenp->chan_glbl_listen_info;
1574 1573 ASSERT(cma_listenp);
1575 1574 mutex_enter(&sol_cma_glob_mutex);
1576 1575 delete_genlist(&cma_listenp->cma_listen_chan_list,
1577 1576 chan_listenp->listen_entry);
1578 1577 if (genlist_empty(&cma_listenp->cma_listen_chan_list)) {
1579 1578 if (ibcma_fini_root_chan(chanp) == 0) {
1580 1579 avl_remove(&sol_cma_glbl_listen_tree,
1581 1580 cma_listenp);
1582 1581 kmem_free(cma_listenp,
1583 1582 sizeof (sol_cma_glbl_listen_t));
1584 1583 } else
1585 1584 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1586 1585 "cma_fini_listen_root: "
1587 1586 "ibcma_fini_root_chan failed");
1588 1587 }
1589 1588
1590 1589 mutex_exit(&sol_cma_glob_mutex);
1591 1590 }
1592 1591
1593 1592 typedef struct cma_event_async_arg {
1594 1593 struct rdma_cm_id *idp;
1595 1594 enum rdma_cm_event_type event;
1596 1595 int status;
1597 1596 union {
1598 1597 struct rdma_conn_param conn;
1599 1598 struct rdma_ud_param param;
1600 1599 } un;
1601 1600 struct rdma_conn_param *conn_param;
1602 1601 struct rdma_ud_param *ud_paramp;
1603 1602 } cma_event_async_arg_t;
1604 1603
1605 1604 static void cma_generate_event_sync(struct rdma_cm_id *,
1606 1605 enum rdma_cm_event_type, int, struct rdma_conn_param *,
1607 1606 struct rdma_ud_param *);
1608 1607
1609 1608 void
1610 1609 cma_generate_event_thr(void *arg)
1611 1610 {
1612 1611 cma_event_async_arg_t *event_arg = (cma_event_async_arg_t *)arg;
1613 1612
1614 1613 cma_generate_event_sync(event_arg->idp, event_arg->event,
1615 1614 event_arg->status, event_arg->conn_param,
1616 1615 event_arg->ud_paramp);
1617 1616
1618 1617 if (event_arg->conn_param && event_arg->conn_param->private_data_len)
1619 1618 kmem_free((void *)event_arg->conn_param->private_data,
1620 1619 event_arg->conn_param->private_data_len);
1621 1620 if (event_arg->ud_paramp && event_arg->ud_paramp->private_data_len)
1622 1621 kmem_free((void *)event_arg->ud_paramp->private_data,
1623 1622 event_arg->ud_paramp->private_data_len);
1624 1623 kmem_free(arg, sizeof (cma_event_async_arg_t));
1625 1624 }
1626 1625
1627 1626 void
1628 1627 cma_generate_event(struct rdma_cm_id *idp, enum rdma_cm_event_type event,
1629 1628 int status, struct rdma_conn_param *conn_param,
1630 1629 struct rdma_ud_param *ud_paramp)
1631 1630 {
1632 1631 cma_event_async_arg_t *event_arg;
1633 1632 sol_cma_chan_t *chanp = (sol_cma_chan_t *)idp;
1634 1633
1635 1634 /*
1636 1635 * Set SOL_CMA_CALLER_EVENT_PROGRESS to indicate event
1637 1636 * notification is in progress, so that races between
1638 1637 * rdma_destroy_id() and event notification is taken care.
1639 1638 *
1640 1639 * If rdma_destroy_id() has been called for this CMID, call
1641 1640 * cma_generate_event_sync() which skips notification to the
1642 1641 * consumer and handles the event.
1643 1642 */
1644 1643 mutex_enter(&chanp->chan_mutex);
1645 1644 chanp->chan_cmid_destroy_state |= SOL_CMA_CALLER_EVENT_PROGRESS;
1646 1645 if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED) {
1647 1646 mutex_exit(&chanp->chan_mutex);
1648 1647 cma_generate_event_sync(idp, event, status, conn_param,
1649 1648 ud_paramp);
1650 1649 return;
1651 1650 }
1652 1651 mutex_exit(&chanp->chan_mutex);
1653 1652
1654 1653 event_arg = kmem_zalloc(sizeof (cma_event_async_arg_t), KM_SLEEP);
1655 1654 event_arg->idp = idp;
1656 1655 event_arg->event = event;
1657 1656 event_arg->status = status;
1658 1657 event_arg->conn_param = NULL;
1659 1658 event_arg->ud_paramp = NULL;
1660 1659 if (conn_param && conn_param->private_data_len) {
1661 1660 bcopy(conn_param, &(event_arg->un.conn),
1662 1661 sizeof (struct rdma_conn_param));
1663 1662 event_arg->conn_param = &(event_arg->un.conn);
1664 1663 event_arg->conn_param->private_data = kmem_zalloc(
1665 1664 conn_param->private_data_len, KM_SLEEP);
1666 1665 bcopy(conn_param->private_data,
1667 1666 (void *)event_arg->conn_param->private_data,
1668 1667 conn_param->private_data_len);
1669 1668 } else if (conn_param && conn_param->private_data_len == 0) {
1670 1669 bcopy(conn_param, &(event_arg->un.conn),
1671 1670 sizeof (struct rdma_conn_param));
1672 1671 } else if (ud_paramp) {
1673 1672 bcopy(ud_paramp, &(event_arg->un.param),
1674 1673 sizeof (struct rdma_ud_param));
1675 1674 event_arg->ud_paramp = &(event_arg->un.param);
1676 1675 if (ud_paramp->private_data_len) {
1677 1676 event_arg->ud_paramp->private_data = kmem_zalloc(
1678 1677 ud_paramp->private_data_len, KM_SLEEP);
1679 1678 bcopy(ud_paramp->private_data,
1680 1679 (void *)event_arg->ud_paramp->private_data,
1681 1680 ud_paramp->private_data_len);
1682 1681 } else if (ud_paramp->private_data) {
1683 1682 event_arg->ud_paramp->private_data =
1684 1683 ud_paramp->private_data;
1685 1684 }
1686 1685 }
1687 1686
1688 1687 if (taskq_dispatch(system_taskq, cma_generate_event_thr,
1689 1688 (void *)event_arg, TQ_SLEEP) == 0) {
1690 1689 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1691 1690 "generate_event_async: taskq_dispatch() failed!!");
1692 1691 mutex_enter(&chanp->chan_mutex);
1693 1692 chanp->chan_cmid_destroy_state &=
1694 1693 ~SOL_CMA_CALLER_EVENT_PROGRESS;
1695 1694 if (chanp->chan_cmid_destroy_state &
1696 1695 SOL_CMA_CALLER_CMID_DESTROYED)
1697 1696 cv_broadcast(&chanp->chan_destroy_cv);
1698 1697 mutex_exit(&chanp->chan_mutex);
1699 1698 }
1700 1699 }
1701 1700
1702 1701 static void
1703 1702 cma_generate_event_sync(struct rdma_cm_id *idp, enum rdma_cm_event_type event,
1704 1703 int status, struct rdma_conn_param *conn_param,
1705 1704 struct rdma_ud_param *ud_paramp)
1706 1705 {
1707 1706 struct rdma_cm_event cm_event;
1708 1707 sol_cma_chan_t *chanp = (sol_cma_chan_t *)idp;
1709 1708 struct rdma_cm_id *root_idp = NULL;
1710 1709 sol_cma_chan_t *root_chanp;
1711 1710 int ret;
1712 1711 cma_chan_state_t chan_state;
1713 1712
1714 1713 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "generate_event_sync(%p, %x, "
1715 1714 "%x, %p, %p", idp, event, status, conn_param, ud_paramp);
1716 1715
1717 1716 bzero(&cm_event, sizeof (cm_event));
1718 1717 cm_event.event = event;
1719 1718 cm_event.status = status;
1720 1719 if (conn_param)
1721 1720 bcopy((void *)conn_param, (void *)(&(cm_event.param.conn)),
1722 1721 sizeof (struct rdma_conn_param));
1723 1722 else if (ud_paramp)
1724 1723 bcopy((void *)ud_paramp, (void *)(&(cm_event.param.ud)),
1725 1724 sizeof (struct rdma_ud_param));
1726 1725
1727 1726 /*
1728 1727 * If the consumer has destroyed the context for this CMID -
1729 1728 * do not notify, skip to handling the sol_ofs specific
1730 1729 * handling of the event.
1731 1730 */
1732 1731 mutex_enter(&chanp->chan_mutex);
1733 1732 if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED) {
1734 1733 mutex_exit(&chanp->chan_mutex);
1735 1734 goto ofs_consume_event;
1736 1735 }
1737 1736 mutex_exit(&chanp->chan_mutex);
1738 1737
1739 1738 root_idp = CHAN_LISTEN_ROOT(chanp);
1740 1739 root_chanp = (sol_cma_chan_t *)root_idp;
1741 1740 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "gen_event: root_idp %p",
1742 1741 root_idp);
1743 1742
1744 1743 if (event == RDMA_CM_EVENT_CONNECT_REQUEST) {
1745 1744 /*
1746 1745 * Update chan_req_state for the REQ CMID. Decrement
1747 1746 * count of REQ CMIDs not notifed to consumer.
1748 1747 */
1749 1748 ASSERT(root_idp);
1750 1749 mutex_enter(&root_chanp->chan_mutex);
1751 1750 root_chanp->chan_req_cnt--;
1752 1751 #ifdef DEBUG
1753 1752 SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str,
1754 1753 "Dec req_cnt of %p IDP, idp %p, req_cnt %x",
1755 1754 root_idp, idp, root_chanp->chan_req_cnt);
1756 1755 #endif
1757 1756 mutex_exit(&root_chanp->chan_mutex);
1758 1757 }
1759 1758
1760 1759 /* Pass the event to the client */
1761 1760 ret = (idp->event_handler) (idp, &cm_event);
1762 1761
1763 1762 if (ret) {
1764 1763 /*
1765 1764 * If the consumer returned failure :
1766 1765 * CONNECT_REQUEST :
1767 1766 * 1. rdma_disconnect() to disconnect connection.
1768 1767 * 2. wakeup destroy, if destroy has been called
1769 1768 * for this CMID
1770 1769 * 3. Destroy CMID if rdma_destroy has not been
1771 1770 * called.
1772 1771 * DISCONNECTED :
1773 1772 * 1. call cma_handle_nomore_events() to cleanup
1774 1773 * Other Events :
1775 1774 * 1. Client is expected to destroy the CMID.
1776 1775 */
1777 1776 if (event == RDMA_CM_EVENT_CONNECT_REQUEST) {
1778 1777 SOL_OFS_DPRINTF_L4(sol_rdmacm_dbg_str,
1779 1778 "cma_generate_event_async: consumer failed %d "
1780 1779 "event", event);
1781 1780 if (rdma_disconnect(idp)) {
1782 1781 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1783 1782 "generate_event_async: rdma_disconnect "
1784 1783 "failed");
1785 1784 }
1786 1785 mutex_enter(&chanp->chan_mutex);
1787 1786 ASSERT(SOL_IS_SERVER_CMID(chanp));
1788 1787 chanp->chan_req_state = REQ_CMID_SERVER_NONE;
1789 1788 chanp->chan_cmid_destroy_state &=
1790 1789 ~SOL_CMA_CALLER_EVENT_PROGRESS;
1791 1790 if (chanp->chan_cmid_destroy_state &
1792 1791 SOL_CMA_CALLER_CMID_DESTROYED) {
1793 1792 cv_broadcast(&chanp->chan_destroy_cv);
1794 1793 mutex_exit(&chanp->chan_mutex);
1795 1794 } else {
1796 1795 mutex_exit(&chanp->chan_mutex);
1797 1796 rdma_destroy_id(idp);
1798 1797 }
1799 1798 } else if (event == RDMA_CM_EVENT_DISCONNECTED) {
1800 1799 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1801 1800 "generate_event_async: consumer failed %d event",
1802 1801 event);
1803 1802 cma_handle_nomore_events(chanp);
1804 1803 mutex_enter(&chanp->chan_mutex);
1805 1804 chan_state = cma_get_chan_state(chanp);
1806 1805 chanp->chan_cmid_destroy_state &=
1807 1806 ~SOL_CMA_CALLER_EVENT_PROGRESS;
1808 1807 if (chanp->chan_cmid_destroy_state &
1809 1808 SOL_CMA_CALLER_CMID_DESTROYED) {
1810 1809 cv_broadcast(&chanp->chan_destroy_cv);
1811 1810 mutex_exit(&chanp->chan_mutex);
1812 1811 } else if (chan_state == SOL_CMA_CHAN_DESTROY_PENDING) {
1813 1812 /* rdma_destroy_id() called: destroy CMID */
1814 1813 mutex_exit(&chanp->chan_mutex);
1815 1814 cma_destroy_id((struct rdma_cm_id *)chanp);
1816 1815 } else
1817 1816 mutex_exit(&chanp->chan_mutex);
1818 1817 } else {
1819 1818 SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
1820 1819 "generate_event_async: consumer failed %d event",
1821 1820 event);
1822 1821 }
1823 1822
1824 1823 return;
1825 1824 }
1826 1825 ofs_consume_event:
1827 1826 if (event == RDMA_CM_EVENT_DISCONNECTED) {
1828 1827 cma_chan_state_t chan_state;
1829 1828
1830 1829 cma_handle_nomore_events(chanp);
1831 1830 mutex_enter(&chanp->chan_mutex);
1832 1831 chan_state = cma_get_chan_state(chanp);
1833 1832 chanp->chan_cmid_destroy_state &=
1834 1833 ~SOL_CMA_CALLER_EVENT_PROGRESS;
1835 1834 if (chanp->chan_cmid_destroy_state &
1836 1835 SOL_CMA_CALLER_CMID_DESTROYED) {
1837 1836 cv_broadcast(&chanp->chan_destroy_cv);
1838 1837 mutex_exit(&chanp->chan_mutex);
1839 1838 } else if (chan_state == SOL_CMA_CHAN_DESTROY_PENDING) {
1840 1839 /* If rdma_destroy_id() was called, destroy CMID */
1841 1840 mutex_exit(&chanp->chan_mutex);
1842 1841 cma_destroy_id((struct rdma_cm_id *)chanp);
1843 1842 } else
1844 1843 mutex_exit(&chanp->chan_mutex);
1845 1844 return;
1846 1845 } else if (IS_UDP_CMID(idp) && event == RDMA_CM_EVENT_UNREACHABLE) {
1847 1846 /*
1848 1847 * If rdma_destroy_id() was called, destroy CMID
1849 1848 * If not chan_connect_flag/ chan_req_state has already been
1850 1849 * set to indicate that it can be deleted.
1851 1850 */
1852 1851 mutex_enter(&chanp->chan_mutex);
1853 1852 chan_state = cma_get_chan_state(chanp);
1854 1853 chanp->chan_cmid_destroy_state &=
1855 1854 ~SOL_CMA_CALLER_EVENT_PROGRESS;
1856 1855 if (chanp->chan_cmid_destroy_state &
1857 1856 SOL_CMA_CALLER_CMID_DESTROYED) {
1858 1857 cv_broadcast(&chanp->chan_destroy_cv);
1859 1858 mutex_exit(&chanp->chan_mutex);
1860 1859 } else if (chan_state == SOL_CMA_CHAN_DESTROY_PENDING) {
1861 1860 mutex_exit(&chanp->chan_mutex);
1862 1861 cma_destroy_id(idp);
1863 1862 } else
1864 1863 mutex_exit(&chanp->chan_mutex);
1865 1864 return;
1866 1865 }
1867 1866
1868 1867 mutex_enter(&chanp->chan_mutex);
1869 1868 chanp->chan_cmid_destroy_state &= ~SOL_CMA_CALLER_EVENT_PROGRESS;
1870 1869 if (chanp->chan_cmid_destroy_state & SOL_CMA_CALLER_CMID_DESTROYED)
1871 1870 cv_broadcast(&chanp->chan_destroy_cv);
1872 1871 mutex_exit(&chanp->chan_mutex);
1873 1872 }
1874 1873
1875 1874 /* Local Static functions */
1876 1875 static struct rdma_cm_id *
1877 1876 cma_alloc_chan(rdma_cm_event_handler evt_hdlr, void *context,
1878 1877 enum rdma_port_space ps)
1879 1878 {
1880 1879 struct rdma_cm_id *rdma_idp;
1881 1880 sol_cma_chan_t *chanp;
1882 1881
1883 1882 chanp = kmem_zalloc(sizeof (sol_cma_chan_t), KM_SLEEP);
1884 1883 mutex_init(&chanp->chan_mutex, NULL, MUTEX_DRIVER, NULL);
1885 1884 cv_init(&chanp->chan_destroy_cv, NULL, CV_DRIVER, NULL);
1886 1885 rdma_idp = &(chanp->chan_rdma_cm);
1887 1886 rdma_idp->context = context;
1888 1887 rdma_idp->ps = ps;
1889 1888 rdma_idp->event_handler = evt_hdlr;
1890 1889 mutex_enter(&chanp->chan_mutex);
1891 1890 cma_set_chan_state(chanp, SOL_CMA_CHAN_IDLE);
1892 1891 avl_create(&chanp->chan_req_avl_tree, sol_cma_req_cmid_cmp,
1893 1892 sizeof (sol_cma_chan_t),
1894 1893 offsetof(sol_cma_chan_t, chan_req_avl_node));
1895 1894 avl_create(&chanp->chan_acpt_avl_tree, sol_cma_cmid_cmp,
1896 1895 sizeof (sol_cma_chan_t),
1897 1896 offsetof(sol_cma_chan_t, chan_acpt_avl_node));
1898 1897 mutex_exit(&chanp->chan_mutex);
1899 1898
1900 1899 return (rdma_idp);
1901 1900 }
1902 1901
1903 1902 /* Change the state of sol_cma_chan_t */
1904 1903 static void
1905 1904 cma_set_chan_state(sol_cma_chan_t *chanp, cma_chan_state_t newstate)
1906 1905 {
1907 1906 ASSERT(MUTEX_HELD(&chanp->chan_mutex));
1908 1907 chanp->chan_state = newstate;
1909 1908 }
1910 1909
1911 1910 cma_chan_state_t
1912 1911 cma_get_chan_state(sol_cma_chan_t *chanp)
1913 1912 {
1914 1913 ASSERT(MUTEX_HELD(&chanp->chan_mutex));
1915 1914 return (chanp->chan_state);
1916 1915 }
1917 1916
1918 1917 /* Check & Swap the state of sol_ucma_chan_t */
1919 1918 static int
1920 1919 cma_cas_chan_state(sol_cma_chan_t *chanp, cma_chan_state_t prevstate,
1921 1920 cma_chan_state_t newstate)
1922 1921 {
1923 1922 int ret = 0;
1924 1923
1925 1924 ASSERT(MUTEX_HELD(&chanp->chan_mutex));
1926 1925 if (chanp->chan_state != prevstate)
1927 1926 ret = -1;
1928 1927 else
1929 1928 chanp->chan_state = newstate;
1930 1929
1931 1930 return (ret);
1932 1931 }
1933 1932
/*
 * Drain and free the per-endpoint listen list of the listening CMID "idp".
 * Each entry on the list is an endpoint channel (sol_cma_chan_t) created
 * for one HCA port; for each one, tear down the transport state, unhook
 * it from its device's endpoint list, and free it.
 */
static void
cma_free_listen_list(struct rdma_cm_id *idp)
{
	genlist_entry_t	*entry;
	sol_cma_chan_t	*chanp = (sol_cma_chan_t *)idp;

	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "cma_free_listen_list(%p)", idp);
	/* Pop entries one at a time; hold chan_mutex only around the pop. */
	mutex_enter(&chanp->chan_mutex);
	entry = remove_genlist_head(&(CHAN_LISTEN_LIST(chanp)));
	mutex_exit(&chanp->chan_mutex);
	while (entry) {
		sol_cma_chan_t	*ep_chanp;

		ep_chanp = (sol_cma_chan_t *)entry->data;
		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "fini_ep_chan: %p",
		    ep_chanp);
		/*
		 * Free the endpoint only if the transport-specific teardown
		 * succeeds; on non-zero return the entry is leaked here —
		 * NOTE(review): presumably ibcma_fini_ep_chan() retains
		 * ownership in that case; confirm against its definition.
		 */
		if (ibcma_fini_ep_chan(ep_chanp) == 0) {
			genlist_entry_t	*entry1;
			struct ib_device *device;
			cma_device_t	*cma_device;

			ASSERT(ep_chanp->chan_listenp);
			mutex_enter(&ep_chanp->chan_mutex);
			/* Unhook this endpoint from its device's list. */
			entry1 = ep_chanp->chan_listenp->listen_ep_dev_entry;
			device = ep_chanp->chan_listenp->listen_ep_device;
			ASSERT(device);
			cma_device = device->data;
			delete_genlist(&cma_device->cma_epchan_list,
			    entry1);
			sol_cma_release_device(
			    (struct rdma_cm_id *)ep_chanp);
			mutex_exit(&ep_chanp->chan_mutex);
			if (ep_chanp->chan_listenp)
				kmem_free(ep_chanp->chan_listenp,
				    sizeof (sol_cma_listen_info_t));

			/* Endpoint fully detached; destroy and free it. */
			mutex_destroy(&ep_chanp->chan_mutex);
			cv_destroy(&ep_chanp->chan_destroy_cv);
			kmem_free(ep_chanp, sizeof (sol_cma_chan_t));
			kmem_free(entry, sizeof (genlist_entry_t));
		}

		mutex_enter(&chanp->chan_mutex);
		entry = remove_genlist_head(&(CHAN_LISTEN_LIST(chanp)));
		mutex_exit(&chanp->chan_mutex);
	}
}
1981 1980
/*
 * Destroy a listening CMID when :
 *	a. All CONNECTION REQUESTs received have been rejected
 *	   or closed.
 *	b. No CONNECTION REQUEST has been received.
 * Do not destroy a listening CMID when :
 *	a. A CONNECTION REQUEST has been received and has not been
 *	   accepted from the passive / server side.
 *	b. A CONNECTION REQUEST has been received and has been
 *	   accepted from the passive / server side.
 *	Instead, mark the listening CMID as destroy pending.
 *
 * For CMIDs created for rdma_connect() or created for a
 * CONNECT request, destroy the CMID only when :
 *	the CONNECTION has been closed or rejected.
 *
 *	Otherwise, mark the CMID as destroy pending.
 *
 * When a connection is rejected or closed :
 *	if the flag indicates destroy pending,
 *	cma_destroy_id() is called to free the CMID.
 *
 *	If there is a listening CMID associated with it,
 *	cma_destroy_id(listen_cmid) is called as well.
 */
/*
 * Destroy the CMID "idp", or mark it destroy-pending if it still has
 * outstanding REQ or accepted (established) child CMIDs. May recurse
 * once onto the listen root when this CMID was the last child holding
 * the root alive. See the block comment above for the full policy.
 */
void
cma_destroy_id(struct rdma_cm_id *idp)
{
	sol_cma_chan_t		*chanp = (sol_cma_chan_t *)idp;
	cma_chan_state_t	state;
	ulong_t			acpt_nodes, req_nodes;

	mutex_enter(&chanp->chan_mutex);
	acpt_nodes = avl_numnodes(&chanp->chan_acpt_avl_tree);
	req_nodes = avl_numnodes(&chanp->chan_req_avl_tree);
	state = cma_get_chan_state(chanp);
	SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "cma_destroy_id(%p)- "
	    "est CMIDs %ld, req CMID %ld, listen_root %p, state %x, %x",
	    idp, acpt_nodes, req_nodes, chanp->listen_root,
	    state, chanp->chan_req_state);

	/*
	 * If there are either REQ recieved or Established CMIDs just return.
	 * rdma_destroy() for these CMIDs can be called by client later.
	 */
	if (acpt_nodes || req_nodes) {
		cma_set_chan_state(chanp, SOL_CMA_CHAN_DESTROY_PENDING);
		mutex_exit(&chanp->chan_mutex);
		return;
	}
	cma_set_chan_state(chanp, SOL_CMA_CHAN_DESTROYING);
	avl_destroy(&chanp->chan_req_avl_tree);
	avl_destroy(&chanp->chan_acpt_avl_tree);

	mutex_exit(&chanp->chan_mutex);
	/* Free any cached path records for this CMID. */
	if (idp->route.path_rec) {
		kmem_free(idp->route.path_rec,
		    sizeof (struct ib_sa_path_rec) * idp->route.num_paths);
		idp->route.path_rec = NULL;
	}

	/* Transport-specific teardown (IB, optionally iWARP). */
	switch (chanp->chan_xport_type) {
	case SOL_CMA_XPORT_NONE :
		break;
	case SOL_CMA_XPORT_IB :
		rdma_ib_destroy_id(idp);
		break;
#ifdef	IWARP_SUPPORT
	case SOL_CMA_XPORT_IWARP :
		rdma_iw_destroy_id(idp);
		break;
#endif	/* IWARP_SUPPORT */
	default :
		SOL_OFS_DPRINTF_L2(sol_rdmacm_dbg_str,
		    "cma_destroy_id: Unsupported xport type %x",
		    chanp->chan_xport_type);
		break;
	}

	/*
	 * Flush out & Free all listeners wrt to this ID
	 * No locking is required as this code is executed
	 * all REQ CMIDs have been destroyed. listen_list
	 * will therefore not be modified during this loop.
	 */
	if (chanp->chan_listenp) {
		cma_free_listen_list(idp);
		cma_fini_listen_root(chanp);
		kmem_free((void *)chanp->chan_listenp,
		    sizeof (sol_cma_listen_info_t));
		chanp->chan_listenp = NULL;
	}

	/*
	 * If this CMID hangs off a listening root, and the root is itself
	 * waiting to be destroyed with no remaining children, finish (or
	 * unblock) the root's destruction now.
	 */
	if (chanp->listen_root) {
		struct rdma_cm_id	*root_idp;
		sol_cma_chan_t		*root_chanp;

		root_idp = chanp->listen_root;
		root_chanp = (sol_cma_chan_t *)root_idp;
		mutex_enter(&root_chanp->chan_mutex);
		state = cma_get_chan_state(root_chanp);
		acpt_nodes = avl_numnodes(&root_chanp->chan_acpt_avl_tree);
		req_nodes = avl_numnodes(&root_chanp->chan_req_avl_tree);
		mutex_exit(&root_chanp->chan_mutex);
		SOL_OFS_DPRINTF_L5(sol_rdmacm_dbg_str, "cma_destroy_id(%p)-"
		    " root idp %p, state %x, acpt_nodes %ld, req_nodes %ld",
		    idp, root_idp, state, acpt_nodes, req_nodes);

		if (state == SOL_CMA_CHAN_DESTROY_PENDING &&
		    req_nodes == 0UL && acpt_nodes == 0UL) {
			/* Root's rdma_destroy_id() was deferred; do it now. */
			mutex_enter(&root_chanp->chan_mutex);
			root_chanp->chan_req_state = REQ_CMID_SERVER_NONE;
			mutex_exit(&root_chanp->chan_mutex);
			cma_destroy_id(root_idp);
		} else if (state == SOL_CMA_CHAN_DESTROY_WAIT &&
		    req_nodes == 0UL && acpt_nodes == 0UL) {
			/* A thread is waiting in destroy; wake it instead. */
			mutex_enter(&root_chanp->chan_mutex);
			cma_set_chan_state(root_chanp,
			    SOL_CMA_CHAN_DESTROY_PENDING);
			root_chanp->chan_req_state = REQ_CMID_SERVER_NONE;
			cv_broadcast(&root_chanp->chan_destroy_cv);
			mutex_exit(&root_chanp->chan_mutex);
		}
	}

	mutex_destroy(&chanp->chan_mutex);
	cv_destroy(&chanp->chan_destroy_cv);
	kmem_free(chanp, sizeof (sol_cma_chan_t));
}
2111 2110
2112 2111 /*
2113 2112 * Server TCP disconnect for an established channel.
2114 2113 * If destroy_id() has been called for the listening
2115 2114 * CMID and there are no more CMIDs with pending
2116 2115 * events corresponding to the listening CMID, free
2117 2116 * the listening CMID.
2118 2117 *
2119 2118 */
/*
 * Server TCP disconnect for an established channel.
 * If destroy_id() has been called for the listening
 * CMID and there are no more CMIDs with pending
 * events corresponding to the listening CMID, free
 * the listening CMID.
 *
 */
static void
cma_handle_nomore_events(sol_cma_chan_t *chanp)
{
	struct rdma_cm_id	*idp, *root_idp;
	sol_cma_chan_t		*root_chanp;
	cma_chan_state_t	state;
	ulong_t			req_nodes, acpt_nodes;

	idp = (struct rdma_cm_id *)chanp;
	root_idp = CHAN_LISTEN_ROOT(chanp);
	root_chanp = (sol_cma_chan_t *)root_idp;
	/* Nothing to do for CMIDs that don't hang off a listen root. */
	if (!root_chanp)
		return;

	/* Lock order: root first, then the child channel. */
	mutex_enter(&root_chanp->chan_mutex);
	mutex_enter(&chanp->chan_mutex);
	/* Detach this CMID from its root and drop the root's child count. */
	CHAN_LISTEN_ROOT(chanp) = NULL;
	root_chanp->chan_req_total_cnt--;

	/*
	 * Removal of CMID from the AVL trees should already have been done
	 * by now. Below code mainly as a safety net.
	 */
	if (chanp->chan_req_state == REQ_CMID_ACCEPTED) {
		ASSERT(chanp->chan_qp_hdl);
		ASSERT(cma_get_acpt_idp(root_idp,
		    chanp->chan_qp_hdl));
		avl_remove(&root_chanp->chan_acpt_avl_tree, idp);
		chanp->chan_req_state = REQ_CMID_SERVER_NONE;
	}
	if (REQ_CMID_IN_REQ_AVL_TREE(chanp)) {
		ASSERT(chanp->chan_session_id);
		ASSERT(cma_get_req_idp(root_idp,
		    chanp->chan_session_id));
		avl_remove(&root_chanp->chan_req_avl_tree, idp);
		chanp->chan_req_state = REQ_CMID_SERVER_NONE;
	}

	/* Snapshot root state, then destroy the root if it was the last. */
	state = cma_get_chan_state(root_chanp);
	req_nodes = avl_numnodes(&root_chanp->chan_req_avl_tree);
	acpt_nodes = avl_numnodes(&root_chanp->chan_acpt_avl_tree);
	mutex_exit(&chanp->chan_mutex);
	mutex_exit(&root_chanp->chan_mutex);
	if (state == SOL_CMA_CHAN_DESTROY_PENDING && req_nodes == 0UL &&
	    acpt_nodes == 0UL)
		cma_destroy_id(root_idp);
}
2167 2166
2168 2167 extern int ib_modify_qp(struct ib_qp *, struct ib_qp_attr *, int);
2169 2168 extern int rdma_init_qp_attr(struct rdma_cm_id *, struct ib_qp_attr *,
2170 2169 int *);
2171 2170
2172 2171 static int
2173 2172 cma_init_ud_qp(sol_cma_chan_t *chanp, struct ib_qp *qp)
2174 2173 {
2175 2174 struct ib_qp_attr qp_attr;
2176 2175 int qp_attr_mask, ret;
2177 2176
2178 2177 qp_attr.qp_state = IB_QPS_INIT;
2179 2178 ret = rdma_init_qp_attr(&chanp->chan_rdma_cm, &qp_attr, &qp_attr_mask);
2180 2179 if (ret)
2181 2180 return (ret);
2182 2181
2183 2182 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
2184 2183 if (ret)
2185 2184 return (ret);
2186 2185
2187 2186 qp_attr.qp_state = IB_QPS_RTR;
2188 2187 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
2189 2188 if (ret)
2190 2189 return (ret);
2191 2190
2192 2191 qp_attr.qp_state = IB_QPS_RTS;
2193 2192 qp_attr.sq_psn = 0;
2194 2193 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
2195 2194
2196 2195 return (ret);
2197 2196 }
2198 2197
2199 2198 static int
2200 2199 cma_init_conn_qp(sol_cma_chan_t *chanp, struct ib_qp *qp)
2201 2200 {
2202 2201 struct ib_qp_attr qp_attr;
2203 2202 int qp_attr_mask, ret;
2204 2203
2205 2204 qp_attr.qp_state = IB_QPS_INIT;
2206 2205 ret = rdma_init_qp_attr(&chanp->chan_rdma_cm, &qp_attr, &qp_attr_mask);
2207 2206 if (ret)
2208 2207 return (ret);
2209 2208
2210 2209 return (ib_modify_qp(qp, &qp_attr, qp_attr_mask));
2211 2210 }
2212 2211
2213 2212 static inline int
2214 2213 cma_is_ud_ps(enum rdma_port_space ps)
2215 2214 {
2216 2215 return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
2217 2216 }
2218 2217
2219 2218 int
2220 2219 rdma_create_qp(struct rdma_cm_id *idp, struct ib_pd *pd,
2221 2220 struct ib_qp_init_attr *qp_init_attr)
2222 2221 {
2223 2222 sol_cma_chan_t *chanp;
2224 2223 struct ib_qp *qp;
2225 2224 int ret;
2226 2225 ofs_client_t *dev_ofs_client;
2227 2226
2228 2227 ASSERT(idp);
2229 2228 chanp = (sol_cma_chan_t *)idp;
2230 2229 if (idp->device->node_guid != pd->device->node_guid)
2231 2230 return (-EINVAL);
2232 2231
2233 2232 dev_ofs_client = (ofs_client_t *)pd->device->clnt_hdl;
2234 2233 rdma_map_id2clnthdl(idp, dev_ofs_client->ibt_hdl, NULL);
2235 2234
2236 2235 qp = ib_create_qp(pd, qp_init_attr);
2237 2236 if ((uintptr_t)qp >= (uintptr_t)-0xFFF) {
2238 2237 return ((intptr_t)qp);
2239 2238 }
2240 2239 rdma_map_id2qphdl(idp, (void *)qp->ibt_qp);
2241 2240
2242 2241 if (cma_is_ud_ps(idp->ps)) {
2243 2242 ret = cma_init_ud_qp(chanp, qp);
2244 2243 } else {
2245 2244 ret = cma_init_conn_qp(chanp, qp);
2246 2245 }
2247 2246
2248 2247 if (ret) {
2249 2248 goto err;
2250 2249 }
2251 2250
2252 2251 idp->qp = qp;
2253 2252 chanp->chan_qp_num = qp->qp_num;
2254 2253 chanp->chan_is_srq = (qp->srq != NULL);
2255 2254 return (0);
2256 2255 err:
2257 2256 (void) ib_destroy_qp(qp);
2258 2257 return (ret);
2259 2258 }
2260 2259
2261 2260 void
2262 2261 rdma_destroy_qp(struct rdma_cm_id *idp)
2263 2262 {
2264 2263 ASSERT(idp);
2265 2264 (void) ib_destroy_qp(idp->qp);
2266 2265 idp->qp = NULL;
2267 2266 }
↓ open down ↓ |
2196 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX