8368 remove warlock leftovers from usr/src/uts
--- old/usr/src/uts/common/io/ib/mgt/ibcm/ibcm_impl.c
+++ new/usr/src/uts/common/io/ib/mgt/ibcm/ibcm_impl.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /*
27 27 * ibcm_impl.c
28 28 *
29 29 * contains internal functions of IB CM module.
30 30 *
31 31 * TBD:
32 32 * 1. HCA CATASTROPHIC/RECOVERED not handled yet
33 33 */
34 34
35 35 #include <sys/ib/mgt/ibcm/ibcm_impl.h>
36 36 #include <sys/disp.h>
37 37
38 38
39 39 /* function prototypes */
40 40 static ibcm_status_t ibcm_init(void);
41 41 static ibcm_status_t ibcm_fini(void);
42 42
43 43 /* Routines to initialize and destroy CM global locks and CVs */
44 44 static void ibcm_init_locks(void);
45 45 static void ibcm_fini_locks(void);
46 46
47 47 /* Routines that initialize/teardown CM's global hca structures */
48 48 static void ibcm_init_hcas();
49 49 static ibcm_status_t ibcm_fini_hcas();
50 50
51 51 static void ibcm_init_classportinfo();
52 52 static void ibcm_stop_timeout_thread();
53 53
54 54 /* Routines that handle HCA attach/detach asyncs */
55 55 static void ibcm_hca_attach(ib_guid_t);
56 56 static ibcm_status_t ibcm_hca_detach(ibcm_hca_info_t *);
57 57
58 58 /* Routines that initialize the HCA's port related fields */
59 59 static ibt_status_t ibcm_hca_init_port(ibcm_hca_info_t *hcap,
60 60 uint8_t port_index);
61 61 static ibcm_status_t ibcm_hca_fini_port(ibcm_hca_info_t *hcap,
62 62 uint8_t port_index);
63 63
64 64 static void ibcm_rc_flow_control_init(void);
65 65 static void ibcm_rc_flow_control_fini(void);
66 66
67 67 /*
68 68 * Routines that check if hca's avl trees and sidr lists are free of any
69 69 * active client resources, i.e., RC or UD state structures in certain states
70 70 */
71 71 static ibcm_status_t ibcm_check_avl_clean(ibcm_hca_info_t *hcap);
72 72 static ibcm_status_t ibcm_check_sidr_clean(ibcm_hca_info_t *hcap);
73 73
74 74 /* Add a new hca structure to CM's global hca list */
75 75 static ibcm_hca_info_t *ibcm_add_hca_entry(ib_guid_t hcaguid, uint_t nports);
76 76
77 77 static void ibcm_comm_est_handler(ibt_async_event_t *);
78 78 void ibcm_async_handler(void *, ibt_hca_hdl_t,
79 79 ibt_async_code_t, ibt_async_event_t *);
80 80
81 81 /* Global variables */
82 82 char cmlog[] = "ibcm"; /* for debug log messages */
83 83 ibt_clnt_hdl_t ibcm_ibt_handle; /* IBT handle */
84 84 kmutex_t ibcm_svc_info_lock; /* list lock */
85 85 kcondvar_t ibcm_svc_info_cv; /* cv for deregister */
86 86 kmutex_t ibcm_recv_mutex;
87 87 avl_tree_t ibcm_svc_avl_tree;
88 88 taskq_t *ibcm_taskq = NULL;
89 89 int taskq_dispatch_fail_cnt;
90 90
91 91 kmutex_t ibcm_mcglist_lock; /* MCG list lock */
92 92 kmutex_t ibcm_trace_mutex; /* Trace mutex */
93 93 kmutex_t ibcm_trace_print_mutex; /* Trace print mutex */
94 94 int ibcm_conn_max_trcnt = IBCM_MAX_CONN_TRCNT;
95 95
96 96 int ibcm_enable_trace = 2; /* Trace level 4 by default */
97 97 int ibcm_dtrace = 0; /* conditionally enable more dtrace */
98 98
99 -_NOTE(MUTEX_PROTECTS_DATA(ibcm_svc_info_lock, ibcm_svc_info_s::{svc_bind_list
100 - svc_ref_cnt svc_to_delete}))
101 -
102 -_NOTE(MUTEX_PROTECTS_DATA(ibcm_svc_info_lock, ibcm_svc_bind_s::{sbind_link}))
103 -
104 -_NOTE(MUTEX_PROTECTS_DATA(ibcm_trace_mutex, ibcm_conn_trace_s))
105 -
106 -_NOTE(DATA_READABLE_WITHOUT_LOCK(ibcm_conn_trace_s))
107 -
108 -_NOTE(MUTEX_PROTECTS_DATA(ibcm_trace_print_mutex, ibcm_debug_buf))
109 -
110 -_NOTE(DATA_READABLE_WITHOUT_LOCK(ibcm_debug_buf))
111 -
112 99 /*
113 100 * Initial state is INIT. All hca dr's return success immediately in this
114 101 * state, without adding or deleting any hca's to CM.
115 102 */
116 103 ibcm_finit_state_t ibcm_finit_state = IBCM_FINIT_INIT;
117 104
118 105 /* mutex and cv to manage hca's reference and resource count(s) */
119 106 kmutex_t ibcm_global_hca_lock;
120 107 kcondvar_t ibcm_global_hca_cv;
121 108
122 109 /* mutex and cv to sa session open */
123 110 kmutex_t ibcm_sa_open_lock;
124 111 kcondvar_t ibcm_sa_open_cv;
125 112 int ibcm_sa_timeout_delay = 1; /* in ticks */
126 -_NOTE(MUTEX_PROTECTS_DATA(ibcm_sa_open_lock,
127 - ibcm_port_info_s::{port_ibmf_saa_hdl port_saa_open_in_progress}))
128 113
129 -_NOTE(DATA_READABLE_WITHOUT_LOCK(ibcm_port_info_s::{port_ibmf_saa_hdl}))
130 -
131 114 /* serialize sm notice callbacks */
132 115 kmutex_t ibcm_sm_notice_serialize_lock;
133 116
134 -_NOTE(LOCK_ORDER(ibcm_sm_notice_serialize_lock ibcm_global_hca_lock))
135 -
136 -_NOTE(MUTEX_PROTECTS_DATA(ibcm_global_hca_lock, ibcm_hca_info_s::{hca_state
137 - hca_svc_cnt hca_acc_cnt hca_res_cnt hca_next}))
138 -
139 -_NOTE(MUTEX_PROTECTS_DATA(ibcm_global_hca_lock,
140 - ibcm_port_info_s::{port_ibmf_hdl}))
141 -
142 -_NOTE(MUTEX_PROTECTS_DATA(ibcm_sm_notice_serialize_lock,
143 - ibcm_port_info_s::{port_event_status}))
144 -
145 -_NOTE(DATA_READABLE_WITHOUT_LOCK(ibcm_hca_info_s::{hca_state}))
146 -_NOTE(DATA_READABLE_WITHOUT_LOCK(
147 - ibcm_hca_info_s::{hca_port_info.port_ibmf_hdl}))
148 -
149 117 /* mutex for CM's qp list management */
150 118 kmutex_t ibcm_qp_list_lock;
151 119
152 -_NOTE(MUTEX_PROTECTS_DATA(ibcm_qp_list_lock, ibcm_port_info_s::{port_qplist}))
153 -_NOTE(MUTEX_PROTECTS_DATA(ibcm_qp_list_lock, ibcm_qp_list_s))
154 -_NOTE(MUTEX_PROTECTS_DATA(ibcm_qp_list_lock, ibcm_qp_list_s))
155 -
156 120 kcondvar_t ibcm_timeout_list_cv;
157 121 kcondvar_t ibcm_timeout_thread_done_cv;
158 122 kt_did_t ibcm_timeout_thread_did;
159 123 ibcm_state_data_t *ibcm_timeout_list_hdr, *ibcm_timeout_list_tail;
160 124 ibcm_ud_state_data_t *ibcm_ud_timeout_list_hdr, *ibcm_ud_timeout_list_tail;
161 125 kmutex_t ibcm_timeout_list_lock;
162 126 uint8_t ibcm_timeout_list_flags = 0;
163 127 pri_t ibcm_timeout_thread_pri = MINCLSYSPRI;
164 128
165 -_NOTE(MUTEX_PROTECTS_DATA(ibcm_timeout_list_lock,
166 - ibcm_state_data_s::timeout_next))
167 -
168 -_NOTE(MUTEX_PROTECTS_DATA(ibcm_timeout_list_lock,
169 - ibcm_ud_state_data_s::ud_timeout_next))
170 -
171 129 /*
172 130 * Flow control logic for open_rc_channel uses the following.
173 131 */
174 132
175 133 struct ibcm_open_s {
176 134 kmutex_t mutex;
177 135 kcondvar_t cv;
178 136 uint8_t task_running;
179 137 uint_t queued;
180 138 uint_t exit_deferred;
181 139 uint_t in_progress;
182 140 uint_t in_progress_max;
183 141 uint_t sends;
184 142 uint_t sends_max;
185 143 uint_t sends_lowat;
186 144 uint_t sends_hiwat;
187 145 ibcm_state_data_t *tail;
188 146 ibcm_state_data_t head;
189 147 } ibcm_open;
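
The head/tail pair above forms a sentinel queue: tail always points at the last queued statep and parks on head when nothing is queued, and a statep whose open_link is NULL is not on the queue at all. A minimal sketch of those invariants, using only the manipulations that ibcm_open_enqueue() and ibcm_open_done() below actually perform (statep here stands for any ibcm_state_data_t being queued):

	/* Empty queue: the sentinel has no successor, tail parks on it. */
	ibcm_open.head.open_link = NULL;
	ibcm_open.tail = &ibcm_open.head;

	/*
	 * Enqueue of statep: the newest element links back to the sentinel,
	 * which is how the list walk in ibcm_open_done() recognizes the
	 * last entry, while open_link == NULL keeps meaning "not queued".
	 */
	statep->open_link = &ibcm_open.head;
	ibcm_open.tail->open_link = statep;
	ibcm_open.tail = statep;
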
190 148
191 149 /*
192 150 * Flow control logic for SA access and close_rc_channel calls follows.
193 151 */
194 152
195 153 int ibcm_close_simul_max = 12;
196 154 int ibcm_lapr_simul_max = 12;
197 155 int ibcm_saa_simul_max = 8;
198 156
199 157 typedef struct ibcm_flow1_s {
200 158 struct ibcm_flow1_s *link;
201 159 kcondvar_t cv;
202 160 uint8_t waiters; /* 1 to IBCM_FLOW_SIMUL_MAX */
203 161 } ibcm_flow1_t;
204 162
205 163 typedef struct ibcm_flow_s {
206 164 ibcm_flow1_t *list;
207 165 uint_t simul; /* #requests currently outstanding */
208 166 uint_t simul_max;
209 167 uint_t waiters_per_chunk;
210 168 uint_t lowat;
211 169 uint_t lowat_default;
212 170 /* statistics */
213 171 uint_t total;
214 172 } ibcm_flow_t;
215 173
216 174 ibcm_flow_t ibcm_saa_flow;
217 175 ibcm_flow_t ibcm_close_flow;
218 176 ibcm_flow_t ibcm_lapr_flow;
219 177
220 178 /* NONBLOCKING close requests are queued */
221 179 struct ibcm_close_s {
222 180 kmutex_t mutex;
223 181 ibcm_state_data_t *tail;
224 182 ibcm_state_data_t head;
225 183 } ibcm_close;
226 184
227 185 static ibt_clnt_modinfo_t ibcm_ibt_modinfo = { /* Client's modinfop */
228 186 IBTI_V_CURR,
229 187 IBT_CM,
230 188 ibcm_async_handler,
231 189 NULL,
232 190 "IBCM"
233 191 };
234 192
235 193 /* IBCM's list of HCAs registered with it */
236 194 static ibcm_hca_info_t *ibcm_hca_listp = NULL; /* CM's HCA list */
237 195
238 196 /* Array of CM state call table functions */
239 197 ibcm_state_handler_t ibcm_sm_funcs_tbl[] = {
240 198 ibcm_process_req_msg,
241 199 ibcm_process_mra_msg,
242 200 ibcm_process_rej_msg,
243 201 ibcm_process_rep_msg,
244 202 ibcm_process_rtu_msg,
245 203 ibcm_process_dreq_msg,
246 204 ibcm_process_drep_msg,
247 205 ibcm_process_sidr_req_msg,
248 206 ibcm_process_sidr_rep_msg,
249 207 ibcm_process_lap_msg,
250 208 ibcm_process_apr_msg
251 209 };
252 210
253 211 /* the following globals are CM tunables */
254 212 ibt_rnr_nak_time_t ibcm_default_rnr_nak_time = IBT_RNR_NAK_655ms;
255 213
256 214 uint8_t ibcm_max_retries = IBCM_MAX_RETRIES;
257 215 clock_t ibcm_local_processing_time = IBCM_LOCAL_RESPONSE_TIME;
258 216 clock_t ibcm_remote_response_time = IBCM_REMOTE_RESPONSE_TIME;
259 217 ib_time_t ibcm_max_sidr_rep_proctime = IBCM_MAX_SIDR_PROCESS_TIME;
260 218 ib_time_t ibcm_max_sidr_pktlife_time = IBCM_MAX_SIDR_PKT_LIFE_TIME;
261 219
262 220 ib_time_t ibcm_max_sidr_rep_store_time = 18;
263 221 uint32_t ibcm_wait_for_acc_cnt_timeout = 2000000; /* 2 sec */
264 222
265 223 ib_time_t ibcm_max_ib_pkt_lt = IBCM_MAX_IB_PKT_LT;
266 224 ib_time_t ibcm_max_ib_mad_pkt_lt = IBCM_MAX_IB_MAD_PKT_LT;
267 225
268 226 /*
269 227 * This delay accounts for time involved in various activities as follows :
270 228 *
271 229 * IBMF delays for posting the MADs in non-blocking mode
272 230 * IBMF delays for receiving the MADs and delivering to CM
273 231 * CM delays in processing the MADs before invoking client handlers,
274 232 * Any other delays associated with HCA driver in processing the MADs and
275 233 * other subsystems that CM may invoke (ex : SA, HCA driver)
276 234 */
277 235 uint32_t ibcm_sw_delay = 1000; /* 1000us / 1ms */
278 236 uint32_t ibcm_max_sa_retries = IBCM_MAX_SA_RETRIES + 1;
279 237
280 238 /* approx boot time */
281 239 uint32_t ibcm_adj_btime = 4; /* 4 seconds */
282 240
283 241 /*
284 242 * The information in ibcm_clpinfo is kept in wireformat and is setup at
285 243 * init time, and used read-only after that
286 244 */
287 245 ibcm_classportinfo_msg_t ibcm_clpinfo;
288 246
289 247 char *event_str[] = {
290 248 "NEVER SEE THIS ",
291 249 "SESSION_ID ",
292 250 "CHAN_HDL ",
293 251 "LOCAL_COMID/HCA/PORT ",
294 252 "LOCAL_QPN ",
295 253 "REMOTE_COMID/HCA ",
296 254 "REMOTE_QPN ",
297 255 "BASE_TIME ",
298 256 "INCOMING_REQ ",
299 257 "INCOMING_REP ",
300 258 "INCOMING_RTU ",
301 259 "INCOMING_COMEST ",
302 260 "INCOMING_MRA ",
303 261 "INCOMING_REJ ",
304 262 "INCOMING_LAP ",
305 263 "INCOMING_APR ",
306 264 "INCOMING_DREQ ",
307 265 "INCOMING_DREP ",
308 266 "OUTGOING_REQ ",
309 267 "OUTGOING_REP ",
310 268 "OUTGOING_RTU ",
311 269 "OUTGOING_LAP ",
312 270 "OUTGOING_APR ",
313 271 "OUTGOING_MRA ",
314 272 "OUTGOING_REJ ",
315 273 "OUTGOING_DREQ ",
316 274 "OUTGOING_DREP ",
317 275 "REQ_POST_COMPLETE ",
318 276 "REP_POST_COMPLETE ",
319 277 "RTU_POST_COMPLETE ",
320 278 "MRA_POST_COMPLETE ",
321 279 "REJ_POST_COMPLETE ",
322 280 "LAP_POST_COMPLETE ",
323 281 "APR_POST_COMPLETE ",
324 282 "DREQ_POST_COMPLETE ",
325 283 "DREP_POST_COMPLETE ",
326 284 "TIMEOUT_REP ",
327 285 "CALLED_REQ_RCVD_EVENT ",
328 286 "RET_REQ_RCVD_EVENT ",
329 287 "CALLED_REP_RCVD_EVENT ",
330 288 "RET_REP_RCVD_EVENT ",
331 289 "CALLED_CONN_EST_EVENT ",
332 290 "RET_CONN_EST_EVENT ",
333 291 "CALLED_CONN_FAIL_EVENT ",
334 292 "RET_CONN_FAIL_EVENT ",
335 293 "CALLED_CONN_CLOSE_EVENT ",
336 294 "RET_CONN_CLOSE_EVENT ",
337 295 "INIT_INIT ",
338 296 "INIT_INIT_FAIL ",
339 297 "INIT_RTR ",
340 298 "INIT_RTR_FAIL ",
341 299 "RTR_RTS ",
342 300 "RTR_RTS_FAIL ",
343 301 "RTS_RTS ",
344 302 "RTS_RTS_FAIL ",
345 303 "TO_ERROR ",
346 304 "ERROR_FAIL ",
347 305 "SET_ALT ",
348 306 "SET_ALT_FAIL ",
349 307 "STALE_DETECT ",
350 308 "OUTGOING_REQ_RETRY ",
351 309 "OUTGOING_REP_RETRY ",
352 310 "OUTGOING_LAP_RETRY ",
353 311 "OUTGOING_MRA_RETRY ",
354 312 "OUTGOING_DREQ_RETRY ",
355 313 "NEVER SEE THIS "
356 314 };
357 315
358 316 char ibcm_debug_buf[IBCM_DEBUG_BUF_SIZE];
359 317
360 -_NOTE(SCHEME_PROTECTS_DATA("used in a localized function consistently",
361 - ibcm_debug_buf))
362 -_NOTE(READ_ONLY_DATA(ibcm_taskq))
363 -
364 -_NOTE(MUTEX_PROTECTS_DATA(ibcm_timeout_list_lock, ibcm_timeout_list_flags))
365 -_NOTE(MUTEX_PROTECTS_DATA(ibcm_timeout_list_lock, ibcm_timeout_list_hdr))
366 -_NOTE(MUTEX_PROTECTS_DATA(ibcm_timeout_list_lock, ibcm_ud_timeout_list_hdr))
367 -
368 318 #ifdef DEBUG
369 319 int ibcm_test_mode = 0; /* set to 1, if running tests */
370 320 #endif
371 321
372 322
373 323 /* Module Driver Info */
374 324 static struct modlmisc ibcm_modlmisc = {
375 325 &mod_miscops,
376 326 "IB Communication Manager"
377 327 };
378 328
379 329 /* Module Linkage */
380 330 static struct modlinkage ibcm_modlinkage = {
381 331 MODREV_1,
382 332 &ibcm_modlmisc,
383 333 NULL
384 334 };
385 335
386 336
387 337 int
388 338 _init(void)
389 339 {
390 340 int rval;
391 341 ibcm_status_t status;
392 342
393 343 status = ibcm_init();
394 344 if (status != IBCM_SUCCESS) {
395 345 IBTF_DPRINTF_L2(cmlog, "_init: ibcm failed %d", status);
396 346 return (EINVAL);
397 347 }
398 348
399 349 rval = mod_install(&ibcm_modlinkage);
400 350 if (rval != 0) {
401 351 IBTF_DPRINTF_L2(cmlog, "_init: ibcm mod_install failed %d",
402 352 rval);
403 353 (void) ibcm_fini();
404 354 }
405 355
406 356 IBTF_DPRINTF_L5(cmlog, "_init: ibcm successful");
407 357 return (rval);
408 358
409 359 }
410 360
411 361
412 362 int
413 363 _info(struct modinfo *modinfop)
414 364 {
415 365 return (mod_info(&ibcm_modlinkage, modinfop));
416 366 }
417 367
418 368
419 369 int
420 370 _fini(void)
421 371 {
422 372 int status;
423 373
424 374 if (ibcm_fini() != IBCM_SUCCESS)
425 375 return (EBUSY);
426 376
427 377 if ((status = mod_remove(&ibcm_modlinkage)) != 0) {
428 378 IBTF_DPRINTF_L2(cmlog, "_fini: ibcm mod_remove failed %d",
429 379 status);
430 380 return (status);
431 381 }
432 382
433 383 IBTF_DPRINTF_L5(cmlog, "_fini: ibcm successful");
434 384
435 385 return (status);
436 386 }
437 387
438 388 /* Initializes all global mutex and CV in cm module */
439 389 static void
440 390 ibcm_init_locks()
441 391 {
442 392
443 393 /* Verify CM MAD sizes */
444 394 #ifdef DEBUG
445 395
446 396 if (ibcm_test_mode > 1) {
447 397
448 398 IBTF_DPRINTF_L1(cmlog, "REQ MAD SIZE %d",
449 399 sizeof (ibcm_req_msg_t));
450 400 IBTF_DPRINTF_L1(cmlog, "REP MAD SIZE %d",
451 401 sizeof (ibcm_rep_msg_t));
452 402 IBTF_DPRINTF_L1(cmlog, "RTU MAD SIZE %d",
453 403 sizeof (ibcm_rtu_msg_t));
454 404 IBTF_DPRINTF_L1(cmlog, "MRA MAD SIZE %d",
455 405 sizeof (ibcm_mra_msg_t));
456 406 IBTF_DPRINTF_L1(cmlog, "REJ MAD SIZE %d",
457 407 sizeof (ibcm_rej_msg_t));
458 408 IBTF_DPRINTF_L1(cmlog, "LAP MAD SIZE %d",
459 409 sizeof (ibcm_lap_msg_t));
460 410 IBTF_DPRINTF_L1(cmlog, "APR MAD SIZE %d",
461 411 sizeof (ibcm_apr_msg_t));
462 412 IBTF_DPRINTF_L1(cmlog, "DREQ MAD SIZE %d",
463 413 sizeof (ibcm_dreq_msg_t));
464 414 IBTF_DPRINTF_L1(cmlog, "DREP MAD SIZE %d",
465 415 sizeof (ibcm_drep_msg_t));
466 416 IBTF_DPRINTF_L1(cmlog, "SIDR REQ MAD SIZE %d",
467 417 sizeof (ibcm_sidr_req_msg_t));
468 418 IBTF_DPRINTF_L1(cmlog, "SIDR REP MAD SIZE %d",
469 419 sizeof (ibcm_sidr_rep_msg_t));
470 420 }
471 421
472 422 #endif
473 423
474 424 /* Create all global locks within cm module */
475 425 mutex_init(&ibcm_svc_info_lock, NULL, MUTEX_DEFAULT, NULL);
476 426 mutex_init(&ibcm_mcglist_lock, NULL, MUTEX_DEFAULT, NULL);
477 427 mutex_init(&ibcm_timeout_list_lock, NULL, MUTEX_DEFAULT, NULL);
478 428 mutex_init(&ibcm_global_hca_lock, NULL, MUTEX_DEFAULT, NULL);
479 429 mutex_init(&ibcm_sa_open_lock, NULL, MUTEX_DEFAULT, NULL);
480 430 mutex_init(&ibcm_recv_mutex, NULL, MUTEX_DEFAULT, NULL);
481 431 mutex_init(&ibcm_sm_notice_serialize_lock, NULL, MUTEX_DEFAULT, NULL);
482 432 mutex_init(&ibcm_qp_list_lock, NULL, MUTEX_DEFAULT, NULL);
483 433 mutex_init(&ibcm_trace_mutex, NULL, MUTEX_DEFAULT, NULL);
484 434 mutex_init(&ibcm_trace_print_mutex, NULL, MUTEX_DEFAULT, NULL);
485 435 cv_init(&ibcm_svc_info_cv, NULL, CV_DRIVER, NULL);
486 436 cv_init(&ibcm_timeout_list_cv, NULL, CV_DRIVER, NULL);
487 437 cv_init(&ibcm_timeout_thread_done_cv, NULL, CV_DRIVER, NULL);
488 438 cv_init(&ibcm_global_hca_cv, NULL, CV_DRIVER, NULL);
489 439 cv_init(&ibcm_sa_open_cv, NULL, CV_DRIVER, NULL);
490 440 avl_create(&ibcm_svc_avl_tree, ibcm_svc_compare,
491 441 sizeof (ibcm_svc_info_t),
492 442 offsetof(struct ibcm_svc_info_s, svc_link));
493 443
494 444 IBTF_DPRINTF_L5(cmlog, "ibcm_init_locks: done");
495 445 }
496 446
497 447 /* Destroys all global mutex and CV in cm module */
498 448 static void
499 449 ibcm_fini_locks()
500 450 {
501 451 /* Destroy all global locks within cm module */
502 452 mutex_destroy(&ibcm_svc_info_lock);
503 453 mutex_destroy(&ibcm_mcglist_lock);
504 454 mutex_destroy(&ibcm_timeout_list_lock);
505 455 mutex_destroy(&ibcm_global_hca_lock);
506 456 mutex_destroy(&ibcm_sa_open_lock);
507 457 mutex_destroy(&ibcm_recv_mutex);
508 458 mutex_destroy(&ibcm_sm_notice_serialize_lock);
509 459 mutex_destroy(&ibcm_qp_list_lock);
510 460 mutex_destroy(&ibcm_trace_mutex);
511 461 mutex_destroy(&ibcm_trace_print_mutex);
512 462 cv_destroy(&ibcm_svc_info_cv);
513 463 cv_destroy(&ibcm_timeout_list_cv);
514 464 cv_destroy(&ibcm_timeout_thread_done_cv);
515 465 cv_destroy(&ibcm_global_hca_cv);
516 466 cv_destroy(&ibcm_sa_open_cv);
517 467 avl_destroy(&ibcm_svc_avl_tree);
518 468
519 469 IBTF_DPRINTF_L5(cmlog, "ibcm_fini_locks: done");
520 470 }
521 471
522 472
523 473 /* Initialize CM's classport info */
524 474 static void
525 475 ibcm_init_classportinfo()
526 476 {
527 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibcm_clpinfo));
528 -
529 477 ibcm_clpinfo.BaseVersion = IBCM_MAD_BASE_VERSION;
530 478 ibcm_clpinfo.ClassVersion = IBCM_MAD_CLASS_VERSION;
531 479
532 480 /* For now, CM supports same capabilities at all ports */
533 481 ibcm_clpinfo.CapabilityMask =
534 482 h2b16(IBCM_CPINFO_CAP_RC | IBCM_CPINFO_CAP_SIDR);
535 483
536 484 /* Bits 0-7 are all 0 for Communication Mgmt Class */
537 485
538 486 /* For now, CM has the same respvalue at all ports */
539 487 ibcm_clpinfo.RespTimeValue_plus =
540 488 h2b32(ibt_usec2ib(ibcm_local_processing_time) & 0x1f);
541 489
542 490 /* For now, redirect fields are set to 0 */
543 491 /* Trap fields are not applicable to CM, hence set to 0 */
544 492
545 - _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibcm_clpinfo));
546 493 IBTF_DPRINTF_L5(cmlog, "ibcm_init_classportinfo: done");
547 494 }
548 495
549 496 /*
550 497 * ibcm_init():
551 498 * - call ibt_attach()
552 499 * - create AVL trees
553 500 * - Attach HCA handlers that are already present before
554 501 * CM got loaded.
555 502 *
556 503 * Arguments: NONE
557 504 *
558 505 * Return values:
559 506 * IBCM_SUCCESS - success
560 507 */
561 508 static ibcm_status_t
562 509 ibcm_init(void)
563 510 {
564 511 ibt_status_t status;
565 512 kthread_t *t;
566 513
567 514 IBTF_DPRINTF_L3(cmlog, "ibcm_init:");
568 515
569 516 ibcm_init_classportinfo();
570 517
571 518 if (ibcm_init_ids() != IBCM_SUCCESS) {
572 519 IBTF_DPRINTF_L1(cmlog, "ibcm_init: "
573 520 "fatal error: vmem_create() failed");
574 521 return (IBCM_FAILURE);
575 522 }
576 523 ibcm_init_locks();
577 524
578 525 if (ibcm_ar_init() != IBCM_SUCCESS) {
579 526 IBTF_DPRINTF_L1(cmlog, "ibcm_init: "
580 527 "fatal error: ibcm_ar_init() failed");
581 528 ibcm_fini_ids();
582 529 ibcm_fini_locks();
583 530 return (IBCM_FAILURE);
584 531 }
585 532 ibcm_rc_flow_control_init();
586 533
587 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibcm_taskq))
588 534 ibcm_taskq = system_taskq;
589 - _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibcm_taskq))
590 535
591 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibcm_timeout_list_flags))
592 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibcm_timeout_thread_did))
593 -
594 536 /* Start the timeout list processing thread */
595 537 ibcm_timeout_list_flags = 0;
596 538 t = thread_create(NULL, 0, ibcm_process_tlist, 0, 0, &p0, TS_RUN,
597 539 ibcm_timeout_thread_pri);
598 540 ibcm_timeout_thread_did = t->t_did;
599 541
600 - _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibcm_timeout_list_flags))
601 - _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibcm_timeout_thread_did))
602 -
603 542 /*
604 543 * NOTE : if ibt_attach is done after ibcm_init_hcas, then some
605 544 * HCA DR events may be lost. CM could call re-init hca list
606 545 * again, but it is more complicated. Some HCA's DR's lost may
607 546 * be HCA detach, which makes hca list re-syncing and locking more
608 547 * complex
609 548 */
610 549 status = ibt_attach(&ibcm_ibt_modinfo, NULL, NULL, &ibcm_ibt_handle);
611 550 if (status != IBT_SUCCESS) {
612 551 IBTF_DPRINTF_L2(cmlog, "ibcm_init(): ibt_attach failed %d",
613 552 status);
614 553 (void) ibcm_ar_fini();
615 554 ibcm_stop_timeout_thread();
616 555 ibcm_fini_ids();
617 556 ibcm_fini_locks();
618 557 ibcm_rc_flow_control_fini();
619 558 return (IBCM_FAILURE);
620 559 }
621 560
622 561 /* Block all HCA attach/detach asyncs */
623 562 mutex_enter(&ibcm_global_hca_lock);
624 563
625 564 ibcm_init_hcas();
626 565 ibcm_finit_state = IBCM_FINIT_IDLE;
627 566
628 567 ibcm_path_cache_init();
629 568 /*
630 569 * This callback will be used by IBTL to get the Node record for a
631 570 * given LID via the specified HCA and port.
632 571 */
633 572 ibtl_cm_set_node_info_cb(ibcm_ibtl_node_info);
634 573
635 574 /* Unblock any waiting HCA DR asyncs in CM */
636 575 mutex_exit(&ibcm_global_hca_lock);
637 576
638 577 IBTF_DPRINTF_L4(cmlog, "ibcm_init: done");
639 578 return (IBCM_SUCCESS);
640 579 }
641 580
642 581 /* Allocates and initializes the "per hca" global data in CM */
643 582 static void
644 583 ibcm_init_hcas()
645 584 {
646 585 uint_t num_hcas = 0;
647 586 ib_guid_t *guid_array;
648 587 int i;
649 588
650 589 IBTF_DPRINTF_L5(cmlog, "ibcm_init_hcas:");
651 590
652 591 /* Get the number of HCAs */
653 592 num_hcas = ibt_get_hca_list(&guid_array);
654 593 IBTF_DPRINTF_L4(cmlog, "ibcm_init_hcas: ibt_get_hca_list() "
655 594 "returned %d hcas", num_hcas);
656 595
657 596 ASSERT(MUTEX_HELD(&ibcm_global_hca_lock));
658 597
659 598 for (i = 0; i < num_hcas; i++)
660 599 ibcm_hca_attach(guid_array[i]);
661 600
662 601 if (num_hcas)
663 602 ibt_free_hca_list(guid_array, num_hcas);
664 603
665 604 IBTF_DPRINTF_L5(cmlog, "ibcm_init_hcas: done");
666 605 }
667 606
668 607
669 608 /*
670 609 * ibcm_fini():
671 610 * - Deregister w/ ibt
672 611 * - Cleanup IBCM HCA listp
673 612 * - Destroy mutexes
674 613 *
675 614 * Arguments: NONE
676 615 *
677 616 * Return values:
678 617 * IBCM_SUCCESS - success
679 618 */
680 619 static ibcm_status_t
681 620 ibcm_fini(void)
682 621 {
683 622 ibt_status_t status;
684 623
685 624 IBTF_DPRINTF_L3(cmlog, "ibcm_fini:");
686 625
687 626 /*
688 627 * CM assumes that the all general clients got rid of all the
689 628 * established connections and service registrations, completed all
690 629 * pending SIDR operations before a call to ibcm_fini()
691 630 */
692 631
693 632 if (ibcm_ar_fini() != IBCM_SUCCESS) {
694 633 IBTF_DPRINTF_L2(cmlog, "ibcm_fini: ibcm_ar_fini failed");
695 634 return (IBCM_FAILURE);
696 635 }
697 636
698 637 /* cleanup the svcinfo list */
699 638 mutex_enter(&ibcm_svc_info_lock);
700 639 if (avl_first(&ibcm_svc_avl_tree) != NULL) {
701 640 IBTF_DPRINTF_L2(cmlog, "ibcm_fini: "
702 641 "ibcm_svc_avl_tree is not empty");
703 642 mutex_exit(&ibcm_svc_info_lock);
704 643 return (IBCM_FAILURE);
705 644 }
706 645 mutex_exit(&ibcm_svc_info_lock);
707 646
708 647 /* disables any new hca attach/detaches */
709 648 mutex_enter(&ibcm_global_hca_lock);
710 649
711 650 ibcm_finit_state = IBCM_FINIT_BUSY;
712 651
713 652 if (ibcm_fini_hcas() != IBCM_SUCCESS) {
714 653 IBTF_DPRINTF_L2(cmlog, "ibcm_fini: "
715 654 "some hca's still have client resources");
716 655
717 656 /* First, re-initialize the hcas */
718 657 ibcm_init_hcas();
719 658 /* and then enable the HCA asyncs */
720 659 ibcm_finit_state = IBCM_FINIT_IDLE;
721 660 mutex_exit(&ibcm_global_hca_lock);
722 661 if (ibcm_ar_init() != IBCM_SUCCESS) {
723 662 IBTF_DPRINTF_L1(cmlog, "ibcm_fini:ibcm_ar_init failed");
724 663 }
725 664 return (IBCM_FAILURE);
726 665 }
727 666
728 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibcm_timeout_list_hdr))
729 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibcm_ud_timeout_list_hdr))
730 -
731 667 ASSERT(ibcm_timeout_list_hdr == NULL);
732 668 ASSERT(ibcm_ud_timeout_list_hdr == NULL);
733 669
734 - _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibcm_timeout_list_hdr))
735 - _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibcm_ud_timeout_list_hdr))
736 -
737 670 /* Release any pending asyncs on ibcm_global_hca_lock */
738 671 ibcm_finit_state = IBCM_FINIT_SUCCESS;
739 672 mutex_exit(&ibcm_global_hca_lock);
740 673
741 674 ibcm_stop_timeout_thread();
742 675
743 676 ibtl_cm_set_node_info_cb(NULL);
744 677 /*
745 678 * Detach from IBTL. Waits until all pending asyncs are complete.
746 679 * Above cv_broadcast wakes up any waiting hca attach/detach asyncs
747 680 */
748 681 status = ibt_detach(ibcm_ibt_handle);
749 682
750 683 /* if detach fails, CM didn't free up some resources, so assert */
751 684 if (status != IBT_SUCCESS)
752 685 IBTF_DPRINTF_L1(cmlog, "ibcm_fini: ibt_detach failed %d",
753 686 status);
754 687
755 688 ibcm_rc_flow_control_fini();
756 689
757 690 ibcm_path_cache_fini();
758 691
759 692 ibcm_fini_ids();
760 693 ibcm_fini_locks();
761 694 IBTF_DPRINTF_L3(cmlog, "ibcm_fini: done");
762 695 return (IBCM_SUCCESS);
763 696 }
764 697
765 698 /* This routine exit's the ibcm timeout thread */
766 699 static void
767 700 ibcm_stop_timeout_thread()
768 701 {
769 702 mutex_enter(&ibcm_timeout_list_lock);
770 703
771 704 /* Stop the timeout list processing thread */
772 705 ibcm_timeout_list_flags =
773 706 ibcm_timeout_list_flags | IBCM_TIMEOUT_THREAD_EXIT;
774 707
775 708 /* Wake up, if the timeout thread is on a cv_wait */
776 709 cv_signal(&ibcm_timeout_list_cv);
777 710
778 711 mutex_exit(&ibcm_timeout_list_lock);
779 712 thread_join(ibcm_timeout_thread_did);
780 713
781 714 IBTF_DPRINTF_L5(cmlog, "ibcm_stop_timeout_thread: done");
782 715 }
783 716
784 717
785 718 /* Attempts to release all the hca's associated with CM */
786 719 static ibcm_status_t
787 720 ibcm_fini_hcas()
788 721 {
789 722 ibcm_hca_info_t *hcap, *next;
790 723
791 724 IBTF_DPRINTF_L4(cmlog, "ibcm_fini_hcas:");
792 725
793 726 ASSERT(MUTEX_HELD(&ibcm_global_hca_lock));
794 727
795 728 hcap = ibcm_hca_listp;
796 729 while (hcap != NULL) {
797 730 next = hcap->hca_next;
798 731 if (ibcm_hca_detach(hcap) != IBCM_SUCCESS) {
799 732 ibcm_hca_listp = hcap;
800 733 return (IBCM_FAILURE);
801 734 }
802 735 hcap = next;
803 736 }
804 737
805 738 IBTF_DPRINTF_L4(cmlog, "ibcm_fini_hcas: SUCCEEDED");
806 739 return (IBCM_SUCCESS);
807 740 }
808 741
809 742
810 743 /*
811 744 * ibcm_hca_attach():
812 745 * Called as an asynchronous event to notify CM of an attach of HCA.
813 746 * Here ibcm_hca_info_t is initialized and all fields are
814 747 * filled in along with SA Access handles and IBMA handles.
815 748 * Also called from ibcm_init to initialize ibcm_hca_info_t's for each
816 749 * hca's
817 750 *
818 751 * Arguments: (WILL CHANGE BASED ON ASYNC EVENT CODE)
819 752 * hca_guid - HCA's guid
820 753 *
821 754 * Return values: NONE
822 755 */
823 756 static void
824 757 ibcm_hca_attach(ib_guid_t hcaguid)
825 758 {
826 759 int i;
827 760 ibt_status_t status;
828 761 uint8_t nports = 0;
829 762 ibcm_hca_info_t *hcap;
830 763 ibt_hca_attr_t hca_attrs;
831 764
832 765 IBTF_DPRINTF_L3(cmlog, "ibcm_hca_attach: guid = 0x%llX", hcaguid);
833 766
834 767 ASSERT(MUTEX_HELD(&ibcm_global_hca_lock));
835 768
836 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hcap))
837 -
838 769 status = ibt_query_hca_byguid(hcaguid, &hca_attrs);
839 770 if (status != IBT_SUCCESS) {
840 771 IBTF_DPRINTF_L2(cmlog, "ibcm_hca_attach: "
841 772 "ibt_query_hca_byguid failed = %d", status);
842 773 return;
843 774 }
844 775 nports = hca_attrs.hca_nports;
845 776
846 777 IBTF_DPRINTF_L4(cmlog, "ibcm_hca_attach: num ports = %x", nports);
847 778
848 779 if ((hcap = ibcm_add_hca_entry(hcaguid, nports)) == NULL)
849 780 return;
850 781
851 782 hcap->hca_guid = hcaguid; /* Set GUID */
852 783 hcap->hca_num_ports = nports; /* Set number of ports */
853 784
854 785 if (ibcm_init_hca_ids(hcap) != IBCM_SUCCESS) {
855 786 ibcm_delete_hca_entry(hcap);
856 787 return;
857 788 }
858 789
859 790 /* Store the static hca attribute data */
860 791 hcap->hca_caps = hca_attrs.hca_flags;
861 792 hcap->hca_vendor_id = hca_attrs.hca_vendor_id;
862 793 hcap->hca_device_id = hca_attrs.hca_device_id;
863 794 hcap->hca_ack_delay = hca_attrs.hca_local_ack_delay;
864 795 hcap->hca_max_rdma_in_qp = hca_attrs.hca_max_rdma_in_qp;
865 796 hcap->hca_max_rdma_out_qp = hca_attrs.hca_max_rdma_out_qp;
866 797
867 798 /* loop thru nports and initialize IBMF handles */
868 799 for (i = 0; i < hcap->hca_num_ports; i++) {
869 800 status = ibt_get_port_state_byguid(hcaguid, i + 1, NULL, NULL);
870 801 if (status != IBT_SUCCESS) {
871 802 IBTF_DPRINTF_L2(cmlog, "ibcm_hca_attach: "
872 803 "port_num %d state DOWN", i + 1);
873 804 }
874 805
875 806 hcap->hca_port_info[i].port_hcap = hcap;
876 807 hcap->hca_port_info[i].port_num = i+1;
877 808
878 809 if (ibcm_hca_init_port(hcap, i) != IBT_SUCCESS)
879 810 IBTF_DPRINTF_L2(cmlog, "ibcm_hca_attach: "
880 811 "ibcm_hca_init_port failed %d port_num %d",
881 812 status, i+1);
882 813 }
883 814
884 815 /* create the "active" CM AVL tree */
885 816 avl_create(&hcap->hca_active_tree, ibcm_active_node_compare,
886 817 sizeof (ibcm_state_data_t),
887 818 offsetof(struct ibcm_state_data_s, avl_active_link));
888 819
889 820 /* create the "passive" CM AVL tree */
890 821 avl_create(&hcap->hca_passive_tree, ibcm_passive_node_compare,
891 822 sizeof (ibcm_state_data_t),
892 823 offsetof(struct ibcm_state_data_s, avl_passive_link));
893 824
894 825 /* create the "passive comid" CM AVL tree */
895 826 avl_create(&hcap->hca_passive_comid_tree,
896 827 ibcm_passive_comid_node_compare,
897 828 sizeof (ibcm_state_data_t),
898 829 offsetof(struct ibcm_state_data_s, avl_passive_comid_link));
899 830
900 831 /*
901 832 * Mark the state of the HCA to "attach" only at the end
902 833 * Now CM starts accepting incoming MADs and client API calls
903 834 */
904 835 hcap->hca_state = IBCM_HCA_ACTIVE;
905 836
906 - _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*hcap))
907 -
908 837 IBTF_DPRINTF_L3(cmlog, "ibcm_hca_attach: ATTACH Done");
909 838 }
910 839
911 840 /*
912 841 * ibcm_hca_detach():
913 842 * Called as an asynchronous event to notify CM of a detach of HCA.
914 843 * Here ibcm_hca_info_t is freed up and all fields that
915 844 * were initialized earlier are cleaned up
916 845 *
917 846 * Arguments: (WILL CHANGE BASED ON ASYNC EVENT CODE)
918 847 * hca_guid - HCA's guid
919 848 *
920 849 * Return values:
921 850 * IBCM_SUCCESS - able to detach HCA
922 851 * IBCM_FAILURE - failed to detach HCA
923 852 */
924 853 static ibcm_status_t
925 854 ibcm_hca_detach(ibcm_hca_info_t *hcap)
926 855 {
927 856 int port_index, i;
928 857 ibcm_status_t status = IBCM_SUCCESS;
929 858 clock_t absolute_time;
930 859
931 860 IBTF_DPRINTF_L3(cmlog, "ibcm_hca_detach: hcap = 0x%p guid = 0x%llX",
932 861 hcap, hcap->hca_guid);
933 862
934 863 ASSERT(MUTEX_HELD(&ibcm_global_hca_lock));
935 864
936 865 /*
937 866 * Declare hca is going away to all CM clients. Wait until the
938 867 * access count becomes zero.
939 868 */
940 869 hcap->hca_state = IBCM_HCA_NOT_ACTIVE;
941 870
942 871 /* wait on response CV */
943 872 absolute_time = ddi_get_lbolt() +
944 873 drv_usectohz(ibcm_wait_for_acc_cnt_timeout);
945 874
946 875 while (hcap->hca_acc_cnt > 0)
947 876 if (cv_timedwait(&ibcm_global_hca_cv, &ibcm_global_hca_lock,
948 877 absolute_time) == -1)
949 878 break;
950 879
951 880 if (hcap->hca_acc_cnt != 0) {
952 881 /* We got a timeout */
953 882 IBTF_DPRINTF_L2(cmlog, "ibcm_hca_detach: Aborting due"
954 883 " to timeout on hca_acc_cnt %u, \n Some CM Clients are "
955 884 "still active, looks like we need to wait some more time "
956 885 "(ibcm_wait_for_acc_cnt_timeout).", hcap->hca_acc_cnt);
957 886 hcap->hca_state = IBCM_HCA_ACTIVE;
958 887 return (IBCM_FAILURE);
959 888 }
960 889
961 890 /*
962 891 * First make sure, there are no active users of ibma handles,
963 892 * and then de-register handles.
964 893 */
965 894
966 895 /* make sure that there are no "Service"s registered w/ this HCA. */
967 896 if (hcap->hca_svc_cnt != 0) {
968 897 IBTF_DPRINTF_L2(cmlog, "ibcm_hca_detach: "
969 898 "Active services still there %d", hcap->hca_svc_cnt);
970 899 hcap->hca_state = IBCM_HCA_ACTIVE;
971 900 return (IBCM_FAILURE);
972 901 }
973 902
974 903 if (ibcm_check_sidr_clean(hcap) != IBCM_SUCCESS) {
975 904 IBTF_DPRINTF_L2(cmlog, "ibcm_hca_detach:"
976 905 "There are active SIDR operations");
977 906 hcap->hca_state = IBCM_HCA_ACTIVE;
978 907 return (IBCM_FAILURE);
979 908 }
980 909
981 910 if (ibcm_check_avl_clean(hcap) != IBCM_SUCCESS) {
982 911 IBTF_DPRINTF_L2(cmlog, "ibcm_hca_detach: "
983 912 "There are active RC connections");
984 913 hcap->hca_state = IBCM_HCA_ACTIVE;
985 914 return (IBCM_FAILURE);
986 915 }
987 916
988 917 /*
989 918 * Now, wait until all rc and sidr stateps go away
990 919 * All these stateps must be short lived ones, waiting to be cleaned
991 920 * up after some timeout value, based on the current state.
992 921 */
993 922 IBTF_DPRINTF_L3(cmlog, "ibcm_hca_detach:hca_guid = 0x%llX res_cnt = %d",
994 923 hcap->hca_guid, hcap->hca_res_cnt);
995 924
996 925 while (hcap->hca_res_cnt > 0)
997 926 cv_wait(&ibcm_global_hca_cv, &ibcm_global_hca_lock);
998 927
999 928 /* Re-assert the while loop step above */
1000 929 ASSERT(hcap->hca_sidr_list == NULL);
1001 930 avl_destroy(&hcap->hca_active_tree);
1002 931 avl_destroy(&hcap->hca_passive_tree);
1003 932 avl_destroy(&hcap->hca_passive_comid_tree);
1004 933
1005 934 /*
1006 935 * Unregister all ports from IBMA
1007 936 * If there is a failure, re-initialize any free'd ibma handles. This
1008 937 * is required to receive the incoming mads
1009 938 */
1010 939 status = IBCM_SUCCESS;
1011 940 for (port_index = 0; port_index < hcap->hca_num_ports; port_index++) {
1012 941 if ((status = ibcm_hca_fini_port(hcap, port_index)) !=
1013 942 IBCM_SUCCESS) {
1014 943 IBTF_DPRINTF_L2(cmlog, "ibcm_hca_detach: "
1015 944 "Failed to free IBMA Handle for port_num %d",
1016 945 port_index + 1);
1017 946 break;
1018 947 }
1019 948 }
1020 949
1021 950 /* If detach fails, re-initialize ibma handles for incoming mads */
1022 951 if (status != IBCM_SUCCESS) {
1023 952 for (i = 0; i < port_index; i++) {
1024 953 if (ibcm_hca_init_port(hcap, i) != IBT_SUCCESS)
1025 954 IBTF_DPRINTF_L2(cmlog, "ibcm_hca_detach: "
1026 955 "Failed to re-allocate IBMA Handles for"
1027 956 " port_num %d", port_index + 1);
1028 957 }
1029 958 hcap->hca_state = IBCM_HCA_ACTIVE;
1030 959 return (IBCM_FAILURE);
1031 960 }
1032 961
1033 962 ibcm_fini_hca_ids(hcap);
1034 963 ibcm_delete_hca_entry(hcap);
1035 964
1036 965 IBTF_DPRINTF_L3(cmlog, "ibcm_hca_detach: DETACH succeeded");
1037 966 return (IBCM_SUCCESS);
1038 967 }
1039 968
1040 969 /* Checks, if there are any active sidr state entries in the specified hca */
1041 970 static ibcm_status_t
1042 971 ibcm_check_sidr_clean(ibcm_hca_info_t *hcap)
1043 972 {
1044 973 ibcm_ud_state_data_t *usp;
1045 974 uint32_t transient_cnt = 0;
1046 975
1047 976 IBTF_DPRINTF_L5(cmlog, "ibcm_check_sidr_clean:");
1048 977
1049 978 rw_enter(&hcap->hca_sidr_list_lock, RW_WRITER);
1050 979 usp = hcap->hca_sidr_list; /* Point to the list */
1051 980 while (usp != NULL) {
1052 981 mutex_enter(&usp->ud_state_mutex);
1053 982 if ((usp->ud_state != IBCM_STATE_SIDR_REP_SENT) &&
1054 983 (usp->ud_state != IBCM_STATE_TIMED_OUT) &&
1055 984 (usp->ud_state != IBCM_STATE_DELETE)) {
1056 985
1057 986 IBTF_DPRINTF_L3(cmlog, "ibcm_check_sidr_clean:"
1058 987 "usp = %p not in transient state = %d", usp,
1059 988 usp->ud_state);
1060 989
1061 990 mutex_exit(&usp->ud_state_mutex);
1062 991 rw_exit(&hcap->hca_sidr_list_lock);
1063 992 return (IBCM_FAILURE);
1064 993 } else {
1065 994 mutex_exit(&usp->ud_state_mutex);
1066 995 ++transient_cnt;
1067 996 }
1068 997
1069 998 usp = usp->ud_nextp;
1070 999 }
1071 1000 rw_exit(&hcap->hca_sidr_list_lock);
1072 1001
1073 1002 IBTF_DPRINTF_L4(cmlog, "ibcm_check_sidr_clean: transient_cnt %d",
1074 1003 transient_cnt);
1075 1004
1076 1005 return (IBCM_SUCCESS);
1077 1006 }
1078 1007
1079 1008 /* Checks, if there are any active rc state entries, in the specified hca */
1080 1009 static ibcm_status_t
1081 1010 ibcm_check_avl_clean(ibcm_hca_info_t *hcap)
1082 1011
1083 1012 {
1084 1013 ibcm_state_data_t *sp;
1085 1014 avl_tree_t *avl_tree;
1086 1015 uint32_t transient_cnt = 0;
1087 1016
1088 1017 IBTF_DPRINTF_L5(cmlog, "ibcm_check_avl_clean:");
1089 1018 /*
1090 1019 * Both the trees ie., active and passive must reference to all
1091 1020 * statep's, so let's use one
1092 1021 */
1093 1022 avl_tree = &hcap->hca_active_tree;
1094 1023
1095 1024 rw_enter(&hcap->hca_state_rwlock, RW_WRITER);
1096 1025
1097 1026 for (sp = avl_first(avl_tree); sp != NULL;
1098 1027 sp = avl_walk(avl_tree, sp, AVL_AFTER)) {
1099 1028 mutex_enter(&sp->state_mutex);
1100 1029 if ((sp->state != IBCM_STATE_TIMEWAIT) &&
1101 1030 (sp->state != IBCM_STATE_REJ_SENT) &&
1102 1031 (sp->state != IBCM_STATE_DELETE)) {
1103 1032 IBTF_DPRINTF_L3(cmlog, "ibcm_check_avl_clean: "
1104 1033 "sp = %p not in transient state = %d", sp,
1105 1034 sp->state);
1106 1035 mutex_exit(&sp->state_mutex);
1107 1036 rw_exit(&hcap->hca_state_rwlock);
1108 1037 return (IBCM_FAILURE);
1109 1038 } else {
1110 1039 mutex_exit(&sp->state_mutex);
1111 1040 ++transient_cnt;
1112 1041 }
1113 1042 }
1114 1043
1115 1044 rw_exit(&hcap->hca_state_rwlock);
1116 1045
1117 1046 IBTF_DPRINTF_L4(cmlog, "ibcm_check_avl_clean: transient_cnt %d",
1118 1047 transient_cnt);
1119 1048
1120 1049 return (IBCM_SUCCESS);
1121 1050 }
1122 1051
1123 1052 /* Adds a new entry into CM's global hca list, if hca_guid is not there yet */
1124 1053 static ibcm_hca_info_t *
1125 1054 ibcm_add_hca_entry(ib_guid_t hcaguid, uint_t nports)
1126 1055 {
1127 1056 ibcm_hca_info_t *hcap;
1128 1057
1129 1058 IBTF_DPRINTF_L5(cmlog, "ibcm_add_hca_entry: guid = 0x%llX",
1130 1059 hcaguid);
1131 1060
1132 1061 ASSERT(MUTEX_HELD(&ibcm_global_hca_lock));
1133 1062
1134 1063 /*
1135 1064 * Check if this hca_guid already in the list
1136 1065 * If yes, then ignore this and return NULL
1137 1066 */
1138 1067
1139 1068 hcap = ibcm_hca_listp;
1140 1069
1141 1070 /* search for this HCA */
1142 1071 while (hcap != NULL) {
1143 1072 if (hcap->hca_guid == hcaguid) {
1144 1073 /* already exists */
1145 1074 IBTF_DPRINTF_L2(cmlog, "ibcm_add_hca_entry: "
1146 1075 "hcap %p guid 0x%llX, entry already exists !!",
1147 1076 hcap, hcap->hca_guid);
1148 1077 return (NULL);
1149 1078 }
1150 1079 hcap = hcap->hca_next;
1151 1080 }
1152 1081
1153 1082 /* Allocate storage for the new HCA entry found */
1154 1083 hcap = kmem_zalloc(sizeof (ibcm_hca_info_t) +
1155 1084 (nports - 1) * sizeof (ibcm_port_info_t), KM_SLEEP);
1156 1085
1157 1086 /* initialize RW lock */
1158 1087 rw_init(&hcap->hca_state_rwlock, NULL, RW_DRIVER, NULL);
1159 1088 /* initialize SIDR list lock */
1160 1089 rw_init(&hcap->hca_sidr_list_lock, NULL, RW_DRIVER, NULL);
1161 1090 /* Insert "hcap" into the global HCA list maintained by CM */
1162 1091 hcap->hca_next = ibcm_hca_listp;
1163 1092 ibcm_hca_listp = hcap;
1164 1093
1165 1094 IBTF_DPRINTF_L5(cmlog, "ibcm_add_hca_entry: done hcap = 0x%p", hcap);
1166 1095
1167 1096 return (hcap);
1168 1097
1169 1098 }
1170 1099
1171 1100 /* deletes the given ibcm_hca_info_t from CM's global hca list */
1172 1101 void
1173 1102 ibcm_delete_hca_entry(ibcm_hca_info_t *hcap)
1174 1103 {
1175 1104 ibcm_hca_info_t *headp, *prevp = NULL;
1176 1105
1177 1106 /* ibcm_hca_global_lock is held */
1178 1107 IBTF_DPRINTF_L5(cmlog, "ibcm_delete_hca_entry: guid = 0x%llX "
1179 1108 "hcap = 0x%p", hcap->hca_guid, hcap);
1180 1109
1181 1110 ASSERT(MUTEX_HELD(&ibcm_global_hca_lock));
1182 1111
1183 1112 headp = ibcm_hca_listp;
1184 1113 while (headp != NULL) {
1185 1114 if (headp == hcap) {
1186 1115 IBTF_DPRINTF_L3(cmlog, "ibcm_delete_hca_entry: "
1187 1116 "deleting hcap %p hcaguid %llX", hcap,
1188 1117 hcap->hca_guid);
1189 1118 if (prevp) {
1190 1119 prevp->hca_next = headp->hca_next;
1191 1120 } else {
1192 1121 prevp = headp->hca_next;
1193 1122 ibcm_hca_listp = prevp;
1194 1123 }
1195 1124 rw_destroy(&hcap->hca_state_rwlock);
1196 1125 rw_destroy(&hcap->hca_sidr_list_lock);
1197 1126 kmem_free(hcap, sizeof (ibcm_hca_info_t) +
1198 1127 (hcap->hca_num_ports - 1) *
1199 1128 sizeof (ibcm_port_info_t));
1200 1129 return;
1201 1130 }
1202 1131
1203 1132 prevp = headp;
1204 1133 headp = headp->hca_next;
1205 1134 }
1206 1135 }
1207 1136
1208 1137 /*
1209 1138 * ibcm_find_hca_entry:
1210 1139 * Given a HCA's GUID find out ibcm_hca_info_t entry for that HCA
1211 1140 * This entry can be then used to access AVL tree/SIDR list etc.
1212 1141 * If entry exists and in HCA ATTACH state, then hca's ref cnt is
1213 1142 * incremented and entry returned. Else NULL returned.
1214 1143 *
1215 1144 * All functions that use ibcm_find_hca_entry and get a non-NULL
1216 1145 * return values must call ibcm_dec_hca_acc_cnt to decrement the
1217 1146 * respective hca ref cnt. There shouldn't be any usage of
1218 1147 * ibcm_hca_info_t * returned from ibcm_find_hca_entry,
1219 1148 * after decrementing the hca_acc_cnt
1220 1149 *
1221 1150 * INPUTS:
1222 1151 * hca_guid - HCA's guid
1223 1152 *
1224 1153 * RETURN VALUE:
1225 1154 * hcap - if a match is found, else NULL
1226 1155 */
1227 1156 ibcm_hca_info_t *
1228 1157 ibcm_find_hca_entry(ib_guid_t hca_guid)
1229 1158 {
1230 1159 ibcm_hca_info_t *hcap;
1231 1160
1232 1161 IBTF_DPRINTF_L5(cmlog, "ibcm_find_hca_entry: guid = 0x%llX", hca_guid);
1233 1162
1234 1163 mutex_enter(&ibcm_global_hca_lock);
1235 1164
1236 1165 hcap = ibcm_hca_listp;
1237 1166 /* search for this HCA */
1238 1167 while (hcap != NULL) {
1239 1168 if (hcap->hca_guid == hca_guid)
1240 1169 break;
1241 1170 hcap = hcap->hca_next;
1242 1171 }
1243 1172
1244 1173 /* if no hcap for the hca_guid, return NULL */
1245 1174 if (hcap == NULL) {
1246 1175 mutex_exit(&ibcm_global_hca_lock);
1247 1176 return (NULL);
1248 1177 }
1249 1178
1250 1179 /* return hcap, only if it valid to use */
1251 1180 if (hcap->hca_state == IBCM_HCA_ACTIVE) {
1252 1181 ++(hcap->hca_acc_cnt);
1253 1182
1254 1183 IBTF_DPRINTF_L5(cmlog, "ibcm_find_hca_entry: "
1255 1184 "found hcap = 0x%p hca_acc_cnt %u", hcap,
1256 1185 hcap->hca_acc_cnt);
1257 1186
1258 1187 mutex_exit(&ibcm_global_hca_lock);
1259 1188 return (hcap);
1260 1189 } else {
1261 1190 mutex_exit(&ibcm_global_hca_lock);
1262 1191
1263 1192 IBTF_DPRINTF_L2(cmlog, "ibcm_find_hca_entry: "
1264 1193 "found hcap = 0x%p not in active state", hcap);
1265 1194 return (NULL);
1266 1195 }
1267 1196 }
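
A minimal sketch of the lookup contract described in the block comment above; the caller shown here is hypothetical, but every real caller pairs the find with ibcm_dec_hca_acc_cnt() in exactly this way:

	/* Hypothetical caller showing the find/decrement pairing. */
	ibcm_hca_info_t	*hcap;

	hcap = ibcm_find_hca_entry(hca_guid);	/* takes an hca_acc_cnt hold */
	if (hcap == NULL)
		return (IBCM_FAILURE);	/* unknown GUID, or HCA not ACTIVE */

	/* ... use hcap; the hold keeps the HCA entry from detaching ... */

	ibcm_dec_hca_acc_cnt(hcap);	/* drop the hold; hcap unusable after this */
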
1268 1197
1269 1198 /*
1270 1199 * Searches for ibcm_hca_info_t entry based on hca_guid, but doesn't increment
1271 1200 * the hca's reference count. This function is used, where the calling context
1272 1201 * is attempting to delete hcap itself and hence acc_cnt cannot be incremented
1273 1202 * OR assumes that valid hcap must be available in ibcm's global hca list.
1274 1203 */
1275 1204 ibcm_hca_info_t *
1276 1205 ibcm_find_hcap_entry(ib_guid_t hca_guid)
1277 1206 {
1278 1207 ibcm_hca_info_t *hcap;
1279 1208
1280 1209 IBTF_DPRINTF_L5(cmlog, "ibcm_find_hcap_entry: guid = 0x%llX", hca_guid);
1281 1210
1282 1211 ASSERT(MUTEX_HELD(&ibcm_global_hca_lock));
1283 1212
1284 1213 hcap = ibcm_hca_listp;
1285 1214 /* search for this HCA */
1286 1215 while (hcap != NULL) {
1287 1216 if (hcap->hca_guid == hca_guid)
1288 1217 break;
1289 1218 hcap = hcap->hca_next;
1290 1219 }
1291 1220
1292 1221 if (hcap == NULL)
1293 1222 IBTF_DPRINTF_L2(cmlog, "ibcm_find_hcap_entry: No hcap found for"
1294 1223 " hca_guid 0x%llX", hca_guid);
1295 1224 else
1296 1225 IBTF_DPRINTF_L5(cmlog, "ibcm_find_hcap_entry: hcap found for"
1297 1226 " hca_guid 0x%llX", hca_guid);
1298 1227
1299 1228 return (hcap);
1300 1229 }
1301 1230
1302 1231 /* increment the hca's temporary reference count */
1303 1232 ibcm_status_t
1304 1233 ibcm_inc_hca_acc_cnt(ibcm_hca_info_t *hcap)
1305 1234 {
1306 1235 mutex_enter(&ibcm_global_hca_lock);
1307 1236 if (hcap->hca_state == IBCM_HCA_ACTIVE) {
1308 1237 ++(hcap->hca_acc_cnt);
1309 1238 IBTF_DPRINTF_L5(cmlog, "ibcm_inc_hca_acc_cnt: "
1310 1239 "hcap = 0x%p acc_cnt = %d ", hcap, hcap->hca_acc_cnt);
1311 1240 mutex_exit(&ibcm_global_hca_lock);
1312 1241 return (IBCM_SUCCESS);
1313 1242 } else {
1314 1243 IBTF_DPRINTF_L2(cmlog, "ibcm_inc_hca_acc_cnt: "
1315 1244 "hcap INACTIVE 0x%p acc_cnt = %d ", hcap,
1316 1245 hcap->hca_acc_cnt);
1317 1246 mutex_exit(&ibcm_global_hca_lock);
1318 1247 return (IBCM_FAILURE);
1319 1248 }
1320 1249 }
1321 1250
1322 1251 /* decrement the hca's ref count, and wake up any waiting threads */
1323 1252 void
1324 1253 ibcm_dec_hca_acc_cnt(ibcm_hca_info_t *hcap)
1325 1254 {
1326 1255 mutex_enter(&ibcm_global_hca_lock);
1327 1256 ASSERT(hcap->hca_acc_cnt > 0);
1328 1257 --(hcap->hca_acc_cnt);
1329 1258 IBTF_DPRINTF_L5(cmlog, "ibcm_dec_hca_acc_cnt: hcap = 0x%p "
1330 1259 "acc_cnt = %d", hcap, hcap->hca_acc_cnt);
1331 1260 if ((hcap->hca_state == IBCM_HCA_NOT_ACTIVE) &&
1332 1261 (hcap->hca_acc_cnt == 0)) {
1333 1262 IBTF_DPRINTF_L3(cmlog, "ibcm_dec_hca_acc_cnt: "
1334 1263 "cv_broadcast for hcap = 0x%p", hcap);
1335 1264 cv_broadcast(&ibcm_global_hca_cv);
1336 1265 }
1337 1266 mutex_exit(&ibcm_global_hca_lock);
1338 1267 }
1339 1268
1340 1269 /* increment the hca's resource count */
1341 1270 void
1342 1271 ibcm_inc_hca_res_cnt(ibcm_hca_info_t *hcap)
1343 1272
1344 1273 {
1345 1274 mutex_enter(&ibcm_global_hca_lock);
1346 1275 ++(hcap->hca_res_cnt);
1347 1276 IBTF_DPRINTF_L5(cmlog, "ibcm_inc_hca_res_cnt: hcap = 0x%p "
1348 1277 "ref_cnt = %d", hcap, hcap->hca_res_cnt);
1349 1278 mutex_exit(&ibcm_global_hca_lock);
1350 1279 }
1351 1280
1352 1281 /* decrement the hca's resource count, and wake up any waiting threads */
1353 1282 void
1354 1283 ibcm_dec_hca_res_cnt(ibcm_hca_info_t *hcap)
1355 1284 {
1356 1285 mutex_enter(&ibcm_global_hca_lock);
1357 1286 ASSERT(hcap->hca_res_cnt > 0);
1358 1287 --(hcap->hca_res_cnt);
1359 1288 IBTF_DPRINTF_L5(cmlog, "ibcm_dec_hca_res_cnt: hcap = 0x%p "
1360 1289 "ref_cnt = %d", hcap, hcap->hca_res_cnt);
1361 1290 if ((hcap->hca_state == IBCM_HCA_NOT_ACTIVE) &&
1362 1291 (hcap->hca_res_cnt == 0)) {
1363 1292 IBTF_DPRINTF_L3(cmlog, "ibcm_dec_hca_res_cnt: "
1364 1293 "cv_broadcast for hcap = 0x%p", hcap);
1365 1294 cv_broadcast(&ibcm_global_hca_cv);
1366 1295 }
1367 1296 mutex_exit(&ibcm_global_hca_lock);
1368 1297 }
1369 1298
1370 1299 /* increment the hca's service count */
1371 1300 void
1372 1301 ibcm_inc_hca_svc_cnt(ibcm_hca_info_t *hcap)
1373 1302
1374 1303 {
1375 1304 mutex_enter(&ibcm_global_hca_lock);
1376 1305 ++(hcap->hca_svc_cnt);
1377 1306 IBTF_DPRINTF_L5(cmlog, "ibcm_inc_hca_svc_cnt: hcap = 0x%p "
1378 1307 "svc_cnt = %d", hcap, hcap->hca_svc_cnt);
1379 1308 mutex_exit(&ibcm_global_hca_lock);
1380 1309 }
1381 1310
1382 1311 /* decrement the hca's service count */
1383 1312 void
1384 1313 ibcm_dec_hca_svc_cnt(ibcm_hca_info_t *hcap)
1385 1314 {
1386 1315 mutex_enter(&ibcm_global_hca_lock);
1387 1316 ASSERT(hcap->hca_svc_cnt > 0);
1388 1317 --(hcap->hca_svc_cnt);
1389 1318 IBTF_DPRINTF_L5(cmlog, "ibcm_dec_hca_svc_cnt: hcap = 0x%p "
1390 1319 "svc_cnt = %d", hcap, hcap->hca_svc_cnt);
1391 1320 mutex_exit(&ibcm_global_hca_lock);
1392 1321 }
1393 1322
1394 1323 /*
1395 1324 * The following code manages three classes of requests that CM makes to
1396 1325 * the fabric. Those three classes are SA_ACCESS, REQ/REP/RTU, and DREQ/DREP.
1397 1326 * The main issue is that the fabric can become very busy, and the CM
1398 1327 * protocols rely on responses being made based on a predefined timeout
1399 1328 * value. By managing how many simultaneous sessions are allowed, there
1400 1329 * is observed extremely high reliability of CM protocol succeeding when
1401 1330 * it should.
1402 1331 *
1403 1332 * SA_ACCESS and DREQ/DREP are managed at the thread level, whereby the
1404 1333 * thread blocks until there are less than some number of threads doing
1405 1334 * similar requests.
1406 1335 *
1407 1336 * REQ/REP/RTU requests beyond a given limit are added to a list,
1408 1337 * allowing the thread to return immediately to its caller in the
1409 1338 * case where the "mode" is IBT_NONBLOCKING. This is the mode used
1410 1339 * by uDAPL and seems to be an important feature/behavior.
1411 1340 */
1412 1341
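
For the two thread-level classes (SA_ACCESS and DREQ/DREP), the gating described above amounts to bracketing the fabric operation with ibcm_flow_enter()/ibcm_flow_exit(), defined later in this file. A sketch of that pattern, not a quote from any real caller in CM:

	ibcm_flow_enter(&ibcm_close_flow);	/* may sleep until a slot frees up */
	/* ... post the DREQ and wait for the DREP or a timeout ... */
	ibcm_flow_exit(&ibcm_close_flow);	/* release the slot, maybe wake waiters */
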
1413 1342 static int
1414 1343 ibcm_ok_to_start(struct ibcm_open_s *openp)
1415 1344 {
1416 1345 return (openp->sends < openp->sends_hiwat &&
1417 1346 openp->in_progress < openp->in_progress_max);
1418 1347 }
1419 1348
1420 1349 void
1421 1350 ibcm_open_done(ibcm_state_data_t *statep)
1422 1351 {
1423 1352 int run;
1424 1353 ibcm_state_data_t **linkp, *tmp;
1425 1354
1426 1355 ASSERT(MUTEX_HELD(&statep->state_mutex));
1427 1356 if (statep->open_flow == 1) {
1428 1357 statep->open_flow = 0;
1429 1358 mutex_enter(&ibcm_open.mutex);
1430 1359 if (statep->open_link == NULL) {
1431 1360 ibcm_open.in_progress--;
1432 1361 run = ibcm_ok_to_start(&ibcm_open);
1433 1362 } else {
1434 1363 ibcm_open.queued--;
1435 1364 linkp = &ibcm_open.head.open_link;
1436 1365 while (*linkp != statep)
1437 1366 linkp = &((*linkp)->open_link);
1438 1367 *linkp = statep->open_link;
1439 1368 statep->open_link = NULL;
1440 1369 /*
1441 1370 * If we remove what tail pointed to, we need
1442 1371 * to reassign tail (it is never NULL).
1443 1372 * tail points to head for the empty list.
1444 1373 */
1445 1374 if (ibcm_open.tail == statep) {
1446 1375 tmp = &ibcm_open.head;
1447 1376 while (tmp->open_link != &ibcm_open.head)
1448 1377 tmp = tmp->open_link;
1449 1378 ibcm_open.tail = tmp;
1450 1379 }
1451 1380 run = 0;
1452 1381 }
1453 1382 mutex_exit(&ibcm_open.mutex);
1454 1383 if (run)
1455 1384 ibcm_run_tlist_thread();
1456 1385 }
1457 1386 }
1458 1387
1459 1388 /* dtrace */
1460 1389 void
1461 1390 ibcm_open_wait(hrtime_t delta)
1462 1391 {
1463 1392 if (delta > 1000000)
1464 1393 IBTF_DPRINTF_L2(cmlog, "ibcm_open_wait: flow more %lld", delta);
1465 1394 }
1466 1395
1467 1396 void
1468 1397 ibcm_open_start(ibcm_state_data_t *statep)
1469 1398 {
1470 1399 ibcm_insert_trace(statep, IBCM_TRACE_OUTGOING_REQ);
1471 1400
1472 1401 mutex_enter(&statep->state_mutex);
1473 1402 ibcm_open_wait(gethrtime() - statep->post_time);
1474 1403 mutex_exit(&statep->state_mutex);
1475 1404
1476 1405 ibcm_post_rc_mad(statep, statep->stored_msg, ibcm_post_req_complete,
1477 1406 statep);
1478 1407
1479 1408 mutex_enter(&statep->state_mutex);
1480 1409 IBCM_REF_CNT_DECR(statep);
1481 1410 mutex_exit(&statep->state_mutex);
1482 1411 }
1483 1412
1484 1413 void
1485 1414 ibcm_open_enqueue(ibcm_state_data_t *statep)
1486 1415 {
1487 1416 int run;
1488 1417
1489 1418 mutex_enter(&statep->state_mutex);
1490 1419 statep->post_time = gethrtime();
1491 1420 mutex_exit(&statep->state_mutex);
1492 1421 mutex_enter(&ibcm_open.mutex);
1493 1422 if (ibcm_open.queued == 0 && ibcm_ok_to_start(&ibcm_open)) {
1494 1423 ibcm_open.in_progress++;
1495 1424 mutex_exit(&ibcm_open.mutex);
1496 1425 ibcm_open_start(statep);
1497 1426 } else {
1498 1427 ibcm_open.queued++;
1499 1428 statep->open_link = &ibcm_open.head;
1500 1429 ibcm_open.tail->open_link = statep;
1501 1430 ibcm_open.tail = statep;
1502 1431 run = ibcm_ok_to_start(&ibcm_open);
1503 1432 mutex_exit(&ibcm_open.mutex);
1504 1433 if (run)
1505 1434 ibcm_run_tlist_thread();
1506 1435 }
1507 1436 }
1508 1437
1509 1438 ibcm_state_data_t *
1510 1439 ibcm_open_dequeue(void)
1511 1440 {
1512 1441 ibcm_state_data_t *statep;
1513 1442
1514 1443 ASSERT(MUTEX_HELD(&ibcm_open.mutex));
1515 1444 ibcm_open.queued--;
1516 1445 ibcm_open.in_progress++;
1517 1446 statep = ibcm_open.head.open_link;
1518 1447 ibcm_open.head.open_link = statep->open_link;
1519 1448 statep->open_link = NULL;
1520 1449 /*
1521 1450 * If we remove what tail pointed to, we need
1522 1451 * to reassign tail (it is never NULL).
1523 1452 * tail points to head for the empty list.
1524 1453 */
1525 1454 if (ibcm_open.tail == statep)
1526 1455 ibcm_open.tail = &ibcm_open.head;
1527 1456 return (statep);
1528 1457 }
1529 1458
1530 1459 void
1531 1460 ibcm_check_for_opens(void)
1532 1461 {
1533 1462 ibcm_state_data_t *statep;
1534 1463
1535 1464 mutex_enter(&ibcm_open.mutex);
1536 1465
1537 1466 while (ibcm_open.queued > 0) {
1538 1467 if (ibcm_ok_to_start(&ibcm_open)) {
1539 1468 statep = ibcm_open_dequeue();
1540 1469 mutex_exit(&ibcm_open.mutex);
1541 1470
1542 1471 ibcm_open_start(statep);
1543 1472
1544 1473 mutex_enter(&ibcm_open.mutex);
1545 1474 } else {
1546 1475 break;
1547 1476 }
1548 1477 }
1549 1478 mutex_exit(&ibcm_open.mutex);
1550 1479 }
1551 1480
1552 1481
1553 1482 static void
1554 1483 ibcm_flow_init(ibcm_flow_t *flow, uint_t simul_max)
1555 1484 {
1556 1485 flow->list = NULL;
1557 1486 flow->simul = 0;
1558 1487 flow->waiters_per_chunk = 4;
1559 1488 flow->simul_max = simul_max;
1560 1489 flow->lowat = simul_max - flow->waiters_per_chunk;
1561 1490 flow->lowat_default = flow->lowat;
1562 1491 /* stats */
1563 1492 flow->total = 0;
1564 1493 }
1565 1494
1566 1495 static void
1567 1496 ibcm_rc_flow_control_init(void)
1568 1497 {
1569 1498 mutex_init(&ibcm_open.mutex, NULL, MUTEX_DEFAULT, NULL);
1570 1499 mutex_enter(&ibcm_open.mutex);
1571 1500 ibcm_flow_init(&ibcm_close_flow, ibcm_close_simul_max);
1572 1501 ibcm_flow_init(&ibcm_lapr_flow, ibcm_lapr_simul_max);
1573 1502 ibcm_flow_init(&ibcm_saa_flow, ibcm_saa_simul_max);
1574 1503
1575 1504 ibcm_open.queued = 0;
1576 1505 ibcm_open.exit_deferred = 0;
1577 1506 ibcm_open.in_progress = 0;
1578 1507 ibcm_open.in_progress_max = 16;
1579 1508 ibcm_open.sends = 0;
1580 1509 ibcm_open.sends_max = 0;
1581 1510 ibcm_open.sends_lowat = 8;
1582 1511 ibcm_open.sends_hiwat = 16;
1583 1512 ibcm_open.tail = &ibcm_open.head;
1584 1513 ibcm_open.head.open_link = NULL;
1585 1514 mutex_exit(&ibcm_open.mutex);
1586 1515
1587 1516 mutex_init(&ibcm_close.mutex, NULL, MUTEX_DEFAULT, NULL);
1588 1517 mutex_enter(&ibcm_close.mutex);
1589 1518 ibcm_close.tail = &ibcm_close.head;
1590 1519 ibcm_close.head.close_link = NULL;
1591 1520 mutex_exit(&ibcm_close.mutex);
1592 1521 }
1593 1522
1594 1523 static void
1595 1524 ibcm_rc_flow_control_fini(void)
1596 1525 {
1597 1526 mutex_destroy(&ibcm_open.mutex);
1598 1527 mutex_destroy(&ibcm_close.mutex);
1599 1528 }
1600 1529
1601 1530 static ibcm_flow1_t *
1602 1531 ibcm_flow_find(ibcm_flow_t *flow)
1603 1532 {
1604 1533 ibcm_flow1_t *flow1;
1605 1534 ibcm_flow1_t *f;
1606 1535
1607 1536 f = flow->list;
1608 1537 if (f) { /* most likely code path */
1609 1538 while (f->link != NULL)
1610 1539 f = f->link;
1611 1540 if (f->waiters < flow->waiters_per_chunk)
1612 1541 return (f);
1613 1542 }
1614 1543
1615 1544 /* There was no flow1 list element ready for another waiter */
1616 1545 mutex_exit(&ibcm_open.mutex);
1617 1546 flow1 = kmem_alloc(sizeof (*flow1), KM_SLEEP);
1618 1547 mutex_enter(&ibcm_open.mutex);
1619 1548
1620 1549 f = flow->list;
1621 1550 if (f) {
1622 1551 while (f->link != NULL)
1623 1552 f = f->link;
1624 1553 if (f->waiters < flow->waiters_per_chunk) {
1625 1554 kmem_free(flow1, sizeof (*flow1));
1626 1555 return (f);
1627 1556 }
1628 1557 f->link = flow1;
1629 1558 } else {
1630 1559 flow->list = flow1;
1631 1560 }
1632 1561 cv_init(&flow1->cv, NULL, CV_DRIVER, NULL);
1633 1562 flow1->waiters = 0;
1634 1563 flow1->link = NULL;
1635 1564 return (flow1);
1636 1565 }
1637 1566
1638 1567 static void
1639 1568 ibcm_flow_enter(ibcm_flow_t *flow)
1640 1569 {
1641 1570 mutex_enter(&ibcm_open.mutex);
1642 1571 if (flow->list == NULL && flow->simul < flow->simul_max) {
1643 1572 flow->simul++;
1644 1573 flow->total++;
1645 1574 mutex_exit(&ibcm_open.mutex);
1646 1575 } else {
1647 1576 ibcm_flow1_t *flow1;
1648 1577
1649 1578 flow1 = ibcm_flow_find(flow);
1650 1579 flow1->waiters++;
1651 1580 cv_wait(&flow1->cv, &ibcm_open.mutex);
1652 1581 if (--flow1->waiters == 0) {
1653 1582 cv_destroy(&flow1->cv);
1654 1583 mutex_exit(&ibcm_open.mutex);
1655 1584 kmem_free(flow1, sizeof (*flow1));
1656 1585 } else
1657 1586 mutex_exit(&ibcm_open.mutex);
1658 1587 }
1659 1588 }
1660 1589
1661 1590 static void
1662 1591 ibcm_flow_exit(ibcm_flow_t *flow)
1663 1592 {
1664 1593 mutex_enter(&ibcm_open.mutex);
1665 1594 if (--flow->simul < flow->lowat) {
1666 1595 if (flow->lowat < flow->lowat_default)
1667 1596 flow->lowat++;
1668 1597 if (flow->list) {
1669 1598 ibcm_flow1_t *flow1;
1670 1599
1671 1600 flow1 = flow->list;
1672 1601 flow->list = flow1->link; /* unlink */
1673 1602 flow1->link = NULL; /* be clean */
1674 1603 flow->total += flow1->waiters;
1675 1604 flow->simul += flow1->waiters;
1676 1605 cv_broadcast(&flow1->cv);
1677 1606 }
1678 1607 }
1679 1608 mutex_exit(&ibcm_open.mutex);
1680 1609 }
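
ibcm_flow_enter() and ibcm_flow_exit() implement an admission throttle: a caller either claims one of simul_max concurrent slots or sleeps on a condition variable until an exiting caller hands capacity back. The driver additionally groups waiters into flow1 chunks of waiters_per_chunk and wakes a whole chunk per exit, and it adapts lowat; the POSIX-threads sketch below shows only the core admission pattern and wakes one waiter per exit. throttle_t and its fields are illustrative names, not the driver's types.

#include <pthread.h>

typedef struct throttle {
	pthread_mutex_t	mutex;
	pthread_cond_t	cv;
	unsigned int	simul;		/* operations currently admitted */
	unsigned int	simul_max;	/* admission limit */
} throttle_t;

static void
throttle_enter(throttle_t *t)
{
	pthread_mutex_lock(&t->mutex);
	while (t->simul >= t->simul_max)
		pthread_cond_wait(&t->cv, &t->mutex);	/* wait for a slot */
	t->simul++;
	pthread_mutex_unlock(&t->mutex);
}

static void
throttle_exit(throttle_t *t)
{
	pthread_mutex_lock(&t->mutex);
	t->simul--;
	pthread_cond_signal(&t->cv);	/* hand the slot to one waiter */
	pthread_mutex_unlock(&t->mutex);
}

A caller brackets the throttled operation with throttle_enter()/throttle_exit(), which is how the ibcm_close_enter()/ibcm_close_exit(), ibcm_lapr_enter()/ibcm_lapr_exit() and ibcm_sa_access_enter()/ibcm_sa_access_exit() wrappers below use ibcm_flow_enter() and ibcm_flow_exit().
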
1681 1610
1682 1611 void
1683 1612 ibcm_flow_inc(void)
1684 1613 {
1685 1614 mutex_enter(&ibcm_open.mutex);
1686 1615 if (++ibcm_open.sends > ibcm_open.sends_max) {
1687 1616 ibcm_open.sends_max = ibcm_open.sends;
1688 1617 IBTF_DPRINTF_L2(cmlog, "ibcm_flow_inc: sends max = %d",
1689 1618 ibcm_open.sends_max);
1690 1619 }
1691 1620 mutex_exit(&ibcm_open.mutex);
1692 1621 }
1693 1622
1694 1623 static void
1695 1624 ibcm_check_send_cmpltn_time(hrtime_t delta, char *event_msg)
1696 1625 {
1697 1626 if (delta > 4000000LL) {
1698 1627 IBTF_DPRINTF_L2(cmlog, "ibcm_check_send_cmpltn_time: "
1699 1628 "%s: %lldns", event_msg, delta);
1700 1629 }
1701 1630 }
1702 1631
1703 1632 void
1704 1633 ibcm_flow_dec(hrtime_t time, char *mad_type)
1705 1634 {
1706 1635 int flow_exit = 0;
1707 1636 int run = 0;
1708 1637
1709 1638 if (ibcm_dtrace)
1710 1639 ibcm_check_send_cmpltn_time(gethrtime() - time, mad_type);
1711 1640 mutex_enter(&ibcm_open.mutex);
1712 1641 ibcm_open.sends--;
1713 1642 if (ibcm_open.sends < ibcm_open.sends_lowat) {
1714 1643 run = ibcm_ok_to_start(&ibcm_open);
1715 1644 if (ibcm_open.exit_deferred) {
1716 1645 ibcm_open.exit_deferred--;
1717 1646 flow_exit = 1;
1718 1647 }
1719 1648 }
1720 1649 mutex_exit(&ibcm_open.mutex);
1721 1650 if (flow_exit)
1722 1651 ibcm_flow_exit(&ibcm_close_flow);
1723 1652 if (run)
1724 1653 ibcm_run_tlist_thread();
1725 1654 }
1726 1655
1727 1656 void
1728 1657 ibcm_close_enqueue(ibcm_state_data_t *statep)
1729 1658 {
1730 1659 mutex_enter(&ibcm_close.mutex);
1731 1660 statep->close_link = NULL;
1732 1661 ibcm_close.tail->close_link = statep;
1733 1662 ibcm_close.tail = statep;
1734 1663 mutex_exit(&ibcm_close.mutex);
1735 1664 ibcm_run_tlist_thread();
1736 1665 }
1737 1666
1738 1667 void
1739 1668 ibcm_check_for_async_close()
1740 1669 {
1741 1670 ibcm_state_data_t *statep;
1742 1671
1743 1672 mutex_enter(&ibcm_close.mutex);
1744 1673
1745 1674 while (ibcm_close.head.close_link) {
1746 1675 statep = ibcm_close.head.close_link;
1747 1676 ibcm_close.head.close_link = statep->close_link;
1748 1677 statep->close_link = NULL;
1749 1678 if (ibcm_close.tail == statep)
1750 1679 ibcm_close.tail = &ibcm_close.head;
1751 1680 mutex_exit(&ibcm_close.mutex);
1752 1681 ibcm_close_start(statep);
1753 1682 mutex_enter(&ibcm_close.mutex);
1754 1683 }
1755 1684 mutex_exit(&ibcm_close.mutex);
1756 1685 }
1757 1686
1758 1687 void
1759 1688 ibcm_close_enter(void)
1760 1689 {
1761 1690 ibcm_flow_enter(&ibcm_close_flow);
1762 1691 }
1763 1692
1764 1693 void
1765 1694 ibcm_close_exit(void)
1766 1695 {
1767 1696 int flow_exit;
1768 1697
1769 1698 mutex_enter(&ibcm_open.mutex);
1770 1699 if (ibcm_open.sends < ibcm_open.sends_lowat ||
1771 1700 ibcm_open.exit_deferred >= 4)
1772 1701 flow_exit = 1;
1773 1702 else {
1774 1703 flow_exit = 0;
1775 1704 ibcm_open.exit_deferred++;
1776 1705 }
1777 1706 mutex_exit(&ibcm_open.mutex);
1778 1707 if (flow_exit)
1779 1708 ibcm_flow_exit(&ibcm_close_flow);
1780 1709 }
1781 1710
1782 1711 /*
1783 1712 * This function needs to be called twice to finish our flow
1784 1713 * control accounting when closing down a connection. One
1785 1714 * call has send_done set to 1, while the other has it set to 0.
1786 1715 * Because of retries, this could get called more than once
1787 1716 * with either 0 or 1, but additional calls have no effect.
1788 1717 */
1789 1718 void
1790 1719 ibcm_close_done(ibcm_state_data_t *statep, int send_done)
1791 1720 {
1792 1721 int flow_exit;
1793 1722
1794 1723 ASSERT(MUTEX_HELD(&statep->state_mutex));
1795 1724 if (statep->close_flow == 1) {
1796 1725 if (send_done)
1797 1726 statep->close_flow = 3;
1798 1727 else
1799 1728 statep->close_flow = 2;
1800 1729 } else if ((send_done && statep->close_flow == 2) ||
1801 1730 (!send_done && statep->close_flow == 3)) {
1802 1731 statep->close_flow = 0;
1803 1732 mutex_enter(&ibcm_open.mutex);
1804 1733 if (ibcm_open.sends < ibcm_open.sends_lowat ||
1805 1734 ibcm_open.exit_deferred >= 4)
1806 1735 flow_exit = 1;
1807 1736 else {
1808 1737 flow_exit = 0;
1809 1738 ibcm_open.exit_deferred++;
1810 1739 }
1811 1740 mutex_exit(&ibcm_open.mutex);
1812 1741 if (flow_exit)
1813 1742 ibcm_flow_exit(&ibcm_close_flow);
1814 1743 }
1815 1744 }
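
The block comment above describes a two-report handshake: close_flow moves from 1 to 3 or 2 when the first of the two completions (MAD send completion versus the protocol-level close) is reported, back to 0 when the matching second report arrives, and the close flow slot is released exactly once at that point; duplicate reports fall through with no effect. The small stand-alone function below captures just that state transition. close_done() here is a hypothetical helper written for illustration, not driver code.

/*
 * close_flow encoding: 1 = no completion reported yet, 3/2 = only the
 * send (3) or only the close (2) has been reported, 0 = both reported.
 * Returns 1 when the caller should release the close flow slot.
 */
static int
close_done(int *close_flow, int send_done)
{
	if (*close_flow == 1) {
		*close_flow = send_done ? 3 : 2;	/* first report */
		return (0);
	}
	if ((send_done && *close_flow == 2) ||
	    (!send_done && *close_flow == 3)) {
		*close_flow = 0;			/* second report */
		return (1);
	}
	return (0);					/* duplicate report */
}
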
1816 1745
1817 1746 void
1818 1747 ibcm_lapr_enter(void)
1819 1748 {
1820 1749 ibcm_flow_enter(&ibcm_lapr_flow);
1821 1750 }
1822 1751
1823 1752 void
1824 1753 ibcm_lapr_exit(void)
1825 1754 {
1826 1755 ibcm_flow_exit(&ibcm_lapr_flow);
1827 1756 }
1828 1757
1829 1758 void
1830 1759 ibcm_sa_access_enter()
1831 1760 {
1832 1761 ibcm_flow_enter(&ibcm_saa_flow);
1833 1762 }
1834 1763
1835 1764 void
1836 1765 ibcm_sa_access_exit()
1837 1766 {
1838 1767 ibcm_flow_exit(&ibcm_saa_flow);
1839 1768 }
1840 1769
1841 1770 static void
1842 1771 ibcm_sm_notice_handler(ibmf_saa_handle_t saa_handle,
1843 1772 ibmf_saa_subnet_event_t saa_event_code,
1844 1773 ibmf_saa_event_details_t *saa_event_details,
1845 1774 void *callback_arg)
1846 1775 {
1847 1776 ibcm_port_info_t *portp = (ibcm_port_info_t *)callback_arg;
1848 1777 ibt_subnet_event_code_t code;
1849 1778 ibt_subnet_event_t event;
1850 1779 uint8_t event_status;
1851 1780
1852 1781 IBTF_DPRINTF_L3(cmlog, "ibcm_sm_notice_handler: saa_hdl %p, code = %d",
1853 1782 saa_handle, saa_event_code);
1854 1783
1855 1784 mutex_enter(&ibcm_sm_notice_serialize_lock);
1856 1785
1857 1786 switch (saa_event_code) {
1858 1787 case IBMF_SAA_EVENT_MCG_CREATED:
1859 1788 code = IBT_SM_EVENT_MCG_CREATED;
1860 1789 break;
1861 1790 case IBMF_SAA_EVENT_MCG_DELETED:
1862 1791 code = IBT_SM_EVENT_MCG_DELETED;
1863 1792 break;
1864 1793 case IBMF_SAA_EVENT_GID_AVAILABLE:
1865 1794 code = IBT_SM_EVENT_GID_AVAIL;
1866 1795 ibcm_path_cache_purge();
1867 1796 break;
1868 1797 case IBMF_SAA_EVENT_GID_UNAVAILABLE:
1869 1798 code = IBT_SM_EVENT_GID_UNAVAIL;
1870 1799 ibcm_path_cache_purge();
1871 1800 break;
1872 1801 case IBMF_SAA_EVENT_SUBSCRIBER_STATUS_CHG:
1873 1802 event_status =
1874 1803 saa_event_details->ie_producer_event_status_mask &
1875 1804 IBMF_SAA_EVENT_STATUS_MASK_PRODUCER_SM;
1876 1805 if (event_status == (portp->port_event_status &
1877 1806 IBMF_SAA_EVENT_STATUS_MASK_PRODUCER_SM)) {
1878 1807 mutex_exit(&ibcm_sm_notice_serialize_lock);
1879 1808 return; /* no change */
1880 1809 }
1881 1810 portp->port_event_status = event_status;
1882 1811 if (event_status == IBMF_SAA_EVENT_STATUS_MASK_PRODUCER_SM)
1883 1812 code = IBT_SM_EVENT_AVAILABLE;
1884 1813 else
1885 1814 code = IBT_SM_EVENT_UNAVAILABLE;
1886 1815 break;
1887 1816 default:
1888 1817 mutex_exit(&ibcm_sm_notice_serialize_lock);
1889 1818 return;
1890 1819 }
1891 1820
1892 1821 mutex_enter(&ibcm_global_hca_lock);
1893 1822
1894 1823 /* don't send the event if we're tearing down */
1895 1824 if (!IBCM_ACCESS_HCA_OK(portp->port_hcap)) {
1896 1825 mutex_exit(&ibcm_global_hca_lock);
1897 1826 mutex_exit(&ibcm_sm_notice_serialize_lock);
1898 1827 return;
1899 1828 }
1900 1829
1901 1830 ++(portp->port_hcap->hca_acc_cnt);
1902 1831 mutex_exit(&ibcm_global_hca_lock);
1903 1832
1904 1833 event.sm_notice_gid = saa_event_details->ie_gid;
1905 1834 ibtl_cm_sm_notice_handler(portp->port_sgid0, code, &event);
1906 1835
1907 1836 mutex_exit(&ibcm_sm_notice_serialize_lock);
1908 1837
1909 1838 ibcm_dec_hca_acc_cnt(portp->port_hcap);
1910 1839 }
1911 1840
1912 1841 void
1913 1842 ibt_register_subnet_notices(ibt_clnt_hdl_t ibt_hdl,
1914 1843 ibt_sm_notice_handler_t sm_notice_handler, void *private)
1915 1844 {
1916 1845 ibcm_port_info_t *portp;
1917 1846 ibcm_hca_info_t *hcap;
1918 1847 uint8_t port;
1919 1848 int num_failed_sgids;
1920 1849 ibtl_cm_sm_init_fail_t *ifail;
1921 1850 ib_gid_t *sgidp;
1922 1851
1923 1852 IBTF_DPRINTF_L3(cmlog, "ibt_register_subnet_notices(%p, %s)",
1924 1853 ibt_hdl, ibtl_cm_get_clnt_name(ibt_hdl));
1925 1854
1926 1855 mutex_enter(&ibcm_sm_notice_serialize_lock);
1927 1856
1928 1857 ibtl_cm_set_sm_notice_handler(ibt_hdl, sm_notice_handler, private);
1929 1858 if (sm_notice_handler == NULL) {
1930 1859 mutex_exit(&ibcm_sm_notice_serialize_lock);
1931 1860 return;
1932 1861 }
1933 1862
1934 1863 /* for each port, if service is not available, make a call */
1935 1864 mutex_enter(&ibcm_global_hca_lock);
1936 1865 num_failed_sgids = 0;
1937 1866 hcap = ibcm_hca_listp;
1938 1867 while (hcap != NULL) {
1939 1868 portp = hcap->hca_port_info;
1940 1869 for (port = 0; port < hcap->hca_num_ports; port++) {
1941 1870 if (!(portp->port_event_status &
1942 1871 IBMF_SAA_EVENT_STATUS_MASK_PRODUCER_SM))
1943 1872 num_failed_sgids++;
1944 1873 portp++;
1945 1874 }
1946 1875 hcap = hcap->hca_next;
1947 1876 }
1948 1877 if (num_failed_sgids != 0) {
1949 1878 ifail = kmem_alloc(sizeof (*ifail) +
1950 1879 (num_failed_sgids - 1) * sizeof (ib_gid_t), KM_SLEEP);
1951 1880 ifail->smf_num_sgids = num_failed_sgids;
1952 1881 ifail->smf_ibt_hdl = ibt_hdl;
1953 1882 sgidp = &ifail->smf_sgid[0];
1954 1883 hcap = ibcm_hca_listp;
1955 1884 while (hcap != NULL) {
1956 1885 portp = hcap->hca_port_info;
1957 1886 for (port = 0; port < hcap->hca_num_ports; port++) {
1958 1887 if (!(portp->port_event_status &
1959 1888 IBMF_SAA_EVENT_STATUS_MASK_PRODUCER_SM))
1960 1889 *sgidp++ = portp->port_sgid0;
1961 1890 portp++;
1962 1891 }
1963 1892 hcap = hcap->hca_next;
1964 1893 }
1965 1894 }
1966 1895 mutex_exit(&ibcm_global_hca_lock);
1967 1896
1968 1897 if (num_failed_sgids != 0) {
1969 1898 ibtl_cm_sm_notice_init_failure(ifail);
1970 1899 kmem_free(ifail, sizeof (*ifail) +
1971 1900 (num_failed_sgids - 1) * sizeof (ib_gid_t));
1972 1901 }
1973 1902 mutex_exit(&ibcm_sm_notice_serialize_lock);
1974 1903 }
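
The ifail buffer above is sized with the classic one-element trailing-array idiom: assuming ibtl_cm_sm_init_fail_t declares smf_sgid[1], space for N GIDs is sizeof (*ifail) + (N - 1) * sizeof (ib_gid_t), and the extra entries are filled in through a walking pointer. The self-contained illustration below shows the same sizing; the structure and names are invented for the example, and modern C would usually use a flexible array member instead of the one-element array.

#include <stdlib.h>

typedef struct demo_gid {
	unsigned long long	gid_prefix;
	unsigned long long	gid_guid;
} demo_gid_t;

/* Structure whose last member is a one-element array, as in the driver. */
typedef struct gid_list {
	unsigned int	num_gids;
	demo_gid_t	gids[1];	/* really num_gids entries */
} gid_list_t;

/* n must be at least 1, matching the num_failed_sgids != 0 check above. */
static gid_list_t *
gid_list_alloc(unsigned int n)
{
	gid_list_t *gl;

	/* One entry is already counted in sizeof (*gl); add n - 1 more. */
	gl = malloc(sizeof (*gl) + (n - 1) * sizeof (demo_gid_t));
	if (gl != NULL)
		gl->num_gids = n;
	return (gl);
}

static void
gid_list_free(gid_list_t *gl)
{
	free(gl);
}

The driver's kmem_free() call repeats the same size expression as the allocation, which is why both sites above compute it identically.
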
1975 1904
1976 1905 /* The following is run from a taskq because we've seen stack overflows. */
1977 1906 static void
1978 1907 ibcm_init_saa(void *arg)
1979 1908 {
1980 1909 ibcm_port_info_t *portp = (ibcm_port_info_t *)arg;
1981 1910 int status;
1982 1911 ib_guid_t port_guid;
1983 1912 ibmf_saa_subnet_event_args_t event_args;
1984 1913
1985 1914 port_guid = portp->port_sgid0.gid_guid;
1986 1915
1987 1916 IBTF_DPRINTF_L3(cmlog, "ibcm_init_saa: port guid %llX", port_guid);
1988 1917
1989 1918 event_args.is_event_callback_arg = portp;
1990 1919 event_args.is_event_callback = ibcm_sm_notice_handler;
1991 1920
1992 1921 if ((status = ibmf_sa_session_open(port_guid, 0, &event_args,
1993 1922 IBMF_VERSION, 0, &portp->port_ibmf_saa_hdl)) != IBMF_SUCCESS) {
1994 1923 IBTF_DPRINTF_L2(cmlog, "ibcm_init_saa: "
1995 1924 "ibmf_sa_session_open failed for port guid %llX "
1996 1925 "status = %d", port_guid, status);
1997 1926 } else {
1998 1927 IBTF_DPRINTF_L2(cmlog, "ibcm_init_saa: "
1999 1928 "registered sa_hdl 0x%p for port guid %llX",
2000 1929 portp->port_ibmf_saa_hdl, port_guid);
2001 1930 }
2002 1931
2003 1932 mutex_enter(&ibcm_sa_open_lock);
2004 1933 portp->port_saa_open_in_progress = 0;
2005 1934 cv_broadcast(&ibcm_sa_open_cv);
2006 1935 mutex_exit(&ibcm_sa_open_lock);
2007 1936 }
2008 1937
2009 1938 void
2010 1939 ibcm_init_saa_handle(ibcm_hca_info_t *hcap, uint8_t port)
2011 1940 {
2012 1941 ibmf_saa_handle_t saa_handle;
2013 1942 uint8_t port_index = port - 1;
2014 1943 ibcm_port_info_t *portp = &hcap->hca_port_info[port_index];
2015 1944 ibt_status_t ibt_status;
2016 1945
2017 1946 if (port_index >= hcap->hca_num_ports)
2018 1947 return;
2019 1948
2020 1949 mutex_enter(&ibcm_sa_open_lock);
2021 1950 if (portp->port_saa_open_in_progress) {
2022 1951 mutex_exit(&ibcm_sa_open_lock);
2023 1952 return;
2024 1953 }
[ 1107 lines elided ]
2025 1954
2026 1955 saa_handle = portp->port_ibmf_saa_hdl;
2027 1956 if (saa_handle != NULL) {
2028 1957 mutex_exit(&ibcm_sa_open_lock);
2029 1958 return;
2030 1959 }
2031 1960
2032 1961 portp->port_saa_open_in_progress = 1;
2033 1962 mutex_exit(&ibcm_sa_open_lock);
2034 1963
2035 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(portp->port_event_status))
2036 -
2037 1964 /* The assumption is that we're getting event notifications */
2038 1965 portp->port_event_status = IBMF_SAA_EVENT_STATUS_MASK_PRODUCER_SM;
2039 1966
2040 - _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(portp->port_event_status))
2041 -
2042 1967 ibt_status = ibt_get_port_state_byguid(portp->port_hcap->hca_guid,
2043 1968 portp->port_num, &portp->port_sgid0, NULL);
2044 1969 if (ibt_status != IBT_SUCCESS) {
2045 1970 IBTF_DPRINTF_L2(cmlog, "ibcm_init_saa_handle: "
2046 1971 "ibt_get_port_state_byguid failed for guid %llX "
2047 1972 "with status %d", portp->port_hcap->hca_guid, ibt_status);
2048 1973 mutex_enter(&ibcm_sa_open_lock);
2049 1974 portp->port_saa_open_in_progress = 0;
2050 1975 cv_broadcast(&ibcm_sa_open_cv);
2051 1976 mutex_exit(&ibcm_sa_open_lock);
2052 1977 return;
2053 1978 }
2054 1979 /* if the port is UP, try sa_session_open */
2055 1980 (void) taskq_dispatch(ibcm_taskq, ibcm_init_saa, portp, TQ_SLEEP);
2056 1981 }
2057 1982
2058 1983
2059 1984 ibmf_saa_handle_t
2060 1985 ibcm_get_saa_handle(ibcm_hca_info_t *hcap, uint8_t port)
2061 1986 {
2062 1987 ibmf_saa_handle_t saa_handle;
2063 1988 uint8_t port_index = port - 1;
2064 1989 ibcm_port_info_t *portp = &hcap->hca_port_info[port_index];
2065 1990 ibt_status_t ibt_status;
2066 1991
2067 1992 if (port_index >= hcap->hca_num_ports)
2068 1993 return (NULL);
2069 1994
2070 1995 mutex_enter(&ibcm_sa_open_lock);
2071 1996 while (portp->port_saa_open_in_progress) {
2072 1997 cv_wait(&ibcm_sa_open_cv, &ibcm_sa_open_lock);
2073 1998 }
2074 1999
2075 2000 saa_handle = portp->port_ibmf_saa_hdl;
2076 2001 if (saa_handle != NULL) {
2077 2002 mutex_exit(&ibcm_sa_open_lock);
2078 2003 return (saa_handle);
2079 2004 }
2080 2005
2081 2006 portp->port_saa_open_in_progress = 1;
2082 2007 mutex_exit(&ibcm_sa_open_lock);
2083 2008
2084 2009 ibt_status = ibt_get_port_state_byguid(portp->port_hcap->hca_guid,
2085 2010 portp->port_num, &portp->port_sgid0, NULL);
2086 2011 if (ibt_status != IBT_SUCCESS) {
2087 2012 IBTF_DPRINTF_L2(cmlog, "ibcm_get_saa_handle: "
2088 2013 "ibt_get_port_state_byguid failed for guid %llX "
2089 2014 "with status %d", portp->port_hcap->hca_guid, ibt_status);
2090 2015 mutex_enter(&ibcm_sa_open_lock);
2091 2016 portp->port_saa_open_in_progress = 0;
2092 2017 cv_broadcast(&ibcm_sa_open_cv);
2093 2018 mutex_exit(&ibcm_sa_open_lock);
2094 2019 return (NULL);
2095 2020 }
2096 2021 /* if the port is UP, try sa_session_open */
2097 2022 (void) taskq_dispatch(ibcm_taskq, ibcm_init_saa, portp, TQ_SLEEP);
2098 2023
2099 2024 mutex_enter(&ibcm_sa_open_lock);
2100 2025 while (portp->port_saa_open_in_progress) {
2101 2026 cv_wait(&ibcm_sa_open_cv, &ibcm_sa_open_lock);
2102 2027 }
2103 2028 saa_handle = portp->port_ibmf_saa_hdl;
2104 2029 mutex_exit(&ibcm_sa_open_lock);
2105 2030 return (saa_handle);
2106 2031 }
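
ibcm_get_saa_handle() serializes SA session opens with the port_saa_open_in_progress flag and a condition variable: latecomers sleep until the opener clears the flag and broadcasts, then re-check the cached handle, and the potentially slow open itself runs without the lock held (here it is handed to a taskq). Below is a minimal pthreads sketch of that pattern, with handle_open() standing in for the dispatched open; all names are placeholders, not driver or IBMF interfaces.

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t	open_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	open_cv = PTHREAD_COND_INITIALIZER;
static int		open_in_progress;
static void		*cached_handle;

/* Stand-in for the real, potentially slow, session-open routine. */
static void *
handle_open(void)
{
	static int token;

	return (&token);
}

static void *
get_handle(void)
{
	void *h;

	pthread_mutex_lock(&open_lock);
	while (open_in_progress)
		pthread_cond_wait(&open_cv, &open_lock);
	if (cached_handle != NULL) {
		h = cached_handle;		/* already opened earlier */
		pthread_mutex_unlock(&open_lock);
		return (h);
	}
	open_in_progress = 1;			/* we will do the open */
	pthread_mutex_unlock(&open_lock);

	h = handle_open();			/* slow work, lock dropped */

	pthread_mutex_lock(&open_lock);
	cached_handle = h;
	open_in_progress = 0;
	pthread_cond_broadcast(&open_cv);	/* wake any latecomers */
	pthread_mutex_unlock(&open_lock);
	return (h);
}
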
2107 2032
2108 2033
2109 2034 /*
2110 2035 * ibcm_hca_init_port():
2111 2036 * - Register port with IBMA
2112 2037 *
2113 2038 * Arguments:
2114 2039 * hcap - pointer to CM's per-HCA info structure
2115 2040 * port_index - port number minus 1
2116 2041 *
2117 2042 * Return values:
2118 2043 * IBT_SUCCESS - success
2119 2044 */
2120 2045 ibt_status_t
[ 69 lines elided ]
2121 2046 ibcm_hca_init_port(ibcm_hca_info_t *hcap, uint8_t port_index)
2122 2047 {
2123 2048 int status;
2124 2049 ibmf_register_info_t *ibmf_reg;
2125 2050
2126 2051 IBTF_DPRINTF_L4(cmlog, "ibcm_hca_init_port: hcap = 0x%p port_num %d",
2127 2052 hcap, port_index + 1);
2128 2053
2129 2054 ASSERT(MUTEX_HELD(&ibcm_global_hca_lock));
2130 2055
2131 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(hcap->hca_port_info))
2132 -
2133 2056 if (hcap->hca_port_info[port_index].port_ibmf_hdl == NULL) {
2134 2057 /* Register with IBMF */
2135 2058 ibmf_reg = &hcap->hca_port_info[port_index].port_ibmf_reg;
2136 2059 ibmf_reg->ir_ci_guid = hcap->hca_guid;
2137 2060 ibmf_reg->ir_port_num = port_index + 1;
2138 2061 ibmf_reg->ir_client_class = COMM_MGT_MANAGER_AGENT;
2139 2062
2140 2063 /*
2141 2064 * register with management framework
2142 2065 */
2143 2066 status = ibmf_register(ibmf_reg, IBMF_VERSION,
2144 2067 IBMF_REG_FLAG_NO_OFFLOAD, NULL, NULL,
2145 2068 &(hcap->hca_port_info[port_index].port_ibmf_hdl),
2146 2069 &(hcap->hca_port_info[port_index].port_ibmf_caps));
2147 2070
2148 2071 if (status != IBMF_SUCCESS) {
2149 2072 IBTF_DPRINTF_L2(cmlog, "ibcm_hca_init_port: "
2150 2073 "ibmf_register failed for port_num %x, "
2151 2074 "status = %x", port_index + 1, status);
2152 2075 return (ibcm_ibmf_analyze_error(status));
2153 2076 }
2154 2077
2155 2078 hcap->hca_port_info[port_index].port_qp1.qp_cm =
2156 2079 IBMF_QP_HANDLE_DEFAULT;
2157 2080 hcap->hca_port_info[port_index].port_qp1.qp_port =
2158 2081 &(hcap->hca_port_info[port_index]);
2159 2082
2160 2083 /*
2161 2084 * Register the receive callback with IBMF.
2162 2085 * Since we just did an ibmf_register, the handle is
2163 2086 * valid and ibcm_recv_cb() is valid, so we can
2164 2087 * safely assert success of ibmf_setup_async_cb().
2165 2088 *
2166 2089 * Depending on the "state" of the HCA,
2167 2090 * CM may drop incoming packets
2168 2091 */
2169 2092 status = ibmf_setup_async_cb(
2170 2093 hcap->hca_port_info[port_index].port_ibmf_hdl,
2171 2094 IBMF_QP_HANDLE_DEFAULT, ibcm_recv_cb,
2172 2095 &(hcap->hca_port_info[port_index].port_qp1), 0);
2173 2096 ASSERT(status == IBMF_SUCCESS);
2174 2097
2175 2098 IBTF_DPRINTF_L5(cmlog, "ibcm_hca_init_port: "
2176 2099 "IBMF hdl[%x] = 0x%p", port_index,
2177 2100 hcap->hca_port_info[port_index].port_ibmf_hdl);
2178 2101
2179 2102 /* Attempt to get the saa_handle for this port */
2180 2103 ibcm_init_saa_handle(hcap, port_index + 1);
2181 2104 }
2182 2105
2183 2106 return (IBT_SUCCESS);
2184 2107 }
2185 2108
2186 2109 /*
2187 2110 * Useful to reattempt initializing port IBMA handles from elsewhere
2188 2111 * in the CM code.
2189 2112 */
2190 2113 ibt_status_t
2191 2114 ibcm_hca_reinit_port(ibcm_hca_info_t *hcap, uint8_t port_index)
2192 2115 {
2193 2116 ibt_status_t status;
2194 2117
2195 2118 IBTF_DPRINTF_L5(cmlog, "ibcm_hca_reinit_port: hcap 0x%p port_num %d",
2196 2119 hcap, port_index + 1);
2197 2120
2198 2121 mutex_enter(&ibcm_global_hca_lock);
2199 2122 status = ibcm_hca_init_port(hcap, port_index);
2200 2123 mutex_exit(&ibcm_global_hca_lock);
2201 2124 return (status);
2202 2125 }
2203 2126
2204 2127
2205 2128 /*
2206 2129 * ibcm_hca_fini_port():
2207 2130 * - Deregister port with IBMA
2208 2131 *
2209 2132 * Arguments:
2210 2133 * hcap - pointer to CM's per-HCA info structure
2211 2134 * port_index - port number minus 1
2212 2135 *
2213 2136 * Return values:
2214 2137 * IBCM_SUCCESS - success
2215 2138 */
2216 2139 static ibcm_status_t
2217 2140 ibcm_hca_fini_port(ibcm_hca_info_t *hcap, uint8_t port_index)
2218 2141 {
2219 2142 int ibmf_status;
2220 2143 ibcm_status_t ibcm_status;
2221 2144
2222 2145 IBTF_DPRINTF_L4(cmlog, "ibcm_hca_fini_port: hcap = 0x%p port_num %d ",
2223 2146 hcap, port_index + 1);
2224 2147
2225 2148 ASSERT(MUTEX_HELD(&ibcm_global_hca_lock));
2226 2149
2227 2150 if (hcap->hca_port_info[port_index].port_ibmf_saa_hdl != NULL) {
2228 2151 IBTF_DPRINTF_L5(cmlog, "ibcm_hca_fini_port: "
2229 2152 "ibmf_sa_session_close IBMF SAA hdl %p",
2230 2153 hcap->hca_port_info[port_index].port_ibmf_saa_hdl);
2231 2154
2232 2155 ibmf_status = ibmf_sa_session_close(
2233 2156 &hcap->hca_port_info[port_index].port_ibmf_saa_hdl, 0);
2234 2157 if (ibmf_status != IBMF_SUCCESS) {
2235 2158 IBTF_DPRINTF_L2(cmlog, "ibcm_hca_fini_port: "
2236 2159 "ibmf_sa_session_close of port %d returned %x",
2237 2160 port_index + 1, ibmf_status);
2238 2161 return (IBCM_FAILURE);
2239 2162 }
2240 2163 }
2241 2164
2242 2165 if (hcap->hca_port_info[port_index].port_ibmf_hdl != NULL) {
2243 2166 IBTF_DPRINTF_L5(cmlog, "ibcm_hca_fini_port: "
2244 2167 "ibmf_unregister IBMF Hdl %p",
2245 2168 hcap->hca_port_info[port_index].port_ibmf_hdl);
2246 2169
2247 2170 /* clean up all the IBMF QPs allocated on this port */
2248 2171 ibcm_status = ibcm_free_allqps(hcap, port_index + 1);
2249 2172
2250 2173 if (ibcm_status != IBCM_SUCCESS) {
2251 2174
2252 2175 IBTF_DPRINTF_L2(cmlog, "ibcm_hca_fini_port: "
2253 2176 "ibcm_free_allqps failed for port_num %d",
2254 2177 port_index + 1);
2255 2178 return (IBCM_FAILURE);
2256 2179 }
2257 2180
2258 2181 /* Tear down the receive callback */
2259 2182 ibmf_status = ibmf_tear_down_async_cb(
2260 2183 hcap->hca_port_info[port_index].port_ibmf_hdl,
2261 2184 IBMF_QP_HANDLE_DEFAULT, 0);
2262 2185
2263 2186 if (ibmf_status != IBMF_SUCCESS) {
2264 2187 IBTF_DPRINTF_L2(cmlog, "ibcm_hca_fini_port: "
2265 2188 "ibmf_tear_down_async_cb failed %d port_num %d",
2266 2189 ibmf_status, port_index + 1);
2267 2190 return (IBCM_FAILURE);
2268 2191 }
2269 2192
2270 2193 /* Now, unregister with IBMF */
2271 2194 ibmf_status = ibmf_unregister(
2272 2195 &hcap->hca_port_info[port_index].port_ibmf_hdl, 0);
2273 2196 IBTF_DPRINTF_L4(cmlog, "ibcm_hca_fini_port: "
2274 2197 "ibmf_unregister of port_num %x returned %x",
2275 2198 port_index + 1, ibmf_status);
2276 2199
2277 2200 if (ibmf_status == IBMF_SUCCESS)
2278 2201 hcap->hca_port_info[port_index].port_ibmf_hdl = NULL;
2279 2202 else {
2280 2203 IBTF_DPRINTF_L2(cmlog, "ibcm_hca_fini_port: "
2281 2204 "ibmf_unregister failed %d port_num %d",
2282 2205 ibmf_status, port_index + 1);
2283 2206 return (IBCM_FAILURE);
2284 2207 }
2285 2208 }
2286 2209 return (IBCM_SUCCESS);
2287 2210 }
2288 2211
2289 2212 /*
2290 2213 * ibcm_comm_est_handler():
2291 2214 * Handle a communication established (COM_EST) event on the given channel
2292 2215 *
2293 2216 * Arguments:
2294 2217 * eventp - A pointer to an ibt_async_event_t struct
2295 2218 *
2296 2219 * Return values: NONE
2297 2220 */
2298 2221 static void
2299 2222 ibcm_comm_est_handler(ibt_async_event_t *eventp)
2300 2223 {
2301 2224 ibcm_state_data_t *statep;
2302 2225
2303 2226 IBTF_DPRINTF_L4(cmlog, "ibcm_comm_est_handler:");
2304 2227
2305 2228 /* The QP and EEC handles cannot both be NULL */
2306 2229 if (eventp->ev_chan_hdl == NULL) {
2307 2230 IBTF_DPRINTF_L2(cmlog, "ibcm_comm_est_handler: "
2308 2231 "both QP and EEC handles are NULL");
2309 2232 return;
2310 2233 }
2311 2234
2312 2235 /* get the "statep" from qp/eec handles */
2313 2236 IBCM_GET_CHAN_PRIVATE(eventp->ev_chan_hdl, statep);
2314 2237 if (statep == NULL) {
2315 2238 IBTF_DPRINTF_L2(cmlog, "ibcm_comm_est_handler: statep is NULL");
2316 2239 return;
2317 2240 }
2318 2241
2319 2242 mutex_enter(&statep->state_mutex);
2320 2243
2321 2244 IBCM_RELEASE_CHAN_PRIVATE(eventp->ev_chan_hdl);
2322 2245
2323 2246 IBTF_DPRINTF_L4(cmlog, "ibcm_comm_est_handler: statep = %p", statep);
2324 2247
2325 2248 IBCM_REF_CNT_INCR(statep);
2326 2249
2327 2250 if ((statep->state == IBCM_STATE_REP_SENT) ||
2328 2251 (statep->state == IBCM_STATE_MRA_REP_RCVD)) {
2329 2252 timeout_id_t timer_val = statep->timerid;
2330 2253
2331 2254 statep->state = IBCM_STATE_TRANSIENT_ESTABLISHED;
2332 2255
2333 2256 if (timer_val) {
2334 2257 statep->timerid = 0;
2335 2258 mutex_exit(&statep->state_mutex);
2336 2259 (void) untimeout(timer_val);
2337 2260 } else
2338 2261 mutex_exit(&statep->state_mutex);
2339 2262
2340 2263 /* CM doesn't have RTU message here */
2341 2264 ibcm_cep_state_rtu(statep, NULL);
2342 2265
2343 2266 } else {
2344 2267 if (statep->state == IBCM_STATE_ESTABLISHED ||
2345 2268 statep->state == IBCM_STATE_TRANSIENT_ESTABLISHED) {
2346 2269 IBTF_DPRINTF_L4(cmlog, "ibcm_comm_est_handler: "
2347 2270 "Channel already in ESTABLISHED state");
2348 2271 } else {
2349 2272 /* An unexpected behavior from remote */
2350 2273 IBTF_DPRINTF_L2(cmlog, "ibcm_comm_est_handler: "
2351 2274 "Unexpected in state = %d", statep->state);
2352 2275 }
2353 2276 mutex_exit(&statep->state_mutex);
2354 2277
2355 2278 ibcm_insert_trace(statep, IBCM_TRACE_INCOMING_COMEST);
2356 2279 }
2357 2280
2358 2281 mutex_enter(&statep->state_mutex);
2359 2282 IBCM_REF_CNT_DECR(statep);
2360 2283 mutex_exit(&statep->state_mutex);
2361 2284 }
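
ibcm_comm_est_handler() pins the state structure by bumping its reference count with IBCM_REF_CNT_INCR() while state_mutex is held, drops the mutex for the upcall into the CM state machine, and decrements the count afterwards so the structure cannot be freed mid-callback. The sketch below shows that generic hold/call-out/release shape with POSIX threads; obj_t, do_callback() and handle_event() are invented names, and the state transitions and timer handling in the real code are omitted.

#include <pthread.h>

typedef struct obj {
	pthread_mutex_t	mutex;
	unsigned int	refcnt;
} obj_t;

/* Hypothetical upcall that must not run with the object's mutex held. */
static void
do_callback(obj_t *op)
{
	(void) op;
}

static void
handle_event(obj_t *op)
{
	pthread_mutex_lock(&op->mutex);
	op->refcnt++;			/* pin the object across the upcall */
	pthread_mutex_unlock(&op->mutex);

	do_callback(op);		/* call out without the lock held */

	pthread_mutex_lock(&op->mutex);
	op->refcnt--;			/* unpin; a waiting freer may proceed */
	pthread_mutex_unlock(&op->mutex);
}
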
2362 2285
2363 2286
2364 2287 /*
2365 2288 * ibcm_async_handler():
2366 2289 * CM's Async Handler
2367 2290 * (Handles ATTACH, DETACH, COM_EST events)
2368 2291 *
2369 2292 * Arguments:
2370 2293 * eventp - A pointer to an ibt_async_event_t struct
2371 2294 *
2372 2295 * Return values: None
2373 2296 *
2374 2297 * NOTE: CM assumes that all HCA DR events are delivered sequentially,
2375 2298 * i.e., until ibcm_async_handler() completes for a given HCA DR event,
2376 2299 * the framework will not invoke ibcm_async_handler() with another DR
2377 2300 * event for the same HCA.
2378 2301 */
2379 2302 /* ARGSUSED */
2380 2303 void
2381 2304 ibcm_async_handler(void *clnt_hdl, ibt_hca_hdl_t hca_hdl,
2382 2305 ibt_async_code_t code, ibt_async_event_t *eventp)
2383 2306 {
2384 2307 ibcm_hca_info_t *hcap;
2385 2308 ibcm_port_up_t *pup;
2386 2309
2387 2310 IBTF_DPRINTF_L3(cmlog, "ibcm_async_handler: "
2388 2311 "clnt_hdl = %p, code = 0x%x, eventp = 0x%p",
2389 2312 clnt_hdl, code, eventp);
2390 2313
2391 2314 mutex_enter(&ibcm_global_hca_lock);
2392 2315
2393 2316 /* If fini is going to complete successfully, then return */
2394 2317 if (ibcm_finit_state != IBCM_FINIT_IDLE) {
2395 2318
2396 2319 /*
2397 2320 * This finit state implies one of the following:
2398 2321 * Init either didn't start or didn't complete OR
2399 2322 * Fini is about to return SUCCESS and release the global lock.
2400 2323 * In all these cases, it is safe to ignore the async.
2401 2324 */
2402 2325
2403 2326 IBTF_DPRINTF_L2(cmlog, "ibcm_async_handler: ignoring event %x, "
2404 2327 "as either init didn't complete or fini about to succeed",
2405 2328 code);
2406 2329 mutex_exit(&ibcm_global_hca_lock);
2407 2330 return;
2408 2331 }
[ 266 lines elided ]
2409 2332
2410 2333 switch (code) {
2411 2334 case IBT_PORT_CHANGE_EVENT:
2412 2335 if ((eventp->ev_port_flags & IBT_PORT_CHANGE_SM_LID) == 0)
2413 2336 break;
2414 2337 /* FALLTHROUGH */
2415 2338 case IBT_CLNT_REREG_EVENT:
2416 2339 case IBT_EVENT_PORT_UP:
2417 2340 mutex_exit(&ibcm_global_hca_lock);
2418 2341 pup = kmem_alloc(sizeof (ibcm_port_up_t), KM_SLEEP);
2419 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*pup))
2420 2342 pup->pup_hca_guid = eventp->ev_hca_guid;
2421 2343 pup->pup_port = eventp->ev_port;
2422 - _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*pup))
2423 2344 (void) taskq_dispatch(ibcm_taskq,
2424 2345 ibcm_service_record_rewrite_task, pup, TQ_SLEEP);
2425 2346 ibcm_path_cache_purge();
2426 2347 return;
2427 2348
2428 2349 case IBT_HCA_ATTACH_EVENT:
2429 2350
2430 2351 /* eventp->ev_hca_guid is the HCA GUID of interest */
2431 2352 ibcm_hca_attach(eventp->ev_hca_guid);
2432 2353 break;
2433 2354
2434 2355 case IBT_HCA_DETACH_EVENT:
2435 2356
2436 2357 /* eventp->ev_hca_guid is the HCA GUID of interest */
2437 2358 if ((hcap = ibcm_find_hcap_entry(eventp->ev_hca_guid)) ==
2438 2359 NULL) {
2439 2360 IBTF_DPRINTF_L2(cmlog, "ibcm_async_handler:"
2440 2361 " hca %llX doesn't exist", eventp->ev_hca_guid);
2441 2362 break;
2442 2363 }
2443 2364
2444 2365 (void) ibcm_hca_detach(hcap);
2445 2366 break;
2446 2367
2447 2368 case IBT_EVENT_COM_EST_QP:
2448 2369 /* eventp->ev_qp_hdl is the ibt_qp_hdl_t of interest */
2449 2370 case IBT_EVENT_COM_EST_EEC:
2450 2371 /* eventp->ev_eec_hdl is the ibt_eec_hdl_t of interest */
2451 2372 ibcm_comm_est_handler(eventp);
2452 2373 break;
2453 2374 default:
2454 2375 break;
2455 2376 }
2456 2377
2457 2378 /* Unblock, any blocked fini/init operations */
2458 2379 mutex_exit(&ibcm_global_hca_lock);
2459 2380 }
[ 27 lines elided ]