8368 remove warlock leftovers from usr/src/uts
--- old/usr/src/uts/common/io/ib/ibtl/ibtl_handlers.c
+++ new/usr/src/uts/common/io/ib/ibtl/ibtl_handlers.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 #include <sys/ib/ibtl/impl/ibtl.h>
26 26 #include <sys/ib/ibtl/impl/ibtl_cm.h>
27 27 #include <sys/taskq.h>
28 28 #include <sys/disp.h>
29 29 #include <sys/callb.h>
30 30 #include <sys/proc.h>
31 31
32 32 /*
33 33 * ibtl_handlers.c
34 34 */
35 35
36 36 /*
37 37 * What's in this file?
38 38 *
39 39 * This file started as an implementation of Asynchronous Event/Error
40 40 * handling and Completion Queue handling. As the implementation
41 41 * evolved, code has been added for other ibc_* interfaces (resume,
42 42 * predetach, etc.) that use the same mechanisms as used for asyncs.
43 43 *
44 44 * Async and CQ handling at interrupt level.
45 45 *
46 46 * CQ handling is normally done at interrupt level using the CQ callback
47 47 * handler to call the appropriate IBT Client (owner of the CQ). For
48 48 * clients that would prefer a fully flexible non-interrupt context to
49 49 * do their CQ handling, a CQ can be created so that its handler is
50 50 * called from a non-interrupt thread. CQ handling is done frequently
51 51 * whereas Async handling is expected to occur very infrequently.
52 52 *
53 53 * Async handling is done by marking (or'ing an async_code into) the
54 54 * pertinent IBTL data structure, and then notifying the async_thread(s)
55 55 * that the data structure has async work to be done. The notification
56 56 * occurs by linking the data structure through its async_link onto a
57 57 * list of like data structures and waking up an async_thread. This
58 58 * list append is not done if there is already async work pending on
59 59 * this data structure (IBTL_ASYNC_PENDING).
60 60 *
61 61 * Async Mutex and CQ Mutex
62 62 *
63 63 * The global ibtl_async_mutex is "the" mutex used to control access
64 64 * to all the data needed by ibc_async_handler. All the threads that
65 65 * use this mutex are written so that the mutex is held for very short
66 66 * periods of time, and never held while making calls to functions
67 67 * that may block.
68 68 *
69 69 * The global ibtl_cq_mutex is used similarly by ibc_cq_handler and
70 70 * the ibtl_cq_thread(s).
71 71 *
72 72 * Mutex hierarchy
73 73 *
74 74 * The ibtl_clnt_list_mutex is above the ibtl_async_mutex.
75 75 * ibtl_clnt_list_mutex protects all of the various lists.
76 76 * The ibtl_async_mutex is below this in the hierarchy.
77 77 *
78 78 * The ibtl_cq_mutex is independent of the above mutexes.
79 79 *
80 80 * Threads
81 81 *
82 82 * There are "ibtl_cq_threads" number of threads created for handling
83 83 * Completion Queues in threads. If this feature really gets used,
84 84 * then we will want to do some suitable tuning. Similarly, we may
85 85 * want to tune the number of "ibtl_async_thread_init".
86 86 *
87 87 * The function ibtl_cq_thread is the main loop for handling a CQ in a
88 88 * thread. There can be multiple threads executing this same code.
89 89 * The code sleeps when there is no work to be done (list is empty),
90 90 * otherwise it pulls the first CQ structure off the list and performs
91 91 * the CQ handler callback to the client. After that returns, a check
92 92 * is made, and if another ibc_cq_handler call was made for this CQ,
93 93 * the client is called again.
94 94 *
95 95 * The function ibtl_async_thread is the main loop for handling async
96 96 * events/errors. There can be multiple threads executing this same code.
97 97 * The code sleeps when there is no work to be done (lists are empty),
98 98 * otherwise it pulls the first structure off one of the lists and
99 99 * performs the async callback(s) to the client(s). Note that HCA
100 100 * async handling is done by calling each of the clients using the HCA.
101 101 * When the async handling completes, the data structure having the async
102 102 * event/error is checked for more work before it's considered "done".
103 103 *
104 104 * Taskq
105 105 *
106 106 * The async_taskq is used here for allowing async handler callbacks to
107 107 * occur simultaneously to multiple clients of an HCA. This taskq could
108 108 * be used for other purposes, e.g., if all the async_threads are in
109 109 * use, but this is deemed as overkill since asyncs should occur rarely.
110 110 */
111 111
112 112 /* Globals */
113 113 static char ibtf_handlers[] = "ibtl_handlers";
114 114
115 115 /* priority for IBTL threads (async, cq, and taskq) */
116 116 static pri_t ibtl_pri = MAXCLSYSPRI - 1; /* maybe override in /etc/system */
117 117
118 118 /* taskq used for HCA asyncs */
119 119 #define ibtl_async_taskq system_taskq
120 120
121 121 /* data for async handling by threads */
122 122 static kmutex_t ibtl_async_mutex; /* protects most *_async_* data */
123 123 static kcondvar_t ibtl_async_cv; /* async_threads wait on this */
124 124 static kcondvar_t ibtl_clnt_cv; /* ibt_detach might wait on this */
125 125 static void ibtl_dec_clnt_async_cnt(ibtl_clnt_t *clntp);
126 126 static void ibtl_inc_clnt_async_cnt(ibtl_clnt_t *clntp);
127 127
128 128 static kt_did_t *ibtl_async_did; /* for thread_join() */
129 129 int ibtl_async_thread_init = 4; /* total # of async_threads to create */
130 130 static int ibtl_async_thread_exit = 0; /* set if/when thread(s) should exit */
131 131
132 132 /* async lists for various structures */
133 133 static ibtl_hca_devinfo_t *ibtl_async_hca_list_start, *ibtl_async_hca_list_end;
134 134 static ibtl_eec_t *ibtl_async_eec_list_start, *ibtl_async_eec_list_end;
135 135 static ibtl_qp_t *ibtl_async_qp_list_start, *ibtl_async_qp_list_end;
136 136 static ibtl_cq_t *ibtl_async_cq_list_start, *ibtl_async_cq_list_end;
137 137 static ibtl_srq_t *ibtl_async_srq_list_start, *ibtl_async_srq_list_end;
138 138
139 139 /* data for CQ completion handling by threads */
140 140 static kmutex_t ibtl_cq_mutex; /* protects the cv and the list below */
141 141 static kcondvar_t ibtl_cq_cv;
142 142 static ibtl_cq_t *ibtl_cq_list_start, *ibtl_cq_list_end;
143 143
144 144 static int ibtl_cq_threads = 0; /* total # of cq threads */
145 145 static int ibtl_cqs_using_threads = 0; /* total # of cqs using threads */
146 146 static int ibtl_cq_thread_exit = 0; /* set if/when thread(s) should exit */
147 147
148 148 /* value used to tell IBTL threads to exit */
149 149 #define IBTL_THREAD_EXIT 0x1b7fdead /* IBTF DEAD */
150 150 /* Cisco Topspin Vendor ID for Rereg hack */
151 151 #define IBT_VENDOR_CISCO 0x05ad
152 152
153 153 int ibtl_eec_not_supported = 1;
154 154
155 155 char *ibtl_last_client_name; /* may help debugging */
156 156 typedef ibt_status_t (*ibtl_node_info_cb_t)(ib_guid_t, uint8_t, ib_lid_t,
157 157 ibt_node_info_t *);
158 158
159 159 ibtl_node_info_cb_t ibtl_node_info_cb;
160 160
161 -_NOTE(LOCK_ORDER(ibtl_clnt_list_mutex ibtl_async_mutex))
162 -
163 161 void
164 162 ibtl_cm_set_node_info_cb(ibt_status_t (*node_info_cb)(ib_guid_t, uint8_t,
165 163 ib_lid_t, ibt_node_info_t *))
166 164 {
167 165 mutex_enter(&ibtl_clnt_list_mutex);
168 166 ibtl_node_info_cb = node_info_cb;
169 167 mutex_exit(&ibtl_clnt_list_mutex);
170 168 }
171 169
172 170 /*
173 171 * ibc_async_handler()
174 172 *
175 173 * Asynchronous Event/Error Handler.
176 174 *
177 175 * This is the function called by HCA drivers to post the various
178 176 * async events and errors mentioned in the IB architecture spec. See
179 177 * ibtl_types.h for additional details.
180 178 *
181 179 * This function marks the pertinent IBTF object with the async_code,
182 180 * and queues the object for handling by an ibtl_async_thread. If
183 181 * the object is NOT already marked for async processing, it is added
184 182 * to the associated list for that type of object, and an
185 183 * ibtl_async_thread is signaled to finish the async work.
186 184 */
187 185 void
188 186 ibc_async_handler(ibc_clnt_hdl_t hca_devp, ibt_async_code_t code,
189 187 ibc_async_event_t *event_p)
190 188 {
191 189 ibtl_qp_t *ibtl_qp;
192 190 ibtl_cq_t *ibtl_cq;
193 191 ibtl_srq_t *ibtl_srq;
194 192 ibtl_eec_t *ibtl_eec;
195 193 uint8_t port_minus1;
196 194
197 195 ibtl_async_port_event_t *portp;
198 196
199 197 IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler(%p, 0x%x, %p)",
200 198 hca_devp, code, event_p);
201 199
202 200 mutex_enter(&ibtl_async_mutex);
203 201
204 202 switch (code) {
205 203 case IBT_EVENT_PATH_MIGRATED_QP:
206 204 case IBT_EVENT_SQD:
207 205 case IBT_ERROR_CATASTROPHIC_QP:
208 206 case IBT_ERROR_PATH_MIGRATE_REQ_QP:
209 207 case IBT_EVENT_COM_EST_QP:
210 208 case IBT_ERROR_INVALID_REQUEST_QP:
211 209 case IBT_ERROR_ACCESS_VIOLATION_QP:
212 210 case IBT_EVENT_EMPTY_QP:
213 211 case IBT_FEXCH_ERROR:
214 212 ibtl_qp = event_p->ev_qp_hdl;
215 213 if (ibtl_qp == NULL) {
216 214 IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
217 215 "bad qp handle");
218 216 break;
219 217 }
220 218 switch (code) {
221 219 case IBT_ERROR_CATASTROPHIC_QP:
222 220 ibtl_qp->qp_cat_fma_ena = event_p->ev_fma_ena; break;
223 221 case IBT_ERROR_PATH_MIGRATE_REQ_QP:
224 222 ibtl_qp->qp_pth_fma_ena = event_p->ev_fma_ena; break;
225 223 case IBT_ERROR_INVALID_REQUEST_QP:
226 224 ibtl_qp->qp_inv_fma_ena = event_p->ev_fma_ena; break;
227 225 case IBT_ERROR_ACCESS_VIOLATION_QP:
228 226 ibtl_qp->qp_acc_fma_ena = event_p->ev_fma_ena; break;
229 227 }
230 228
231 229 ibtl_qp->qp_async_codes |= code;
232 230 if ((ibtl_qp->qp_async_flags & IBTL_ASYNC_PENDING) == 0) {
233 231 ibtl_qp->qp_async_flags |= IBTL_ASYNC_PENDING;
234 232 ibtl_qp->qp_async_link = NULL;
235 233 if (ibtl_async_qp_list_end == NULL)
236 234 ibtl_async_qp_list_start = ibtl_qp;
237 235 else
238 236 ibtl_async_qp_list_end->qp_async_link = ibtl_qp;
239 237 ibtl_async_qp_list_end = ibtl_qp;
240 238 cv_signal(&ibtl_async_cv);
241 239 }
242 240 break;
243 241
244 242 case IBT_ERROR_CQ:
245 243 ibtl_cq = event_p->ev_cq_hdl;
246 244 if (ibtl_cq == NULL) {
247 245 IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
248 246 "bad cq handle");
249 247 break;
250 248 }
251 249 ibtl_cq->cq_async_codes |= code;
252 250 ibtl_cq->cq_fma_ena = event_p->ev_fma_ena;
253 251 if ((ibtl_cq->cq_async_flags & IBTL_ASYNC_PENDING) == 0) {
254 252 ibtl_cq->cq_async_flags |= IBTL_ASYNC_PENDING;
255 253 ibtl_cq->cq_async_link = NULL;
256 254 if (ibtl_async_cq_list_end == NULL)
257 255 ibtl_async_cq_list_start = ibtl_cq;
258 256 else
259 257 ibtl_async_cq_list_end->cq_async_link = ibtl_cq;
260 258 ibtl_async_cq_list_end = ibtl_cq;
261 259 cv_signal(&ibtl_async_cv);
262 260 }
263 261 break;
264 262
265 263 case IBT_ERROR_CATASTROPHIC_SRQ:
266 264 case IBT_EVENT_LIMIT_REACHED_SRQ:
267 265 ibtl_srq = event_p->ev_srq_hdl;
268 266 if (ibtl_srq == NULL) {
269 267 IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
270 268 "bad srq handle");
271 269 break;
272 270 }
273 271 ibtl_srq->srq_async_codes |= code;
274 272 ibtl_srq->srq_fma_ena = event_p->ev_fma_ena;
275 273 if ((ibtl_srq->srq_async_flags & IBTL_ASYNC_PENDING) == 0) {
276 274 ibtl_srq->srq_async_flags |= IBTL_ASYNC_PENDING;
277 275 ibtl_srq->srq_async_link = NULL;
278 276 if (ibtl_async_srq_list_end == NULL)
279 277 ibtl_async_srq_list_start = ibtl_srq;
280 278 else
281 279 ibtl_async_srq_list_end->srq_async_link =
282 280 ibtl_srq;
283 281 ibtl_async_srq_list_end = ibtl_srq;
284 282 cv_signal(&ibtl_async_cv);
285 283 }
286 284 break;
287 285
288 286 case IBT_EVENT_PATH_MIGRATED_EEC:
289 287 case IBT_ERROR_PATH_MIGRATE_REQ_EEC:
290 288 case IBT_ERROR_CATASTROPHIC_EEC:
291 289 case IBT_EVENT_COM_EST_EEC:
292 290 if (ibtl_eec_not_supported) {
293 291 IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
294 292 "EEC events are disabled.");
295 293 break;
296 294 }
297 295 ibtl_eec = event_p->ev_eec_hdl;
298 296 if (ibtl_eec == NULL) {
299 297 IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
300 298 "bad eec handle");
301 299 break;
302 300 }
303 301 switch (code) {
304 302 case IBT_ERROR_PATH_MIGRATE_REQ_EEC:
305 303 ibtl_eec->eec_pth_fma_ena = event_p->ev_fma_ena; break;
306 304 case IBT_ERROR_CATASTROPHIC_EEC:
307 305 ibtl_eec->eec_cat_fma_ena = event_p->ev_fma_ena; break;
308 306 }
309 307 ibtl_eec->eec_async_codes |= code;
310 308 if ((ibtl_eec->eec_async_flags & IBTL_ASYNC_PENDING) == 0) {
311 309 ibtl_eec->eec_async_flags |= IBTL_ASYNC_PENDING;
312 310 ibtl_eec->eec_async_link = NULL;
313 311 if (ibtl_async_eec_list_end == NULL)
314 312 ibtl_async_eec_list_start = ibtl_eec;
315 313 else
316 314 ibtl_async_eec_list_end->eec_async_link =
317 315 ibtl_eec;
318 316 ibtl_async_eec_list_end = ibtl_eec;
319 317 cv_signal(&ibtl_async_cv);
320 318 }
321 319 break;
322 320
323 321 case IBT_ERROR_LOCAL_CATASTROPHIC:
324 322 hca_devp->hd_async_codes |= code;
325 323 hca_devp->hd_fma_ena = event_p->ev_fma_ena;
326 324 /* FALLTHROUGH */
327 325
328 326 case IBT_EVENT_PORT_UP:
329 327 case IBT_PORT_CHANGE_EVENT:
330 328 case IBT_CLNT_REREG_EVENT:
331 329 case IBT_ERROR_PORT_DOWN:
332 330 if ((code & IBT_PORT_EVENTS) != 0) {
333 331 if ((port_minus1 = event_p->ev_port - 1) >=
334 332 hca_devp->hd_hca_attr->hca_nports) {
335 333 IBTF_DPRINTF_L2(ibtf_handlers,
336 334 "ibc_async_handler: bad port #: %d",
337 335 event_p->ev_port);
338 336 break;
339 337 }
340 338 portp = &hca_devp->hd_async_port[port_minus1];
341 339 if (code == IBT_EVENT_PORT_UP) {
342 340 /*
343 341 * The port is just coming UP; we can't have any
344 342 * valid older events.
345 343 */
346 344 portp->status = IBTL_HCA_PORT_UP;
347 345 } else if (code == IBT_ERROR_PORT_DOWN) {
348 346 /*
349 347 * The port is going DOWN; older events don't
350 348 * count.
351 349 */
352 350 portp->status = IBTL_HCA_PORT_DOWN;
353 351 } else if (code == IBT_PORT_CHANGE_EVENT) {
354 352 /*
355 353 * For port UP and DOWN events only the latest
356 354 * event counts. If we get a UP after DOWN it
357 355 * is sufficient to send just UP and vice versa.
358 356 * In the case of a port CHANGE event, it is valid
359 357 * only when the port is already UP; but if we
360 358 * receive it after UP but before UP is
361 359 * delivered, we still need to deliver CHANGE
362 360 * after we deliver the UP event.
363 361 *
364 362 * We will not get a CHANGE event when the port
365 363 * is down or DOWN event is pending.
366 364 */
367 365 portp->flags |= event_p->ev_port_flags;
368 366 portp->status |= IBTL_HCA_PORT_CHG;
369 367 } else if (code == IBT_CLNT_REREG_EVENT) {
370 368 /*
371 369 * SM has requested a re-register of
372 370 * subscription to SM events notification.
373 371 */
374 372 portp->status |= IBTL_HCA_PORT_ASYNC_CLNT_REREG;
375 373 }
376 374
377 375 hca_devp->hd_async_codes |= code;
378 376 }
379 377
380 378 if ((hca_devp->hd_async_flags & IBTL_ASYNC_PENDING) == 0) {
381 379 hca_devp->hd_async_flags |= IBTL_ASYNC_PENDING;
382 380 hca_devp->hd_async_link = NULL;
383 381 if (ibtl_async_hca_list_end == NULL)
384 382 ibtl_async_hca_list_start = hca_devp;
385 383 else
386 384 ibtl_async_hca_list_end->hd_async_link =
387 385 hca_devp;
388 386 ibtl_async_hca_list_end = hca_devp;
389 387 cv_signal(&ibtl_async_cv);
390 388 }
391 389
392 390 break;
393 391
394 392 default:
395 393 IBTF_DPRINTF_L1(ibtf_handlers, "ibc_async_handler: "
396 394 "invalid code (0x%x)", code);
397 395 }
398 396
399 397 mutex_exit(&ibtl_async_mutex);
400 398 }
401 399
402 400
403 401 /* Finally, make the async call to the client. */
404 402
405 403 static void
406 404 ibtl_async_client_call(ibtl_hca_t *ibt_hca, ibt_async_code_t code,
407 405 ibt_async_event_t *event_p)
408 406 {
409 407 ibtl_clnt_t *clntp;
410 408 void *client_private;
411 409 ibt_async_handler_t async_handler;
412 410 char *client_name;
413 411
414 412 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call(%p, 0x%x, %p)",
415 413 ibt_hca, code, event_p);
416 414
417 415 clntp = ibt_hca->ha_clnt_devp;
418 416
419 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_last_client_name))
420 417 /* Record who is being called (just a debugging aid) */
421 418 ibtl_last_client_name = client_name = clntp->clnt_name;
422 - _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_last_client_name))
423 419
424 420 client_private = clntp->clnt_private;
425 421 async_handler = clntp->clnt_modinfop->mi_async_handler;
426 422
427 423 if (code & (IBT_EVENT_COM_EST_QP | IBT_EVENT_COM_EST_EEC)) {
428 424 mutex_enter(&ibtl_clnt_list_mutex);
429 425 async_handler = ibtl_cm_async_handler;
430 426 client_private = ibtl_cm_clnt_private;
431 427 mutex_exit(&ibtl_clnt_list_mutex);
432 428 ibt_hca = NULL;
433 429 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call: "
434 430 "calling CM for COM_EST");
435 431 } else {
436 432 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call: "
437 433 "calling client '%s'", client_name);
438 434 }
439 435 if (async_handler != NULL)
440 436 async_handler(client_private, ibt_hca, code, event_p);
441 437 else
442 438 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call: "
443 439 "client '%s' has no async handler", client_name);
444 440 }
445 441
446 442 /*
447 443 * Inform CM or DM about HCA events.
448 444 *
449 445 * We use taskqs to allow simultaneous notification, with sleeping.
450 446 * Since taskqs only allow one argument, we define a structure
451 447 * because we need to pass in more than one argument.
452 448 */
453 449
454 450 struct ibtl_mgr_s {
455 451 ibtl_hca_devinfo_t *mgr_hca_devp;
456 452 ibt_async_handler_t mgr_async_handler;
457 453 void *mgr_clnt_private;
458 454 };
459 455
460 456 /*
461 457 * Asyncs of HCA level events for CM and DM. Call CM or DM and tell them
462 458 * about the HCA for the event recorded in the ibtl_hca_devinfo_t.
463 459 */
464 460 static void
465 461 ibtl_do_mgr_async_task(void *arg)
466 462 {
467 463 struct ibtl_mgr_s *mgrp = (struct ibtl_mgr_s *)arg;
468 464 ibtl_hca_devinfo_t *hca_devp = mgrp->mgr_hca_devp;
469 465
470 466 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_mgr_async_task(0x%x)",
471 467 hca_devp->hd_async_code);
472 468
473 469 mgrp->mgr_async_handler(mgrp->mgr_clnt_private, NULL,
474 470 hca_devp->hd_async_code, &hca_devp->hd_async_event);
475 471 kmem_free(mgrp, sizeof (*mgrp));
476 472
477 473 mutex_enter(&ibtl_clnt_list_mutex);
478 474 if (--hca_devp->hd_async_task_cnt == 0)
479 475 cv_signal(&hca_devp->hd_async_task_cv);
480 476 mutex_exit(&ibtl_clnt_list_mutex);
481 477 }
482 478
483 479 static void
484 480 ibt_cisco_embedded_sm_rereg_fix(void *arg)
485 481 {
486 482 struct ibtl_mgr_s *mgrp = arg;
487 483 ibtl_hca_devinfo_t *hca_devp;
488 484 ibt_node_info_t node_info;
489 485 ibt_status_t ibt_status;
490 486 ibtl_async_port_event_t *portp;
491 487 ib_lid_t sm_lid;
492 488 ib_guid_t hca_guid;
493 489 ibt_async_event_t *event_p;
494 490 ibt_hca_portinfo_t *pinfop;
495 491 uint8_t port;
496 492
497 493 hca_devp = mgrp->mgr_hca_devp;
498 494
499 495 mutex_enter(&ibtl_clnt_list_mutex);
500 496 event_p = &hca_devp->hd_async_event;
501 497 port = event_p->ev_port;
502 498 portp = &hca_devp->hd_async_port[port - 1];
503 499 pinfop = &hca_devp->hd_portinfop[port - 1];
504 500 sm_lid = pinfop->p_sm_lid;
505 501 hca_guid = hca_devp->hd_hca_attr->hca_node_guid;
506 502 mutex_exit(&ibtl_clnt_list_mutex);
507 503
508 504 ibt_status = ((ibtl_node_info_cb_t)mgrp->mgr_async_handler)(hca_guid,
509 505 port, sm_lid, &node_info);
510 506 if (ibt_status == IBT_SUCCESS) {
511 507 if ((node_info.n_vendor_id == IBT_VENDOR_CISCO) &&
512 508 (node_info.n_node_type == IBT_NODE_TYPE_SWITCH)) {
513 509 mutex_enter(&ibtl_async_mutex);
514 510 portp->status |= IBTL_HCA_PORT_ASYNC_CLNT_REREG;
515 511 hca_devp->hd_async_codes |= IBT_CLNT_REREG_EVENT;
516 512 mutex_exit(&ibtl_async_mutex);
517 513 }
518 514 }
519 515 kmem_free(mgrp, sizeof (*mgrp));
520 516
521 517 mutex_enter(&ibtl_clnt_list_mutex);
522 518 if (--hca_devp->hd_async_task_cnt == 0)
523 519 cv_signal(&hca_devp->hd_async_task_cv);
524 520 mutex_exit(&ibtl_clnt_list_mutex);
525 521 }
526 522
527 523 static void
528 524 ibtl_cm_get_node_info(ibtl_hca_devinfo_t *hca_devp,
529 525 ibt_async_handler_t async_handler)
530 526 {
531 527 struct ibtl_mgr_s *mgrp;
532 528
533 529 if (async_handler == NULL)
534 530 return;
535 531
536 - _NOTE(NO_COMPETING_THREADS_NOW)
537 532 mgrp = kmem_alloc(sizeof (*mgrp), KM_SLEEP);
538 533 mgrp->mgr_hca_devp = hca_devp;
539 534 mgrp->mgr_async_handler = async_handler;
540 535 mgrp->mgr_clnt_private = NULL;
541 536 hca_devp->hd_async_task_cnt++;
542 537
543 538 (void) taskq_dispatch(ibtl_async_taskq,
544 539 ibt_cisco_embedded_sm_rereg_fix, mgrp, TQ_SLEEP);
545 -#ifndef lint
546 - _NOTE(COMPETING_THREADS_NOW)
547 -#endif
548 540 }
549 541
550 542 static void
551 543 ibtl_tell_mgr(ibtl_hca_devinfo_t *hca_devp, ibt_async_handler_t async_handler,
552 544 void *clnt_private)
553 545 {
554 546 struct ibtl_mgr_s *mgrp;
555 547
556 548 if (async_handler == NULL)
557 549 return;
558 550
559 - _NOTE(NO_COMPETING_THREADS_NOW)
560 551 mgrp = kmem_alloc(sizeof (*mgrp), KM_SLEEP);
561 552 mgrp->mgr_hca_devp = hca_devp;
562 553 mgrp->mgr_async_handler = async_handler;
563 554 mgrp->mgr_clnt_private = clnt_private;
564 555 hca_devp->hd_async_task_cnt++;
565 556
566 557 (void) taskq_dispatch(ibtl_async_taskq, ibtl_do_mgr_async_task, mgrp,
567 558 TQ_SLEEP);
568 -#ifndef lint
569 - _NOTE(COMPETING_THREADS_NOW)
570 -#endif
571 559 }
572 560
573 561 /*
574 562 * Per client-device asyncs for HCA level events. Call each client that is
575 563 * using the HCA for the event recorded in the ibtl_hca_devinfo_t.
576 564 */
577 565 static void
578 566 ibtl_hca_client_async_task(void *arg)
579 567 {
580 568 ibtl_hca_t *ibt_hca = (ibtl_hca_t *)arg;
581 569 ibtl_hca_devinfo_t *hca_devp = ibt_hca->ha_hca_devp;
582 570 ibtl_clnt_t *clntp = ibt_hca->ha_clnt_devp;
583 571 ibt_async_event_t async_event;
584 572
585 573 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_hca_client_async_task(%p, 0x%x)",
586 574 ibt_hca, hca_devp->hd_async_code);
587 575
588 576 bcopy(&hca_devp->hd_async_event, &async_event, sizeof (async_event));
589 577 ibtl_async_client_call(ibt_hca, hca_devp->hd_async_code, &async_event);
590 578
591 579 mutex_enter(&ibtl_async_mutex);
592 580 if (--ibt_hca->ha_async_cnt == 0 &&
593 581 (ibt_hca->ha_async_flags & IBTL_ASYNC_FREE_OBJECT)) {
594 582 mutex_exit(&ibtl_async_mutex);
595 583 kmem_free(ibt_hca, sizeof (ibtl_hca_t));
596 584 } else
597 585 mutex_exit(&ibtl_async_mutex);
598 586
599 587 mutex_enter(&ibtl_clnt_list_mutex);
600 588 if (--hca_devp->hd_async_task_cnt == 0)
601 589 cv_signal(&hca_devp->hd_async_task_cv);
602 590 if (--clntp->clnt_async_cnt == 0)
603 591 cv_broadcast(&ibtl_clnt_cv);
604 592
605 593 mutex_exit(&ibtl_clnt_list_mutex);
606 594 }
607 595
608 596 /*
609 597 * Asyncs for HCA level events.
610 598 *
611 599 * The function continues to run until there are no more async
612 600 * events/errors for this HCA. An event is chosen for dispatch
613 601 * to all clients of this HCA. This thread dispatches them via
614 602 * the ibtl_async_taskq, then sleeps until all tasks are done.
615 603 *
616 604 * This thread records the async_code and async_event in the
617 605 * ibtl_hca_devinfo_t for all client taskq threads to reference.
618 606 *
619 607 * This is called from an async or taskq thread with ibtl_async_mutex held.
620 608 */
621 609 static void
622 610 ibtl_do_hca_asyncs(ibtl_hca_devinfo_t *hca_devp)
623 611 {
624 612 ibtl_hca_t *ibt_hca;
625 613 ibt_async_event_t *eventp;
626 614 ibt_async_code_t code;
627 615 ibtl_async_port_status_t temp;
628 616 uint8_t nports;
629 617 uint8_t port_minus1;
630 618 ibtl_async_port_event_t *portp;
631 619
632 620 mutex_exit(&ibtl_async_mutex);
633 621
634 622 mutex_enter(&ibtl_clnt_list_mutex);
635 623 while (hca_devp->hd_async_busy)
636 624 cv_wait(&hca_devp->hd_async_busy_cv, &ibtl_clnt_list_mutex);
637 625 hca_devp->hd_async_busy = 1;
638 626 mutex_enter(&ibtl_async_mutex);
639 627
640 628 bzero(&hca_devp->hd_async_event, sizeof (hca_devp->hd_async_event));
641 629 for (;;) {
642 630
643 631 hca_devp->hd_async_event.ev_fma_ena = 0;
644 632
645 633 code = hca_devp->hd_async_codes;
646 634 if (code & IBT_ERROR_LOCAL_CATASTROPHIC) {
647 635 code = IBT_ERROR_LOCAL_CATASTROPHIC;
648 636 hca_devp->hd_async_event.ev_fma_ena =
649 637 hca_devp->hd_fma_ena;
650 638 } else if (code & IBT_ERROR_PORT_DOWN) {
651 639 code = IBT_ERROR_PORT_DOWN;
652 640 temp = IBTL_HCA_PORT_DOWN;
653 641 } else if (code & IBT_EVENT_PORT_UP) {
654 642 code = IBT_EVENT_PORT_UP;
655 643 temp = IBTL_HCA_PORT_UP;
656 644 } else if (code & IBT_PORT_CHANGE_EVENT) {
657 645 code = IBT_PORT_CHANGE_EVENT;
658 646 temp = IBTL_HCA_PORT_CHG;
659 647 } else if (code & IBT_CLNT_REREG_EVENT) {
660 648 code = IBT_CLNT_REREG_EVENT;
661 649 temp = IBTL_HCA_PORT_ASYNC_CLNT_REREG;
662 650 } else {
663 651 hca_devp->hd_async_codes = 0;
664 652 code = 0;
665 653 }
666 654
667 655 if (code == 0) {
668 656 hca_devp->hd_async_flags &= ~IBTL_ASYNC_PENDING;
669 657 break;
670 658 }
671 659 hca_devp->hd_async_codes &= ~code;
672 660
673 661 /* PORT_UP, PORT_CHANGE, PORT_DOWN or ASYNC_REREG */
674 662 if ((code & IBT_PORT_EVENTS) != 0) {
675 663 portp = hca_devp->hd_async_port;
676 664 nports = hca_devp->hd_hca_attr->hca_nports;
677 665 for (port_minus1 = 0; port_minus1 < nports;
678 666 port_minus1++) {
679 667 /*
680 668 * Matching event in this port, let's go handle
681 669 * it.
682 670 */
683 671 if ((portp[port_minus1].status & temp) != 0)
684 672 break;
685 673 }
686 674 if (port_minus1 >= nports) {
687 675 /* we checked again, but found nothing */
688 676 continue;
689 677 }
690 678 IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_do_hca_asyncs: "
691 679 "async: port# %x code %x", port_minus1 + 1, code);
692 680 /* mark it to check for other ports after we're done */
693 681 hca_devp->hd_async_codes |= code;
694 682
695 683 /*
696 684 * Copy the event information into hca_devp and clear
697 685 * event information from the per port data.
698 686 */
699 687 hca_devp->hd_async_event.ev_port = port_minus1 + 1;
700 688 if (temp == IBTL_HCA_PORT_CHG) {
701 689 hca_devp->hd_async_event.ev_port_flags =
702 690 hca_devp->hd_async_port[port_minus1].flags;
703 691 hca_devp->hd_async_port[port_minus1].flags = 0;
704 692 }
705 693 hca_devp->hd_async_port[port_minus1].status &= ~temp;
706 694
707 695 mutex_exit(&ibtl_async_mutex);
708 696 ibtl_reinit_hca_portinfo(hca_devp, port_minus1 + 1);
709 697 mutex_enter(&ibtl_async_mutex);
710 698 eventp = &hca_devp->hd_async_event;
711 699 eventp->ev_hca_guid =
712 700 hca_devp->hd_hca_attr->hca_node_guid;
713 701 }
714 702
715 703 hca_devp->hd_async_code = code;
716 704 hca_devp->hd_async_event.ev_hca_guid =
717 705 hca_devp->hd_hca_attr->hca_node_guid;
718 706 mutex_exit(&ibtl_async_mutex);
719 707
720 708 /*
721 709 * Make sure to inform CM, DM, and IBMA if we know of them.
722 710 * Also, make sure not to inform them a second time, which
723 711 * would occur if they have the HCA open.
724 712 */
725 713
726 714 if (ibtl_ibma_async_handler)
727 715 ibtl_tell_mgr(hca_devp, ibtl_ibma_async_handler,
728 716 ibtl_ibma_clnt_private);
729 717 /* wait for all tasks to complete */
730 718 while (hca_devp->hd_async_task_cnt != 0)
731 719 cv_wait(&hca_devp->hd_async_task_cv,
732 720 &ibtl_clnt_list_mutex);
733 721
734 722 /*
735 723 * Hack Alert:
736 724 * The ibmf handler would have updated the Master SM LID if it
737 725 * was an SM LID change event. Now let's check if the new Master
738 726 * SM is an Embedded Cisco Topspin SM.
739 727 */
740 728 if ((code == IBT_PORT_CHANGE_EVENT) &&
741 729 eventp->ev_port_flags & IBT_PORT_CHANGE_SM_LID)
742 730 ibtl_cm_get_node_info(hca_devp,
743 731 (ibt_async_handler_t)ibtl_node_info_cb);
744 732 /* wait for node info task to complete */
745 733 while (hca_devp->hd_async_task_cnt != 0)
746 734 cv_wait(&hca_devp->hd_async_task_cv,
747 735 &ibtl_clnt_list_mutex);
748 736
749 737 if (ibtl_dm_async_handler)
750 738 ibtl_tell_mgr(hca_devp, ibtl_dm_async_handler,
751 739 ibtl_dm_clnt_private);
752 740 if (ibtl_cm_async_handler)
753 741 ibtl_tell_mgr(hca_devp, ibtl_cm_async_handler,
754 742 ibtl_cm_clnt_private);
755 743 /* wait for all tasks to complete */
756 744 while (hca_devp->hd_async_task_cnt != 0)
757 745 cv_wait(&hca_devp->hd_async_task_cv,
758 746 &ibtl_clnt_list_mutex);
759 747
760 748 for (ibt_hca = hca_devp->hd_clnt_list;
761 749 ibt_hca != NULL;
762 750 ibt_hca = ibt_hca->ha_clnt_link) {
763 751
764 752 /* Managers are handled above */
765 753 if (IBTL_HCA2MODI_P(ibt_hca)->mi_async_handler ==
766 754 ibtl_cm_async_handler)
767 755 continue;
768 756 if (IBTL_HCA2MODI_P(ibt_hca)->mi_async_handler ==
769 757 ibtl_dm_async_handler)
770 758 continue;
771 759 if (IBTL_HCA2MODI_P(ibt_hca)->mi_async_handler ==
772 760 ibtl_ibma_async_handler)
773 761 continue;
774 762 ++ibt_hca->ha_clnt_devp->clnt_async_cnt;
775 763
776 764 mutex_enter(&ibtl_async_mutex);
777 765 ibt_hca->ha_async_cnt++;
778 766 mutex_exit(&ibtl_async_mutex);
779 767 hca_devp->hd_async_task_cnt++;
780 768 (void) taskq_dispatch(ibtl_async_taskq,
781 769 ibtl_hca_client_async_task, ibt_hca, TQ_SLEEP);
782 770 }
783 771
784 772 /* wait for all tasks to complete */
785 773 while (hca_devp->hd_async_task_cnt != 0)
786 774 cv_wait(&hca_devp->hd_async_task_cv,
787 775 &ibtl_clnt_list_mutex);
788 776
789 777 mutex_enter(&ibtl_async_mutex);
790 778 }
791 779 hca_devp->hd_async_code = 0;
792 780 hca_devp->hd_async_busy = 0;
793 781 cv_broadcast(&hca_devp->hd_async_busy_cv);
794 782 mutex_exit(&ibtl_clnt_list_mutex);
795 783 }
796 784
797 785 /*
798 786 * Asyncs for QP objects.
799 787 *
800 788 * The function continues to run until there are no more async
801 789 * events/errors for this object.
802 790 */
803 791 static void
804 792 ibtl_do_qp_asyncs(ibtl_qp_t *ibtl_qp)
805 793 {
806 794 ibt_async_code_t code;
807 795 ibt_async_event_t async_event;
808 796
809 797 ASSERT(MUTEX_HELD(&ibtl_async_mutex));
810 798 bzero(&async_event, sizeof (async_event));
811 799 async_event.ev_chan_hdl = IBTL_QP2CHAN(ibtl_qp);
812 800
813 801 while ((code = ibtl_qp->qp_async_codes) != 0) {
814 802 async_event.ev_fma_ena = 0;
815 803 if (ibtl_qp->qp_async_flags & IBTL_ASYNC_FREE_OBJECT)
816 804 code = 0; /* fallthrough to "kmem_free" */
817 805 else if (code & IBT_ERROR_CATASTROPHIC_QP) {
818 806 code = IBT_ERROR_CATASTROPHIC_QP;
819 807 async_event.ev_fma_ena = ibtl_qp->qp_cat_fma_ena;
820 808 } else if (code & IBT_ERROR_INVALID_REQUEST_QP) {
821 809 code = IBT_ERROR_INVALID_REQUEST_QP;
822 810 async_event.ev_fma_ena = ibtl_qp->qp_inv_fma_ena;
823 811 } else if (code & IBT_ERROR_ACCESS_VIOLATION_QP) {
824 812 code = IBT_ERROR_ACCESS_VIOLATION_QP;
825 813 async_event.ev_fma_ena = ibtl_qp->qp_acc_fma_ena;
826 814 } else if (code & IBT_ERROR_PATH_MIGRATE_REQ_QP) {
827 815 code = IBT_ERROR_PATH_MIGRATE_REQ_QP;
828 816 async_event.ev_fma_ena = ibtl_qp->qp_pth_fma_ena;
829 817 } else if (code & IBT_EVENT_PATH_MIGRATED_QP)
830 818 code = IBT_EVENT_PATH_MIGRATED_QP;
831 819 else if (code & IBT_EVENT_SQD)
832 820 code = IBT_EVENT_SQD;
833 821 else if (code & IBT_EVENT_COM_EST_QP)
834 822 code = IBT_EVENT_COM_EST_QP;
835 823 else if (code & IBT_EVENT_EMPTY_QP)
836 824 code = IBT_EVENT_EMPTY_QP;
837 825 else {
838 826 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_qp_asyncs: "
839 827 "async: unexpected QP async code 0x%x", code);
840 828 ibtl_qp->qp_async_codes = 0;
841 829 code = 0;
842 830 }
843 831 ibtl_qp->qp_async_codes &= ~code;
844 832
845 833 if (code) {
846 834 mutex_exit(&ibtl_async_mutex);
847 835 ibtl_async_client_call(ibtl_qp->qp_hca,
848 836 code, &async_event);
849 837 mutex_enter(&ibtl_async_mutex);
850 838 }
851 839
852 840 if (ibtl_qp->qp_async_flags & IBTL_ASYNC_FREE_OBJECT) {
853 841 mutex_exit(&ibtl_async_mutex);
854 842 cv_destroy(&(IBTL_QP2CHAN(ibtl_qp))->ch_cm_cv);
855 843 mutex_destroy(&(IBTL_QP2CHAN(ibtl_qp))->ch_cm_mutex);
856 844 kmem_free(IBTL_QP2CHAN(ibtl_qp),
857 845 sizeof (ibtl_channel_t));
858 846 mutex_enter(&ibtl_async_mutex);
859 847 return;
860 848 }
861 849 }
862 850 ibtl_qp->qp_async_flags &= ~IBTL_ASYNC_PENDING;
863 851 }
864 852
865 853 /*
866 854 * Asyncs for SRQ objects.
867 855 *
868 856 * The function continues to run until there are no more async
869 857 * events/errors for this object.
870 858 */
871 859 static void
872 860 ibtl_do_srq_asyncs(ibtl_srq_t *ibtl_srq)
873 861 {
874 862 ibt_async_code_t code;
875 863 ibt_async_event_t async_event;
876 864
877 865 ASSERT(MUTEX_HELD(&ibtl_async_mutex));
878 866 bzero(&async_event, sizeof (async_event));
879 867 async_event.ev_srq_hdl = ibtl_srq;
880 868 async_event.ev_fma_ena = ibtl_srq->srq_fma_ena;
881 869
882 870 while ((code = ibtl_srq->srq_async_codes) != 0) {
883 871 if (ibtl_srq->srq_async_flags & IBTL_ASYNC_FREE_OBJECT)
884 872 code = 0; /* fallthrough to "kmem_free" */
885 873 else if (code & IBT_ERROR_CATASTROPHIC_SRQ)
886 874 code = IBT_ERROR_CATASTROPHIC_SRQ;
887 875 else if (code & IBT_EVENT_LIMIT_REACHED_SRQ)
888 876 code = IBT_EVENT_LIMIT_REACHED_SRQ;
889 877 else {
890 878 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_srq_asyncs: "
891 879 "async: unexpected SRQ async code 0x%x", code);
892 880 ibtl_srq->srq_async_codes = 0;
893 881 code = 0;
894 882 }
895 883 ibtl_srq->srq_async_codes &= ~code;
896 884
897 885 if (code) {
898 886 mutex_exit(&ibtl_async_mutex);
899 887 ibtl_async_client_call(ibtl_srq->srq_hca,
900 888 code, &async_event);
901 889 mutex_enter(&ibtl_async_mutex);
902 890 }
903 891
904 892 if (ibtl_srq->srq_async_flags & IBTL_ASYNC_FREE_OBJECT) {
905 893 mutex_exit(&ibtl_async_mutex);
906 894 kmem_free(ibtl_srq, sizeof (struct ibtl_srq_s));
907 895 mutex_enter(&ibtl_async_mutex);
908 896 return;
909 897 }
910 898 }
911 899 ibtl_srq->srq_async_flags &= ~IBTL_ASYNC_PENDING;
912 900 }
913 901
914 902 /*
915 903 * Asyncs for CQ objects.
916 904 *
917 905 * The function continues to run until there are no more async
918 906 * events/errors for this object.
919 907 */
920 908 static void
921 909 ibtl_do_cq_asyncs(ibtl_cq_t *ibtl_cq)
922 910 {
923 911 ibt_async_code_t code;
924 912 ibt_async_event_t async_event;
925 913
926 914 ASSERT(MUTEX_HELD(&ibtl_async_mutex));
927 915 bzero(&async_event, sizeof (async_event));
928 916 async_event.ev_cq_hdl = ibtl_cq;
929 917 async_event.ev_fma_ena = ibtl_cq->cq_fma_ena;
930 918
931 919 while ((code = ibtl_cq->cq_async_codes) != 0) {
932 920 if (ibtl_cq->cq_async_flags & IBTL_ASYNC_FREE_OBJECT)
933 921 code = 0; /* fallthrough to "kmem_free" */
934 922 else if (code & IBT_ERROR_CQ)
935 923 code = IBT_ERROR_CQ;
936 924 else {
937 925 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_cq_asyncs: "
938 926 "async: unexpected CQ async code 0x%x", code);
939 927 ibtl_cq->cq_async_codes = 0;
940 928 code = 0;
941 929 }
942 930 ibtl_cq->cq_async_codes &= ~code;
943 931
944 932 if (code) {
945 933 mutex_exit(&ibtl_async_mutex);
946 934 ibtl_async_client_call(ibtl_cq->cq_hca,
947 935 code, &async_event);
948 936 mutex_enter(&ibtl_async_mutex);
949 937 }
950 938
951 939 if (ibtl_cq->cq_async_flags & IBTL_ASYNC_FREE_OBJECT) {
952 940 mutex_exit(&ibtl_async_mutex);
953 941 mutex_destroy(&ibtl_cq->cq_mutex);
954 942 kmem_free(ibtl_cq, sizeof (struct ibtl_cq_s));
955 943 mutex_enter(&ibtl_async_mutex);
956 944 return;
957 945 }
958 946 }
959 947 ibtl_cq->cq_async_flags &= ~IBTL_ASYNC_PENDING;
960 948 }
961 949
962 950 /*
963 951 * Asyncs for EEC objects.
964 952 *
965 953 * The function continues to run until there are no more async
966 954 * events/errors for this object.
967 955 */
968 956 static void
969 957 ibtl_do_eec_asyncs(ibtl_eec_t *ibtl_eec)
970 958 {
971 959 ibt_async_code_t code;
972 960 ibt_async_event_t async_event;
973 961
974 962 ASSERT(MUTEX_HELD(&ibtl_async_mutex));
975 963 bzero(&async_event, sizeof (async_event));
976 964 async_event.ev_chan_hdl = ibtl_eec->eec_channel;
977 965
978 966 while ((code = ibtl_eec->eec_async_codes) != 0) {
979 967 async_event.ev_fma_ena = 0;
980 968 if (ibtl_eec->eec_async_flags & IBTL_ASYNC_FREE_OBJECT)
981 969 code = 0; /* fallthrough to "kmem_free" */
982 970 else if (code & IBT_ERROR_CATASTROPHIC_EEC) {
983 971 code = IBT_ERROR_CATASTROPHIC_CHAN;
984 972 async_event.ev_fma_ena = ibtl_eec->eec_cat_fma_ena;
985 973 } else if (code & IBT_ERROR_PATH_MIGRATE_REQ_EEC) {
986 974 code = IBT_ERROR_PATH_MIGRATE_REQ;
987 975 async_event.ev_fma_ena = ibtl_eec->eec_pth_fma_ena;
988 976 } else if (code & IBT_EVENT_PATH_MIGRATED_EEC)
989 977 code = IBT_EVENT_PATH_MIGRATED;
990 978 else if (code & IBT_EVENT_COM_EST_EEC)
991 979 code = IBT_EVENT_COM_EST;
992 980 else {
993 981 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_eec_asyncs: "
994 982 "async: unexpected code 0x%x", code);
995 983 ibtl_eec->eec_async_codes = 0;
996 984 code = 0;
997 985 }
998 986 ibtl_eec->eec_async_codes &= ~code;
999 987
1000 988 if (code) {
1001 989 mutex_exit(&ibtl_async_mutex);
1002 990 ibtl_async_client_call(ibtl_eec->eec_hca,
1003 991 code, &async_event);
1004 992 mutex_enter(&ibtl_async_mutex);
1005 993 }
1006 994
1007 995 if (ibtl_eec->eec_async_flags & IBTL_ASYNC_FREE_OBJECT) {
1008 996 mutex_exit(&ibtl_async_mutex);
1009 997 kmem_free(ibtl_eec, sizeof (struct ibtl_eec_s));
1010 998 mutex_enter(&ibtl_async_mutex);
1011 999 return;
1012 1000 }
1013 1001 }
1014 1002 ibtl_eec->eec_async_flags &= ~IBTL_ASYNC_PENDING;
1015 1003 }
1016 1004
1017 -#ifdef __lock_lint
1018 -kmutex_t cpr_mutex;
1019 -#endif
1020 -
1021 1005 /*
1022 1006 * Loop forever, calling async_handlers until all of the async lists
1023 1007 * are empty.
1024 1008 */
1025 1009
1026 1010 static void
1027 1011 ibtl_async_thread(void)
1028 1012 {
1029 -#ifndef __lock_lint
1030 1013 kmutex_t cpr_mutex;
1031 -#endif
1032 1014 callb_cpr_t cprinfo;
1033 1015
1034 - _NOTE(MUTEX_PROTECTS_DATA(cpr_mutex, cprinfo))
1035 - _NOTE(NO_COMPETING_THREADS_NOW)
1036 1016 mutex_init(&cpr_mutex, NULL, MUTEX_DRIVER, NULL);
1037 1017 CALLB_CPR_INIT(&cprinfo, &cpr_mutex, callb_generic_cpr,
1038 1018 "ibtl_async_thread");
1039 -#ifndef lint
1040 - _NOTE(COMPETING_THREADS_NOW)
1041 -#endif
1042 1019
1043 1020 mutex_enter(&ibtl_async_mutex);
1044 1021
1045 1022 for (;;) {
1046 1023 if (ibtl_async_hca_list_start) {
1047 1024 ibtl_hca_devinfo_t *hca_devp;
1048 1025
1049 1026 /* remove first entry from list */
1050 1027 hca_devp = ibtl_async_hca_list_start;
1051 1028 ibtl_async_hca_list_start = hca_devp->hd_async_link;
1052 1029 hca_devp->hd_async_link = NULL;
1053 1030 if (ibtl_async_hca_list_start == NULL)
1054 1031 ibtl_async_hca_list_end = NULL;
1055 1032
1056 1033 ibtl_do_hca_asyncs(hca_devp);
1057 1034
1058 1035 } else if (ibtl_async_qp_list_start) {
1059 1036 ibtl_qp_t *ibtl_qp;
1060 1037
1061 1038 /* remove from list */
1062 1039 ibtl_qp = ibtl_async_qp_list_start;
1063 1040 ibtl_async_qp_list_start = ibtl_qp->qp_async_link;
1064 1041 ibtl_qp->qp_async_link = NULL;
1065 1042 if (ibtl_async_qp_list_start == NULL)
1066 1043 ibtl_async_qp_list_end = NULL;
1067 1044
1068 1045 ibtl_do_qp_asyncs(ibtl_qp);
1069 1046
1070 1047 } else if (ibtl_async_srq_list_start) {
1071 1048 ibtl_srq_t *ibtl_srq;
1072 1049
1073 1050 /* remove from list */
1074 1051 ibtl_srq = ibtl_async_srq_list_start;
1075 1052 ibtl_async_srq_list_start = ibtl_srq->srq_async_link;
1076 1053 ibtl_srq->srq_async_link = NULL;
1077 1054 if (ibtl_async_srq_list_start == NULL)
1078 1055 ibtl_async_srq_list_end = NULL;
1079 1056
1080 1057 ibtl_do_srq_asyncs(ibtl_srq);
1081 1058
1082 1059 } else if (ibtl_async_eec_list_start) {
1083 1060 ibtl_eec_t *ibtl_eec;
1084 1061
1085 1062 /* remove from list */
1086 1063 ibtl_eec = ibtl_async_eec_list_start;
1087 1064 ibtl_async_eec_list_start = ibtl_eec->eec_async_link;
1088 1065 ibtl_eec->eec_async_link = NULL;
1089 1066 if (ibtl_async_eec_list_start == NULL)
1090 1067 ibtl_async_eec_list_end = NULL;
1091 1068
1092 1069 ibtl_do_eec_asyncs(ibtl_eec);
1093 1070
1094 1071 } else if (ibtl_async_cq_list_start) {
1095 1072 ibtl_cq_t *ibtl_cq;
1096 1073
1097 1074 /* remove from list */
1098 1075 ibtl_cq = ibtl_async_cq_list_start;
1099 1076 ibtl_async_cq_list_start = ibtl_cq->cq_async_link;
1100 1077 ibtl_cq->cq_async_link = NULL;
1101 1078 if (ibtl_async_cq_list_start == NULL)
1102 1079 ibtl_async_cq_list_end = NULL;
1103 1080
1104 1081 ibtl_do_cq_asyncs(ibtl_cq);
1105 1082
1106 1083 } else {
1107 1084 if (ibtl_async_thread_exit == IBTL_THREAD_EXIT)
1108 1085 break;
1109 1086 mutex_enter(&cpr_mutex);
1110 1087 CALLB_CPR_SAFE_BEGIN(&cprinfo);
1111 1088 mutex_exit(&cpr_mutex);
1112 1089
1113 1090 cv_wait(&ibtl_async_cv, &ibtl_async_mutex);
1114 1091
1115 1092 mutex_exit(&ibtl_async_mutex);
1116 1093 mutex_enter(&cpr_mutex);
1117 1094 CALLB_CPR_SAFE_END(&cprinfo, &cpr_mutex);
1118 1095 mutex_exit(&cpr_mutex);
1119 1096 mutex_enter(&ibtl_async_mutex);
1120 1097 }
1121 1098 }
1122 1099
1123 1100 mutex_exit(&ibtl_async_mutex);
1124 1101
1125 -#ifndef __lock_lint
1126 1102 mutex_enter(&cpr_mutex);
1127 1103 CALLB_CPR_EXIT(&cprinfo);
1128 -#endif
1129 1104 mutex_destroy(&cpr_mutex);
1130 1105 }
1131 1106
1132 1107
1133 1108 void
1134 1109 ibtl_free_qp_async_check(ibtl_qp_t *ibtl_qp)
1135 1110 {
1136 1111 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_qp_async_check(%p)", ibtl_qp);
1137 1112
1138 1113 mutex_enter(&ibtl_async_mutex);
1139 1114
1140 1115 /*
1141 1116 * If there is an active async, mark this object to be freed
1142 1117 * by the async_thread when it's done.
1143 1118 */
1144 1119 if (ibtl_qp->qp_async_flags & IBTL_ASYNC_PENDING) {
1145 1120 ibtl_qp->qp_async_flags |= IBTL_ASYNC_FREE_OBJECT;
1146 1121 mutex_exit(&ibtl_async_mutex);
1147 1122 } else { /* free the object now */
1148 1123 mutex_exit(&ibtl_async_mutex);
1149 1124 cv_destroy(&(IBTL_QP2CHAN(ibtl_qp))->ch_cm_cv);
1150 1125 mutex_destroy(&(IBTL_QP2CHAN(ibtl_qp))->ch_cm_mutex);
1151 1126 kmem_free(IBTL_QP2CHAN(ibtl_qp), sizeof (ibtl_channel_t));
1152 1127 }
1153 1128 }
1154 1129
1155 1130 void
1156 1131 ibtl_free_cq_async_check(ibtl_cq_t *ibtl_cq)
1157 1132 {
1158 1133 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_cq_async_check(%p)", ibtl_cq);
1159 1134
1160 1135 mutex_enter(&ibtl_async_mutex);
1161 1136
1162 1137 /* if there is an active async, mark this object to be freed */
1163 1138 if (ibtl_cq->cq_async_flags & IBTL_ASYNC_PENDING) {
1164 1139 ibtl_cq->cq_async_flags |= IBTL_ASYNC_FREE_OBJECT;
1165 1140 mutex_exit(&ibtl_async_mutex);
1166 1141 } else { /* free the object now */
1167 1142 mutex_exit(&ibtl_async_mutex);
1168 1143 mutex_destroy(&ibtl_cq->cq_mutex);
1169 1144 kmem_free(ibtl_cq, sizeof (struct ibtl_cq_s));
1170 1145 }
1171 1146 }
1172 1147
1173 1148 void
1174 1149 ibtl_free_srq_async_check(ibtl_srq_t *ibtl_srq)
1175 1150 {
1176 1151 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_srq_async_check(%p)",
1177 1152 ibtl_srq);
1178 1153
1179 1154 mutex_enter(&ibtl_async_mutex);
1180 1155
1181 1156 /* if there is an active async, mark this object to be freed */
1182 1157 if (ibtl_srq->srq_async_flags & IBTL_ASYNC_PENDING) {
1183 1158 ibtl_srq->srq_async_flags |= IBTL_ASYNC_FREE_OBJECT;
1184 1159 mutex_exit(&ibtl_async_mutex);
1185 1160 } else { /* free the object now */
1186 1161 mutex_exit(&ibtl_async_mutex);
1187 1162 kmem_free(ibtl_srq, sizeof (struct ibtl_srq_s));
1188 1163 }
1189 1164 }
1190 1165
1191 1166 void
1192 1167 ibtl_free_eec_async_check(ibtl_eec_t *ibtl_eec)
1193 1168 {
1194 1169 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_eec_async_check(%p)",
1195 1170 ibtl_eec);
1196 1171
1197 1172 mutex_enter(&ibtl_async_mutex);
1198 1173
1199 1174 /* if there is an active async, mark this object to be freed */
1200 1175 if (ibtl_eec->eec_async_flags & IBTL_ASYNC_PENDING) {
1201 1176 ibtl_eec->eec_async_flags |= IBTL_ASYNC_FREE_OBJECT;
1202 1177 mutex_exit(&ibtl_async_mutex);
1203 1178 } else { /* free the object now */
1204 1179 mutex_exit(&ibtl_async_mutex);
1205 1180 kmem_free(ibtl_eec, sizeof (struct ibtl_eec_s));
1206 1181 }
1207 1182 }
1208 1183
1209 1184 /*
1210 1185 * This function differs from above in that we assume this is called
1211 1186 * from non-interrupt context, and never called from the async_thread.
1212 1187 */
1213 1188
1214 1189 void
1215 1190 ibtl_free_hca_async_check(ibtl_hca_t *ibt_hca)
1216 1191 {
1217 1192 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_hca_async_check(%p)",
1218 1193 ibt_hca);
1219 1194
1220 1195 mutex_enter(&ibtl_async_mutex);
1221 1196
1222 1197 /* if there is an active async, mark this object to be freed */
1223 1198 if (ibt_hca->ha_async_cnt > 0) {
1224 1199 ibt_hca->ha_async_flags |= IBTL_ASYNC_FREE_OBJECT;
1225 1200 mutex_exit(&ibtl_async_mutex);
1226 1201 } else { /* free the object now */
1227 1202 mutex_exit(&ibtl_async_mutex);
1228 1203 kmem_free(ibt_hca, sizeof (ibtl_hca_t));
1229 1204 }
1230 1205 }
1231 1206
1232 1207 /*
1233 1208 * Completion Queue Handling.
1234 1209 *
1235 1210 * A completion queue can be handled through a simple callback
1236 1211 * at interrupt level, or it may be queued for an ibtl_cq_thread
1237 1212 * to handle. The latter is chosen during ibt_alloc_cq when the
1238 1213 * IBTF_CQ_HANDLER_IN_THREAD is specified.
1239 1214 */
1240 1215
1241 1216 static void
1242 1217 ibtl_cq_handler_call(ibtl_cq_t *ibtl_cq)
1243 1218 {
1244 1219 ibt_cq_handler_t cq_handler;
1245 1220 void *arg;
1246 1221
1247 1222 IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_cq_handler_call(%p)", ibtl_cq);
1248 1223
1249 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ibtl_cq))
1250 1224 cq_handler = ibtl_cq->cq_comp_handler;
1251 1225 arg = ibtl_cq->cq_arg;
1252 1226 if (cq_handler != NULL)
1253 1227 cq_handler(ibtl_cq, arg);
1254 1228 else
1255 1229 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_cq_handler_call: "
1256 1230 "no cq_handler for cq %p", ibtl_cq);
1257 1231 }
1258 1232
1259 1233 /*
1260 1234 * Before ibt_free_cq can continue, we need to ensure no more cq_handler
1261 1235 * callbacks can occur. When we get the mutex, we know there are no
1262 1236 * outstanding cq_handler callbacks. We set the cq_handler to NULL to
1263 1237 * prohibit future callbacks.
1264 1238 */
1265 1239 void
1266 1240 ibtl_free_cq_check(ibtl_cq_t *ibtl_cq)
1267 1241 {
1268 1242 mutex_enter(&ibtl_cq->cq_mutex);
1269 1243 ibtl_cq->cq_comp_handler = NULL;
1270 1244 mutex_exit(&ibtl_cq->cq_mutex);
1271 1245 if (ibtl_cq->cq_in_thread) {
1272 1246 mutex_enter(&ibtl_cq_mutex);
1273 1247 --ibtl_cqs_using_threads;
1274 1248 while (ibtl_cq->cq_impl_flags & IBTL_CQ_PENDING) {
1275 1249 ibtl_cq->cq_impl_flags &= ~IBTL_CQ_CALL_CLIENT;
1276 1250 ibtl_cq->cq_impl_flags |= IBTL_CQ_FREE;
1277 1251 cv_wait(&ibtl_cq_cv, &ibtl_cq_mutex);
1278 1252 }
1279 1253 mutex_exit(&ibtl_cq_mutex);
1280 1254 }
1281 1255 }
1282 1256
1283 1257 /*
1284 1258 * Loop forever, calling cq_handlers until the cq list
1285 1259 * is empty.
1286 1260 */
1287 1261
1288 1262 static void
1289 1263 ibtl_cq_thread(void)
1290 1264 {
1291 -#ifndef __lock_lint
1292 1265 kmutex_t cpr_mutex;
1293 -#endif
1294 1266 callb_cpr_t cprinfo;
1295 1267
1296 - _NOTE(MUTEX_PROTECTS_DATA(cpr_mutex, cprinfo))
1297 - _NOTE(NO_COMPETING_THREADS_NOW)
1298 1268 mutex_init(&cpr_mutex, NULL, MUTEX_DRIVER, NULL);
1299 1269 CALLB_CPR_INIT(&cprinfo, &cpr_mutex, callb_generic_cpr,
1300 1270 "ibtl_cq_thread");
1301 -#ifndef lint
1302 - _NOTE(COMPETING_THREADS_NOW)
1303 -#endif
1304 1271
1305 1272 mutex_enter(&ibtl_cq_mutex);
1306 1273
1307 1274 for (;;) {
1308 1275 if (ibtl_cq_list_start) {
1309 1276 ibtl_cq_t *ibtl_cq;
1310 1277
1311 1278 ibtl_cq = ibtl_cq_list_start;
1312 1279 ibtl_cq_list_start = ibtl_cq->cq_link;
1313 1280 ibtl_cq->cq_link = NULL;
1314 1281 if (ibtl_cq == ibtl_cq_list_end)
1315 1282 ibtl_cq_list_end = NULL;
1316 1283
1317 1284 while (ibtl_cq->cq_impl_flags & IBTL_CQ_CALL_CLIENT) {
1318 1285 ibtl_cq->cq_impl_flags &= ~IBTL_CQ_CALL_CLIENT;
1319 1286 mutex_exit(&ibtl_cq_mutex);
1320 1287 ibtl_cq_handler_call(ibtl_cq);
1321 1288 mutex_enter(&ibtl_cq_mutex);
1322 1289 }
1323 1290 ibtl_cq->cq_impl_flags &= ~IBTL_CQ_PENDING;
1324 1291 if (ibtl_cq->cq_impl_flags & IBTL_CQ_FREE)
1325 1292 cv_broadcast(&ibtl_cq_cv);
1326 1293 } else {
1327 1294 if (ibtl_cq_thread_exit == IBTL_THREAD_EXIT)
1328 1295 break;
1329 1296 mutex_enter(&cpr_mutex);
1330 1297 CALLB_CPR_SAFE_BEGIN(&cprinfo);
1331 1298 mutex_exit(&cpr_mutex);
1332 1299
1333 1300 cv_wait(&ibtl_cq_cv, &ibtl_cq_mutex);
1334 1301
1335 1302 mutex_exit(&ibtl_cq_mutex);
1336 1303 mutex_enter(&cpr_mutex);
1337 1304 CALLB_CPR_SAFE_END(&cprinfo, &cpr_mutex);
1338 1305 mutex_exit(&cpr_mutex);
1339 1306 mutex_enter(&ibtl_cq_mutex);
1340 1307 }
1341 1308 }
1342 1309
1343 1310 mutex_exit(&ibtl_cq_mutex);
1344 -#ifndef __lock_lint
1345 1311 mutex_enter(&cpr_mutex);
1346 1312 CALLB_CPR_EXIT(&cprinfo);
1347 -#endif
1348 1313 mutex_destroy(&cpr_mutex);
1349 1314 }
1350 1315
1351 1316
1352 1317 /*
1353 1318 * ibc_cq_handler()
1354 1319 *
1355 1320 * Completion Queue Notification Handler.
1356 1321 *
1357 1322 */
1358 1323 /*ARGSUSED*/
1359 1324 void
1360 1325 ibc_cq_handler(ibc_clnt_hdl_t ibc_hdl, ibt_cq_hdl_t ibtl_cq)
1361 1326 {
1362 1327 IBTF_DPRINTF_L4(ibtf_handlers, "ibc_cq_handler(%p, %p)",
1363 1328 ibc_hdl, ibtl_cq);
1364 1329
1365 1330 if (ibtl_cq->cq_in_thread) {
1366 1331 mutex_enter(&ibtl_cq_mutex);
1367 1332 ibtl_cq->cq_impl_flags |= IBTL_CQ_CALL_CLIENT;
1368 1333 if ((ibtl_cq->cq_impl_flags & IBTL_CQ_PENDING) == 0) {
1369 1334 ibtl_cq->cq_impl_flags |= IBTL_CQ_PENDING;
1370 1335 ibtl_cq->cq_link = NULL;
1371 1336 if (ibtl_cq_list_end == NULL)
1372 1337 ibtl_cq_list_start = ibtl_cq;
1373 1338 else
1374 1339 ibtl_cq_list_end->cq_link = ibtl_cq;
1375 1340 ibtl_cq_list_end = ibtl_cq;
1376 1341 cv_signal(&ibtl_cq_cv);
1377 1342 }
1378 1343 mutex_exit(&ibtl_cq_mutex);
1379 1344 return;
1380 1345 } else
1381 1346 ibtl_cq_handler_call(ibtl_cq);
1382 1347 }
1383 1348
1384 1349
1385 1350 /*
1386 1351 * ibt_enable_cq_notify()
1387 1352 * Enable Notification requests on the specified CQ.
1388 1353 *
1389 1354 * ibt_cq The CQ handle.
1390 1355 *
1391 1356 * notify_type Enable notifications for all (IBT_NEXT_COMPLETION)
1392 1357 * completions, or the next Solicited completion
1393 1358 * (IBT_NEXT_SOLICITED) only.
1394 1359 *
1395 1360 * Completion notifications are disabled by setting the completion
1396 1361 * handler to NULL by calling ibt_set_cq_handler().
1397 1362 */
1398 1363 ibt_status_t
1399 1364 ibt_enable_cq_notify(ibt_cq_hdl_t ibtl_cq, ibt_cq_notify_flags_t notify_type)
1400 1365 {
1401 1366 IBTF_DPRINTF_L3(ibtf_handlers, "ibt_enable_cq_notify(%p, %d)",
1402 1367 ibtl_cq, notify_type);
1403 1368
1404 1369 return (IBTL_CQ2CIHCAOPS_P(ibtl_cq)->ibc_notify_cq(
1405 1370 IBTL_CQ2CIHCA(ibtl_cq), ibtl_cq->cq_ibc_cq_hdl, notify_type));
1406 1371 }
1407 1372
1408 1373
1409 1374 /*
1410 1375 * ibt_set_cq_handler()
1411 1376 * Register a work request completion handler with the IBTF.
1412 1377 *
1413 1378 * ibt_cq The CQ handle.
1414 1379 *
1415 1380 * completion_handler The completion handler.
1416 1381 *
1417 1382 * arg The IBTF client private argument to be passed
1418 1383 * back to the client when calling the CQ
1419 1384 * completion handler.
1420 1385 *
1421 1386 * Completion notifications are disabled by setting the completion
1422 1387 * handler to NULL. When setting the handler to NULL, no additional
1423 1388 * calls to the previous CQ handler will be initiated, but there may
1424 1389 * be one in progress.
1425 1390 *
1426 1391 * This function does not otherwise change the state of previous
1427 1392 * calls to ibt_enable_cq_notify().
1428 1393 */
1429 1394 void
1430 1395 ibt_set_cq_handler(ibt_cq_hdl_t ibtl_cq, ibt_cq_handler_t completion_handler,
1431 1396 void *arg)
1432 1397 {
1433 1398 IBTF_DPRINTF_L3(ibtf_handlers, "ibt_set_cq_handler(%p, %p, %p)",
1434 1399 ibtl_cq, completion_handler, arg);
1435 1400
1436 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ibtl_cq))
1437 1401 ibtl_cq->cq_comp_handler = completion_handler;
1438 1402 ibtl_cq->cq_arg = arg;
1439 1403 }
1440 1404
1441 1405
1442 1406 /*
1443 1407 * Inform IBT clients about New HCAs.
1444 1408 *
1445 1409 * We use taskqs to allow simultaneous notification, with sleeping.
1446 1410 * Since taskqs only allow one argument, we define a structure
1447 1411 * because we need to pass in two arguments.
1448 1412 */
1449 1413
1450 1414 struct ibtl_new_hca_s {
1451 1415 ibtl_clnt_t *nh_clntp;
1452 1416 ibtl_hca_devinfo_t *nh_hca_devp;
1453 1417 ibt_async_code_t nh_code;
1454 1418 };
1455 1419
1456 1420 static void
1457 1421 ibtl_tell_client_about_new_hca(void *arg)
1458 1422 {
1459 1423 struct ibtl_new_hca_s *new_hcap = (struct ibtl_new_hca_s *)arg;
1460 1424 ibtl_clnt_t *clntp = new_hcap->nh_clntp;
1461 1425 ibt_async_event_t async_event;
1462 1426 ibtl_hca_devinfo_t *hca_devp = new_hcap->nh_hca_devp;
1463 1427
1464 1428 bzero(&async_event, sizeof (async_event));
1465 1429 async_event.ev_hca_guid = hca_devp->hd_hca_attr->hca_node_guid;
1466 1430 clntp->clnt_modinfop->mi_async_handler(
1467 1431 clntp->clnt_private, NULL, new_hcap->nh_code, &async_event);
1468 1432 kmem_free(new_hcap, sizeof (*new_hcap));
1469 -#ifdef __lock_lint
1470 - {
1471 - ibt_hca_hdl_t hca_hdl;
1472 - (void) ibt_open_hca(clntp, 0ULL, &hca_hdl);
1473 - }
1474 -#endif
1475 1433 mutex_enter(&ibtl_clnt_list_mutex);
1476 1434 if (--hca_devp->hd_async_task_cnt == 0)
1477 1435 cv_signal(&hca_devp->hd_async_task_cv);
1478 1436 if (--clntp->clnt_async_cnt == 0)
1479 1437 cv_broadcast(&ibtl_clnt_cv);
1480 1438 mutex_exit(&ibtl_clnt_list_mutex);
1481 1439 }
1482 1440
1483 1441 /*
1484 1442 * ibtl_announce_new_hca:
1485 1443 *
1486 1444 * o First attach these clients in the given order
1487 1445 * IBMA
1488 1446 * IBCM
1489 1447 *
1490 1448 * o Next attach all other clients in parallel.
1491 1449 *
1492 1450 * NOTE: Use the taskq to simultaneously notify all clients of the new HCA.
1493 1451 * Retval from clients is ignored.
1494 1452 */
1495 1453 void
1496 1454 ibtl_announce_new_hca(ibtl_hca_devinfo_t *hca_devp)
1497 1455 {
1498 1456 ibtl_clnt_t *clntp;
1499 1457 struct ibtl_new_hca_s *new_hcap;
1500 1458
1501 1459 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_announce_new_hca(%p, %llX)",
1502 1460 hca_devp, hca_devp->hd_hca_attr->hca_node_guid);
1503 1461
1504 1462 mutex_enter(&ibtl_clnt_list_mutex);
1505 1463
1506 1464 clntp = ibtl_clnt_list;
1507 1465 while (clntp != NULL) {
1508 1466 if (clntp->clnt_modinfop->mi_clnt_class == IBT_IBMA) {
1509 1467 IBTF_DPRINTF_L4(ibtf_handlers,
1510 1468 "ibtl_announce_new_hca: calling IBMF");
1511 1469 if (clntp->clnt_modinfop->mi_async_handler) {
1512 - _NOTE(NO_COMPETING_THREADS_NOW)
1513 1470 new_hcap = kmem_alloc(sizeof (*new_hcap),
1514 1471 KM_SLEEP);
1515 1472 new_hcap->nh_clntp = clntp;
1516 1473 new_hcap->nh_hca_devp = hca_devp;
1517 1474 new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
1518 -#ifndef lint
1519 - _NOTE(COMPETING_THREADS_NOW)
1520 -#endif
1521 1475 clntp->clnt_async_cnt++;
1522 1476 hca_devp->hd_async_task_cnt++;
1523 1477
1524 1478 (void) taskq_dispatch(ibtl_async_taskq,
1525 1479 ibtl_tell_client_about_new_hca, new_hcap,
1526 1480 TQ_SLEEP);
1527 1481 }
1528 1482 break;
1529 1483 }
1530 1484 clntp = clntp->clnt_list_link;
1531 1485 }
1532 1486 if (clntp != NULL)
1533 1487 while (clntp->clnt_async_cnt > 0)
1534 1488 cv_wait(&ibtl_clnt_cv, &ibtl_clnt_list_mutex);
1535 1489 clntp = ibtl_clnt_list;
1536 1490 while (clntp != NULL) {
1537 1491 if (clntp->clnt_modinfop->mi_clnt_class == IBT_DM) {
1538 1492 IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_announce_new_hca: "
1539 1493 "calling %s", clntp->clnt_modinfop->mi_clnt_name);
1540 1494 if (clntp->clnt_modinfop->mi_async_handler) {
1541 - _NOTE(NO_COMPETING_THREADS_NOW)
1542 1495 new_hcap = kmem_alloc(sizeof (*new_hcap),
1543 1496 KM_SLEEP);
1544 1497 new_hcap->nh_clntp = clntp;
1545 1498 new_hcap->nh_hca_devp = hca_devp;
1546 1499 new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
1547 -#ifndef lint
1548 - _NOTE(COMPETING_THREADS_NOW)
1549 -#endif
1550 1500 clntp->clnt_async_cnt++;
1551 1501 hca_devp->hd_async_task_cnt++;
1552 1502
1553 1503 mutex_exit(&ibtl_clnt_list_mutex);
1554 1504 (void) ibtl_tell_client_about_new_hca(
1555 1505 new_hcap);
1556 1506 mutex_enter(&ibtl_clnt_list_mutex);
1557 1507 }
1558 1508 break;
1559 1509 }
1560 1510 clntp = clntp->clnt_list_link;
1561 1511 }
1562 1512
1563 1513 clntp = ibtl_clnt_list;
1564 1514 while (clntp != NULL) {
1565 1515 if (clntp->clnt_modinfop->mi_clnt_class == IBT_CM) {
1566 1516 IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_announce_new_hca: "
1567 1517 "calling %s", clntp->clnt_modinfop->mi_clnt_name);
1568 1518 if (clntp->clnt_modinfop->mi_async_handler) {
1569 - _NOTE(NO_COMPETING_THREADS_NOW)
1570 1519 new_hcap = kmem_alloc(sizeof (*new_hcap),
1571 1520 KM_SLEEP);
1572 1521 new_hcap->nh_clntp = clntp;
1573 1522 new_hcap->nh_hca_devp = hca_devp;
1574 1523 new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
1575 -#ifndef lint
1576 - _NOTE(COMPETING_THREADS_NOW)
1577 -#endif
1578 1524 clntp->clnt_async_cnt++;
1579 1525 hca_devp->hd_async_task_cnt++;
1580 1526
1581 1527 (void) taskq_dispatch(ibtl_async_taskq,
1582 1528 ibtl_tell_client_about_new_hca, new_hcap,
1583 1529 TQ_SLEEP);
1584 1530 }
1585 1531 break;
1586 1532 }
1587 1533 clntp = clntp->clnt_list_link;
1588 1534 }
1589 1535 if (clntp != NULL)
1590 1536 while (clntp->clnt_async_cnt > 0)
1591 1537 cv_wait(&ibtl_clnt_cv, &ibtl_clnt_list_mutex);
1592 1538 clntp = ibtl_clnt_list;
1593 1539 while (clntp != NULL) {
1594 1540 if ((clntp->clnt_modinfop->mi_clnt_class != IBT_DM) &&
1595 1541 (clntp->clnt_modinfop->mi_clnt_class != IBT_CM) &&
1596 1542 (clntp->clnt_modinfop->mi_clnt_class != IBT_IBMA)) {
1597 1543 IBTF_DPRINTF_L4(ibtf_handlers,
1598 1544 "ibtl_announce_new_hca: Calling %s ",
1599 1545 clntp->clnt_modinfop->mi_clnt_name);
1600 1546 if (clntp->clnt_modinfop->mi_async_handler) {
1601 - _NOTE(NO_COMPETING_THREADS_NOW)
1602 1547 new_hcap = kmem_alloc(sizeof (*new_hcap),
1603 1548 KM_SLEEP);
1604 1549 new_hcap->nh_clntp = clntp;
1605 1550 new_hcap->nh_hca_devp = hca_devp;
1606 1551 new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
1607 -#ifndef lint
1608 - _NOTE(COMPETING_THREADS_NOW)
1609 -#endif
1610 1552 clntp->clnt_async_cnt++;
1611 1553 hca_devp->hd_async_task_cnt++;
1612 1554
1613 1555 (void) taskq_dispatch(ibtl_async_taskq,
1614 1556 ibtl_tell_client_about_new_hca, new_hcap,
1615 1557 TQ_SLEEP);
1616 1558 }
1617 1559 }
1618 1560 clntp = clntp->clnt_list_link;
1619 1561 }
1620 1562
1621 1563 /* wait for all tasks to complete */
1622 1564 while (hca_devp->hd_async_task_cnt != 0)
1623 1565 cv_wait(&hca_devp->hd_async_task_cv, &ibtl_clnt_list_mutex);
1624 1566
1625 1567 	/* wake up a thread that may be waiting to send an HCA async */
1626 1568 ASSERT(hca_devp->hd_async_busy == 1);
1627 1569 hca_devp->hd_async_busy = 0;
1628 1570 cv_broadcast(&hca_devp->hd_async_busy_cv);
1629 1571 mutex_exit(&ibtl_clnt_list_mutex);
1630 1572 }
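
Taken together, ibtl_announce_new_hca() and ibtl_tell_client_about_new_hca()
form a simple barrier: the announcer counts each dispatched task up under
ibtl_clnt_list_mutex, and each task counts down and signals as it finishes.
Condensed to its essentials with generic names (same locking shape, not a
verbatim excerpt):

	/* announcer: count up, dispatch, then wait for the count to drain */
	mutex_enter(&lock);
	task_cnt++;
	(void) taskq_dispatch(tq, task_func, arg, TQ_SLEEP);
	while (task_cnt != 0)
		cv_wait(&task_cv, &lock);
	mutex_exit(&lock);

	/* each task: count down; the last one out wakes the waiter */
	mutex_enter(&lock);
	if (--task_cnt == 0)
		cv_signal(&task_cv);
	mutex_exit(&lock);
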
1631 1573
1632 1574 /*
1633 1575 * ibtl_detach_all_clients:
1634 1576 *
1635 1577 * Return value - 0 for Success, 1 for Failure
1636 1578 *
1637 1579 * o First detach general clients.
1638 1580 *
1639 1581 * o Next detach these clients
1640 1582  *	IBDM
1641 1583  *	IBCM
1642 1584 *
1643 1585 * o Finally, detach this client
1644 1586 * IBMA
1645 1587 */
1646 1588 int
1647 1589 ibtl_detach_all_clients(ibtl_hca_devinfo_t *hca_devp)
1648 1590 {
1649 1591 ib_guid_t hcaguid = hca_devp->hd_hca_attr->hca_node_guid;
1650 1592 ibtl_hca_t *ibt_hca;
1651 1593 ibtl_clnt_t *clntp;
1652 1594 int retval;
1653 1595
1654 1596 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_detach_all_clients(%llX)",
1655 1597 hcaguid);
1656 1598
1657 1599 ASSERT(MUTEX_HELD(&ibtl_clnt_list_mutex));
1658 1600
1659 1601 while (hca_devp->hd_async_busy)
1660 1602 cv_wait(&hca_devp->hd_async_busy_cv, &ibtl_clnt_list_mutex);
1661 1603 hca_devp->hd_async_busy = 1;
1662 1604
1663 1605 /* First inform general clients asynchronously */
1664 1606 hca_devp->hd_async_event.ev_hca_guid = hcaguid;
1665 1607 hca_devp->hd_async_event.ev_fma_ena = 0;
1666 1608 hca_devp->hd_async_event.ev_chan_hdl = NULL;
1667 1609 hca_devp->hd_async_event.ev_cq_hdl = NULL;
1668 1610 hca_devp->hd_async_code = IBT_HCA_DETACH_EVENT;
1669 1611
1670 1612 ibt_hca = hca_devp->hd_clnt_list;
1671 1613 while (ibt_hca != NULL) {
1672 1614 clntp = ibt_hca->ha_clnt_devp;
1673 1615 if (IBTL_GENERIC_CLIENT(clntp)) {
1674 1616 ++ibt_hca->ha_clnt_devp->clnt_async_cnt;
1675 1617 mutex_enter(&ibtl_async_mutex);
1676 1618 ibt_hca->ha_async_cnt++;
1677 1619 mutex_exit(&ibtl_async_mutex);
1678 1620 hca_devp->hd_async_task_cnt++;
1679 1621
1680 1622 (void) taskq_dispatch(ibtl_async_taskq,
1681 1623 ibtl_hca_client_async_task, ibt_hca, TQ_SLEEP);
1682 1624 }
1683 1625 ibt_hca = ibt_hca->ha_clnt_link;
1684 1626 }
1685 1627
1686 1628 /* wait for all clients to complete */
1687 1629 while (hca_devp->hd_async_task_cnt != 0) {
1688 1630 cv_wait(&hca_devp->hd_async_task_cv, &ibtl_clnt_list_mutex);
1689 1631 }
1690 1632 	/* Go through the clients and check if any have not closed this HCA. */
1691 1633 retval = 0;
1692 1634 ibt_hca = hca_devp->hd_clnt_list;
1693 1635 while (ibt_hca != NULL) {
1694 1636 clntp = ibt_hca->ha_clnt_devp;
1695 1637 if (IBTL_GENERIC_CLIENT(clntp)) {
1696 1638 IBTF_DPRINTF_L2(ibtf_handlers,
1697 1639 "ibtl_detach_all_clients: "
1698 1640 "client '%s' failed to close the HCA.",
1699 1641 ibt_hca->ha_clnt_devp->clnt_modinfop->mi_clnt_name);
1700 1642 retval = 1;
1701 1643 }
1702 1644 ibt_hca = ibt_hca->ha_clnt_link;
1703 1645 }
1704 1646 if (retval == 1)
1705 1647 goto bailout;
1706 1648
1707 1649 /* Next inform IBDM asynchronously */
1708 1650 ibt_hca = hca_devp->hd_clnt_list;
1709 1651 while (ibt_hca != NULL) {
1710 1652 clntp = ibt_hca->ha_clnt_devp;
1711 1653 if (clntp->clnt_modinfop->mi_clnt_class == IBT_DM) {
1712 1654 ++ibt_hca->ha_clnt_devp->clnt_async_cnt;
1713 1655 mutex_enter(&ibtl_async_mutex);
1714 1656 ibt_hca->ha_async_cnt++;
1715 1657 mutex_exit(&ibtl_async_mutex);
1716 1658 hca_devp->hd_async_task_cnt++;
1717 1659
1718 1660 mutex_exit(&ibtl_clnt_list_mutex);
1719 1661 ibtl_hca_client_async_task(ibt_hca);
1720 1662 mutex_enter(&ibtl_clnt_list_mutex);
1721 1663 break;
1722 1664 }
1723 1665 ibt_hca = ibt_hca->ha_clnt_link;
1724 1666 }
1725 1667
1726 1668 /*
1727 1669 * Next inform IBCM.
1728 1670 * As IBCM doesn't perform ibt_open_hca(), IBCM will not be
1729 1671 * accessible via hca_devp->hd_clnt_list.
1730 1672 	 * ibtl_cm_async_handler will NOT be NULL if IBCM is registered.
1731 1673 */
1732 1674 if (ibtl_cm_async_handler) {
1733 1675 ibtl_tell_mgr(hca_devp, ibtl_cm_async_handler,
1734 1676 ibtl_cm_clnt_private);
1735 1677
1736 1678 /* wait for all tasks to complete */
1737 1679 while (hca_devp->hd_async_task_cnt != 0)
1738 1680 cv_wait(&hca_devp->hd_async_task_cv,
1739 1681 &ibtl_clnt_list_mutex);
1740 1682 }
1741 1683
1742 1684 	/* Go through the clients and check if any have not closed this HCA. */
1743 1685 retval = 0;
1744 1686 ibt_hca = hca_devp->hd_clnt_list;
1745 1687 while (ibt_hca != NULL) {
1746 1688 clntp = ibt_hca->ha_clnt_devp;
1747 1689 if (clntp->clnt_modinfop->mi_clnt_class != IBT_IBMA) {
1748 1690 IBTF_DPRINTF_L2(ibtf_handlers,
1749 1691 "ibtl_detach_all_clients: "
1750 1692 "client '%s' failed to close the HCA.",
1751 1693 ibt_hca->ha_clnt_devp->clnt_modinfop->mi_clnt_name);
1752 1694 retval = 1;
1753 1695 }
1754 1696 ibt_hca = ibt_hca->ha_clnt_link;
1755 1697 }
1756 1698 if (retval == 1)
1757 1699 goto bailout;
1758 1700
1759 1701 /* Finally, inform IBMA */
1760 1702 ibt_hca = hca_devp->hd_clnt_list;
1761 1703 while (ibt_hca != NULL) {
1762 1704 clntp = ibt_hca->ha_clnt_devp;
1763 1705 if (clntp->clnt_modinfop->mi_clnt_class == IBT_IBMA) {
1764 1706 ++ibt_hca->ha_clnt_devp->clnt_async_cnt;
1765 1707 mutex_enter(&ibtl_async_mutex);
1766 1708 ibt_hca->ha_async_cnt++;
1767 1709 mutex_exit(&ibtl_async_mutex);
1768 1710 hca_devp->hd_async_task_cnt++;
1769 1711
1770 1712 (void) taskq_dispatch(ibtl_async_taskq,
1771 1713 ibtl_hca_client_async_task, ibt_hca, TQ_SLEEP);
1772 1714 } else
1773 1715 IBTF_DPRINTF_L2(ibtf_handlers,
1774 1716 "ibtl_detach_all_clients: "
1775 1717 "client '%s' is unexpectedly on the client list",
1776 1718 ibt_hca->ha_clnt_devp->clnt_modinfop->mi_clnt_name);
1777 1719 ibt_hca = ibt_hca->ha_clnt_link;
1778 1720 }
1779 1721
1780 1722 /* wait for IBMA to complete */
1781 1723 while (hca_devp->hd_async_task_cnt != 0) {
1782 1724 cv_wait(&hca_devp->hd_async_task_cv, &ibtl_clnt_list_mutex);
1783 1725 }
1784 1726
1785 1727 /* Check if this HCA's client list is empty. */
1786 1728 ibt_hca = hca_devp->hd_clnt_list;
1787 1729 if (ibt_hca != NULL) {
1788 1730 IBTF_DPRINTF_L2(ibtf_handlers,
1789 1731 "ibtl_detach_all_clients: "
1790 1732 "client '%s' failed to close the HCA.",
1791 1733 ibt_hca->ha_clnt_devp->clnt_modinfop->mi_clnt_name);
1792 1734 retval = 1;
1793 1735 } else
1794 1736 retval = 0;
1795 1737
1796 1738 bailout:
1797 1739 if (retval) {
1798 1740 hca_devp->hd_state = IBTL_HCA_DEV_ATTACHED; /* fix hd_state */
1799 1741 mutex_exit(&ibtl_clnt_list_mutex);
1800 1742 ibtl_announce_new_hca(hca_devp);
1801 1743 mutex_enter(&ibtl_clnt_list_mutex);
1802 1744 } else {
1803 1745 hca_devp->hd_async_busy = 0;
1804 1746 cv_broadcast(&hca_devp->hd_async_busy_cv);
1805 1747 }
1806 1748
1807 1749 return (retval);
1808 1750 }
1809 1751
1810 1752 void
1811 1753 ibtl_free_clnt_async_check(ibtl_clnt_t *clntp)
1812 1754 {
1813 1755 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_clnt_async_check(%p)", clntp);
1814 1756
1815 1757 ASSERT(MUTEX_HELD(&ibtl_clnt_list_mutex));
1816 1758
1817 1759 /* wait for all asyncs based on "ibtl_clnt_list" to complete */
1818 1760 while (clntp->clnt_async_cnt != 0) {
1819 1761 cv_wait(&ibtl_clnt_cv, &ibtl_clnt_list_mutex);
1820 1762 }
1821 1763 }
1822 1764
1823 1765 static void
1824 1766 ibtl_dec_clnt_async_cnt(ibtl_clnt_t *clntp)
1825 1767 {
1826 1768 mutex_enter(&ibtl_clnt_list_mutex);
1827 1769 if (--clntp->clnt_async_cnt == 0) {
1828 1770 cv_broadcast(&ibtl_clnt_cv);
1829 1771 }
1830 1772 mutex_exit(&ibtl_clnt_list_mutex);
1831 1773 }
1832 1774
1833 1775 static void
1834 1776 ibtl_inc_clnt_async_cnt(ibtl_clnt_t *clntp)
1835 1777 {
1836 1778 mutex_enter(&ibtl_clnt_list_mutex);
1837 1779 ++clntp->clnt_async_cnt;
1838 1780 mutex_exit(&ibtl_clnt_list_mutex);
1839 1781 }
1840 1782
1841 1783
1842 1784 /*
1843 1785 * Functions and data structures to inform clients that a notification
1844 1786 * has occurred about Multicast Groups that might interest them.
1845 1787 */
1846 1788 struct ibtl_sm_notice {
1847 1789 ibt_clnt_hdl_t np_ibt_hdl;
1848 1790 ib_gid_t np_sgid;
1849 1791 ibt_subnet_event_code_t np_code;
1850 1792 ibt_subnet_event_t np_event;
1851 1793 };
1852 1794
1853 1795 static void
1854 1796 ibtl_sm_notice_task(void *arg)
1855 1797 {
1856 1798 struct ibtl_sm_notice *noticep = (struct ibtl_sm_notice *)arg;
1857 1799 ibt_clnt_hdl_t ibt_hdl = noticep->np_ibt_hdl;
1858 1800 ibt_sm_notice_handler_t sm_notice_handler;
1859 1801
1860 1802 sm_notice_handler = ibt_hdl->clnt_sm_trap_handler;
1861 1803 if (sm_notice_handler != NULL)
1862 1804 sm_notice_handler(ibt_hdl->clnt_sm_trap_handler_arg,
1863 1805 		    noticep->np_sgid, noticep->np_code, &noticep->np_event);
1864 1806 kmem_free(noticep, sizeof (*noticep));
1865 1807 ibtl_dec_clnt_async_cnt(ibt_hdl);
1866 1808 }
1867 1809
1868 1810 /*
1869 1811 * Inform the client that MCG notices are not working at this time.
1870 1812 */
1871 1813 void
1872 1814 ibtl_cm_sm_notice_init_failure(ibtl_cm_sm_init_fail_t *ifail)
1873 1815 {
1874 1816 ibt_clnt_hdl_t ibt_hdl = ifail->smf_ibt_hdl;
1875 1817 struct ibtl_sm_notice *noticep;
1876 1818 ib_gid_t *sgidp = &ifail->smf_sgid[0];
1877 1819 int i;
1878 1820
1879 1821 for (i = 0; i < ifail->smf_num_sgids; i++) {
1880 - _NOTE(NO_COMPETING_THREADS_NOW)
1881 1822 noticep = kmem_zalloc(sizeof (*noticep), KM_SLEEP);
1882 1823 noticep->np_ibt_hdl = ibt_hdl;
1883 1824 noticep->np_sgid = *sgidp++;
1884 1825 noticep->np_code = IBT_SM_EVENT_UNAVAILABLE;
1885 -#ifndef lint
1886 - _NOTE(COMPETING_THREADS_NOW)
1887 -#endif
1888 1826 ibtl_inc_clnt_async_cnt(ibt_hdl);
1889 1827 (void) taskq_dispatch(ibtl_async_taskq,
1890 1828 ibtl_sm_notice_task, noticep, TQ_SLEEP);
1891 1829 }
1892 1830 }
1893 1831
1894 1832 /*
1895 1833 * Inform all clients of the event.
1896 1834 */
1897 1835 void
1898 1836 ibtl_cm_sm_notice_handler(ib_gid_t sgid, ibt_subnet_event_code_t code,
1899 1837 ibt_subnet_event_t *event)
1900 1838 {
1901 - _NOTE(NO_COMPETING_THREADS_NOW)
1902 1839 struct ibtl_sm_notice *noticep;
1903 1840 ibtl_clnt_t *clntp;
1904 1841
1905 1842 mutex_enter(&ibtl_clnt_list_mutex);
1906 1843 clntp = ibtl_clnt_list;
1907 1844 while (clntp != NULL) {
1908 1845 if (clntp->clnt_sm_trap_handler) {
1909 1846 noticep = kmem_zalloc(sizeof (*noticep), KM_SLEEP);
1910 1847 noticep->np_ibt_hdl = clntp;
1911 1848 noticep->np_sgid = sgid;
1912 1849 noticep->np_code = code;
1913 1850 noticep->np_event = *event;
1914 1851 ++clntp->clnt_async_cnt;
1915 1852 (void) taskq_dispatch(ibtl_async_taskq,
1916 1853 ibtl_sm_notice_task, noticep, TQ_SLEEP);
1917 1854 }
1918 1855 clntp = clntp->clnt_list_link;
1919 1856 }
1920 1857 mutex_exit(&ibtl_clnt_list_mutex);
1921 -#ifndef lint
1922 - _NOTE(COMPETING_THREADS_NOW)
1923 -#endif
1924 1858 }
1925 1859
1926 1860 /*
1927 1861 * Record the handler for this client.
1928 1862 */
1929 1863 void
1930 1864 ibtl_cm_set_sm_notice_handler(ibt_clnt_hdl_t ibt_hdl,
1931 1865 ibt_sm_notice_handler_t sm_notice_handler, void *private)
1932 1866 {
1933 - _NOTE(NO_COMPETING_THREADS_NOW)
1934 1867 ibt_hdl->clnt_sm_trap_handler = sm_notice_handler;
1935 1868 ibt_hdl->clnt_sm_trap_handler_arg = private;
1936 -#ifndef lint
1937 - _NOTE(COMPETING_THREADS_NOW)
1938 -#endif
1939 1869 }
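
ibtl_sm_notice_task() invokes the recorded handler as (arg, sgid, code,
&event), so a handler recorded through the function above has that
signature. A hypothetical client-side sketch (my_sm_handler and my_ctx are
illustrative; clients would normally register through the exported ibt_*
wrapper rather than calling this recorder directly):

	static void
	my_sm_handler(void *arg, ib_gid_t sgid,
	    ibt_subnet_event_code_t code, ibt_subnet_event_t *event)
	{
		if (code == IBT_SM_EVENT_UNAVAILABLE)
			cmn_err(CE_NOTE, "SM notices are unavailable");
	}

	ibtl_cm_set_sm_notice_handler(ibt_hdl, my_sm_handler, my_ctx);
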
1940 1870
1941 1871
1942 1872 /*
1943 1873 * ibtl_another_cq_handler_in_thread()
1944 1874 *
1945 1875 * Conditionally increase the number of cq_threads.
1946 1876  * The number of threads grows based on the number of CQs using threads.
1947 1877 *
1948 1878 * The table below controls the number of threads as follows:
1949 1879 *
1950 1880 * Number of CQs Number of cq_threads
1951 1881 * 0 0
1952 1882 * 1 1
1953 1883 * 2-3 2
1954 1884 * 4-5 3
1955 1885 * 6-9 4
1956 1886 * 10-15 5
1957 1887 * 16-23 6
1958 1888 * 24-31 7
1959 1889 * 32+ 8
1960 1890 */
1961 1891
1962 1892 #define IBTL_CQ_MAXTHREADS 8
1963 1893 static uint8_t ibtl_cq_scaling[IBTL_CQ_MAXTHREADS] = {
1964 1894 1, 2, 4, 6, 10, 16, 24, 32
1965 1895 };
1966 1896
1967 1897 static kt_did_t ibtl_cq_did[IBTL_CQ_MAXTHREADS];
1968 1898
1969 1899 void
1970 1900 ibtl_another_cq_handler_in_thread(void)
1971 1901 {
1972 1902 kthread_t *t;
1973 1903 int my_idx;
1974 1904
1975 1905 mutex_enter(&ibtl_cq_mutex);
1976 1906 if ((ibtl_cq_threads == IBTL_CQ_MAXTHREADS) ||
1977 1907 (++ibtl_cqs_using_threads < ibtl_cq_scaling[ibtl_cq_threads])) {
1978 1908 mutex_exit(&ibtl_cq_mutex);
1979 1909 return;
1980 1910 }
1981 1911 my_idx = ibtl_cq_threads++;
1982 1912 mutex_exit(&ibtl_cq_mutex);
1983 1913 t = thread_create(NULL, 0, ibtl_cq_thread, NULL, 0, &p0, TS_RUN,
1984 1914 ibtl_pri - 1);
1985 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
1986 1915 ibtl_cq_did[my_idx] = t->t_did; /* save for thread_join() */
1987 - _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
1988 1916 }
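
Tracing the check above against the scaling table, assuming two threads are
already running and two CQs are using them:

	3rd CQ: ++ibtl_cqs_using_threads == 3 < ibtl_cq_scaling[2] (4) -> return
	4th CQ: ++ibtl_cqs_using_threads == 4 < 4 is false -> my_idx = 2, 3 threads

which matches the "4-5 CQs, 3 cq_threads" row of the table.
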
1989 1917
1990 1918 void
1991 1919 ibtl_thread_init(void)
1992 1920 {
1993 1921 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_init()");
1994 1922
1995 1923 mutex_init(&ibtl_async_mutex, NULL, MUTEX_DEFAULT, NULL);
1996 1924 cv_init(&ibtl_async_cv, NULL, CV_DEFAULT, NULL);
1997 1925 cv_init(&ibtl_clnt_cv, NULL, CV_DEFAULT, NULL);
1998 1926
1999 1927 mutex_init(&ibtl_cq_mutex, NULL, MUTEX_DEFAULT, NULL);
2000 1928 cv_init(&ibtl_cq_cv, NULL, CV_DEFAULT, NULL);
2001 1929 }
2002 1930
2003 1931 void
2004 1932 ibtl_thread_init2(void)
2005 1933 {
2006 1934 int i;
2007 1935 static int initted = 0;
2008 1936 kthread_t *t;
2009 1937
2010 1938 mutex_enter(&ibtl_async_mutex);
2011 1939 if (initted == 1) {
2012 1940 mutex_exit(&ibtl_async_mutex);
2013 1941 return;
2014 1942 }
2015 1943 initted = 1;
2016 1944 mutex_exit(&ibtl_async_mutex);
2017 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_async_did))
2018 1945 ibtl_async_did = kmem_zalloc(ibtl_async_thread_init * sizeof (kt_did_t),
2019 1946 KM_SLEEP);
2020 1947
2021 1948 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_init2()");
2022 1949
2023 1950 for (i = 0; i < ibtl_async_thread_init; i++) {
2024 1951 t = thread_create(NULL, 0, ibtl_async_thread, NULL, 0, &p0,
2025 1952 TS_RUN, ibtl_pri - 1);
2026 1953 ibtl_async_did[i] = t->t_did; /* thread_join() */
2027 1954 }
2028 - _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_async_did))
2029 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
2030 1955 for (i = 0; i < ibtl_cq_threads; i++) {
2031 1956 t = thread_create(NULL, 0, ibtl_cq_thread, NULL, 0, &p0,
2032 1957 TS_RUN, ibtl_pri - 1);
2033 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
2034 1958 ibtl_cq_did[i] = t->t_did; /* save for thread_join() */
2035 - _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
2036 1959 }
2037 - _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
2038 1960 }
2039 1961
2040 1962 void
2041 1963 ibtl_thread_fini(void)
2042 1964 {
2043 1965 int i;
2044 1966
2045 1967 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_fini()");
2046 1968
2047 1969 /* undo the work done by ibtl_thread_init() */
2048 1970
2049 1971 mutex_enter(&ibtl_cq_mutex);
2050 1972 ibtl_cq_thread_exit = IBTL_THREAD_EXIT;
2051 1973 cv_broadcast(&ibtl_cq_cv);
2052 1974 mutex_exit(&ibtl_cq_mutex);
2053 1975
2054 1976 mutex_enter(&ibtl_async_mutex);
2055 1977 ibtl_async_thread_exit = IBTL_THREAD_EXIT;
2056 1978 cv_broadcast(&ibtl_async_cv);
2057 1979 mutex_exit(&ibtl_async_mutex);
2058 1980
2059 - _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
2060 1981 for (i = 0; i < ibtl_cq_threads; i++)
2061 1982 thread_join(ibtl_cq_did[i]);
2062 - _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
2063 1983
2064 1984 if (ibtl_async_did) {
2065 1985 for (i = 0; i < ibtl_async_thread_init; i++)
2066 1986 thread_join(ibtl_async_did[i]);
2067 1987
2068 1988 kmem_free(ibtl_async_did,
2069 1989 ibtl_async_thread_init * sizeof (kt_did_t));
2070 1990 }
2071 1991 mutex_destroy(&ibtl_cq_mutex);
2072 1992 cv_destroy(&ibtl_cq_cv);
2073 1993
2074 1994 mutex_destroy(&ibtl_async_mutex);
2075 1995 cv_destroy(&ibtl_async_cv);
2076 1996 cv_destroy(&ibtl_clnt_cv);
2077 1997 }
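
The shutdown above depends on each worker rechecking its exit flag whenever
it wakes. The ibtl_cq_thread()/ibtl_async_thread() loops are not part of
this hunk, but the usual service-loop shape, sketched under that assumption
with generic names:

	for (;;) {
		mutex_enter(&lock);
		while (work_list == NULL && exit_flag == 0)
			cv_wait(&cv, &lock);
		if (exit_flag) {
			mutex_exit(&lock);
			break;			/* thread_exit() follows */
		}
		/* ... pull an entry off work_list and process it ... */
		mutex_exit(&lock);
	}
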
2078 1998
2079 1999 /* ARGSUSED */
2080 2000 ibt_status_t ibtl_dummy_node_info_cb(ib_guid_t hca_guid, uint8_t port,
2081 2001 ib_lid_t lid, ibt_node_info_t *node_info)
2082 2002 {
2083 2003 return (IBT_SUCCESS);
2084 2004 }