141 static kcondvar_t ibtl_cq_cv;
142 static ibtl_cq_t *ibtl_cq_list_start, *ibtl_cq_list_end;
143
144 static int ibtl_cq_threads = 0; /* total # of cq threads */
145 static int ibtl_cqs_using_threads = 0; /* total # of cqs using threads */
146 static int ibtl_cq_thread_exit = 0; /* set if/when thread(s) should exit */
147
148 /* value used to tell IBTL threads to exit */
149 #define IBTL_THREAD_EXIT 0x1b7fdead /* IBTF DEAD */
150 /* Cisco Topspin Vendor ID for Rereg hack */
151 #define IBT_VENDOR_CISCO 0x05ad
152
153 int ibtl_eec_not_supported = 1;
154
155 char *ibtl_last_client_name; /* may help debugging */
156 typedef ibt_status_t (*ibtl_node_info_cb_t)(ib_guid_t, uint8_t, ib_lid_t,
157 ibt_node_info_t *);
158
159 ibtl_node_info_cb_t ibtl_node_info_cb;
160
161 _NOTE(LOCK_ORDER(ibtl_clnt_list_mutex ibtl_async_mutex))
162
163 void
164 ibtl_cm_set_node_info_cb(ibt_status_t (*node_info_cb)(ib_guid_t, uint8_t,
165 ib_lid_t, ibt_node_info_t *))
166 {
167 mutex_enter(&ibtl_clnt_list_mutex);
168 ibtl_node_info_cb = node_info_cb;
169 mutex_exit(&ibtl_clnt_list_mutex);
170 }
171
172 /*
173 * ibc_async_handler()
174 *
175 * Asynchronous Event/Error Handler.
176 *
177  *	This is the function called by HCA drivers to post the various async
178  *	events and errors mentioned in the IB architecture spec.  See
179  *	ibtl_types.h for additional details.
180 *
181 * This function marks the pertinent IBTF object with the async_code,
182 * and queues the object for handling by an ibtl_async_thread. If
399 mutex_exit(&ibtl_async_mutex);
400 }
401
402
403 /* Finally, make the async call to the client. */
404
405 static void
406 ibtl_async_client_call(ibtl_hca_t *ibt_hca, ibt_async_code_t code,
407 ibt_async_event_t *event_p)
408 {
409 ibtl_clnt_t *clntp;
410 void *client_private;
411 ibt_async_handler_t async_handler;
412 char *client_name;
413
414 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call(%p, 0x%x, %p)",
415 ibt_hca, code, event_p);
416
417 clntp = ibt_hca->ha_clnt_devp;
418
419 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_last_client_name))
420 /* Record who is being called (just a debugging aid) */
421 ibtl_last_client_name = client_name = clntp->clnt_name;
422 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_last_client_name))
423
424 client_private = clntp->clnt_private;
425 async_handler = clntp->clnt_modinfop->mi_async_handler;
426
427 if (code & (IBT_EVENT_COM_EST_QP | IBT_EVENT_COM_EST_EEC)) {
428 mutex_enter(&ibtl_clnt_list_mutex);
429 async_handler = ibtl_cm_async_handler;
430 client_private = ibtl_cm_clnt_private;
431 mutex_exit(&ibtl_clnt_list_mutex);
432 ibt_hca = NULL;
433 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call: "
434 "calling CM for COM_EST");
435 } else {
436 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call: "
437 "calling client '%s'", client_name);
438 }
439 if (async_handler != NULL)
440 async_handler(client_private, ibt_hca, code, event_p);
441 else
442 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call: "
516 mutex_exit(&ibtl_async_mutex);
517 }
518 }
519 kmem_free(mgrp, sizeof (*mgrp));
520
521 mutex_enter(&ibtl_clnt_list_mutex);
522 if (--hca_devp->hd_async_task_cnt == 0)
523 cv_signal(&hca_devp->hd_async_task_cv);
524 mutex_exit(&ibtl_clnt_list_mutex);
525 }
526
527 static void
528 ibtl_cm_get_node_info(ibtl_hca_devinfo_t *hca_devp,
529 ibt_async_handler_t async_handler)
530 {
531 struct ibtl_mgr_s *mgrp;
532
533 if (async_handler == NULL)
534 return;
535
536 _NOTE(NO_COMPETING_THREADS_NOW)
537 mgrp = kmem_alloc(sizeof (*mgrp), KM_SLEEP);
538 mgrp->mgr_hca_devp = hca_devp;
539 mgrp->mgr_async_handler = async_handler;
540 mgrp->mgr_clnt_private = NULL;
541 hca_devp->hd_async_task_cnt++;
542
543 (void) taskq_dispatch(ibtl_async_taskq,
544 ibt_cisco_embedded_sm_rereg_fix, mgrp, TQ_SLEEP);
545 #ifndef lint
546 _NOTE(COMPETING_THREADS_NOW)
547 #endif
548 }
549
550 static void
551 ibtl_tell_mgr(ibtl_hca_devinfo_t *hca_devp, ibt_async_handler_t async_handler,
552 void *clnt_private)
553 {
554 struct ibtl_mgr_s *mgrp;
555
556 if (async_handler == NULL)
557 return;
558
559 _NOTE(NO_COMPETING_THREADS_NOW)
560 mgrp = kmem_alloc(sizeof (*mgrp), KM_SLEEP);
561 mgrp->mgr_hca_devp = hca_devp;
562 mgrp->mgr_async_handler = async_handler;
563 mgrp->mgr_clnt_private = clnt_private;
564 hca_devp->hd_async_task_cnt++;
565
566 (void) taskq_dispatch(ibtl_async_taskq, ibtl_do_mgr_async_task, mgrp,
567 TQ_SLEEP);
568 #ifndef lint
569 _NOTE(COMPETING_THREADS_NOW)
570 #endif
571 }
572
573 /*
574 * Per client-device asyncs for HCA level events. Call each client that is
575 * using the HCA for the event recorded in the ibtl_hca_devinfo_t.
576 */
577 static void
578 ibtl_hca_client_async_task(void *arg)
579 {
580 ibtl_hca_t *ibt_hca = (ibtl_hca_t *)arg;
581 ibtl_hca_devinfo_t *hca_devp = ibt_hca->ha_hca_devp;
582 ibtl_clnt_t *clntp = ibt_hca->ha_clnt_devp;
583 ibt_async_event_t async_event;
584
585 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_hca_client_async_task(%p, 0x%x)",
586 ibt_hca, hca_devp->hd_async_code);
587
588 bcopy(&hca_devp->hd_async_event, &async_event, sizeof (async_event));
589 ibtl_async_client_call(ibt_hca, hca_devp->hd_async_code, &async_event);
590
997 }
998 ibtl_eec->eec_async_codes &= ~code;
999
1000 if (code) {
1001 mutex_exit(&ibtl_async_mutex);
1002 ibtl_async_client_call(ibtl_eec->eec_hca,
1003 code, &async_event);
1004 mutex_enter(&ibtl_async_mutex);
1005 }
1006
1007 if (ibtl_eec->eec_async_flags & IBTL_ASYNC_FREE_OBJECT) {
1008 mutex_exit(&ibtl_async_mutex);
1009 kmem_free(ibtl_eec, sizeof (struct ibtl_eec_s));
1010 mutex_enter(&ibtl_async_mutex);
1011 return;
1012 }
1013 }
1014 ibtl_eec->eec_async_flags &= ~IBTL_ASYNC_PENDING;
1015 }
1016
1017 #ifdef __lock_lint
1018 kmutex_t cpr_mutex;
1019 #endif
1020
1021 /*
1022 * Loop forever, calling async_handlers until all of the async lists
1023 * are empty.
1024 */
1025
1026 static void
1027 ibtl_async_thread(void)
1028 {
1029 #ifndef __lock_lint
1030 kmutex_t cpr_mutex;
1031 #endif
1032 callb_cpr_t cprinfo;
1033
1034 _NOTE(MUTEX_PROTECTS_DATA(cpr_mutex, cprinfo))
1035 _NOTE(NO_COMPETING_THREADS_NOW)
1036 mutex_init(&cpr_mutex, NULL, MUTEX_DRIVER, NULL);
1037 CALLB_CPR_INIT(&cprinfo, &cpr_mutex, callb_generic_cpr,
1038 "ibtl_async_thread");
1039 #ifndef lint
1040 _NOTE(COMPETING_THREADS_NOW)
1041 #endif
1042
1043 mutex_enter(&ibtl_async_mutex);
1044
1045 for (;;) {
1046 if (ibtl_async_hca_list_start) {
1047 ibtl_hca_devinfo_t *hca_devp;
1048
1049 /* remove first entry from list */
1050 hca_devp = ibtl_async_hca_list_start;
1051 ibtl_async_hca_list_start = hca_devp->hd_async_link;
1052 hca_devp->hd_async_link = NULL;
1053 if (ibtl_async_hca_list_start == NULL)
1054 ibtl_async_hca_list_end = NULL;
1055
1056 ibtl_do_hca_asyncs(hca_devp);
1057
1058 } else if (ibtl_async_qp_list_start) {
1059 ibtl_qp_t *ibtl_qp;
1060
1061 /* remove from list */
1105
1106 } else {
1107 if (ibtl_async_thread_exit == IBTL_THREAD_EXIT)
1108 break;
1109 mutex_enter(&cpr_mutex);
1110 CALLB_CPR_SAFE_BEGIN(&cprinfo);
1111 mutex_exit(&cpr_mutex);
1112
1113 cv_wait(&ibtl_async_cv, &ibtl_async_mutex);
1114
1115 mutex_exit(&ibtl_async_mutex);
1116 mutex_enter(&cpr_mutex);
1117 CALLB_CPR_SAFE_END(&cprinfo, &cpr_mutex);
1118 mutex_exit(&cpr_mutex);
1119 mutex_enter(&ibtl_async_mutex);
1120 }
1121 }
1122
1123 mutex_exit(&ibtl_async_mutex);
1124
1125 #ifndef __lock_lint
1126 mutex_enter(&cpr_mutex);
1127 CALLB_CPR_EXIT(&cprinfo);
1128 #endif
1129 mutex_destroy(&cpr_mutex);
1130 }
1131
1132
1133 void
1134 ibtl_free_qp_async_check(ibtl_qp_t *ibtl_qp)
1135 {
1136 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_qp_async_check(%p)", ibtl_qp);
1137
1138 mutex_enter(&ibtl_async_mutex);
1139
1140 /*
1141 * If there is an active async, mark this object to be freed
1142 * by the async_thread when it's done.
1143 */
1144 if (ibtl_qp->qp_async_flags & IBTL_ASYNC_PENDING) {
1145 ibtl_qp->qp_async_flags |= IBTL_ASYNC_FREE_OBJECT;
1146 mutex_exit(&ibtl_async_mutex);
1147 } else { /* free the object now */
1148 mutex_exit(&ibtl_async_mutex);
1229 }
1230 }
1231
1232 /*
1233 * Completion Queue Handling.
1234 *
1235 * A completion queue can be handled through a simple callback
1236 * at interrupt level, or it may be queued for an ibtl_cq_thread
1237 * to handle. The latter is chosen during ibt_alloc_cq when the
1238  *	IBT_CQ_HANDLER_IN_THREAD flag is specified.
1239 */
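
/*
 * Illustrative sketch (not part of the original file): how an IBTF client
 * might request in-thread completion handling.  This assumes the standard
 * client interfaces ibt_alloc_cq()/ibt_set_cq_handler() and the
 * IBT_CQ_HANDLER_IN_THREAD cq_flags value; the handler body, wrapper name,
 * and CQ size below are made-up example values.
 */
static void
example_cq_handler(ibt_cq_hdl_t cq_hdl, void *arg)
{
	/* the client would drain the CQ here, e.g. via ibt_poll_cq() */
}

static ibt_status_t
example_alloc_threaded_cq(ibt_hca_hdl_t hca_hdl, ibt_cq_hdl_t *cq_hdl_p)
{
	ibt_cq_attr_t	cq_attr;
	uint_t		real_size;
	ibt_status_t	status;

	bzero(&cq_attr, sizeof (cq_attr));
	cq_attr.cq_size = 128;			/* example size */
	cq_attr.cq_flags = IBT_CQ_HANDLER_IN_THREAD;

	status = ibt_alloc_cq(hca_hdl, &cq_attr, cq_hdl_p, &real_size);
	if (status != IBT_SUCCESS)
		return (status);

	/* completions are now delivered from an ibtl_cq_thread */
	ibt_set_cq_handler(*cq_hdl_p, example_cq_handler, NULL);
	return (IBT_SUCCESS);
}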
1240
1241 static void
1242 ibtl_cq_handler_call(ibtl_cq_t *ibtl_cq)
1243 {
1244 ibt_cq_handler_t cq_handler;
1245 void *arg;
1246
1247 IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_cq_handler_call(%p)", ibtl_cq);
1248
1249 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ibtl_cq))
1250 cq_handler = ibtl_cq->cq_comp_handler;
1251 arg = ibtl_cq->cq_arg;
1252 if (cq_handler != NULL)
1253 cq_handler(ibtl_cq, arg);
1254 else
1255 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_cq_handler_call: "
1256 "no cq_handler for cq %p", ibtl_cq);
1257 }
1258
1259 /*
1260 * Before ibt_free_cq can continue, we need to ensure no more cq_handler
1261 * callbacks can occur. When we get the mutex, we know there are no
1262 * outstanding cq_handler callbacks. We set the cq_handler to NULL to
1263 * prohibit future callbacks.
1264 */
1265 void
1266 ibtl_free_cq_check(ibtl_cq_t *ibtl_cq)
1267 {
1268 mutex_enter(&ibtl_cq->cq_mutex);
1269 ibtl_cq->cq_comp_handler = NULL;
1271 if (ibtl_cq->cq_in_thread) {
1272 mutex_enter(&ibtl_cq_mutex);
1273 --ibtl_cqs_using_threads;
1274 while (ibtl_cq->cq_impl_flags & IBTL_CQ_PENDING) {
1275 ibtl_cq->cq_impl_flags &= ~IBTL_CQ_CALL_CLIENT;
1276 ibtl_cq->cq_impl_flags |= IBTL_CQ_FREE;
1277 cv_wait(&ibtl_cq_cv, &ibtl_cq_mutex);
1278 }
1279 mutex_exit(&ibtl_cq_mutex);
1280 }
1281 }
1282
1283 /*
1284 * Loop forever, calling cq_handlers until the cq list
1285 * is empty.
1286 */
1287
1288 static void
1289 ibtl_cq_thread(void)
1290 {
1291 #ifndef __lock_lint
1292 kmutex_t cpr_mutex;
1293 #endif
1294 callb_cpr_t cprinfo;
1295
1296 _NOTE(MUTEX_PROTECTS_DATA(cpr_mutex, cprinfo))
1297 _NOTE(NO_COMPETING_THREADS_NOW)
1298 mutex_init(&cpr_mutex, NULL, MUTEX_DRIVER, NULL);
1299 CALLB_CPR_INIT(&cprinfo, &cpr_mutex, callb_generic_cpr,
1300 "ibtl_cq_thread");
1301 #ifndef lint
1302 _NOTE(COMPETING_THREADS_NOW)
1303 #endif
1304
1305 mutex_enter(&ibtl_cq_mutex);
1306
1307 for (;;) {
1308 if (ibtl_cq_list_start) {
1309 ibtl_cq_t *ibtl_cq;
1310
1311 ibtl_cq = ibtl_cq_list_start;
1312 ibtl_cq_list_start = ibtl_cq->cq_link;
1313 ibtl_cq->cq_link = NULL;
1314 if (ibtl_cq == ibtl_cq_list_end)
1315 ibtl_cq_list_end = NULL;
1316
1317 while (ibtl_cq->cq_impl_flags & IBTL_CQ_CALL_CLIENT) {
1318 ibtl_cq->cq_impl_flags &= ~IBTL_CQ_CALL_CLIENT;
1319 mutex_exit(&ibtl_cq_mutex);
1320 ibtl_cq_handler_call(ibtl_cq);
1321 mutex_enter(&ibtl_cq_mutex);
1322 }
1323 ibtl_cq->cq_impl_flags &= ~IBTL_CQ_PENDING;
1324 if (ibtl_cq->cq_impl_flags & IBTL_CQ_FREE)
1325 cv_broadcast(&ibtl_cq_cv);
1326 } else {
1327 if (ibtl_cq_thread_exit == IBTL_THREAD_EXIT)
1328 break;
1329 mutex_enter(&cpr_mutex);
1330 CALLB_CPR_SAFE_BEGIN(&cprinfo);
1331 mutex_exit(&cpr_mutex);
1332
1333 cv_wait(&ibtl_cq_cv, &ibtl_cq_mutex);
1334
1335 mutex_exit(&ibtl_cq_mutex);
1336 mutex_enter(&cpr_mutex);
1337 CALLB_CPR_SAFE_END(&cprinfo, &cpr_mutex);
1338 mutex_exit(&cpr_mutex);
1339 mutex_enter(&ibtl_cq_mutex);
1340 }
1341 }
1342
1343 mutex_exit(&ibtl_cq_mutex);
1344 #ifndef __lock_lint
1345 mutex_enter(&cpr_mutex);
1346 CALLB_CPR_EXIT(&cprinfo);
1347 #endif
1348 mutex_destroy(&cpr_mutex);
1349 }
1350
1351
1352 /*
1353 * ibc_cq_handler()
1354 *
1355 * Completion Queue Notification Handler.
1356 *
1357 */
1358 /*ARGSUSED*/
1359 void
1360 ibc_cq_handler(ibc_clnt_hdl_t ibc_hdl, ibt_cq_hdl_t ibtl_cq)
1361 {
1362 IBTF_DPRINTF_L4(ibtf_handlers, "ibc_cq_handler(%p, %p)",
1363 ibc_hdl, ibtl_cq);
1364
1365 if (ibtl_cq->cq_in_thread) {
1366 mutex_enter(&ibtl_cq_mutex);
1367 ibtl_cq->cq_impl_flags |= IBTL_CQ_CALL_CLIENT;
1416 *
1417 * arg The IBTF client private argument to be passed
1418 * back to the client when calling the CQ
1419 * completion handler.
1420 *
1421 * Completion notifications are disabled by setting the completion
1422 * handler to NULL. When setting the handler to NULL, no additional
1423 * calls to the previous CQ handler will be initiated, but there may
1424 * be one in progress.
1425 *
1426 * This function does not otherwise change the state of previous
1427 * calls to ibt_enable_cq_notify().
1428 */
1429 void
1430 ibt_set_cq_handler(ibt_cq_hdl_t ibtl_cq, ibt_cq_handler_t completion_handler,
1431 void *arg)
1432 {
1433 IBTF_DPRINTF_L3(ibtf_handlers, "ibt_set_cq_handler(%p, %p, %p)",
1434 ibtl_cq, completion_handler, arg);
1435
1436 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ibtl_cq))
1437 ibtl_cq->cq_comp_handler = completion_handler;
1438 ibtl_cq->cq_arg = arg;
1439 }
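
/*
 * Illustrative sketch (not part of the original file): tearing down a CQ.
 * Per the comment above, setting the handler to NULL first ensures no new
 * completion callbacks are initiated before the CQ is freed.  ibt_free_cq()
 * is the standard IBTF client call; the wrapper name is made up.
 */
static ibt_status_t
example_teardown_cq(ibt_cq_hdl_t cq_hdl)
{
	/* disable completion callbacks (one may still be in progress) */
	ibt_set_cq_handler(cq_hdl, NULL, NULL);

	return (ibt_free_cq(cq_hdl));
}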
1440
1441
1442 /*
1443 * Inform IBT clients about New HCAs.
1444 *
1445  * We use taskqs to allow simultaneous notification, with sleeping allowed.
1446  * Since a taskq function takes only one argument, we define a structure
1447  * so that more than one argument can be passed in.
1448 */
1449
1450 struct ibtl_new_hca_s {
1451 ibtl_clnt_t *nh_clntp;
1452 ibtl_hca_devinfo_t *nh_hca_devp;
1453 ibt_async_code_t nh_code;
1454 };
1455
1456 static void
1457 ibtl_tell_client_about_new_hca(void *arg)
1458 {
1459 struct ibtl_new_hca_s *new_hcap = (struct ibtl_new_hca_s *)arg;
1460 ibtl_clnt_t *clntp = new_hcap->nh_clntp;
1461 ibt_async_event_t async_event;
1462 ibtl_hca_devinfo_t *hca_devp = new_hcap->nh_hca_devp;
1463
1464 bzero(&async_event, sizeof (async_event));
1465 async_event.ev_hca_guid = hca_devp->hd_hca_attr->hca_node_guid;
1466 clntp->clnt_modinfop->mi_async_handler(
1467 clntp->clnt_private, NULL, new_hcap->nh_code, &async_event);
1468 kmem_free(new_hcap, sizeof (*new_hcap));
1469 #ifdef __lock_lint
1470 {
1471 ibt_hca_hdl_t hca_hdl;
1472 (void) ibt_open_hca(clntp, 0ULL, &hca_hdl);
1473 }
1474 #endif
1475 mutex_enter(&ibtl_clnt_list_mutex);
1476 if (--hca_devp->hd_async_task_cnt == 0)
1477 cv_signal(&hca_devp->hd_async_task_cv);
1478 if (--clntp->clnt_async_cnt == 0)
1479 cv_broadcast(&ibtl_clnt_cv);
1480 mutex_exit(&ibtl_clnt_list_mutex);
1481 }
1482
1483 /*
1484 * ibtl_announce_new_hca:
1485 *
1486  *	o First attach these clients one at a time, in the given order,
1487  *	  waiting for each to finish:
1488  *		IBMA, DM, IBCM
1489  *
1490  *	o Next attach all other clients in parallel.
1491  *
1492  * NOTE: The taskq is used to notify all clients of the new HCA
1493  * simultaneously.  The return value from clients is ignored.
1494 */
1495 void
1496 ibtl_announce_new_hca(ibtl_hca_devinfo_t *hca_devp)
1497 {
1498 ibtl_clnt_t *clntp;
1499 struct ibtl_new_hca_s *new_hcap;
1500
1501 IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_announce_new_hca(%p, %llX)",
1502 hca_devp, hca_devp->hd_hca_attr->hca_node_guid);
1503
1504 mutex_enter(&ibtl_clnt_list_mutex);
1505
1506 clntp = ibtl_clnt_list;
1507 while (clntp != NULL) {
1508 if (clntp->clnt_modinfop->mi_clnt_class == IBT_IBMA) {
1509 IBTF_DPRINTF_L4(ibtf_handlers,
1510 "ibtl_announce_new_hca: calling IBMF");
1511 if (clntp->clnt_modinfop->mi_async_handler) {
1512 _NOTE(NO_COMPETING_THREADS_NOW)
1513 new_hcap = kmem_alloc(sizeof (*new_hcap),
1514 KM_SLEEP);
1515 new_hcap->nh_clntp = clntp;
1516 new_hcap->nh_hca_devp = hca_devp;
1517 new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
1518 #ifndef lint
1519 _NOTE(COMPETING_THREADS_NOW)
1520 #endif
1521 clntp->clnt_async_cnt++;
1522 hca_devp->hd_async_task_cnt++;
1523
1524 (void) taskq_dispatch(ibtl_async_taskq,
1525 ibtl_tell_client_about_new_hca, new_hcap,
1526 TQ_SLEEP);
1527 }
1528 break;
1529 }
1530 clntp = clntp->clnt_list_link;
1531 }
1532 if (clntp != NULL)
1533 while (clntp->clnt_async_cnt > 0)
1534 cv_wait(&ibtl_clnt_cv, &ibtl_clnt_list_mutex);
1535 clntp = ibtl_clnt_list;
1536 while (clntp != NULL) {
1537 if (clntp->clnt_modinfop->mi_clnt_class == IBT_DM) {
1538 IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_announce_new_hca: "
1539 "calling %s", clntp->clnt_modinfop->mi_clnt_name);
1540 if (clntp->clnt_modinfop->mi_async_handler) {
1541 _NOTE(NO_COMPETING_THREADS_NOW)
1542 new_hcap = kmem_alloc(sizeof (*new_hcap),
1543 KM_SLEEP);
1544 new_hcap->nh_clntp = clntp;
1545 new_hcap->nh_hca_devp = hca_devp;
1546 new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
1547 #ifndef lint
1548 _NOTE(COMPETING_THREADS_NOW)
1549 #endif
1550 clntp->clnt_async_cnt++;
1551 hca_devp->hd_async_task_cnt++;
1552
1553 mutex_exit(&ibtl_clnt_list_mutex);
1554 (void) ibtl_tell_client_about_new_hca(
1555 new_hcap);
1556 mutex_enter(&ibtl_clnt_list_mutex);
1557 }
1558 break;
1559 }
1560 clntp = clntp->clnt_list_link;
1561 }
1562
1563 clntp = ibtl_clnt_list;
1564 while (clntp != NULL) {
1565 if (clntp->clnt_modinfop->mi_clnt_class == IBT_CM) {
1566 IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_announce_new_hca: "
1567 "calling %s", clntp->clnt_modinfop->mi_clnt_name);
1568 if (clntp->clnt_modinfop->mi_async_handler) {
1569 _NOTE(NO_COMPETING_THREADS_NOW)
1570 new_hcap = kmem_alloc(sizeof (*new_hcap),
1571 KM_SLEEP);
1572 new_hcap->nh_clntp = clntp;
1573 new_hcap->nh_hca_devp = hca_devp;
1574 new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
1575 #ifndef lint
1576 _NOTE(COMPETING_THREADS_NOW)
1577 #endif
1578 clntp->clnt_async_cnt++;
1579 hca_devp->hd_async_task_cnt++;
1580
1581 (void) taskq_dispatch(ibtl_async_taskq,
1582 ibtl_tell_client_about_new_hca, new_hcap,
1583 TQ_SLEEP);
1584 }
1585 break;
1586 }
1587 clntp = clntp->clnt_list_link;
1588 }
1589 if (clntp != NULL)
1590 while (clntp->clnt_async_cnt > 0)
1591 cv_wait(&ibtl_clnt_cv, &ibtl_clnt_list_mutex);
1592 clntp = ibtl_clnt_list;
1593 while (clntp != NULL) {
1594 if ((clntp->clnt_modinfop->mi_clnt_class != IBT_DM) &&
1595 (clntp->clnt_modinfop->mi_clnt_class != IBT_CM) &&
1596 (clntp->clnt_modinfop->mi_clnt_class != IBT_IBMA)) {
1597 IBTF_DPRINTF_L4(ibtf_handlers,
1598 "ibtl_announce_new_hca: Calling %s ",
1599 clntp->clnt_modinfop->mi_clnt_name);
1600 if (clntp->clnt_modinfop->mi_async_handler) {
1601 _NOTE(NO_COMPETING_THREADS_NOW)
1602 new_hcap = kmem_alloc(sizeof (*new_hcap),
1603 KM_SLEEP);
1604 new_hcap->nh_clntp = clntp;
1605 new_hcap->nh_hca_devp = hca_devp;
1606 new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
1607 #ifndef lint
1608 _NOTE(COMPETING_THREADS_NOW)
1609 #endif
1610 clntp->clnt_async_cnt++;
1611 hca_devp->hd_async_task_cnt++;
1612
1613 (void) taskq_dispatch(ibtl_async_taskq,
1614 ibtl_tell_client_about_new_hca, new_hcap,
1615 TQ_SLEEP);
1616 }
1617 }
1618 clntp = clntp->clnt_list_link;
1619 }
1620
1621 /* wait for all tasks to complete */
1622 while (hca_devp->hd_async_task_cnt != 0)
1623 cv_wait(&hca_devp->hd_async_task_cv, &ibtl_clnt_list_mutex);
1624
1625 /* wakeup thread that may be waiting to send an HCA async */
1626 ASSERT(hca_devp->hd_async_busy == 1);
1627 hca_devp->hd_async_busy = 0;
1628 cv_broadcast(&hca_devp->hd_async_busy_cv);
1629 mutex_exit(&ibtl_clnt_list_mutex);
1860 sm_notice_handler = ibt_hdl->clnt_sm_trap_handler;
1861 if (sm_notice_handler != NULL)
1862 sm_notice_handler(ibt_hdl->clnt_sm_trap_handler_arg,
1863	    noticep->np_sgid, noticep->np_code, &noticep->np_event);
1864 kmem_free(noticep, sizeof (*noticep));
1865 ibtl_dec_clnt_async_cnt(ibt_hdl);
1866 }
1867
1868 /*
1869 * Inform the client that MCG notices are not working at this time.
1870 */
1871 void
1872 ibtl_cm_sm_notice_init_failure(ibtl_cm_sm_init_fail_t *ifail)
1873 {
1874 ibt_clnt_hdl_t ibt_hdl = ifail->smf_ibt_hdl;
1875 struct ibtl_sm_notice *noticep;
1876 ib_gid_t *sgidp = &ifail->smf_sgid[0];
1877 int i;
1878
1879 for (i = 0; i < ifail->smf_num_sgids; i++) {
1880 _NOTE(NO_COMPETING_THREADS_NOW)
1881 noticep = kmem_zalloc(sizeof (*noticep), KM_SLEEP);
1882 noticep->np_ibt_hdl = ibt_hdl;
1883 noticep->np_sgid = *sgidp++;
1884 noticep->np_code = IBT_SM_EVENT_UNAVAILABLE;
1885 #ifndef lint
1886 _NOTE(COMPETING_THREADS_NOW)
1887 #endif
1888 ibtl_inc_clnt_async_cnt(ibt_hdl);
1889 (void) taskq_dispatch(ibtl_async_taskq,
1890 ibtl_sm_notice_task, noticep, TQ_SLEEP);
1891 }
1892 }
1893
1894 /*
1895 * Inform all clients of the event.
1896 */
1897 void
1898 ibtl_cm_sm_notice_handler(ib_gid_t sgid, ibt_subnet_event_code_t code,
1899 ibt_subnet_event_t *event)
1900 {
1901 _NOTE(NO_COMPETING_THREADS_NOW)
1902 struct ibtl_sm_notice *noticep;
1903 ibtl_clnt_t *clntp;
1904
1905 mutex_enter(&ibtl_clnt_list_mutex);
1906 clntp = ibtl_clnt_list;
1907 while (clntp != NULL) {
1908 if (clntp->clnt_sm_trap_handler) {
1909 noticep = kmem_zalloc(sizeof (*noticep), KM_SLEEP);
1910 noticep->np_ibt_hdl = clntp;
1911 noticep->np_sgid = sgid;
1912 noticep->np_code = code;
1913 noticep->np_event = *event;
1914 ++clntp->clnt_async_cnt;
1915 (void) taskq_dispatch(ibtl_async_taskq,
1916 ibtl_sm_notice_task, noticep, TQ_SLEEP);
1917 }
1918 clntp = clntp->clnt_list_link;
1919 }
1920 mutex_exit(&ibtl_clnt_list_mutex);
1921 #ifndef lint
1922 _NOTE(COMPETING_THREADS_NOW)
1923 #endif
1924 }
1925
1926 /*
1927 * Record the handler for this client.
1928 */
1929 void
1930 ibtl_cm_set_sm_notice_handler(ibt_clnt_hdl_t ibt_hdl,
1931 ibt_sm_notice_handler_t sm_notice_handler, void *private)
1932 {
1933 _NOTE(NO_COMPETING_THREADS_NOW)
1934 ibt_hdl->clnt_sm_trap_handler = sm_notice_handler;
1935 ibt_hdl->clnt_sm_trap_handler_arg = private;
1936 #ifndef lint
1937 _NOTE(COMPETING_THREADS_NOW)
1938 #endif
1939 }
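
/*
 * Illustrative sketch (not part of the original file): a subnet notice
 * handler of the shape expected above, and its registration.  The handler
 * and wrapper names are made up; the ibt_clnt_hdl_t would come from the
 * client's ibt_attach() call.
 */
/* ARGSUSED */
static void
example_sm_notice_handler(void *arg, ib_gid_t sgid,
    ibt_subnet_event_code_t code, ibt_subnet_event_t *event)
{
	if (code == IBT_SM_EVENT_UNAVAILABLE) {
		/* subnet event notices are not currently being delivered */
	}
}

static void
example_register_sm_notices(ibt_clnt_hdl_t ibt_hdl)
{
	ibtl_cm_set_sm_notice_handler(ibt_hdl, example_sm_notice_handler,
	    NULL);
}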
1940
1941
1942 /*
1943 * ibtl_another_cq_handler_in_thread()
1944 *
1945 * Conditionally increase the number of cq_threads.
1946  * The number of threads grows based on the number of CQs using threads.
1947 *
1948 * The table below controls the number of threads as follows:
1949 *
1950 * Number of CQs Number of cq_threads
1951 * 0 0
1952 * 1 1
1953 * 2-3 2
1954 * 4-5 3
1955 * 6-9 4
1956 * 10-15 5
1957 * 16-23 6
1958 * 24-31 7
1965 };
1966
1967 static kt_did_t ibtl_cq_did[IBTL_CQ_MAXTHREADS];
1968
1969 void
1970 ibtl_another_cq_handler_in_thread(void)
1971 {
1972 kthread_t *t;
1973 int my_idx;
1974
1975 mutex_enter(&ibtl_cq_mutex);
1976 if ((ibtl_cq_threads == IBTL_CQ_MAXTHREADS) ||
1977 (++ibtl_cqs_using_threads < ibtl_cq_scaling[ibtl_cq_threads])) {
1978 mutex_exit(&ibtl_cq_mutex);
1979 return;
1980 }
1981 my_idx = ibtl_cq_threads++;
1982 mutex_exit(&ibtl_cq_mutex);
1983 t = thread_create(NULL, 0, ibtl_cq_thread, NULL, 0, &p0, TS_RUN,
1984 ibtl_pri - 1);
1985 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
1986 ibtl_cq_did[my_idx] = t->t_did; /* save for thread_join() */
1987 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
1988 }
1989
1990 void
1991 ibtl_thread_init(void)
1992 {
1993 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_init()");
1994
1995 mutex_init(&ibtl_async_mutex, NULL, MUTEX_DEFAULT, NULL);
1996 cv_init(&ibtl_async_cv, NULL, CV_DEFAULT, NULL);
1997 cv_init(&ibtl_clnt_cv, NULL, CV_DEFAULT, NULL);
1998
1999 mutex_init(&ibtl_cq_mutex, NULL, MUTEX_DEFAULT, NULL);
2000 cv_init(&ibtl_cq_cv, NULL, CV_DEFAULT, NULL);
2001 }
2002
2003 void
2004 ibtl_thread_init2(void)
2005 {
2006 int i;
2007 static int initted = 0;
2008 kthread_t *t;
2009
2010 mutex_enter(&ibtl_async_mutex);
2011 if (initted == 1) {
2012 mutex_exit(&ibtl_async_mutex);
2013 return;
2014 }
2015 initted = 1;
2016 mutex_exit(&ibtl_async_mutex);
2017 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_async_did))
2018 ibtl_async_did = kmem_zalloc(ibtl_async_thread_init * sizeof (kt_did_t),
2019 KM_SLEEP);
2020
2021 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_init2()");
2022
2023 for (i = 0; i < ibtl_async_thread_init; i++) {
2024 t = thread_create(NULL, 0, ibtl_async_thread, NULL, 0, &p0,
2025 TS_RUN, ibtl_pri - 1);
2026 ibtl_async_did[i] = t->t_did; /* thread_join() */
2027 }
2028 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_async_did))
2029 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
2030 for (i = 0; i < ibtl_cq_threads; i++) {
2031 t = thread_create(NULL, 0, ibtl_cq_thread, NULL, 0, &p0,
2032 TS_RUN, ibtl_pri - 1);
2033 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
2034 ibtl_cq_did[i] = t->t_did; /* save for thread_join() */
2035 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
2036 }
2037 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
2038 }
2039
2040 void
2041 ibtl_thread_fini(void)
2042 {
2043 int i;
2044
2045 IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_fini()");
2046
2047 /* undo the work done by ibtl_thread_init() */
2048
2049 mutex_enter(&ibtl_cq_mutex);
2050 ibtl_cq_thread_exit = IBTL_THREAD_EXIT;
2051 cv_broadcast(&ibtl_cq_cv);
2052 mutex_exit(&ibtl_cq_mutex);
2053
2054 mutex_enter(&ibtl_async_mutex);
2055 ibtl_async_thread_exit = IBTL_THREAD_EXIT;
2056 cv_broadcast(&ibtl_async_cv);
2057 mutex_exit(&ibtl_async_mutex);
2058
2059 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
2060 for (i = 0; i < ibtl_cq_threads; i++)
2061 thread_join(ibtl_cq_did[i]);
2062 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
2063
2064 if (ibtl_async_did) {
2065 for (i = 0; i < ibtl_async_thread_init; i++)
2066 thread_join(ibtl_async_did[i]);
2067
2068 kmem_free(ibtl_async_did,
2069 ibtl_async_thread_init * sizeof (kt_did_t));
2070 }
2071 mutex_destroy(&ibtl_cq_mutex);
2072 cv_destroy(&ibtl_cq_cv);
2073
2074 mutex_destroy(&ibtl_async_mutex);
2075 cv_destroy(&ibtl_async_cv);
2076 cv_destroy(&ibtl_clnt_cv);
2077 }
2078
2079 /* ARGSUSED */
2080 ibt_status_t ibtl_dummy_node_info_cb(ib_guid_t hca_guid, uint8_t port,
2081 ib_lid_t lid, ibt_node_info_t *node_info)
2082 {