8368 remove warlock leftovers from usr/src/uts

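Background: warlock was Sun's static lock-analysis tool for kernel code. It consumed the _NOTE(...) annotations defined in <sys/note.h>, and analysis runs defined __lock_lint, which the #ifdef blocks deleted below keyed on. With warlock gone from the build these annotations are dead weight: _NOTE expands to nothing in a normal compile, so removing them changes no generated code. A minimal sketch of the pattern this change strips out (the foo_* identifiers are illustrative, not from this file):

    #include <sys/note.h>

    static kmutex_t foo_mutex;              /* hypothetical lock */
    static int      foo_count;              /* hypothetical protected data */

    /* A hint for warlock only; the compiler sees nothing here. */
    _NOTE(MUTEX_PROTECTS_DATA(foo_mutex, foo_count))

    static void
    foo_init(void)
    {
            /* Warlock hint: no other thread can touch foo_count yet. */
            _NOTE(NO_COMPETING_THREADS_NOW)
            foo_count = 0;
    #ifndef lint
            /* Matching hint, hidden from lint's preprocessor pass. */
            _NOTE(COMPETING_THREADS_NOW)
    #endif
    }

Deleting the annotation pair (and the #ifndef lint wrapper that existed only to keep lint quiet about it) leaves the compiled object unchanged.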
          --- old/usr/src/uts/common/io/ib/ibtl/ibtl_handlers.c
          +++ new/usr/src/uts/common/io/ib/ibtl/ibtl_handlers.c
[ 150 lines elided ]
 151  151  #define IBT_VENDOR_CISCO 0x05ad
 152  152  
 153  153  int ibtl_eec_not_supported = 1;
 154  154  
 155  155  char *ibtl_last_client_name;    /* may help debugging */
 156  156  typedef ibt_status_t (*ibtl_node_info_cb_t)(ib_guid_t, uint8_t, ib_lid_t,
 157  157      ibt_node_info_t *);
 158  158  
 159  159  ibtl_node_info_cb_t ibtl_node_info_cb;
 160  160  
 161      -_NOTE(LOCK_ORDER(ibtl_clnt_list_mutex ibtl_async_mutex))
 162      -
 163  161  void
 164  162  ibtl_cm_set_node_info_cb(ibt_status_t (*node_info_cb)(ib_guid_t, uint8_t,
 165  163      ib_lid_t, ibt_node_info_t *))
 166  164  {
 167  165          mutex_enter(&ibtl_clnt_list_mutex);
 168  166          ibtl_node_info_cb = node_info_cb;
 169  167          mutex_exit(&ibtl_clnt_list_mutex);
 170  168  }
 171  169  
 172  170  /*
[ 236 lines elided ]
 409  407          ibtl_clnt_t             *clntp;
 410  408          void                    *client_private;
 411  409          ibt_async_handler_t     async_handler;
 412  410          char                    *client_name;
 413  411  
 414  412          IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call(%p, 0x%x, %p)",
 415  413              ibt_hca, code, event_p);
 416  414  
 417  415          clntp = ibt_hca->ha_clnt_devp;
 418  416  
 419      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_last_client_name))
 420  417          /* Record who is being called (just a debugging aid) */
 421  418          ibtl_last_client_name = client_name = clntp->clnt_name;
 422      -        _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_last_client_name))
 423  419  
 424  420          client_private = clntp->clnt_private;
 425  421          async_handler = clntp->clnt_modinfop->mi_async_handler;
 426  422  
 427  423          if (code & (IBT_EVENT_COM_EST_QP | IBT_EVENT_COM_EST_EEC)) {
 428  424                  mutex_enter(&ibtl_clnt_list_mutex);
 429  425                  async_handler = ibtl_cm_async_handler;
 430  426                  client_private = ibtl_cm_clnt_private;
 431  427                  mutex_exit(&ibtl_clnt_list_mutex);
 432  428                  ibt_hca = NULL;
[ 93 lines elided ]
 526  522  
 527  523  static void
 528  524  ibtl_cm_get_node_info(ibtl_hca_devinfo_t *hca_devp,
 529  525      ibt_async_handler_t async_handler)
 530  526  {
 531  527          struct ibtl_mgr_s *mgrp;
 532  528  
 533  529          if (async_handler == NULL)
 534  530                  return;
 535  531  
 536      -        _NOTE(NO_COMPETING_THREADS_NOW)
 537  532          mgrp = kmem_alloc(sizeof (*mgrp), KM_SLEEP);
 538  533          mgrp->mgr_hca_devp = hca_devp;
 539  534          mgrp->mgr_async_handler = async_handler;
 540  535          mgrp->mgr_clnt_private = NULL;
 541  536          hca_devp->hd_async_task_cnt++;
 542  537  
 543  538          (void) taskq_dispatch(ibtl_async_taskq,
 544  539              ibt_cisco_embedded_sm_rereg_fix, mgrp, TQ_SLEEP);
 545      -#ifndef lint
 546      -        _NOTE(COMPETING_THREADS_NOW)
 547      -#endif
 548  540  }
 549  541  
 550  542  static void
 551  543  ibtl_tell_mgr(ibtl_hca_devinfo_t *hca_devp, ibt_async_handler_t async_handler,
 552  544      void *clnt_private)
 553  545  {
 554  546          struct ibtl_mgr_s *mgrp;
 555  547  
 556  548          if (async_handler == NULL)
 557  549                  return;
 558  550  
 559      -        _NOTE(NO_COMPETING_THREADS_NOW)
 560  551          mgrp = kmem_alloc(sizeof (*mgrp), KM_SLEEP);
 561  552          mgrp->mgr_hca_devp = hca_devp;
 562  553          mgrp->mgr_async_handler = async_handler;
 563  554          mgrp->mgr_clnt_private = clnt_private;
 564  555          hca_devp->hd_async_task_cnt++;
 565  556  
 566  557          (void) taskq_dispatch(ibtl_async_taskq, ibtl_do_mgr_async_task, mgrp,
 567  558              TQ_SLEEP);
 568      -#ifndef lint
 569      -        _NOTE(COMPETING_THREADS_NOW)
 570      -#endif
 571  559  }
 572  560  
 573  561  /*
 574  562   * Per client-device asyncs for HCA level events.  Call each client that is
 575  563   * using the HCA for the event recorded in the ibtl_hca_devinfo_t.
 576  564   */
 577  565  static void
 578  566  ibtl_hca_client_async_task(void *arg)
 579  567  {
 580  568          ibtl_hca_t              *ibt_hca = (ibtl_hca_t *)arg;
[ 426 lines elided ]
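ibtl_cm_get_node_info() and ibtl_tell_mgr() above show the dispatch idiom used throughout this file: taskq_dispatch() carries exactly one untyped argument, so the caller packs everything the task needs into a heap-allocated struct that the task consumes and frees. The _NOTE(NO_COMPETING_THREADS_NOW)/_NOTE(COMPETING_THREADS_NOW) pair being deleted only told warlock that the fresh allocation was still thread-private; it had no runtime effect. A stripped-down sketch of the idiom (the my_* names are hypothetical):

    #include <sys/kmem.h>
    #include <sys/taskq.h>

    typedef struct my_task_arg_s {          /* hypothetical argument bundle */
            void    *ta_obj;
            int     ta_code;
    } my_task_arg_t;

    static void
    my_task(void *arg)
    {
            my_task_arg_t *tap = arg;

            /* ... act on tap->ta_obj using tap->ta_code ... */
            kmem_free(tap, sizeof (*tap));  /* the task owns the bundle */
    }

    static void
    my_dispatch(taskq_t *tq, void *obj, int code)
    {
            my_task_arg_t *tap;

            tap = kmem_alloc(sizeof (*tap), KM_SLEEP);
            tap->ta_obj = obj;
            tap->ta_code = code;

            /* A TQ_SLEEP dispatch cannot fail, hence the (void) cast. */
            (void) taskq_dispatch(tq, my_task, tap, TQ_SLEEP);
    }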
1007  995                  if (ibtl_eec->eec_async_flags & IBTL_ASYNC_FREE_OBJECT) {
1008  996                          mutex_exit(&ibtl_async_mutex);
1009  997                          kmem_free(ibtl_eec, sizeof (struct ibtl_eec_s));
1010  998                          mutex_enter(&ibtl_async_mutex);
1011  999                          return;
1012 1000                  }
1013 1001          }
1014 1002          ibtl_eec->eec_async_flags &= ~IBTL_ASYNC_PENDING;
1015 1003  }
1016 1004  
1017      -#ifdef __lock_lint
1018      -kmutex_t cpr_mutex;
1019      -#endif
1020      -
1021 1005  /*
1022 1006   * Loop forever, calling async_handlers until all of the async lists
1023 1007   * are empty.
1024 1008   */
1025 1009  
1026 1010  static void
1027 1011  ibtl_async_thread(void)
1028 1012  {
1029      -#ifndef __lock_lint
1030 1013          kmutex_t cpr_mutex;
1031      -#endif
1032 1014          callb_cpr_t     cprinfo;
1033 1015  
1034      -        _NOTE(MUTEX_PROTECTS_DATA(cpr_mutex, cprinfo))
1035      -        _NOTE(NO_COMPETING_THREADS_NOW)
1036 1016          mutex_init(&cpr_mutex, NULL, MUTEX_DRIVER, NULL);
1037 1017          CALLB_CPR_INIT(&cprinfo, &cpr_mutex, callb_generic_cpr,
1038 1018              "ibtl_async_thread");
1039      -#ifndef lint
1040      -        _NOTE(COMPETING_THREADS_NOW)
1041      -#endif
1042 1019  
1043 1020          mutex_enter(&ibtl_async_mutex);
1044 1021  
1045 1022          for (;;) {
1046 1023                  if (ibtl_async_hca_list_start) {
1047 1024                          ibtl_hca_devinfo_t *hca_devp;
1048 1025  
1049 1026                          /* remove first entry from list */
1050 1027                          hca_devp = ibtl_async_hca_list_start;
1051 1028                          ibtl_async_hca_list_start = hca_devp->hd_async_link;
[ 63 lines elided ]
1115 1092                          mutex_exit(&ibtl_async_mutex);
1116 1093                          mutex_enter(&cpr_mutex);
1117 1094                          CALLB_CPR_SAFE_END(&cprinfo, &cpr_mutex);
1118 1095                          mutex_exit(&cpr_mutex);
1119 1096                          mutex_enter(&ibtl_async_mutex);
1120 1097                  }
1121 1098          }
1122 1099  
1123 1100          mutex_exit(&ibtl_async_mutex);
1124 1101  
1125      -#ifndef __lock_lint
1126 1102          mutex_enter(&cpr_mutex);
1127 1103          CALLB_CPR_EXIT(&cprinfo);
1128      -#endif
1129 1104          mutex_destroy(&cpr_mutex);
1130 1105  }
1131 1106  
1132 1107  
1133 1108  void
1134 1109  ibtl_free_qp_async_check(ibtl_qp_t *ibtl_qp)
1135 1110  {
1136 1111          IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_qp_async_check(%p)", ibtl_qp);
1137 1112  
1138 1113          mutex_enter(&ibtl_async_mutex);
[ 100 lines elided ]
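For reference, the CALLB_CPR protocol that ibtl_async_thread() (and ibtl_cq_thread() below) follows: the callb_cpr_t passed to CALLB_CPR_INIT() must be guarded by a mutex, the thread brackets its idle cv_wait() with CALLB_CPR_SAFE_BEGIN/SAFE_END so checkpoint-resume can suspend it while idle, and CALLB_CPR_EXIT() both deregisters and drops that mutex, which is why the code above enters cpr_mutex before exiting but never explicitly releases it before mutex_destroy(). A stripped-down sketch, assuming a hypothetical work list and exit flag:

    #include <sys/ksynch.h>
    #include <sys/callb.h>

    static kmutex_t   my_work_mutex;        /* hypothetical work-list lock */
    static kcondvar_t my_work_cv;
    static void       *my_work_list;
    static int        my_thread_exit;

    static void
    my_service_thread(void)
    {
            kmutex_t        cpr_mutex;      /* private to this thread */
            callb_cpr_t     cprinfo;

            mutex_init(&cpr_mutex, NULL, MUTEX_DRIVER, NULL);
            CALLB_CPR_INIT(&cprinfo, &cpr_mutex, callb_generic_cpr,
                "my_service_thread");

            mutex_enter(&my_work_mutex);
            while (!my_thread_exit) {
                    if (my_work_list != NULL) {
                            /* ... dequeue and process one item ... */
                            continue;
                    }
                    /* Idle: mark ourselves safe for checkpoint-resume. */
                    mutex_enter(&cpr_mutex);
                    CALLB_CPR_SAFE_BEGIN(&cprinfo);
                    mutex_exit(&cpr_mutex);

                    cv_wait(&my_work_cv, &my_work_mutex);

                    mutex_exit(&my_work_mutex);
                    mutex_enter(&cpr_mutex);
                    CALLB_CPR_SAFE_END(&cprinfo, &cpr_mutex);
                    mutex_exit(&cpr_mutex);
                    mutex_enter(&my_work_mutex);
            }
            mutex_exit(&my_work_mutex);

            mutex_enter(&cpr_mutex);
            CALLB_CPR_EXIT(&cprinfo);       /* also drops cpr_mutex */
            mutex_destroy(&cpr_mutex);
    }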
1239 1214   */
1240 1215  
1241 1216  static void
1242 1217  ibtl_cq_handler_call(ibtl_cq_t *ibtl_cq)
1243 1218  {
1244 1219          ibt_cq_handler_t        cq_handler;
1245 1220          void                    *arg;
1246 1221  
1247 1222          IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_cq_handler_call(%p)", ibtl_cq);
1248 1223  
1249      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ibtl_cq))
1250 1224          cq_handler = ibtl_cq->cq_comp_handler;
1251 1225          arg = ibtl_cq->cq_arg;
1252 1226          if (cq_handler != NULL)
1253 1227                  cq_handler(ibtl_cq, arg);
1254 1228          else
1255 1229                  IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_cq_handler_call: "
1256 1230                      "no cq_handler for cq %p", ibtl_cq);
1257 1231  }
1258 1232  
1259 1233  /*
[ 21 lines elided ]
1281 1255  }
1282 1256  
1283 1257  /*
1284 1258   * Loop forever, calling cq_handlers until the cq list
1285 1259   * is empty.
1286 1260   */
1287 1261  
1288 1262  static void
1289 1263  ibtl_cq_thread(void)
1290 1264  {
1291      -#ifndef __lock_lint
1292 1265          kmutex_t cpr_mutex;
1293      -#endif
1294 1266          callb_cpr_t     cprinfo;
1295 1267  
1296      -        _NOTE(MUTEX_PROTECTS_DATA(cpr_mutex, cprinfo))
1297      -        _NOTE(NO_COMPETING_THREADS_NOW)
1298 1268          mutex_init(&cpr_mutex, NULL, MUTEX_DRIVER, NULL);
1299 1269          CALLB_CPR_INIT(&cprinfo, &cpr_mutex, callb_generic_cpr,
1300 1270              "ibtl_cq_thread");
1301      -#ifndef lint
1302      -        _NOTE(COMPETING_THREADS_NOW)
1303      -#endif
1304 1271  
1305 1272          mutex_enter(&ibtl_cq_mutex);
1306 1273  
1307 1274          for (;;) {
1308 1275                  if (ibtl_cq_list_start) {
1309 1276                          ibtl_cq_t *ibtl_cq;
1310 1277  
1311 1278                          ibtl_cq = ibtl_cq_list_start;
1312 1279                          ibtl_cq_list_start = ibtl_cq->cq_link;
1313 1280                          ibtl_cq->cq_link = NULL;
[ 20 lines elided ]
1334 1301  
1335 1302                          mutex_exit(&ibtl_cq_mutex);
1336 1303                          mutex_enter(&cpr_mutex);
1337 1304                          CALLB_CPR_SAFE_END(&cprinfo, &cpr_mutex);
1338 1305                          mutex_exit(&cpr_mutex);
1339 1306                          mutex_enter(&ibtl_cq_mutex);
1340 1307                  }
1341 1308          }
1342 1309  
1343 1310          mutex_exit(&ibtl_cq_mutex);
1344      -#ifndef __lock_lint
1345 1311          mutex_enter(&cpr_mutex);
1346 1312          CALLB_CPR_EXIT(&cprinfo);
1347      -#endif
1348 1313          mutex_destroy(&cpr_mutex);
1349 1314  }
1350 1315  
1351 1316  
1352 1317  /*
1353 1318   * ibc_cq_handler()
1354 1319   *
1355 1320   *    Completion Queue Notification Handler.
1356 1321   *
1357 1322   */
[ 68 lines elided ]
1426 1391   *      This function does not otherwise change the state of previous
1427 1392   *      calls to ibt_enable_cq_notify().
1428 1393   */
1429 1394  void
1430 1395  ibt_set_cq_handler(ibt_cq_hdl_t ibtl_cq, ibt_cq_handler_t completion_handler,
1431 1396      void *arg)
1432 1397  {
1433 1398          IBTF_DPRINTF_L3(ibtf_handlers, "ibt_set_cq_handler(%p, %p, %p)",
1434 1399              ibtl_cq, completion_handler, arg);
1435 1400  
1436      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ibtl_cq))
1437 1401          ibtl_cq->cq_comp_handler = completion_handler;
1438 1402          ibtl_cq->cq_arg = arg;
1439 1403  }
1440 1404  
1441 1405  
1442 1406  /*
1443 1407   * Inform IBT clients about New HCAs.
1444 1408   *
1445 1409   *      We use taskqs to allow simultaneous notification, with sleeping.
1446 1410   *      Since taskqs only allow one argument, we define a structure
[ 12 lines elided ]
1459 1423          struct ibtl_new_hca_s   *new_hcap = (struct ibtl_new_hca_s *)arg;
1460 1424          ibtl_clnt_t             *clntp = new_hcap->nh_clntp;
1461 1425          ibt_async_event_t       async_event;
1462 1426          ibtl_hca_devinfo_t      *hca_devp = new_hcap->nh_hca_devp;
1463 1427  
1464 1428          bzero(&async_event, sizeof (async_event));
1465 1429          async_event.ev_hca_guid = hca_devp->hd_hca_attr->hca_node_guid;
1466 1430          clntp->clnt_modinfop->mi_async_handler(
1467 1431              clntp->clnt_private, NULL, new_hcap->nh_code, &async_event);
1468 1432          kmem_free(new_hcap, sizeof (*new_hcap));
1469      -#ifdef __lock_lint
1470      -        {
1471      -                ibt_hca_hdl_t hca_hdl;
1472      -                (void) ibt_open_hca(clntp, 0ULL, &hca_hdl);
1473      -        }
1474      -#endif
1475 1433          mutex_enter(&ibtl_clnt_list_mutex);
1476 1434          if (--hca_devp->hd_async_task_cnt == 0)
1477 1435                  cv_signal(&hca_devp->hd_async_task_cv);
1478 1436          if (--clntp->clnt_async_cnt == 0)
1479 1437                  cv_broadcast(&ibtl_clnt_cv);
1480 1438          mutex_exit(&ibtl_clnt_list_mutex);
1481 1439  }
1482 1440  
1483 1441  /*
1484 1442   * ibtl_announce_new_hca:
[ 17 lines elided ]
1502 1460              hca_devp, hca_devp->hd_hca_attr->hca_node_guid);
1503 1461  
1504 1462          mutex_enter(&ibtl_clnt_list_mutex);
1505 1463  
1506 1464          clntp = ibtl_clnt_list;
1507 1465          while (clntp != NULL) {
1508 1466                  if (clntp->clnt_modinfop->mi_clnt_class == IBT_IBMA) {
1509 1467                          IBTF_DPRINTF_L4(ibtf_handlers,
1510 1468                              "ibtl_announce_new_hca: calling IBMF");
1511 1469                          if (clntp->clnt_modinfop->mi_async_handler) {
1512      -                                _NOTE(NO_COMPETING_THREADS_NOW)
1513 1470                                  new_hcap = kmem_alloc(sizeof (*new_hcap),
1514 1471                                      KM_SLEEP);
1515 1472                                  new_hcap->nh_clntp = clntp;
1516 1473                                  new_hcap->nh_hca_devp = hca_devp;
1517 1474                                  new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
1518      -#ifndef lint
1519      -                                _NOTE(COMPETING_THREADS_NOW)
1520      -#endif
1521 1475                                  clntp->clnt_async_cnt++;
1522 1476                                  hca_devp->hd_async_task_cnt++;
1523 1477  
1524 1478                                  (void) taskq_dispatch(ibtl_async_taskq,
1525 1479                                      ibtl_tell_client_about_new_hca, new_hcap,
1526 1480                                      TQ_SLEEP);
1527 1481                          }
1528 1482                          break;
1529 1483                  }
1530 1484                  clntp = clntp->clnt_list_link;
1531 1485          }
1532 1486          if (clntp != NULL)
1533 1487                  while (clntp->clnt_async_cnt > 0)
1534 1488                          cv_wait(&ibtl_clnt_cv, &ibtl_clnt_list_mutex);
1535 1489          clntp = ibtl_clnt_list;
1536 1490          while (clntp != NULL) {
1537 1491                  if (clntp->clnt_modinfop->mi_clnt_class == IBT_DM) {
1538 1492                          IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_announce_new_hca: "
1539 1493                              "calling  %s", clntp->clnt_modinfop->mi_clnt_name);
1540 1494                          if (clntp->clnt_modinfop->mi_async_handler) {
1541      -                                _NOTE(NO_COMPETING_THREADS_NOW)
1542 1495                                  new_hcap = kmem_alloc(sizeof (*new_hcap),
1543 1496                                      KM_SLEEP);
1544 1497                                  new_hcap->nh_clntp = clntp;
1545 1498                                  new_hcap->nh_hca_devp = hca_devp;
1546 1499                                  new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
1547      -#ifndef lint
1548      -                                _NOTE(COMPETING_THREADS_NOW)
1549      -#endif
1550 1500                                  clntp->clnt_async_cnt++;
1551 1501                                  hca_devp->hd_async_task_cnt++;
1552 1502  
1553 1503                                  mutex_exit(&ibtl_clnt_list_mutex);
1554 1504                                  (void) ibtl_tell_client_about_new_hca(
1555 1505                                      new_hcap);
1556 1506                                  mutex_enter(&ibtl_clnt_list_mutex);
1557 1507                          }
1558 1508                          break;
1559 1509                  }
1560 1510                  clntp = clntp->clnt_list_link;
1561 1511          }
1562 1512  
1563 1513          clntp = ibtl_clnt_list;
1564 1514          while (clntp != NULL) {
1565 1515                  if (clntp->clnt_modinfop->mi_clnt_class == IBT_CM) {
1566 1516                          IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_announce_new_hca: "
1567 1517                              "calling  %s", clntp->clnt_modinfop->mi_clnt_name);
1568 1518                          if (clntp->clnt_modinfop->mi_async_handler) {
1569      -                                _NOTE(NO_COMPETING_THREADS_NOW)
1570 1519                                  new_hcap = kmem_alloc(sizeof (*new_hcap),
1571 1520                                      KM_SLEEP);
1572 1521                                  new_hcap->nh_clntp = clntp;
1573 1522                                  new_hcap->nh_hca_devp = hca_devp;
1574 1523                                  new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
1575      -#ifndef lint
1576      -                                _NOTE(COMPETING_THREADS_NOW)
1577      -#endif
1578 1524                                  clntp->clnt_async_cnt++;
1579 1525                                  hca_devp->hd_async_task_cnt++;
1580 1526  
1581 1527                                  (void) taskq_dispatch(ibtl_async_taskq,
1582 1528                                      ibtl_tell_client_about_new_hca, new_hcap,
1583 1529                                      TQ_SLEEP);
1584 1530                          }
1585 1531                          break;
1586 1532                  }
1587 1533                  clntp = clntp->clnt_list_link;
[ 3 lines elided ]
1591 1537                          cv_wait(&ibtl_clnt_cv, &ibtl_clnt_list_mutex);
1592 1538          clntp = ibtl_clnt_list;
1593 1539          while (clntp != NULL) {
1594 1540                  if ((clntp->clnt_modinfop->mi_clnt_class != IBT_DM) &&
1595 1541                      (clntp->clnt_modinfop->mi_clnt_class != IBT_CM) &&
1596 1542                      (clntp->clnt_modinfop->mi_clnt_class != IBT_IBMA)) {
1597 1543                          IBTF_DPRINTF_L4(ibtf_handlers,
1598 1544                              "ibtl_announce_new_hca: Calling %s ",
1599 1545                              clntp->clnt_modinfop->mi_clnt_name);
1600 1546                          if (clntp->clnt_modinfop->mi_async_handler) {
1601      -                                _NOTE(NO_COMPETING_THREADS_NOW)
1602 1547                                  new_hcap = kmem_alloc(sizeof (*new_hcap),
1603 1548                                      KM_SLEEP);
1604 1549                                  new_hcap->nh_clntp = clntp;
1605 1550                                  new_hcap->nh_hca_devp = hca_devp;
1606 1551                                  new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
1607      -#ifndef lint
1608      -                                _NOTE(COMPETING_THREADS_NOW)
1609      -#endif
1610 1552                                  clntp->clnt_async_cnt++;
1611 1553                                  hca_devp->hd_async_task_cnt++;
1612 1554  
1613 1555                                  (void) taskq_dispatch(ibtl_async_taskq,
1614 1556                                      ibtl_tell_client_about_new_hca, new_hcap,
1615 1557                                      TQ_SLEEP);
1616 1558                          }
1617 1559                  }
1618 1560                  clntp = clntp->clnt_list_link;
1619 1561          }
[ 250 lines elided ]
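The loops in ibtl_announce_new_hca() above give the attach announcement a strict ordering: IBMA first, then DM, then CM, then every remaining client, with the announcer parking on ibtl_clnt_cv between classes until the dispatched tasks drain the counts back to zero. (DM is the odd one out: it is called synchronously, with the list mutex dropped around the call.) The counting barrier, reduced to its essentials (my_* names are hypothetical):

    static kmutex_t   my_list_mutex;        /* hypothetical */
    static kcondvar_t my_done_cv;
    static uint_t     my_pending;

    static void
    my_notify_task(void *arg)
    {
            /* ... invoke one client's async handler ... */
            mutex_enter(&my_list_mutex);
            if (--my_pending == 0)
                    cv_broadcast(&my_done_cv);      /* wake the announcer */
            mutex_exit(&my_list_mutex);
    }

    static void
    my_announce_class(taskq_t *tq, void **args, uint_t ntasks)
    {
            uint_t i;

            mutex_enter(&my_list_mutex);
            for (i = 0; i < ntasks; i++) {
                    my_pending++;
                    (void) taskq_dispatch(tq, my_notify_task, args[i],
                        TQ_SLEEP);
            }
            /* Barrier: the next client class is not told until we drain. */
            while (my_pending > 0)
                    cv_wait(&my_done_cv, &my_list_mutex);
            mutex_exit(&my_list_mutex);
    }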
1870 1812   */
1871 1813  void
1872 1814  ibtl_cm_sm_notice_init_failure(ibtl_cm_sm_init_fail_t *ifail)
1873 1815  {
1874 1816          ibt_clnt_hdl_t ibt_hdl = ifail->smf_ibt_hdl;
1875 1817          struct ibtl_sm_notice *noticep;
1876 1818          ib_gid_t *sgidp = &ifail->smf_sgid[0];
1877 1819          int i;
1878 1820  
1879 1821          for (i = 0; i < ifail->smf_num_sgids; i++) {
1880      -                _NOTE(NO_COMPETING_THREADS_NOW)
1881 1822                  noticep = kmem_zalloc(sizeof (*noticep), KM_SLEEP);
1882 1823                  noticep->np_ibt_hdl = ibt_hdl;
1883 1824                  noticep->np_sgid = *sgidp++;
1884 1825                  noticep->np_code = IBT_SM_EVENT_UNAVAILABLE;
1885      -#ifndef lint
1886      -                _NOTE(COMPETING_THREADS_NOW)
1887      -#endif
1888 1826                  ibtl_inc_clnt_async_cnt(ibt_hdl);
1889 1827                  (void) taskq_dispatch(ibtl_async_taskq,
1890 1828                      ibtl_sm_notice_task, noticep, TQ_SLEEP);
1891 1829          }
1892 1830  }
1893 1831  
1894 1832  /*
1895 1833   * Inform all clients of the event.
1896 1834   */
1897 1835  void
1898 1836  ibtl_cm_sm_notice_handler(ib_gid_t sgid, ibt_subnet_event_code_t code,
1899 1837      ibt_subnet_event_t *event)
1900 1838  {
1901      -        _NOTE(NO_COMPETING_THREADS_NOW)
1902 1839          struct ibtl_sm_notice   *noticep;
1903 1840          ibtl_clnt_t             *clntp;
1904 1841  
1905 1842          mutex_enter(&ibtl_clnt_list_mutex);
1906 1843          clntp = ibtl_clnt_list;
1907 1844          while (clntp != NULL) {
1908 1845                  if (clntp->clnt_sm_trap_handler) {
1909 1846                          noticep = kmem_zalloc(sizeof (*noticep), KM_SLEEP);
1910 1847                          noticep->np_ibt_hdl = clntp;
1911 1848                          noticep->np_sgid = sgid;
1912 1849                          noticep->np_code = code;
1913 1850                          noticep->np_event = *event;
1914 1851                          ++clntp->clnt_async_cnt;
1915 1852                          (void) taskq_dispatch(ibtl_async_taskq,
1916 1853                              ibtl_sm_notice_task, noticep, TQ_SLEEP);
1917 1854                  }
1918 1855                  clntp = clntp->clnt_list_link;
1919 1856          }
1920 1857          mutex_exit(&ibtl_clnt_list_mutex);
1921      -#ifndef lint
1922      -        _NOTE(COMPETING_THREADS_NOW)
1923      -#endif
1924 1858  }
1925 1859  
1926 1860  /*
1927 1861   * Record the handler for this client.
1928 1862   */
1929 1863  void
1930 1864  ibtl_cm_set_sm_notice_handler(ibt_clnt_hdl_t ibt_hdl,
1931 1865      ibt_sm_notice_handler_t sm_notice_handler, void *private)
1932 1866  {
1933      -        _NOTE(NO_COMPETING_THREADS_NOW)
1934 1867          ibt_hdl->clnt_sm_trap_handler = sm_notice_handler;
1935 1868          ibt_hdl->clnt_sm_trap_handler_arg = private;
1936      -#ifndef lint
1937      -        _NOTE(COMPETING_THREADS_NOW)
1938      -#endif
1939 1869  }
1940 1870  
1941 1871  
1942 1872  /*
1943 1873   * ibtl_another_cq_handler_in_thread()
1944 1874   *
1945 1875   * Conditionally increase the number of cq_threads.
1946 1876   * The number of threads grows, based on the number of cqs using threads.
1947 1877   *
1948 1878   * The table below controls the number of threads as follows:
[ 26 lines elided ]
1975 1905          mutex_enter(&ibtl_cq_mutex);
1976 1906          if ((ibtl_cq_threads == IBTL_CQ_MAXTHREADS) ||
1977 1907              (++ibtl_cqs_using_threads < ibtl_cq_scaling[ibtl_cq_threads])) {
1978 1908                  mutex_exit(&ibtl_cq_mutex);
1979 1909                  return;
1980 1910          }
1981 1911          my_idx = ibtl_cq_threads++;
1982 1912          mutex_exit(&ibtl_cq_mutex);
1983 1913          t = thread_create(NULL, 0, ibtl_cq_thread, NULL, 0, &p0, TS_RUN,
1984 1914              ibtl_pri - 1);
1985      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
1986 1915          ibtl_cq_did[my_idx] = t->t_did; /* save for thread_join() */
1987      -        _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
1988 1916  }
1989 1917  
1990 1918  void
1991 1919  ibtl_thread_init(void)
1992 1920  {
1993 1921          IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_init()");
1994 1922  
1995 1923          mutex_init(&ibtl_async_mutex, NULL, MUTEX_DEFAULT, NULL);
1996 1924          cv_init(&ibtl_async_cv, NULL, CV_DEFAULT, NULL);
1997 1925          cv_init(&ibtl_clnt_cv, NULL, CV_DEFAULT, NULL);
[ 9 lines elided ]
2007 1935          static int initted = 0;
2008 1936          kthread_t *t;
2009 1937  
2010 1938          mutex_enter(&ibtl_async_mutex);
2011 1939          if (initted == 1) {
2012 1940                  mutex_exit(&ibtl_async_mutex);
2013 1941                  return;
2014 1942          }
2015 1943          initted = 1;
2016 1944          mutex_exit(&ibtl_async_mutex);
2017      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_async_did))
2018 1945          ibtl_async_did = kmem_zalloc(ibtl_async_thread_init * sizeof (kt_did_t),
2019 1946              KM_SLEEP);
2020 1947  
2021 1948          IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_init2()");
2022 1949  
2023 1950          for (i = 0; i < ibtl_async_thread_init; i++) {
2024 1951                  t = thread_create(NULL, 0, ibtl_async_thread, NULL, 0, &p0,
2025 1952                      TS_RUN, ibtl_pri - 1);
2026 1953                  ibtl_async_did[i] = t->t_did; /* thread_join() */
2027 1954          }
2028      -        _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_async_did))
2029      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
2030 1955          for (i = 0; i < ibtl_cq_threads; i++) {
2031 1956                  t = thread_create(NULL, 0, ibtl_cq_thread, NULL, 0, &p0,
2032 1957                      TS_RUN, ibtl_pri - 1);
2033      -                _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
2034 1958                  ibtl_cq_did[i] = t->t_did; /* save for thread_join() */
2035      -                _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
2036 1959          }
2037      -        _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
2038 1960  }
2039 1961  
2040 1962  void
2041 1963  ibtl_thread_fini(void)
2042 1964  {
2043 1965          int i;
2044 1966  
2045 1967          IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_fini()");
2046 1968  
2047 1969          /* undo the work done by ibtl_thread_init() */
[ 1 line elided ]
2049 1971          mutex_enter(&ibtl_cq_mutex);
2050 1972          ibtl_cq_thread_exit = IBTL_THREAD_EXIT;
2051 1973          cv_broadcast(&ibtl_cq_cv);
2052 1974          mutex_exit(&ibtl_cq_mutex);
2053 1975  
2054 1976          mutex_enter(&ibtl_async_mutex);
2055 1977          ibtl_async_thread_exit = IBTL_THREAD_EXIT;
2056 1978          cv_broadcast(&ibtl_async_cv);
2057 1979          mutex_exit(&ibtl_async_mutex);
2058 1980  
2059      -        _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
2060 1981          for (i = 0; i < ibtl_cq_threads; i++)
2061 1982                  thread_join(ibtl_cq_did[i]);
2062      -        _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
2063 1983  
2064 1984          if (ibtl_async_did) {
2065 1985                  for (i = 0; i < ibtl_async_thread_init; i++)
2066 1986                          thread_join(ibtl_async_did[i]);
2067 1987  
2068 1988                  kmem_free(ibtl_async_did,
2069 1989                      ibtl_async_thread_init * sizeof (kt_did_t));
2070 1990          }
2071 1991          mutex_destroy(&ibtl_cq_mutex);
2072 1992          cv_destroy(&ibtl_cq_cv);
[ 12 lines elided ]
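One last pattern worth noting from ibtl_thread_init2() and ibtl_thread_fini() above: thread_create() returns a kthread_t that is no longer safe to reference once the thread exits, so the code captures t->t_did (a kt_did_t) at creation time and later hands it to thread_join(), letting teardown wait on the ID rather than the pointer. A sketch of the pairing (MY_NTHREADS and the my_* names are hypothetical; my_service_thread is the worker sketched earlier):

    #define MY_NTHREADS     4               /* hypothetical pool size */

    static kt_did_t my_did[MY_NTHREADS];

    static void
    my_threads_start(void)
    {
            kthread_t *t;
            int i;

            for (i = 0; i < MY_NTHREADS; i++) {
                    t = thread_create(NULL, 0, my_service_thread, NULL, 0,
                        &p0, TS_RUN, minclsyspri);
                    my_did[i] = t->t_did;   /* save for thread_join() */
            }
    }

    static void
    my_threads_stop(void)
    {
            int i;

            /* ... set the exit flag and cv_broadcast() the workers ... */
            for (i = 0; i < MY_NTHREADS; i++)
                    thread_join(my_did[i]); /* reap each worker */
    }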