8368 remove warlock leftovers from usr/src/uts
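Every hunk below deletes warlock (lock-lint) annotations, and the deletions are behaviorally neutral: _NOTE() only ever spoke to the retired static lock-analysis tooling. A minimal sketch of why, assuming the usual illumos definition in note.h (paraphrased from memory, not quoted from the gate):

/*
 * Sketch, not verbatim: outside of a lint/warlock pass, _NOTE()
 * expands to nothing, so deleting the annotations (and the
 * #ifndef lint fences that guarded some of them) changes no
 * generated code.
 */
#ifndef _NOTE
#define _NOTE(s)        /* empty in normal compilation */
#endif

The #ifdef __lock_lint blocks removed below were likewise dead in real builds: __lock_lint was defined only while warlock preprocessed the source.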
@@ -156,12 +156,10 @@
typedef ibt_status_t (*ibtl_node_info_cb_t)(ib_guid_t, uint8_t, ib_lid_t,
ibt_node_info_t *);
ibtl_node_info_cb_t ibtl_node_info_cb;
-_NOTE(LOCK_ORDER(ibtl_clnt_list_mutex ibtl_async_mutex))
-
void
ibtl_cm_set_node_info_cb(ibt_status_t (*node_info_cb)(ib_guid_t, uint8_t,
ib_lid_t, ibt_node_info_t *))
{
mutex_enter(&ibtl_clnt_list_mutex);
@@ -414,14 +412,12 @@
IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call(%p, 0x%x, %p)",
ibt_hca, code, event_p);
clntp = ibt_hca->ha_clnt_devp;
- _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_last_client_name))
/* Record who is being called (just a debugging aid) */
ibtl_last_client_name = client_name = clntp->clnt_name;
- _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_last_client_name))
client_private = clntp->clnt_private;
async_handler = clntp->clnt_modinfop->mi_async_handler;
if (code & (IBT_EVENT_COM_EST_QP | IBT_EVENT_COM_EST_EEC)) {
@@ -531,22 +527,18 @@
struct ibtl_mgr_s *mgrp;
if (async_handler == NULL)
return;
- _NOTE(NO_COMPETING_THREADS_NOW)
mgrp = kmem_alloc(sizeof (*mgrp), KM_SLEEP);
mgrp->mgr_hca_devp = hca_devp;
mgrp->mgr_async_handler = async_handler;
mgrp->mgr_clnt_private = NULL;
hca_devp->hd_async_task_cnt++;
(void) taskq_dispatch(ibtl_async_taskq,
ibt_cisco_embedded_sm_rereg_fix, mgrp, TQ_SLEEP);
-#ifndef lint
- _NOTE(COMPETING_THREADS_NOW)
-#endif
}
static void
ibtl_tell_mgr(ibtl_hca_devinfo_t *hca_devp, ibt_async_handler_t async_handler,
void *clnt_private)
@@ -554,22 +546,18 @@
struct ibtl_mgr_s *mgrp;
if (async_handler == NULL)
return;
- _NOTE(NO_COMPETING_THREADS_NOW)
mgrp = kmem_alloc(sizeof (*mgrp), KM_SLEEP);
mgrp->mgr_hca_devp = hca_devp;
mgrp->mgr_async_handler = async_handler;
mgrp->mgr_clnt_private = clnt_private;
hca_devp->hd_async_task_cnt++;
(void) taskq_dispatch(ibtl_async_taskq, ibtl_do_mgr_async_task, mgrp,
TQ_SLEEP);
-#ifndef lint
- _NOTE(COMPETING_THREADS_NOW)
-#endif
}
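The hd_async_task_cnt++ taken just before each taskq_dispatch() above is half of a drain handshake: the dispatched task decrements the count and signals a waiter, which is how teardown waits out outstanding asyncs. A sketch of the consumer half, its shape assumed from the decrement-and-signal code visible later in this diff (the function name here is hypothetical):

static void
example_mgr_async_task(void *arg)
{
        struct ibtl_mgr_s *mgrp = arg;
        ibtl_hca_devinfo_t *hca_devp = mgrp->mgr_hca_devp;

        /* ... deliver the event via mgrp->mgr_async_handler ... */

        mutex_enter(&ibtl_clnt_list_mutex);
        if (--hca_devp->hd_async_task_cnt == 0)
                cv_signal(&hca_devp->hd_async_task_cv);
        mutex_exit(&ibtl_clnt_list_mutex);
        kmem_free(mgrp, sizeof (*mgrp));
}

The same increment-before-dispatch shape recurs in the ibtl_announce_new_hca and sm-notice hunks further down.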
/*
* Per client-device asyncs for HCA level events. Call each client that is
* using the HCA for the event recorded in the ibtl_hca_devinfo_t.
@@ -1012,35 +1000,24 @@
}
}
ibtl_eec->eec_async_flags &= ~IBTL_ASYNC_PENDING;
}
-#ifdef __lock_lint
-kmutex_t cpr_mutex;
-#endif
-
/*
* Loop forever, calling async_handlers until all of the async lists
* are empty.
*/
static void
ibtl_async_thread(void)
{
-#ifndef __lock_lint
kmutex_t cpr_mutex;
-#endif
callb_cpr_t cprinfo;
- _NOTE(MUTEX_PROTECTS_DATA(cpr_mutex, cprinfo))
- _NOTE(NO_COMPETING_THREADS_NOW)
mutex_init(&cpr_mutex, NULL, MUTEX_DRIVER, NULL);
CALLB_CPR_INIT(&cprinfo, &cpr_mutex, callb_generic_cpr,
"ibtl_async_thread");
-#ifndef lint
- _NOTE(COMPETING_THREADS_NOW)
-#endif
mutex_enter(&ibtl_async_mutex);
for (;;) {
if (ibtl_async_hca_list_start) {
@@ -1120,14 +1097,12 @@
}
}
mutex_exit(&ibtl_async_mutex);
-#ifndef __lock_lint
mutex_enter(&cpr_mutex);
CALLB_CPR_EXIT(&cprinfo);
-#endif
mutex_destroy(&cpr_mutex);
}
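With the __lock_lint scaffolding gone, what remains above is the plain CPR (checkpoint/resume) idiom: the stack mutex exists only to back the callb_cpr_t, and CALLB_CPR_EXIT() releases that mutex itself, which is why the exit path enters the lock but never explicitly drops it before mutex_destroy(). A self-contained sketch of the idiom on its own (names and the condition variable are illustrative, not from ibtl):

#include <sys/ksynch.h>
#include <sys/callb.h>

static kcondvar_t example_cv;           /* hypothetical wakeup cv */
static boolean_t example_exit;          /* hypothetical exit flag */

static void
example_cpr_thread(void)
{
        kmutex_t cpr_mutex;
        callb_cpr_t cprinfo;

        mutex_init(&cpr_mutex, NULL, MUTEX_DRIVER, NULL);
        CALLB_CPR_INIT(&cprinfo, &cpr_mutex, callb_generic_cpr,
            "example_cpr_thread");

        mutex_enter(&cpr_mutex);
        while (!example_exit) {
                /* Park suspend-safely until there is work or an exit. */
                CALLB_CPR_SAFE_BEGIN(&cprinfo);
                cv_wait(&example_cv, &cpr_mutex);
                CALLB_CPR_SAFE_END(&cprinfo, &cpr_mutex);
        }
        CALLB_CPR_EXIT(&cprinfo);       /* drops cpr_mutex for us */
        mutex_destroy(&cpr_mutex);
}

The ibtl_cq_thread hunks below trim the identical pattern in the same way.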
void
@@ -1244,11 +1219,10 @@
ibt_cq_handler_t cq_handler;
void *arg;
IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_cq_handler_call(%p)", ibtl_cq);
- _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ibtl_cq))
cq_handler = ibtl_cq->cq_comp_handler;
arg = ibtl_cq->cq_arg;
if (cq_handler != NULL)
cq_handler(ibtl_cq, arg);
else
@@ -1286,23 +1260,16 @@
*/
static void
ibtl_cq_thread(void)
{
-#ifndef __lock_lint
kmutex_t cpr_mutex;
-#endif
callb_cpr_t cprinfo;
- _NOTE(MUTEX_PROTECTS_DATA(cpr_mutex, cprinfo))
- _NOTE(NO_COMPETING_THREADS_NOW)
mutex_init(&cpr_mutex, NULL, MUTEX_DRIVER, NULL);
CALLB_CPR_INIT(&cprinfo, &cpr_mutex, callb_generic_cpr,
"ibtl_cq_thread");
-#ifndef lint
- _NOTE(COMPETING_THREADS_NOW)
-#endif
mutex_enter(&ibtl_cq_mutex);
for (;;) {
if (ibtl_cq_list_start) {
@@ -1339,14 +1306,12 @@
mutex_enter(&ibtl_cq_mutex);
}
}
mutex_exit(&ibtl_cq_mutex);
-#ifndef __lock_lint
mutex_enter(&cpr_mutex);
CALLB_CPR_EXIT(&cprinfo);
-#endif
mutex_destroy(&cpr_mutex);
}
/*
@@ -1431,11 +1396,10 @@
void *arg)
{
IBTF_DPRINTF_L3(ibtf_handlers, "ibt_set_cq_handler(%p, %p, %p)",
ibtl_cq, completion_handler, arg);
- _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ibtl_cq))
ibtl_cq->cq_comp_handler = completion_handler;
ibtl_cq->cq_arg = arg;
}
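ibt_set_cq_handler() above is the registration half of the dispatch seen earlier in ibtl_cq_handler_call(): ibtl simply stores the handler/arg pair and calls it back when the CQ fires. A hedged usage sketch from a client driver's point of view (handler name, state, and the client header are assumptions; the handler signature follows the cq_handler(ibtl_cq, arg) call visible in this diff):

#include <sys/ib/ibtl/ibti.h>           /* assumed IBTF client header */

static void
my_cq_handler(ibt_cq_hdl_t cq_hdl, void *arg)
{
        /* ... drain completions for the CQ, using arg as driver state ... */
}

static void
my_cq_setup(ibt_cq_hdl_t cq_hdl, void *my_state)
{
        /* Register: ibtl stores the pointer/arg pair it later calls back. */
        ibt_set_cq_handler(cq_hdl, my_cq_handler, my_state);
}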
@@ -1464,16 +1428,10 @@
bzero(&async_event, sizeof (async_event));
async_event.ev_hca_guid = hca_devp->hd_hca_attr->hca_node_guid;
clntp->clnt_modinfop->mi_async_handler(
clntp->clnt_private, NULL, new_hcap->nh_code, &async_event);
kmem_free(new_hcap, sizeof (*new_hcap));
-#ifdef __lock_lint
- {
- ibt_hca_hdl_t hca_hdl;
- (void) ibt_open_hca(clntp, 0ULL, &hca_hdl);
- }
-#endif
mutex_enter(&ibtl_clnt_list_mutex);
if (--hca_devp->hd_async_task_cnt == 0)
cv_signal(&hca_devp->hd_async_task_cv);
if (--clntp->clnt_async_cnt == 0)
cv_broadcast(&ibtl_clnt_cv);
@@ -1507,19 +1465,15 @@
while (clntp != NULL) {
if (clntp->clnt_modinfop->mi_clnt_class == IBT_IBMA) {
IBTF_DPRINTF_L4(ibtf_handlers,
"ibtl_announce_new_hca: calling IBMF");
if (clntp->clnt_modinfop->mi_async_handler) {
- _NOTE(NO_COMPETING_THREADS_NOW)
new_hcap = kmem_alloc(sizeof (*new_hcap),
KM_SLEEP);
new_hcap->nh_clntp = clntp;
new_hcap->nh_hca_devp = hca_devp;
new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
-#ifndef lint
- _NOTE(COMPETING_THREADS_NOW)
-#endif
clntp->clnt_async_cnt++;
hca_devp->hd_async_task_cnt++;
(void) taskq_dispatch(ibtl_async_taskq,
ibtl_tell_client_about_new_hca, new_hcap,
@@ -1536,19 +1490,15 @@
while (clntp != NULL) {
if (clntp->clnt_modinfop->mi_clnt_class == IBT_DM) {
IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_announce_new_hca: "
"calling %s", clntp->clnt_modinfop->mi_clnt_name);
if (clntp->clnt_modinfop->mi_async_handler) {
- _NOTE(NO_COMPETING_THREADS_NOW)
new_hcap = kmem_alloc(sizeof (*new_hcap),
KM_SLEEP);
new_hcap->nh_clntp = clntp;
new_hcap->nh_hca_devp = hca_devp;
new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
-#ifndef lint
- _NOTE(COMPETING_THREADS_NOW)
-#endif
clntp->clnt_async_cnt++;
hca_devp->hd_async_task_cnt++;
mutex_exit(&ibtl_clnt_list_mutex);
(void) ibtl_tell_client_about_new_hca(
@@ -1564,19 +1514,15 @@
while (clntp != NULL) {
if (clntp->clnt_modinfop->mi_clnt_class == IBT_CM) {
IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_announce_new_hca: "
"calling %s", clntp->clnt_modinfop->mi_clnt_name);
if (clntp->clnt_modinfop->mi_async_handler) {
- _NOTE(NO_COMPETING_THREADS_NOW)
new_hcap = kmem_alloc(sizeof (*new_hcap),
KM_SLEEP);
new_hcap->nh_clntp = clntp;
new_hcap->nh_hca_devp = hca_devp;
new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
-#ifndef lint
- _NOTE(COMPETING_THREADS_NOW)
-#endif
clntp->clnt_async_cnt++;
hca_devp->hd_async_task_cnt++;
(void) taskq_dispatch(ibtl_async_taskq,
ibtl_tell_client_about_new_hca, new_hcap,
@@ -1596,19 +1542,15 @@
(clntp->clnt_modinfop->mi_clnt_class != IBT_IBMA)) {
IBTF_DPRINTF_L4(ibtf_handlers,
"ibtl_announce_new_hca: Calling %s ",
clntp->clnt_modinfop->mi_clnt_name);
if (clntp->clnt_modinfop->mi_async_handler) {
- _NOTE(NO_COMPETING_THREADS_NOW)
new_hcap = kmem_alloc(sizeof (*new_hcap),
KM_SLEEP);
new_hcap->nh_clntp = clntp;
new_hcap->nh_hca_devp = hca_devp;
new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
-#ifndef lint
- _NOTE(COMPETING_THREADS_NOW)
-#endif
clntp->clnt_async_cnt++;
hca_devp->hd_async_task_cnt++;
(void) taskq_dispatch(ibtl_async_taskq,
ibtl_tell_client_about_new_hca, new_hcap,
@@ -1875,18 +1817,14 @@
struct ibtl_sm_notice *noticep;
ib_gid_t *sgidp = &ifail->smf_sgid[0];
int i;
for (i = 0; i < ifail->smf_num_sgids; i++) {
- _NOTE(NO_COMPETING_THREADS_NOW)
noticep = kmem_zalloc(sizeof (*noticep), KM_SLEEP);
noticep->np_ibt_hdl = ibt_hdl;
noticep->np_sgid = *sgidp++;
noticep->np_code = IBT_SM_EVENT_UNAVAILABLE;
-#ifndef lint
- _NOTE(COMPETING_THREADS_NOW)
-#endif
ibtl_inc_clnt_async_cnt(ibt_hdl);
(void) taskq_dispatch(ibtl_async_taskq,
ibtl_sm_notice_task, noticep, TQ_SLEEP);
}
}
@@ -1896,11 +1834,10 @@
*/
void
ibtl_cm_sm_notice_handler(ib_gid_t sgid, ibt_subnet_event_code_t code,
ibt_subnet_event_t *event)
{
- _NOTE(NO_COMPETING_THREADS_NOW)
struct ibtl_sm_notice *noticep;
ibtl_clnt_t *clntp;
mutex_enter(&ibtl_clnt_list_mutex);
clntp = ibtl_clnt_list;
@@ -1916,28 +1853,21 @@
ibtl_sm_notice_task, noticep, TQ_SLEEP);
}
clntp = clntp->clnt_list_link;
}
mutex_exit(&ibtl_clnt_list_mutex);
-#ifndef lint
- _NOTE(COMPETING_THREADS_NOW)
-#endif
}
/*
* Record the handler for this client.
*/
void
ibtl_cm_set_sm_notice_handler(ibt_clnt_hdl_t ibt_hdl,
ibt_sm_notice_handler_t sm_notice_handler, void *private)
{
- _NOTE(NO_COMPETING_THREADS_NOW)
ibt_hdl->clnt_sm_trap_handler = sm_notice_handler;
ibt_hdl->clnt_sm_trap_handler_arg = private;
-#ifndef lint
- _NOTE(COMPETING_THREADS_NOW)
-#endif
}
/*
* ibtl_another_cq_handler_in_thread()
@@ -1980,13 +1910,11 @@
}
my_idx = ibtl_cq_threads++;
mutex_exit(&ibtl_cq_mutex);
t = thread_create(NULL, 0, ibtl_cq_thread, NULL, 0, &p0, TS_RUN,
ibtl_pri - 1);
- _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
ibtl_cq_did[my_idx] = t->t_did; /* save for thread_join() */
- _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
}
void
ibtl_thread_init(void)
{
@@ -2012,11 +1940,10 @@
mutex_exit(&ibtl_async_mutex);
return;
}
initted = 1;
mutex_exit(&ibtl_async_mutex);
- _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_async_did))
ibtl_async_did = kmem_zalloc(ibtl_async_thread_init * sizeof (kt_did_t),
KM_SLEEP);
IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_init2()");
@@ -2023,20 +1950,15 @@
for (i = 0; i < ibtl_async_thread_init; i++) {
t = thread_create(NULL, 0, ibtl_async_thread, NULL, 0, &p0,
TS_RUN, ibtl_pri - 1);
ibtl_async_did[i] = t->t_did; /* thread_join() */
}
- _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_async_did))
- _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
for (i = 0; i < ibtl_cq_threads; i++) {
t = thread_create(NULL, 0, ibtl_cq_thread, NULL, 0, &p0,
TS_RUN, ibtl_pri - 1);
- _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
ibtl_cq_did[i] = t->t_did; /* save for thread_join() */
- _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
}
- _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
}
void
ibtl_thread_fini(void)
{
@@ -2054,14 +1976,12 @@
mutex_enter(&ibtl_async_mutex);
ibtl_async_thread_exit = IBTL_THREAD_EXIT;
cv_broadcast(&ibtl_async_cv);
mutex_exit(&ibtl_async_mutex);
- _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
for (i = 0; i < ibtl_cq_threads; i++)
thread_join(ibtl_cq_did[i]);
- _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
if (ibtl_async_did) {
for (i = 0; i < ibtl_async_thread_init; i++)
thread_join(ibtl_async_did[i]);
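The did arrays whose visibility annotations are dropped in these last hunks support the standard kernel-thread reap pattern: save t_did from thread_create(), later thread_join() on it. A compact sketch of that pattern in isolation (all names hypothetical; the priority choice and exit signaling are placeholders, where ibtl itself uses ibtl_pri and IBTL_THREAD_EXIT + cv_broadcast()):

#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/disp.h>

static kt_did_t example_did;

static void
example_worker(void)
{
        /* ... service loop until told to stop ... */
        thread_exit();                  /* never returns */
}

static void
example_start(void)
{
        kthread_t *t;

        t = thread_create(NULL, 0, example_worker, NULL, 0, &p0,
            TS_RUN, minclsyspri);
        example_did = t->t_did;         /* save for thread_join() */
}

static void
example_fini(void)
{
        /* ... ask the worker to leave its loop ... */
        thread_join(example_did);       /* blocks until the thread exits */
}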