26 /*
27 * ibcm_utils.c
28 *
29 * Contains the internal lookup functions of the IB CM module,
30 * along with some other miscellaneous utilities.
31 *
32 * TBD:
33 * 1. Code is needed to ensure that a service is not de-registered
34 *    while any clients are still using it.
35 */
36
37 #include <sys/ib/mgt/ibcm/ibcm_impl.h>
38 #include <sys/ddi.h>
39
40
41 /* statics */
42 static vmem_t *ibcm_local_sid_arena;
43 static vmem_t *ibcm_ip_sid_arena;
44 static ib_svc_id_t ibcm_local_sid_seed;
45 static ib_com_id_t ibcm_local_cid_seed;
46 _NOTE(READ_ONLY_DATA({ibcm_local_sid_arena ibcm_local_sid_seed
47 ibcm_ip_sid_arena ibcm_local_cid_seed}))
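/*
 * The _NOTE(READ_ONLY_DATA(...)) annotation above is a lock_lint/warlock
 * hint: these seeds and arenas are written only while the module is being
 * initialized (see ibcm_init_ids() below) and are treated as read-only
 * afterwards, so unlocked reads are not flagged by the static lock checker.
 */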
48 static void ibcm_delete_state_from_avl(ibcm_state_data_t *statep);
49 static void ibcm_init_conn_trace(ibcm_state_data_t *statep);
50 static void ibcm_fini_conn_trace(ibcm_state_data_t *statep);
51 static void ibcm_dump_conn_trbuf(void *statep, char *line_prefix,
52 char *buf, int buf_size);
53 extern ibt_status_t ibcm_get_node_rec(ibmf_saa_handle_t, sa_node_record_t *,
54 uint64_t c_mask, void *, size_t *);
55
56 /*
57 * ibcm_lookup_msg:
58 *
59 * Retrieves an existing state structure or creates a new one if none is
60 * found. This function is used on the:
61 *	Passive connection side for INCOMING REQ/REJ/RTU/MRA/DREQ/DREP/LAP msgs
62 *	Active connection side for INCOMING REP/REJ/MRA/DREQ/DREP/APR msgs
63 *	Active side CM for an outgoing REQ message.
64 *
65 * NOTE: Only return IBCM_LOOKUP_FAIL if lookup failed to find a match.
66 *
67 * Arguments are:-
155 * statep is created for INCOMING/OUTGOING REQ.
156 * For all other event_types we return lookup failure
157 */
158 if (!((event_type == IBCM_INCOMING_REQ) ||
159 (event_type == IBCM_INCOMING_REQ_STALE) ||
160 (event_type == IBCM_INCOMING_REP_STALE) ||
161 (event_type == IBCM_OUTGOING_REQ))) {
162 IBTF_DPRINTF_L2(cmlog, "ibcm_lookup_msg: failed for "
163 "event type %x remote_comid = 0x%x",
164 event_type, comid);
165
166 return (IBCM_LOOKUP_FAIL);
167 }
168
169 if ((event_type == IBCM_INCOMING_REQ) ||
170 (event_type == IBCM_OUTGOING_REQ)) {
171
172 /* fill in the new ibcm_state_data */
173 sp = *rstatep;
174
175 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sp))
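/*
 * The NOW_INVISIBLE/NOW_VISIBLE_TO_OTHER_THREADS pair that brackets this
 * block is a lock_lint/warlock hint that *sp is still private to this
 * thread while it is being initialized, so the unlocked field accesses
 * below are not reported.
 */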
176
177 /* initialize statep */
178 mutex_init(&sp->state_mutex, NULL, MUTEX_DEFAULT, NULL);
179 cv_init(&sp->block_client_cv, NULL, CV_DRIVER, NULL);
180 cv_init(&sp->block_mad_cv, NULL, CV_DRIVER, NULL);
181
182 sp->hcap = hcap;
183 IBCM_REF_CNT_INCR(sp);
184 sp->local_comid = comid;
185
186 if (ibcm_enable_trace != 0)
187 ibcm_init_conn_trace(sp);
188
189 if (event_type == IBCM_INCOMING_REQ) { /* Passive side */
190 sp->state = IBCM_STATE_REQ_RCVD;
191 sp->clnt_proceed = IBCM_BLOCK;
192 sp->close_nocb_state = IBCM_UNBLOCK;
193 sp->remote_hca_guid = remote_hca_guid;
194 sp->remote_qpn = remote_qpn;
195
196 } else if (event_type == IBCM_OUTGOING_REQ) { /* Active side */
197 sp->close_nocb_state = IBCM_UNBLOCK;
198 sp->state = IBCM_STATE_IDLE;
199 }
200
201 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*sp))
202
203 } else {
204 sp = *rstatep; /* for incoming REQ/REP STALE only */
205 }
206
207 if ((event_type == IBCM_INCOMING_REQ) ||
208 (event_type == IBCM_INCOMING_REP_STALE)) {
209
210 /* First, insert a new "sp" into "hca_passive_tree" @ "where" */
211 avl_insert(&(hcap->hca_passive_tree), (void *)sp, where);
212
213 if (event_type == IBCM_INCOMING_REQ) { /* Only INCOMING_REQ */
214 /*
215 * We have to do an avl_find() to figure out
216 * "where" to insert the statep into the active tree.
217 *
218 * CM doesn't care for avl_find's retval.
219 */
220 (void) avl_find(&hcap->hca_active_tree,
221 &sp->local_comid, &where);
222
629 * Adds a SIDR entry. Called *ONLY* from ibcm_find_sidr_entry()
630 *
631 * INPUTS:
632 *	srch_param: Search parameters of the incoming SIDR REQ
633 *		(LID, GID, GRH present flag, request ID and mode)
634 *	hcap: CM HCA entry whose SIDR list the new state structure
635 *		is added to
636 *
637 * Return Values: Pointer to the newly created ibcm_ud_state_data_t
638 */
639 ibcm_ud_state_data_t *
640 ibcm_add_sidr_entry(ibcm_sidr_srch_t *srch_param, ibcm_hca_info_t *hcap)
641 {
642 ibcm_ud_state_data_t *ud_statep;
643
644 IBTF_DPRINTF_L5(cmlog, "ibcm_add_sidr_entry: lid=%x, guid=%llX, "
645 "grh = %x req_id = %x", srch_param->srch_lid,
646 srch_param->srch_gid.gid_guid, srch_param->srch_grh_exists,
647 srch_param->srch_req_id);
648
649 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ud_statep))
650
651 /* didn't find the entry - so create new */
652 ud_statep = kmem_zalloc(sizeof (ibcm_ud_state_data_t), KM_SLEEP);
653
654 mutex_init(&ud_statep->ud_state_mutex, NULL, MUTEX_DEFAULT, NULL);
655 cv_init(&ud_statep->ud_block_client_cv, NULL, CV_DRIVER, NULL);
656
657 /* Initialize some ud_statep fields */
658 mutex_enter(&ud_statep->ud_state_mutex);
659 ud_statep->ud_hcap = hcap;
660 ud_statep->ud_req_id = srch_param->srch_req_id;
661 ud_statep->ud_ref_cnt = 1;
662 ud_statep->ud_grh_exists = srch_param->srch_grh_exists;
663 ud_statep->ud_sidr_req_lid = srch_param->srch_lid;
664 ud_statep->ud_sidr_req_gid = srch_param->srch_gid;
665 ud_statep->ud_mode = srch_param->srch_mode;
666 ud_statep->ud_max_cm_retries = ibcm_max_retries;
667 mutex_exit(&ud_statep->ud_state_mutex);
668
669 /* Update the list */
670 ud_statep->ud_nextp = hcap->hca_sidr_list;
671 hcap->hca_sidr_list = ud_statep;
672
673 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*ud_statep))
674
675 return (ud_statep);
676 }
677
678
679 /*
680 * ibcm_delete_ud_state_data:
681 * Deletes a given state structure
682 *
683 * Arguments are:-
684 * statep - statep to be deleted
685 *
686 * Return Values: NONE
687 */
688 void
689 ibcm_delete_ud_state_data(ibcm_ud_state_data_t *ud_statep)
690 {
691 ibcm_ud_state_data_t *prevp, *headp;
692 ibcm_hca_info_t *hcap;
693
694 IBTF_DPRINTF_L4(cmlog, "ibcm_delete_ud_state_data: ud_statep 0x%p",
807 /* free the statep */
808 kmem_free(ud_statep, sizeof (ibcm_ud_state_data_t));
809 }
810
811
812 /*
813 * ibcm_init_ids:
814 * Create the vmem arenas for the various global ids
815 *
816 * Arguments are:-
817 * NONE
818 *
819 * Return Values: ibcm_status_t
820 */
821
822 ibcm_status_t
823 ibcm_init_ids(void)
824 {
825 timespec_t tv;
826
827 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibcm_local_sid_arena))
828 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibcm_ip_sid_arena))
829 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibcm_local_sid_seed))
830 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibcm_local_cid_seed))
831
832 ibcm_local_sid_arena = vmem_create("ibcm_local_sid",
833 (void *)IBCM_INITIAL_SID, IBCM_MAX_LOCAL_SIDS, 1, NULL, NULL, NULL,
834 0, VM_SLEEP | VMC_IDENTIFIER);
835
836 if (!ibcm_local_sid_arena)
837 return (IBCM_FAILURE);
838
839 ibcm_ip_sid_arena = vmem_create("ibcm_ip_sid", (void *)IBCM_INITIAL_SID,
840 IBCM_MAX_IP_SIDS, 1, NULL, NULL, NULL, 0,
841 VM_SLEEP | VMC_IDENTIFIER);
842
843 if (!ibcm_ip_sid_arena)
844 return (IBCM_FAILURE);
845
846 /* create a random starting value for local service ids */
847 gethrestime(&tv);
848 ibcm_local_sid_seed = ((uint64_t)tv.tv_sec << 20) & 0x007FFFFFFFF00000;
849 ASSERT((ibcm_local_sid_seed & IB_SID_AGN_MASK) == 0);
850 ibcm_local_sid_seed |= IB_SID_AGN_LOCAL;
851
852 ibcm_local_cid_seed = (ib_com_id_t)tv.tv_sec;
853 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibcm_local_sid_arena))
854 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibcm_local_sid_seed))
855 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibcm_ip_sid_arena))
856 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibcm_local_cid_seed))
857
858 return (IBCM_SUCCESS);
859 }
860
861
862 /*
863 * ibcm_init_hca_ids:
864 * Create the vmem arenas for the various hca level ids
865 *
866 * Arguments are:-
867 * hcap pointer to ibcm_hca_info_t
868 *
869 * Return Values: ibcm_status_t
870 */
871 ibcm_status_t
872 ibcm_init_hca_ids(ibcm_hca_info_t *hcap)
873 {
874 hcap->hca_comid_arena = vmem_create("ibcm_com_ids",
875 (void *)IBCM_INITIAL_COMID, IBCM_MAX_COMIDS,
876 1, NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
1220 }
1221
1222
1223 /*
1224 * ibcm_create_svc_entry:
1225 * Allocate a new entry and fill in the critical "look up" details
1226 * provided in the arguments, then make sure no conflicting entry
1227 * exists before inserting it into the AVL tree under the lock.
1228 *
1229 * Return values:
1230 * Pointer to ibcm_svc_info_t, if created, otherwise NULL.
1231 */
1232 ibcm_svc_info_t *
1233 ibcm_create_svc_entry(ib_svc_id_t sid, int num_sids)
1234 {
1235 ibcm_svc_info_t *svcp;
1236 ibcm_svc_info_t *svcinfop;
1237 ibcm_svc_lookup_t svc;
1238 avl_index_t where = 0;
1239
1240 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*svcinfop))
1241
1242 /* assume success, and avoid kmem while holding the writer lock */
1243 svcinfop = kmem_zalloc(sizeof (*svcinfop), KM_SLEEP);
1244 svcinfop->svc_id = sid;
1245 svcinfop->svc_num_sids = num_sids;
1246
1247 svc.sid = sid;
1248 svc.num_sids = num_sids;
1249
1250 mutex_enter(&ibcm_svc_info_lock);
1251 #ifdef __lock_lint
1252 ibcm_svc_compare(NULL, NULL);
1253 #endif
1254 svcp = avl_find(&ibcm_svc_avl_tree, &svc, &where);
1255 if (svcp != NULL) { /* overlap exists */
1256 mutex_exit(&ibcm_svc_info_lock);
1257 kmem_free(svcinfop, sizeof (*svcinfop));
1258 return (NULL);
1259 }
1260 avl_insert(&ibcm_svc_avl_tree, (void *)svcinfop, where);
1261 mutex_exit(&ibcm_svc_info_lock);
1262
1263 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*svcinfop))
1264
1265 return (svcinfop);
1266 }
1267
1268 /*
1269 * ibcm_find_svc_entry:
1270 * Finds an ibcm_svc_info_t entry in the CM's global table,
1271 * an AVL tree that is ordered by SID.
1272 *
1273 * Arguments are:
1274 * sid - Service ID to look up
1275 *
1276 * Return values:
1277 * Pointer to ibcm_svc_info_t, if found, otherwise NULL.
1278 */
1279 ibcm_svc_info_t *
1280 ibcm_find_svc_entry(ib_svc_id_t sid)
1281 {
1282 ibcm_svc_info_t *svcp;
1283 ibcm_svc_lookup_t svc;
1284
1285 IBTF_DPRINTF_L3(cmlog, "ibcm_find_svc_entry: finding SID 0x%llX", sid);
1286
1287 ASSERT(MUTEX_HELD(&ibcm_svc_info_lock));
1288
1289 svc.sid = sid;
1290 svc.num_sids = 1;
1291 #ifdef __lock_lint
1292 ibcm_svc_compare(NULL, NULL);
1293 #endif
1294 svcp = avl_find(&ibcm_svc_avl_tree, &svc, NULL);
1295 if (svcp != NULL) {
1296 IBTF_DPRINTF_L3(cmlog, "ibcm_find_svc_entry: "
1297 "found SID = 0x%llX", sid);
1298 return (svcp); /* found it */
1299 }
1300 IBTF_DPRINTF_L3(cmlog, "ibcm_find_svc_entry: SID %llX not found", sid);
1301 return (NULL);
1302 }
1303
1304 /*
1305 * ibcm_alloc_ibmf_msg:
1306 * Allocate an ibmf message structure and the additional memory required for
1307 * sending an outgoing CM mad. The ibmf message structure contains two
1308 * ibmf_msg_bufs_t fields, one for the incoming MAD and one for the outgoing
1309 * MAD. The CM must allocate the memory for the outgoing MAD. The msg_buf
1310 * field has three buffers: the mad header, the class header, and the class
1311 * data. To simplify the code and reduce the number of kmem_zalloc() calls,
1312 * ibcm_alloc_ibmf_msg will allocate one buffer and set the pointers to the
1313 * right offsets. No class header is needed so only the mad header and class
1360
1361 kmem_free((*ibmf_msgpp)->im_msgbufs_send.im_bufs_mad_hdr,
1362 IBCM_MAD_SIZE);
1363
1364 if ((ibmf_status = ibmf_free_msg(ibmf_handle, ibmf_msgpp)) !=
1365 IBMF_SUCCESS) {
1366 IBTF_DPRINTF_L2(cmlog, "ibcm_free_out_msg: "
1367 "ibmf_free_msg failed %d", ibmf_status);
1368 return (IBCM_FAILURE);
1369 } else
1370 return (IBCM_SUCCESS);
1371 }
1372
1373 ibcm_qp_list_t *
1374 ibcm_find_qp(ibcm_hca_info_t *hcap, int port_no, ib_pkey_t pkey)
1375 {
1376 ibcm_qp_list_t *entry;
1377 ibmf_qp_handle_t ibmf_qp;
1378 int ibmf_status;
1379
1380 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*entry))
1381
1382 mutex_enter(&ibcm_qp_list_lock);
1383
1384 /*
1385 * CM currently does not track port up/down status. If tracking of
1386 * "port status" is added in the future, then CM could be optimized to
1387 * re-use other ports on hcap if the port associated with the above
1388 * port_no is down. But the issue of "reachability" needs to be
1389 * handled before selecting an alternative port different from above.
1390 */
1391 entry = hcap->hca_port_info[port_no-1].port_qplist;
1392 while (entry != NULL) {
1393 if (entry->qp_pkey == pkey) {
1394 ++entry->qp_ref_cnt;
1395 mutex_exit(&ibcm_qp_list_lock);
1396 return (entry);
1397 }
1398 entry = entry->qp_next;
1399 }
1400
1401 /*
1424 entry->qp_next = hcap->hca_port_info[port_no-1].port_qplist;
1425 hcap->hca_port_info[port_no-1].port_qplist = entry;
1426 entry->qp_cm = ibmf_qp;
1427 entry->qp_ref_cnt = 1;
1428 entry->qp_pkey = pkey;
1429 entry->qp_port = &(hcap->hca_port_info[port_no-1]);
1430
1431 mutex_exit(&ibcm_qp_list_lock);
1432
1433 /* set-up the handler */
1434 ibmf_status = ibmf_setup_async_cb(
1435 hcap->hca_port_info[port_no-1].port_ibmf_hdl, ibmf_qp,
1436 ibcm_recv_cb, entry, 0);
1437
1438 ASSERT(ibmf_status == IBMF_SUCCESS);
1439
1440 #ifdef DEBUG
1441 ibcm_query_qp(hcap->hca_port_info[port_no-1].port_ibmf_hdl, ibmf_qp);
1442 #endif
1443
1444 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*entry))
1445
1446 return (entry);
1447 }
1448
1449 void
1450 ibcm_release_qp(ibcm_qp_list_t *cm_qp_entry)
1451 {
1452 mutex_enter(&ibcm_qp_list_lock);
1453 --cm_qp_entry->qp_ref_cnt;
1454 ASSERT(cm_qp_entry->qp_ref_cnt >= 0);
1455 mutex_exit(&ibcm_qp_list_lock);
1456 }
1457
1458
1459 /* called holding the ibcm_qp_list_lock mutex */
1460 ibcm_status_t
1461 ibcm_free_qp(ibcm_qp_list_t *cm_qp_entry)
1462 {
1463 int ibmf_status;
1464
1465 IBTF_DPRINTF_L5(cmlog, "ibcm_free_qp: qp_hdl %p ref_cnt %d pkey %x",
1564 sb_data->s_data16[i] = h2b16(*p16++);
1565
1566 p32 = (uint32_t *)p16;
1567 for (i = 0; i < 4; i++)
1568 sb_data->s_data32[i] = h2b32(*p32++);
1569 p64 = (uint64_t *)p32;
1570
1571 for (i = 0; i < 2; i++)
1572 sb_data->s_data64[i] = h2b64(*p64++);
1573 }
1574
1575 /* Trace related functions */
1576
1577 void
1578 ibcm_init_conn_trace(ibcm_state_data_t *sp)
1579 {
1580 IBTF_DPRINTF_L5(cmlog, "ibcm_init_conn_trace: statep %p", sp);
1581
1582 /* Initialize trace related fields */
1583
1584 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sp->conn_trace))
1585 sp->conn_trace = kmem_zalloc(sizeof (ibcm_conn_trace_t), KM_SLEEP);
1586 if ((ibcm_enable_trace & 1) == 0)
1587 sp->conn_trace->conn_base_tm = gethrtime();
1588 sp->conn_trace->conn_allocated_trcnt = ibcm_conn_max_trcnt;
1589 sp->conn_trace->conn_trace_events =
1590 kmem_zalloc(sp->conn_trace->conn_allocated_trcnt, KM_SLEEP);
1591 sp->conn_trace->conn_trace_event_times =
1592 kmem_zalloc(sp->conn_trace->conn_allocated_trcnt *
1593 sizeof (tm_diff_type), KM_SLEEP);
1594 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*sp->conn_trace))
1595 }
1596
1597 void
1598 ibcm_fini_conn_trace(ibcm_state_data_t *statep)
1599 {
1600 IBTF_DPRINTF_L5(cmlog, "ibcm_fini_conn_trace: statep %p tracep %p",
1601 statep, statep->conn_trace);
1602
1603 /* free the trace data */
1604 if (statep->conn_trace) {
1605 if (statep->conn_trace->conn_trace_events)
1606 kmem_free(statep->conn_trace->conn_trace_events,
1607 statep->conn_trace->conn_allocated_trcnt);
1608 if (statep->conn_trace->conn_trace_event_times)
1609 kmem_free(statep->conn_trace->conn_trace_event_times,
1610 statep->conn_trace->conn_allocated_trcnt *
1611 sizeof (tm_diff_type));
1612
1613 kmem_free(statep->conn_trace, sizeof (ibcm_conn_trace_t));
1614 }
1976
1977 /* Retrieve Node Records from SA Access. */
1978 bzero(&nr_req, sizeof (sa_node_record_t));
1979 nr_req.LID = lid;
1980
1981 ibt_status = ibcm_get_node_rec(saa_handle, &nr_req,
1982 SA_NODEINFO_COMPMASK_NODELID, &res_p, &len);
1983 if (ibt_status != IBT_SUCCESS) {
1984 IBTF_DPRINTF_L2(cmlog, "ibcm_ibtl_node_info: "
1985 "failed (%d) to get Node records", ibt_status);
1986 ibcm_dec_hca_acc_cnt(hcap);
1987 return (IBT_FAILURE);
1988 }
1989
1990 num_rec = len/sizeof (sa_node_record_t);
1991 nr_resp = (sa_node_record_t *)(uchar_t *)res_p;
1992
1993 if ((nr_resp != NULL) && (num_rec > 0)) {
1994 IBCM_DUMP_NODE_REC(nr_resp);
1995
1996 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(
1997 *node_info_p))
1998
1999 node_info_p->n_sys_img_guid =
2000 nr_resp->NodeInfo.SystemImageGUID;
2001 node_info_p->n_node_guid =
2002 nr_resp->NodeInfo.NodeGUID;
2003 node_info_p->n_port_guid =
2004 nr_resp->NodeInfo.PortGUID;
2005 node_info_p->n_dev_id =
2006 nr_resp->NodeInfo.DeviceID;
2007 node_info_p->n_revision =
2008 nr_resp->NodeInfo.Revision;
2009 node_info_p->n_vendor_id =
2010 nr_resp->NodeInfo.VendorID;
2011 node_info_p->n_num_ports =
2012 nr_resp->NodeInfo.NumPorts;
2013 node_info_p->n_port_num =
2014 nr_resp->NodeInfo.LocalPortNum;
2015 node_info_p->n_node_type =
2016 nr_resp->NodeInfo.NodeType;
2017 (void) strncpy(node_info_p->n_description,
2018 (char *)&nr_resp->NodeDescription, 64);
2019 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(
2020 *node_info_p))
2021
2022
2023 kmem_free(nr_resp, len);
2024 }
2025 ibcm_dec_hca_acc_cnt(hcap);
2026 return (IBT_SUCCESS);
2027 }
2028
2029 /*
2030 * ibcm_ibmf_analyze_error:
2031 * Checks IBMF status and determines appropriate ibt status.
2032 *
2033 * Arguments:
2034 * ibmf_status - IBMF Status
2035 *
2036 * Return values:
2037 * ibt_status_t
2038 */
2039 ibt_status_t
2040 ibcm_ibmf_analyze_error(int ibmf_status)
2041 {
2042 if (ibt_check_failure(ibmf_status, NULL) != IBT_FAILURE_STANDARD) {
|
26 /*
27 * ibcm_utils.c
28 *
29 * Contains the internal lookup functions of the IB CM module,
30 * along with some other miscellaneous utilities.
31 *
32 * TBD:
33 * 1. Code is needed to ensure that a service is not de-registered
34 *    while any clients are still using it.
35 */
36
37 #include <sys/ib/mgt/ibcm/ibcm_impl.h>
38 #include <sys/ddi.h>
39
40
41 /* statics */
42 static vmem_t *ibcm_local_sid_arena;
43 static vmem_t *ibcm_ip_sid_arena;
44 static ib_svc_id_t ibcm_local_sid_seed;
45 static ib_com_id_t ibcm_local_cid_seed;
46 static void ibcm_delete_state_from_avl(ibcm_state_data_t *statep);
47 static void ibcm_init_conn_trace(ibcm_state_data_t *statep);
48 static void ibcm_fini_conn_trace(ibcm_state_data_t *statep);
49 static void ibcm_dump_conn_trbuf(void *statep, char *line_prefix,
50 char *buf, int buf_size);
51 extern ibt_status_t ibcm_get_node_rec(ibmf_saa_handle_t, sa_node_record_t *,
52 uint64_t c_mask, void *, size_t *);
53
54 /*
55 * ibcm_lookup_msg:
56 *
57 * Retrieves an existing state structure or creates a new one if none is
58 * found. This function is used on the:
59 *	Passive connection side for INCOMING REQ/REJ/RTU/MRA/DREQ/DREP/LAP msgs
60 *	Active connection side for INCOMING REP/REJ/MRA/DREQ/DREP/APR msgs
61 *	Active side CM for an outgoing REQ message.
62 *
63 * NOTE: Only return IBCM_LOOKUP_FAIL if lookup failed to find a match.
64 *
65 * Arguments are:-
153 * statep is created for INCOMING/OUTGOING REQ.
154 * For all other event_types we return lookup failure
155 */
156 if (!((event_type == IBCM_INCOMING_REQ) ||
157 (event_type == IBCM_INCOMING_REQ_STALE) ||
158 (event_type == IBCM_INCOMING_REP_STALE) ||
159 (event_type == IBCM_OUTGOING_REQ))) {
160 IBTF_DPRINTF_L2(cmlog, "ibcm_lookup_msg: failed for "
161 "event type %x remote_comid = 0x%x",
162 event_type, comid);
163
164 return (IBCM_LOOKUP_FAIL);
165 }
166
167 if ((event_type == IBCM_INCOMING_REQ) ||
168 (event_type == IBCM_OUTGOING_REQ)) {
169
170 /* fill in the new ibcm_state_data */
171 sp = *rstatep;
172
173 /* initialize statep */
174 mutex_init(&sp->state_mutex, NULL, MUTEX_DEFAULT, NULL);
175 cv_init(&sp->block_client_cv, NULL, CV_DRIVER, NULL);
176 cv_init(&sp->block_mad_cv, NULL, CV_DRIVER, NULL);
177
178 sp->hcap = hcap;
179 IBCM_REF_CNT_INCR(sp);
180 sp->local_comid = comid;
181
182 if (ibcm_enable_trace != 0)
183 ibcm_init_conn_trace(sp);
184
185 if (event_type == IBCM_INCOMING_REQ) { /* Passive side */
186 sp->state = IBCM_STATE_REQ_RCVD;
187 sp->clnt_proceed = IBCM_BLOCK;
188 sp->close_nocb_state = IBCM_UNBLOCK;
189 sp->remote_hca_guid = remote_hca_guid;
190 sp->remote_qpn = remote_qpn;
191 } else if (event_type == IBCM_OUTGOING_REQ) { /* Active side */
192 sp->close_nocb_state = IBCM_UNBLOCK;
193 sp->state = IBCM_STATE_IDLE;
194 }
195 } else {
196 sp = *rstatep; /* for incoming REQ/REP STALE only */
197 }
198
199 if ((event_type == IBCM_INCOMING_REQ) ||
200 (event_type == IBCM_INCOMING_REP_STALE)) {
201
202 /* First, insert a new "sp" into "hca_passive_tree" @ "where" */
203 avl_insert(&(hcap->hca_passive_tree), (void *)sp, where);
204
205 if (event_type == IBCM_INCOMING_REQ) { /* Only INCOMING_REQ */
206 /*
207 * We have to do an avl_find() to figure out
208 * "where" to insert the statep into the active tree.
209 *
210 * CM doesn't care about avl_find's return value here.
211 */
212 (void) avl_find(&hcap->hca_active_tree,
213 &sp->local_comid, &where);
214
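		/*
		 * At this point the new statep sits in hca_passive_tree;
		 * the avl_find() above only computes "where" so that the
		 * same statep can next be inserted into hca_active_tree,
		 * keyed by its local_comid.
		 */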
621 * Adds a SIDR entry. Called *ONLY* from ibcm_find_sidr_entry()
622 *
623 * INPUTS:
624 *	srch_param: Search parameters of the incoming SIDR REQ
625 *		(LID, GID, GRH present flag, request ID and mode)
626 *	hcap: CM HCA entry whose SIDR list the new state structure
627 *		is added to
628 *
629 * Return Values: Pointer to the newly created ibcm_ud_state_data_t
630 */
631 ibcm_ud_state_data_t *
632 ibcm_add_sidr_entry(ibcm_sidr_srch_t *srch_param, ibcm_hca_info_t *hcap)
633 {
634 ibcm_ud_state_data_t *ud_statep;
635
636 IBTF_DPRINTF_L5(cmlog, "ibcm_add_sidr_entry: lid=%x, guid=%llX, "
637 "grh = %x req_id = %x", srch_param->srch_lid,
638 srch_param->srch_gid.gid_guid, srch_param->srch_grh_exists,
639 srch_param->srch_req_id);
640
641 /* didn't find the entry - so create new */
642 ud_statep = kmem_zalloc(sizeof (ibcm_ud_state_data_t), KM_SLEEP);
643
644 mutex_init(&ud_statep->ud_state_mutex, NULL, MUTEX_DEFAULT, NULL);
645 cv_init(&ud_statep->ud_block_client_cv, NULL, CV_DRIVER, NULL);
646
647 /* Initialize some ud_statep fields */
648 mutex_enter(&ud_statep->ud_state_mutex);
649 ud_statep->ud_hcap = hcap;
650 ud_statep->ud_req_id = srch_param->srch_req_id;
651 ud_statep->ud_ref_cnt = 1;
652 ud_statep->ud_grh_exists = srch_param->srch_grh_exists;
653 ud_statep->ud_sidr_req_lid = srch_param->srch_lid;
654 ud_statep->ud_sidr_req_gid = srch_param->srch_gid;
655 ud_statep->ud_mode = srch_param->srch_mode;
656 ud_statep->ud_max_cm_retries = ibcm_max_retries;
657 mutex_exit(&ud_statep->ud_state_mutex);
658
659 /* Update the list */
660 ud_statep->ud_nextp = hcap->hca_sidr_list;
661 hcap->hca_sidr_list = ud_statep;
662
663 return (ud_statep);
664 }
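/*
 * A minimal sketch of how the singly linked SIDR list headed at
 * hcap->hca_sidr_list can be walked to match an entry by request id.
 * The real matching lives in ibcm_find_sidr_entry(); the helper name,
 * the IBCM_EXAMPLE_SKETCHES guard and the req_id type are hypothetical.
 */
#ifdef IBCM_EXAMPLE_SKETCHES
static ibcm_ud_state_data_t *
ibcm_example_walk_sidr_list(ibcm_hca_info_t *hcap, uint32_t req_id)
{
	ibcm_ud_state_data_t *udp;

	/* the caller is assumed to hold the lock protecting hca_sidr_list */
	for (udp = hcap->hca_sidr_list; udp != NULL; udp = udp->ud_nextp) {
		if (udp->ud_req_id == req_id)
			return (udp);
	}
	return (NULL);
}
#endif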
665
666
667 /*
668 * ibcm_delete_ud_state_data:
669 * Deletes a given state structure
670 *
671 * Arguments are:-
672 * statep - statep to be deleted
673 *
674 * Return Values: NONE
675 */
676 void
677 ibcm_delete_ud_state_data(ibcm_ud_state_data_t *ud_statep)
678 {
679 ibcm_ud_state_data_t *prevp, *headp;
680 ibcm_hca_info_t *hcap;
681
682 IBTF_DPRINTF_L4(cmlog, "ibcm_delete_ud_state_data: ud_statep 0x%p",
795 /* free the statep */
796 kmem_free(ud_statep, sizeof (ibcm_ud_state_data_t));
797 }
798
799
800 /*
801 * ibcm_init_ids:
802 * Create the vmem arenas for the various global ids
803 *
804 * Arguments are:-
805 * NONE
806 *
807 * Return Values: ibcm_status_t
808 */
809
810 ibcm_status_t
811 ibcm_init_ids(void)
812 {
813 timespec_t tv;
814
815 ibcm_local_sid_arena = vmem_create("ibcm_local_sid",
816 (void *)IBCM_INITIAL_SID, IBCM_MAX_LOCAL_SIDS, 1, NULL, NULL, NULL,
817 0, VM_SLEEP | VMC_IDENTIFIER);
818
819 if (!ibcm_local_sid_arena)
820 return (IBCM_FAILURE);
821
822 ibcm_ip_sid_arena = vmem_create("ibcm_ip_sid", (void *)IBCM_INITIAL_SID,
823 IBCM_MAX_IP_SIDS, 1, NULL, NULL, NULL, 0,
824 VM_SLEEP | VMC_IDENTIFIER);
825
826 if (!ibcm_ip_sid_arena)
827 return (IBCM_FAILURE);
828
829 /* create a random starting value for local service ids */
830 gethrestime(&tv);
831 ibcm_local_sid_seed = ((uint64_t)tv.tv_sec << 20) & 0x007FFFFFFFF00000;
832 ASSERT((ibcm_local_sid_seed & IB_SID_AGN_MASK) == 0);
833 ibcm_local_sid_seed |= IB_SID_AGN_LOCAL;
834
835 ibcm_local_cid_seed = (ib_com_id_t)tv.tv_sec;
836
837 return (IBCM_SUCCESS);
838 }
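/*
 * A minimal sketch of how a local service id could be carved out of the
 * identifier arena created above: VMC_IDENTIFIER arenas hand back integer
 * ids cast to pointers, and the id is combined with ibcm_local_sid_seed.
 * The helper name, the guard macro and the exact combining step are
 * assumptions, not the CM's actual allocation routine.
 */
#ifdef IBCM_EXAMPLE_SKETCHES
static ib_svc_id_t
ibcm_example_alloc_local_sid(void)
{
	uintptr_t id;

	/* VM_SLEEP blocks until an id is available from the arena */
	id = (uintptr_t)vmem_alloc(ibcm_local_sid_arena, 1, VM_SLEEP);

	return (ibcm_local_sid_seed + (ib_svc_id_t)id);
}
#endif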
839
840
841 /*
842 * ibcm_init_hca_ids:
843 * Create the vmem arenas for the various hca level ids
844 *
845 * Arguments are:-
846 * hcap pointer to ibcm_hca_info_t
847 *
848 * Return Values: ibcm_status_t
849 */
850 ibcm_status_t
851 ibcm_init_hca_ids(ibcm_hca_info_t *hcap)
852 {
853 hcap->hca_comid_arena = vmem_create("ibcm_com_ids",
854 (void *)IBCM_INITIAL_COMID, IBCM_MAX_COMIDS,
855 1, NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
1199 }
1200
1201
1202 /*
1203 * ibcm_create_svc_entry:
1204 * Allocate a new entry and fill in the critical "look up" details
1205 * provided in the arguments, then make sure no conflicting entry
1206 * exists before inserting it into the AVL tree under the lock.
1207 *
1208 * Return values:
1209 * Pointer to ibcm_svc_info_t, if created, otherwise NULL.
1210 */
1211 ibcm_svc_info_t *
1212 ibcm_create_svc_entry(ib_svc_id_t sid, int num_sids)
1213 {
1214 ibcm_svc_info_t *svcp;
1215 ibcm_svc_info_t *svcinfop;
1216 ibcm_svc_lookup_t svc;
1217 avl_index_t where = 0;
1218
1219 /* assume success, and avoid kmem while holding the writer lock */
1220 svcinfop = kmem_zalloc(sizeof (*svcinfop), KM_SLEEP);
1221 svcinfop->svc_id = sid;
1222 svcinfop->svc_num_sids = num_sids;
1223
1224 svc.sid = sid;
1225 svc.num_sids = num_sids;
1226
1227 mutex_enter(&ibcm_svc_info_lock);
1228 svcp = avl_find(&ibcm_svc_avl_tree, &svc, &where);
1229 if (svcp != NULL) { /* overlap exists */
1230 mutex_exit(&ibcm_svc_info_lock);
1231 kmem_free(svcinfop, sizeof (*svcinfop));
1232 return (NULL);
1233 }
1234 avl_insert(&ibcm_svc_avl_tree, (void *)svcinfop, where);
1235 mutex_exit(&ibcm_svc_info_lock);
1236
1237 return (svcinfop);
1238 }
1239
1240 /*
1241 * ibcm_find_svc_entry:
1242 * Finds an ibcm_svc_info_t entry in the CM's global table,
1243 * an AVL tree that is ordered by SID.
1244 *
1245 * Arguments are:
1246 * sid - Service ID to look up
1247 *
1248 * Return values:
1249 * Pointer to ibcm_svc_info_t, if found, otherwise NULL.
1250 */
1251 ibcm_svc_info_t *
1252 ibcm_find_svc_entry(ib_svc_id_t sid)
1253 {
1254 ibcm_svc_info_t *svcp;
1255 ibcm_svc_lookup_t svc;
1256
1257 IBTF_DPRINTF_L3(cmlog, "ibcm_find_svc_entry: finding SID 0x%llX", sid);
1258
1259 ASSERT(MUTEX_HELD(&ibcm_svc_info_lock));
1260
1261 svc.sid = sid;
1262 svc.num_sids = 1;
1263 svcp = avl_find(&ibcm_svc_avl_tree, &svc, NULL);
1264 if (svcp != NULL) {
1265 IBTF_DPRINTF_L3(cmlog, "ibcm_find_svc_entry: "
1266 "found SID = 0x%llX", sid);
1267 return (svcp); /* found it */
1268 }
1269 IBTF_DPRINTF_L3(cmlog, "ibcm_find_svc_entry: SID %llX not found", sid);
1270 return (NULL);
1271 }
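/*
 * A minimal usage sketch: callers of ibcm_find_svc_entry() must hold
 * ibcm_svc_info_lock across the call, as the ASSERT above requires.
 * The wrapper name and the IBCM_EXAMPLE_SKETCHES guard are hypothetical;
 * everything else is taken from this file.
 */
#ifdef IBCM_EXAMPLE_SKETCHES
static boolean_t
ibcm_example_svc_registered(ib_svc_id_t sid)
{
	ibcm_svc_info_t *svcp;

	mutex_enter(&ibcm_svc_info_lock);
	svcp = ibcm_find_svc_entry(sid);
	mutex_exit(&ibcm_svc_info_lock);

	return (svcp != NULL ? B_TRUE : B_FALSE);
}
#endif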
1272
1273 /*
1274 * ibcm_alloc_ibmf_msg:
1275 * Allocate an ibmf message structure and the additional memory required for
1276 * sending an outgoing CM mad. The ibmf message structure contains two
1277 * ibmf_msg_bufs_t fields, one for the incoming MAD and one for the outgoing
1278 * MAD. The CM must allocate the memory for the outgoing MAD. The msg_buf
1279 * field has three buffers: the mad header, the class header, and the class
1280 * data. To simplify the code and reduce the number of kmem_zalloc() calls,
1281 * ibcm_alloc_ibmf_msg will allocate one buffer and set the pointers to the
1282 * right offsets. No class header is needed so only the mad header and class
1329
1330 kmem_free((*ibmf_msgpp)->im_msgbufs_send.im_bufs_mad_hdr,
1331 IBCM_MAD_SIZE);
1332
1333 if ((ibmf_status = ibmf_free_msg(ibmf_handle, ibmf_msgpp)) !=
1334 IBMF_SUCCESS) {
1335 IBTF_DPRINTF_L2(cmlog, "ibcm_free_out_msg: "
1336 "ibmf_free_msg failed %d", ibmf_status);
1337 return (IBCM_FAILURE);
1338 } else
1339 return (IBCM_SUCCESS);
1340 }
1341
1342 ibcm_qp_list_t *
1343 ibcm_find_qp(ibcm_hca_info_t *hcap, int port_no, ib_pkey_t pkey)
1344 {
1345 ibcm_qp_list_t *entry;
1346 ibmf_qp_handle_t ibmf_qp;
1347 int ibmf_status;
1348
1349 mutex_enter(&ibcm_qp_list_lock);
1350
1351 /*
1352 * CM currently does not track port up/down status. If tracking of
1353 * "port status" is added in the future, then CM could be optimized to
1354 * re-use other ports on hcap if the port associated with the above
1355 * port_no is down. But the issue of "reachability" needs to be
1356 * handled before selecting an alternative port different from above.
1357 */
1358 entry = hcap->hca_port_info[port_no-1].port_qplist;
1359 while (entry != NULL) {
1360 if (entry->qp_pkey == pkey) {
1361 ++entry->qp_ref_cnt;
1362 mutex_exit(&ibcm_qp_list_lock);
1363 return (entry);
1364 }
1365 entry = entry->qp_next;
1366 }
1367
1368 /*
1391 entry->qp_next = hcap->hca_port_info[port_no-1].port_qplist;
1392 hcap->hca_port_info[port_no-1].port_qplist = entry;
1393 entry->qp_cm = ibmf_qp;
1394 entry->qp_ref_cnt = 1;
1395 entry->qp_pkey = pkey;
1396 entry->qp_port = &(hcap->hca_port_info[port_no-1]);
1397
1398 mutex_exit(&ibcm_qp_list_lock);
1399
1400 /* set-up the handler */
1401 ibmf_status = ibmf_setup_async_cb(
1402 hcap->hca_port_info[port_no-1].port_ibmf_hdl, ibmf_qp,
1403 ibcm_recv_cb, entry, 0);
1404
1405 ASSERT(ibmf_status == IBMF_SUCCESS);
1406
1407 #ifdef DEBUG
1408 ibcm_query_qp(hcap->hca_port_info[port_no-1].port_ibmf_hdl, ibmf_qp);
1409 #endif
1410
1411 return (entry);
1412 }
1413
1414 void
1415 ibcm_release_qp(ibcm_qp_list_t *cm_qp_entry)
1416 {
1417 mutex_enter(&ibcm_qp_list_lock);
1418 --cm_qp_entry->qp_ref_cnt;
1419 ASSERT(cm_qp_entry->qp_ref_cnt >= 0);
1420 mutex_exit(&ibcm_qp_list_lock);
1421 }
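/*
 * A minimal usage sketch: ibcm_find_qp() and ibcm_release_qp() form a
 * hold/release pair on the per-port, per-pkey ibmf QP entry, and entries
 * are torn down separately through ibcm_free_qp().  The wrapper name and
 * the IBCM_EXAMPLE_SKETCHES guard are hypothetical.
 */
#ifdef IBCM_EXAMPLE_SKETCHES
static void
ibcm_example_qp_hold_release(ibcm_hca_info_t *hcap, int port_no,
    ib_pkey_t pkey)
{
	ibcm_qp_list_t *cm_qp_entry;

	/* takes a reference, creating the entry on first use */
	cm_qp_entry = ibcm_find_qp(hcap, port_no, pkey);

	/* ... CM MAD traffic would use cm_qp_entry->qp_cm here ... */

	/* drops the reference taken above */
	ibcm_release_qp(cm_qp_entry);
}
#endif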
1422
1423
1424 /* called holding the ibcm_qp_list_lock mutex */
1425 ibcm_status_t
1426 ibcm_free_qp(ibcm_qp_list_t *cm_qp_entry)
1427 {
1428 int ibmf_status;
1429
1430 IBTF_DPRINTF_L5(cmlog, "ibcm_free_qp: qp_hdl %p ref_cnt %d pkey %x",
1529 sb_data->s_data16[i] = h2b16(*p16++);
1530
1531 p32 = (uint32_t *)p16;
1532 for (i = 0; i < 4; i++)
1533 sb_data->s_data32[i] = h2b32(*p32++);
1534 p64 = (uint64_t *)p32;
1535
1536 for (i = 0; i < 2; i++)
1537 sb_data->s_data64[i] = h2b64(*p64++);
1538 }
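/*
 * The loops above convert the flat data buffer between host order and
 * big-endian (IB wire) order in 16-, 32- and 64-bit groups, filling in
 * the s_data16[]/s_data32[]/s_data64[] views of the service data.
 */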
1539
1540 /* Trace related functions */
1541
1542 void
1543 ibcm_init_conn_trace(ibcm_state_data_t *sp)
1544 {
1545 IBTF_DPRINTF_L5(cmlog, "ibcm_init_conn_trace: statep %p", sp);
1546
1547 /* Initialize trace related fields */
1548
1549 sp->conn_trace = kmem_zalloc(sizeof (ibcm_conn_trace_t), KM_SLEEP);
1550 if ((ibcm_enable_trace & 1) == 0)
1551 sp->conn_trace->conn_base_tm = gethrtime();
1552 sp->conn_trace->conn_allocated_trcnt = ibcm_conn_max_trcnt;
1553 sp->conn_trace->conn_trace_events =
1554 kmem_zalloc(sp->conn_trace->conn_allocated_trcnt, KM_SLEEP);
1555 sp->conn_trace->conn_trace_event_times =
1556 kmem_zalloc(sp->conn_trace->conn_allocated_trcnt *
1557 sizeof (tm_diff_type), KM_SLEEP);
1558 }
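/*
 * The two trace buffers sized above are paired: conn_trace_events holds
 * one byte per recorded event and conn_trace_event_times one tm_diff_type
 * per event, both conn_allocated_trcnt entries long; they are freed with
 * matching sizes in ibcm_fini_conn_trace() below.
 */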
1559
1560 void
1561 ibcm_fini_conn_trace(ibcm_state_data_t *statep)
1562 {
1563 IBTF_DPRINTF_L5(cmlog, "ibcm_fini_conn_trace: statep %p tracep %p",
1564 statep, statep->conn_trace);
1565
1566 /* free the trace data */
1567 if (statep->conn_trace) {
1568 if (statep->conn_trace->conn_trace_events)
1569 kmem_free(statep->conn_trace->conn_trace_events,
1570 statep->conn_trace->conn_allocated_trcnt);
1571 if (statep->conn_trace->conn_trace_event_times)
1572 kmem_free(statep->conn_trace->conn_trace_event_times,
1573 statep->conn_trace->conn_allocated_trcnt *
1574 sizeof (tm_diff_type));
1575
1576 kmem_free(statep->conn_trace, sizeof (ibcm_conn_trace_t));
1577 }
1939
1940 /* Retrieve Node Records from SA Access. */
1941 bzero(&nr_req, sizeof (sa_node_record_t));
1942 nr_req.LID = lid;
1943
1944 ibt_status = ibcm_get_node_rec(saa_handle, &nr_req,
1945 SA_NODEINFO_COMPMASK_NODELID, &res_p, &len);
1946 if (ibt_status != IBT_SUCCESS) {
1947 IBTF_DPRINTF_L2(cmlog, "ibcm_ibtl_node_info: "
1948 "failed (%d) to get Node records", ibt_status);
1949 ibcm_dec_hca_acc_cnt(hcap);
1950 return (IBT_FAILURE);
1951 }
1952
1953 num_rec = len/sizeof (sa_node_record_t);
1954 nr_resp = (sa_node_record_t *)(uchar_t *)res_p;
1955
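/*
 * Only the first of the num_rec node records returned by the SA is
 * consumed below; the whole response buffer is freed afterwards.
 */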
1956 if ((nr_resp != NULL) && (num_rec > 0)) {
1957 IBCM_DUMP_NODE_REC(nr_resp);
1958
1959 node_info_p->n_sys_img_guid =
1960 nr_resp->NodeInfo.SystemImageGUID;
1961 node_info_p->n_node_guid =
1962 nr_resp->NodeInfo.NodeGUID;
1963 node_info_p->n_port_guid =
1964 nr_resp->NodeInfo.PortGUID;
1965 node_info_p->n_dev_id =
1966 nr_resp->NodeInfo.DeviceID;
1967 node_info_p->n_revision =
1968 nr_resp->NodeInfo.Revision;
1969 node_info_p->n_vendor_id =
1970 nr_resp->NodeInfo.VendorID;
1971 node_info_p->n_num_ports =
1972 nr_resp->NodeInfo.NumPorts;
1973 node_info_p->n_port_num =
1974 nr_resp->NodeInfo.LocalPortNum;
1975 node_info_p->n_node_type =
1976 nr_resp->NodeInfo.NodeType;
1977 (void) strncpy(node_info_p->n_description,
1978 (char *)&nr_resp->NodeDescription, 64);
1979
1980 kmem_free(nr_resp, len);
1981 }
1982 ibcm_dec_hca_acc_cnt(hcap);
1983 return (IBT_SUCCESS);
1984 }
1985
1986 /*
1987 * ibcm_ibmf_analyze_error:
1988 * Checks IBMF status and determines appropriate ibt status.
1989 *
1990 * Arguments:
1991 * ibmf_status - IBMF Status
1992 *
1993 * Return values:
1994 * ibt_status_t
1995 */
1996 ibt_status_t
1997 ibcm_ibmf_analyze_error(int ibmf_status)
1998 {
1999 if (ibt_check_failure(ibmf_status, NULL) != IBT_FAILURE_STANDARD) {
|