1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24 /*
25 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
26 */
27
28 /*
29 * ibdm.c
30 *
31 * This file contains the InfiniBand Device Manager (IBDM) support functions.
32 * The IB nexus driver is the only client of the IBDM module.
33 *
34 * IBDM registers with IBTF for HCA arrival/removal notification.
35 * IBDM registers with SA access to send DM MADs to discover the IOCs behind
36 * the IOUs.
37 *
38 * The IB nexus driver registers with IBDM to obtain information about the
39 * HCAs and IOCs (behind the IOUs) present on the IB fabric.
40 */
41
42 #include <sys/sysmacros.h>
43 #include <sys/systm.h>
44 #include <sys/taskq.h>
45 #include <sys/ib/mgt/ibdm/ibdm_impl.h>
46 #include <sys/ib/mgt/ibmf/ibmf_impl.h>
47 #include <sys/ib/ibtl/impl/ibtl_ibnex.h>
48 #include <sys/modctl.h>
49
50 /* Function Prototype declarations */
51 static int ibdm_free_iou_info(ibdm_dp_gidinfo_t *, ibdm_iou_info_t **);
52 static int ibdm_fini(void);
53 static int ibdm_init(void);
54 static int ibdm_get_reachable_ports(ibdm_port_attr_t *,
55 ibdm_hca_list_t *);
56 static ibdm_dp_gidinfo_t *ibdm_check_dgid(ib_guid_t, ib_sn_prefix_t);
57 static ibdm_dp_gidinfo_t *ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *);
58 static boolean_t ibdm_is_cisco(ib_guid_t);
59 static boolean_t ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *);
60 static void ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *);
61 static int ibdm_set_classportinfo(ibdm_dp_gidinfo_t *);
62 static int ibdm_send_classportinfo(ibdm_dp_gidinfo_t *);
63 static int ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *);
64 static int ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *);
65 static int ibdm_get_node_port_guids(ibmf_saa_handle_t, ib_lid_t,
66 ib_guid_t *, ib_guid_t *);
67 static int ibdm_retry_command(ibdm_timeout_cb_args_t *);
68 static int ibdm_get_diagcode(ibdm_dp_gidinfo_t *, int);
69 static int ibdm_verify_mad_status(ib_mad_hdr_t *);
70 static int ibdm_handle_redirection(ibmf_msg_t *,
71 ibdm_dp_gidinfo_t *, int *);
72 static void ibdm_wait_probe_completion(void);
73 static void ibdm_sweep_fabric(int);
74 static void ibdm_probe_gid_thread(void *);
75 static void ibdm_wakeup_probe_gid_cv(void);
76 static void ibdm_port_attr_ibmf_init(ibdm_port_attr_t *, ib_pkey_t, int);
77 static int ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *, int);
78 static void ibdm_update_port_attr(ibdm_port_attr_t *);
79 static void ibdm_handle_hca_attach(ib_guid_t);
80 static void ibdm_handle_srventry_mad(ibmf_msg_t *,
81 ibdm_dp_gidinfo_t *, int *);
82 static void ibdm_ibmf_recv_cb(ibmf_handle_t, ibmf_msg_t *, void *);
83 static void ibdm_recv_incoming_mad(void *);
84 static void ibdm_process_incoming_mad(ibmf_handle_t, ibmf_msg_t *, void *);
85 static void ibdm_ibmf_send_cb(ibmf_handle_t, ibmf_msg_t *, void *);
86 static void ibdm_pkt_timeout_hdlr(void *arg);
87 static void ibdm_initialize_port(ibdm_port_attr_t *);
88 static void ibdm_update_port_pkeys(ibdm_port_attr_t *port);
89 static void ibdm_handle_diagcode(ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
90 static void ibdm_probe_gid(ibdm_dp_gidinfo_t *);
91 static void ibdm_alloc_send_buffers(ibmf_msg_t *);
92 static void ibdm_free_send_buffers(ibmf_msg_t *);
93 static void ibdm_handle_hca_detach(ib_guid_t);
94 static void ibdm_handle_port_change_event(ibt_async_event_t *);
95 static int ibdm_fini_port(ibdm_port_attr_t *);
96 static int ibdm_uninit_hca(ibdm_hca_list_t *);
97 static void ibdm_handle_setclassportinfo(ibmf_handle_t, ibmf_msg_t *,
98 ibdm_dp_gidinfo_t *, int *);
99 static void ibdm_handle_iounitinfo(ibmf_handle_t,
100 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
101 static void ibdm_handle_ioc_profile(ibmf_handle_t,
102 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
103 static void ibdm_event_hdlr(void *, ibt_hca_hdl_t,
104 ibt_async_code_t, ibt_async_event_t *);
105 static void ibdm_handle_classportinfo(ibmf_handle_t,
106 ibmf_msg_t *, ibdm_dp_gidinfo_t *, int *);
107 static void ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *,
108 ibdm_dp_gidinfo_t *);
109
110 static ibdm_hca_list_t *ibdm_dup_hca_attr(ibdm_hca_list_t *);
111 static ibdm_ioc_info_t *ibdm_dup_ioc_info(ibdm_ioc_info_t *,
112 ibdm_dp_gidinfo_t *gid_list);
113 static void ibdm_probe_ioc(ib_guid_t, ib_guid_t, int);
114 static ibdm_ioc_info_t *ibdm_is_ioc_present(ib_guid_t,
115 ibdm_dp_gidinfo_t *, int *);
116 static ibdm_port_attr_t *ibdm_get_port_attr(ibt_async_event_t *,
117 ibdm_hca_list_t **);
118 static sa_node_record_t *ibdm_get_node_records(ibmf_saa_handle_t,
119 size_t *, ib_guid_t);
120 static int ibdm_get_node_record_by_port(ibmf_saa_handle_t,
121 ib_guid_t, sa_node_record_t **, size_t *);
122 static sa_portinfo_record_t *ibdm_get_portinfo(ibmf_saa_handle_t, size_t *,
123 ib_lid_t);
124 static ibdm_dp_gidinfo_t *ibdm_create_gid_info(ibdm_port_attr_t *,
125 ib_gid_t, ib_gid_t);
126 static ibdm_dp_gidinfo_t *ibdm_find_gid(ib_guid_t, ib_guid_t);
127 static int ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *, uint8_t);
128 static ibdm_ioc_info_t *ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *, int);
129 static void ibdm_saa_event_cb(ibmf_saa_handle_t, ibmf_saa_subnet_event_t,
130 ibmf_saa_event_details_t *, void *);
131 static void ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *,
132 ibdm_dp_gidinfo_t *);
133 static ibdm_dp_gidinfo_t *ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *);
134 static void ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *,
135 ibdm_dp_gidinfo_t *);
136 static void ibdm_addto_gidlist(ibdm_gid_t **, ibdm_gid_t *);
137 static void ibdm_free_gid_list(ibdm_gid_t *);
138 static void ibdm_rescan_gidlist(ib_guid_t *ioc_guid);
139 static void ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *);
140 static void ibdm_saa_event_taskq(void *);
141 static void ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *);
142 static void ibdm_get_next_port(ibdm_hca_list_t **,
143 ibdm_port_attr_t **, int);
144 static void ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *,
145 ibdm_dp_gidinfo_t *);
146 static void ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *,
147 ibdm_hca_list_t *);
148 static void ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *);
149 static void ibdm_saa_handle_new_gid(void *);
150 static void ibdm_reset_all_dgids(ibmf_saa_handle_t);
151 static void ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *);
152 static void ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *);
153 static void ibdm_fill_srv_attr_mod(ib_mad_hdr_t *, ibdm_timeout_cb_args_t *);
154 static void ibdm_bump_transactionID(ibdm_dp_gidinfo_t *);
155 static ibdm_ioc_info_t *ibdm_handle_prev_iou();
156 static int ibdm_serv_cmp(ibdm_srvents_info_t *, ibdm_srvents_info_t *,
157 int);
158 static ibdm_ioc_info_t *ibdm_get_ioc_info_with_gid(ib_guid_t,
159 ibdm_dp_gidinfo_t **);
160
161 int ibdm_dft_timeout = IBDM_DFT_TIMEOUT;
162 int ibdm_dft_retry_cnt = IBDM_DFT_NRETRIES;
163 #ifdef DEBUG
164 int ibdm_ignore_saa_event = 0;
165 #endif
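/*
 * IOC enumeration is disabled by default; set ibdm_enumerate_iocs to a
 * non-zero value to have IBDM probe the fabric for IOUs and IOCs.
 */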
166 int ibdm_enumerate_iocs = 0;
167
168 /* Modload support */
169 static struct modlmisc ibdm_modlmisc = {
170 &mod_miscops,
171 "InfiniBand Device Manager"
172 };
173
174 struct modlinkage ibdm_modlinkage = {
175 MODREV_1,
176 { (void *)&ibdm_modlmisc, NULL }
177 };
178
179 static ibt_clnt_modinfo_t ibdm_ibt_modinfo = {
180 IBTI_V_CURR,
181 IBT_DM,
182 ibdm_event_hdlr,
183 NULL,
184 "ibdm"
185 };
186
187 /* Global variables */
188 ibdm_t ibdm;
189 int ibdm_taskq_enable = IBDM_ENABLE_TASKQ_HANDLING;
190 char *ibdm_string = "ibdm";
191
192 _NOTE(SCHEME_PROTECTS_DATA("Serialized access by cv",
193 ibdm.ibdm_dp_gidlist_head))
194
195 /*
196 * _init
197 * Loadable module init, called before any other routine of this module.
198 * Initialize mutex
199 * Register with IBTF
200 */
201 int
202 _init(void)
203 {
204 int err;
205
206 IBTF_DPRINTF_L4("ibdm", "\t_init: addr of ibdm %p", &ibdm);
207
208 if ((err = ibdm_init()) != IBDM_SUCCESS) {
209 IBTF_DPRINTF_L2("ibdm", "_init: ibdm_init failed 0x%x", err);
210 (void) ibdm_fini();
211 return (DDI_FAILURE);
212 }
213
214 if ((err = mod_install(&ibdm_modlinkage)) != 0) {
215 IBTF_DPRINTF_L2("ibdm", "_init: mod_install failed 0x%x", err);
216 (void) ibdm_fini();
217 }
218 return (err);
219 }
220
221
222 int
223 _fini(void)
224 {
225 int err;
226
227 if ((err = ibdm_fini()) != IBDM_SUCCESS) {
228 IBTF_DPRINTF_L2("ibdm", "_fini: ibdm_fini failed 0x%x", err);
229 (void) ibdm_init();
230 return (EBUSY);
231 }
232
233 if ((err = mod_remove(&ibdm_modlinkage)) != 0) {
234 IBTF_DPRINTF_L2("ibdm", "_fini: mod_remove failed 0x%x", err);
235 (void) ibdm_init();
236 }
237 return (err);
238 }
239
240
241 int
242 _info(struct modinfo *modinfop)
243 {
244 return (mod_info(&ibdm_modlinkage, modinfop));
245 }
246
247
248 /*
249 * ibdm_init():
250 * Initialize the module mutexes and condition variables
251 * Register with IBTF
252 * Set up any HCAs that are already attached
253 */
254 static int
255 ibdm_init(void)
256 {
257 int i, hca_count;
258 ib_guid_t *hca_guids;
259 ibt_status_t status;
260
261 IBTF_DPRINTF_L4("ibdm", "\tibdm_init:");
262 if (!(ibdm.ibdm_state & IBDM_LOCKS_ALLOCED)) {
263 mutex_init(&ibdm.ibdm_mutex, NULL, MUTEX_DEFAULT, NULL);
264 mutex_init(&ibdm.ibdm_hl_mutex, NULL, MUTEX_DEFAULT, NULL);
265 mutex_init(&ibdm.ibdm_ibnex_mutex, NULL, MUTEX_DEFAULT, NULL);
266 cv_init(&ibdm.ibdm_port_settle_cv, NULL, CV_DRIVER, NULL);
267 mutex_enter(&ibdm.ibdm_mutex);
268 ibdm.ibdm_state |= IBDM_LOCKS_ALLOCED;
269 }
270
271 if (!(ibdm.ibdm_state & IBDM_IBT_ATTACHED)) {
272 if ((status = ibt_attach(&ibdm_ibt_modinfo, NULL, NULL,
273 (void *)&ibdm.ibdm_ibt_clnt_hdl)) != IBT_SUCCESS) {
274 IBTF_DPRINTF_L2("ibdm", "ibdm_init: ibt_attach "
275 "failed %x", status);
276 mutex_exit(&ibdm.ibdm_mutex);
277 return (IBDM_FAILURE);
278 }
279
280 ibdm.ibdm_state |= IBDM_IBT_ATTACHED;
281 mutex_exit(&ibdm.ibdm_mutex);
282 }
283
284
285 if (!(ibdm.ibdm_state & IBDM_HCA_ATTACHED)) {
286 hca_count = ibt_get_hca_list(&hca_guids);
287 IBTF_DPRINTF_L4("ibdm", "ibdm_init: num_hcas = %d", hca_count);
288 for (i = 0; i < hca_count; i++)
289 (void) ibdm_handle_hca_attach(hca_guids[i]);
290 if (hca_count)
291 ibt_free_hca_list(hca_guids, hca_count);
292
293 mutex_enter(&ibdm.ibdm_mutex);
294 ibdm.ibdm_state |= IBDM_HCA_ATTACHED;
295 mutex_exit(&ibdm.ibdm_mutex);
296 }
297
298 if (!(ibdm.ibdm_state & IBDM_CVS_ALLOCED)) {
299 cv_init(&ibdm.ibdm_probe_cv, NULL, CV_DRIVER, NULL);
300 cv_init(&ibdm.ibdm_busy_cv, NULL, CV_DRIVER, NULL);
301 mutex_enter(&ibdm.ibdm_mutex);
302 ibdm.ibdm_state |= IBDM_CVS_ALLOCED;
303 mutex_exit(&ibdm.ibdm_mutex);
304 }
305 return (IBDM_SUCCESS);
306 }
307
308
309 static int
310 ibdm_free_iou_info(ibdm_dp_gidinfo_t *gid_info, ibdm_iou_info_t **ioup)
311 {
312 int ii, k, niocs;
313 size_t size;
314 ibdm_gid_t *delete, *head;
315 timeout_id_t timeout_id;
316 ibdm_ioc_info_t *ioc;
317 ibdm_iou_info_t *gl_iou = *ioup;
318
319 ASSERT(mutex_owned(&gid_info->gl_mutex));
320 if (gl_iou == NULL) {
321 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: No IOU");
322 return (0);
323 }
324
325 niocs = gl_iou->iou_info.iou_num_ctrl_slots;
326 IBTF_DPRINTF_L4("ibdm", "\tfree_iou_info: gid_info = %p, niocs %d",
327 gid_info, niocs);
328
329 for (ii = 0; ii < niocs; ii++) {
330 ioc = (ibdm_ioc_info_t *)&gl_iou->iou_ioc_info[ii];
331
332 /* handle the case where an ioc_timeout_id is scheduled */
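		/*
		 * Drop gl_mutex across untimeout(): untimeout() waits for a
		 * handler that is already running, and such a handler may
		 * itself need gl_mutex.
		 */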
333 if (ioc->ioc_timeout_id) {
334 timeout_id = ioc->ioc_timeout_id;
335 ioc->ioc_timeout_id = 0;
336 mutex_exit(&gid_info->gl_mutex);
337 IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
338 "ioc_timeout_id = 0x%x", timeout_id);
339 if (untimeout(timeout_id) == -1) {
340 IBTF_DPRINTF_L2("ibdm", "free_iou_info: "
341 "untimeout ioc_timeout_id failed");
342 mutex_enter(&gid_info->gl_mutex);
343 return (-1);
344 }
345 mutex_enter(&gid_info->gl_mutex);
346 }
347
348 /* handle the case where an ioc_dc_timeout_id is scheduled */
349 if (ioc->ioc_dc_timeout_id) {
350 timeout_id = ioc->ioc_dc_timeout_id;
351 ioc->ioc_dc_timeout_id = 0;
352 mutex_exit(&gid_info->gl_mutex);
353 IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
354 "ioc_dc_timeout_id = 0x%x", timeout_id);
355 if (untimeout(timeout_id) == -1) {
356 IBTF_DPRINTF_L2("ibdm", "free_iou_info: "
357 "untimeout ioc_dc_timeout_id failed");
358 mutex_enter(&gid_info->gl_mutex);
359 return (-1);
360 }
361 mutex_enter(&gid_info->gl_mutex);
362 }
363
364 /* handle the case where serv[k].se_timeout_id is scheduled */
365 for (k = 0; k < ioc->ioc_profile.ioc_service_entries; k++) {
366 if (ioc->ioc_serv[k].se_timeout_id) {
367 timeout_id = ioc->ioc_serv[k].se_timeout_id;
368 ioc->ioc_serv[k].se_timeout_id = 0;
369 mutex_exit(&gid_info->gl_mutex);
370 IBTF_DPRINTF_L5("ibdm", "free_iou_info: "
371 "ioc->ioc_serv[%d].se_timeout_id = 0x%x",
372 k, timeout_id);
373 if (untimeout(timeout_id) == -1) {
374 IBTF_DPRINTF_L2("ibdm", "free_iou_info:"
375 " untimeout se_timeout_id failed");
376 mutex_enter(&gid_info->gl_mutex);
377 return (-1);
378 }
379 mutex_enter(&gid_info->gl_mutex);
380 }
381 }
382
383 /* delete GID list in IOC */
384 head = ioc->ioc_gid_list;
385 while (head) {
386 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: "
387 "Deleting gid_list struct %p", head);
388 delete = head;
389 head = head->gid_next;
390 kmem_free(delete, sizeof (ibdm_gid_t));
391 }
392 ioc->ioc_gid_list = NULL;
393
394 /* delete ioc_serv */
395 size = ioc->ioc_profile.ioc_service_entries *
396 sizeof (ibdm_srvents_info_t);
397 if (ioc->ioc_serv && size) {
398 kmem_free(ioc->ioc_serv, size);
399 ioc->ioc_serv = NULL;
400 }
401 }
402 /*
403 * Clear the IBDM_CISCO_PROBE_DONE flag to get the IO Unit information
404 * via the switch during the probe process.
405 */
406 gid_info->gl_flag &= ~IBDM_CISCO_PROBE_DONE;
407
408 IBTF_DPRINTF_L4("ibdm", "\tibdm_free_iou_info: deleting IOU & IOC");
409 size = sizeof (ibdm_iou_info_t) + niocs * sizeof (ibdm_ioc_info_t);
410 kmem_free(gl_iou, size);
411 *ioup = NULL;
412 return (0);
413 }
414
415
416 /*
417 * ibdm_fini():
418 * Unregister with IBTF
419 * Deallocate memory for the GID info
420 */
421 static int
422 ibdm_fini()
423 {
424 int ii;
425 ibdm_hca_list_t *hca_list, *temp;
426 ibdm_dp_gidinfo_t *gid_info, *tmp;
427 ibdm_gid_t *head, *delete;
428
429 IBTF_DPRINTF_L4("ibdm", "\tibdm_fini");
430
431 mutex_enter(&ibdm.ibdm_hl_mutex);
432 if (ibdm.ibdm_state & IBDM_IBT_ATTACHED) {
433 if (ibt_detach(ibdm.ibdm_ibt_clnt_hdl) != IBT_SUCCESS) {
434 IBTF_DPRINTF_L2("ibdm", "\t_fini: ibt_detach failed");
435 mutex_exit(&ibdm.ibdm_hl_mutex);
436 return (IBDM_FAILURE);
437 }
438 ibdm.ibdm_state &= ~IBDM_IBT_ATTACHED;
439 ibdm.ibdm_ibt_clnt_hdl = NULL;
440 }
441
442 hca_list = ibdm.ibdm_hca_list_head;
443 IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: nhcas %d", ibdm.ibdm_hca_count);
444 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) {
445 temp = hca_list;
446 hca_list = hca_list->hl_next;
447 IBTF_DPRINTF_L4("ibdm", "\tibdm_fini: hca %p", temp);
448 if (ibdm_uninit_hca(temp) != IBDM_SUCCESS) {
449 IBTF_DPRINTF_L2("ibdm", "\tibdm_fini: "
450 "uninit_hca %p failed", temp);
451 mutex_exit(&ibdm.ibdm_hl_mutex);
452 return (IBDM_FAILURE);
453 }
454 }
455 mutex_exit(&ibdm.ibdm_hl_mutex);
456
457 mutex_enter(&ibdm.ibdm_mutex);
458 if (ibdm.ibdm_state & IBDM_HCA_ATTACHED)
459 ibdm.ibdm_state &= ~IBDM_HCA_ATTACHED;
460
461 gid_info = ibdm.ibdm_dp_gidlist_head;
462 while (gid_info) {
463 mutex_enter(&gid_info->gl_mutex);
464 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou);
465 mutex_exit(&gid_info->gl_mutex);
466 ibdm_delete_glhca_list(gid_info);
467
468 tmp = gid_info;
469 gid_info = gid_info->gl_next;
470 mutex_destroy(&tmp->gl_mutex);
471 head = tmp->gl_gid;
472 while (head) {
473 IBTF_DPRINTF_L4("ibdm",
474 "\tibdm_fini: Deleting gid structs");
475 delete = head;
476 head = head->gid_next;
477 kmem_free(delete, sizeof (ibdm_gid_t));
478 }
479 kmem_free(tmp, sizeof (ibdm_dp_gidinfo_t));
480 }
481 mutex_exit(&ibdm.ibdm_mutex);
482
483 if (ibdm.ibdm_state & IBDM_LOCKS_ALLOCED) {
484 ibdm.ibdm_state &= ~IBDM_LOCKS_ALLOCED;
485 mutex_destroy(&ibdm.ibdm_mutex);
486 mutex_destroy(&ibdm.ibdm_hl_mutex);
487 mutex_destroy(&ibdm.ibdm_ibnex_mutex);
488 cv_destroy(&ibdm.ibdm_port_settle_cv);
489 }
490 if (ibdm.ibdm_state & IBDM_CVS_ALLOCED) {
491 ibdm.ibdm_state &= ~IBDM_CVS_ALLOCED;
492 cv_destroy(&ibdm.ibdm_probe_cv);
493 cv_destroy(&ibdm.ibdm_busy_cv);
494 }
495 return (IBDM_SUCCESS);
496 }
497
498
499 /*
500 * ibdm_event_hdlr()
501 *
502 * IBDM registers this asynchronous event handler at the time of
503 * ibt_attach. IBDM supports the following async events; all other
504 * events simply return success.
505 * IBT_HCA_ATTACH_EVENT:
506 * Retrieves the information about all the ports that are
507 * present on this HCA, allocates the port attributes
508 * structure and calls the IB nexus callback routine with
509 * the port attributes structure as an input argument.
510 * IBT_HCA_DETACH_EVENT:
511 * Retrieves the information about all the ports that are
512 * present on this HCA and calls the IB nexus callback with
513 * the HCA GUID as an argument
514 * IBT_EVENT_PORT_UP:
515 * Register with IBMF and SA access
516 * Setup IBMF receive callback routine
517 * IBT_ERROR_PORT_DOWN:
518 * Unregister with IBMF and SA access
519 * Teardown IBMF receive callback routine
520 */
521 /*ARGSUSED*/
522 static void
523 ibdm_event_hdlr(void *clnt_hdl,
524 ibt_hca_hdl_t hca_hdl, ibt_async_code_t code, ibt_async_event_t *event)
525 {
526 ibdm_hca_list_t *hca_list;
527 ibdm_port_attr_t *port;
528 ibmf_saa_handle_t port_sa_hdl;
529
530 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: async code 0x%x", code);
531
532 switch (code) {
533 case IBT_HCA_ATTACH_EVENT: /* New HCA registered with IBTF */
534 ibdm_handle_hca_attach(event->ev_hca_guid);
535 break;
536
537 case IBT_HCA_DETACH_EVENT: /* HCA unregistered with IBTF */
538 ibdm_handle_hca_detach(event->ev_hca_guid);
539 mutex_enter(&ibdm.ibdm_ibnex_mutex);
540 if (ibdm.ibdm_ibnex_callback != NULL) {
541 (*ibdm.ibdm_ibnex_callback)((void *)
542 &event->ev_hca_guid, IBDM_EVENT_HCA_REMOVED);
543 }
544 mutex_exit(&ibdm.ibdm_ibnex_mutex);
545 break;
546
547 case IBT_EVENT_PORT_UP:
548 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_UP");
549 mutex_enter(&ibdm.ibdm_hl_mutex);
550 port = ibdm_get_port_attr(event, &hca_list);
551 if (port == NULL) {
552 IBTF_DPRINTF_L2("ibdm",
553 "\tevent_hdlr: HCA not present");
554 mutex_exit(&ibdm.ibdm_hl_mutex);
555 break;
556 }
557 ibdm_initialize_port(port);
558 hca_list->hl_nports_active++;
559 cv_broadcast(&ibdm.ibdm_port_settle_cv);
560 mutex_exit(&ibdm.ibdm_hl_mutex);
561
562 /* Inform IB nexus driver */
563 mutex_enter(&ibdm.ibdm_ibnex_mutex);
564 if (ibdm.ibdm_ibnex_callback != NULL) {
565 (*ibdm.ibdm_ibnex_callback)((void *)
566 &event->ev_hca_guid, IBDM_EVENT_PORT_UP);
567 }
568 mutex_exit(&ibdm.ibdm_ibnex_mutex);
569 break;
570
571 case IBT_ERROR_PORT_DOWN:
572 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_DOWN");
573 mutex_enter(&ibdm.ibdm_hl_mutex);
574 port = ibdm_get_port_attr(event, &hca_list);
575 if (port == NULL) {
576 IBTF_DPRINTF_L2("ibdm",
577 "\tevent_hdlr: HCA not present");
578 mutex_exit(&ibdm.ibdm_hl_mutex);
579 break;
580 }
581 hca_list->hl_nports_active--;
582 port_sa_hdl = port->pa_sa_hdl;
583 (void) ibdm_fini_port(port);
584 port->pa_state = IBT_PORT_DOWN;
585 cv_broadcast(&ibdm.ibdm_port_settle_cv);
586 mutex_exit(&ibdm.ibdm_hl_mutex);
587 ibdm_reset_all_dgids(port_sa_hdl);
588 break;
589
590 case IBT_PORT_CHANGE_EVENT:
591 IBTF_DPRINTF_L4("ibdm", "\tevent_hdlr: PORT_CHANGE");
592 if (event->ev_port_flags & IBT_PORT_CHANGE_PKEY)
593 ibdm_handle_port_change_event(event);
594 break;
595
596 default: /* Ignore all other events/errors */
597 break;
598 }
599 }
600
601 static void
602 ibdm_handle_port_change_event(ibt_async_event_t *event)
603 {
604 ibdm_port_attr_t *port;
605 ibdm_hca_list_t *hca_list;
606
607 IBTF_DPRINTF_L2("ibdm", "\tibdm_handle_port_change_event:"
608 " HCA guid %llx", event->ev_hca_guid);
609 mutex_enter(&ibdm.ibdm_hl_mutex);
610 port = ibdm_get_port_attr(event, &hca_list);
611 if (port == NULL) {
612 IBTF_DPRINTF_L2("ibdm", "\tevent_hdlr: HCA not present");
613 mutex_exit(&ibdm.ibdm_hl_mutex);
614 return;
615 }
616 ibdm_update_port_pkeys(port);
617 cv_broadcast(&ibdm.ibdm_port_settle_cv);
618 mutex_exit(&ibdm.ibdm_hl_mutex);
619
620 /* Inform IB nexus driver */
621 mutex_enter(&ibdm.ibdm_ibnex_mutex);
622 if (ibdm.ibdm_ibnex_callback != NULL) {
623 (*ibdm.ibdm_ibnex_callback)((void *)
624 &event->ev_hca_guid, IBDM_EVENT_PORT_PKEY_CHANGE);
625 }
626 mutex_exit(&ibdm.ibdm_ibnex_mutex);
627 }
628
629 /*
630 * ibdm_update_port_pkeys()
631 * Update the pkey table
632 * Update the port attributes
633 */
634 static void
635 ibdm_update_port_pkeys(ibdm_port_attr_t *port)
636 {
637 uint_t nports, size;
638 uint_t pkey_idx, opkey_idx;
639 uint16_t npkeys;
640 ibt_hca_portinfo_t *pinfop;
641 ib_pkey_t pkey;
642 ibdm_pkey_tbl_t *pkey_tbl;
643 ibdm_port_attr_t newport;
644
645 IBTF_DPRINTF_L4("ibdm", "\tupdate_port_pkeys:");
646 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
647
648 /* Check whether the port is active */
649 if (ibt_get_port_state(port->pa_hca_hdl, port->pa_port_num, NULL,
650 NULL) != IBT_SUCCESS)
651 return;
652
653 if (ibt_query_hca_ports(port->pa_hca_hdl, port->pa_port_num,
654 &pinfop, &nports, &size) != IBT_SUCCESS) {
655 /* This should not occur */
656 port->pa_npkeys = 0;
657 port->pa_pkey_tbl = NULL;
658 return;
659 }
660
661 npkeys = pinfop->p_pkey_tbl_sz;
662 pkey_tbl = kmem_zalloc(npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP);
663 newport.pa_pkey_tbl = pkey_tbl;
664 newport.pa_ibmf_hdl = port->pa_ibmf_hdl;
665
666 for (pkey_idx = 0; pkey_idx < npkeys; pkey_idx++) {
667 pkey = pkey_tbl[pkey_idx].pt_pkey =
668 pinfop->p_pkey_tbl[pkey_idx];
669 /*
670 * Is this pkey present in the current table ?
671 */
672 for (opkey_idx = 0; opkey_idx < port->pa_npkeys; opkey_idx++) {
673 if (pkey == port->pa_pkey_tbl[opkey_idx].pt_pkey) {
674 pkey_tbl[pkey_idx].pt_qp_hdl =
675 port->pa_pkey_tbl[opkey_idx].pt_qp_hdl;
676 port->pa_pkey_tbl[opkey_idx].pt_qp_hdl = NULL;
677 break;
678 }
679 }
680
681 if (opkey_idx == port->pa_npkeys) {
682 pkey = pkey_tbl[pkey_idx].pt_pkey;
683 if (IBDM_INVALID_PKEY(pkey)) {
684 pkey_tbl[pkey_idx].pt_qp_hdl = NULL;
685 continue;
686 }
687 ibdm_port_attr_ibmf_init(&newport, pkey, pkey_idx);
688 }
689 }
690
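	/*
	 * Tear down IBMF QP handles for pkeys that are no longer present;
	 * handles carried over to the new table were cleared from the old
	 * table above, so only stale entries remain non-NULL here.
	 */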
691 for (opkey_idx = 0; opkey_idx < port->pa_npkeys; opkey_idx++) {
692 if (port->pa_pkey_tbl[opkey_idx].pt_qp_hdl != NULL) {
693 if (ibdm_port_attr_ibmf_fini(port, opkey_idx) !=
694 IBDM_SUCCESS) {
695 IBTF_DPRINTF_L2("ibdm", "\tupdate_port_pkeys: "
696 "ibdm_port_attr_ibmf_fini failed for "
697 "port pkey 0x%x",
698 port->pa_pkey_tbl[opkey_idx].pt_pkey);
699 }
700 }
701 }
702
703 if (port->pa_pkey_tbl != NULL) {
704 kmem_free(port->pa_pkey_tbl,
705 port->pa_npkeys * sizeof (ibdm_pkey_tbl_t));
706 }
707
708 port->pa_npkeys = npkeys;
709 port->pa_pkey_tbl = pkey_tbl;
710 port->pa_sn_prefix = pinfop->p_sgid_tbl[0].gid_prefix;
711 port->pa_state = pinfop->p_linkstate;
712 ibt_free_portinfo(pinfop, size);
713 }
714
715 /*
716 * ibdm_initialize_port()
717 * Register with IBMF
718 * Register with SA access
719 * Register a receive callback routine with IBMF. IBMF invokes
720 * this routine whenever a MAD arrives at this port.
721 * Update the port attributes
722 */
723 static void
724 ibdm_initialize_port(ibdm_port_attr_t *port)
725 {
726 int ii;
727 uint_t nports, size;
728 uint_t pkey_idx;
729 ib_pkey_t pkey;
730 ibt_hca_portinfo_t *pinfop;
731 ibmf_register_info_t ibmf_reg;
732 ibmf_saa_subnet_event_args_t event_args;
733
734 IBTF_DPRINTF_L4("ibdm", "\tinitialize_port:");
735 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
736
737 /* Check whether the port is active */
738 if (ibt_get_port_state(port->pa_hca_hdl, port->pa_port_num, NULL,
739 NULL) != IBT_SUCCESS)
740 return;
741
742 if (port->pa_sa_hdl != NULL || port->pa_pkey_tbl != NULL)
743 return;
744
745 if (ibt_query_hca_ports(port->pa_hca_hdl, port->pa_port_num,
746 &pinfop, &nports, &size) != IBT_SUCCESS) {
747 /* This should not occur */
748 port->pa_npkeys = 0;
749 port->pa_pkey_tbl = NULL;
750 return;
751 }
752 port->pa_sn_prefix = pinfop->p_sgid_tbl[0].gid_prefix;
753
754 port->pa_state = pinfop->p_linkstate;
755 port->pa_npkeys = pinfop->p_pkey_tbl_sz;
756 port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc(
757 port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP);
758
759 for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++)
760 port->pa_pkey_tbl[pkey_idx].pt_pkey =
761 pinfop->p_pkey_tbl[pkey_idx];
762
763 ibt_free_portinfo(pinfop, size);
764
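	/*
	 * SA and IBMF registration is needed only when IOC enumeration is
	 * enabled (ibdm_enumerate_iocs, which is 0 by default).
	 */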
765 if (ibdm_enumerate_iocs) {
766 event_args.is_event_callback = ibdm_saa_event_cb;
767 event_args.is_event_callback_arg = port;
768 if (ibmf_sa_session_open(port->pa_port_guid, 0, &event_args,
769 IBMF_VERSION, 0, &port->pa_sa_hdl) != IBMF_SUCCESS) {
770 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
771 "sa access registration failed");
772 (void) ibdm_fini_port(port);
773 return;
774 }
775
776 ibmf_reg.ir_ci_guid = port->pa_hca_guid;
777 ibmf_reg.ir_port_num = port->pa_port_num;
778 ibmf_reg.ir_client_class = DEV_MGT_MANAGER;
779
780 if (ibmf_register(&ibmf_reg, IBMF_VERSION, 0, NULL, NULL,
781 &port->pa_ibmf_hdl, &port->pa_ibmf_caps) != IBMF_SUCCESS) {
782 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
783 "IBMF registration failed");
784 (void) ibdm_fini_port(port);
785 return;
786 }
787
788 if (ibmf_setup_async_cb(port->pa_ibmf_hdl,
789 IBMF_QP_HANDLE_DEFAULT,
790 ibdm_ibmf_recv_cb, 0, 0) != IBMF_SUCCESS) {
791 IBTF_DPRINTF_L2("ibdm", "\tinitialize_port: "
792 "IBMF setup recv cb failed");
793 (void) ibdm_fini_port(port);
794 return;
795 }
796 } else {
797 port->pa_sa_hdl = NULL;
798 port->pa_ibmf_hdl = NULL;
799 }
800
801 for (ii = 0; ii < port->pa_npkeys; ii++) {
802 pkey = port->pa_pkey_tbl[ii].pt_pkey;
803 if (IBDM_INVALID_PKEY(pkey)) {
804 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
805 continue;
806 }
807 ibdm_port_attr_ibmf_init(port, pkey, ii);
808 }
809 }
810
811
812 /*
813 * ibdm_port_attr_ibmf_init:
814 * With IBMF - Alloc QP Handle and Setup Async callback
815 */
816 static void
817 ibdm_port_attr_ibmf_init(ibdm_port_attr_t *port, ib_pkey_t pkey, int ii)
818 {
819 int ret;
820
821 if (ibdm_enumerate_iocs == 0) {
822 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
823 return;
824 }
825
826 if ((ret = ibmf_alloc_qp(port->pa_ibmf_hdl, pkey, IB_GSI_QKEY,
827 IBMF_ALT_QP_MAD_NO_RMPP, &port->pa_pkey_tbl[ii].pt_qp_hdl)) !=
828 IBMF_SUCCESS) {
829 IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: "
830 "IBMF failed to alloc qp %d", ret);
831 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
832 return;
833 }
834
835 	IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_init: QP handle is %p",
836 	    port->pa_pkey_tbl[ii].pt_qp_hdl);
837
838 if ((ret = ibmf_setup_async_cb(port->pa_ibmf_hdl,
839 port->pa_pkey_tbl[ii].pt_qp_hdl, ibdm_ibmf_recv_cb, 0, 0)) !=
840 IBMF_SUCCESS) {
841 IBTF_DPRINTF_L2("ibdm", "\tport_attr_ibmf_init: "
842 "IBMF setup recv cb failed %d", ret);
843 (void) ibmf_free_qp(port->pa_ibmf_hdl,
844 &port->pa_pkey_tbl[ii].pt_qp_hdl, 0);
845 port->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
846 }
847 }
848
849
850 /*
851 * ibdm_get_port_attr()
852 * Get port attributes from HCA guid and port number
853 * Return pointer to ibdm_port_attr_t on Success
854 * and NULL on failure
855 */
856 static ibdm_port_attr_t *
857 ibdm_get_port_attr(ibt_async_event_t *event, ibdm_hca_list_t **retval)
858 {
859 ibdm_hca_list_t *hca_list;
860 ibdm_port_attr_t *port_attr;
861 int ii;
862
863 IBTF_DPRINTF_L4("ibdm", "\tget_port_attr: port# %d", event->ev_port);
864 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
865 hca_list = ibdm.ibdm_hca_list_head;
866 while (hca_list) {
867 if (hca_list->hl_hca_guid == event->ev_hca_guid) {
868 for (ii = 0; ii < hca_list->hl_nports; ii++) {
869 port_attr = &hca_list->hl_port_attr[ii];
870 if (port_attr->pa_port_num == event->ev_port) {
871 *retval = hca_list;
872 return (port_attr);
873 }
874 }
875 }
876 hca_list = hca_list->hl_next;
877 }
878 return (NULL);
879 }
880
881
882 /*
883 * ibdm_update_port_attr()
884 * Update the port attributes
885 */
886 static void
887 ibdm_update_port_attr(ibdm_port_attr_t *port)
888 {
889 uint_t nports, size;
890 uint_t pkey_idx;
891 ibt_hca_portinfo_t *portinfop;
892
893 IBTF_DPRINTF_L4("ibdm", "\tupdate_port_attr: Begin");
894 if (ibt_query_hca_ports(port->pa_hca_hdl,
895 port->pa_port_num, &portinfop, &nports, &size) != IBT_SUCCESS) {
896 /* This should not occur */
897 port->pa_npkeys = 0;
898 port->pa_pkey_tbl = NULL;
899 return;
900 }
901 port->pa_sn_prefix = portinfop->p_sgid_tbl[0].gid_prefix;
902
903 port->pa_state = portinfop->p_linkstate;
904
905 /*
906 * PKey information in portinfo is valid only if the port is
907 * ACTIVE. Bail out if it is not.
908 */
909 if (port->pa_state != IBT_PORT_ACTIVE) {
910 port->pa_npkeys = 0;
911 port->pa_pkey_tbl = NULL;
912 ibt_free_portinfo(portinfop, size);
913 return;
914 }
915
916 port->pa_npkeys = portinfop->p_pkey_tbl_sz;
917 port->pa_pkey_tbl = (ibdm_pkey_tbl_t *)kmem_zalloc(
918 port->pa_npkeys * sizeof (ibdm_pkey_tbl_t), KM_SLEEP);
919
920 for (pkey_idx = 0; pkey_idx < port->pa_npkeys; pkey_idx++) {
921 port->pa_pkey_tbl[pkey_idx].pt_pkey =
922 portinfop->p_pkey_tbl[pkey_idx];
923 }
924 ibt_free_portinfo(portinfop, size);
925 }
926
927
928 /*
929 * ibdm_handle_hca_attach()
930 */
931 static void
932 ibdm_handle_hca_attach(ib_guid_t hca_guid)
933 {
934 uint_t size;
935 uint_t ii, nports;
936 ibt_status_t status;
937 ibt_hca_hdl_t hca_hdl;
938 ibt_hca_attr_t *hca_attr;
939 ibdm_hca_list_t *hca_list, *temp;
940 ibdm_port_attr_t *port_attr;
941 ibt_hca_portinfo_t *portinfop;
942
943 IBTF_DPRINTF_L4("ibdm",
944 "\thandle_hca_attach: hca_guid = 0x%llX", hca_guid);
945
946 /* open the HCA first */
947 if ((status = ibt_open_hca(ibdm.ibdm_ibt_clnt_hdl, hca_guid,
948 &hca_hdl)) != IBT_SUCCESS) {
949 IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: "
950 "open_hca failed, status 0x%x", status);
951 return;
952 }
953
954 hca_attr = (ibt_hca_attr_t *)
955 kmem_alloc(sizeof (ibt_hca_attr_t), KM_SLEEP);
956 /* ibt_query_hca always returns IBT_SUCCESS */
957 (void) ibt_query_hca(hca_hdl, hca_attr);
958
959 IBTF_DPRINTF_L4("ibdm", "\tvid: 0x%x, pid: 0x%x, ver: 0x%x,"
960 " #ports: %d", hca_attr->hca_vendor_id, hca_attr->hca_device_id,
961 hca_attr->hca_version_id, hca_attr->hca_nports);
962
963 if ((status = ibt_query_hca_ports(hca_hdl, 0, &portinfop, &nports,
964 &size)) != IBT_SUCCESS) {
965 IBTF_DPRINTF_L2("ibdm", "\thandle_hca_attach: "
966 "ibt_query_hca_ports failed, status 0x%x", status);
967 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
968 (void) ibt_close_hca(hca_hdl);
969 return;
970 }
971 hca_list = (ibdm_hca_list_t *)
972 kmem_zalloc((sizeof (ibdm_hca_list_t)), KM_SLEEP);
973 hca_list->hl_port_attr = (ibdm_port_attr_t *)kmem_zalloc(
974 (sizeof (ibdm_port_attr_t) * hca_attr->hca_nports), KM_SLEEP);
975 hca_list->hl_hca_guid = hca_attr->hca_node_guid;
976 hca_list->hl_nports = hca_attr->hca_nports;
977 hca_list->hl_attach_time = gethrtime();
978 hca_list->hl_hca_hdl = hca_hdl;
979
980 /*
981 * Init a dummy port attribute for the HCA node itself
982 * (per-HCA node). Initialize port_attr as follows:
983 * hca_guid & port_guid -> hca_guid
984 * npkeys, pkey_tbl is NULL
985 * port_num, sn_prefix is 0
986 * vendorid, product_id, dev_version from HCA
987 * pa_state is IBT_PORT_ACTIVE
988 */
989 hca_list->hl_hca_port_attr = (ibdm_port_attr_t *)kmem_zalloc(
990 sizeof (ibdm_port_attr_t), KM_SLEEP);
991 port_attr = hca_list->hl_hca_port_attr;
992 port_attr->pa_vendorid = hca_attr->hca_vendor_id;
993 port_attr->pa_productid = hca_attr->hca_device_id;
994 port_attr->pa_dev_version = hca_attr->hca_version_id;
995 port_attr->pa_hca_guid = hca_attr->hca_node_guid;
996 port_attr->pa_hca_hdl = hca_list->hl_hca_hdl;
997 port_attr->pa_port_guid = hca_attr->hca_node_guid;
998 port_attr->pa_state = IBT_PORT_ACTIVE;
999
1000
1001 for (ii = 0; ii < nports; ii++) {
1002 port_attr = &hca_list->hl_port_attr[ii];
1003 port_attr->pa_vendorid = hca_attr->hca_vendor_id;
1004 port_attr->pa_productid = hca_attr->hca_device_id;
1005 port_attr->pa_dev_version = hca_attr->hca_version_id;
1006 port_attr->pa_hca_guid = hca_attr->hca_node_guid;
1007 port_attr->pa_hca_hdl = hca_list->hl_hca_hdl;
1008 port_attr->pa_port_guid = portinfop[ii].p_sgid_tbl->gid_guid;
1009 port_attr->pa_sn_prefix = portinfop[ii].p_sgid_tbl->gid_prefix;
1010 port_attr->pa_port_num = portinfop[ii].p_port_num;
1011 port_attr->pa_state = portinfop[ii].p_linkstate;
1012
1013 /*
1014 * Register with IBMF, SA access when the port is in
1015 * ACTIVE state. Also register a callback routine
1016 * with IBMF to receive incoming DM MADs.
1017 * The IBDM event handler takes care of registering
1018 * ports that are not active.
1019 */
1020 IBTF_DPRINTF_L4("ibdm",
1021 "\thandle_hca_attach: port guid %llx Port state 0x%x",
1022 port_attr->pa_port_guid, portinfop[ii].p_linkstate);
1023
1024 if (portinfop[ii].p_linkstate == IBT_PORT_ACTIVE) {
1025 mutex_enter(&ibdm.ibdm_hl_mutex);
1026 hca_list->hl_nports_active++;
1027 ibdm_initialize_port(port_attr);
1028 cv_broadcast(&ibdm.ibdm_port_settle_cv);
1029 mutex_exit(&ibdm.ibdm_hl_mutex);
1030 }
1031 }
1032 mutex_enter(&ibdm.ibdm_hl_mutex);
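	/* If this HCA is already known to IBDM, discard the new entry */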
1033 for (temp = ibdm.ibdm_hca_list_head; temp; temp = temp->hl_next) {
1034 if (temp->hl_hca_guid == hca_guid) {
1035 IBTF_DPRINTF_L2("ibdm", "hca_attach: HCA %llX "
1036 "already seen by IBDM", hca_guid);
1037 mutex_exit(&ibdm.ibdm_hl_mutex);
1038 (void) ibdm_uninit_hca(hca_list);
1039 return;
1040 }
1041 }
1042 ibdm.ibdm_hca_count++;
1043 if (ibdm.ibdm_hca_list_head == NULL) {
1044 ibdm.ibdm_hca_list_head = hca_list;
1045 ibdm.ibdm_hca_list_tail = hca_list;
1046 } else {
1047 ibdm.ibdm_hca_list_tail->hl_next = hca_list;
1048 ibdm.ibdm_hca_list_tail = hca_list;
1049 }
1050 mutex_exit(&ibdm.ibdm_hl_mutex);
1051 mutex_enter(&ibdm.ibdm_ibnex_mutex);
1052 if (ibdm.ibdm_ibnex_callback != NULL) {
1053 (*ibdm.ibdm_ibnex_callback)((void *)
1054 &hca_guid, IBDM_EVENT_HCA_ADDED);
1055 }
1056 mutex_exit(&ibdm.ibdm_ibnex_mutex);
1057
1058 kmem_free(hca_attr, sizeof (ibt_hca_attr_t));
1059 ibt_free_portinfo(portinfop, size);
1060 }
1061
1062
1063 /*
1064 * ibdm_handle_hca_detach()
1065 */
1066 static void
1067 ibdm_handle_hca_detach(ib_guid_t hca_guid)
1068 {
1069 ibdm_hca_list_t *head, *prev = NULL;
1070 size_t len;
1071 ibdm_dp_gidinfo_t *gidinfo;
1072 ibdm_port_attr_t *port_attr;
1073 int i;
1074
1075 IBTF_DPRINTF_L4("ibdm",
1076 "\thandle_hca_detach: hca_guid = 0x%llx", hca_guid);
1077
1078 /* Make sure no probes are running */
1079 mutex_enter(&ibdm.ibdm_mutex);
1080 while (ibdm.ibdm_busy & IBDM_BUSY)
1081 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
1082 ibdm.ibdm_busy |= IBDM_BUSY;
1083 mutex_exit(&ibdm.ibdm_mutex);
1084
1085 mutex_enter(&ibdm.ibdm_hl_mutex);
1086 head = ibdm.ibdm_hca_list_head;
1087 while (head) {
1088 if (head->hl_hca_guid == hca_guid) {
1089 if (prev == NULL)
1090 ibdm.ibdm_hca_list_head = head->hl_next;
1091 else
1092 prev->hl_next = head->hl_next;
1093 if (ibdm.ibdm_hca_list_tail == head)
1094 ibdm.ibdm_hca_list_tail = prev;
1095 ibdm.ibdm_hca_count--;
1096 break;
1097 }
1098 prev = head;
1099 head = head->hl_next;
1100 }
1101 mutex_exit(&ibdm.ibdm_hl_mutex);
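	/*
	 * If the HCA entry could not be torn down, re-attach it so that
	 * IBDM's view of the HCA remains consistent.
	 */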
1102 if (ibdm_uninit_hca(head) != IBDM_SUCCESS)
1103 (void) ibdm_handle_hca_attach(hca_guid);
1104
1105 #ifdef DEBUG
1106 if (ibdm_enumerate_iocs == 0) {
1107 ASSERT(ibdm.ibdm_dp_gidlist_head == NULL);
1108 }
1109 #endif
1110
1111 /*
1112 * Now clean up the HCA lists in the gidlist.
1113 */
1114 for (gidinfo = ibdm.ibdm_dp_gidlist_head; gidinfo; gidinfo =
1115 gidinfo->gl_next) {
1116 prev = NULL;
1117 head = gidinfo->gl_hca_list;
1118 while (head) {
1119 if (head->hl_hca_guid == hca_guid) {
1120 if (prev == NULL)
1121 gidinfo->gl_hca_list =
1122 head->hl_next;
1123 else
1124 prev->hl_next = head->hl_next;
1125 for (i = 0; i < head->hl_nports; i++) {
1126 port_attr = &head->hl_port_attr[i];
1127 if (port_attr->pa_pkey_tbl != NULL)
1128 kmem_free(
1129 port_attr->pa_pkey_tbl,
1130 port_attr->pa_npkeys *
1131 sizeof (ibdm_pkey_tbl_t));
1132 }
1133 len = sizeof (ibdm_hca_list_t) +
1134 (head->hl_nports *
1135 sizeof (ibdm_port_attr_t));
1136 kmem_free(head, len);
1137
1138 break;
1139 }
1140 prev = head;
1141 head = head->hl_next;
1142 }
1143 }
1144
1145 mutex_enter(&ibdm.ibdm_mutex);
1146 ibdm.ibdm_busy &= ~IBDM_BUSY;
1147 cv_broadcast(&ibdm.ibdm_busy_cv);
1148 mutex_exit(&ibdm.ibdm_mutex);
1149 }
1150
1151
1152 static int
1153 ibdm_uninit_hca(ibdm_hca_list_t *head)
1154 {
1155 int ii;
1156 ibdm_port_attr_t *port_attr;
1157
1158 for (ii = 0; ii < head->hl_nports; ii++) {
1159 port_attr = &head->hl_port_attr[ii];
1160 if (ibdm_fini_port(port_attr) != IBDM_SUCCESS) {
1161 IBTF_DPRINTF_L2("ibdm", "uninit_hca: HCA %p port 0x%x "
1162 "ibdm_fini_port() failed", head, ii);
1163 return (IBDM_FAILURE);
1164 }
1165 }
1166 if (head->hl_hca_hdl)
1167 if (ibt_close_hca(head->hl_hca_hdl) != IBT_SUCCESS) {
1168 IBTF_DPRINTF_L2("ibdm", "uninit_hca: "
1169 "ibt_close_hca() failed");
1170 return (IBDM_FAILURE);
1171 }
1172 kmem_free(head->hl_port_attr,
1173 head->hl_nports * sizeof (ibdm_port_attr_t));
1174 kmem_free(head->hl_hca_port_attr, sizeof (ibdm_port_attr_t));
1175 kmem_free(head, sizeof (ibdm_hca_list_t));
1176 return (IBDM_SUCCESS);
1177 }
1178
1179
1180 /*
1181 * For each port on the HCA,
1182 * 1) Teardown IBMF receive callback function
1183 * 2) Unregister with IBMF
1184 * 3) Unregister with SA access
1185 */
1186 static int
1187 ibdm_fini_port(ibdm_port_attr_t *port_attr)
1188 {
1189 int ii, ibmf_status;
1190
1191 for (ii = 0; ii < port_attr->pa_npkeys; ii++) {
1192 if (port_attr->pa_pkey_tbl == NULL)
1193 break;
1194 if (!port_attr->pa_pkey_tbl[ii].pt_qp_hdl)
1195 continue;
1196 if (ibdm_port_attr_ibmf_fini(port_attr, ii) != IBDM_SUCCESS) {
1197 IBTF_DPRINTF_L4("ibdm", "\tfini_port: "
1198 "ibdm_port_attr_ibmf_fini failed for "
1199 "port pkey 0x%x", ii);
1200 return (IBDM_FAILURE);
1201 }
1202 }
1203
1204 if (port_attr->pa_ibmf_hdl) {
1205 ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl,
1206 IBMF_QP_HANDLE_DEFAULT, 0);
1207 if (ibmf_status != IBMF_SUCCESS) {
1208 IBTF_DPRINTF_L4("ibdm", "\tfini_port: "
1209 "ibmf_tear_down_async_cb failed %d", ibmf_status);
1210 return (IBDM_FAILURE);
1211 }
1212
1213 ibmf_status = ibmf_unregister(&port_attr->pa_ibmf_hdl, 0);
1214 if (ibmf_status != IBMF_SUCCESS) {
1215 IBTF_DPRINTF_L2("ibdm", "\tfini_port: "
1216 "ibmf_unregister failed %d", ibmf_status);
1217 return (IBDM_FAILURE);
1218 }
1219
1220 port_attr->pa_ibmf_hdl = NULL;
1221 }
1222
1223 if (port_attr->pa_sa_hdl) {
1224 ibmf_status = ibmf_sa_session_close(&port_attr->pa_sa_hdl, 0);
1225 if (ibmf_status != IBMF_SUCCESS) {
1226 IBTF_DPRINTF_L2("ibdm", "\tfini_port: "
1227 "ibmf_sa_session_close failed %d", ibmf_status);
1228 return (IBDM_FAILURE);
1229 }
1230 port_attr->pa_sa_hdl = NULL;
1231 }
1232
1233 if (port_attr->pa_pkey_tbl != NULL) {
1234 kmem_free(port_attr->pa_pkey_tbl,
1235 port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t));
1236 port_attr->pa_pkey_tbl = NULL;
1237 port_attr->pa_npkeys = 0;
1238 }
1239
1240 return (IBDM_SUCCESS);
1241 }
1242
1243
1244 /*
1245 * ibdm_port_attr_ibmf_fini:
1246 * With IBMF - Tear down Async callback and free QP Handle
1247 */
1248 static int
1249 ibdm_port_attr_ibmf_fini(ibdm_port_attr_t *port_attr, int ii)
1250 {
1251 int ibmf_status;
1252
1253 IBTF_DPRINTF_L5("ibdm", "\tport_attr_ibmf_fini:");
1254
1255 if (ibdm_enumerate_iocs == 0) {
1256 ASSERT(port_attr->pa_pkey_tbl[ii].pt_qp_hdl == NULL);
1257 return (IBDM_SUCCESS);
1258 }
1259
1260 if (port_attr->pa_pkey_tbl[ii].pt_qp_hdl) {
1261 ibmf_status = ibmf_tear_down_async_cb(port_attr->pa_ibmf_hdl,
1262 port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0);
1263 if (ibmf_status != IBMF_SUCCESS) {
1264 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: "
1265 "ibmf_tear_down_async_cb failed %d", ibmf_status);
1266 return (IBDM_FAILURE);
1267 }
1268 ibmf_status = ibmf_free_qp(port_attr->pa_ibmf_hdl,
1269 &port_attr->pa_pkey_tbl[ii].pt_qp_hdl, 0);
1270 if (ibmf_status != IBMF_SUCCESS) {
1271 IBTF_DPRINTF_L4("ibdm", "\tport_attr_ibmf_fini: "
1272 "ibmf_free_qp failed %d", ibmf_status);
1273 return (IBDM_FAILURE);
1274 }
1275 port_attr->pa_pkey_tbl[ii].pt_qp_hdl = NULL;
1276 }
1277 return (IBDM_SUCCESS);
1278 }
1279
1280
1281 /*
1282 * ibdm_gid_decr_pending:
1283 *	Decrement gl_pending_cmds; if it reaches zero, wake up sleeping threads.
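 *	Note: ibdm.ibdm_mutex is acquired before gl_mutex here, the same
 *	order used elsewhere when both locks are held.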
1284 */
1285 static void
1286 ibdm_gid_decr_pending(ibdm_dp_gidinfo_t *gidinfo)
1287 {
1288 mutex_enter(&ibdm.ibdm_mutex);
1289 mutex_enter(&gidinfo->gl_mutex);
1290 if (--gidinfo->gl_pending_cmds == 0) {
1291 /*
1292 * Handle DGID getting removed.
1293 */
1294 if (gidinfo->gl_disconnected) {
1295 mutex_exit(&gidinfo->gl_mutex);
1296 mutex_exit(&ibdm.ibdm_mutex);
1297
1298 IBTF_DPRINTF_L3(ibdm_string, "\tgid_decr_pending: "
1299 "gidinfo %p hot removal", gidinfo);
1300 ibdm_delete_gidinfo(gidinfo);
1301
1302 mutex_enter(&ibdm.ibdm_mutex);
1303 ibdm.ibdm_ngid_probes_in_progress--;
1304 ibdm_wait_probe_completion();
1305 mutex_exit(&ibdm.ibdm_mutex);
1306 return;
1307 }
1308 mutex_exit(&gidinfo->gl_mutex);
1309 mutex_exit(&ibdm.ibdm_mutex);
1310 ibdm_notify_newgid_iocs(gidinfo);
1311 mutex_enter(&ibdm.ibdm_mutex);
1312 mutex_enter(&gidinfo->gl_mutex);
1313
1314 ibdm.ibdm_ngid_probes_in_progress--;
1315 ibdm_wait_probe_completion();
1316 }
1317 mutex_exit(&gidinfo->gl_mutex);
1318 mutex_exit(&ibdm.ibdm_mutex);
1319 }
1320
1321
1322 /*
1323 * ibdm_wait_probe_completion:
1324 * wait for probing to complete
1325 */
1326 static void
1327 ibdm_wait_probe_completion(void)
1328 {
1329 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
1330 if (ibdm.ibdm_ngid_probes_in_progress) {
1331 IBTF_DPRINTF_L4("ibdm", "\twait for probe complete");
1332 ibdm.ibdm_busy |= IBDM_PROBE_IN_PROGRESS;
1333 while (ibdm.ibdm_busy & IBDM_PROBE_IN_PROGRESS)
1334 cv_wait(&ibdm.ibdm_probe_cv, &ibdm.ibdm_mutex);
1335 }
1336 }
1337
1338
1339 /*
1340 * ibdm_wait_cisco_probe_completion:
1341 * wait for the reply from the Cisco FC GW switch after a SetClassPortInfo
1342 * request is sent. This wait is performed per GID.
1343 */
1344 static void
1345 ibdm_wait_cisco_probe_completion(ibdm_dp_gidinfo_t *gidinfo)
1346 {
1347 ASSERT(MUTEX_HELD(&gidinfo->gl_mutex));
1348 IBTF_DPRINTF_L4("ibdm", "\twait for cisco probe complete");
1349 gidinfo->gl_flag |= IBDM_CISCO_PROBE;
1350 while (gidinfo->gl_flag & IBDM_CISCO_PROBE)
1351 cv_wait(&gidinfo->gl_probe_cv, &gidinfo->gl_mutex);
1352 }
1353
1354
1355 /*
1356 * ibdm_wakeup_probe_gid_cv:
1357 * wakeup waiting threads (based on ibdm_ngid_probes_in_progress)
1358 */
1359 static void
1360 ibdm_wakeup_probe_gid_cv(void)
1361 {
1362 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
1363 if (!ibdm.ibdm_ngid_probes_in_progress) {
1364 IBTF_DPRINTF_L4("ibdm", "wakeup_probe_gid_thread: Wakeup");
1365 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS;
1366 cv_broadcast(&ibdm.ibdm_probe_cv);
1367 }
1368
1369 }
1370
1371
1372 /*
1373 * ibdm_sweep_fabric(reprobe_flag)
1374 * Find all possible managed IOUs and their IOCs that are visible
1375 * to the host. The algorithm used is as follows:
1376 *
1377 * Send a "bus walk" request for each port on the host HCA to SA access
1378 * The SA returns the complete set of GIDs that are reachable from the
1379 * source port. This is done in parallel.
1380 *
1381 * Initialize GID state to IBDM_GID_PROBE_NOT_DONE
1382 *
1383 * Sort the GID list and eliminate duplicate GIDs
1384 * 1) Use DGID for sorting
1385 * 2) Use PortGuid for sorting
1386 * Send SA query to retrieve NodeRecord and
1387 * extract PortGuid from that.
1388 *
1389 * Set GID state to IBDM_GID_PROBE_FAILED for all the ports that don't
1390 * support DM MADs
1391 * Send a "Portinfo" query to get the port capabilities and
1392 * then check for DM MAD's support
1393 *
1394 * Send "ClassPortInfo" request for all the GID's in parallel,
1395 * set the GID state to IBDM_GET_CLASSPORTINFO and wait on the
1396 * cv_signal to complete.
1397 *
1398 * When the DM agent on the remote GID sends back the response, IBMF
1399 * invokes the DM callback routine.
1400 *
1401 * If the response is proper, send "IOUnitInfo" request and set
1402 * GID state to IBDM_GET_IOUNITINFO.
1403 *
1404 * If the response is proper, send "IocProfileInfo" request to
1405 * all the IOCs simultaneously and set GID state to IBDM_GET_IOC_DETAILS.
1406 *
1407 * Send request to get Service entries simultaneously
1408 *
1409 * Signal the waiting thread when responses for all the commands are received.
1410 *
1411 * Set the GID state to IBDM_GID_PROBE_FAILED when an error
1412 * response is received during the probing period.
1413 *
1414 * Note:
1415 * ibdm.ibdm_ngid_probes_in_progress and ibdm_dp_gidinfo_t:gl_pending_cmds
1416 * keep track of the number of commands in progress at any point in time.
1417 * MAD transaction ID is used to identify a particular GID
1418 * TBD: Consider registering the IBMF receive callback on demand
1419 *
1420 * Note: This routine must be called with ibdm.ibdm_mutex held
1421 * TBD: Re-probe the failed GIDs (for certain failures) when the next
1422 * fabric sweep is requested
1423 *
1424 * Parameters : If reprobe_flag is set, all IOCs will be reprobed.
1425 */
1426 static void
1427 ibdm_sweep_fabric(int reprobe_flag)
1428 {
1429 int ii;
1430 int new_paths = 0;
1431 uint8_t niocs;
1432 taskqid_t tid;
1433 ibdm_ioc_info_t *ioc;
1434 ibdm_hca_list_t *hca_list = NULL;
1435 ibdm_port_attr_t *port = NULL;
1436 ibdm_dp_gidinfo_t *gid_info;
1437
1438 IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: Enter");
1439 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
1440
1441 /*
1442 * Check whether a sweep is already in progress. If so, just
1443 * wait for that fabric sweep to complete.
1444 */
1445 while (ibdm.ibdm_busy & IBDM_BUSY)
1446 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
1447 ibdm.ibdm_busy |= IBDM_BUSY;
1448 mutex_exit(&ibdm.ibdm_mutex);
1449
1450 ibdm_dump_sweep_fabric_timestamp(0);
1451
1452 /* Rescan the GID list for any removed GIDs for reprobe */
1453 if (reprobe_flag)
1454 ibdm_rescan_gidlist(NULL);
1455
1456 /*
1457 * Get the list of all the ports reachable from the local known
1458 * HCA ports that are active
1459 */
1460 mutex_enter(&ibdm.ibdm_hl_mutex);
1461 for (ibdm_get_next_port(&hca_list, &port, 1); port;
1462 ibdm_get_next_port(&hca_list, &port, 1)) {
1463 /*
1464 * Get PATHS to all the reachable ports from
1465 * SGID and update the global ibdm structure.
1466 */
1467 new_paths = ibdm_get_reachable_ports(port, hca_list);
1468 ibdm.ibdm_ngids += new_paths;
1469 }
1470 mutex_exit(&ibdm.ibdm_hl_mutex);
1471
1472 mutex_enter(&ibdm.ibdm_mutex);
1473 ibdm.ibdm_ngid_probes_in_progress += ibdm.ibdm_ngids;
1474 mutex_exit(&ibdm.ibdm_mutex);
1475
1476 /* Send a request to probe GIDs asynchronously. */
1477 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
1478 gid_info = gid_info->gl_next) {
1479 mutex_enter(&gid_info->gl_mutex);
1480 gid_info->gl_reprobe_flag = reprobe_flag;
1481 mutex_exit(&gid_info->gl_mutex);
1482
1483 /* process newly encountered GIDs */
1484 tid = taskq_dispatch(system_taskq, ibdm_probe_gid_thread,
1485 (void *)gid_info, TQ_NOSLEEP);
1486 IBTF_DPRINTF_L4("ibdm", "\tsweep_fabric: gid_info = %p"
1487 " taskq_id = %x", gid_info, tid);
1488 /* If taskq dispatch failed, call the probe routine directly */
1489 if (tid == NULL)
1490 ibdm_probe_gid_thread((void *)gid_info);
1491 }
1492
1493 mutex_enter(&ibdm.ibdm_mutex);
1494 ibdm_wait_probe_completion();
1495
1496 /*
1497 * Update the properties, if reprobe_flag is set.
1498 * Skip if gl_reprobe_flag is set; this will be
1499 * a re-inserted / new GID, for which notifications
1500 * have already been sent.
1501 */
1502 if (reprobe_flag) {
1503 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
1504 gid_info = gid_info->gl_next) {
1505 if (gid_info->gl_iou == NULL)
1506 continue;
1507 if (gid_info->gl_reprobe_flag) {
1508 gid_info->gl_reprobe_flag = 0;
1509 continue;
1510 }
1511
1512 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
1513 for (ii = 0; ii < niocs; ii++) {
1514 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii);
1515 if (ioc)
1516 ibdm_reprobe_update_port_srv(ioc,
1517 gid_info);
1518 }
1519 }
1520 } else if (ibdm.ibdm_prev_iou) {
1521 ibdm_ioc_info_t *ioc_list;
1522
1523 /*
1524 * Get the list of IOCs which have changed.
1525 * If any IOCs have changed, notify the IB nexus driver.
1526 */
1527 ibdm.ibdm_prev_iou = 0;
1528 ioc_list = ibdm_handle_prev_iou();
1529 if (ioc_list) {
1530 if (ibdm.ibdm_ibnex_callback != NULL) {
1531 (*ibdm.ibdm_ibnex_callback)(
1532 (void *)ioc_list,
1533 IBDM_EVENT_IOC_PROP_UPDATE);
1534 }
1535 }
1536 }
1537
1538 ibdm_dump_sweep_fabric_timestamp(1);
1539
1540 ibdm.ibdm_busy &= ~IBDM_BUSY;
1541 cv_broadcast(&ibdm.ibdm_busy_cv);
1542 IBTF_DPRINTF_L5("ibdm", "\tsweep_fabric: EXIT");
1543 }
1544
1545
1546 /*
1547 * ibdm_is_cisco:
1548 * Check if this is a Cisco device or not.
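 *	The high-order 24 bits of an EUI-64 GUID carry the IEEE OUI
 *	(company ID), hence the IBDM_OUI_GUID_SHIFT comparison below.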
1549 */
1550 static boolean_t
1551 ibdm_is_cisco(ib_guid_t guid)
1552 {
1553 if ((guid >> IBDM_OUI_GUID_SHIFT) == IBDM_CISCO_COMPANY_ID)
1554 return (B_TRUE);
1555 return (B_FALSE);
1556 }
1557
1558
1559 /*
1560 * ibdm_is_cisco_switch:
1561 * Check if this switch is a Cisco switch or not.
1562 * Note that if this switch has already been activated, ibdm_is_cisco_switch()
1563 * returns B_FALSE so that it is not activated again.
1564 */
1565 static boolean_t
1566 ibdm_is_cisco_switch(ibdm_dp_gidinfo_t *gid_info)
1567 {
1568 int company_id, device_id;
1569 ASSERT(gid_info != 0);
1570 ASSERT(MUTEX_HELD(&gid_info->gl_mutex));
1571
1572 /*
1573 * If this switch is already activated, don't re-activate it.
1574 */
1575 if (gid_info->gl_flag & IBDM_CISCO_PROBE_DONE)
1576 return (B_FALSE);
1577
1578 /*
1579 * Check if this switch is a Cisco FC GW or not.
1580 * Use the node guid (the OUI part) instead of the vendor id
1581 * since the vendor id is zero in practice.
1582 */
1583 company_id = gid_info->gl_nodeguid >> IBDM_OUI_GUID_SHIFT;
1584 device_id = gid_info->gl_devid;
1585
1586 if (company_id == IBDM_CISCO_COMPANY_ID &&
1587 device_id == IBDM_CISCO_DEVICE_ID)
1588 return (B_TRUE);
1589 return (B_FALSE);
1590 }
1591
1592
1593 /*
1594 * ibdm_probe_gid_thread:
1595 * thread that does the actual work for sweeping the fabric
1596 * for a given GID
1597 */
1598 static void
1599 ibdm_probe_gid_thread(void *args)
1600 {
1601 int reprobe_flag;
1602 ib_guid_t node_guid;
1603 ib_guid_t port_guid;
1604 ibdm_dp_gidinfo_t *gid_info;
1605
1606 gid_info = (ibdm_dp_gidinfo_t *)args;
1607 reprobe_flag = gid_info->gl_reprobe_flag;
1608 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: gid_info = %p, flag = %d",
1609 gid_info, reprobe_flag);
1610 ASSERT(gid_info != NULL);
1611 ASSERT(gid_info->gl_pending_cmds == 0);
1612
1613 if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE &&
1614 reprobe_flag == 0) {
1615 /*
1616 * This GID may already have been probed. Send
1617 * an IOUnitInfo request to check whether the IOU information changed.
1618 * Explicitly set gl_reprobe_flag to 0 so that
1619 * IBnex is not notified on completion
1620 */
1621 if (gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) {
1622 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid_thread: "
1623 "get new IOCs information");
1624 mutex_enter(&gid_info->gl_mutex);
1625 gid_info->gl_pending_cmds++;
1626 gid_info->gl_state = IBDM_GET_IOUNITINFO;
1627 gid_info->gl_reprobe_flag = 0;
1628 mutex_exit(&gid_info->gl_mutex);
1629 if (ibdm_send_iounitinfo(gid_info) != IBDM_SUCCESS) {
1630 mutex_enter(&gid_info->gl_mutex);
1631 --gid_info->gl_pending_cmds;
1632 mutex_exit(&gid_info->gl_mutex);
1633 mutex_enter(&ibdm.ibdm_mutex);
1634 --ibdm.ibdm_ngid_probes_in_progress;
1635 ibdm_wakeup_probe_gid_cv();
1636 mutex_exit(&ibdm.ibdm_mutex);
1637 }
1638 } else {
1639 mutex_enter(&ibdm.ibdm_mutex);
1640 --ibdm.ibdm_ngid_probes_in_progress;
1641 ibdm_wakeup_probe_gid_cv();
1642 mutex_exit(&ibdm.ibdm_mutex);
1643 }
1644 return;
1645 } else if (reprobe_flag && gid_info->gl_state ==
1646 IBDM_GID_PROBING_COMPLETE) {
1647 /*
1648 * Reprobe all IOCs for the GID which has completed
1649 * probe. Skip other port GIDs belonging to the same IOU.
1650 * Explicitly set gl_reprobe_flag to 0 so that
1651 * IBnex is not notified on completion
1652 */
1653 ibdm_ioc_info_t *ioc_info;
1654 uint8_t niocs, ii;
1655
1656 ASSERT(gid_info->gl_iou);
1657 mutex_enter(&gid_info->gl_mutex);
1658 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
1659 gid_info->gl_state = IBDM_GET_IOC_DETAILS;
1660 gid_info->gl_pending_cmds += niocs;
1661 gid_info->gl_reprobe_flag = 0;
1662 mutex_exit(&gid_info->gl_mutex);
1663 for (ii = 0; ii < niocs; ii++) {
1664 uchar_t slot_info;
1665 ib_dm_io_unitinfo_t *giou_info;
1666
1667 /*
1668 * Check whether IOC is present in the slot
1669 * Each nibble in the field
1670 * iou_ctrl_list represents a slot in the
1671 * IOU.
1672 * Byte format: 76543210
1673 * Bits 0-3 of first byte represent Slot 2
1674 * bits 4-7 of first byte represent slot 1,
1675 * bits 0-3 of second byte represent slot 4
1676 * and so on
1677 * Each 4-bit nibble has the following meaning
1678 * 0x0 : IOC not installed
1679 * 0x1 : IOC is present
1680 * 0xf : Slot does not exist
1681 * and all other values are reserved.
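			 * For example, if slots 1 and 3 hold IOCs,
			 * slot 2 is empty and slot 4 does not exist,
			 * then iou_ctrl_list[0] is 0x10 and
			 * iou_ctrl_list[1] is 0x1f.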
1682 */
1683 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii);
1684 giou_info = &gid_info->gl_iou->iou_info;
1685 slot_info = giou_info->iou_ctrl_list[(ii/2)];
1686 if ((ii % 2) == 0)
1687 slot_info = (slot_info >> 4);
1688
1689 if ((slot_info & 0xf) != 1) {
1690 ioc_info->ioc_state =
1691 IBDM_IOC_STATE_PROBE_FAILED;
1692 ibdm_gid_decr_pending(gid_info);
1693 continue;
1694 }
1695
1696 if (ibdm_send_ioc_profile(gid_info, ii) !=
1697 IBDM_SUCCESS) {
1698 ibdm_gid_decr_pending(gid_info);
1699 }
1700 }
1701
1702 return;
1703 } else if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) {
1704 mutex_enter(&ibdm.ibdm_mutex);
1705 --ibdm.ibdm_ngid_probes_in_progress;
1706 ibdm_wakeup_probe_gid_cv();
1707 mutex_exit(&ibdm.ibdm_mutex);
1708 return;
1709 }
1710
1711 /*
1712 * Check whether the destination GID supports DM agents. If
1713 * not, stop probing the GID and continue with the next GID
1714 * in the list.
1715 */
1716 if (ibdm_is_dev_mgt_supported(gid_info) != IBDM_SUCCESS) {
1717 mutex_enter(&gid_info->gl_mutex);
1718 gid_info->gl_state = IBDM_GID_PROBING_FAILED;
1719 gid_info->gl_is_dm_capable = B_FALSE;
1720 mutex_exit(&gid_info->gl_mutex);
1721 ibdm_delete_glhca_list(gid_info);
1722 mutex_enter(&ibdm.ibdm_mutex);
1723 --ibdm.ibdm_ngid_probes_in_progress;
1724 ibdm_wakeup_probe_gid_cv();
1725 mutex_exit(&ibdm.ibdm_mutex);
1726 return;
1727 }
1728
1729 /*
1730 * This GID is Device management capable
1731 */
1732 mutex_enter(&gid_info->gl_mutex);
1733 gid_info->gl_is_dm_capable = B_TRUE;
1734 mutex_exit(&gid_info->gl_mutex);
1735
1736 /* Get the nodeguid and portguid of the port */
1737 if (ibdm_get_node_port_guids(gid_info->gl_sa_hdl, gid_info->gl_dlid,
1738 &node_guid, &port_guid) != IBDM_SUCCESS) {
1739 mutex_enter(&gid_info->gl_mutex);
1740 gid_info->gl_state = IBDM_GID_PROBING_FAILED;
1741 mutex_exit(&gid_info->gl_mutex);
1742 ibdm_delete_glhca_list(gid_info);
1743 mutex_enter(&ibdm.ibdm_mutex);
1744 --ibdm.ibdm_ngid_probes_in_progress;
1745 ibdm_wakeup_probe_gid_cv();
1746 mutex_exit(&ibdm.ibdm_mutex);
1747 return;
1748 }
1749
1750 /*
1751 * Check whether we already knew about this NodeGuid
1752 * If so, do not probe the GID and continue with the
1753 * next GID in the gid list. Set the GID state to
1754 * probing skipped (IBDM_GID_PROBING_SKIPPED).
1755 */
1756 mutex_enter(&ibdm.ibdm_mutex);
1757 gid_info->gl_nodeguid = node_guid;
1758 gid_info->gl_portguid = port_guid;
1759 if (ibdm_check_dest_nodeguid(gid_info) != NULL) {
1760 mutex_exit(&ibdm.ibdm_mutex);
1761 mutex_enter(&gid_info->gl_mutex);
1762 gid_info->gl_state = IBDM_GID_PROBING_SKIPPED;
1763 mutex_exit(&gid_info->gl_mutex);
1764 ibdm_delete_glhca_list(gid_info);
1765 mutex_enter(&ibdm.ibdm_mutex);
1766 --ibdm.ibdm_ngid_probes_in_progress;
1767 ibdm_wakeup_probe_gid_cv();
1768 mutex_exit(&ibdm.ibdm_mutex);
1769 return;
1770 }
1771 ibdm_add_to_gl_gid(gid_info, gid_info);
1772 mutex_exit(&ibdm.ibdm_mutex);
1773
1774 /*
1775 * New or reinserted GID : Enable notification to IBnex
1776 */
1777 mutex_enter(&gid_info->gl_mutex);
1778 gid_info->gl_reprobe_flag = 1;
1779
1780 /*
1781 * A Cisco FC GW needs special handling to get IOUnitInfo.
1782 */
1783 if (ibdm_is_cisco_switch(gid_info)) {
1784 gid_info->gl_pending_cmds++;
1785 gid_info->gl_state = IBDM_SET_CLASSPORTINFO;
1786 mutex_exit(&gid_info->gl_mutex);
1787
1788 if (ibdm_set_classportinfo(gid_info) != IBDM_SUCCESS) {
1789 mutex_enter(&gid_info->gl_mutex);
1790 gid_info->gl_state = IBDM_GID_PROBING_FAILED;
1791 --gid_info->gl_pending_cmds;
1792 mutex_exit(&gid_info->gl_mutex);
1793
1794 /* free the hca_list on this gid_info */
1795 ibdm_delete_glhca_list(gid_info);
1796
1797 mutex_enter(&ibdm.ibdm_mutex);
1798 --ibdm.ibdm_ngid_probes_in_progress;
1799 ibdm_wakeup_probe_gid_cv();
1800 mutex_exit(&ibdm.ibdm_mutex);
1801
1802 return;
1803 }
1804
1805 mutex_enter(&gid_info->gl_mutex);
1806 ibdm_wait_cisco_probe_completion(gid_info);
1807
1808 IBTF_DPRINTF_L4("ibdm", "\tibdm_probe_gid_thread: "
1809 "CISCO Wakeup signal received");
1810 }
1811
1812 /* move on to the 'GET_CLASSPORTINFO' stage */
1813 gid_info->gl_pending_cmds++;
1814 gid_info->gl_state = IBDM_GET_CLASSPORTINFO;
1815 mutex_exit(&gid_info->gl_mutex);
1816
1817 IBTF_DPRINTF_L3(ibdm_string, "\tibdm_probe_gid_thread: "
1818 "%d: gid_info %p gl_state %d pending_cmds %d",
1819 __LINE__, gid_info, gid_info->gl_state,
1820 gid_info->gl_pending_cmds);
1821
1822 /*
1823 * Send ClassPortInfo request to the GID asynchronously.
1824 */
1825 if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) {
1826
1827 mutex_enter(&gid_info->gl_mutex);
1828 gid_info->gl_state = IBDM_GID_PROBING_FAILED;
1829 --gid_info->gl_pending_cmds;
1830 mutex_exit(&gid_info->gl_mutex);
1831
1832 /* free the hca_list on this gid_info */
1833 ibdm_delete_glhca_list(gid_info);
1834
1835 mutex_enter(&ibdm.ibdm_mutex);
1836 --ibdm.ibdm_ngid_probes_in_progress;
1837 ibdm_wakeup_probe_gid_cv();
1838 mutex_exit(&ibdm.ibdm_mutex);
1839
1840 return;
1841 }
1842 }
1843
1844
1845 /*
1846 * ibdm_check_dest_nodeguid
1847 * Searches for the NodeGuid in the GID list
 *	Returns the matching gid_info if found, otherwise NULL
 *
 *	This function is called to handle new GIDs discovered
 *	during a device sweep / probe or for a GID_AVAILABLE event.
1852 *
1853 * Parameter :
1854 * gid_info GID to check
1855 */
1856 static ibdm_dp_gidinfo_t *
1857 ibdm_check_dest_nodeguid(ibdm_dp_gidinfo_t *gid_info)
1858 {
1859 ibdm_dp_gidinfo_t *gid_list;
1860 ibdm_gid_t *tmp;
1861
1862 IBTF_DPRINTF_L4("ibdm", "\tcheck_dest_nodeguid");
1863
1864 gid_list = ibdm.ibdm_dp_gidlist_head;
1865 while (gid_list) {
1866 if ((gid_list != gid_info) &&
1867 (gid_info->gl_nodeguid == gid_list->gl_nodeguid)) {
1868 IBTF_DPRINTF_L4("ibdm",
1869 "\tcheck_dest_nodeguid: NodeGuid is present");
1870
1871 /* Add to gid_list */
1872 tmp = kmem_zalloc(sizeof (ibdm_gid_t),
1873 KM_SLEEP);
1874 tmp->gid_dgid_hi = gid_info->gl_dgid_hi;
1875 tmp->gid_dgid_lo = gid_info->gl_dgid_lo;
1876 tmp->gid_next = gid_list->gl_gid;
1877 gid_list->gl_gid = tmp;
1878 gid_list->gl_ngids++;
1879 return (gid_list);
1880 }
1881
1882 gid_list = gid_list->gl_next;
1883 }
1884
1885 return (NULL);
1886 }
1887
1888
1889 /*
1890 * ibdm_is_dev_mgt_supported
1891 * Get the PortInfo attribute (SA Query)
 *	Check the "CapabilityMask" field in the PortInfo.
 *	Return IBDM_SUCCESS if DM MADs are supported by the port
 *	(bit 19, IsDeviceManagementSupported, is set), otherwise
 *	IBDM_FAILURE
1895 */
1896 static int
1897 ibdm_is_dev_mgt_supported(ibdm_dp_gidinfo_t *gid_info)
1898 {
1899 int ret;
1900 size_t length = 0;
1901 sa_portinfo_record_t req, *resp = NULL;
1902 ibmf_saa_access_args_t qargs;
1903
1904 bzero(&req, sizeof (sa_portinfo_record_t));
1905 req.EndportLID = gid_info->gl_dlid;
1906
1907 qargs.sq_attr_id = SA_PORTINFORECORD_ATTRID;
1908 qargs.sq_access_type = IBMF_SAA_RETRIEVE;
1909 qargs.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID;
1910 qargs.sq_template = &req;
1911 qargs.sq_callback = NULL;
1912 qargs.sq_callback_arg = NULL;
1913
1914 ret = ibmf_sa_access(gid_info->gl_sa_hdl,
1915 &qargs, 0, &length, (void **)&resp);
1916
1917 if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) {
1918 IBTF_DPRINTF_L2("ibdm", "\tis_dev_mgt_supported:"
1919 "failed to get PORTINFO attribute %d", ret);
1920 return (IBDM_FAILURE);
1921 }
1922
1923 if (resp->PortInfo.CapabilityMask & SM_CAP_MASK_IS_DM_SUPPD) {
1924 IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: SUPPD !!");
1925 ret = IBDM_SUCCESS;
1926 } else {
1927 IBTF_DPRINTF_L4("ibdm", "\tis_dev_mgt_supported: "
1928 "Not SUPPD !!, cap 0x%x", resp->PortInfo.CapabilityMask);
1929 ret = IBDM_FAILURE;
1930 }
1931 kmem_free(resp, length);
1932 return (ret);
1933 }
1934
1935
1936 /*
1937 * ibdm_get_node_port_guids()
 *	Get the NodeRecord of the port via an SA query
1939 * Save NodeGuid and PortGUID values in the GID list structure.
1940 * Return IBDM_SUCCESS/IBDM_FAILURE
1941 */
1942 static int
1943 ibdm_get_node_port_guids(ibmf_saa_handle_t sa_hdl, ib_lid_t dlid,
1944 ib_guid_t *node_guid, ib_guid_t *port_guid)
1945 {
1946 int ret;
1947 size_t length = 0;
1948 sa_node_record_t req, *resp = NULL;
1949 ibmf_saa_access_args_t qargs;
1950
1951 IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids");
1952
1953 bzero(&req, sizeof (sa_node_record_t));
1954 req.LID = dlid;
1955
1956 qargs.sq_attr_id = SA_NODERECORD_ATTRID;
1957 qargs.sq_access_type = IBMF_SAA_RETRIEVE;
1958 qargs.sq_component_mask = SA_NODEINFO_COMPMASK_NODELID;
1959 qargs.sq_template = &req;
1960 qargs.sq_callback = NULL;
1961 qargs.sq_callback_arg = NULL;
1962
1963 ret = ibmf_sa_access(sa_hdl, &qargs, 0, &length, (void **)&resp);
1964 if ((ret != IBMF_SUCCESS) || (length == 0) || (resp == NULL)) {
1965 IBTF_DPRINTF_L2("ibdm", "\tget_node_port_guids:"
1966 " SA Retrieve Failed: %d", ret);
1967 return (IBDM_FAILURE);
1968 }
	IBTF_DPRINTF_L4("ibdm", "\tget_node_port_guids: NodeGuid %llx "
	    "PortGUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID);
1971
1972 *node_guid = resp->NodeInfo.NodeGUID;
1973 *port_guid = resp->NodeInfo.PortGUID;
1974 kmem_free(resp, length);
1975 return (IBDM_SUCCESS);
1976 }
1977
1978
1979 /*
1980 * ibdm_get_reachable_ports()
 *	Get the list of destination GIDs (and their path records)
 *	by querying the SA access.
 *
 *	Returns the number of paths found
1985 */
1986 static int
1987 ibdm_get_reachable_ports(ibdm_port_attr_t *portinfo, ibdm_hca_list_t *hca)
1988 {
1989 uint_t ii, jj, nrecs;
1990 uint_t npaths = 0;
1991 size_t length;
1992 ib_gid_t sgid;
1993 ibdm_pkey_tbl_t *pkey_tbl;
1994 sa_path_record_t *result;
1995 sa_path_record_t *precp;
1996 ibdm_dp_gidinfo_t *gid_info;
1997
1998 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
1999 IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: portinfo %p", portinfo);
2000
2001 sgid.gid_prefix = portinfo->pa_sn_prefix;
2002 sgid.gid_guid = portinfo->pa_port_guid;
2003
2004 /* get reversible paths */
2005 if (portinfo->pa_sa_hdl && ibmf_saa_paths_from_gid(portinfo->pa_sa_hdl,
2006 sgid, IBMF_SAA_PKEY_WC, B_TRUE, 0, &nrecs, &length, &result)
2007 != IBMF_SUCCESS) {
2008 IBTF_DPRINTF_L2("ibdm",
2009 "\tget_reachable_ports: Getting path records failed");
2010 return (0);
2011 }
2012
2013 for (ii = 0; ii < nrecs; ii++) {
2014 sa_node_record_t *nrec;
2015 size_t length;
2016
2017 precp = &result[ii];
2018 if ((gid_info = ibdm_check_dgid(precp->DGID.gid_guid,
2019 precp->DGID.gid_prefix)) != NULL) {
2020 IBTF_DPRINTF_L5("ibdm", "\tget_reachable_ports: "
2021 "Already exists nrecs %d, ii %d", nrecs, ii);
2022 ibdm_addto_glhcalist(gid_info, hca);
2023 continue;
2024 }
2025 /*
		 * This is a new GID. Allocate a GID structure and
		 * initialize it. gl_state is initialized to
		 * IBDM_GID_PROBE_NOT_DONE (0) by the kmem_zalloc call.
2030 */
2031 gid_info = kmem_zalloc(sizeof (ibdm_dp_gidinfo_t), KM_SLEEP);
2032 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL);
2033 cv_init(&gid_info->gl_probe_cv, NULL, CV_DRIVER, NULL);
2034 gid_info->gl_dgid_hi = precp->DGID.gid_prefix;
2035 gid_info->gl_dgid_lo = precp->DGID.gid_guid;
2036 gid_info->gl_sgid_hi = precp->SGID.gid_prefix;
2037 gid_info->gl_sgid_lo = precp->SGID.gid_guid;
2038 gid_info->gl_p_key = precp->P_Key;
2039 gid_info->gl_sa_hdl = portinfo->pa_sa_hdl;
2040 gid_info->gl_ibmf_hdl = portinfo->pa_ibmf_hdl;
2041 gid_info->gl_slid = precp->SLID;
2042 gid_info->gl_dlid = precp->DLID;
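		/*
		 * Reserve a distinct block of transaction IDs for this
		 * GID (bounded by gl_min/gl_max_transactionID). Responses
		 * are later matched back to their GID by masking the
		 * transaction ID with IBDM_GID_TRANSACTIONID_MASK in
		 * ibdm_process_incoming_mad().
		 */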
2043 gid_info->gl_transactionID = (++ibdm.ibdm_transactionID)
2044 << IBDM_GID_TRANSACTIONID_SHIFT;
2045 gid_info->gl_min_transactionID = gid_info->gl_transactionID;
2046 gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID +1)
2047 << IBDM_GID_TRANSACTIONID_SHIFT;
2048 gid_info->gl_SL = precp->SL;
2049
2050 /*
2051 * get the node record with this guid if the destination
2052 * device is a Cisco one.
2053 */
2054 if (ibdm_is_cisco(precp->DGID.gid_guid) &&
2055 (gid_info->gl_nodeguid == 0 || gid_info->gl_devid == 0) &&
2056 ibdm_get_node_record_by_port(portinfo->pa_sa_hdl,
2057 precp->DGID.gid_guid, &nrec, &length) == IBDM_SUCCESS) {
2058 gid_info->gl_nodeguid = nrec->NodeInfo.NodeGUID;
2059 gid_info->gl_devid = nrec->NodeInfo.DeviceID;
2060 kmem_free(nrec, length);
2061 }
2062
2063 ibdm_addto_glhcalist(gid_info, hca);
2064
2065 ibdm_dump_path_info(precp);
2066
2067 gid_info->gl_qp_hdl = NULL;
2068 ASSERT(portinfo->pa_pkey_tbl != NULL &&
2069 portinfo->pa_npkeys != 0);
2070
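		/*
		 * Find the IBMF alternate QP handle associated with this
		 * P_Key in the port's pkey table; DM MADs to this GID are
		 * sent on that QP (gl_qp_hdl).
		 */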
2071 for (jj = 0; jj < portinfo->pa_npkeys; jj++) {
2072 pkey_tbl = &portinfo->pa_pkey_tbl[jj];
2073 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) &&
2074 (pkey_tbl->pt_qp_hdl != NULL)) {
2075 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl;
2076 break;
2077 }
2078 }
2079
2080 /*
		 * The QP handle for the GID was not initialized because no
		 * matching Pkey was found. ibdm should *not* hit this case;
		 * flag an error and drop the GID if it does.
2084 */
2085 if (gid_info->gl_qp_hdl == NULL) {
2086 IBTF_DPRINTF_L2(ibdm_string,
2087 "\tget_reachable_ports: No matching Pkey");
2088 ibdm_delete_gidinfo(gid_info);
2089 continue;
2090 }
2091 if (ibdm.ibdm_dp_gidlist_head == NULL) {
2092 ibdm.ibdm_dp_gidlist_head = gid_info;
2093 ibdm.ibdm_dp_gidlist_tail = gid_info;
2094 } else {
2095 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info;
2096 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail;
2097 ibdm.ibdm_dp_gidlist_tail = gid_info;
2098 }
2099 npaths++;
2100 }
2101 kmem_free(result, length);
2102 IBTF_DPRINTF_L4("ibdm", "\tget_reachable_ports: npaths = %d", npaths);
2103 return (npaths);
2104 }
2105
2106
2107 /*
2108 * ibdm_check_dgid()
2109 * Look in the global list to check whether we know this DGID already
 *	Return a pointer to the matching gidinfo if found, else NULL
2111 */
2112 static ibdm_dp_gidinfo_t *
2113 ibdm_check_dgid(ib_guid_t guid, ib_sn_prefix_t prefix)
2114 {
2115 ibdm_dp_gidinfo_t *gid_list;
2116
2117 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
2118 gid_list = gid_list->gl_next) {
2119 if ((guid == gid_list->gl_dgid_lo) &&
2120 (prefix == gid_list->gl_dgid_hi)) {
2121 break;
2122 }
2123 }
2124 return (gid_list);
2125 }
2126
2127
2128 /*
2129 * ibdm_find_gid()
2130 * Look in the global list to find a GID entry with matching
2131 * port & node GUID.
2132 * Return pointer to gidinfo if found, else return NULL
2133 */
2134 static ibdm_dp_gidinfo_t *
2135 ibdm_find_gid(ib_guid_t nodeguid, ib_guid_t portguid)
2136 {
2137 ibdm_dp_gidinfo_t *gid_list;
2138
2139 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid(%llx, %llx)\n",
2140 nodeguid, portguid);
2141
2142 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
2143 gid_list = gid_list->gl_next) {
2144 if ((portguid == gid_list->gl_portguid) &&
2145 (nodeguid == gid_list->gl_nodeguid)) {
2146 break;
2147 }
2148 }
2149
2150 IBTF_DPRINTF_L4("ibdm", "ibdm_find_gid : returned %p\n",
2151 gid_list);
2152 return (gid_list);
2153 }
2154
2155
2156 /*
2157 * ibdm_set_classportinfo()
 *	Activates a Cisco FC GW by sending a SetClassPortInfo request
 *	carrying the trap LID, trap GID, etc. to the gateway, since the
 *	gateway does not provide the IO Unit Information otherwise.
 *	This behavior is Cisco specific, so this function is called
 *	only for a Cisco FC GW.
2163 * Returns IBDM_SUCCESS/IBDM_FAILURE
2164 */
2165 static int
2166 ibdm_set_classportinfo(ibdm_dp_gidinfo_t *gid_info)
2167 {
2168 ibmf_msg_t *msg;
2169 ib_mad_hdr_t *hdr;
2170 ibdm_timeout_cb_args_t *cb_args;
2171 void *data;
2172 ib_mad_classportinfo_t *cpi;
2173
2174 IBTF_DPRINTF_L4("ibdm",
2175 "\tset_classportinfo: gid info 0x%p", gid_info);
2176
2177 /*
	 * Send a command to set the ClassPortInfo attribute. Allocate an
	 * IBMF packet and initialize it.
2180 */
2181 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP,
2182 &msg) != IBMF_SUCCESS) {
2183 IBTF_DPRINTF_L4("ibdm", "\tset_classportinfo: pkt alloc fail");
2184 return (IBDM_FAILURE);
2185 }
2186
2187 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
2188 ibdm_alloc_send_buffers(msg);
2189 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
2190
2191 msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
2192 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
2193 msg->im_local_addr.ia_remote_qno = 1;
2194 msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
2195 msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
2196 msg->im_local_addr.ia_service_level = gid_info->gl_SL;
2197
2198 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
2199 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
2200 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
2201 hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
2202 hdr->R_Method = IB_DM_DEVMGT_METHOD_SET;
2203 hdr->Status = 0;
2204 hdr->TransactionID = h2b64(gid_info->gl_transactionID);
2205 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO);
2206 hdr->AttributeModifier = 0;
2207
2208 data = msg->im_msgbufs_send.im_bufs_cl_data;
2209 cpi = (ib_mad_classportinfo_t *)data;
2210
2211 /*
2212 * Set the classportinfo values to activate this Cisco FC GW.
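	 * The trap fields are our source GID/LID/SL/P_Key and the
	 * QPN/Q_Key of the IBMF alternate QP on which ibdm receives
	 * DM MADs.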
2213 */
2214 cpi->TrapGID_hi = h2b64(gid_info->gl_sgid_hi);
2215 cpi->TrapGID_lo = h2b64(gid_info->gl_sgid_lo);
2216 cpi->TrapLID = h2b16(gid_info->gl_slid);
2217 cpi->TrapSL = gid_info->gl_SL;
2218 cpi->TrapP_Key = h2b16(gid_info->gl_p_key);
2219 cpi->TrapQP = h2b32((((ibmf_alt_qp_t *)gid_info->gl_qp_hdl)->isq_qpn));
2220 cpi->TrapQ_Key = h2b32((((ibmf_alt_qp_t *)
2221 gid_info->gl_qp_hdl)->isq_qkey));
2222
2223 cb_args = &gid_info->gl_cpi_cb_args;
2224 cb_args->cb_gid_info = gid_info;
2225 cb_args->cb_retry_count = ibdm_dft_retry_cnt;
2226 cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO;
2227
2228 mutex_enter(&gid_info->gl_mutex);
2229 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
2230 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
2231 mutex_exit(&gid_info->gl_mutex);
2232
2233 IBTF_DPRINTF_L5("ibdm", "\tset_classportinfo: "
2234 "timeout id %x", gid_info->gl_timeout_id);
2235
2236 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl,
2237 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
2238 IBTF_DPRINTF_L2("ibdm",
2239 "\tset_classportinfo: ibmf send failed");
2240 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args);
2241 }
2242
2243 return (IBDM_SUCCESS);
2244 }
2245
2246
2247 /*
2248 * ibdm_send_classportinfo()
 *	Send the ClassPortInfo request. When the response arrives,
 *	IBMF calls ibdm_ibmf_recv_cb(), which eventually dispatches it
 *	to ibdm_handle_classportinfo().
2252 * Returns IBDM_SUCCESS/IBDM_FAILURE
2253 */
2254 static int
2255 ibdm_send_classportinfo(ibdm_dp_gidinfo_t *gid_info)
2256 {
2257 ibmf_msg_t *msg;
2258 ib_mad_hdr_t *hdr;
2259 ibdm_timeout_cb_args_t *cb_args;
2260
2261 IBTF_DPRINTF_L4("ibdm",
2262 "\tsend_classportinfo: gid info 0x%p", gid_info);
2263
2264 /*
	 * Send a command to get the ClassPortInfo attribute. Allocate an
	 * IBMF packet and initialize it.
2267 */
2268 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP,
2269 &msg) != IBMF_SUCCESS) {
2270 IBTF_DPRINTF_L4("ibdm", "\tsend_classportinfo: pkt alloc fail");
2271 return (IBDM_FAILURE);
2272 }
2273
2274 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
2275 ibdm_alloc_send_buffers(msg);
2276 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
2277
2278 msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
2279 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
2280 msg->im_local_addr.ia_remote_qno = 1;
2281 msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
2282 msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
2283 msg->im_local_addr.ia_service_level = gid_info->gl_SL;
2284
2285 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
2286 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
2287 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
2288 hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
2289 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
2290 hdr->Status = 0;
2291 hdr->TransactionID = h2b64(gid_info->gl_transactionID);
2292 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO);
2293 hdr->AttributeModifier = 0;
2294
2295 cb_args = &gid_info->gl_cpi_cb_args;
2296 cb_args->cb_gid_info = gid_info;
2297 cb_args->cb_retry_count = ibdm_dft_retry_cnt;
2298 cb_args->cb_req_type = IBDM_REQ_TYPE_CLASSPORTINFO;
2299
2300 mutex_enter(&gid_info->gl_mutex);
2301 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
2302 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
2303 mutex_exit(&gid_info->gl_mutex);
2304
2305 IBTF_DPRINTF_L5("ibdm", "\tsend_classportinfo: "
2306 "timeout id %x", gid_info->gl_timeout_id);
2307
2308 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl,
2309 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
2310 IBTF_DPRINTF_L2("ibdm",
2311 "\tsend_classportinfo: ibmf send failed");
2312 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args);
2313 }
2314
2315 return (IBDM_SUCCESS);
2316 }
2317
2318
2319 /*
2320 * ibdm_handle_setclassportinfo()
 *	Invoked by the IBMF when the SetClassPortInfo request is completed.
2322 */
2323 static void
2324 ibdm_handle_setclassportinfo(ibmf_handle_t ibmf_hdl,
2325 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag)
2326 {
2327 void *data;
2328 timeout_id_t timeout_id;
2329 ib_mad_classportinfo_t *cpi;
2330
2331 IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo:ibmf hdl "
2332 "%p msg %p gid info %p", ibmf_hdl, msg, gid_info);
2333
2334 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) {
2335 IBTF_DPRINTF_L4("ibdm", "\thandle_setclassportinfo: "
2336 "Not a ClassPortInfo resp");
2337 *flag |= IBDM_IBMF_PKT_UNEXP_RESP;
2338 return;
2339 }
2340
2341 /*
	 * Verify whether the timeout handler is created/active.
	 * If created/active, cancel the timeout handler.
2344 */
2345 mutex_enter(&gid_info->gl_mutex);
2346 if (gid_info->gl_state != IBDM_SET_CLASSPORTINFO) {
2347 IBTF_DPRINTF_L2("ibdm", "\thandle_setclassportinfo:DUP resp");
2348 *flag |= IBDM_IBMF_PKT_DUP_RESP;
2349 mutex_exit(&gid_info->gl_mutex);
2350 return;
2351 }
2352 ibdm_bump_transactionID(gid_info);
2353
2354 gid_info->gl_iou_cb_args.cb_req_type = 0;
2355 if (gid_info->gl_timeout_id) {
2356 timeout_id = gid_info->gl_timeout_id;
2357 mutex_exit(&gid_info->gl_mutex);
		IBTF_DPRINTF_L5("ibdm", "handle_setclassportinfo: "
2359 "gl_timeout_id = 0x%x", timeout_id);
2360 if (untimeout(timeout_id) == -1) {
2361 IBTF_DPRINTF_L2("ibdm", "handle_setclassportinfo: "
2362 "untimeout gl_timeout_id failed");
2363 }
2364 mutex_enter(&gid_info->gl_mutex);
2365 gid_info->gl_timeout_id = 0;
2366 }
2367 mutex_exit(&gid_info->gl_mutex);
2368
2369 data = msg->im_msgbufs_recv.im_bufs_cl_data;
2370 cpi = (ib_mad_classportinfo_t *)data;
2371
2372 ibdm_dump_classportinfo(cpi);
2373 }
2374
2375
2376 /*
2377 * ibdm_handle_classportinfo()
2378 * Invoked by the IBMF when the classportinfo request is completed.
2379 */
2380 static void
2381 ibdm_handle_classportinfo(ibmf_handle_t ibmf_hdl,
2382 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag)
2383 {
2384 void *data;
2385 timeout_id_t timeout_id;
2386 ib_mad_hdr_t *hdr;
2387 ib_mad_classportinfo_t *cpi;
2388
2389 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo:ibmf hdl "
2390 "%p msg %p gid info %p", ibmf_hdl, msg, gid_info);
2391
2392 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO) {
2393 IBTF_DPRINTF_L4("ibdm", "\thandle_classportinfo: "
2394 "Not a ClassPortInfo resp");
2395 *flag |= IBDM_IBMF_PKT_UNEXP_RESP;
2396 return;
2397 }
2398
2399 /*
	 * Verify whether the timeout handler is created/active.
	 * If created/active, cancel the timeout handler.
2402 */
2403 mutex_enter(&gid_info->gl_mutex);
2404 ibdm_bump_transactionID(gid_info);
2405 if (gid_info->gl_state != IBDM_GET_CLASSPORTINFO) {
2406 IBTF_DPRINTF_L2("ibdm", "\thandle_classportinfo:DUP resp");
2407 *flag |= IBDM_IBMF_PKT_DUP_RESP;
2408 mutex_exit(&gid_info->gl_mutex);
2409 return;
2410 }
2411 gid_info->gl_iou_cb_args.cb_req_type = 0;
2412 if (gid_info->gl_timeout_id) {
2413 timeout_id = gid_info->gl_timeout_id;
2414 mutex_exit(&gid_info->gl_mutex);
		IBTF_DPRINTF_L5("ibdm", "handle_classportinfo: "
2416 "gl_timeout_id = 0x%x", timeout_id);
2417 if (untimeout(timeout_id) == -1) {
2418 IBTF_DPRINTF_L2("ibdm", "handle_classportinfo: "
2419 "untimeout gl_timeout_id failed");
2420 }
2421 mutex_enter(&gid_info->gl_mutex);
2422 gid_info->gl_timeout_id = 0;
2423 }
2424 gid_info->gl_state = IBDM_GET_IOUNITINFO;
2425 gid_info->gl_pending_cmds++;
2426 mutex_exit(&gid_info->gl_mutex);
2427
2428 data = msg->im_msgbufs_recv.im_bufs_cl_data;
2429 cpi = (ib_mad_classportinfo_t *)data;
2430
2431 /*
2432 * Cache the "RespTimeValue" and redirection information in the
2433 * global gid list data structure. This cached information will
2434 * be used to send any further requests to the GID.
2435 */
2436 gid_info->gl_resp_timeout =
2437 (b2h32(cpi->RespTimeValue) & 0x1F);
2438
2439 gid_info->gl_redirected = ((IBDM_IN_IBMFMSG_STATUS(msg) &
2440 MAD_STATUS_REDIRECT_REQUIRED) ? B_TRUE: B_FALSE);
2441 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID);
2442 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff);
2443 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key);
2444 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key);
2445 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi);
2446 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo);
2447 gid_info->gl_redirectSL = cpi->RedirectSL;
2448
2449 ibdm_dump_classportinfo(cpi);
2450
2451 /*
	 * Send the IOUnitInfo request.
	 * Reuse the IBMF packet previously allocated for ClassPortInfo.
	 * Check whether the DM agent on the remote node requested
	 * redirection; if so, send the request to the redirect
	 * DGID/DLID/PKEY/QP.
2456 */
2457 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
2458 ibdm_alloc_send_buffers(msg);
2459 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
2460 msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
2461 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
2462
2463 if (gid_info->gl_redirected == B_TRUE) {
2464 if (gid_info->gl_redirect_dlid != 0) {
2465 msg->im_local_addr.ia_remote_lid =
2466 gid_info->gl_redirect_dlid;
2467 }
2468 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP;
2469 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey;
2470 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey;
2471 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL;
2472 } else {
2473 msg->im_local_addr.ia_remote_qno = 1;
2474 msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
2475 msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
2476 msg->im_local_addr.ia_service_level = gid_info->gl_SL;
2477 }
2478
2479 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
2480 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
2481 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
2482 hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
2483 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
2484 hdr->Status = 0;
2485 hdr->TransactionID = h2b64(gid_info->gl_transactionID);
2486 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO);
2487 hdr->AttributeModifier = 0;
2488
2489 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO;
2490 gid_info->gl_iou_cb_args.cb_gid_info = gid_info;
2491 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt;
2492
2493 mutex_enter(&gid_info->gl_mutex);
2494 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
2495 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
2496 mutex_exit(&gid_info->gl_mutex);
2497
2498 IBTF_DPRINTF_L5("ibdm", "handle_classportinfo:"
2499 "timeout %x", gid_info->gl_timeout_id);
2500
2501 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg, NULL,
2502 ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) != IBMF_SUCCESS) {
2503 IBTF_DPRINTF_L2("ibdm",
2504 "\thandle_classportinfo: msg transport failed");
2505 ibdm_ibmf_send_cb(ibmf_hdl, msg, &gid_info->gl_iou_cb_args);
2506 }
2507 (*flag) |= IBDM_IBMF_PKT_REUSED;
2508 }
2509
2510
2511 /*
2512 * ibdm_send_iounitinfo:
 *	Sends a DM request to get the IOUnitInfo attribute.
2514 */
2515 static int
2516 ibdm_send_iounitinfo(ibdm_dp_gidinfo_t *gid_info)
2517 {
2518 ibmf_msg_t *msg;
2519 ib_mad_hdr_t *hdr;
2520
2521 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: gid info 0x%p", gid_info);
2522
2523 /*
	 * Send a command to get the IOUnitInfo attribute. Allocate an
	 * IBMF packet and initialize it.
2526 */
2527 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP, &msg) !=
2528 IBMF_SUCCESS) {
2529 IBTF_DPRINTF_L4("ibdm", "\tsend_iounitinfo: pkt alloc fail");
2530 return (IBDM_FAILURE);
2531 }
2532
2533 mutex_enter(&gid_info->gl_mutex);
2534 ibdm_bump_transactionID(gid_info);
2535 mutex_exit(&gid_info->gl_mutex);
2536
2537
2538 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
2539 ibdm_alloc_send_buffers(msg);
2540 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
2541 msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
2542 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
2543 msg->im_local_addr.ia_remote_qno = 1;
2544 msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
2545 msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
2546 msg->im_local_addr.ia_service_level = gid_info->gl_SL;
2547
2548 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
2549 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
2550 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
2551 hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
2552 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
2553 hdr->Status = 0;
2554 hdr->TransactionID = h2b64(gid_info->gl_transactionID);
2555 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO);
2556 hdr->AttributeModifier = 0;
2557
2558 gid_info->gl_iou_cb_args.cb_gid_info = gid_info;
2559 gid_info->gl_iou_cb_args.cb_retry_count = ibdm_dft_retry_cnt;
2560 gid_info->gl_iou_cb_args.cb_req_type = IBDM_REQ_TYPE_IOUINFO;
2561
2562 mutex_enter(&gid_info->gl_mutex);
2563 gid_info->gl_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
2564 &gid_info->gl_iou_cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
2565 mutex_exit(&gid_info->gl_mutex);
2566
	IBTF_DPRINTF_L5("ibdm", "send_iounitinfo:"
2568 "timeout %x", gid_info->gl_timeout_id);
2569
2570 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg,
2571 NULL, ibdm_ibmf_send_cb, &gid_info->gl_iou_cb_args, 0) !=
2572 IBMF_SUCCESS) {
2573 IBTF_DPRINTF_L2("ibdm", "\tsend_iounitinfo: ibmf send failed");
2574 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl,
2575 msg, &gid_info->gl_iou_cb_args);
2576 }
2577 return (IBDM_SUCCESS);
2578 }
2579
2580 /*
2581 * ibdm_handle_iounitinfo()
 *	Invoked by the IBMF when the IOUnitInfo request is completed.
2583 */
2584 static void
2585 ibdm_handle_iounitinfo(ibmf_handle_t ibmf_hdl,
2586 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag)
2587 {
2588 int ii, first = B_TRUE;
2589 int num_iocs;
2590 size_t size;
2591 uchar_t slot_info;
2592 timeout_id_t timeout_id;
2593 ib_mad_hdr_t *hdr;
2594 ibdm_ioc_info_t *ioc_info;
2595 ib_dm_io_unitinfo_t *iou_info;
2596 ib_dm_io_unitinfo_t *giou_info;
2597 ibdm_timeout_cb_args_t *cb_args;
2598
	IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo:"
2600 " ibmf hdl %p pkt %p gid info %p", ibmf_hdl, msg, gid_info);
2601
2602 if (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_IO_UNITINFO) {
2603 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: "
2604 "Unexpected response");
2605 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
2606 return;
2607 }
2608
2609 mutex_enter(&gid_info->gl_mutex);
2610 if (gid_info->gl_state != IBDM_GET_IOUNITINFO) {
2611 IBTF_DPRINTF_L4("ibdm",
2612 "\thandle_iounitinfo: DUP resp");
2613 mutex_exit(&gid_info->gl_mutex);
2614 (*flag) = IBDM_IBMF_PKT_DUP_RESP;
2615 return;
2616 }
2617 gid_info->gl_iou_cb_args.cb_req_type = 0;
2618 if (gid_info->gl_timeout_id) {
2619 timeout_id = gid_info->gl_timeout_id;
2620 mutex_exit(&gid_info->gl_mutex);
2621 IBTF_DPRINTF_L5("ibdm", "handle_iounitinfo: "
2622 "gl_timeout_id = 0x%x", timeout_id);
2623 if (untimeout(timeout_id) == -1) {
2624 IBTF_DPRINTF_L2("ibdm", "handle_iounitinfo: "
2625 "untimeout gl_timeout_id failed");
2626 }
2627 mutex_enter(&gid_info->gl_mutex);
2628 gid_info->gl_timeout_id = 0;
2629 }
2630 gid_info->gl_state = IBDM_GET_IOC_DETAILS;
2631
2632 iou_info = IBDM_IN_IBMFMSG2IOU(msg);
2633 ibdm_dump_iounitinfo(iou_info);
2634 num_iocs = iou_info->iou_num_ctrl_slots;
2635 /*
	 * If the number of IOCs reported is zero and none were known
	 * before, return. When num_iocs drops to zero the internal IOC
	 * database still needs to be updated, so the IOC count is saved
	 * in the separate field "gl_num_iocs". A separate field is used
	 * instead of "giou_info->iou_num_ctrl_slots" to avoid an
	 * unnecessary kmem_alloc/kmem_free when num_iocs is 0.
2642 */
2643 if (num_iocs == 0 && gid_info->gl_num_iocs == 0) {
2644 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: no IOC's");
2645 mutex_exit(&gid_info->gl_mutex);
2646 return;
2647 }
2648 IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: num_iocs = %d", num_iocs);
2649
2650 /*
	 * If there is an existing gl_iou (the IOU has been probed
	 * before), check whether the "iou_changeid" matches the saved
	 * "giou_info->iou_changeid".
	 * (Note: this logic can prevent IOC enumeration if a given
	 * vendor doesn't support setting the iou_changeid field for its
	 * IOU.)
	 *
	 * If there is an existing gl_iou and iou_changeid has changed,
	 * free the existing gl_iou info and its related structures and
	 * reallocate gl_iou info all over again; not freeing it would
	 * lead to memory leaks.
2661 */
2662 if (gid_info->gl_iou) {
2663 giou_info = &gid_info->gl_iou->iou_info;
2664 if (b2h16(iou_info->iou_changeid) ==
2665 giou_info->iou_changeid) {
2666 IBTF_DPRINTF_L3("ibdm",
2667 "\thandle_iounitinfo: no IOCs changed");
2668 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE;
2669 mutex_exit(&gid_info->gl_mutex);
2670 return;
2671 }
2672
2673 /*
2674 * Store the iou info as prev_iou to be used after
2675 * sweep is done.
2676 */
2677 ASSERT(gid_info->gl_prev_iou == NULL);
2678 IBTF_DPRINTF_L4(ibdm_string,
2679 "\thandle_iounitinfo: setting gl_prev_iou %p",
2680 gid_info->gl_prev_iou);
2681 gid_info->gl_prev_iou = gid_info->gl_iou;
2682 ibdm.ibdm_prev_iou = 1;
2683 gid_info->gl_iou = NULL;
2684 }
2685
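	/*
	 * The IOU info header and the per-IOC array are carved out of a
	 * single allocation; iou_ioc_info points just past the header.
	 */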
2686 size = sizeof (ibdm_iou_info_t) + num_iocs * sizeof (ibdm_ioc_info_t);
2687 gid_info->gl_iou = (ibdm_iou_info_t *)kmem_zalloc(size, KM_SLEEP);
2688 giou_info = &gid_info->gl_iou->iou_info;
2689 gid_info->gl_iou->iou_ioc_info = (ibdm_ioc_info_t *)
2690 ((char *)gid_info->gl_iou + sizeof (ibdm_iou_info_t));
2691
2692 giou_info->iou_num_ctrl_slots = gid_info->gl_num_iocs = num_iocs;
2693 giou_info->iou_flag = iou_info->iou_flag;
2694 bcopy(iou_info->iou_ctrl_list, giou_info->iou_ctrl_list, 128);
2695 giou_info->iou_changeid = b2h16(iou_info->iou_changeid);
2696 gid_info->gl_pending_cmds++; /* for diag code */
2697 mutex_exit(&gid_info->gl_mutex);
2698
2699 if (ibdm_get_diagcode(gid_info, 0) != IBDM_SUCCESS) {
2700 mutex_enter(&gid_info->gl_mutex);
2701 gid_info->gl_pending_cmds--;
2702 mutex_exit(&gid_info->gl_mutex);
2703 }
2704 /*
2705 * Parallelize getting IOC controller profiles from here.
2706 * Allocate IBMF packets and send commands to get IOC profile for
2707 * each IOC present on the IOU.
2708 */
2709 for (ii = 0; ii < num_iocs; ii++) {
2710 /*
		 * Check whether an IOC is present in the slot.
		 * A series of nibbles (in the field iou_ctrl_list)
		 * represents the slots in the IOU, one nibble per slot.
2714 * Byte format: 76543210
2715 * Bits 0-3 of first byte represent Slot 2
2716 * bits 4-7 of first byte represent slot 1,
2717 * bits 0-3 of second byte represent slot 4 and so on
2718 * Each 4-bit nibble has the following meaning
2719 * 0x0 : IOC not installed
2720 * 0x1 : IOC is present
2721 * 0xf : Slot does not exist
2722 * and all other values are reserved.
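		 * For example, ii == 2 (slot 3) reads the high nibble of
		 * iou_ctrl_list[1], and ii == 3 (slot 4) reads the low
		 * nibble of the same byte.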
2723 */
2724 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, ii);
2725 slot_info = giou_info->iou_ctrl_list[(ii/2)];
2726 if ((ii % 2) == 0)
2727 slot_info = (slot_info >> 4);
2728
2729 if ((slot_info & 0xf) != 1) {
			IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: "
2731 "No IOC is present in the slot = %d", ii);
2732 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_FAILED;
2733 continue;
2734 }
2735
2736 mutex_enter(&gid_info->gl_mutex);
2737 ibdm_bump_transactionID(gid_info);
2738 mutex_exit(&gid_info->gl_mutex);
2739
2740 /*
		 * Reuse the already allocated packet (for IOUnitInfo) to
		 * send the first IOC controller attribute. Allocate new
		 * IBMF packets for the rest of the IOCs.
2744 */
2745 if (first != B_TRUE) {
2746 msg = NULL;
2747 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP,
2748 &msg) != IBMF_SUCCESS) {
				IBTF_DPRINTF_L4("ibdm", "\thandle_iounitinfo: "
2750 "IBMF packet allocation failed");
2751 continue;
2752 }
2753
2754 }
2755
2756 /* allocate send buffers for all messages */
2757 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
2758 ibdm_alloc_send_buffers(msg);
2759 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
2760
2761 msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
2762 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
2763 if (gid_info->gl_redirected == B_TRUE) {
2764 if (gid_info->gl_redirect_dlid != 0) {
2765 msg->im_local_addr.ia_remote_lid =
2766 gid_info->gl_redirect_dlid;
2767 }
2768 msg->im_local_addr.ia_remote_qno =
2769 gid_info->gl_redirect_QP;
2770 msg->im_local_addr.ia_p_key =
2771 gid_info->gl_redirect_pkey;
2772 msg->im_local_addr.ia_q_key =
2773 gid_info->gl_redirect_qkey;
2774 msg->im_local_addr.ia_service_level =
2775 gid_info->gl_redirectSL;
2776 } else {
2777 msg->im_local_addr.ia_remote_qno = 1;
2778 msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
2779 msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
2780 msg->im_local_addr.ia_service_level = gid_info->gl_SL;
2781 }
2782
2783 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
2784 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
2785 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
2786 hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
2787 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
2788 hdr->Status = 0;
2789 hdr->TransactionID = h2b64(gid_info->gl_transactionID);
2790 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE);
2791 hdr->AttributeModifier = h2b32(ii + 1);
2792
2793 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_INVALID;
2794 cb_args = &ioc_info->ioc_cb_args;
2795 cb_args->cb_gid_info = gid_info;
2796 cb_args->cb_retry_count = ibdm_dft_retry_cnt;
2797 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO;
2798 cb_args->cb_ioc_num = ii;
2799
2800 mutex_enter(&gid_info->gl_mutex);
2801 gid_info->gl_pending_cmds++; /* for diag code */
2802
2803 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
2804 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
2805 mutex_exit(&gid_info->gl_mutex);
2806
2807 IBTF_DPRINTF_L5("ibdm", "\thandle_iounitinfo:"
2808 "timeout 0x%x, ioc_num %d", ioc_info->ioc_timeout_id, ii);
2809
2810 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg,
2811 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
2812 IBTF_DPRINTF_L2("ibdm",
2813 "\thandle_iounitinfo: msg transport failed");
2814 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args);
2815 }
2816 (*flag) |= IBDM_IBMF_PKT_REUSED;
2817 first = B_FALSE;
2818 gid_info->gl_iou->iou_niocs_probe_in_progress++;
2819 }
2820 }
2821
2822
2823 /*
2824 * ibdm_handle_ioc_profile()
2825 * Invoked by the IBMF when the IOCControllerProfile request
2826 * gets completed
2827 */
2828 static void
2829 ibdm_handle_ioc_profile(ibmf_handle_t ibmf_hdl,
2830 ibmf_msg_t *msg, ibdm_dp_gidinfo_t *gid_info, int *flag)
2831 {
2832 int first = B_TRUE, reprobe = 0;
2833 uint_t ii, ioc_no, srv_start;
2834 uint_t nserv_entries;
2835 timeout_id_t timeout_id;
2836 ib_mad_hdr_t *hdr;
2837 ibdm_ioc_info_t *ioc_info;
2838 ibdm_timeout_cb_args_t *cb_args;
2839 ib_dm_ioc_ctrl_profile_t *ioc, *gioc;
2840
2841 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:"
2842 " ibmf hdl %p msg %p gid info %p", ibmf_hdl, msg, gid_info);
2843
2844 ioc = IBDM_IN_IBMFMSG2IOC(msg);
2845 /*
	 * Check whether we already know this IOC.
	 * This returns NULL if a reprobe is in progress, in which case
	 * IBDM_IOC_STATE_REPROBE_PROGRESS will be set.
	 * Do not hold mutexes here.
2850 */
2851 if (ibdm_is_ioc_present(ioc->ioc_guid, gid_info, flag) != NULL) {
2852 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile:"
2853 "IOC guid %llx is present", ioc->ioc_guid);
2854 return;
2855 }
2856 ioc_no = IBDM_IN_IBMFMSG_ATTRMOD(msg);
2857 IBTF_DPRINTF_L4("ibdm", "\thandle_ioc_profile: ioc_no = %d", ioc_no-1);
2858
	/* Make sure that the IOC index is within the valid range */
2860 if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) {
2861 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: "
		    "IOC index Out of range, index %d", ioc_no);
2863 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
2864 return;
2865 }
2866 ioc_info = &gid_info->gl_iou->iou_ioc_info[ioc_no - 1];
2867 ioc_info->ioc_iou_info = gid_info->gl_iou;
2868
2869 mutex_enter(&gid_info->gl_mutex);
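	/*
	 * On a reprobe, stash the current service entry list and count
	 * in ioc_prev_serv/ioc_prev_serv_cnt before a fresh copy is
	 * fetched.
	 */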
2870 if (ioc_info->ioc_state == IBDM_IOC_STATE_REPROBE_PROGRESS) {
2871 reprobe = 1;
2872 ioc_info->ioc_prev_serv = ioc_info->ioc_serv;
2873 ioc_info->ioc_serv = NULL;
2874 ioc_info->ioc_prev_serv_cnt =
2875 ioc_info->ioc_profile.ioc_service_entries;
2876 } else if (ioc_info->ioc_state != IBDM_IOC_STATE_PROBE_INVALID) {
2877 IBTF_DPRINTF_L2("ibdm", "\thandle_ioc_profile: DUP response"
2878 "ioc %d, ioc_state %x", ioc_no - 1, ioc_info->ioc_state);
2879 mutex_exit(&gid_info->gl_mutex);
2880 (*flag) |= IBDM_IBMF_PKT_DUP_RESP;
2881 return;
2882 }
2883 ioc_info->ioc_cb_args.cb_req_type = 0;
2884 if (ioc_info->ioc_timeout_id) {
2885 timeout_id = ioc_info->ioc_timeout_id;
2886 ioc_info->ioc_timeout_id = 0;
2887 mutex_exit(&gid_info->gl_mutex);
2888 IBTF_DPRINTF_L5("ibdm", "handle_ioc_profile: "
2889 "ioc_timeout_id = 0x%x", timeout_id);
2890 if (untimeout(timeout_id) == -1) {
2891 IBTF_DPRINTF_L2("ibdm", "handle_ioc_profile: "
2892 "untimeout ioc_timeout_id failed");
2893 }
2894 mutex_enter(&gid_info->gl_mutex);
2895 }
2896
2897 ioc_info->ioc_state = IBDM_IOC_STATE_PROBE_SUCCESS;
2898 if (reprobe == 0) {
2899 ioc_info->ioc_iou_guid = gid_info->gl_nodeguid;
2900 ioc_info->ioc_nodeguid = gid_info->gl_nodeguid;
2901 }
2902
2903 /*
2904 * Save all the IOC information in the global structures.
	 * Note the wire format is big endian and the SPARC processor is
	 * also big endian, so there is no need to convert the data fields.
	 * The conversion routines used below are no-ops on SPARC machines,
	 * whereas they are effective on little endian machines such as
	 * Intel processors.
2910 */
2911 gioc = (ib_dm_ioc_ctrl_profile_t *)&ioc_info->ioc_profile;
2912
2913 /*
	 * During a reprobe, restrict updates to the port GIDs and
	 * service entries only.
2915 */
2916 if (reprobe == 0) {
2917 gioc->ioc_guid = b2h64(ioc->ioc_guid);
2918 gioc->ioc_vendorid =
2919 ((b2h32(ioc->ioc_vendorid) & IB_DM_VENDORID_MASK)
2920 >> IB_DM_VENDORID_SHIFT);
2921 gioc->ioc_deviceid = b2h32(ioc->ioc_deviceid);
2922 gioc->ioc_device_ver = b2h16(ioc->ioc_device_ver);
2923 gioc->ioc_subsys_vendorid =
2924 ((b2h32(ioc->ioc_subsys_vendorid) & IB_DM_VENDORID_MASK)
2925 >> IB_DM_VENDORID_SHIFT);
2926 gioc->ioc_subsys_id = b2h32(ioc->ioc_subsys_id);
2927 gioc->ioc_io_class = b2h16(ioc->ioc_io_class);
2928 gioc->ioc_io_subclass = b2h16(ioc->ioc_io_subclass);
2929 gioc->ioc_protocol = b2h16(ioc->ioc_protocol);
2930 gioc->ioc_protocol_ver = b2h16(ioc->ioc_protocol_ver);
2931 gioc->ioc_send_msg_qdepth =
2932 b2h16(ioc->ioc_send_msg_qdepth);
2933 gioc->ioc_rdma_read_qdepth =
2934 b2h16(ioc->ioc_rdma_read_qdepth);
2935 gioc->ioc_send_msg_sz = b2h32(ioc->ioc_send_msg_sz);
2936 gioc->ioc_rdma_xfer_sz = b2h32(ioc->ioc_rdma_xfer_sz);
2937 gioc->ioc_ctrl_opcap_mask = ioc->ioc_ctrl_opcap_mask;
2938 bcopy(ioc->ioc_id_string, gioc->ioc_id_string,
2939 IB_DM_IOC_ID_STRING_LEN);
2940
2941 ioc_info->ioc_iou_diagcode = gid_info->gl_iou->iou_diagcode;
2942 ioc_info->ioc_iou_dc_valid = gid_info->gl_iou->iou_dc_valid;
2943 ioc_info->ioc_diagdeviceid = (IB_DM_IOU_DEVICEID_MASK &
2944 gid_info->gl_iou->iou_info.iou_flag) ? B_TRUE : B_FALSE;
2945
2946 if (ioc_info->ioc_diagdeviceid == B_TRUE) {
2947 gid_info->gl_pending_cmds++;
2948 IBTF_DPRINTF_L3(ibdm_string,
2949 "\tibdm_handle_ioc_profile: "
2950 "%d: gid_info %p gl_state %d pending_cmds %d",
2951 __LINE__, gid_info, gid_info->gl_state,
2952 gid_info->gl_pending_cmds);
2953 }
2954 }
2955 gioc->ioc_service_entries = ioc->ioc_service_entries;
2956 mutex_exit(&gid_info->gl_mutex);
2957
2958 ibdm_dump_ioc_profile(gioc);
2959
2960 if ((ioc_info->ioc_diagdeviceid == B_TRUE) && (reprobe == 0)) {
2961 if (ibdm_get_diagcode(gid_info, ioc_no) != IBDM_SUCCESS) {
2962 mutex_enter(&gid_info->gl_mutex);
2963 gid_info->gl_pending_cmds--;
2964 mutex_exit(&gid_info->gl_mutex);
2965 }
2966 }
2967 ioc_info->ioc_serv = (ibdm_srvents_info_t *)kmem_zalloc(
2968 (gioc->ioc_service_entries * sizeof (ibdm_srvents_info_t)),
2969 KM_SLEEP);
2970
2971 /*
	 * A single request can retrieve at most 4 service entries.
	 * If there are more than four service entries, calculate the
	 * number of requests needed and send them in parallel.
2975 */
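	/*
	 * For example, 10 service entries result in three requests
	 * covering entries 0-3, 4-7 and 8-9.
	 */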
2976 nserv_entries = ioc->ioc_service_entries;
2977 ii = 0;
2978 while (nserv_entries) {
2979 mutex_enter(&gid_info->gl_mutex);
2980 gid_info->gl_pending_cmds++;
2981 ibdm_bump_transactionID(gid_info);
2982 mutex_exit(&gid_info->gl_mutex);
2983
2984 if (first != B_TRUE) {
2985 if (ibmf_alloc_msg(ibmf_hdl, IBMF_ALLOC_SLEEP,
2986 &msg) != IBMF_SUCCESS) {
2987 continue;
2988 }
2989
2990 }
2991 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
2992 ibdm_alloc_send_buffers(msg);
2993 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
2994 msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
2995 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
2996 if (gid_info->gl_redirected == B_TRUE) {
2997 if (gid_info->gl_redirect_dlid != 0) {
2998 msg->im_local_addr.ia_remote_lid =
2999 gid_info->gl_redirect_dlid;
3000 }
3001 msg->im_local_addr.ia_remote_qno =
3002 gid_info->gl_redirect_QP;
3003 msg->im_local_addr.ia_p_key =
3004 gid_info->gl_redirect_pkey;
3005 msg->im_local_addr.ia_q_key =
3006 gid_info->gl_redirect_qkey;
3007 msg->im_local_addr.ia_service_level =
3008 gid_info->gl_redirectSL;
3009 } else {
3010 msg->im_local_addr.ia_remote_qno = 1;
3011 msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
3012 msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
3013 msg->im_local_addr.ia_service_level = gid_info->gl_SL;
3014 }
3015
3016 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
3017 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
3018 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
3019 hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
3020 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
3021 hdr->Status = 0;
3022 hdr->TransactionID = h2b64(gid_info->gl_transactionID);
3023 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES);
3024
3025 srv_start = ii * 4;
3026 cb_args = &ioc_info->ioc_serv[srv_start].se_cb_args;
3027 cb_args->cb_gid_info = gid_info;
3028 cb_args->cb_retry_count = ibdm_dft_retry_cnt;
3029 cb_args->cb_req_type = IBDM_REQ_TYPE_SRVENTS;
3030 cb_args->cb_srvents_start = srv_start;
3031 cb_args->cb_ioc_num = ioc_no - 1;
3032
3033 if (nserv_entries >= IBDM_MAX_SERV_ENTRIES_PER_REQ) {
3034 nserv_entries -= IBDM_MAX_SERV_ENTRIES_PER_REQ;
3035 cb_args->cb_srvents_end = (cb_args->cb_srvents_start +
3036 IBDM_MAX_SERV_ENTRIES_PER_REQ - 1);
3037 } else {
3038 cb_args->cb_srvents_end =
3039 (cb_args->cb_srvents_start + nserv_entries - 1);
3040 nserv_entries = 0;
3041 }
3042 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr))
3043 ibdm_fill_srv_attr_mod(hdr, cb_args);
		_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*hdr))
3045
3046 mutex_enter(&gid_info->gl_mutex);
3047 ioc_info->ioc_serv[srv_start].se_timeout_id = timeout(
3048 ibdm_pkt_timeout_hdlr, cb_args,
3049 IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
3050 mutex_exit(&gid_info->gl_mutex);
3051
3052 IBTF_DPRINTF_L5("ibdm", "\thandle_ioc_profile:"
3053 "timeout %x, ioc %d srv %d",
3054 ioc_info->ioc_serv[srv_start].se_timeout_id,
3055 ioc_no - 1, srv_start);
3056
3057 if (ibmf_msg_transport(ibmf_hdl, gid_info->gl_qp_hdl, msg,
3058 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
3059 IBTF_DPRINTF_L2("ibdm",
3060 "\thandle_ioc_profile: msg send failed");
3061 ibdm_ibmf_send_cb(ibmf_hdl, msg, cb_args);
3062 }
3063 (*flag) |= IBDM_IBMF_PKT_REUSED;
3064 first = B_FALSE;
3065 ii++;
3066 }
3067 }
3068
3069
3070 /*
3071 * ibdm_handle_srventry_mad()
3072 */
3073 static void
3074 ibdm_handle_srventry_mad(ibmf_msg_t *msg,
3075 ibdm_dp_gidinfo_t *gid_info, int *flag)
3076 {
3077 uint_t ii, ioc_no, attrmod;
3078 uint_t nentries, start, end;
3079 timeout_id_t timeout_id;
3080 ib_dm_srv_t *srv_ents;
3081 ibdm_ioc_info_t *ioc_info;
3082 ibdm_srvents_info_t *gsrv_ents;
3083
3084 IBTF_DPRINTF_L4("ibdm", "\thandle_srventry_mad:"
3085 " IBMF msg %p gid info %p", msg, gid_info);
3086
3087 srv_ents = IBDM_IN_IBMFMSG2SRVENT(msg);
3088 /*
	 * Get the IOC number and the range of service entries from the
	 * attribute modifier:
	 *	the upper 16 bits identify the IOC,
	 *	bits 15-8 specify the end of the range,
	 *	bits 7-0 specify the start of the range
3094 */
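	/*
	 * For example, an attribute modifier of 0x00020300 refers to
	 * IOC slot 2, service entries 0 through 3.
	 */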
3095 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg);
3096 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK);
3097 end = ((attrmod >> 8) & IBDM_8_BIT_MASK);
3098 start = (attrmod & IBDM_8_BIT_MASK);
3099
	/* Make sure that the IOC index is within the valid range */
3101 if ((ioc_no < 1) |
3102 (ioc_no > gid_info->gl_iou->iou_info.iou_num_ctrl_slots)) {
3103 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: "
3104 "IOC index Out of range, index %d", ioc_no);
3105 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
3106 return;
3107 }
3108 ioc_info = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1));
3109
3110 /*
3111 * Make sure that the "start" and "end" service indexes are
	 * within the valid range
3113 */
3114 nentries = ioc_info->ioc_profile.ioc_service_entries;
3115 if ((start > end) | (start >= nentries) | (end >= nentries)) {
3116 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: "
3117 "Attr modifier 0x%x, #Serv entries %d", attrmod, nentries);
3118 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
3119 return;
3120 }
3121 gsrv_ents = &ioc_info->ioc_serv[start];
3122 mutex_enter(&gid_info->gl_mutex);
3123 if (gsrv_ents->se_state != IBDM_SE_INVALID) {
3124 IBTF_DPRINTF_L2("ibdm", "\thandle_srventry_mad: "
3125 "already known, ioc %d, srv %d, se_state %x",
3126 ioc_no - 1, start, gsrv_ents->se_state);
3127 mutex_exit(&gid_info->gl_mutex);
3128 (*flag) |= IBDM_IBMF_PKT_DUP_RESP;
3129 return;
3130 }
3131 ioc_info->ioc_serv[start].se_cb_args.cb_req_type = 0;
3132 if (ioc_info->ioc_serv[start].se_timeout_id) {
3133 IBTF_DPRINTF_L2("ibdm",
3134 "\thandle_srventry_mad: ioc %d start %d", ioc_no, start);
3135 timeout_id = ioc_info->ioc_serv[start].se_timeout_id;
3136 ioc_info->ioc_serv[start].se_timeout_id = 0;
3137 mutex_exit(&gid_info->gl_mutex);
		IBTF_DPRINTF_L5("ibdm", "handle_srventry_mad: "
3139 "se_timeout_id = 0x%x", timeout_id);
3140 if (untimeout(timeout_id) == -1) {
3141 IBTF_DPRINTF_L2("ibdm", "handle_srventry_mad: "
3142 "untimeout se_timeout_id failed");
3143 }
3144 mutex_enter(&gid_info->gl_mutex);
3145 }
3146
3147 gsrv_ents->se_state = IBDM_SE_VALID;
3148 mutex_exit(&gid_info->gl_mutex);
3149 for (ii = start; ii <= end; ii++, srv_ents++, gsrv_ents++) {
3150 gsrv_ents->se_attr.srv_id = b2h64(srv_ents->srv_id);
3151 bcopy(srv_ents->srv_name,
3152 gsrv_ents->se_attr.srv_name, IB_DM_MAX_SVC_NAME_LEN);
3153 ibdm_dump_service_entries(&gsrv_ents->se_attr);
3154 }
3155 }
3156
3157
3158 /*
3159 * ibdm_get_diagcode:
3160 * Send request to get IOU/IOC diag code
3161 * Returns IBDM_SUCCESS/IBDM_FAILURE
3162 */
3163 static int
3164 ibdm_get_diagcode(ibdm_dp_gidinfo_t *gid_info, int attr)
3165 {
3166 ibmf_msg_t *msg;
3167 ib_mad_hdr_t *hdr;
3168 ibdm_ioc_info_t *ioc;
3169 ibdm_timeout_cb_args_t *cb_args;
3170 timeout_id_t *timeout_id;
3171
3172 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: gid info %p, attr = %d",
3173 gid_info, attr);
3174
3175 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP,
3176 &msg) != IBMF_SUCCESS) {
3177 IBTF_DPRINTF_L4("ibdm", "\tget_diagcode: pkt alloc fail");
3178 return (IBDM_FAILURE);
3179 }
3180
3181 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
3182 ibdm_alloc_send_buffers(msg);
3183 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
3184
3185 mutex_enter(&gid_info->gl_mutex);
3186 ibdm_bump_transactionID(gid_info);
3187 mutex_exit(&gid_info->gl_mutex);
3188
3189 msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
3190 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
3191 if (gid_info->gl_redirected == B_TRUE) {
3192 if (gid_info->gl_redirect_dlid != 0) {
3193 msg->im_local_addr.ia_remote_lid =
3194 gid_info->gl_redirect_dlid;
3195 }
3196
3197 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP;
3198 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey;
3199 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey;
3200 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL;
3201 } else {
3202 msg->im_local_addr.ia_remote_qno = 1;
3203 msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
3204 msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
3205 msg->im_local_addr.ia_service_level = gid_info->gl_SL;
3206 }
3207
3208 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
3209 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
3210 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
3211 hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
3212 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
3213 hdr->Status = 0;
3214 hdr->TransactionID = h2b64(gid_info->gl_transactionID);
3215
3216 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE);
3217 hdr->AttributeModifier = h2b32(attr);
3218
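	/*
	 * An attribute modifier of 0 requests the IOU DiagCode;
	 * a non-zero value requests the DiagCode of IOC slot "attr".
	 */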
3219 if (attr == 0) {
3220 cb_args = &gid_info->gl_iou_cb_args;
3221 gid_info->gl_iou->iou_dc_valid = B_FALSE;
3222 cb_args->cb_ioc_num = 0;
3223 cb_args->cb_req_type = IBDM_REQ_TYPE_IOU_DIAGCODE;
3224 timeout_id = &gid_info->gl_timeout_id;
3225 } else {
3226 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attr - 1));
3227 ioc->ioc_dc_valid = B_FALSE;
3228 cb_args = &ioc->ioc_dc_cb_args;
3229 cb_args->cb_ioc_num = attr - 1;
3230 cb_args->cb_req_type = IBDM_REQ_TYPE_IOC_DIAGCODE;
3231 timeout_id = &ioc->ioc_dc_timeout_id;
3232 }
3233 cb_args->cb_gid_info = gid_info;
3234 cb_args->cb_retry_count = ibdm_dft_retry_cnt;
3235 cb_args->cb_srvents_start = 0;
3236
3237 mutex_enter(&gid_info->gl_mutex);
3238 *timeout_id = timeout(ibdm_pkt_timeout_hdlr,
3239 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
3240 mutex_exit(&gid_info->gl_mutex);
3241
3242 IBTF_DPRINTF_L5("ibdm", "\tget_diagcode:"
3243 "timeout %x, ioc %d", *timeout_id, cb_args->cb_ioc_num);
3244
3245 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl,
3246 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
3247 IBTF_DPRINTF_L2("ibdm", "\tget_diagcode: ibmf send failed");
3248 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args);
3249 }
3250 return (IBDM_SUCCESS);
3251 }
3252
3253 /*
3254 * ibdm_handle_diagcode:
3255 * Process the DiagCode MAD response and update local DM
3256 * data structure.
3257 */
3258 static void
3259 ibdm_handle_diagcode(ibmf_msg_t *ibmf_msg,
3260 ibdm_dp_gidinfo_t *gid_info, int *flag)
3261 {
3262 uint16_t attrmod, *diagcode;
3263 ibdm_iou_info_t *iou;
3264 ibdm_ioc_info_t *ioc;
3265 timeout_id_t timeout_id;
3266 ibdm_timeout_cb_args_t *cb_args;
3267
3268 diagcode = (uint16_t *)ibmf_msg->im_msgbufs_recv.im_bufs_cl_data;
3269
3270 mutex_enter(&gid_info->gl_mutex);
3271 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(ibmf_msg);
3272 iou = gid_info->gl_iou;
3273 if (attrmod == 0) {
3274 if (iou->iou_dc_valid != B_FALSE) {
3275 (*flag) |= IBDM_IBMF_PKT_DUP_RESP;
3276 IBTF_DPRINTF_L4("ibdm",
3277 "\thandle_diagcode: Duplicate IOU DiagCode");
3278 mutex_exit(&gid_info->gl_mutex);
3279 return;
3280 }
3281 cb_args = &gid_info->gl_iou_cb_args;
3282 cb_args->cb_req_type = 0;
3283 iou->iou_diagcode = b2h16(*diagcode);
3284 iou->iou_dc_valid = B_TRUE;
3285 if (gid_info->gl_timeout_id) {
3286 timeout_id = gid_info->gl_timeout_id;
3287 mutex_exit(&gid_info->gl_mutex);
3288 IBTF_DPRINTF_L5("ibdm", "\thandle_diagcode: "
3289 "gl_timeout_id = 0x%x", timeout_id);
3290 if (untimeout(timeout_id) == -1) {
3291 IBTF_DPRINTF_L2("ibdm", "handle_diagcode: "
3292 "untimeout gl_timeout_id failed");
3293 }
3294 mutex_enter(&gid_info->gl_mutex);
3295 gid_info->gl_timeout_id = 0;
3296 }
3297 } else {
3298 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod - 1));
3299 if (ioc->ioc_dc_valid != B_FALSE) {
3300 (*flag) |= IBDM_IBMF_PKT_DUP_RESP;
3301 IBTF_DPRINTF_L4("ibdm",
3302 "\thandle_diagcode: Duplicate IOC DiagCode");
3303 mutex_exit(&gid_info->gl_mutex);
3304 return;
3305 }
3306 cb_args = &ioc->ioc_dc_cb_args;
3307 cb_args->cb_req_type = 0;
3308 ioc->ioc_diagcode = b2h16(*diagcode);
3309 ioc->ioc_dc_valid = B_TRUE;
3310 timeout_id = iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id;
3311 if (timeout_id) {
3312 iou->iou_ioc_info[attrmod - 1].ioc_dc_timeout_id = 0;
3313 mutex_exit(&gid_info->gl_mutex);
3314 IBTF_DPRINTF_L5("ibdm", "handle_diagcode: "
3315 "timeout_id = 0x%x", timeout_id);
3316 if (untimeout(timeout_id) == -1) {
3317 IBTF_DPRINTF_L2("ibdm", "\thandle_diagcode: "
3318 "untimeout ioc_dc_timeout_id failed");
3319 }
3320 mutex_enter(&gid_info->gl_mutex);
3321 }
3322 }
3323 mutex_exit(&gid_info->gl_mutex);
3324
	IBTF_DPRINTF_L4("ibdm", "\thandle_diagcode: DiagCode : 0x%x "
	    "attrmod : 0x%x", b2h16(*diagcode), attrmod);
3327 }
3328
3329
3330 /*
3331 * ibdm_is_ioc_present()
3332 * Return ibdm_ioc_info_t if IOC guid is found in the global gid list
3333 */
3334 static ibdm_ioc_info_t *
3335 ibdm_is_ioc_present(ib_guid_t ioc_guid,
3336 ibdm_dp_gidinfo_t *gid_info, int *flag)
3337 {
3338 int ii;
3339 ibdm_ioc_info_t *ioc;
3340 ibdm_dp_gidinfo_t *head;
3341 ib_dm_io_unitinfo_t *iou;
3342
3343 mutex_enter(&ibdm.ibdm_mutex);
3344 head = ibdm.ibdm_dp_gidlist_head;
3345 while (head) {
3346 mutex_enter(&head->gl_mutex);
3347 if (head->gl_iou == NULL) {
3348 mutex_exit(&head->gl_mutex);
3349 head = head->gl_next;
3350 continue;
3351 }
3352 iou = &head->gl_iou->iou_info;
3353 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) {
3354 ioc = IBDM_GIDINFO2IOCINFO(head, ii);
3355 if ((ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) &&
3356 (ioc->ioc_profile.ioc_guid == ioc_guid)) {
3357 if (gid_info == head) {
3358 *flag |= IBDM_IBMF_PKT_DUP_RESP;
3359 } else if (ibdm_check_dgid(head->gl_dgid_lo,
3360 head->gl_dgid_hi) != NULL) {
3361 IBTF_DPRINTF_L4("ibdm", "\tis_ioc_"
3362 "present: gid not present");
3363 ibdm_add_to_gl_gid(gid_info, head);
3364 }
3365 mutex_exit(&head->gl_mutex);
3366 mutex_exit(&ibdm.ibdm_mutex);
3367 return (ioc);
3368 }
3369 }
3370 mutex_exit(&head->gl_mutex);
3371 head = head->gl_next;
3372 }
3373 mutex_exit(&ibdm.ibdm_mutex);
3374 return (NULL);
3375 }
3376
3377
3378 /*
3379 * ibdm_ibmf_send_cb()
3380 * IBMF invokes this callback routine after posting the DM MAD to
3381 * the HCA.
3382 */
3383 /*ARGSUSED*/
3384 static void
3385 ibdm_ibmf_send_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *ibmf_msg, void *arg)
3386 {
3387 ibdm_dump_ibmf_msg(ibmf_msg, 1);
3388 ibdm_free_send_buffers(ibmf_msg);
3389 if (ibmf_free_msg(ibmf_hdl, &ibmf_msg) != IBMF_SUCCESS) {
3390 IBTF_DPRINTF_L4("ibdm",
3391 "\tibmf_send_cb: IBMF free msg failed");
3392 }
3393 }
3394
3395
3396 /*
3397 * ibdm_ibmf_recv_cb()
 *	Invoked by the IBMF when a response to one of the DM requests
 *	is received.
3400 */
3401 /*ARGSUSED*/
3402 static void
3403 ibdm_ibmf_recv_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg)
3404 {
3405 ibdm_taskq_args_t *taskq_args;
3406
3407 /*
	 * If taskq handling is enabled, dispatch the MAD to the system
	 * taskq for processing; otherwise just process it on this thread.
3410 */
3411 if (ibdm_taskq_enable != IBDM_ENABLE_TASKQ_HANDLING) {
3412 ibdm_process_incoming_mad(ibmf_hdl, msg, arg);
3413 return;
3414 }
3415
3416 /*
	 * Dispatch a job to the system taskq to process the incoming MAD.
3418 */
3419 taskq_args = kmem_alloc(sizeof (ibdm_taskq_args_t), KM_NOSLEEP);
3420 if (taskq_args == NULL) {
		IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: kmem_alloc failed for "
3422 "taskq_args");
3423 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
3424 IBTF_DPRINTF_L4("ibmf_recv_cb",
3425 "\tibmf_recv_cb: IBMF free msg failed");
3426 }
3427 return;
3428 }
3429 taskq_args->tq_ibmf_handle = ibmf_hdl;
3430 taskq_args->tq_ibmf_msg = msg;
3431 taskq_args->tq_args = arg;
3432
3433 if (taskq_dispatch(system_taskq, ibdm_recv_incoming_mad, taskq_args,
3434 TQ_NOSLEEP) == 0) {
3435 IBTF_DPRINTF_L2("ibdm", "ibmf_recv_cb: taskq_dispatch failed");
3436 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
3437 IBTF_DPRINTF_L4("ibmf_recv_cb",
3438 "\tibmf_recv_cb: IBMF free msg failed");
3439 }
3440 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t));
3441 return;
3442 }
3443
3444 /* taskq_args are deleted in ibdm_recv_incoming_mad() */
3445 }
3446
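/*
 * The deferred-processing pattern used by ibdm_ibmf_recv_cb() above
 * (allocate a small argument block with KM_NOSLEEP, hand it to the
 * system taskq with TQ_NOSLEEP, and let the task free it) is sketched
 * below for reference. This is an illustrative sketch only, not part
 * of the driver; "my_work_t" and "my_work_func" are hypothetical names.
 *
 *	typedef struct my_work { void *mw_arg; } my_work_t;
 *
 *	static void
 *	my_work_func(void *args)
 *	{
 *		my_work_t *work = args;
 *		(void) work->mw_arg;		(process the work here)
 *		kmem_free(work, sizeof (my_work_t));
 *	}
 *
 *	work = kmem_alloc(sizeof (my_work_t), KM_NOSLEEP);
 *	if (work == NULL || taskq_dispatch(system_taskq, my_work_func,
 *	    work, TQ_NOSLEEP) == 0) {
 *		(fall back: process inline, then free "work" if allocated)
 *	}
 */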
3447
3448 void
3449 ibdm_recv_incoming_mad(void *args)
3450 {
3451 ibdm_taskq_args_t *taskq_args;
3452
3453 taskq_args = (ibdm_taskq_args_t *)args;
3454
3455 IBTF_DPRINTF_L4("ibdm", "\tibdm_recv_incoming_mad: "
3456 "Processing incoming MAD via taskq");
3457
3458 ibdm_process_incoming_mad(taskq_args->tq_ibmf_handle,
3459 taskq_args->tq_ibmf_msg, taskq_args->tq_args);
3460
3461 kmem_free(taskq_args, sizeof (ibdm_taskq_args_t));
3462 }
3463
3464
3465 /*
3466 * Calls ibdm_process_incoming_mad with all function arguments extracted
3467 * from args
3468 */
3469 /*ARGSUSED*/
3470 static void
3471 ibdm_process_incoming_mad(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msg, void *arg)
3472 {
3473 int flag = 0;
3474 int ret;
3475 uint64_t transaction_id;
3476 ib_mad_hdr_t *hdr;
3477 ibdm_dp_gidinfo_t *gid_info = NULL;
3478
3479 IBTF_DPRINTF_L4("ibdm",
3480 "\tprocess_incoming_mad: ibmf hdl %p pkt %p", ibmf_hdl, msg);
3481 ibdm_dump_ibmf_msg(msg, 0);
3482
3483 /*
3484 * IBMF calls this routine for every DM MAD that arrives at this port.
3485 * However, we handle only the responses to requests we sent. We drop
3486 * all DM packets that do not have the response bit set in the MAD
3487 * header (this eliminates all the requests sent to this port).
3488 * Only DM class version 1 MADs are handled.
3489 */
3490 hdr = IBDM_IN_IBMFMSG_MADHDR(msg);
3491 if (ibdm_verify_mad_status(hdr) != IBDM_SUCCESS) {
3492 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
3493 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: "
3494 "IBMF free msg failed DM request drop it");
3495 }
3496 return;
3497 }
3498
3499 transaction_id = b2h64(hdr->TransactionID);
3500
3501 mutex_enter(&ibdm.ibdm_mutex);
3502 gid_info = ibdm.ibdm_dp_gidlist_head;
3503 while (gid_info) {
3504 if ((gid_info->gl_transactionID &
3505 IBDM_GID_TRANSACTIONID_MASK) ==
3506 (transaction_id & IBDM_GID_TRANSACTIONID_MASK))
3507 break;
3508 gid_info = gid_info->gl_next;
3509 }
3510 mutex_exit(&ibdm.ibdm_mutex);
3511
3512 if (gid_info == NULL) {
3513 /* Drop the packet */
3514 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: transaction ID"
3515 " does not match: 0x%llx", transaction_id);
3516 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
3517 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: "
3518 "IBMF free msg failed DM request drop it");
3519 }
3520 return;
3521 }
3522
3523 /* Handle redirection for all the MAD's, except ClassPortInfo */
3524 if (((IBDM_IN_IBMFMSG_STATUS(msg) & MAD_STATUS_REDIRECT_REQUIRED)) &&
3525 (IBDM_IN_IBMFMSG_ATTR(msg) != IB_DM_ATTR_CLASSPORTINFO)) {
3526 ret = ibdm_handle_redirection(msg, gid_info, &flag);
3527 if (ret == IBDM_SUCCESS) {
3528 return;
3529 }
3530 } else {
3531 uint_t gl_state;
3532
3533 mutex_enter(&gid_info->gl_mutex);
3534 gl_state = gid_info->gl_state;
3535 mutex_exit(&gid_info->gl_mutex);
3536
3537 switch (gl_state) {
3538
3539 case IBDM_SET_CLASSPORTINFO:
3540 ibdm_handle_setclassportinfo(
3541 ibmf_hdl, msg, gid_info, &flag);
3542 break;
3543
3544 case IBDM_GET_CLASSPORTINFO:
3545 ibdm_handle_classportinfo(
3546 ibmf_hdl, msg, gid_info, &flag);
3547 break;
3548
3549 case IBDM_GET_IOUNITINFO:
3550 ibdm_handle_iounitinfo(ibmf_hdl, msg, gid_info, &flag);
3551 break;
3552
3553 case IBDM_GET_IOC_DETAILS:
3554 switch (IBDM_IN_IBMFMSG_ATTR(msg)) {
3555
3556 case IB_DM_ATTR_SERVICE_ENTRIES:
3557 ibdm_handle_srventry_mad(msg, gid_info, &flag);
3558 break;
3559
3560 case IB_DM_ATTR_IOC_CTRL_PROFILE:
3561 ibdm_handle_ioc_profile(
3562 ibmf_hdl, msg, gid_info, &flag);
3563 break;
3564
3565 case IB_DM_ATTR_DIAG_CODE:
3566 ibdm_handle_diagcode(msg, gid_info, &flag);
3567 break;
3568
3569 default:
3570 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: "
3571 "Error state, wrong attribute :-(");
3572 (void) ibmf_free_msg(ibmf_hdl, &msg);
3573 return;
3574 }
3575 break;
3576 default:
3577 IBTF_DPRINTF_L2("ibdm",
3578 "process_incoming_mad: Dropping the packet"
3579 " gl_state %x", gl_state);
3580 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
3581 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: "
3582 "IBMF free msg failed DM request drop it");
3583 }
3584 return;
3585 }
3586 }
3587
3588 if ((flag & IBDM_IBMF_PKT_DUP_RESP) ||
3589 (flag & IBDM_IBMF_PKT_UNEXP_RESP)) {
3590 IBTF_DPRINTF_L2("ibdm",
3591 "\tprocess_incoming_mad:Dup/unexp resp : 0x%x", flag);
3592 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
3593 IBTF_DPRINTF_L2("ibdm", "process_incoming_mad: "
3594 "IBMF free msg failed DM request drop it");
3595 }
3596 return;
3597 }
3598
3599 mutex_enter(&gid_info->gl_mutex);
3600 if (gid_info->gl_pending_cmds < 1) {
3601 IBTF_DPRINTF_L2("ibdm",
3602 "\tprocess_incoming_mad: pending commands negative");
3603 }
3604 if (--gid_info->gl_pending_cmds) {
3605 IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: "
3606 "gid_info %p pending cmds %d",
3607 gid_info, gid_info->gl_pending_cmds);
3608 mutex_exit(&gid_info->gl_mutex);
3609 } else {
3610 uint_t prev_state;
3611 IBTF_DPRINTF_L4("ibdm", "\tprocess_incoming_mad: Probing DONE");
3612 prev_state = gid_info->gl_state;
3613 gid_info->gl_state = IBDM_GID_PROBING_COMPLETE;
3614 if (prev_state == IBDM_SET_CLASSPORTINFO) {
3615 IBTF_DPRINTF_L4("ibdm",
3616 "\tprocess_incoming_mad: "
3617 "Setclassportinfo for Cisco FC GW is done.");
3618 gid_info->gl_flag &= ~IBDM_CISCO_PROBE;
3619 gid_info->gl_flag |= IBDM_CISCO_PROBE_DONE;
3620 mutex_exit(&gid_info->gl_mutex);
3621 cv_broadcast(&gid_info->gl_probe_cv);
3622 } else {
3623 mutex_exit(&gid_info->gl_mutex);
3624 ibdm_notify_newgid_iocs(gid_info);
3625 mutex_enter(&ibdm.ibdm_mutex);
3626 if (--ibdm.ibdm_ngid_probes_in_progress == 0) {
3627 IBTF_DPRINTF_L4("ibdm",
3628 "\tprocess_incoming_mad: Wakeup");
3629 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS;
3630 cv_broadcast(&ibdm.ibdm_probe_cv);
3631 }
3632 mutex_exit(&ibdm.ibdm_mutex);
3633 }
3634 }
3635
3636 /*
3637 * Do not deallocate the IBMF packet if at least one request
3638 * is posted; the IBMF packet is reused.
3639 */
3640 if (!(flag & IBDM_IBMF_PKT_REUSED)) {
3641 if (ibmf_free_msg(ibmf_hdl, &msg) != IBMF_SUCCESS) {
3642 IBTF_DPRINTF_L2("ibdm", "\tprocess_incoming_mad: "
3643 "IBMF free msg failed DM request drop it");
3644 }
3645 }
3646 }
3647
3648
3649 /*
3650 * ibdm_verify_mad_status()
3651 * Verifies the MAD status
3652 * Returns IBDM_SUCCESS if status is correct
3653 * Returns IBDM_FAILURE for bogus MAD status
3654 */
3655 static int
3656 ibdm_verify_mad_status(ib_mad_hdr_t *hdr)
3657 {
3658 int ret = 0;
3659
3660 if ((hdr->R_Method != IB_DM_DEVMGT_METHOD_GET_RESP) ||
3661 (hdr->ClassVersion != IB_DM_CLASS_VERSION_1)) {
3662 return (IBDM_FAILURE);
3663 }
3664
3665 if (b2h16(hdr->Status) == 0)
3666 ret = IBDM_SUCCESS;
3667 else if ((b2h16(hdr->Status) & 0x1f) == MAD_STATUS_REDIRECT_REQUIRED)
3668 ret = IBDM_SUCCESS;
3669 else {
3670 IBTF_DPRINTF_L2("ibdm",
3671 "\tverify_mad_status: Status : 0x%x", b2h16(hdr->Status));
3672 ret = IBDM_FAILURE;
3673 }
3674 return (ret);
3675 }
3676
3677
3678
3679 /*
3680 * ibdm_handle_redirection()
3681 * Returns IBDM_SUCCESS/IBDM_FAILURE
3682 */
3683 static int
3684 ibdm_handle_redirection(ibmf_msg_t *msg,
3685 ibdm_dp_gidinfo_t *gid_info, int *flag)
3686 {
3687 int attrmod, ioc_no, start;
3688 void *data;
3689 timeout_id_t *timeout_id;
3690 ib_mad_hdr_t *hdr;
3691 ibdm_ioc_info_t *ioc = NULL;
3692 ibdm_timeout_cb_args_t *cb_args;
3693 ib_mad_classportinfo_t *cpi;
3694
3695 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Enter");
3696 mutex_enter(&gid_info->gl_mutex);
3697 switch (gid_info->gl_state) {
3698 case IBDM_GET_IOUNITINFO:
3699 cb_args = &gid_info->gl_iou_cb_args;
3700 timeout_id = &gid_info->gl_timeout_id;
3701 break;
3702
3703 case IBDM_GET_IOC_DETAILS:
3704 attrmod = IBDM_IN_IBMFMSG_ATTRMOD(msg);
3705 switch (IBDM_IN_IBMFMSG_ATTR(msg)) {
3706
3707 case IB_DM_ATTR_DIAG_CODE:
3708 if (attrmod == 0) {
3709 cb_args = &gid_info->gl_iou_cb_args;
3710 timeout_id = &gid_info->gl_timeout_id;
3711 break;
3712 }
3713 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) {
3714 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:"
3715 "IOC# Out of range %d", attrmod);
3716 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
3717 mutex_exit(&gid_info->gl_mutex);
3718 return (IBDM_FAILURE);
3719 }
3720 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1));
3721 cb_args = &ioc->ioc_dc_cb_args;
3722 timeout_id = &ioc->ioc_dc_timeout_id;
3723 break;
3724
3725 case IB_DM_ATTR_IOC_CTRL_PROFILE:
3726 if (IBDM_IS_IOC_NUM_INVALID(attrmod, gid_info)) {
3727 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:"
3728 "IOC# Out of range %d", attrmod);
3729 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
3730 mutex_exit(&gid_info->gl_mutex);
3731 return (IBDM_FAILURE);
3732 }
3733 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (attrmod -1));
3734 cb_args = &ioc->ioc_cb_args;
3735 timeout_id = &ioc->ioc_timeout_id;
3736 break;
3737
3738 case IB_DM_ATTR_SERVICE_ENTRIES:
3739 ioc_no = ((attrmod >> 16) & IBDM_16_BIT_MASK);
3740 if (IBDM_IS_IOC_NUM_INVALID(ioc_no, gid_info)) {
3741 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:"
3742 "IOC# Out of range %d", ioc_no);
3743 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
3744 mutex_exit(&gid_info->gl_mutex);
3745 return (IBDM_FAILURE);
3746 }
3747 start = (attrmod & IBDM_8_BIT_MASK);
3748 ioc = IBDM_GIDINFO2IOCINFO(gid_info, (ioc_no -1));
3749 if (start > ioc->ioc_profile.ioc_service_entries) {
3750 IBTF_DPRINTF_L2("ibdm", "\thandle_redirction:"
3751 " SE index Out of range %d", start);
3752 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
3753 mutex_exit(&gid_info->gl_mutex);
3754 return (IBDM_FAILURE);
3755 }
3756 cb_args = &ioc->ioc_serv[start].se_cb_args;
3757 timeout_id = &ioc->ioc_serv[start].se_timeout_id;
3758 break;
3759
3760 default:
3761 /* ERROR State */
3762 IBTF_DPRINTF_L2("ibdm",
3763 "\thandle_redirection: wrong attribute :-(");
3764 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
3765 mutex_exit(&gid_info->gl_mutex);
3766 return (IBDM_FAILURE);
3767 }
3768 break;
3769 default:
3770 /* ERROR State */
3771 IBTF_DPRINTF_L2("ibdm",
3772 "\thandle_redirection: Error state :-(");
3773 (*flag) |= IBDM_IBMF_PKT_UNEXP_RESP;
3774 mutex_exit(&gid_info->gl_mutex);
3775 return (IBDM_FAILURE);
3776 }
3777 if ((*timeout_id) != 0) {
3778 mutex_exit(&gid_info->gl_mutex);
3779 if (untimeout(*timeout_id) == -1) {
3780 IBTF_DPRINTF_L2("ibdm", "\thandle_redirection: "
3781 "untimeout failed %x", *timeout_id);
3782 } else {
3783 IBTF_DPRINTF_L5("ibdm",
3784 "\thandle_redirection: timeout %x", *timeout_id);
3785 }
3786 mutex_enter(&gid_info->gl_mutex);
3787 *timeout_id = 0;
3788 }
3789
3790 data = msg->im_msgbufs_recv.im_bufs_cl_data;
3791 cpi = (ib_mad_classportinfo_t *)data;
3792
3793 gid_info->gl_resp_timeout =
3794 (b2h32(cpi->RespTimeValue) & 0x1F);
3795
3796 gid_info->gl_redirected = B_TRUE;
3797 gid_info->gl_redirect_dlid = b2h16(cpi->RedirectLID);
3798 gid_info->gl_redirect_QP = (b2h32(cpi->RedirectQP) & 0xffffff);
3799 gid_info->gl_redirect_pkey = b2h16(cpi->RedirectP_Key);
3800 gid_info->gl_redirect_qkey = b2h32(cpi->RedirectQ_Key);
3801 gid_info->gl_redirectGID_hi = b2h64(cpi->RedirectGID_hi);
3802 gid_info->gl_redirectGID_lo = b2h64(cpi->RedirectGID_lo);
3803 gid_info->gl_redirectSL = cpi->RedirectSL;
3804
3805 if (gid_info->gl_redirect_dlid != 0) {
3806 msg->im_local_addr.ia_remote_lid =
3807 gid_info->gl_redirect_dlid;
3808 }
3809 ibdm_bump_transactionID(gid_info);
3810 mutex_exit(&gid_info->gl_mutex);
3811
3812 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg, *hdr))
3813 ibdm_alloc_send_buffers(msg);
3814
3815 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
3816 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
3817 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
3818 hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
3819 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
3820 hdr->Status = 0;
3821 hdr->TransactionID = h2b64(gid_info->gl_transactionID);
3822 hdr->AttributeID =
3823 msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeID;
3824 hdr->AttributeModifier =
3825 msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeModifier;
3826 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg, *hdr))
3827
3828 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP;
3829 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey;
3830 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey;
3831 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL;
3832
3833 mutex_enter(&gid_info->gl_mutex);
3834 *timeout_id = timeout(ibdm_pkt_timeout_hdlr,
3835 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
3836 mutex_exit(&gid_info->gl_mutex);
3837
3838 IBTF_DPRINTF_L5("ibdm", "\thandle_redirect:"
3839 "timeout %x", *timeout_id);
3840
3841 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl,
3842 msg, NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
3843 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection:"
3844 "message transport failed");
3845 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args);
3846 }
3847 (*flag) |= IBDM_IBMF_PKT_REUSED;
3848 IBTF_DPRINTF_L4("ibdm", "\thandle_redirection: Exit");
3849 return (IBDM_SUCCESS);
3850 }
3851
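/*
 * A worked example of the ClassPortInfo redirect-field extraction done
 * in ibdm_handle_redirection() above (the values are illustrative only):
 * the redirect QP number occupies the low 24 bits of RedirectQP and the
 * response time value occupies the low 5 bits of RespTimeValue.
 *
 *	b2h32(cpi->RedirectQP)    = 0xAB123456
 *	0xAB123456 & 0xffffff     = 0x123456	(gl_redirect_QP)
 *
 *	b2h32(cpi->RespTimeValue) = 0x00000073
 *	0x00000073 & 0x1F         = 0x13	(gl_resp_timeout)
 */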
3852
3853 /*
3854 * ibdm_pkt_timeout_hdlr
3855 * This timeout handler is registered for every IBMF packet that is
3856 * sent through the IBMF. It gets called when no response is received
3857 * within the specified time for the packet. The command is retried
3858 * while cb_retry_count is non-zero; once retries are exhausted, the
3859 * failed IBMF packet is dropped and the pending command count updated.
3860 */
3861 static void
3862 ibdm_pkt_timeout_hdlr(void *arg)
3863 {
3864 ibdm_iou_info_t *iou;
3865 ibdm_ioc_info_t *ioc;
3866 ibdm_timeout_cb_args_t *cb_args = arg;
3867 ibdm_dp_gidinfo_t *gid_info;
3868 int srv_ent;
3869 uint_t new_gl_state;
3870
3871 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: gid_info: %p "
3872 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info,
3873 cb_args->cb_req_type, cb_args->cb_ioc_num,
3874 cb_args->cb_srvents_start);
3875
3876 gid_info = cb_args->cb_gid_info;
3877 mutex_enter(&gid_info->gl_mutex);
3878
3879 if ((gid_info->gl_state == IBDM_GID_PROBING_COMPLETE) ||
3880 (cb_args->cb_req_type == 0)) {
3881
3882 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: req completed"
3883 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_req_type,
3884 cb_args->cb_ioc_num, cb_args->cb_srvents_start);
3885
3886 if (gid_info->gl_timeout_id)
3887 gid_info->gl_timeout_id = 0;
3888 mutex_exit(&gid_info->gl_mutex);
3889 return;
3890 }
3891 if (cb_args->cb_retry_count) {
3892 cb_args->cb_retry_count--;
3893 /*
3894 * A new timeout_id is set inside ibdm_retry_command().
3895 * When the function returns an error, the timeout_id
3896 * is reset (to zero) in the switch statement below.
3897 */
3898 if (ibdm_retry_command(cb_args) == IBDM_SUCCESS) {
3899 mutex_exit(&gid_info->gl_mutex);
3900 return;
3901 }
3902 cb_args->cb_retry_count = 0;
3903 }
3904
3905 IBTF_DPRINTF_L2("ibdm", "\tpkt_timeout_hdlr: command failed: gid %p"
3906 " rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info,
3907 cb_args->cb_req_type, cb_args->cb_ioc_num,
3908 cb_args->cb_srvents_start);
3909
3910 switch (cb_args->cb_req_type) {
3911
3912 case IBDM_REQ_TYPE_CLASSPORTINFO:
3913 case IBDM_REQ_TYPE_IOUINFO:
3914 new_gl_state = IBDM_GID_PROBING_FAILED;
3915 if (gid_info->gl_timeout_id)
3916 gid_info->gl_timeout_id = 0;
3917 break;
3918
3919 case IBDM_REQ_TYPE_IOCINFO:
3920 new_gl_state = IBDM_GID_PROBING_COMPLETE;
3921 iou = gid_info->gl_iou;
3922 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num];
3923 ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED;
3924 if (ioc->ioc_timeout_id)
3925 ioc->ioc_timeout_id = 0;
3926 break;
3927
3928 case IBDM_REQ_TYPE_SRVENTS:
3929 new_gl_state = IBDM_GID_PROBING_COMPLETE;
3930 iou = gid_info->gl_iou;
3931 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num];
3932 ioc->ioc_state = IBDM_IOC_STATE_PROBE_FAILED;
3933 srv_ent = cb_args->cb_srvents_start;
3934 if (ioc->ioc_serv[srv_ent].se_timeout_id)
3935 ioc->ioc_serv[srv_ent].se_timeout_id = 0;
3936 break;
3937
3938 case IBDM_REQ_TYPE_IOU_DIAGCODE:
3939 new_gl_state = IBDM_GID_PROBING_COMPLETE;
3940 iou = gid_info->gl_iou;
3941 iou->iou_dc_valid = B_FALSE;
3942 if (gid_info->gl_timeout_id)
3943 gid_info->gl_timeout_id = 0;
3944 break;
3945
3946 case IBDM_REQ_TYPE_IOC_DIAGCODE:
3947 new_gl_state = IBDM_GID_PROBING_COMPLETE;
3948 iou = gid_info->gl_iou;
3949 ioc = &iou->iou_ioc_info[cb_args->cb_ioc_num];
3950 ioc->ioc_dc_valid = B_FALSE;
3951 if (ioc->ioc_dc_timeout_id)
3952 ioc->ioc_dc_timeout_id = 0;
3953 break;
3954
3955 default: /* ERROR State */
3956 new_gl_state = IBDM_GID_PROBING_FAILED;
3957 if (gid_info->gl_timeout_id)
3958 gid_info->gl_timeout_id = 0;
3959 IBTF_DPRINTF_L2("ibdm",
3960 "\tpkt_timeout_hdlr: wrong request type.");
3961 break;
3962 }
3963
3964 --gid_info->gl_pending_cmds; /* decrease the counter */
3965
3966 if (gid_info->gl_pending_cmds == 0) {
3967 gid_info->gl_state = new_gl_state;
3968 mutex_exit(&gid_info->gl_mutex);
3969 /*
3970 * Delete this gid_info if the gid probe fails.
3971 */
3972 if (new_gl_state == IBDM_GID_PROBING_FAILED) {
3973 ibdm_delete_glhca_list(gid_info);
3974 }
3975 ibdm_notify_newgid_iocs(gid_info);
3976 mutex_enter(&ibdm.ibdm_mutex);
3977 if (--ibdm.ibdm_ngid_probes_in_progress == 0) {
3978 IBTF_DPRINTF_L4("ibdm", "\tpkt_timeout_hdlr: Wakeup");
3979 ibdm.ibdm_busy &= ~IBDM_PROBE_IN_PROGRESS;
3980 cv_broadcast(&ibdm.ibdm_probe_cv);
3981 }
3982 mutex_exit(&ibdm.ibdm_mutex);
3983 } else {
3984 /*
3985 * Reset gl_pending_cmds if an extra timeout happens, since
3986 * gl_pending_cmds would go negative as a result.
3987 */
3988 if (gid_info->gl_pending_cmds < 0) {
3989 gid_info->gl_pending_cmds = 0;
3990 IBTF_DPRINTF_L2("ibdm",
3991 "\tpkt_timeout_hdlr: extra timeout request."
3992 " reset gl_pending_cmds");
3993 }
3994 mutex_exit(&gid_info->gl_mutex);
3995 /*
3996 * Delete this gid_info if the gid probe fails.
3997 */
3998 if (new_gl_state == IBDM_GID_PROBING_FAILED) {
3999 ibdm_delete_glhca_list(gid_info);
4000 }
4001 }
4002 }
4003
4004
4005 /*
4006 * ibdm_retry_command()
4007 * Retries the failed command.
4008 * Returns IBDM_FAILURE/IBDM_SUCCESS
4009 */
4010 static int
4011 ibdm_retry_command(ibdm_timeout_cb_args_t *cb_args)
4012 {
4013 int ret;
4014 ibmf_msg_t *msg;
4015 ib_mad_hdr_t *hdr;
4016 ibdm_dp_gidinfo_t *gid_info = cb_args->cb_gid_info;
4017 timeout_id_t *timeout_id;
4018 ibdm_ioc_info_t *ioc;
4019 int ioc_no;
4020 ASSERT(MUTEX_HELD(&gid_info->gl_mutex));
4021
4022 IBTF_DPRINTF_L2("ibdm", "\tretry_command: gid_info: %p "
4023 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info,
4024 cb_args->cb_req_type, cb_args->cb_ioc_num,
4025 cb_args->cb_srvents_start);
4026
4027 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP, &msg);
4028
4029
4030 /*
4031 * Reset the GID if ibmf_alloc_msg() failed with IBMF_BAD_HANDLE;
4032 * ibdm_reset_gidinfo() reinitializes the gid_info
4033 */
4034 if (ret == IBMF_BAD_HANDLE) {
4035 IBTF_DPRINTF_L3(ibdm_string, "\tretry_command: gid %p hdl bad",
4036 gid_info);
4037
4038 mutex_exit(&gid_info->gl_mutex);
4039 ibdm_reset_gidinfo(gid_info);
4040 mutex_enter(&gid_info->gl_mutex);
4041
4042 /* Retry alloc */
4043 ret = ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_NOSLEEP,
4044 &msg);
4045 }
4046
4047 if (ret != IBMF_SUCCESS) {
4048 IBTF_DPRINTF_L2("ibdm", "\tretry_command: alloc failed: %p "
4049 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info,
4050 cb_args->cb_req_type, cb_args->cb_ioc_num,
4051 cb_args->cb_srvents_start);
4052 return (IBDM_FAILURE);
4053 }
4054
4055 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
4056 ibdm_alloc_send_buffers(msg);
4057 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
4058
4059 ibdm_bump_transactionID(gid_info);
4060
4061 msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
4062 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
4063 if (gid_info->gl_redirected == B_TRUE) {
4064 if (gid_info->gl_redirect_dlid != 0) {
4065 msg->im_local_addr.ia_remote_lid =
4066 gid_info->gl_redirect_dlid;
4067 }
4068 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP;
4069 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey;
4070 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey;
4071 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL;
4072 } else {
4073 msg->im_local_addr.ia_remote_qno = 1;
4074 msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
4075 msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
4076 msg->im_local_addr.ia_service_level = gid_info->gl_SL;
4077 }
4078 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
4079 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*hdr))
4080 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
4081 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
4082 hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
4083 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
4084 hdr->Status = 0;
4085 hdr->TransactionID = h2b64(gid_info->gl_transactionID);
4086
4087 switch (cb_args->cb_req_type) {
4088 case IBDM_REQ_TYPE_CLASSPORTINFO:
4089 hdr->AttributeID = h2b16(IB_DM_ATTR_CLASSPORTINFO);
4090 hdr->AttributeModifier = 0;
4091 timeout_id = &gid_info->gl_timeout_id;
4092 break;
4093 case IBDM_REQ_TYPE_IOUINFO:
4094 hdr->AttributeID = h2b16(IB_DM_ATTR_IO_UNITINFO);
4095 hdr->AttributeModifier = 0;
4096 timeout_id = &gid_info->gl_timeout_id;
4097 break;
4098 case IBDM_REQ_TYPE_IOCINFO:
4099 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE);
4100 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1);
4101 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num);
4102 timeout_id = &ioc->ioc_timeout_id;
4103 break;
4104 case IBDM_REQ_TYPE_SRVENTS:
4105 hdr->AttributeID = h2b16(IB_DM_ATTR_SERVICE_ENTRIES);
4106 ibdm_fill_srv_attr_mod(hdr, cb_args);
4107 ioc = IBDM_GIDINFO2IOCINFO(gid_info, cb_args->cb_ioc_num);
4108 timeout_id =
4109 &ioc->ioc_serv[cb_args->cb_srvents_start].se_timeout_id;
4110 break;
4111 case IBDM_REQ_TYPE_IOU_DIAGCODE:
4112 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE);
4113 hdr->AttributeModifier = 0;
4114 timeout_id = &gid_info->gl_timeout_id;
4115 break;
4116 case IBDM_REQ_TYPE_IOC_DIAGCODE:
4117 hdr->AttributeID = h2b16(IB_DM_ATTR_DIAG_CODE);
4118 hdr->AttributeModifier = h2b32(cb_args->cb_ioc_num + 1);
4119 ioc_no = cb_args->cb_ioc_num;
4120 ioc = &gid_info->gl_iou->iou_ioc_info[ioc_no];
4121 timeout_id = &ioc->ioc_dc_timeout_id;
4122 break;
4123 }
4124 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*hdr))
4125
4126 *timeout_id = timeout(ibdm_pkt_timeout_hdlr,
4127 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
4128
4129 mutex_exit(&gid_info->gl_mutex);
4130
4131 IBTF_DPRINTF_L5("ibdm", "\tretry_command: %p,%x,%d,%d:"
4132 "timeout %x", cb_args->cb_req_type, cb_args->cb_ioc_num,
4133 cb_args->cb_srvents_start, *timeout_id);
4134
4135 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl,
4136 gid_info->gl_qp_hdl, msg, NULL, ibdm_ibmf_send_cb,
4137 cb_args, 0) != IBMF_SUCCESS) {
4138 IBTF_DPRINTF_L2("ibdm", "\tretry_command: send failed: %p "
4139 "rtype 0x%x iocidx 0x%x srvidx %d", cb_args->cb_gid_info,
4140 cb_args->cb_req_type, cb_args->cb_ioc_num,
4141 cb_args->cb_srvents_start);
4142 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args);
4143 }
4144 mutex_enter(&gid_info->gl_mutex);
4145 return (IBDM_SUCCESS);
4146 }
4147
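/*
 * The timeout arm/cancel discipline used throughout this file is
 * sketched below: arm the timer while holding the per-GID lock, and
 * when cancelling, drop the lock around untimeout() (which may wait
 * for a running handler) and clear the saved id. This is an
 * illustrative sketch with hypothetical names ("my_lock", "my_tid",
 * "my_timeout_hdlr"), not a copy of any one call site.
 *
 *	mutex_enter(&my_lock);
 *	my_tid = timeout(my_timeout_hdlr, arg,
 *	    IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
 *	mutex_exit(&my_lock);
 *
 *	mutex_enter(&my_lock);
 *	if (my_tid != 0) {
 *		timeout_id_t tid = my_tid;
 *		my_tid = 0;
 *		mutex_exit(&my_lock);
 *		(void) untimeout(tid);
 *	} else {
 *		mutex_exit(&my_lock);
 *	}
 */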
4148
4149 /*
4150 * ibdm_update_ioc_port_gidlist()
4151 */
4152 static void
4153 ibdm_update_ioc_port_gidlist(ibdm_ioc_info_t *dest,
4154 ibdm_dp_gidinfo_t *gid_info)
4155 {
4156 int ii, ngid_ents;
4157 ibdm_gid_t *tmp;
4158 ibdm_hca_list_t *gid_hca_head, *temp;
4159 ibdm_hca_list_t *ioc_head = NULL;
4160 ASSERT(MUTEX_HELD(&gid_info->gl_mutex));
4161
4162 IBTF_DPRINTF_L5("ibdm", "\tupdate_ioc_port_gidlist: Enter");
4163
4164 ngid_ents = gid_info->gl_ngids;
4165 dest->ioc_nportgids = ngid_ents;
4166 dest->ioc_gid_list = kmem_zalloc(sizeof (ibdm_gid_t) *
4167 ngid_ents, KM_SLEEP);
4168 tmp = gid_info->gl_gid;
4169 for (ii = 0; (ii < ngid_ents) && (tmp); ii++) {
4170 dest->ioc_gid_list[ii].gid_dgid_hi = tmp->gid_dgid_hi;
4171 dest->ioc_gid_list[ii].gid_dgid_lo = tmp->gid_dgid_lo;
4172 tmp = tmp->gid_next;
4173 }
4174
4175 gid_hca_head = gid_info->gl_hca_list;
4176 while (gid_hca_head) {
4177 temp = ibdm_dup_hca_attr(gid_hca_head);
4178 temp->hl_next = ioc_head;
4179 ioc_head = temp;
4180 gid_hca_head = gid_hca_head->hl_next;
4181 }
4182 dest->ioc_hca_list = ioc_head;
4183 }
4184
4185
4186 /*
4187 * ibdm_alloc_send_buffers()
4188 * Allocates memory for the IBMF send buffer to send and/or receive
4189 * the Device Management MAD packet.
4190 */
4191 static void
4192 ibdm_alloc_send_buffers(ibmf_msg_t *msgp)
4193 {
4194 msgp->im_msgbufs_send.im_bufs_mad_hdr =
4195 kmem_zalloc(IBDM_MAD_SIZE, KM_SLEEP);
4196
4197 msgp->im_msgbufs_send.im_bufs_cl_hdr = (uchar_t *)
4198 msgp->im_msgbufs_send.im_bufs_mad_hdr + sizeof (ib_mad_hdr_t);
4199 msgp->im_msgbufs_send.im_bufs_cl_hdr_len = IBDM_DM_MAD_HDR_SZ;
4200
4201 msgp->im_msgbufs_send.im_bufs_cl_data =
4202 ((char *)msgp->im_msgbufs_send.im_bufs_cl_hdr + IBDM_DM_MAD_HDR_SZ);
4203 msgp->im_msgbufs_send.im_bufs_cl_data_len =
4204 IBDM_MAD_SIZE - sizeof (ib_mad_hdr_t) - IBDM_DM_MAD_HDR_SZ;
4205 }
4206
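/*
 * Layout of the send buffer carved out above, assuming the usual
 * Device Management MAD sizes (a 256-byte MAD, a 24-byte ib_mad_hdr_t
 * and a 40-byte DM class header); the exact values come from
 * IBDM_MAD_SIZE and IBDM_DM_MAD_HDR_SZ:
 *
 *	offset   0 ..  23	MAD header   (im_bufs_mad_hdr)
 *	offset  24 ..  63	class header (im_bufs_cl_hdr)
 *	offset  64 .. 255	class data   (im_bufs_cl_data, 192 bytes)
 */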
4207
4208 /*
4209 * ibdm_free_send_buffers()
4210 * De-allocates memory for the IBMF send buffer
4211 */
4212 static void
4213 ibdm_free_send_buffers(ibmf_msg_t *msgp)
4214 {
4215 if (msgp->im_msgbufs_send.im_bufs_mad_hdr != NULL)
4216 kmem_free(msgp->im_msgbufs_send.im_bufs_mad_hdr, IBDM_MAD_SIZE);
4217 }
4218
4219 /*
4220 * ibdm_probe_ioc()
4221 * 1. Gets the node records for the node GUID. This detects all the
4222 * ports of the IOU.
4223 * 2. Selectively probes all the IOCs, given the node GUID
4224 * 3. In case of reprobe, only the IOC to be reprobed is sent the IOC
4225 * Controller Profile request asynchronously
4226 */
4227 /*ARGSUSED*/
4228 static void
4229 ibdm_probe_ioc(ib_guid_t nodeguid, ib_guid_t ioc_guid, int reprobe_flag)
4230 {
4231 int ii, nrecords;
4232 size_t nr_len = 0, pi_len = 0;
4233 ib_gid_t sgid, dgid;
4234 ibdm_hca_list_t *hca_list = NULL;
4235 sa_node_record_t *nr, *tmp;
4236 ibdm_port_attr_t *port = NULL;
4237 ibdm_dp_gidinfo_t *reprobe_gid, *new_gid, *node_gid;
4238 ibdm_dp_gidinfo_t *temp_gidinfo;
4239 ibdm_gid_t *temp_gid;
4240 sa_portinfo_record_t *pi;
4241
4242 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc(%llx, %llx, %x): Begin",
4243 nodeguid, ioc_guid, reprobe_flag);
4244
4245 /* Rescan the GID list for any removed GIDs for reprobe */
4246 if (reprobe_flag)
4247 ibdm_rescan_gidlist(&ioc_guid);
4248
4249 mutex_enter(&ibdm.ibdm_hl_mutex);
4250 for (ibdm_get_next_port(&hca_list, &port, 1); port;
4251 ibdm_get_next_port(&hca_list, &port, 1)) {
4252 reprobe_gid = new_gid = node_gid = NULL;
4253
4254 nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len, nodeguid);
4255 if (nr == NULL) {
4256 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc: no records");
4257 continue;
4258 }
4259 nrecords = (nr_len / sizeof (sa_node_record_t));
4260 for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) {
4261 if ((pi = ibdm_get_portinfo(
4262 port->pa_sa_hdl, &pi_len, tmp->LID)) == NULL) {
4263 IBTF_DPRINTF_L4("ibdm",
4264 "\tibdm_get_portinfo: no portinfo recs");
4265 continue;
4266 }
4267
4268 /*
4269 * If Device Management is not supported on
4270 * this port, skip the rest.
4271 */
4272 if (!(pi->PortInfo.CapabilityMask &
4273 SM_CAP_MASK_IS_DM_SUPPD)) {
4274 kmem_free(pi, pi_len);
4275 continue;
4276 }
4277
4278 /*
4279 * For reprobes: Check if the GID is already in
4280 * the list. If so, set the state to SKIPPED
4281 */
4282 if (((temp_gidinfo = ibdm_find_gid(nodeguid,
4283 tmp->NodeInfo.PortGUID)) != NULL) &&
4284 temp_gidinfo->gl_state ==
4285 IBDM_GID_PROBING_COMPLETE) {
4286 ASSERT(reprobe_gid == NULL);
4287 ibdm_addto_glhcalist(temp_gidinfo,
4288 hca_list);
4289 reprobe_gid = temp_gidinfo;
4290 kmem_free(pi, pi_len);
4291 continue;
4292 } else if (temp_gidinfo != NULL) {
4293 kmem_free(pi, pi_len);
4294 ibdm_addto_glhcalist(temp_gidinfo,
4295 hca_list);
4296 continue;
4297 }
4298
4299 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : "
4300 "create_gid : prefix %llx, guid %llx\n",
4301 pi->PortInfo.GidPrefix,
4302 tmp->NodeInfo.PortGUID);
4303
4304 sgid.gid_prefix = port->pa_sn_prefix;
4305 sgid.gid_guid = port->pa_port_guid;
4306 dgid.gid_prefix = pi->PortInfo.GidPrefix;
4307 dgid.gid_guid = tmp->NodeInfo.PortGUID;
4308 new_gid = ibdm_create_gid_info(port, sgid,
4309 dgid);
4310 if (new_gid == NULL) {
4311 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: "
4312 "create_gid_info failed\n");
4313 kmem_free(pi, pi_len);
4314 continue;
4315 }
4316 if (node_gid == NULL) {
4317 node_gid = new_gid;
4318 ibdm_add_to_gl_gid(node_gid, node_gid);
4319 } else {
4320 IBTF_DPRINTF_L4("ibdm",
4321 "\tprobe_ioc: new gid");
4322 temp_gid = kmem_zalloc(
4323 sizeof (ibdm_gid_t), KM_SLEEP);
4324 temp_gid->gid_dgid_hi =
4325 new_gid->gl_dgid_hi;
4326 temp_gid->gid_dgid_lo =
4327 new_gid->gl_dgid_lo;
4328 temp_gid->gid_next = node_gid->gl_gid;
4329 node_gid->gl_gid = temp_gid;
4330 node_gid->gl_ngids++;
4331 }
4332 new_gid->gl_is_dm_capable = B_TRUE;
4333 new_gid->gl_nodeguid = nodeguid;
4334 new_gid->gl_portguid = dgid.gid_guid;
4335 ibdm_addto_glhcalist(new_gid, hca_list);
4336
4337 /*
4338 * Set the state to skipped as all these
4339 * gids point to the same node.
4340 * We (re)probe only one GID below and reset
4341 * state appropriately
4342 */
4343 new_gid->gl_state = IBDM_GID_PROBING_SKIPPED;
4344 new_gid->gl_devid = (*tmp).NodeInfo.DeviceID;
4345 kmem_free(pi, pi_len);
4346 }
4347 kmem_free(nr, nr_len);
4348
4349 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : reprobe_flag %d "
4350 "reprobe_gid %p new_gid %p node_gid %p",
4351 reprobe_flag, reprobe_gid, new_gid, node_gid);
4352
4353 if (reprobe_flag != 0 && reprobe_gid != NULL) {
4354 int niocs, jj;
4355 ibdm_ioc_info_t *tmp_ioc;
4356 int ioc_matched = 0;
4357
4358 mutex_exit(&ibdm.ibdm_hl_mutex);
4359 mutex_enter(&reprobe_gid->gl_mutex);
4360 reprobe_gid->gl_state = IBDM_GET_IOC_DETAILS;
4361 niocs =
4362 reprobe_gid->gl_iou->iou_info.iou_num_ctrl_slots;
4363 reprobe_gid->gl_pending_cmds++;
4364 mutex_exit(&reprobe_gid->gl_mutex);
4365
4366 for (jj = 0; jj < niocs; jj++) {
4367 tmp_ioc =
4368 IBDM_GIDINFO2IOCINFO(reprobe_gid, jj);
4369 if (tmp_ioc->ioc_profile.ioc_guid != ioc_guid)
4370 continue;
4371
4372 ioc_matched = 1;
4373
4374 /*
4375 * Explicitly set gl_reprobe_flag to 0 so that
4376 * IBnex is not notified on completion
4377 */
4378 mutex_enter(&reprobe_gid->gl_mutex);
4379 reprobe_gid->gl_reprobe_flag = 0;
4380 mutex_exit(&reprobe_gid->gl_mutex);
4381
4382 mutex_enter(&ibdm.ibdm_mutex);
4383 ibdm.ibdm_ngid_probes_in_progress++;
4384 mutex_exit(&ibdm.ibdm_mutex);
4385 if (ibdm_send_ioc_profile(reprobe_gid, jj) !=
4386 IBDM_SUCCESS) {
4387 IBTF_DPRINTF_L4("ibdm",
4388 "\tprobe_ioc: "
4389 "send_ioc_profile failed "
4390 "for ioc %d", jj);
4391 ibdm_gid_decr_pending(reprobe_gid);
4392 break;
4393 }
4394 mutex_enter(&ibdm.ibdm_mutex);
4395 ibdm_wait_probe_completion();
4396 mutex_exit(&ibdm.ibdm_mutex);
4397 break;
4398 }
4399 if (ioc_matched == 0)
4400 ibdm_gid_decr_pending(reprobe_gid);
4401 else {
4402 mutex_enter(&ibdm.ibdm_hl_mutex);
4403 break;
4404 }
4405 } else if (new_gid != NULL) {
4406 mutex_exit(&ibdm.ibdm_hl_mutex);
4407 node_gid = node_gid ? node_gid : new_gid;
4408
4409 /*
4410 * New or reinserted GID : Enable notification
4411 * to IBnex
4412 */
4413 mutex_enter(&node_gid->gl_mutex);
4414 node_gid->gl_reprobe_flag = 1;
4415 mutex_exit(&node_gid->gl_mutex);
4416
4417 ibdm_probe_gid(node_gid);
4418
4419 mutex_enter(&ibdm.ibdm_hl_mutex);
4420 }
4421 }
4422 mutex_exit(&ibdm.ibdm_hl_mutex);
4423 IBTF_DPRINTF_L4("ibdm", "\tprobe_ioc : End\n");
4424 }
4425
4426
4427 /*
4428 * ibdm_probe_gid()
4429 * Selectively probes the GID
4430 */
4431 static void
4432 ibdm_probe_gid(ibdm_dp_gidinfo_t *gid_info)
4433 {
4434 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid:");
4435
4436 /*
4437 * A Cisco FC GW needs special handling to get the IOUnitInfo.
4438 */
4439 mutex_enter(&gid_info->gl_mutex);
4440 if (ibdm_is_cisco_switch(gid_info)) {
4441 gid_info->gl_pending_cmds++;
4442 gid_info->gl_state = IBDM_SET_CLASSPORTINFO;
4443 mutex_exit(&gid_info->gl_mutex);
4444
4445 if (ibdm_set_classportinfo(gid_info) != IBDM_SUCCESS) {
4446
4447 mutex_enter(&gid_info->gl_mutex);
4448 gid_info->gl_state = IBDM_GID_PROBING_FAILED;
4449 --gid_info->gl_pending_cmds;
4450 mutex_exit(&gid_info->gl_mutex);
4451
4452 /* free the hca_list on this gid_info */
4453 ibdm_delete_glhca_list(gid_info);
4454 gid_info = gid_info->gl_next;
4455 return;
4456 }
4457
4458 mutex_enter(&gid_info->gl_mutex);
4459 ibdm_wait_cisco_probe_completion(gid_info);
4460
4461 IBTF_DPRINTF_L4("ibdm",
4462 "\tprobe_gid: CISCO Wakeup signal received");
4463 }
4464
4465 /* move on to the 'GET_CLASSPORTINFO' stage */
4466 gid_info->gl_pending_cmds++;
4467 gid_info->gl_state = IBDM_GET_CLASSPORTINFO;
4468 mutex_exit(&gid_info->gl_mutex);
4469
4470 if (ibdm_send_classportinfo(gid_info) != IBDM_SUCCESS) {
4471
4472 mutex_enter(&gid_info->gl_mutex);
4473 gid_info->gl_state = IBDM_GID_PROBING_FAILED;
4474 --gid_info->gl_pending_cmds;
4475 mutex_exit(&gid_info->gl_mutex);
4476
4477 /* free the hca_list on this gid_info */
4478 ibdm_delete_glhca_list(gid_info);
4479 gid_info = gid_info->gl_next;
4480 return;
4481 }
4482
4483 mutex_enter(&ibdm.ibdm_mutex);
4484 ibdm.ibdm_ngid_probes_in_progress++;
4485 gid_info = gid_info->gl_next;
4486 ibdm_wait_probe_completion();
4487 mutex_exit(&ibdm.ibdm_mutex);
4488
4489 IBTF_DPRINTF_L4("ibdm", "\tprobe_gid: Wakeup signal received");
4490 }
4491
4492
4493 /*
4494 * ibdm_create_gid_info()
4495 * Allocates a gid_info structure and initializes it
4496 * Returns pointer to the structure on success
4497 * and NULL on failure
4498 */
4499 static ibdm_dp_gidinfo_t *
4500 ibdm_create_gid_info(ibdm_port_attr_t *port, ib_gid_t sgid, ib_gid_t dgid)
4501 {
4502 uint8_t ii, npaths;
4503 sa_path_record_t *path;
4504 size_t len;
4505 ibdm_pkey_tbl_t *pkey_tbl;
4506 ibdm_dp_gidinfo_t *gid_info = NULL;
4507 int ret;
4508
4509 IBTF_DPRINTF_L4("ibdm", "\tcreate_gid_info: Begin");
4510 npaths = 1;
4511
4512 /* query for reversible paths */
4513 if (port->pa_sa_hdl)
4514 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl,
4515 sgid, dgid, IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0,
4516 &len, &path);
4517 else
4518 return (NULL);
4519
4520 if (ret == IBMF_SUCCESS && path) {
4521 ibdm_dump_path_info(path);
4522
4523 gid_info = kmem_zalloc(
4524 sizeof (ibdm_dp_gidinfo_t), KM_SLEEP);
4525 mutex_init(&gid_info->gl_mutex, NULL, MUTEX_DEFAULT, NULL);
4526 cv_init(&gid_info->gl_probe_cv, NULL, CV_DRIVER, NULL);
4527 gid_info->gl_dgid_hi = path->DGID.gid_prefix;
4528 gid_info->gl_dgid_lo = path->DGID.gid_guid;
4529 gid_info->gl_sgid_hi = path->SGID.gid_prefix;
4530 gid_info->gl_sgid_lo = path->SGID.gid_guid;
4531 gid_info->gl_p_key = path->P_Key;
4532 gid_info->gl_sa_hdl = port->pa_sa_hdl;
4533 gid_info->gl_ibmf_hdl = port->pa_ibmf_hdl;
4534 gid_info->gl_slid = path->SLID;
4535 gid_info->gl_dlid = path->DLID;
4536 gid_info->gl_transactionID = (++ibdm.ibdm_transactionID)
4537 << IBDM_GID_TRANSACTIONID_SHIFT;
4538 gid_info->gl_min_transactionID = gid_info->gl_transactionID;
4539 gid_info->gl_max_transactionID = (ibdm.ibdm_transactionID +1)
4540 << IBDM_GID_TRANSACTIONID_SHIFT;
4541 gid_info->gl_SL = path->SL;
4542
4543 gid_info->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT;
4544 for (ii = 0; ii < port->pa_npkeys; ii++) {
4545 if (port->pa_pkey_tbl == NULL)
4546 break;
4547
4548 pkey_tbl = &port->pa_pkey_tbl[ii];
4549 if ((gid_info->gl_p_key == pkey_tbl->pt_pkey) &&
4550 (pkey_tbl->pt_qp_hdl != NULL)) {
4551 gid_info->gl_qp_hdl = pkey_tbl->pt_qp_hdl;
4552 break;
4553 }
4554 }
4555 kmem_free(path, len);
4556
4557 /*
4558 * QP handle for GID not initialized. No matching Pkey
4559 * was found!! ibdm should *not* hit this case. Flag an
4560 * error and drop the GID if ibdm does encounter this.
4561 */
4562 if (gid_info->gl_qp_hdl == NULL) {
4563 IBTF_DPRINTF_L2(ibdm_string,
4564 "\tcreate_gid_info: No matching Pkey");
4565 ibdm_delete_gidinfo(gid_info);
4566 return (NULL);
4567 }
4568
4569 ibdm.ibdm_ngids++;
4570 if (ibdm.ibdm_dp_gidlist_head == NULL) {
4571 ibdm.ibdm_dp_gidlist_head = gid_info;
4572 ibdm.ibdm_dp_gidlist_tail = gid_info;
4573 } else {
4574 ibdm.ibdm_dp_gidlist_tail->gl_next = gid_info;
4575 gid_info->gl_prev = ibdm.ibdm_dp_gidlist_tail;
4576 ibdm.ibdm_dp_gidlist_tail = gid_info;
4577 }
4578 }
4579
4580 return (gid_info);
4581 }
4582
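/*
 * Each GID created above owns a contiguous window of transaction IDs:
 * gl_min_transactionID = N << IBDM_GID_TRANSACTIONID_SHIFT and
 * gl_max_transactionID = (N + 1) << IBDM_GID_TRANSACTIONID_SHIFT,
 * where N is the value of the global ibdm_transactionID counter taken
 * when the GID is created. Incoming responses are matched to a GID by
 * comparing the transaction ID masked with IBDM_GID_TRANSACTIONID_MASK
 * (see ibdm_process_incoming_mad()). For example, if the shift were 24,
 * GID number 5 would own IDs 0x05000000 through 0x05FFFFFF (the shift
 * value here is illustrative only).
 */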
4583
4584 /*
4585 * ibdm_get_node_records
4586 * Sends a SA query to get the NODE record
4587 * Returns pointer to the sa_node_record_t on success
4588 * and NULL on failure
4589 */
4590 static sa_node_record_t *
4591 ibdm_get_node_records(ibmf_saa_handle_t sa_hdl, size_t *length, ib_guid_t guid)
4592 {
4593 sa_node_record_t req, *resp = NULL;
4594 ibmf_saa_access_args_t args;
4595 int ret;
4596
4597 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: Begin");
4598
4599 bzero(&req, sizeof (sa_node_record_t));
4600 req.NodeInfo.NodeGUID = guid;
4601
4602 args.sq_attr_id = SA_NODERECORD_ATTRID;
4603 args.sq_access_type = IBMF_SAA_RETRIEVE;
4604 args.sq_component_mask = SA_NODEINFO_COMPMASK_NODEGUID;
4605 args.sq_template = &req;
4606 args.sq_callback = NULL;
4607 args.sq_callback_arg = NULL;
4608
4609 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp);
4610 if (ret != IBMF_SUCCESS) {
4611 IBTF_DPRINTF_L2("ibdm", "\tget_node_records:"
4612 " SA Retrieve Failed: %d", ret);
4613 return (NULL);
4614 }
4615 if ((resp == NULL) || (*length == 0)) {
4616 IBTF_DPRINTF_L2("ibdm", "\tget_node_records: No records");
4617 return (NULL);
4618 }
4619
4620 IBTF_DPRINTF_L4("ibdm", "\tget_node_records: NodeGuid %llx "
4621 "PortGUID %llx", resp->NodeInfo.NodeGUID, resp->NodeInfo.PortGUID);
4622
4623 return (resp);
4624 }
4625
4626
4627 /*
4628 * ibdm_get_portinfo()
4629 * Sends a SA query to get the PortInfo record
4630 * Returns pointer to the sa_portinfo_record_t on success
4631 * and NULL on failure
4632 */
4633 static sa_portinfo_record_t *
4634 ibdm_get_portinfo(ibmf_saa_handle_t sa_hdl, size_t *length, ib_lid_t lid)
4635 {
4636 sa_portinfo_record_t req, *resp = NULL;
4637 ibmf_saa_access_args_t args;
4638 int ret;
4639
4640 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: Begin");
4641
4642 bzero(&req, sizeof (sa_portinfo_record_t));
4643 req.EndportLID = lid;
4644
4645 args.sq_attr_id = SA_PORTINFORECORD_ATTRID;
4646 args.sq_access_type = IBMF_SAA_RETRIEVE;
4647 args.sq_component_mask = SA_PORTINFO_COMPMASK_PORTLID;
4648 args.sq_template = &req;
4649 args.sq_callback = NULL;
4650 args.sq_callback_arg = NULL;
4651
4652 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) &resp);
4653 if (ret != IBMF_SUCCESS) {
4654 IBTF_DPRINTF_L2("ibdm", "\tget_portinfo:"
4655 " SA Retrieve Failed: 0x%X", ret);
4656 return (NULL);
4657 }
4658 if ((*length == 0) || (resp == NULL))
4659 return (NULL);
4660
4661 IBTF_DPRINTF_L4("ibdm", "\tget_portinfo: GidPrefix %llx Cap 0x%x",
4662 resp->PortInfo.GidPrefix, resp->PortInfo.CapabilityMask);
4663 return (resp);
4664 }
4665
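/*
 * The buffers returned by ibdm_get_node_records() and
 * ibdm_get_portinfo() are owned by the caller and must be released
 * with kmem_free() using the returned length, as ibdm_probe_ioc()
 * does. A minimal sketch (hypothetical local names):
 *
 *	size_t			pi_len;
 *	sa_portinfo_record_t	*pi;
 *
 *	if ((pi = ibdm_get_portinfo(sa_hdl, &pi_len, lid)) != NULL) {
 *		(use pi->PortInfo.GidPrefix, CapabilityMask, ...)
 *		kmem_free(pi, pi_len);
 *	}
 */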
4666
4667 /*
4668 * ibdm_ibnex_register_callback
4669 * IB nexus callback routine for HCA attach and detach notification
4670 */
4671 void
4672 ibdm_ibnex_register_callback(ibdm_callback_t ibnex_dm_callback)
4673 {
4674 IBTF_DPRINTF_L4("ibdm", "\tibnex_register_callbacks");
4675 mutex_enter(&ibdm.ibdm_ibnex_mutex);
4676 ibdm.ibdm_ibnex_callback = ibnex_dm_callback;
4677 mutex_exit(&ibdm.ibdm_ibnex_mutex);
4678 }
4679
4680
4681 /*
4682 * ibdm_ibnex_unregister_callback
4683 */
4684 void
4685 ibdm_ibnex_unregister_callback()
4686 {
4687 IBTF_DPRINTF_L4("ibdm", "\tibnex_unregister_callbacks");
4688 mutex_enter(&ibdm.ibdm_ibnex_mutex);
4689 ibdm.ibdm_ibnex_callback = NULL;
4690 mutex_exit(&ibdm.ibdm_ibnex_mutex);
4691 }
4692
4693 /*
4694 * ibdm_get_waittime()
4695 * Calculates the wait time based on the last HCA attach time
4696 */
4697 static clock_t
4698 ibdm_get_waittime(ib_guid_t hca_guid, int dft_wait_sec)
4699 {
4700 const hrtime_t dft_wait = (hrtime_t)dft_wait_sec * NANOSEC;
4701 hrtime_t temp, wait_time = 0;
4702 clock_t usecs;
4703 int i;
4704 ibdm_hca_list_t *hca;
4705
4706 IBTF_DPRINTF_L4("ibdm", "\tget_waittime hcaguid:%llx"
4707 "\tport settling time %d", hca_guid, dft_wait);
4708
4709 ASSERT(mutex_owned(&ibdm.ibdm_hl_mutex));
4710
4711 hca = ibdm.ibdm_hca_list_head;
4712
4713 for (i = 0; i < ibdm.ibdm_hca_count; i++, hca = hca->hl_next) {
4714 if (hca->hl_nports == hca->hl_nports_active)
4715 continue;
4716
4717 if (hca_guid && (hca_guid != hca->hl_hca_guid))
4718 continue;
4719
4720 temp = gethrtime() - hca->hl_attach_time;
4721 temp = MAX(0, (dft_wait - temp));
4722
4723 if (hca_guid) {
4724 wait_time = temp;
4725 break;
4726 }
4727
4728 wait_time = MAX(temp, wait_time);
4729 }
4730
4731 /* convert to microseconds */
4732 usecs = MIN(wait_time, dft_wait) / (NANOSEC / MICROSEC);
4733
4734 IBTF_DPRINTF_L2("ibdm", "\tget_waittime: wait_time = %ld usecs",
4735 (long)usecs);
4736
4737 return (drv_usectohz(usecs));
4738 }
4739
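/*
 * A worked example of the computation above (numbers are illustrative):
 * with a port settling time of 5 seconds (dft_wait = 5 * NANOSEC) and
 * an HCA that attached 2 seconds ago (gethrtime() - hl_attach_time =
 * 2 * NANOSEC), the remaining wait is 3 * NANOSEC. That is converted
 * to 3,000,000 microseconds and finally to clock ticks with
 * drv_usectohz() before being handed to cv_reltimedwait() in
 * ibdm_ibnex_port_settle_wait() below.
 */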
4740 void
4741 ibdm_ibnex_port_settle_wait(ib_guid_t hca_guid, int dft_wait)
4742 {
4743 clock_t wait_time;
4744
4745 mutex_enter(&ibdm.ibdm_hl_mutex);
4746
4747 while ((wait_time = ibdm_get_waittime(hca_guid, dft_wait)) > 0)
4748 (void) cv_reltimedwait(&ibdm.ibdm_port_settle_cv,
4749 &ibdm.ibdm_hl_mutex, wait_time, TR_CLOCK_TICK);
4750
4751 mutex_exit(&ibdm.ibdm_hl_mutex);
4752 }
4753
4754
4755 /*
4756 * ibdm_ibnex_probe_hcaport
4757 * Probes for the presence of an HCA port (identified by HCA GUID and
4757 * port number)
4758 * Returns port attributes structure on SUCCESS
4759 */
4760 ibdm_port_attr_t *
4761 ibdm_ibnex_probe_hcaport(ib_guid_t hca_guid, uint8_t port_num)
4762 {
4763 int ii, jj;
4764 ibdm_hca_list_t *hca_list;
4765 ibdm_port_attr_t *port_attr;
4766
4767 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_hcaport:");
4768
4769 mutex_enter(&ibdm.ibdm_hl_mutex);
4770 hca_list = ibdm.ibdm_hca_list_head;
4771 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) {
4772 if (hca_list->hl_hca_guid == hca_guid) {
4773 for (jj = 0; jj < hca_list->hl_nports; jj++) {
4774 if (hca_list->hl_port_attr[jj].pa_port_num ==
4775 port_num) {
4776 break;
4777 }
4778 }
4779 if (jj != hca_list->hl_nports)
4780 break;
4781 }
4782 hca_list = hca_list->hl_next;
4783 }
4784 if (ii == ibdm.ibdm_hca_count) {
4785 IBTF_DPRINTF_L2("ibdm", "\tibnex_probe_hcaport: not found");
4786 mutex_exit(&ibdm.ibdm_hl_mutex);
4787 return (NULL);
4788 }
4789 port_attr = (ibdm_port_attr_t *)kmem_zalloc(
4790 sizeof (ibdm_port_attr_t), KM_SLEEP);
4791 bcopy((char *)&hca_list->hl_port_attr[jj],
4792 port_attr, sizeof (ibdm_port_attr_t));
4793 ibdm_update_port_attr(port_attr);
4794
4795 mutex_exit(&ibdm.ibdm_hl_mutex);
4796 return (port_attr);
4797 }
4798
4799
4800 /*
4801 * ibdm_ibnex_get_port_attrs
4802 * Scan all HCAs for a matching port_guid.
4803 * Returns "port attributes" structure on success.
4804 */
4805 ibdm_port_attr_t *
4806 ibdm_ibnex_get_port_attrs(ib_guid_t port_guid)
4807 {
4808 int ii, jj;
4809 ibdm_hca_list_t *hca_list;
4810 ibdm_port_attr_t *port_attr;
4811
4812 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_port_attrs:");
4813
4814 mutex_enter(&ibdm.ibdm_hl_mutex);
4815 hca_list = ibdm.ibdm_hca_list_head;
4816
4817 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) {
4818 for (jj = 0; jj < hca_list->hl_nports; jj++) {
4819 if (hca_list->hl_port_attr[jj].pa_port_guid ==
4820 port_guid) {
4821 break;
4822 }
4823 }
4824 if (jj != hca_list->hl_nports)
4825 break;
4826 hca_list = hca_list->hl_next;
4827 }
4828
4829 if (ii == ibdm.ibdm_hca_count) {
4830 IBTF_DPRINTF_L2("ibdm", "\tibnex_get_port_attrs: not found");
4831 mutex_exit(&ibdm.ibdm_hl_mutex);
4832 return (NULL);
4833 }
4834
4835 port_attr = (ibdm_port_attr_t *)kmem_alloc(sizeof (ibdm_port_attr_t),
4836 KM_SLEEP);
4837 bcopy((char *)&hca_list->hl_port_attr[jj], port_attr,
4838 sizeof (ibdm_port_attr_t));
4839 ibdm_update_port_attr(port_attr);
4840
4841 mutex_exit(&ibdm.ibdm_hl_mutex);
4842 return (port_attr);
4843 }
4844
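/*
 * A minimal usage sketch for the two lookup routines above; the caller
 * owns the returned structure and releases it with
 * ibdm_ibnex_free_port_attr(). "port_guid" is a hypothetical variable:
 *
 *	ibdm_port_attr_t *pa;
 *
 *	if ((pa = ibdm_ibnex_get_port_attrs(port_guid)) != NULL) {
 *		(use pa->pa_port_num, pa->pa_pkey_tbl, ...)
 *		ibdm_ibnex_free_port_attr(pa);
 *	}
 */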
4845
4846 /*
4847 * ibdm_ibnex_free_port_attr()
4848 */
4849 void
4850 ibdm_ibnex_free_port_attr(ibdm_port_attr_t *port_attr)
4851 {
4852 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_port_attr:");
4853 if (port_attr) {
4854 if (port_attr->pa_pkey_tbl != NULL) {
4855 kmem_free(port_attr->pa_pkey_tbl,
4856 (port_attr->pa_npkeys * sizeof (ibdm_pkey_tbl_t)));
4857 }
4858 kmem_free(port_attr, sizeof (ibdm_port_attr_t));
4859 }
4860 }
4861
4862
4863 /*
4864 * ibdm_ibnex_get_hca_list()
4865 * Returns port information for all the ports of all the HCAs
4866 */
4867 void
4868 ibdm_ibnex_get_hca_list(ibdm_hca_list_t **hca, int *count)
4869 {
4870 ibdm_hca_list_t *head = NULL, *temp, *temp1;
4871 int ii;
4872
4873 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_list:");
4874
4875 mutex_enter(&ibdm.ibdm_hl_mutex);
4876 temp = ibdm.ibdm_hca_list_head;
4877 for (ii = 0; ii < ibdm.ibdm_hca_count; ii++) {
4878 temp1 = ibdm_dup_hca_attr(temp);
4879 temp1->hl_next = head;
4880 head = temp1;
4881 temp = temp->hl_next;
4882 }
4883 *count = ibdm.ibdm_hca_count;
4884 *hca = head;
4885 mutex_exit(&ibdm.ibdm_hl_mutex);
4886 }
4887
4888
4889 /*
4890 * ibdm_ibnex_get_hca_info_by_guid()
4891 */
4892 ibdm_hca_list_t *
4893 ibdm_ibnex_get_hca_info_by_guid(ib_guid_t hca_guid)
4894 {
4895 ibdm_hca_list_t *head = NULL, *hca = NULL;
4896
4897 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_guid");
4898
4899 mutex_enter(&ibdm.ibdm_hl_mutex);
4900 head = ibdm.ibdm_hca_list_head;
4901 while (head) {
4902 if (head->hl_hca_guid == hca_guid) {
4903 hca = ibdm_dup_hca_attr(head);
4904 hca->hl_next = NULL;
4905 break;
4906 }
4907 head = head->hl_next;
4908 }
4909 mutex_exit(&ibdm.ibdm_hl_mutex);
4910 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_hca_info_by_guid %p", hca);
4911 return (hca);
4912 }
4913
4914
4915 /*
4916 * ibdm_dup_hca_attr()
4917 * Allocates a new HCA attribute structure, initializes it
4918 * with the incoming HCA attributes and returns the allocated
4919 * structure.
4920 */
4921 static ibdm_hca_list_t *
4922 ibdm_dup_hca_attr(ibdm_hca_list_t *in_hca)
4923 {
4924 int len;
4925 ibdm_hca_list_t *out_hca;
4926
4927 len = sizeof (ibdm_hca_list_t) +
4928 (in_hca->hl_nports * sizeof (ibdm_port_attr_t));
4929 IBTF_DPRINTF_L4("ibdm", "\tdup_hca_attr len %d", len);
4930 out_hca = (ibdm_hca_list_t *)kmem_alloc(len, KM_SLEEP);
4931 bcopy((char *)in_hca,
4932 (char *)out_hca, sizeof (ibdm_hca_list_t));
4933 if (in_hca->hl_nports) {
4934 out_hca->hl_port_attr = (ibdm_port_attr_t *)
4935 ((char *)out_hca + sizeof (ibdm_hca_list_t));
4936 bcopy((char *)in_hca->hl_port_attr,
4937 (char *)out_hca->hl_port_attr,
4938 (in_hca->hl_nports * sizeof (ibdm_port_attr_t)));
4939 for (len = 0; len < out_hca->hl_nports; len++)
4940 ibdm_update_port_attr(&out_hca->hl_port_attr[len]);
4941 }
4942 return (out_hca);
4943 }
4944
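/*
 * ibdm_dup_hca_attr() above uses a single allocation that holds the
 * ibdm_hca_list_t immediately followed by its array of ibdm_port_attr_t
 * entries; hl_port_attr simply points just past the list header.
 * ibdm_ibnex_free_hca_list() below relies on the same layout when
 * computing the size to free:
 *
 *	len = sizeof (ibdm_hca_list_t) +
 *	    (in_hca->hl_nports * sizeof (ibdm_port_attr_t));
 *	out_hca = kmem_alloc(len, KM_SLEEP);
 *	out_hca->hl_port_attr = (ibdm_port_attr_t *)
 *	    ((char *)out_hca + sizeof (ibdm_hca_list_t));
 */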
4945
4946 /*
4947 * ibdm_ibnex_free_hca_list()
4948 * Free one/more HCA lists
4949 */
4950 void
4951 ibdm_ibnex_free_hca_list(ibdm_hca_list_t *hca_list)
4952 {
4953 int ii;
4954 size_t len;
4955 ibdm_hca_list_t *temp;
4956 ibdm_port_attr_t *port;
4957
4958 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_hca_list:");
4959 ASSERT(hca_list);
4960 while (hca_list) {
4961 temp = hca_list;
4962 hca_list = hca_list->hl_next;
4963 for (ii = 0; ii < temp->hl_nports; ii++) {
4964 port = &temp->hl_port_attr[ii];
4965 len = (port->pa_npkeys * sizeof (ibdm_pkey_tbl_t));
4966 if (len != 0)
4967 kmem_free(port->pa_pkey_tbl, len);
4968 }
4969 len = sizeof (ibdm_hca_list_t) + (temp->hl_nports *
4970 sizeof (ibdm_port_attr_t));
4971 kmem_free(temp, len);
4972 }
4973 }
4974
4975
4976 /*
4977 * ibdm_ibnex_probe_ioc()
4978 * Probes the IOC on the fabric and returns the IOC information
4979 * if present. Otherwise, NULL is returned
4980 */
4981 /* ARGSUSED */
4982 ibdm_ioc_info_t *
4983 ibdm_ibnex_probe_ioc(ib_guid_t iou, ib_guid_t ioc_guid, int reprobe_flag)
4984 {
4985 int k;
4986 ibdm_ioc_info_t *ioc_info;
4987 ibdm_dp_gidinfo_t *gid_info; /* used as index and arg */
4988 timeout_id_t *timeout_id;
4989
4990 IBTF_DPRINTF_L4("ibdm", "\tibnex_probe_ioc: (%llX, %llX, %d) Begin",
4991 iou, ioc_guid, reprobe_flag);
4992
4993 if (ibdm_enumerate_iocs == 0)
4994 return (NULL);
4995
4996 /* Check whether we know this already */
4997 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info);
4998 if (ioc_info == NULL) {
4999 mutex_enter(&ibdm.ibdm_mutex);
5000 while (ibdm.ibdm_busy & IBDM_BUSY)
5001 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
5002 ibdm.ibdm_busy |= IBDM_BUSY;
5003 mutex_exit(&ibdm.ibdm_mutex);
5004 ibdm_probe_ioc(iou, ioc_guid, 0);
5005 mutex_enter(&ibdm.ibdm_mutex);
5006 ibdm.ibdm_busy &= ~IBDM_BUSY;
5007 cv_broadcast(&ibdm.ibdm_busy_cv);
5008 mutex_exit(&ibdm.ibdm_mutex);
5009 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info);
5010 } else if (reprobe_flag) { /* Handle Reprobe for the IOC */
5011 ASSERT(gid_info != NULL);
5012 /* Free the ioc_list before reprobe and cancel any timers */
5013 mutex_enter(&ibdm.ibdm_mutex);
5014 mutex_enter(&gid_info->gl_mutex);
5015 if (ioc_info->ioc_timeout_id) {
5016 timeout_id = ioc_info->ioc_timeout_id;
5017 ioc_info->ioc_timeout_id = 0;
5018 mutex_exit(&gid_info->gl_mutex);
5019 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: "
5020 "ioc_timeout_id = 0x%x", timeout_id);
5021 if (untimeout(timeout_id) == -1) {
5022 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: "
5023 "untimeout ioc_timeout_id failed");
5024 }
5025 mutex_enter(&gid_info->gl_mutex);
5026 }
5027 if (ioc_info->ioc_dc_timeout_id) {
5028 timeout_id = ioc_info->ioc_dc_timeout_id;
5029 ioc_info->ioc_dc_timeout_id = 0;
5030 mutex_exit(&gid_info->gl_mutex);
5031 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: "
5032 "ioc_dc_timeout_id = 0x%x", timeout_id);
5033 if (untimeout(timeout_id) == -1) {
5034 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: "
5035 "untimeout ioc_dc_timeout_id failed");
5036 }
5037 mutex_enter(&gid_info->gl_mutex);
5038 }
5039 for (k = 0; k < ioc_info->ioc_profile.ioc_service_entries; k++)
5040 if (ioc_info->ioc_serv[k].se_timeout_id) {
5041 timeout_id = ioc_info->ioc_serv[k].
5042 se_timeout_id;
5043 ioc_info->ioc_serv[k].se_timeout_id = 0;
5044 mutex_exit(&gid_info->gl_mutex);
5045 IBTF_DPRINTF_L5("ibdm", "\tprobe_ioc: "
5046 "ioc_info->ioc_serv[k].se_timeout_id = %x",
5047 k, timeout_id);
5048 if (untimeout(timeout_id) == -1) {
5049 IBTF_DPRINTF_L2("ibdm", "\tprobe_ioc: "
5050 "untimeout se_timeout_id %d "
5051 "failed", k);
5052 }
5053 mutex_enter(&gid_info->gl_mutex);
5054 }
5055 mutex_exit(&gid_info->gl_mutex);
5056 mutex_exit(&ibdm.ibdm_mutex);
5057 ibdm_ibnex_free_ioc_list(ioc_info);
5058
5059 mutex_enter(&ibdm.ibdm_mutex);
5060 while (ibdm.ibdm_busy & IBDM_BUSY)
5061 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
5062 ibdm.ibdm_busy |= IBDM_BUSY;
5063 mutex_exit(&ibdm.ibdm_mutex);
5064
5065 ibdm_probe_ioc(iou, ioc_guid, 1);
5066
5067 /*
5068 * Skip if gl_reprobe_flag is set; this will be
5069 * a re-inserted / new GID, for which notifications
5070 * have already been sent.
5071 */
5072 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
5073 gid_info = gid_info->gl_next) {
5074 uint8_t ii, niocs;
5075 ibdm_ioc_info_t *ioc;
5076
5077 if (gid_info->gl_iou == NULL)
5078 continue;
5079
5080 if (gid_info->gl_reprobe_flag) {
5081 gid_info->gl_reprobe_flag = 0;
5082 continue;
5083 }
5084
5085 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
5086 for (ii = 0; ii < niocs; ii++) {
5087 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii);
5088 if (ioc->ioc_profile.ioc_guid == ioc_guid) {
5089 mutex_enter(&ibdm.ibdm_mutex);
5090 ibdm_reprobe_update_port_srv(ioc,
5091 gid_info);
5092 mutex_exit(&ibdm.ibdm_mutex);
5093 }
5094 }
5095 }
5096 mutex_enter(&ibdm.ibdm_mutex);
5097 ibdm.ibdm_busy &= ~IBDM_BUSY;
5098 cv_broadcast(&ibdm.ibdm_busy_cv);
5099 mutex_exit(&ibdm.ibdm_mutex);
5100
5101 ioc_info = ibdm_get_ioc_info_with_gid(ioc_guid, &gid_info);
5102 }
5103 return (ioc_info);
5104 }
5105
5106
5107 /*
5108 * ibdm_get_ioc_info_with_gid()
5109 * Returns pointer to ibdm_ioc_info_t if it finds
5110 * matching record for the ioc_guid. Otherwise NULL is returned.
5111 * The matching gid_info pointer is stored through the second argument
5112 * when a non-NULL value is returned (and the second argument is not NULL).
5113 *
5114 * Note. use the same strings as "ibnex_get_ioc_info" in
5115 * IBTF_DPRINTF() to keep compatibility.
5116 */
5117 static ibdm_ioc_info_t *
5118 ibdm_get_ioc_info_with_gid(ib_guid_t ioc_guid,
5119 ibdm_dp_gidinfo_t **gid_info)
5120 {
5121 int ii;
5122 ibdm_ioc_info_t *ioc = NULL, *tmp = NULL;
5123 ibdm_dp_gidinfo_t *gid_list;
5124 ib_dm_io_unitinfo_t *iou;
5125
5126 IBTF_DPRINTF_L4("ibdm", "\tibnex_get_ioc_info: GUID %llx", ioc_guid);
5127
5128 mutex_enter(&ibdm.ibdm_mutex);
5129 while (ibdm.ibdm_busy & IBDM_BUSY)
5130 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
5131 ibdm.ibdm_busy |= IBDM_BUSY;
5132
5133 if (gid_info)
5134 *gid_info = NULL; /* clear the value of gid_info */
5135
5136 gid_list = ibdm.ibdm_dp_gidlist_head;
5137 while (gid_list) {
5138 mutex_enter(&gid_list->gl_mutex);
5139 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) {
5140 mutex_exit(&gid_list->gl_mutex);
5141 gid_list = gid_list->gl_next;
5142 continue;
5143 }
5144 if (gid_list->gl_iou == NULL) {
5145 IBTF_DPRINTF_L2("ibdm",
5146 "\tget_ioc_info: No IOU info");
5147 mutex_exit(&gid_list->gl_mutex);
5148 gid_list = gid_list->gl_next;
5149 continue;
5150 }
5151 iou = &gid_list->gl_iou->iou_info;
5152 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) {
5153 tmp = IBDM_GIDINFO2IOCINFO(gid_list, ii);
5154 if ((tmp->ioc_profile.ioc_guid == ioc_guid) &&
5155 (tmp->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS)) {
5156 ioc = ibdm_dup_ioc_info(tmp, gid_list);
5157 if (gid_info)
5158 *gid_info = gid_list; /* set this ptr */
5159 mutex_exit(&gid_list->gl_mutex);
5160 ibdm.ibdm_busy &= ~IBDM_BUSY;
5161 cv_broadcast(&ibdm.ibdm_busy_cv);
5162 mutex_exit(&ibdm.ibdm_mutex);
5163 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: End");
5164 return (ioc);
5165 }
5166 }
5167 if (ii == iou->iou_num_ctrl_slots)
5168 ioc = NULL;
5169
5170 mutex_exit(&gid_list->gl_mutex);
5171 gid_list = gid_list->gl_next;
5172 }
5173
5174 ibdm.ibdm_busy &= ~IBDM_BUSY;
5175 cv_broadcast(&ibdm.ibdm_busy_cv);
5176 mutex_exit(&ibdm.ibdm_mutex);
5177 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_info: failure End");
5178 return (ioc);
5179 }
5180
5181 /*
5182 * ibdm_ibnex_get_ioc_info()
5183 * Returns pointer to ibdm_ioc_info_t if it finds
5184 * matching record for the ioc_guid, otherwise NULL
5185 * is returned
5186 *
5187 * Note. this is a wrapper function to ibdm_get_ioc_info_with_gid() now.
5188 */
5189 ibdm_ioc_info_t *
5190 ibdm_ibnex_get_ioc_info(ib_guid_t ioc_guid)
5191 {
5192 if (ibdm_enumerate_iocs == 0)
5193 return (NULL);
5194
5195 /* will not use the gid_info pointer, so the second arg is NULL */
5196 return (ibdm_get_ioc_info_with_gid(ioc_guid, NULL));
5197 }
5198
5199 /*
5200 * ibdm_ibnex_get_ioc_count()
5201 * Returns number of ibdm_ioc_info_t it finds
5202 */
5203 int
5204 ibdm_ibnex_get_ioc_count(void)
5205 {
5206 int count = 0, k;
5207 ibdm_ioc_info_t *ioc;
5208 ibdm_dp_gidinfo_t *gid_list;
5209
5210 if (ibdm_enumerate_iocs == 0)
5211 return (0);
5212
5213 mutex_enter(&ibdm.ibdm_mutex);
5214 ibdm_sweep_fabric(0);
5215
5216 while (ibdm.ibdm_busy & IBDM_BUSY)
5217 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
5218 ibdm.ibdm_busy |= IBDM_BUSY;
5219
5220 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
5221 gid_list = gid_list->gl_next) {
5222 mutex_enter(&gid_list->gl_mutex);
5223 if ((gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) ||
5224 (gid_list->gl_iou == NULL)) {
5225 mutex_exit(&gid_list->gl_mutex);
5226 continue;
5227 }
5228 for (k = 0; k < gid_list->gl_iou->iou_info.iou_num_ctrl_slots;
5229 k++) {
5230 ioc = IBDM_GIDINFO2IOCINFO(gid_list, k);
5231 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS)
5232 ++count;
5233 }
5234 mutex_exit(&gid_list->gl_mutex);
5235 }
5236 ibdm.ibdm_busy &= ~IBDM_BUSY;
5237 cv_broadcast(&ibdm.ibdm_busy_cv);
5238 mutex_exit(&ibdm.ibdm_mutex);
5239
5240 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_count: count = %d", count);
5241 return (count);
5242 }
5243
5244
5245 /*
5246 * ibdm_ibnex_get_ioc_list()
5247 * Returns information about all the IOCs present on the fabric.
5248 * Reprobes the IOCs and the GID list if list_flag is set to REPROBE_ALL.
5249 * Does not sweep fabric if DONOT_PROBE is set
5250 */
5251 ibdm_ioc_info_t *
5252 ibdm_ibnex_get_ioc_list(ibdm_ibnex_get_ioclist_mtd_t list_flag)
5253 {
5254 int ii;
5255 ibdm_ioc_info_t *ioc_list = NULL, *tmp, *ioc;
5256 ibdm_dp_gidinfo_t *gid_list;
5257 ib_dm_io_unitinfo_t *iou;
5258
5259 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: Enter");
5260
5261 if (ibdm_enumerate_iocs == 0)
5262 return (NULL);
5263
5264 mutex_enter(&ibdm.ibdm_mutex);
5265 if (list_flag != IBDM_IBNEX_DONOT_PROBE)
5266 ibdm_sweep_fabric(list_flag == IBDM_IBNEX_REPROBE_ALL);
5267
5268 while (ibdm.ibdm_busy & IBDM_BUSY)
5269 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
5270 ibdm.ibdm_busy |= IBDM_BUSY;
5271
5272 gid_list = ibdm.ibdm_dp_gidlist_head;
5273 while (gid_list) {
5274 mutex_enter(&gid_list->gl_mutex);
5275 if (gid_list->gl_state != IBDM_GID_PROBING_COMPLETE) {
5276 mutex_exit(&gid_list->gl_mutex);
5277 gid_list = gid_list->gl_next;
5278 continue;
5279 }
5280 if (gid_list->gl_iou == NULL) {
5281 IBTF_DPRINTF_L2("ibdm",
5282 "\tget_ioc_list: No IOU info");
5283 mutex_exit(&gid_list->gl_mutex);
5284 gid_list = gid_list->gl_next;
5285 continue;
5286 }
5287 iou = &gid_list->gl_iou->iou_info;
5288 for (ii = 0; ii < iou->iou_num_ctrl_slots; ii++) {
5289 ioc = IBDM_GIDINFO2IOCINFO(gid_list, ii);
5290 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) {
5291 tmp = ibdm_dup_ioc_info(ioc, gid_list);
5292 tmp->ioc_next = ioc_list;
5293 ioc_list = tmp;
5294 }
5295 }
5296 mutex_exit(&gid_list->gl_mutex);
5297 gid_list = gid_list->gl_next;
5298 }
5299 ibdm.ibdm_busy &= ~IBDM_BUSY;
5300 cv_broadcast(&ibdm.ibdm_busy_cv);
5301 mutex_exit(&ibdm.ibdm_mutex);
5302
5303 IBTF_DPRINTF_L4("ibdm", "\tget_ioc_list: End");
5304 return (ioc_list);
5305 }
5306
5307 /*
5308 * ibdm_dup_ioc_info()
5309  *	Duplicate the IOC information and return a pointer to
5310  *	the duplicated IOC information.
5311 */
5312 static ibdm_ioc_info_t *
5313 ibdm_dup_ioc_info(ibdm_ioc_info_t *in_ioc, ibdm_dp_gidinfo_t *gid_list)
5314 {
5315 ibdm_ioc_info_t *out_ioc;
5316 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*out_ioc));
5317 ASSERT(MUTEX_HELD(&gid_list->gl_mutex));
5318
5319 out_ioc = kmem_alloc(sizeof (ibdm_ioc_info_t), KM_SLEEP);
5320 bcopy(in_ioc, out_ioc, sizeof (ibdm_ioc_info_t));
5321 ibdm_update_ioc_port_gidlist(out_ioc, gid_list);
5322 out_ioc->ioc_iou_dc_valid = gid_list->gl_iou->iou_dc_valid;
5323 out_ioc->ioc_iou_diagcode = gid_list->gl_iou->iou_diagcode;
5324
5325 return (out_ioc);
5326 }
5327
5328
5329 /*
5330  * ibdm_ibnex_free_ioc_list()
5331  *	Deallocate memory for the IOC list structure
5332 */
5333 void
5334 ibdm_ibnex_free_ioc_list(ibdm_ioc_info_t *ioc)
5335 {
5336 ibdm_ioc_info_t *temp;
5337
5338 IBTF_DPRINTF_L4("ibdm", "\tibnex_free_ioc_list:");
5339 while (ioc) {
5340 temp = ioc;
5341 ioc = ioc->ioc_next;
5342 kmem_free(temp->ioc_gid_list,
5343 (sizeof (ibdm_gid_t) * temp->ioc_nportgids));
5344 if (temp->ioc_hca_list)
5345 ibdm_ibnex_free_hca_list(temp->ioc_hca_list);
5346 kmem_free(temp, sizeof (ibdm_ioc_info_t));
5347 }
5348 }
5349
5350
5351 /*
5352 * ibdm_ibnex_update_pkey_tbls
5353 * Updates the DM P_Key database.
5354  *	NOTE: P_Keys being added, removed, or replaced are all handled here.
5355 *
5356 * Arguments : NONE
5357 * Return Values : NONE
5358 */
5359 void
5360 ibdm_ibnex_update_pkey_tbls(void)
5361 {
5362 int h, pp, pidx;
5363 uint_t nports;
5364 uint_t size;
5365 ib_pkey_t new_pkey;
5366 ib_pkey_t *orig_pkey;
5367 ibdm_hca_list_t *hca_list;
5368 ibdm_port_attr_t *port;
5369 ibt_hca_portinfo_t *pinfop;
5370
5371 IBTF_DPRINTF_L4("ibdm", "\tibnex_update_pkey_tbls:");
5372
5373 mutex_enter(&ibdm.ibdm_hl_mutex);
5374 hca_list = ibdm.ibdm_hca_list_head;
5375
5376 for (h = 0; h < ibdm.ibdm_hca_count; h++) {
5377
5378 /* This updates P_Key Tables for all ports of this HCA */
5379 (void) ibt_query_hca_ports(hca_list->hl_hca_hdl, 0, &pinfop,
5380 &nports, &size);
5381
5382 /* number of ports shouldn't have changed */
5383 ASSERT(nports == hca_list->hl_nports);
5384
5385 for (pp = 0; pp < hca_list->hl_nports; pp++) {
5386 port = &hca_list->hl_port_attr[pp];
5387
5388 /*
5389 * First figure out the P_Keys from IBTL.
5390 * Three things could have happened:
5391 * New P_Keys added
5392 * Existing P_Keys removed
5393 * Both of the above two
5394 *
5395  *			 Loop through the P_Key indices and check if a
5396  *			 given P_Key_Ix matches the one seen by
5397  *			 IBDM. If they match, no action is needed.
5398 *
5399 * If they don't match:
5400 * 1. if orig_pkey is invalid and new_pkey is valid
5401 * ---> add new_pkey to DM database
5402 * 2. if orig_pkey is valid and new_pkey is invalid
5403 * ---> remove orig_pkey from DM database
5404 * 3. if orig_pkey and new_pkey are both valid:
5405  *				---> remove orig_pkey from DM database
5406 * ---> add new_pkey to DM database
5407 * 4. if orig_pkey and new_pkey are both invalid:
5408  *				---> update the DM database entry anyway
5409 */
5410
5411 for (pidx = 0; pidx < port->pa_npkeys; pidx++) {
5412 new_pkey = pinfop[pp].p_pkey_tbl[pidx];
5413 orig_pkey = &port->pa_pkey_tbl[pidx].pt_pkey;
5414
5415 /* keys match - do nothing */
5416 if (*orig_pkey == new_pkey)
5417 continue;
5418
5419 if (IBDM_INVALID_PKEY(*orig_pkey) &&
5420 !IBDM_INVALID_PKEY(new_pkey)) {
5421 /* P_Key was added */
5422 IBTF_DPRINTF_L5("ibdm",
5423 "\tibnex_update_pkey_tbls: new "
5424 "P_Key added = 0x%x", new_pkey);
5425 *orig_pkey = new_pkey;
5426 ibdm_port_attr_ibmf_init(port,
5427 new_pkey, pp);
5428 } else if (!IBDM_INVALID_PKEY(*orig_pkey) &&
5429 IBDM_INVALID_PKEY(new_pkey)) {
5430 /* P_Key was removed */
5431 IBTF_DPRINTF_L5("ibdm",
5432 "\tibnex_update_pkey_tbls: P_Key "
5433 "removed = 0x%x", *orig_pkey);
5434 *orig_pkey = new_pkey;
5435 (void) ibdm_port_attr_ibmf_fini(port,
5436 pidx);
5437 } else if (!IBDM_INVALID_PKEY(*orig_pkey) &&
5438 !IBDM_INVALID_PKEY(new_pkey)) {
5439 				/* P_Key was replaced */
5440 IBTF_DPRINTF_L5("ibdm",
5441 "\tibnex_update_pkey_tbls: P_Key "
5442 "replaced 0x%x with 0x%x",
5443 *orig_pkey, new_pkey);
5444 (void) ibdm_port_attr_ibmf_fini(port,
5445 pidx);
5446 *orig_pkey = new_pkey;
5447 ibdm_port_attr_ibmf_init(port,
5448 new_pkey, pp);
5449 } else {
5450 /*
5451 * P_Keys are invalid
5452 * set anyway to reflect if
5453 * INVALID_FULL was changed to
5454 * INVALID_LIMITED or vice-versa.
5455 */
5456 *orig_pkey = new_pkey;
5457 } /* end of else */
5458
5459 } /* loop of p_key index */
5460
5461 } /* loop of #ports of HCA */
5462
5463 ibt_free_portinfo(pinfop, size);
5464 hca_list = hca_list->hl_next;
5465
5466 } /* loop for all HCAs in the system */
5467
5468 mutex_exit(&ibdm.ibdm_hl_mutex);
5469 }
5470
5471
5472 /*
5473 * ibdm_send_ioc_profile()
5474 * Send IOC Controller Profile request. When the request is completed
5475 * IBMF calls ibdm_process_incoming_mad routine to inform about
5476 * the completion.
5477 */
5478 static int
5479 ibdm_send_ioc_profile(ibdm_dp_gidinfo_t *gid_info, uint8_t ioc_no)
5480 {
5481 ibmf_msg_t *msg;
5482 ib_mad_hdr_t *hdr;
5483 ibdm_ioc_info_t *ioc_info = &(gid_info->gl_iou->iou_ioc_info[ioc_no]);
5484 ibdm_timeout_cb_args_t *cb_args;
5485
5486 IBTF_DPRINTF_L4("ibdm", "\tsend_ioc_profile: "
5487 "gid info 0x%p, ioc_no = %d", gid_info, ioc_no);
5488
5489 /*
5490 * Send command to get IOC profile.
5491 	 * Allocate an IBMF packet and initialize it.
5492 */
5493 if (ibmf_alloc_msg(gid_info->gl_ibmf_hdl, IBMF_ALLOC_SLEEP,
5494 &msg) != IBMF_SUCCESS) {
5495 IBTF_DPRINTF_L2("ibdm", "\tsend_ioc_profile: pkt alloc fail");
5496 return (IBDM_FAILURE);
5497 }
5498
5499 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*msg))
5500 ibdm_alloc_send_buffers(msg);
5501 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*msg))
5502
5503 mutex_enter(&gid_info->gl_mutex);
5504 ibdm_bump_transactionID(gid_info);
5505 mutex_exit(&gid_info->gl_mutex);
5506
5507 msg->im_local_addr.ia_local_lid = gid_info->gl_slid;
5508 msg->im_local_addr.ia_remote_lid = gid_info->gl_dlid;
5509 if (gid_info->gl_redirected == B_TRUE) {
5510 if (gid_info->gl_redirect_dlid != 0) {
5511 msg->im_local_addr.ia_remote_lid =
5512 gid_info->gl_redirect_dlid;
5513 }
5514 msg->im_local_addr.ia_remote_qno = gid_info->gl_redirect_QP;
5515 msg->im_local_addr.ia_p_key = gid_info->gl_redirect_pkey;
5516 msg->im_local_addr.ia_q_key = gid_info->gl_redirect_qkey;
5517 msg->im_local_addr.ia_service_level = gid_info->gl_redirectSL;
5518 } else {
5519 msg->im_local_addr.ia_remote_qno = 1;
5520 msg->im_local_addr.ia_p_key = gid_info->gl_p_key;
5521 msg->im_local_addr.ia_q_key = IB_GSI_QKEY;
5522 msg->im_local_addr.ia_service_level = gid_info->gl_SL;
5523 }
5524
5525 hdr = IBDM_OUT_IBMFMSG_MADHDR(msg);
5526 hdr->BaseVersion = MAD_CLASS_BASE_VERS_1;
5527 hdr->MgmtClass = MAD_MGMT_CLASS_DEV_MGT;
5528 hdr->ClassVersion = IB_DM_CLASS_VERSION_1;
5529 hdr->R_Method = IB_DM_DEVMGT_METHOD_GET;
5530 hdr->Status = 0;
5531 hdr->TransactionID = h2b64(gid_info->gl_transactionID);
5532 hdr->AttributeID = h2b16(IB_DM_ATTR_IOC_CTRL_PROFILE);
5533 hdr->AttributeModifier = h2b32(ioc_no + 1);
5534
5535 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS;
5536 cb_args = &ioc_info->ioc_cb_args;
5537 cb_args->cb_gid_info = gid_info;
5538 cb_args->cb_retry_count = ibdm_dft_retry_cnt;
5539 cb_args->cb_req_type = IBDM_REQ_TYPE_IOCINFO;
5540 cb_args->cb_ioc_num = ioc_no;
5541
5542 mutex_enter(&gid_info->gl_mutex);
5543 ioc_info->ioc_timeout_id = timeout(ibdm_pkt_timeout_hdlr,
5544 cb_args, IBDM_TIMEOUT_VALUE(ibdm_dft_timeout));
5545 mutex_exit(&gid_info->gl_mutex);
5546
5547 IBTF_DPRINTF_L5("ibdm", "\tsend_ioc_profile:"
5548 "timeout %x", ioc_info->ioc_timeout_id);
5549
5550 if (ibmf_msg_transport(gid_info->gl_ibmf_hdl, gid_info->gl_qp_hdl, msg,
5551 NULL, ibdm_ibmf_send_cb, cb_args, 0) != IBMF_SUCCESS) {
5552 IBTF_DPRINTF_L2("ibdm",
5553 "\tsend_ioc_profile: msg transport failed");
5554 ibdm_ibmf_send_cb(gid_info->gl_ibmf_hdl, msg, cb_args);
5555 }
5556 ioc_info->ioc_state = IBDM_IOC_STATE_REPROBE_PROGRESS;
5557 return (IBDM_SUCCESS);
5558 }
5559
5560
5561 /*
5562 * ibdm_port_reachable
5563 * Returns B_TRUE if the port GID is reachable by sending
5564 * a SA query to get the NODE record for this port GUID.
5565 */
5566 static boolean_t
5567 ibdm_port_reachable(ibmf_saa_handle_t sa_hdl, ib_guid_t guid)
5568 {
5569 sa_node_record_t *resp;
5570 size_t length;
5571
5572 /*
5573 * Verify if it's reachable by getting the node record.
5574 */
5575 if (ibdm_get_node_record_by_port(sa_hdl, guid, &resp, &length) ==
5576 IBDM_SUCCESS) {
5577 kmem_free(resp, length);
5578 return (B_TRUE);
5579 }
5580 return (B_FALSE);
5581 }
5582
5583 /*
5584 * ibdm_get_node_record_by_port
5585  *	Sends a SA query to get the NODE record for the port GUID.
5586 * Returns IBDM_SUCCESS if the port GID is reachable.
5587 *
5588  *	Note: the caller is responsible for freeing the returned record
5589  *	by calling kmem_free(resp, length).
5590 */
5591 static int
5592 ibdm_get_node_record_by_port(ibmf_saa_handle_t sa_hdl, ib_guid_t guid,
5593 sa_node_record_t **resp, size_t *length)
5594 {
5595 sa_node_record_t req;
5596 ibmf_saa_access_args_t args;
5597 int ret;
5598 ASSERT(resp != NULL && length != NULL);
5599
5600 IBTF_DPRINTF_L4("ibdm", "\tport_reachable: port_guid %llx",
5601 guid);
5602
5603 bzero(&req, sizeof (sa_node_record_t));
5604 req.NodeInfo.PortGUID = guid;
5605
5606 args.sq_attr_id = SA_NODERECORD_ATTRID;
5607 args.sq_access_type = IBMF_SAA_RETRIEVE;
5608 args.sq_component_mask = SA_NODEINFO_COMPMASK_PORTGUID;
5609 args.sq_template = &req;
5610 args.sq_callback = NULL;
5611 args.sq_callback_arg = NULL;
5612
5613 ret = ibmf_sa_access(sa_hdl, &args, 0, length, (void **) resp);
5614 if (ret != IBMF_SUCCESS) {
5615 IBTF_DPRINTF_L2("ibdm", "\tport_reachable:"
5616 " SA Retrieve Failed: %d", ret);
5617 return (IBDM_FAILURE);
5618 }
5619 if (*resp == NULL || *length == 0) {
5620 IBTF_DPRINTF_L2("ibdm", "\tport_reachable: No records");
5621 return (IBDM_FAILURE);
5622 }
5623 /*
5624 * There is one NodeRecord on each endport on a subnet.
5625 */
5626 ASSERT(*length == sizeof (sa_node_record_t));
5627
5628 return (IBDM_SUCCESS);
5629 }
5630
5631
5632 /*
5633  * Update the gidlist for all affected IOCs when a GID becomes
5634  * available/unavailable.
5635  *
5636  * Parameters :
5637  *	gid_info   - Incoming / Outgoing GID.
5638  *	avail_flag - 1 for GID added, 0 for GID removed,
5639  *		     (-1) for IOC gid list updated; ioc_list required.
5640  *
5641  * This function gets the gid_info for the node GUID corresponding to
5642  * the port GID and duplicates the affected IOCs from its IOU info.
5643 */
5644 static ibdm_ioc_info_t *
5645 ibdm_update_ioc_gidlist(ibdm_dp_gidinfo_t *gid_info, int avail_flag)
5646 {
5647 ibdm_dp_gidinfo_t *node_gid = NULL;
5648 uint8_t niocs, ii;
5649 ibdm_ioc_info_t *ioc, *ioc_list = NULL, *tmp;
5650
5651 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist");
5652
5653 switch (avail_flag) {
5654 case 1 :
5655 node_gid = ibdm_check_dest_nodeguid(gid_info);
5656 break;
5657 case 0 :
5658 node_gid = ibdm_handle_gid_rm(gid_info);
5659 break;
5660 case -1 :
5661 node_gid = gid_info;
5662 break;
5663 default :
5664 break;
5665 }
5666
5667 if (node_gid == NULL) {
5668 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist: "
5669 "No node GID found, port gid 0x%p, avail_flag %d",
5670 gid_info, avail_flag);
5671 return (NULL);
5672 }
5673
5674 mutex_enter(&node_gid->gl_mutex);
5675 if ((node_gid->gl_state != IBDM_GID_PROBING_COMPLETE &&
5676 node_gid->gl_state != IBDM_GID_PROBING_SKIPPED) ||
5677 node_gid->gl_iou == NULL) {
5678 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist "
5679 "gl_state %x, gl_iou %p", node_gid->gl_state,
5680 node_gid->gl_iou);
5681 mutex_exit(&node_gid->gl_mutex);
5682 return (NULL);
5683 }
5684
5685 niocs = node_gid->gl_iou->iou_info.iou_num_ctrl_slots;
5686 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : niocs %x",
5687 niocs);
5688 for (ii = 0; ii < niocs; ii++) {
5689 ioc = IBDM_GIDINFO2IOCINFO(node_gid, ii);
5690 /*
5691 		 * Skip IOCs for which the probe is not complete or
5692 		 * a reprobe is in progress
5693 */
5694 if (ioc->ioc_state == IBDM_IOC_STATE_PROBE_SUCCESS) {
5695 tmp = ibdm_dup_ioc_info(ioc, node_gid);
5696 tmp->ioc_info_updated.ib_gid_prop_updated = 1;
5697 tmp->ioc_next = ioc_list;
5698 ioc_list = tmp;
5699 }
5700 }
5701 mutex_exit(&node_gid->gl_mutex);
5702
5703 IBTF_DPRINTF_L4("ibdm", "\tupdate_ioc_gidlist : return %p",
5704 ioc_list);
5705 return (ioc_list);
5706 }
5707
5708 /*
5709 * ibdm_saa_event_cb :
5710  *	Event handling which does *not* require ibdm_hl_mutex to be
5711  *	held is executed in the same thread. This is to prevent
5712 * deadlocks with HCA port down notifications which hold the
5713 * ibdm_hl_mutex.
5714 *
5715 * GID_AVAILABLE event is handled here. A taskq is spawned to
5716 * handle GID_UNAVAILABLE.
5717 *
5718 * A new mutex ibdm_ibnex_mutex has been introduced to protect
5719 * ibnex_callback. This has been done to prevent any possible
5720 * deadlock (described above) while handling GID_AVAILABLE.
5721 *
5722  *	IBMF calls the event callback for an HCA port. The SA handle
5723  *	for this port remains valid until the callback returns, so
5724  *	calls IBDM makes into IBMF using this SA handle are valid.
5725 *
5726 * IBDM will additionally check (SA handle != NULL), before
5727 * calling IBMF.
5728 */
5729 /*ARGSUSED*/
5730 static void
5731 ibdm_saa_event_cb(ibmf_saa_handle_t ibmf_saa_handle,
5732 ibmf_saa_subnet_event_t ibmf_saa_event,
5733 ibmf_saa_event_details_t *event_details, void *callback_arg)
5734 {
5735 ibdm_saa_event_arg_t *event_arg;
5736 ib_gid_t sgid, dgid;
5737 ibdm_port_attr_t *hca_port;
5738 ibdm_dp_gidinfo_t *gid_info, *node_gid_info = NULL;
5739 sa_node_record_t *nrec;
5740 size_t length;
5741
5742 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg));
5743
5744 hca_port = (ibdm_port_attr_t *)callback_arg;
5745
5746 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_cb(%x, %x, %x, %x)\n",
5747 ibmf_saa_handle, ibmf_saa_event, event_details,
5748 callback_arg);
5749
5750 #ifdef DEBUG
5751 if (ibdm_ignore_saa_event)
5752 return;
5753 #endif
5754
5755 if (ibmf_saa_event == IBMF_SAA_EVENT_GID_AVAILABLE) {
5756 /*
5757 * Ensure no other probe / sweep fabric is in
5758 * progress.
5759 */
5760 mutex_enter(&ibdm.ibdm_mutex);
5761 while (ibdm.ibdm_busy & IBDM_BUSY)
5762 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
5763 ibdm.ibdm_busy |= IBDM_BUSY;
5764 mutex_exit(&ibdm.ibdm_mutex);
5765
5766 /*
5767 * If we already know about this GID, return.
5768 * GID_AVAILABLE may be reported for multiple HCA
5769 * ports.
5770 */
5771 if ((ibdm_check_dgid(event_details->ie_gid.gid_guid,
5772 event_details->ie_gid.gid_prefix)) != NULL) {
5773 mutex_enter(&ibdm.ibdm_mutex);
5774 ibdm.ibdm_busy &= ~IBDM_BUSY;
5775 cv_broadcast(&ibdm.ibdm_busy_cv);
5776 mutex_exit(&ibdm.ibdm_mutex);
5777 return;
5778 }
5779
5780 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) "
5781 "Insertion notified",
5782 event_details->ie_gid.gid_prefix,
5783 event_details->ie_gid.gid_guid);
5784
5785 /* This is a new gid, insert it to GID list */
5786 sgid.gid_prefix = hca_port->pa_sn_prefix;
5787 sgid.gid_guid = hca_port->pa_port_guid;
5788 dgid.gid_prefix = event_details->ie_gid.gid_prefix;
5789 dgid.gid_guid = event_details->ie_gid.gid_guid;
5790 gid_info = ibdm_create_gid_info(hca_port, sgid, dgid);
5791 if (gid_info == NULL) {
5792 IBTF_DPRINTF_L4("ibdm", "\tGID_AVAILABLE: "
5793 "create_gid_info returned NULL");
5794 mutex_enter(&ibdm.ibdm_mutex);
5795 ibdm.ibdm_busy &= ~IBDM_BUSY;
5796 cv_broadcast(&ibdm.ibdm_busy_cv);
5797 mutex_exit(&ibdm.ibdm_mutex);
5798 return;
5799 }
5800 mutex_enter(&gid_info->gl_mutex);
5801 gid_info->gl_state = IBDM_GID_PROBING_SKIPPED;
5802 mutex_exit(&gid_info->gl_mutex);
5803
5804 /* Get the node GUID */
5805 if (ibdm_get_node_record_by_port(ibmf_saa_handle, dgid.gid_guid,
5806 &nrec, &length) != IBDM_SUCCESS) {
5807 /*
5808 * Set the state to PROBE_NOT_DONE for the
5809 * next sweep to probe it
5810 */
5811 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_taskq: "
5812 "Skipping GID : port GUID not found");
5813 mutex_enter(&gid_info->gl_mutex);
5814 gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE;
5815 mutex_exit(&gid_info->gl_mutex);
5816 mutex_enter(&ibdm.ibdm_mutex);
5817 ibdm.ibdm_busy &= ~IBDM_BUSY;
5818 cv_broadcast(&ibdm.ibdm_busy_cv);
5819 mutex_exit(&ibdm.ibdm_mutex);
5820 return;
5821 }
5822 gid_info->gl_nodeguid = nrec->NodeInfo.NodeGUID;
5823 gid_info->gl_devid = nrec->NodeInfo.DeviceID;
5824 kmem_free(nrec, length);
5825 gid_info->gl_portguid = dgid.gid_guid;
5826
5827 /*
5828 * Get the gid info with the same node GUID.
5829 */
5830 mutex_enter(&ibdm.ibdm_mutex);
5831 node_gid_info = ibdm.ibdm_dp_gidlist_head;
5832 while (node_gid_info) {
5833 if (node_gid_info->gl_nodeguid ==
5834 gid_info->gl_nodeguid &&
5835 node_gid_info->gl_iou != NULL) {
5836 break;
5837 }
5838 node_gid_info = node_gid_info->gl_next;
5839 }
5840 mutex_exit(&ibdm.ibdm_mutex);
5841
5842 /*
5843 * Handling a new GID requires filling of gl_hca_list.
5844 * This require ibdm hca_list to be parsed and hence
5845 * holding the ibdm_hl_mutex. Spawning a new thread to
5846 * handle this.
5847 */
5848 if (node_gid_info == NULL) {
5849 if (taskq_dispatch(system_taskq,
5850 ibdm_saa_handle_new_gid, (void *)gid_info,
5851 TQ_NOSLEEP) == NULL) {
5852 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: "
5853 "new_gid taskq_dispatch failed");
5854 return;
5855 }
5856 }
5857
5858 mutex_enter(&ibdm.ibdm_mutex);
5859 ibdm.ibdm_busy &= ~IBDM_BUSY;
5860 cv_broadcast(&ibdm.ibdm_busy_cv);
5861 mutex_exit(&ibdm.ibdm_mutex);
5862 return;
5863 }
5864
5865 if (ibmf_saa_event != IBMF_SAA_EVENT_GID_UNAVAILABLE)
5866 return;
5867
5868 /*
5869 * GID UNAVAIL EVENT: Try to locate the GID in the GID list.
5870 * If we don't find it we just return.
5871 */
5872 mutex_enter(&ibdm.ibdm_mutex);
5873 gid_info = ibdm.ibdm_dp_gidlist_head;
5874 while (gid_info) {
5875 if (gid_info->gl_portguid ==
5876 event_details->ie_gid.gid_guid) {
5877 break;
5878 }
5879 gid_info = gid_info->gl_next;
5880 }
5881 mutex_exit(&ibdm.ibdm_mutex);
5882 if (gid_info == NULL) {
5883 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: "
5884 "GID for GUID %llX not found during GID UNAVAIL event",
5885 event_details->ie_gid.gid_guid);
5886 return;
5887 }
5888
5889 /*
5890 * If this GID is DM capable, we'll have to check whether this DGID
5891 * is reachable via another port.
5892 */
5893 if (gid_info->gl_is_dm_capable == B_TRUE) {
5894 event_arg = (ibdm_saa_event_arg_t *)kmem_alloc(
5895 sizeof (ibdm_saa_event_arg_t), KM_SLEEP);
5896 event_arg->ibmf_saa_handle = ibmf_saa_handle;
5897 event_arg->ibmf_saa_event = ibmf_saa_event;
5898 bcopy(event_details, &event_arg->event_details,
5899 sizeof (ibmf_saa_event_details_t));
5900 event_arg->callback_arg = callback_arg;
5901
5902 if (taskq_dispatch(system_taskq, ibdm_saa_event_taskq,
5903 (void *)event_arg, TQ_NOSLEEP) == NULL) {
5904 IBTF_DPRINTF_L2("ibdm", "\tsaa_event_cb: "
5905 "taskq_dispatch failed");
5906 ibdm_free_saa_event_arg(event_arg);
5907 return;
5908 }
5909 }
5910 }
5911
5912 /*
5913 * Handle a new GID discovered by GID_AVAILABLE saa event.
5914 */
5915 void
5916 ibdm_saa_handle_new_gid(void *arg)
5917 {
5918 ibdm_dp_gidinfo_t *gid_info;
5919 ibdm_hca_list_t *hca_list = NULL;
5920 ibdm_port_attr_t *port = NULL;
5921 ibdm_ioc_info_t *ioc_list = NULL;
5922
5923 IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid(%p)", arg);
5924
5925 gid_info = (ibdm_dp_gidinfo_t *)arg;
5926
5927 /*
5928 * Ensure that no other sweep / probe has completed
5929 * probing this gid.
5930 */
5931 mutex_enter(&gid_info->gl_mutex);
5932 if (gid_info->gl_state != IBDM_GID_PROBE_NOT_DONE) {
5933 mutex_exit(&gid_info->gl_mutex);
5934 return;
5935 }
5936 mutex_exit(&gid_info->gl_mutex);
5937
5938 /*
5939 * Parse HCAs to fill gl_hca_list
5940 */
5941 mutex_enter(&ibdm.ibdm_hl_mutex);
5942 for (ibdm_get_next_port(&hca_list, &port, 1); port;
5943 ibdm_get_next_port(&hca_list, &port, 1)) {
5944 if (ibdm_port_reachable(port->pa_sa_hdl,
5945 gid_info->gl_portguid) == B_TRUE) {
5946 ibdm_addto_glhcalist(gid_info, hca_list);
5947 }
5948 }
5949 mutex_exit(&ibdm.ibdm_hl_mutex);
5950
5951 /*
5952 * Ensure no other probe / sweep fabric is in
5953 * progress.
5954 */
5955 mutex_enter(&ibdm.ibdm_mutex);
5956 while (ibdm.ibdm_busy & IBDM_BUSY)
5957 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
5958 ibdm.ibdm_busy |= IBDM_BUSY;
5959 mutex_exit(&ibdm.ibdm_mutex);
5960
5961 /*
5962 	 * New IOU: probe it to check for new IOCs
5963 */
5964 IBTF_DPRINTF_L4(ibdm_string, "\tsaa_handle_new_gid: "
5965 "new GID : probing");
5966 mutex_enter(&ibdm.ibdm_mutex);
5967 ibdm.ibdm_ngid_probes_in_progress++;
5968 mutex_exit(&ibdm.ibdm_mutex);
5969 mutex_enter(&gid_info->gl_mutex);
5970 gid_info->gl_reprobe_flag = 0;
5971 gid_info->gl_state = IBDM_GID_PROBE_NOT_DONE;
5972 mutex_exit(&gid_info->gl_mutex);
5973 ibdm_probe_gid_thread((void *)gid_info);
5974
5975 mutex_enter(&ibdm.ibdm_mutex);
5976 ibdm_wait_probe_completion();
5977 mutex_exit(&ibdm.ibdm_mutex);
5978
5979 if (gid_info->gl_iou == NULL) {
5980 mutex_enter(&ibdm.ibdm_mutex);
5981 ibdm.ibdm_busy &= ~IBDM_BUSY;
5982 cv_broadcast(&ibdm.ibdm_busy_cv);
5983 mutex_exit(&ibdm.ibdm_mutex);
5984 return;
5985 }
5986
5987 /*
5988 * Update GID list in all IOCs affected by this
5989 */
5990 ioc_list = ibdm_update_ioc_gidlist(gid_info, 1);
5991
5992 /*
5993 * Pass on the IOCs with updated GIDs to IBnexus
5994 */
5995 if (ioc_list) {
5996 mutex_enter(&ibdm.ibdm_ibnex_mutex);
5997 if (ibdm.ibdm_ibnex_callback != NULL) {
5998 (*ibdm.ibdm_ibnex_callback)((void *)ioc_list,
5999 IBDM_EVENT_IOC_PROP_UPDATE);
6000 }
6001 mutex_exit(&ibdm.ibdm_ibnex_mutex);
6002 }
6003
6004 mutex_enter(&ibdm.ibdm_mutex);
6005 ibdm.ibdm_busy &= ~IBDM_BUSY;
6006 cv_broadcast(&ibdm.ibdm_busy_cv);
6007 mutex_exit(&ibdm.ibdm_mutex);
6008 }
6009
6010 /*
6011 * ibdm_saa_event_taskq :
6012 * GID_UNAVAILABLE Event handling requires ibdm_hl_mutex to be
6013 * held. The GID_UNAVAILABLE handling is done in a taskq to
6014 * prevent deadlocks with HCA port down notifications which hold
6015 * ibdm_hl_mutex.
6016 */
6017 void
6018 ibdm_saa_event_taskq(void *arg)
6019 {
6020 ibdm_saa_event_arg_t *event_arg;
6021 ibmf_saa_handle_t ibmf_saa_handle;
6022 ibmf_saa_subnet_event_t ibmf_saa_event;
6023 ibmf_saa_event_details_t *event_details;
6024 void *callback_arg;
6025
6026 ibdm_dp_gidinfo_t *gid_info;
6027 ibdm_port_attr_t *hca_port, *port = NULL;
6028 ibdm_hca_list_t *hca_list = NULL;
6029 int sa_handle_valid = 0;
6030 ibdm_ioc_info_t *ioc_list = NULL;
6031
6032 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*event_arg));
6033
6034 event_arg = (ibdm_saa_event_arg_t *)arg;
6035 ibmf_saa_handle = event_arg->ibmf_saa_handle;
6036 ibmf_saa_event = event_arg->ibmf_saa_event;
6037 event_details = &event_arg->event_details;
6038 callback_arg = event_arg->callback_arg;
6039
6040 ASSERT(callback_arg != NULL);
6041 ASSERT(ibmf_saa_event == IBMF_SAA_EVENT_GID_UNAVAILABLE);
6042 IBTF_DPRINTF_L4("ibdm", "\tsaa_event_taskq(%x, %x, %x, %x)",
6043 ibmf_saa_handle, ibmf_saa_event, event_details,
6044 callback_arg);
6045
6046 hca_port = (ibdm_port_attr_t *)callback_arg;
6047
6048 /* Check if the port_attr is still valid */
6049 mutex_enter(&ibdm.ibdm_hl_mutex);
6050 for (ibdm_get_next_port(&hca_list, &port, 0); port;
6051 ibdm_get_next_port(&hca_list, &port, 0)) {
6052 if (port == hca_port && port->pa_port_guid ==
6053 hca_port->pa_port_guid) {
6054 if (ibmf_saa_handle == hca_port->pa_sa_hdl)
6055 sa_handle_valid = 1;
6056 break;
6057 }
6058 }
6059 mutex_exit(&ibdm.ibdm_hl_mutex);
6060 if (sa_handle_valid == 0) {
6061 ibdm_free_saa_event_arg(event_arg);
6062 return;
6063 }
6064
6065 if (hca_port && (hca_port->pa_sa_hdl == NULL ||
6066 ibmf_saa_handle != hca_port->pa_sa_hdl)) {
6067 ibdm_free_saa_event_arg(event_arg);
6068 return;
6069 }
6070 hca_list = NULL;
6071 port = NULL;
6072
6073 /*
6074 * Check if the GID is visible to other HCA ports.
6075 * Return if so.
6076 */
6077 mutex_enter(&ibdm.ibdm_hl_mutex);
6078 for (ibdm_get_next_port(&hca_list, &port, 1); port;
6079 ibdm_get_next_port(&hca_list, &port, 1)) {
6080 if (ibdm_port_reachable(port->pa_sa_hdl,
6081 event_details->ie_gid.gid_guid) == B_TRUE) {
6082 mutex_exit(&ibdm.ibdm_hl_mutex);
6083 ibdm_free_saa_event_arg(event_arg);
6084 return;
6085 }
6086 }
6087 mutex_exit(&ibdm.ibdm_hl_mutex);
6088
6089 /*
6090 * Ensure no other probe / sweep fabric is in
6091 * progress.
6092 */
6093 mutex_enter(&ibdm.ibdm_mutex);
6094 while (ibdm.ibdm_busy & IBDM_BUSY)
6095 cv_wait(&ibdm.ibdm_busy_cv, &ibdm.ibdm_mutex);
6096 ibdm.ibdm_busy |= IBDM_BUSY;
6097 mutex_exit(&ibdm.ibdm_mutex);
6098
6099 /*
6100 	 * If this GID is no longer in the GID list, return.
6101 * GID_UNAVAILABLE may be reported for multiple HCA
6102 * ports.
6103 */
6104 mutex_enter(&ibdm.ibdm_mutex);
6105 gid_info = ibdm.ibdm_dp_gidlist_head;
6106 while (gid_info) {
6107 if (gid_info->gl_portguid ==
6108 event_details->ie_gid.gid_guid) {
6109 break;
6110 }
6111 gid_info = gid_info->gl_next;
6112 }
6113 mutex_exit(&ibdm.ibdm_mutex);
6114 if (gid_info == NULL) {
6115 mutex_enter(&ibdm.ibdm_mutex);
6116 ibdm.ibdm_busy &= ~IBDM_BUSY;
6117 cv_broadcast(&ibdm.ibdm_busy_cv);
6118 mutex_exit(&ibdm.ibdm_mutex);
6119 ibdm_free_saa_event_arg(event_arg);
6120 return;
6121 }
6122
6123 IBTF_DPRINTF_L4("ibdm", "\tGID (prefix %x, guid %llx) "
6124 "Unavailable notification",
6125 event_details->ie_gid.gid_prefix,
6126 event_details->ie_gid.gid_guid);
6127
6128 /*
6129 * Update GID list in all IOCs affected by this
6130 */
6131 if (gid_info->gl_state == IBDM_GID_PROBING_SKIPPED ||
6132 gid_info->gl_state == IBDM_GID_PROBING_COMPLETE)
6133 ioc_list = ibdm_update_ioc_gidlist(gid_info, 0);
6134
6135 /*
6136 * Remove GID from the global GID list
6137 * Handle the case where all port GIDs for an
6138 	 * IOU have been hot-removed. Check ngids in both
6139 	 * gid_info & ioc_info.
6140 */
6141 mutex_enter(&ibdm.ibdm_mutex);
6142 if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) {
6143 mutex_enter(&gid_info->gl_mutex);
6144 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou);
6145 mutex_exit(&gid_info->gl_mutex);
6146 }
6147 if (gid_info->gl_prev != NULL)
6148 gid_info->gl_prev->gl_next = gid_info->gl_next;
6149 if (gid_info->gl_next != NULL)
6150 gid_info->gl_next->gl_prev = gid_info->gl_prev;
6151
6152 if (gid_info == ibdm.ibdm_dp_gidlist_head)
6153 ibdm.ibdm_dp_gidlist_head = gid_info->gl_next;
6154 if (gid_info == ibdm.ibdm_dp_gidlist_tail)
6155 ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev;
6156 ibdm.ibdm_ngids--;
6157
6158 ibdm.ibdm_busy &= ~IBDM_BUSY;
6159 cv_broadcast(&ibdm.ibdm_busy_cv);
6160 mutex_exit(&ibdm.ibdm_mutex);
6161
6162 /* free the hca_list on this gid_info */
6163 ibdm_delete_glhca_list(gid_info);
6164
6165 mutex_destroy(&gid_info->gl_mutex);
6166 kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t));
6167
6168 /*
6169 * Pass on the IOCs with updated GIDs to IBnexus
6170 */
6171 if (ioc_list) {
6172 IBTF_DPRINTF_L4("ibdm", "\tGID_UNAVAILABLE "
6173 "IOC_PROP_UPDATE for %p\n", ioc_list);
6174 mutex_enter(&ibdm.ibdm_ibnex_mutex);
6175 if (ibdm.ibdm_ibnex_callback != NULL) {
6176 (*ibdm.ibdm_ibnex_callback)((void *)
6177 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE);
6178 }
6179 mutex_exit(&ibdm.ibdm_ibnex_mutex);
6180 }
6181
6182 ibdm_free_saa_event_arg(event_arg);
6183 }
6184
6185
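/*
 * ibdm_cmp_gid_list()
 *	Compare two GID lists. Returns 0 if every GID in 'new' is also
 *	present somewhere in 'prev' (the lists may be re-ordered),
 *	1 otherwise.
 */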
6186 static int
6187 ibdm_cmp_gid_list(ibdm_gid_t *new, ibdm_gid_t *prev)
6188 {
6189 ibdm_gid_t *scan_new, *scan_prev;
6190 int cmp_failed = 0;
6191
6192 ASSERT(new != NULL);
6193 ASSERT(prev != NULL);
6194
6195 /*
6196 * Search for each new gid anywhere in the prev GID list.
6197 * Note that the gid list could have been re-ordered.
6198 */
6199 for (scan_new = new; scan_new; scan_new = scan_new->gid_next) {
6200 for (scan_prev = prev, cmp_failed = 1; scan_prev;
6201 scan_prev = scan_prev->gid_next) {
6202 if (scan_prev->gid_dgid_hi == scan_new->gid_dgid_hi &&
6203 scan_prev->gid_dgid_lo == scan_new->gid_dgid_lo) {
6204 cmp_failed = 0;
6205 break;
6206 }
6207 }
6208
6209 if (cmp_failed)
6210 return (1);
6211 }
6212 return (0);
6213 }
6214
6215 /*
6216  * This is always called from a single thread.
6217  * This function updates the gid_list and serv_list of the IOC.
6218  * The current gid_list is in ioc_info_t (contains only the port
6219  * GUIDs for which the probe is done) & gidinfo_t (other port GIDs).
6220  * The GIDs in both locations are used for comparison.
6221 */
6222 static void
6223 ibdm_reprobe_update_port_srv(ibdm_ioc_info_t *ioc, ibdm_dp_gidinfo_t *gidinfo)
6224 {
6225 ibdm_gid_t *cur_gid_list;
6226 uint_t cur_nportgids;
6227
6228 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
6229
6230 ioc->ioc_info_updated.ib_prop_updated = 0;
6231
6232
6233 /* Current GID list in gid_info only */
6234 cur_gid_list = gidinfo->gl_gid;
6235 cur_nportgids = gidinfo->gl_ngids;
6236
6237 if (ioc->ioc_prev_serv_cnt !=
6238 ioc->ioc_profile.ioc_service_entries ||
6239 ibdm_serv_cmp(&ioc->ioc_serv[0], &ioc->ioc_prev_serv[0],
6240 ioc->ioc_prev_serv_cnt))
6241 ioc->ioc_info_updated.ib_srv_prop_updated = 1;
6242
6243 if (ioc->ioc_prev_nportgids != cur_nportgids ||
6244 ioc->ioc_prev_gid_list == NULL || cur_gid_list == NULL) {
6245 ioc->ioc_info_updated.ib_gid_prop_updated = 1;
6246 } else if (ibdm_cmp_gid_list(ioc->ioc_prev_gid_list, cur_gid_list)) {
6247 ioc->ioc_info_updated.ib_gid_prop_updated = 1;
6248 }
6249
6250 /* Zero out previous entries */
6251 ibdm_free_gid_list(ioc->ioc_prev_gid_list);
6252 if (ioc->ioc_prev_serv)
6253 kmem_free(ioc->ioc_prev_serv, ioc->ioc_prev_serv_cnt *
6254 sizeof (ibdm_srvents_info_t));
6255 ioc->ioc_prev_serv_cnt = 0;
6256 ioc->ioc_prev_nportgids = 0;
6257 ioc->ioc_prev_serv = NULL;
6258 ioc->ioc_prev_gid_list = NULL;
6259 }
6260
6261 /*
6262  * Handle GID removal. This returns the gid_info of a GID with the same
6263  * node GUID, if found. For a GID with IOU information, the same
6264  * gid_info is returned if no gid_info with the same node GUID is found.
6265 */
6266 static ibdm_dp_gidinfo_t *
6267 ibdm_handle_gid_rm(ibdm_dp_gidinfo_t *rm_gid)
6268 {
6269 ibdm_dp_gidinfo_t *gid_list;
6270
6271 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm(0x%p)", rm_gid);
6272
6273 if (rm_gid->gl_iou == NULL) {
6274 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm NO iou");
6275 /*
6276 * Search for a GID with same node_guid and
6277 * gl_iou != NULL
6278 */
6279 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
6280 gid_list = gid_list->gl_next) {
6281 if (gid_list->gl_iou != NULL && (gid_list->gl_nodeguid
6282 == rm_gid->gl_nodeguid))
6283 break;
6284 }
6285
6286 if (gid_list)
6287 ibdm_rmfrom_glgid_list(gid_list, rm_gid);
6288
6289 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list);
6290 return (gid_list);
6291 } else {
6292 /*
6293 * Search for a GID with same node_guid and
6294 * gl_iou == NULL
6295 */
6296 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm with iou");
6297 for (gid_list = ibdm.ibdm_dp_gidlist_head; gid_list;
6298 gid_list = gid_list->gl_next) {
6299 if (gid_list->gl_iou == NULL && (gid_list->gl_nodeguid
6300 == rm_gid->gl_nodeguid))
6301 break;
6302 }
6303
6304 if (gid_list) {
6305 /*
6306 * Copy the following fields from rm_gid :
6307 * 1. gl_state
6308 * 2. gl_iou
6309 * 3. gl_gid & gl_ngids
6310 *
6311 * Note : Function is synchronized by
6312 * ibdm_busy flag.
6313 *
6314 * Note : Redirect info is initialized if
6315 * any MADs for the GID fail
6316 */
6317 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm "
6318 "copying info to GID with gl_iou != NULl");
6319 gid_list->gl_state = rm_gid->gl_state;
6320 gid_list->gl_iou = rm_gid->gl_iou;
6321 gid_list->gl_gid = rm_gid->gl_gid;
6322 gid_list->gl_ngids = rm_gid->gl_ngids;
6323
6324 /* Remove the GID from gl_gid list */
6325 ibdm_rmfrom_glgid_list(gid_list, rm_gid);
6326 } else {
6327 /*
6328 * Handle a case where all GIDs to the IOU have
6329 * been removed.
6330 */
6331 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm 0 GID "
6332 "to IOU");
6333
6334 ibdm_rmfrom_glgid_list(rm_gid, rm_gid);
6335 return (rm_gid);
6336 }
6337 IBTF_DPRINTF_L4("ibdm", "\thandle_gid_rm ret %p", gid_list);
6338 return (gid_list);
6339 }
6340 }
6341
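/*
 * ibdm_rmfrom_glgid_list()
 *	Remove the entry matching rm_gid's DGID from gid_info's gl_gid
 *	list and decrement gl_ngids if such an entry is found.
 */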
6342 static void
6343 ibdm_rmfrom_glgid_list(ibdm_dp_gidinfo_t *gid_info,
6344 ibdm_dp_gidinfo_t *rm_gid)
6345 {
6346 ibdm_gid_t *tmp, *prev;
6347
6348 IBTF_DPRINTF_L4("ibdm", "\trmfrom_glgid (%p, %p)",
6349 gid_info, rm_gid);
6350
6351 for (tmp = gid_info->gl_gid, prev = NULL; tmp; ) {
6352 if (tmp->gid_dgid_hi == rm_gid->gl_dgid_hi &&
6353 tmp->gid_dgid_lo == rm_gid->gl_dgid_lo) {
6354 if (prev == NULL)
6355 gid_info->gl_gid = tmp->gid_next;
6356 else
6357 prev->gid_next = tmp->gid_next;
6358
6359 kmem_free(tmp, sizeof (ibdm_gid_t));
6360 gid_info->gl_ngids--;
6361 break;
6362 } else {
6363 prev = tmp;
6364 tmp = tmp->gid_next;
6365 }
6366 }
6367 }
6368
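/*
 * ibdm_addto_gidlist()
 *	Duplicate every entry of the 'dest' list and append the copies
 *	to the list headed by *src_ptr.
 */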
6369 static void
6370 ibdm_addto_gidlist(ibdm_gid_t **src_ptr, ibdm_gid_t *dest)
6371 {
6372 ibdm_gid_t *head = NULL, *new, *tail;
6373
6374 /* First copy the destination */
6375 for (; dest; dest = dest->gid_next) {
6376 new = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP);
6377 new->gid_dgid_hi = dest->gid_dgid_hi;
6378 new->gid_dgid_lo = dest->gid_dgid_lo;
6379 new->gid_next = head;
6380 head = new;
6381 }
6382
6383 /* Insert this to the source */
6384 if (*src_ptr == NULL)
6385 *src_ptr = head;
6386 else {
6387 for (tail = *src_ptr; tail->gid_next != NULL;
6388 tail = tail->gid_next)
6389 ;
6390
6391 tail->gid_next = head;
6392 }
6393 }
6394
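/*
 * ibdm_free_gid_list()
 *	Free every ibdm_gid_t entry on the list headed by 'head'.
 */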
6395 static void
6396 ibdm_free_gid_list(ibdm_gid_t *head)
6397 {
6398 ibdm_gid_t *delete;
6399
6400 for (delete = head; delete; ) {
6401 head = delete->gid_next;
6402 kmem_free(delete, sizeof (ibdm_gid_t));
6403 delete = head;
6404 }
6405 }
6406
6407 /*
6408  * This function rescans the DM capable GIDs (gl_state is
6409  * IBDM_GID_PROBING_COMPLETE or IBDM_GID_PROBING_SKIPPED). This
6410  * basically checks if the DM capable GID is reachable. If
6411  * not, this is handled the same way as GID_UNAVAILABLE,
6412  * except that notifications are not sent to IBnexus.
6413 *
6414 * This function also initializes the ioc_prev_list for
6415 * a particular IOC (when called from probe_ioc, with
6416 * ioc_guidp != NULL) or all IOCs for the gid (called from
6417 * sweep_fabric, ioc_guidp == NULL).
6418 */
6419 static void
6420 ibdm_rescan_gidlist(ib_guid_t *ioc_guidp)
6421 {
6422 ibdm_dp_gidinfo_t *gid_info, *tmp;
6423 int ii, niocs, found;
6424 ibdm_hca_list_t *hca_list = NULL;
6425 ibdm_port_attr_t *port = NULL;
6426 ibdm_ioc_info_t *ioc_list;
6427
6428 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) {
6429 found = 0;
6430 if (gid_info->gl_state != IBDM_GID_PROBING_SKIPPED &&
6431 gid_info->gl_state != IBDM_GID_PROBING_COMPLETE) {
6432 gid_info = gid_info->gl_next;
6433 continue;
6434 }
6435
6436 /*
6437 * Check if the GID is visible to any HCA ports.
6438 		 * If so, keep the GID and skip removal.
6439 */
6440 mutex_enter(&ibdm.ibdm_hl_mutex);
6441 for (ibdm_get_next_port(&hca_list, &port, 1); port;
6442 ibdm_get_next_port(&hca_list, &port, 1)) {
6443 if (ibdm_port_reachable(port->pa_sa_hdl,
6444 gid_info->gl_dgid_lo) == B_TRUE) {
6445 found = 1;
6446 break;
6447 }
6448 }
6449 mutex_exit(&ibdm.ibdm_hl_mutex);
6450
6451 if (found) {
6452 if (gid_info->gl_iou == NULL) {
6453 gid_info = gid_info->gl_next;
6454 continue;
6455 }
6456
6457 			/* Initialize the ioc_prev_gid_list */
6458 niocs =
6459 gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
6460 for (ii = 0; ii < niocs; ii++) {
6461 ioc_list = IBDM_GIDINFO2IOCINFO(gid_info, ii);
6462
6463 if (ioc_guidp == NULL || (*ioc_guidp ==
6464 ioc_list->ioc_profile.ioc_guid)) {
6465 /* Add info of GIDs in gid_info also */
6466 ibdm_addto_gidlist(
6467 &ioc_list->ioc_prev_gid_list,
6468 gid_info->gl_gid);
6469 ioc_list->ioc_prev_nportgids =
6470 gid_info->gl_ngids;
6471 }
6472 }
6473 gid_info = gid_info->gl_next;
6474 continue;
6475 }
6476
6477 IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist "
6478 "deleted port GUID %llx",
6479 gid_info->gl_dgid_lo);
6480
6481 /*
6482 * Update GID list in all IOCs affected by this
6483 */
6484 ioc_list = ibdm_update_ioc_gidlist(gid_info, 0);
6485
6486 /*
6487 * Remove GID from the global GID list
6488 * Handle the case where all port GIDs for an
6489 * IOU have been hot-removed.
6490 */
6491 mutex_enter(&ibdm.ibdm_mutex);
6492 if (gid_info->gl_iou != NULL && gid_info->gl_ngids == 0) {
6493 mutex_enter(&gid_info->gl_mutex);
6494 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_iou);
6495 mutex_exit(&gid_info->gl_mutex);
6496 }
6497
6498 tmp = gid_info->gl_next;
6499 if (gid_info->gl_prev != NULL)
6500 gid_info->gl_prev->gl_next = gid_info->gl_next;
6501 if (gid_info->gl_next != NULL)
6502 gid_info->gl_next->gl_prev = gid_info->gl_prev;
6503
6504 if (gid_info == ibdm.ibdm_dp_gidlist_head)
6505 ibdm.ibdm_dp_gidlist_head = gid_info->gl_next;
6506 if (gid_info == ibdm.ibdm_dp_gidlist_tail)
6507 ibdm.ibdm_dp_gidlist_tail = gid_info->gl_prev;
6508 ibdm.ibdm_ngids--;
6509 mutex_exit(&ibdm.ibdm_mutex);
6510
6511 /* free the hca_list on this gid_info */
6512 ibdm_delete_glhca_list(gid_info);
6513
6514 mutex_destroy(&gid_info->gl_mutex);
6515 kmem_free(gid_info, sizeof (ibdm_dp_gidinfo_t));
6516
6517 gid_info = tmp;
6518
6519 /*
6520 * Pass on the IOCs with updated GIDs to IBnexus
6521 */
6522 if (ioc_list) {
6523 IBTF_DPRINTF_L4("ibdm", "\trescan_gidlist "
6524 "IOC_PROP_UPDATE for %p\n", ioc_list);
6525 mutex_enter(&ibdm.ibdm_ibnex_mutex);
6526 if (ibdm.ibdm_ibnex_callback != NULL) {
6527 (*ibdm.ibdm_ibnex_callback)((void *)
6528 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE);
6529 }
6530 mutex_exit(&ibdm.ibdm_ibnex_mutex);
6531 }
6532 }
6533 }
6534
6535 /*
6536 * This function notifies IBnex of IOCs on this GID.
6537 * Notification is for GIDs with gl_reprobe_flag set.
6538 * The flag is set when IOC probe / fabric sweep
6539 * probes a GID starting from CLASS port info.
6540 *
6541 * IBnexus will have information of a reconnected IOC
6542 * if it had probed it before. If this is a new IOC,
6543 * IBnexus ignores the notification.
6544 *
6545 * This function should be called with no locks held.
6546 */
6547 static void
6548 ibdm_notify_newgid_iocs(ibdm_dp_gidinfo_t *gid_info)
6549 {
6550 ibdm_ioc_info_t *ioc_list;
6551
6552 if (gid_info->gl_reprobe_flag == 0 ||
6553 gid_info->gl_iou == NULL)
6554 return;
6555
6556 ioc_list = ibdm_update_ioc_gidlist(gid_info, -1);
6557
6558 /*
6559 * Pass on the IOCs with updated GIDs to IBnexus
6560 */
6561 if (ioc_list) {
6562 mutex_enter(&ibdm.ibdm_ibnex_mutex);
6563 if (ibdm.ibdm_ibnex_callback != NULL) {
6564 (*ibdm.ibdm_ibnex_callback)((void *)ioc_list,
6565 IBDM_EVENT_IOC_PROP_UPDATE);
6566 }
6567 mutex_exit(&ibdm.ibdm_ibnex_mutex);
6568 }
6569 }
6570
6571
6572 static void
6573 ibdm_free_saa_event_arg(ibdm_saa_event_arg_t *arg)
6574 {
6575 if (arg != NULL)
6576 kmem_free(arg, sizeof (ibdm_saa_event_arg_t));
6577 }
6578
6579 /*
6580 * This function parses the list of HCAs and HCA ports
6581 * to return the port_attr of the next HCA port. A port
6582 * connected to IB fabric (port_state active) is returned,
6583 * if connected_flag is set.
6584 */
6585 static void
6586 ibdm_get_next_port(ibdm_hca_list_t **inp_hcap,
6587 ibdm_port_attr_t **inp_portp, int connect_flag)
6588 {
6589 int ii;
6590 ibdm_port_attr_t *port, *next_port = NULL;
6591 ibdm_port_attr_t *inp_port;
6592 ibdm_hca_list_t *hca_list;
6593 int found = 0;
6594
6595 ASSERT(MUTEX_HELD(&ibdm.ibdm_hl_mutex));
6596 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port(%p, %p, %x)",
6597 inp_hcap, inp_portp, connect_flag);
6598
6599 hca_list = *inp_hcap;
6600 inp_port = *inp_portp;
6601
6602 if (hca_list == NULL)
6603 hca_list = ibdm.ibdm_hca_list_head;
6604
6605 for (; hca_list; hca_list = hca_list->hl_next) {
6606 for (ii = 0; ii < hca_list->hl_nports; ii++) {
6607 port = &hca_list->hl_port_attr[ii];
6608
6609 /*
6610 * inp_port != NULL;
6611 * Skip till we find the matching port
6612 */
6613 if (inp_port && !found) {
6614 if (inp_port == port)
6615 found = 1;
6616 continue;
6617 }
6618
6619 if (!connect_flag) {
6620 next_port = port;
6621 break;
6622 }
6623
6624 if (port->pa_sa_hdl == NULL)
6625 ibdm_initialize_port(port);
6626 if (port->pa_sa_hdl == NULL)
6627 (void) ibdm_fini_port(port);
6628 else if (next_port == NULL &&
6629 port->pa_sa_hdl != NULL &&
6630 port->pa_state == IBT_PORT_ACTIVE) {
6631 next_port = port;
6632 break;
6633 }
6634 }
6635
6636 if (next_port)
6637 break;
6638 }
6639
6640 IBTF_DPRINTF_L4(ibdm_string, "\tget_next_port : "
6641 "returns hca_list %p port %p", hca_list, next_port);
6642 *inp_hcap = hca_list;
6643 *inp_portp = next_port;
6644 }
6645
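/*
 * ibdm_add_to_gl_gid()
 *	Prepend the DGID of 'addgid' to the gl_gid list of 'nodegid'
 *	and bump gl_ngids, holding nodegid's gl_mutex.
 */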
6646 static void
6647 ibdm_add_to_gl_gid(ibdm_dp_gidinfo_t *nodegid, ibdm_dp_gidinfo_t *addgid)
6648 {
6649 ibdm_gid_t *tmp;
6650
6651 tmp = kmem_zalloc(sizeof (ibdm_gid_t), KM_SLEEP);
6652 tmp->gid_dgid_hi = addgid->gl_dgid_hi;
6653 tmp->gid_dgid_lo = addgid->gl_dgid_lo;
6654
6655 mutex_enter(&nodegid->gl_mutex);
6656 tmp->gid_next = nodegid->gl_gid;
6657 nodegid->gl_gid = tmp;
6658 nodegid->gl_ngids++;
6659 mutex_exit(&nodegid->gl_mutex);
6660 }
6661
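/*
 * ibdm_addto_glhcalist()
 *	Append a duplicate of 'hca' to gid_info's gl_hca_list, unless
 *	an entry with the same HCA GUID is already present.
 */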
6662 static void
6663 ibdm_addto_glhcalist(ibdm_dp_gidinfo_t *gid_info,
6664 ibdm_hca_list_t *hca)
6665 {
6666 ibdm_hca_list_t *head, *prev = NULL, *temp;
6667
6668 IBTF_DPRINTF_L4(ibdm_string, "\taddto_glhcalist(%p, %p) "
6669 ": gl_hca_list %p", gid_info, hca, gid_info->gl_hca_list);
6670 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex));
6671
6672 mutex_enter(&gid_info->gl_mutex);
6673 head = gid_info->gl_hca_list;
6674 if (head == NULL) {
6675 head = ibdm_dup_hca_attr(hca);
6676 head->hl_next = NULL;
6677 gid_info->gl_hca_list = head;
6678 mutex_exit(&gid_info->gl_mutex);
6679 IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: "
6680 "gid %p, gl_hca_list %p", gid_info,
6681 gid_info->gl_hca_list);
6682 return;
6683 }
6684
6685 /* Check if already in the list */
6686 while (head) {
6687 if (head->hl_hca_guid == hca->hl_hca_guid) {
6688 mutex_exit(&gid_info->gl_mutex);
6689 IBTF_DPRINTF_L4(ibdm_string,
6690 "\taddto_glhcalist : gid %p hca %p dup",
6691 gid_info, hca);
6692 return;
6693 }
6694 prev = head;
6695 head = head->hl_next;
6696 }
6697
6698 /* Add this HCA to gl_hca_list */
6699 temp = ibdm_dup_hca_attr(hca);
6700 temp->hl_next = NULL;
6701 prev->hl_next = temp;
6702 mutex_exit(&gid_info->gl_mutex);
6703
6704 IBTF_DPRINTF_L4(ibdm_string, "\tadd_to_glhcalist: "
6705 "gid %p, gl_hca_list %p", gid_info, gid_info->gl_hca_list);
6706 }
6707
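/*
 * ibdm_delete_glhca_list()
 *	Free the duplicated HCA list hanging off gid_info->gl_hca_list
 *	and reset the pointer.
 */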
6708 static void
6709 ibdm_delete_glhca_list(ibdm_dp_gidinfo_t *gid_info)
6710 {
6711 ASSERT(!MUTEX_HELD(&gid_info->gl_mutex));
6712 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex));
6713
6714 mutex_enter(&gid_info->gl_mutex);
6715 if (gid_info->gl_hca_list)
6716 ibdm_ibnex_free_hca_list(gid_info->gl_hca_list);
6717 gid_info->gl_hca_list = NULL;
6718 mutex_exit(&gid_info->gl_mutex);
6719 }
6720
6721
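/*
 * ibdm_reset_all_dgids()
 *	Called when an HCA port goes down. Walk the global GID list and,
 *	for every GID discovered through the SA handle of that port,
 *	either re-resolve it through another HCA port that can reach it
 *	(DM capable GIDs) or delete it.
 */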
6722 static void
6723 ibdm_reset_all_dgids(ibmf_saa_handle_t port_sa_hdl)
6724 {
6725 IBTF_DPRINTF_L4(ibdm_string, "\treset_all_dgids(%X)",
6726 port_sa_hdl);
6727
6728 if (ibdm_enumerate_iocs == 0)
6729 return;
6730
6731 ASSERT(!MUTEX_HELD(&ibdm.ibdm_mutex));
6732 ASSERT(!MUTEX_HELD(&ibdm.ibdm_hl_mutex));
6733
6734 /* Check : Not busy in another probe / sweep */
6735 mutex_enter(&ibdm.ibdm_mutex);
6736 if ((ibdm.ibdm_busy & IBDM_BUSY) == 0) {
6737 ibdm_dp_gidinfo_t *gid_info;
6738
6739 ibdm.ibdm_busy |= IBDM_BUSY;
6740 mutex_exit(&ibdm.ibdm_mutex);
6741
6742 /*
6743 * Check if any GID is using the SA & IBMF handle
6744 * of HCA port going down. Reset ibdm_dp_gidinfo_t
6745 * using another HCA port which can reach the GID.
6746 * This is for DM capable GIDs only, no need to do
6747 * this for others
6748 *
6749 * Delete the GID if no alternate HCA port to reach
6750 * it is found.
6751 */
6752 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info; ) {
6753 ibdm_dp_gidinfo_t *tmp;
6754
6755 IBTF_DPRINTF_L4(ibdm_string, "\tevent_hdlr "
6756 "checking gidinfo %p", gid_info);
6757
6758 if (gid_info->gl_sa_hdl == port_sa_hdl) {
6759 IBTF_DPRINTF_L3(ibdm_string,
6760 "\tevent_hdlr: down HCA port hdl "
6761 "matches gid %p", gid_info);
6762
6763 /*
6764 * The non-DM GIDs can come back
6765 * with a new subnet prefix, when
6766 				 * the HCA port comes up again. To
6767 * avoid issues, delete non-DM
6768 * capable GIDs, if the gid was
6769 * discovered using the HCA port
6770 * going down. This is ensured by
6771 * setting gl_disconnected to 1.
6772 */
6773 if (gid_info->gl_is_dm_capable == B_FALSE)
6774 gid_info->gl_disconnected = 1;
6775 else
6776 ibdm_reset_gidinfo(gid_info);
6777
6778 if (gid_info->gl_disconnected) {
6779 IBTF_DPRINTF_L3(ibdm_string,
6780 "\tevent_hdlr: deleting"
6781 " gid %p", gid_info);
6782 tmp = gid_info;
6783 gid_info = gid_info->gl_next;
6784 ibdm_delete_gidinfo(tmp);
6785 } else
6786 gid_info = gid_info->gl_next;
6787 } else
6788 gid_info = gid_info->gl_next;
6789 }
6790
6791 mutex_enter(&ibdm.ibdm_mutex);
6792 ibdm.ibdm_busy &= ~IBDM_BUSY;
6793 cv_signal(&ibdm.ibdm_busy_cv);
6794 }
6795 mutex_exit(&ibdm.ibdm_mutex);
6796 }
6797
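/*
 * ibdm_reset_gidinfo()
 *	Re-populate the path information, SA/IBMF handles and QP handle
 *	of 'gidinfo' using any other active local HCA port that can
 *	reach it. If no such port is found, mark the GID disconnected.
 */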
6798 static void
6799 ibdm_reset_gidinfo(ibdm_dp_gidinfo_t *gidinfo)
6800 {
6801 ibdm_hca_list_t *hca_list = NULL;
6802 ibdm_port_attr_t *port = NULL;
6803 int gid_reinited = 0;
6804 sa_node_record_t *nr, *tmp;
6805 sa_portinfo_record_t *pi;
6806 size_t nr_len = 0, pi_len = 0;
6807 size_t path_len;
6808 ib_gid_t sgid, dgid;
6809 int ret, ii, nrecords;
6810 sa_path_record_t *path;
6811 uint8_t npaths = 1;
6812 ibdm_pkey_tbl_t *pkey_tbl;
6813
6814 IBTF_DPRINTF_L4(ibdm_string, "\treset_gidinfo(%p)", gidinfo);
6815
6816 /*
6817 * Get list of all the ports reachable from the local known HCA
6818 * ports which are active
6819 */
6820 mutex_enter(&ibdm.ibdm_hl_mutex);
6821 for (ibdm_get_next_port(&hca_list, &port, 1); port;
6822 ibdm_get_next_port(&hca_list, &port, 1)) {
6823
6824
6825 /*
6826 * Get the path and re-populate the gidinfo.
6827 		 * Getting the path is the same as in probe_ioc.
6828 		 * Init the gid info as in ibdm_create_gid_info().
6829 */
6830 nr = ibdm_get_node_records(port->pa_sa_hdl, &nr_len,
6831 gidinfo->gl_nodeguid);
6832 if (nr == NULL) {
6833 IBTF_DPRINTF_L4(ibdm_string,
6834 "\treset_gidinfo : no records");
6835 continue;
6836 }
6837
6838 nrecords = (nr_len / sizeof (sa_node_record_t));
6839 for (tmp = nr, ii = 0; (ii < nrecords); ii++, tmp++) {
6840 if (tmp->NodeInfo.PortGUID == gidinfo->gl_portguid)
6841 break;
6842 }
6843
6844 if (ii == nrecords) {
6845 IBTF_DPRINTF_L4(ibdm_string,
6846 "\treset_gidinfo : no record for portguid");
6847 kmem_free(nr, nr_len);
6848 continue;
6849 }
6850
6851 pi = ibdm_get_portinfo(port->pa_sa_hdl, &pi_len, tmp->LID);
6852 if (pi == NULL) {
6853 IBTF_DPRINTF_L4(ibdm_string,
6854 "\treset_gidinfo : no portinfo");
6855 kmem_free(nr, nr_len);
6856 continue;
6857 }
6858
6859 sgid.gid_prefix = port->pa_sn_prefix;
6860 sgid.gid_guid = port->pa_port_guid;
6861 dgid.gid_prefix = pi->PortInfo.GidPrefix;
6862 dgid.gid_guid = tmp->NodeInfo.PortGUID;
6863
6864 ret = ibmf_saa_gid_to_pathrecords(port->pa_sa_hdl, sgid, dgid,
6865 IBMF_SAA_PKEY_WC, 0, B_TRUE, &npaths, 0, &path_len, &path);
6866
6867 if ((ret != IBMF_SUCCESS) || path == NULL) {
6868 IBTF_DPRINTF_L4(ibdm_string,
6869 "\treset_gidinfo : no paths");
6870 kmem_free(pi, pi_len);
6871 kmem_free(nr, nr_len);
6872 continue;
6873 }
6874
6875 gidinfo->gl_dgid_hi = path->DGID.gid_prefix;
6876 gidinfo->gl_dgid_lo = path->DGID.gid_guid;
6877 gidinfo->gl_sgid_hi = path->SGID.gid_prefix;
6878 gidinfo->gl_sgid_lo = path->SGID.gid_guid;
6879 gidinfo->gl_p_key = path->P_Key;
6880 gidinfo->gl_sa_hdl = port->pa_sa_hdl;
6881 gidinfo->gl_ibmf_hdl = port->pa_ibmf_hdl;
6882 gidinfo->gl_slid = path->SLID;
6883 gidinfo->gl_dlid = path->DLID;
6884 /* Reset redirect info, next MAD will set if redirected */
6885 gidinfo->gl_redirected = 0;
6886 gidinfo->gl_devid = (*tmp).NodeInfo.DeviceID;
6887 gidinfo->gl_SL = path->SL;
6888
6889 gidinfo->gl_qp_hdl = IBMF_QP_HANDLE_DEFAULT;
6890 for (ii = 0; ii < port->pa_npkeys; ii++) {
6891 if (port->pa_pkey_tbl == NULL)
6892 break;
6893
6894 pkey_tbl = &port->pa_pkey_tbl[ii];
6895 if ((gidinfo->gl_p_key == pkey_tbl->pt_pkey) &&
6896 (pkey_tbl->pt_qp_hdl != NULL)) {
6897 gidinfo->gl_qp_hdl = pkey_tbl->pt_qp_hdl;
6898 break;
6899 }
6900 }
6901
6902 if (gidinfo->gl_qp_hdl == NULL)
6903 IBTF_DPRINTF_L2(ibdm_string,
6904 "\treset_gid_info: No matching Pkey");
6905 else
6906 gid_reinited = 1;
6907
6908 kmem_free(path, path_len);
6909 kmem_free(pi, pi_len);
6910 kmem_free(nr, nr_len);
6911 break;
6912 }
6913 mutex_exit(&ibdm.ibdm_hl_mutex);
6914
6915 if (!gid_reinited)
6916 gidinfo->gl_disconnected = 1;
6917 }
6918
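/*
 * ibdm_delete_gidinfo()
 *	Unlink 'gidinfo' from the global GID list (if present), free its
 *	IOU info (when no port GIDs remain) and HCA list, destroy it and
 *	notify IB nexus of any affected IOCs.
 */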
6919 static void
6920 ibdm_delete_gidinfo(ibdm_dp_gidinfo_t *gidinfo)
6921 {
6922 ibdm_ioc_info_t *ioc_list;
6923 int in_gidlist = 0;
6924
6925 /*
6926 * Check if gidinfo has been inserted into the
6927 * ibdm_dp_gidlist_head list. gl_next or gl_prev
6928 	 * is != NULL if gidinfo is in the list.
6929 */
6930 if (gidinfo->gl_prev != NULL ||
6931 gidinfo->gl_next != NULL ||
6932 ibdm.ibdm_dp_gidlist_head == gidinfo)
6933 in_gidlist = 1;
6934
6935 ioc_list = ibdm_update_ioc_gidlist(gidinfo, 0);
6936
6937 /*
6938 * Remove GID from the global GID list
6939 * Handle the case where all port GIDs for an
6940 * IOU have been hot-removed.
6941 */
6942 mutex_enter(&ibdm.ibdm_mutex);
6943 if (gidinfo->gl_iou != NULL && gidinfo->gl_ngids == 0) {
6944 mutex_enter(&gidinfo->gl_mutex);
6945 (void) ibdm_free_iou_info(gidinfo, &gidinfo->gl_iou);
6946 mutex_exit(&gidinfo->gl_mutex);
6947 }
6948
6949 /* Delete gl_hca_list */
6950 mutex_exit(&ibdm.ibdm_mutex);
6951 ibdm_delete_glhca_list(gidinfo);
6952 mutex_enter(&ibdm.ibdm_mutex);
6953
6954 if (in_gidlist) {
6955 if (gidinfo->gl_prev != NULL)
6956 gidinfo->gl_prev->gl_next = gidinfo->gl_next;
6957 if (gidinfo->gl_next != NULL)
6958 gidinfo->gl_next->gl_prev = gidinfo->gl_prev;
6959
6960 if (gidinfo == ibdm.ibdm_dp_gidlist_head)
6961 ibdm.ibdm_dp_gidlist_head = gidinfo->gl_next;
6962 if (gidinfo == ibdm.ibdm_dp_gidlist_tail)
6963 ibdm.ibdm_dp_gidlist_tail = gidinfo->gl_prev;
6964 ibdm.ibdm_ngids--;
6965 }
6966 mutex_exit(&ibdm.ibdm_mutex);
6967
6968 mutex_destroy(&gidinfo->gl_mutex);
6969 cv_destroy(&gidinfo->gl_probe_cv);
6970 kmem_free(gidinfo, sizeof (ibdm_dp_gidinfo_t));
6971
6972 /*
6973 * Pass on the IOCs with updated GIDs to IBnexus
6974 */
6975 if (ioc_list) {
6976 IBTF_DPRINTF_L4("ibdm", "\tdelete_gidinfo "
6977 "IOC_PROP_UPDATE for %p\n", ioc_list);
6978 mutex_enter(&ibdm.ibdm_ibnex_mutex);
6979 if (ibdm.ibdm_ibnex_callback != NULL) {
6980 (*ibdm.ibdm_ibnex_callback)((void *)
6981 ioc_list, IBDM_EVENT_IOC_PROP_UPDATE);
6982 }
6983 mutex_exit(&ibdm.ibdm_ibnex_mutex);
6984 }
6985 }
6986
6987
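/*
 * ibdm_fill_srv_attr_mod()
 *	Encode the attribute modifier for a ServiceEntries request:
 *	bits 31:16 carry the 1-based IOC slot number, bits 15:8 the last
 *	service entry index and bits 7:0 the first service entry index.
 *	For example, requesting entries 0..3 of IOC slot 0 yields an
 *	attribute modifier of 0x00010300.
 */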
6988 static void
6989 ibdm_fill_srv_attr_mod(ib_mad_hdr_t *hdr, ibdm_timeout_cb_args_t *cb_args)
6990 {
6991 uint32_t attr_mod;
6992
6993 attr_mod = (cb_args->cb_ioc_num + 1) << 16;
6994 attr_mod |= cb_args->cb_srvents_start;
6995 attr_mod |= (cb_args->cb_srvents_end) << 8;
6996 hdr->AttributeModifier = h2b32(attr_mod);
6997 }
6998
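/*
 * ibdm_bump_transactionID()
 *	Advance the transaction ID used in DM MADs for this GID. Each
 *	GID owns the range [gl_min_transactionID, gl_max_transactionID);
 *	on reaching the upper bound the ID wraps back to the minimum.
 *	The caller must hold gl_mutex.
 */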
6999 static void
7000 ibdm_bump_transactionID(ibdm_dp_gidinfo_t *gid_info)
7001 {
7002 ASSERT(MUTEX_HELD(&gid_info->gl_mutex));
7003 gid_info->gl_transactionID++;
7004 if (gid_info->gl_transactionID == gid_info->gl_max_transactionID) {
7005 IBTF_DPRINTF_L4(ibdm_string,
7006 "\tbump_transactionID(%p), wrapup", gid_info);
7007 gid_info->gl_transactionID = gid_info->gl_min_transactionID;
7008 }
7009 }
7010
7011 /*
7012  * gl_prev_iou is set for *non-reprobe* sweep requests, which
7013  * detected that the ChangeID in the IOU info has changed. The service
7014  * entries may also have changed. Check if the service entries in the IOC
7015  * have changed wrt the previous IOU; if so, notify IB nexus.
7016 */
7017 static ibdm_ioc_info_t *
7018 ibdm_handle_prev_iou()
7019 {
7020 ibdm_dp_gidinfo_t *gid_info;
7021 ibdm_ioc_info_t *ioc_list_head = NULL, *ioc_list;
7022 ibdm_ioc_info_t *prev_ioc, *ioc;
7023 int ii, jj, niocs, prev_niocs;
7024
7025 ASSERT(MUTEX_HELD(&ibdm.ibdm_mutex));
7026
7027 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou enter");
7028 for (gid_info = ibdm.ibdm_dp_gidlist_head; gid_info;
7029 gid_info = gid_info->gl_next) {
7030 if (gid_info->gl_prev_iou == NULL)
7031 continue;
7032
7033 IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou gid %p",
7034 gid_info);
7035 niocs = gid_info->gl_iou->iou_info.iou_num_ctrl_slots;
7036 prev_niocs =
7037 gid_info->gl_prev_iou->iou_info.iou_num_ctrl_slots;
7038 for (ii = 0; ii < niocs; ii++) {
7039 ioc = IBDM_GIDINFO2IOCINFO(gid_info, ii);
7040
7041 /* Find matching IOC */
7042 for (jj = 0; jj < prev_niocs; jj++) {
7043 prev_ioc = (ibdm_ioc_info_t *)
7044 &gid_info->gl_prev_iou->iou_ioc_info[jj];
7045 if (prev_ioc->ioc_profile.ioc_guid ==
7046 ioc->ioc_profile.ioc_guid)
7047 break;
7048 }
7049 if (jj == prev_niocs)
7050 prev_ioc = NULL;
7051 if (ioc == NULL || prev_ioc == NULL)
7052 continue;
7053 if ((ioc->ioc_profile.ioc_service_entries !=
7054 prev_ioc->ioc_profile.ioc_service_entries) ||
7055 ibdm_serv_cmp(&ioc->ioc_serv[0],
7056 &prev_ioc->ioc_serv[0],
7057 ioc->ioc_profile.ioc_service_entries) != 0) {
7058 IBTF_DPRINTF_L4(ibdm_string,
7059 "/thandle_prev_iou modified IOC: "
7060 "current ioc %p, old ioc %p",
7061 ioc, prev_ioc);
7062 mutex_enter(&gid_info->gl_mutex);
7063 ioc_list = ibdm_dup_ioc_info(ioc, gid_info);
7064 mutex_exit(&gid_info->gl_mutex);
7065 ioc_list->ioc_info_updated.ib_prop_updated
7066 = 0;
7067 ioc_list->ioc_info_updated.ib_srv_prop_updated
7068 = 1;
7069
7070 if (ioc_list_head == NULL)
7071 ioc_list_head = ioc_list;
7072 else {
7073 ioc_list_head->ioc_next = ioc_list;
7074 ioc_list_head = ioc_list;
7075 }
7076 }
7077 }
7078
7079 mutex_enter(&gid_info->gl_mutex);
7080 (void) ibdm_free_iou_info(gid_info, &gid_info->gl_prev_iou);
7081 mutex_exit(&gid_info->gl_mutex);
7082 }
7083 	IBTF_DPRINTF_L4(ibdm_string, "\thandle_prev_iou ret %p",
7084 ioc_list_head);
7085 return (ioc_list_head);
7086 }
7087
7088 /*
7089 * Compares two service entries lists, returns 0 if same, returns 1
7090 * if no match.
7091 */
7092 static int
7093 ibdm_serv_cmp(ibdm_srvents_info_t *serv1, ibdm_srvents_info_t *serv2,
7094 int nserv)
7095 {
7096 int ii;
7097
7098 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: enter");
7099 for (ii = 0; ii < nserv; ii++, serv1++, serv2++) {
7100 if (serv1->se_attr.srv_id != serv2->se_attr.srv_id ||
7101 bcmp(serv1->se_attr.srv_name,
7102 serv2->se_attr.srv_name,
7103 IB_DM_MAX_SVC_NAME_LEN) != 0) {
7104 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: ret 1");
7105 return (1);
7106 }
7107 }
7108 IBTF_DPRINTF_L4(ibdm_string, "\tserv_cmp: ret 0");
7109 return (0);
7110 }
7111
7112 /* For debugging purpose only */
7113 #ifdef DEBUG
7114 void
7115 ibdm_dump_mad_hdr(ib_mad_hdr_t *mad_hdr)
7116 {
7117 IBTF_DPRINTF_L4("ibdm", "\t\t MAD Header info");
7118 IBTF_DPRINTF_L4("ibdm", "\t\t ---------------");
7119
7120 IBTF_DPRINTF_L4("ibdm", "\tBase version : 0x%x"
7121 "\tMgmt Class : 0x%x", mad_hdr->BaseVersion, mad_hdr->MgmtClass);
7122 IBTF_DPRINTF_L4("ibdm", "\tClass version : 0x%x"
7123 "\tR Method : 0x%x",
7124 mad_hdr->ClassVersion, mad_hdr->R_Method);
7125 IBTF_DPRINTF_L4("ibdm", "\tMAD Status : 0x%x"
7126 "\tTransaction ID : 0x%llx",
7127 b2h16(mad_hdr->Status), b2h64(mad_hdr->TransactionID));
7128 IBTF_DPRINTF_L4("ibdm", "\t Attribute ID : 0x%x"
7129 "\tAttribute Modified : 0x%lx",
7130 b2h16(mad_hdr->AttributeID), b2h32(mad_hdr->AttributeModifier));
7131 }
7132
7133
void
ibdm_dump_ibmf_msg(ibmf_msg_t *ibmf_msg, int flag)
{
	ib_mad_hdr_t	*mad_hdr;

	IBTF_DPRINTF_L4("ibdm", "\t\t(IBMF_PKT): Local address info");
	IBTF_DPRINTF_L4("ibdm", "\t\t ------------------");

	IBTF_DPRINTF_L4("ibdm", "\tLocal Lid : 0x%x\tRemote Lid : 0x%x"
	    " Remote Qp : 0x%x", ibmf_msg->im_local_addr.ia_local_lid,
	    ibmf_msg->im_local_addr.ia_remote_lid,
	    ibmf_msg->im_local_addr.ia_remote_qno);
	IBTF_DPRINTF_L4("ibdm", "\tP_key : 0x%x\tQ_key : 0x%x"
	    " SL : 0x%x", ibmf_msg->im_local_addr.ia_p_key,
	    ibmf_msg->im_local_addr.ia_q_key,
	    ibmf_msg->im_local_addr.ia_service_level);

	if (flag)
		mad_hdr = (ib_mad_hdr_t *)IBDM_OUT_IBMFMSG_MADHDR(ibmf_msg);
	else
		mad_hdr = IBDM_IN_IBMFMSG_MADHDR(ibmf_msg);

	ibdm_dump_mad_hdr(mad_hdr);
}


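/* Dump the GIDs, LIDs, P_Key and SL of an SA path record */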
void
ibdm_dump_path_info(sa_path_record_t *path)
{
	IBTF_DPRINTF_L4("ibdm", "\t\t Path information");
	IBTF_DPRINTF_L4("ibdm", "\t\t ----------------");

	IBTF_DPRINTF_L4("ibdm", "\t DGID hi : %llx\tDGID lo : %llx",
	    path->DGID.gid_prefix, path->DGID.gid_guid);
	IBTF_DPRINTF_L4("ibdm", "\t SGID hi : %llx\tSGID lo : %llx",
	    path->SGID.gid_prefix, path->SGID.gid_guid);
	IBTF_DPRINTF_L4("ibdm", "\t SLID : %x\t\tDLID : %x",
	    path->SLID, path->DLID);
	IBTF_DPRINTF_L4("ibdm", "\t P Key : %x\t\tSL : %x",
	    path->P_Key, path->SL);
}


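/*
 * Dump the response time value and the redirection and trap related
 * fields of a ClassPortInfo attribute.
 */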
void
ibdm_dump_classportinfo(ib_mad_classportinfo_t *classportinfo)
{
	IBTF_DPRINTF_L4("ibdm", "\t\t CLASSPORT INFO");
	IBTF_DPRINTF_L4("ibdm", "\t\t --------------");

	IBTF_DPRINTF_L4("ibdm", "\t Response Time Value : 0x%x",
	    ((b2h32(classportinfo->RespTimeValue)) & 0x1F));

	IBTF_DPRINTF_L4("ibdm", "\t Redirected GID hi : 0x%llx",
	    b2h64(classportinfo->RedirectGID_hi));
	IBTF_DPRINTF_L4("ibdm", "\t Redirected GID lo : 0x%llx",
	    b2h64(classportinfo->RedirectGID_lo));
	IBTF_DPRINTF_L4("ibdm", "\t Redirected TC : 0x%x",
	    classportinfo->RedirectTC);
	IBTF_DPRINTF_L4("ibdm", "\t Redirected SL : 0x%x",
	    classportinfo->RedirectSL);
	IBTF_DPRINTF_L4("ibdm", "\t Redirected FL : 0x%x",
	    classportinfo->RedirectFL);
	IBTF_DPRINTF_L4("ibdm", "\t Redirected LID : 0x%x",
	    b2h16(classportinfo->RedirectLID));
	IBTF_DPRINTF_L4("ibdm", "\t Redirected P KEY : 0x%x",
	    b2h16(classportinfo->RedirectP_Key));
	IBTF_DPRINTF_L4("ibdm", "\t Redirected QP : 0x%x",
	    classportinfo->RedirectQP);
	IBTF_DPRINTF_L4("ibdm", "\t Redirected Q KEY : 0x%x",
	    b2h32(classportinfo->RedirectQ_Key));
	IBTF_DPRINTF_L4("ibdm", "\t Trap GID hi : 0x%llx",
	    b2h64(classportinfo->TrapGID_hi));
	IBTF_DPRINTF_L4("ibdm", "\t Trap GID lo : 0x%llx",
	    b2h64(classportinfo->TrapGID_lo));
	IBTF_DPRINTF_L4("ibdm", "\t Trap TC : 0x%x",
	    classportinfo->TrapTC);
	IBTF_DPRINTF_L4("ibdm", "\t Trap SL : 0x%x",
	    classportinfo->TrapSL);
	IBTF_DPRINTF_L4("ibdm", "\t Trap FL : 0x%x",
	    classportinfo->TrapFL);
	IBTF_DPRINTF_L4("ibdm", "\t Trap LID : 0x%x",
	    b2h16(classportinfo->TrapLID));
	IBTF_DPRINTF_L4("ibdm", "\t Trap P_Key : 0x%x",
	    b2h16(classportinfo->TrapP_Key));
	IBTF_DPRINTF_L4("ibdm", "\t Trap HL : 0x%x",
	    classportinfo->TrapHL);
	IBTF_DPRINTF_L4("ibdm", "\t Trap QP : 0x%x",
	    classportinfo->TrapQP);
	IBTF_DPRINTF_L4("ibdm", "\t Trap Q_Key : 0x%x",
	    b2h32(classportinfo->TrapQ_Key));
}


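/* Dump the change ID, controller slot count and controller list of an IOU */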
void
ibdm_dump_iounitinfo(ib_dm_io_unitinfo_t *iou_info)
{
	IBTF_DPRINTF_L4("ibdm", "\t\t I/O UnitInfo");
	IBTF_DPRINTF_L4("ibdm", "\t\t ------------");

	IBTF_DPRINTF_L4("ibdm", "\tChange ID : 0x%x",
	    b2h16(iou_info->iou_changeid));
	IBTF_DPRINTF_L4("ibdm", "\t#of ctrl slots : %d",
	    iou_info->iou_num_ctrl_slots);
	IBTF_DPRINTF_L4("ibdm", "\tIOU flag : 0x%x",
	    iou_info->iou_flag);
	IBTF_DPRINTF_L4("ibdm", "\tControl list byte 0 : 0x%x",
	    iou_info->iou_ctrl_list[0]);
	IBTF_DPRINTF_L4("ibdm", "\tControl list byte 1 : 0x%x",
	    iou_info->iou_ctrl_list[1]);
	IBTF_DPRINTF_L4("ibdm", "\tControl list byte 2 : 0x%x",
	    iou_info->iou_ctrl_list[2]);
}


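/* Dump the fields of an IOC controller profile */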
void
ibdm_dump_ioc_profile(ib_dm_ioc_ctrl_profile_t *ioc)
{
	IBTF_DPRINTF_L4("ibdm", "\t\t IOC Controller Profile");
	IBTF_DPRINTF_L4("ibdm", "\t\t ----------------------");

	IBTF_DPRINTF_L4("ibdm", "\tIOC Guid : %llx", ioc->ioc_guid);
	IBTF_DPRINTF_L4("ibdm", "\tVendorID : 0x%x", ioc->ioc_vendorid);
	IBTF_DPRINTF_L4("ibdm", "\tDevice Id : 0x%x", ioc->ioc_deviceid);
	IBTF_DPRINTF_L4("ibdm", "\tDevice Ver : 0x%x", ioc->ioc_device_ver);
	IBTF_DPRINTF_L4("ibdm", "\tSubsys ID : 0x%x", ioc->ioc_subsys_id);
	IBTF_DPRINTF_L4("ibdm", "\tIO class : 0x%x", ioc->ioc_io_class);
	IBTF_DPRINTF_L4("ibdm", "\tIO subclass : 0x%x", ioc->ioc_io_subclass);
	IBTF_DPRINTF_L4("ibdm", "\tProtocol : 0x%x", ioc->ioc_protocol);
	IBTF_DPRINTF_L4("ibdm", "\tProtocolV : 0x%x", ioc->ioc_protocol_ver);
	IBTF_DPRINTF_L4("ibdm", "\tmsg qdepth : %d", ioc->ioc_send_msg_qdepth);
	IBTF_DPRINTF_L4("ibdm", "\trdma qdepth : %d",
	    ioc->ioc_rdma_read_qdepth);
	IBTF_DPRINTF_L4("ibdm", "\tsndmsg sz : %d", ioc->ioc_send_msg_sz);
	IBTF_DPRINTF_L4("ibdm", "\trdma xfersz : %d", ioc->ioc_rdma_xfer_sz);
	IBTF_DPRINTF_L4("ibdm", "\topcap mask : 0x%x",
	    ioc->ioc_ctrl_opcap_mask);
	IBTF_DPRINTF_L4("ibdm", "\tsrventries : %x", ioc->ioc_service_entries);
}


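/* Dump the service ID and service name of a DM service entry */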
void
ibdm_dump_service_entries(ib_dm_srv_t *srv_ents)
{
	IBTF_DPRINTF_L4("ibdm",
	    "\tdump_service_entries: service id : %llx", srv_ents->srv_id);

	IBTF_DPRINTF_L4("ibdm", "\tdump_service_entries: "
	    "Service Name : %s", srv_ents->srv_name);
}

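/*
 * ibdm_dump_sweep_fabric_timestamp() records the sweep start time when
 * called with flag cleared and prints the elapsed sweep time (in ms) when
 * called with flag set.  The timing message can be suppressed by clearing
 * the ibdm_allow_sweep_fabric_timestamp tunable.
 */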
int ibdm_allow_sweep_fabric_timestamp = 1;

void
ibdm_dump_sweep_fabric_timestamp(int flag)
{
	static hrtime_t x;

	if (flag) {
		if (ibdm_allow_sweep_fabric_timestamp) {
			IBTF_DPRINTF_L4("ibdm", "\tTime taken to complete "
			    "sweep %lld ms", ((gethrtime() - x) / 1000000));
		}
		x = 0;
	} else {
		x = gethrtime();
	}
}
#endif