Print this page
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/ib/mgt/ibdma/ibdma.c
+++ new/usr/src/uts/common/io/ib/mgt/ibdma/ibdma.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * Infiniband Device Management Agent for IB storage.
28 28 */
29 29
30 30 #include <sys/conf.h>
31 31 #include <sys/file.h>
32 32 #include <sys/ddi.h>
33 33 #include <sys/sunddi.h>
34 34 #include <sys/modctl.h>
35 35 #include <sys/priv.h>
36 36 #include <sys/sysmacros.h>
37 37
38 38 #include <sys/ib/ibtl/ibti.h> /* IB public interfaces */
39 39
40 40 #include <sys/ib/mgt/ibdma/ibdma.h>
41 41 #include <sys/ib/mgt/ibdma/ibdma_impl.h>
42 42
43 43 /*
44 44 * NOTE: The IB Device Management Agent function, like other IB
45 45 * managers and agents is best implemented as a kernel misc.
46 46 * module.
47 47 * Eventually we could modify IBT_DM_AGENT so that we don't need to
48 48 * open each HCA to receive asynchronous events.
49 49 */
50 50
51 51 #define IBDMA_NAME_VERSION "IB Device Management Agent"
52 52
53 53 extern struct mod_ops mod_miscops;
54 54
55 55 static void ibdma_ibt_async_handler(void *clnt, ibt_hca_hdl_t hdl,
56 56 ibt_async_code_t code, ibt_async_event_t *event);
57 57
58 58 static void ibdma_mad_recv_cb(ibmf_handle_t ibmf_hdl,
59 59 ibmf_msg_t *msgp, void *args);
60 60 static void ibdma_create_resp_mad(ibmf_msg_t *msgp);
↓ open down ↓ |
60 lines elided |
↑ open up ↑ |
61 61
62 62 /*
63 63 * Misc. kernel module for now.
64 64 */
/*
 * Loadable-module descriptor: a kernel misc. module (no device ops),
 * identified by the agent name/version string.
 */
65 65 static struct modlmisc modlmisc = {
66 66 	&mod_miscops,
67 67 	IBDMA_NAME_VERSION
68 68 };
69 69
/*
 * Module linkage; the linkage-pointer array is now brace-initialized
 * (issue 7127: build cleanly without -Wno-missing-braces).
 */
70 70 static struct modlinkage modlinkage = {
71 - MODREV_1, (void *)&modlmisc, NULL
71 + MODREV_1, { (void *)&modlmisc, NULL }
72 72 };
73 73
/*
 * IBTF client registration info: register as a Device Management agent
 * with an async event handler and the client name "ibdma".
 */
74 74 static ibt_clnt_modinfo_t ibdma_ibt_modinfo = {
75 75 	IBTI_V_CURR,
76 76 	IBT_DM_AGENT,
77 77 	ibdma_ibt_async_handler,
78 78 	NULL,
79 79 	"ibdma"
80 80 };
81 81
82 82 /*
83 83 * Module global state allocated at init().
84 84 */
85 85 static ibdma_mod_state_t *ibdma = NULL;
86 86
87 87 /*
88 88 * Init/Fini handlers and IBTL HCA management prototypes.
89 89 */
90 90 static int ibdma_init();
91 91 static int ibdma_fini();
92 92 static int ibdma_ibt_init();
93 93 static void ibdma_ibt_fini();
94 94 static ibdma_hca_t *ibdma_hca_init(ib_guid_t guid);
95 95 static void ibdma_hca_fini(ibdma_hca_t *hca);
96 96 static ibdma_hca_t *ibdma_find_hca(ib_guid_t guid);
97 97
98 98 /*
99 99 * DevMgmt Agent MAD attribute handlers prototypes.
100 100 */
101 101 static void ibdma_get_class_portinfo(ibmf_msg_t *msg);
102 102 static void ibdma_get_io_unitinfo(ibdma_hca_t *hca, ibmf_msg_t *msg);
103 103 static void ibdma_get_ioc_profile(ibdma_hca_t *hca, ibmf_msg_t *msg);
104 104 static void ibdma_get_ioc_services(ibdma_hca_t *hca, ibmf_msg_t *msg);
105 105
106 106 /*
107 107 * _init()
108 108 */
/*
 * Allocate the module-global soft state, initialize the agent
 * (ibdma_init) and install the misc module; fully unwinds the soft
 * state on any failure so _init can be retried.
 */
109 109 int
110 110 _init(void)
111 111 {
112 112 	int status;
113 113
114 114 	ASSERT(ibdma == NULL);
115 115
116 116 	ibdma = kmem_zalloc(sizeof (*ibdma), KM_SLEEP);
117 117 	ASSERT(ibdma != NULL);
118 118
119 119 	status = ibdma_init();
120 120 	if (status != DDI_SUCCESS) {
121 121 		kmem_free(ibdma, sizeof (*ibdma));
122 122 		ibdma = NULL;
123 123 		return (status);
124 124 	}
125 125
126 126 	status = mod_install(&modlinkage);
127 127 	if (status != DDI_SUCCESS) {
128 128 		cmn_err(CE_NOTE, "_init, mod_install error (%d)", status);
		/* mod_install failed: tear down what ibdma_init set up */
129 129 		(void) ibdma_fini();
130 130 		kmem_free(ibdma, sizeof (*ibdma));
131 131 		ibdma = NULL;
132 132 	}
133 133 	return (status);
134 134 }
135 135
136 136 /*
137 137 * _info()
138 138 */
/* Standard loadable-module info entry point. */
139 139 int
140 140 _info(struct modinfo *modinfop)
141 141 {
142 142 	return (mod_info(&modlinkage, modinfop));
143 143 }
144 144
145 145 /*
146 146 * _fini()
147 147 */
/*
 * Remove the module.  After a successful mod_remove, warn about any
 * I/O controller slots clients failed to unregister, then release all
 * agent resources and the global soft state.
 */
148 148 int
149 149 _fini(void)
150 150 {
151 151 	int status;
152 152 	int slot;
153 153 	ibdma_hca_t *hca;
154 154
155 155 	status = mod_remove(&modlinkage);
156 156 	if (status != DDI_SUCCESS) {
157 157 		cmn_err(CE_NOTE, "_fini, mod_remove error (%d)", status);
158 158 		return (status);
159 159 	}
160 160
161 161 	/*
162 162 	 * Sanity check to see if anyone is not cleaning
163 163 	 * up appropriately.
164 164 	 */
165 165 	mutex_enter(&ibdma->ms_hca_list_lock);
166 166 	hca = list_head(&ibdma->ms_hca_list);
167 167 	while (hca != NULL) {
168 168 		for (slot = 0; slot < IBDMA_MAX_IOC; slot++) {
169 169 			if (hca->ih_ioc[slot].ii_inuse) {
				/* slot is 0-based; report DM 1-based slot */
170 170 				cmn_err(CE_NOTE, "_fini, IOC %d still attached"
171 171 				    " for (0x%0llx)", slot+1,
172 172 				    (u_longlong_t)hca->ih_iou_guid);
173 173 			}
174 174 		}
175 175 		hca = list_next(&ibdma->ms_hca_list, hca);
176 176 	}
177 177 	mutex_exit(&ibdma->ms_hca_list_lock);
178 178
179 179 	(void) ibdma_fini();
180 180 	kmem_free(ibdma, sizeof (*ibdma));
181 181 	return (status);
182 182 }
183 183
184 184 /*
185 185 * ibdma_init()
186 186 *
187 187 * Initialize I/O Unit structure, generate initial HCA list and register
188 188 * it port with the IBMF.
189 189 */
/*
 * One-time agent initialization: create the global HCA-list lock and
 * attach to the IBTF / enumerate HCAs (ibdma_ibt_init).  The lock is
 * destroyed again if IBT attach fails, leaving no residual state.
 */
190 190 static int
191 191 ibdma_init()
192 192 {
193 193 	int status;
194 194
195 195 	/*
196 196 	 * Global lock and I/O Unit initialization.
197 197 	 */
198 198 	mutex_init(&ibdma->ms_hca_list_lock, NULL, MUTEX_DRIVER, NULL);
199 199
200 200 	/*
201 201 	 * Discover IB hardware and setup for device management agent
202 202 	 * support.
203 203 	 */
204 204 	status = ibdma_ibt_init();
205 205 	if (status != DDI_SUCCESS) {
206 206 		cmn_err(CE_NOTE, "ibdma_init, ibt_attach failed (%d)",
207 207 		    status);
208 208 		mutex_destroy(&ibdma->ms_hca_list_lock);
209 209 		return (status);
210 210 	}
211 211
212 212 	return (status);
213 213 }
214 214
215 215 /*
216 216 * ibdma_fini()
217 217 *
218 218 * Release resource if we are no longer in use.
219 219 */
/*
 * Mirror of ibdma_init(): detach from the IBTF and destroy the global
 * lock.  Always succeeds.
 */
220 220 static int
221 221 ibdma_fini()
222 222 {
223 223 	ibdma_ibt_fini();
224 224 	mutex_destroy(&ibdma->ms_hca_list_lock);
225 225 	return (DDI_SUCCESS);
226 226 }
227 227
228 228 /*
229 229 * ibdma_ibt_async_handler()
230 230 */
231 231 /* ARGSUSED */
/*
 * IBTF async event callback.  Port state/change events need no action
 * here; HCA attach/detach events add/remove the corresponding
 * ibdma_hca_t on the global list under ms_hca_list_lock.
 */
232 232 static void
233 233 ibdma_ibt_async_handler(void *clnt, ibt_hca_hdl_t hdl,
234 234     ibt_async_code_t code, ibt_async_event_t *event)
235 235 {
236 236 	ibdma_hca_t *hca;
237 237
238 238 	switch (code) {
239 239
240 240 	case IBT_EVENT_PORT_UP:
241 241 	case IBT_ERROR_PORT_DOWN:
242 242 	case IBT_PORT_CHANGE_EVENT:
243 243 	case IBT_CLNT_REREG_EVENT:
		/* no per-port state kept; nothing to do */
244 244 		break;
245 245
246 246 	case IBT_HCA_ATTACH_EVENT:
247 247 		mutex_enter(&ibdma->ms_hca_list_lock);
248 248 		hca = ibdma_hca_init(event->ev_hca_guid);
249 249 		if (hca != NULL) {
250 250 			list_insert_tail(&ibdma->ms_hca_list, hca);
251 251 			cmn_err(CE_NOTE, "hca ibt hdl (%p)",
252 252 			    (void *)hca->ih_ibt_hdl);
253 253 			ibdma->ms_num_hcas++;
254 254 		}
255 255 		mutex_exit(&ibdma->ms_hca_list_lock);
256 256 		break;
257 257
258 258 	case IBT_HCA_DETACH_EVENT:
259 259 		mutex_enter(&ibdma->ms_hca_list_lock);
260 260 		hca = ibdma_find_hca(event->ev_hca_guid);
261 261 		if (hca != NULL) {
262 262 			list_remove(&ibdma->ms_hca_list, hca);
263 263 			cmn_err(CE_NOTE, "removing hca (%p) (0x%llx)",
264 264 			    (void *)hca, hca ?
265 265 			    (u_longlong_t)hca->ih_iou_guid : 0x0ll);
266 266 			ibdma_hca_fini(hca);
267 267 		}
268 268 		mutex_exit(&ibdma->ms_hca_list_lock);
269 269 		break;
270 270
271 271 	default:
272 272 #ifdef DEBUG
273 273 		cmn_err(CE_NOTE, "ibt_async_handler, unhandled event(%d)",
274 274 		    code);
275 275 #endif
276 276 		break;
277 277 	}
278 278
279 279 }
280 280
281 281 /*
282 282 * ibdma_ibt_init()
283 283 */
/*
 * Attach to the IBTF, create the HCA list, and initialize one
 * ibdma_hca_t per HCA currently present.  Failure to initialize an
 * individual HCA is logged and skipped; only ibt_attach failure is
 * fatal.  Returns DDI_SUCCESS even when no HCAs are found.
 */
284 284 static int
285 285 ibdma_ibt_init()
286 286 {
287 287 	int status;
288 288 	int hca_cnt;
289 289 	int hca_ndx;
290 290 	ib_guid_t *guid;
291 291 	ibdma_hca_t *hca;
292 292
293 293 	/*
294 294 	 * Attach to IBTF and get HCA list.
295 295 	 */
296 296 	status = ibt_attach(&ibdma_ibt_modinfo, NULL,
297 297 	    ibdma, &ibdma->ms_ibt_hdl);
298 298 	if (status != DDI_SUCCESS) {
299 299 		cmn_err(CE_NOTE, "ibt_init, ibt_attach failed (%d)",
300 300 		    status);
301 301 		return (status);
302 302 	}
303 303
304 304 	list_create(&ibdma->ms_hca_list, sizeof (ibdma_hca_t),
305 305 	    offsetof(ibdma_hca_t, ih_node));
306 306
307 307 	hca_cnt = ibt_get_hca_list(&guid);
308 308 	if (hca_cnt < 1) {
309 309 #ifdef DEBUG_IBDMA
310 310 		cmn_err(CE_NOTE, "ibt_init, no HCA(s) found");
311 311 #endif
312 312 		/* not an error if no HCAs, but nothing more to do here */
313 313 		return (DDI_SUCCESS);
314 314 	}
315 315
316 316 	mutex_enter(&ibdma->ms_hca_list_lock);
317 317
318 318 	for (hca_ndx = 0; hca_ndx < hca_cnt; hca_ndx++) {
319 319 #ifdef DEBUG_IBDMA
320 320 		cmn_err(CE_NOTE, "adding hca GUID(0x%llx)",
321 321 		    (u_longlong_t)guid[hca_ndx]);
322 322 #endif
323 323
324 324 		hca = ibdma_hca_init(guid[hca_ndx]);
325 325 		if (hca == NULL) {
326 326 			cmn_err(CE_NOTE, "ibt_init, hca_init GUID(0x%llx)"
327 327 			    " failed", (u_longlong_t)guid[hca_ndx]);
328 328 			continue;
329 329 		}
330 330 		list_insert_tail(&ibdma->ms_hca_list, hca);
331 331 		ibdma->ms_num_hcas++;
332 332 	}
333 333
334 334 	mutex_exit(&ibdma->ms_hca_list_lock);
335 335
	/* guid array was allocated by ibt_get_hca_list(); release it */
336 336 	ibt_free_hca_list(guid, hca_cnt);
337 337 #ifdef DEBUG_IBDMA
338 338 	cmn_err(CE_NOTE, "Added %d HCA(s)",
339 339 	    ibdma->ms_num_hcas);
340 340 #endif
341 341 	return (DDI_SUCCESS);
342 342 }
343 343
344 344 /*
345 345 * ibdma_ibt_fini()
346 346 */
/*
 * Tear down all per-HCA state, destroy the HCA list, and detach from
 * the IBTF.  Runs under ms_hca_list_lock for the whole teardown.
 */
347 347 static void
348 348 ibdma_ibt_fini()
349 349 {
350 350 	ibdma_hca_t *hca;
351 351 	ibdma_hca_t *next;
352 352
353 353 	mutex_enter(&ibdma->ms_hca_list_lock);
354 354 	hca = list_head(&ibdma->ms_hca_list);
355 355 	while (hca != NULL) {
		/* capture next before ibdma_hca_fini() frees this node */
356 356 		next = list_next(&ibdma->ms_hca_list, hca);
357 357 		list_remove(&ibdma->ms_hca_list, hca);
358 358 #ifdef DEBUG_IBDMA
359 359 		cmn_err(CE_NOTE, "removing hca (%p) (0x%llx)",
360 360 		    (void *)hca, hca ?
361 361 		    (u_longlong_t)hca->ih_iou_guid : 0x0ll);
362 362 		cmn_err(CE_NOTE, "hca ibt hdl (%p)",
363 363 		    (void *)hca->ih_ibt_hdl);
364 364 #endif
365 365 		ibdma_hca_fini(hca);
366 366 		hca = next;
367 367 	}
368 368 	list_destroy(&ibdma->ms_hca_list);
369 369
370 370 	(void) ibt_detach(ibdma->ms_ibt_hdl);
371 371 	ibdma->ms_ibt_hdl = NULL;
372 372 	ibdma->ms_num_hcas = 0;
373 373 	mutex_exit(&ibdma->ms_hca_list_lock);
374 374 }
375 375
376 376 /*
377 377 * ibdma_find_hca()
378 378 */
/*
 * Linear search of the global HCA list by I/O Unit GUID; returns NULL
 * if not found.  Caller must hold ms_hca_list_lock.
 */
379 379 static ibdma_hca_t *
380 380 ibdma_find_hca(ib_guid_t guid)
381 381 {
382 382 	ibdma_hca_t *hca;
383 383
384 384 	ASSERT(mutex_owned(&ibdma->ms_hca_list_lock));
385 385
386 386 	hca = list_head(&ibdma->ms_hca_list);
387 387 	while (hca != NULL) {
388 388 		if (hca->ih_iou_guid == guid) {
389 389 			break;
390 390 		}
391 391 		hca = list_next(&ibdma->ms_hca_list, hca);
392 392 	}
393 393 	return (hca);
394 394 }
395 395
396 396 /*
397 397 * ibdma_hca_init()
398 398 */
/*
 * Create and initialize the per-HCA state for the HCA identified by
 * guid: query attributes, allocate the variable-length ibdma_hca_t
 * (one trailing ibdma_port_t per port), open the HCA, and register
 * each port with the IBMF as a Device Management agent.  Returns the
 * new ibdma_hca_t, or NULL on any failure (all partial state undone,
 * the IBMF/port path via ibdma_hca_fini()).  Caller must hold
 * ms_hca_list_lock.
 */
399 399 static ibdma_hca_t *
400 400 ibdma_hca_init(ib_guid_t guid)
401 401 {
402 402 	ibt_status_t status;
403 403 	ibdma_hca_t *hca;
404 404 	ibdma_port_t *port;
405 405 	ibt_hca_attr_t hca_attr;
406 406 	int ndx;
407 407
408 408 	ASSERT(mutex_owned(&ibdma->ms_hca_list_lock));
409 409
410 410 	status = ibt_query_hca_byguid(guid, &hca_attr);
411 411 	if (status != IBT_SUCCESS) {
412 412 		cmn_err(CE_NOTE, "hca_init HCA query error (%d)",
413 413 		    status);
414 414 		return (NULL);
415 415 	}
416 416
	/* reject duplicate registration of the same HCA */
417 417 	if (ibdma_find_hca(guid) != NULL) {
418 418 #ifdef DEBUG_IBDMA
419 419 		cmn_err(CE_NOTE, "hca_init HCA already exists");
420 420 #endif
421 421 		return (NULL);
422 422 	}
423 423
	/*
	 * ibdma_hca_t embeds one ibdma_port_t (ih_port[1] style), so
	 * allocate nports-1 additional trailing port structures.
	 */
424 424 	hca = kmem_zalloc(sizeof (ibdma_hca_t) +
425 425 	    (hca_attr.hca_nports-1)*sizeof (ibdma_port_t), KM_SLEEP);
426 426 	ASSERT(hca != NULL);
427 427
428 428 	hca->ih_nports = hca_attr.hca_nports;
429 429
430 430 	rw_init(&hca->ih_iou_rwlock, NULL, RW_DRIVER, NULL);
431 431 	rw_enter(&hca->ih_iou_rwlock, RW_WRITER);
432 432 	hca->ih_iou_guid = guid;
433 433 	hca->ih_iou.iou_changeid = h2b16(1);
434 434 	hca->ih_iou.iou_num_ctrl_slots = IBDMA_MAX_IOC;
435 435 	hca->ih_iou.iou_flag = IB_DM_IOU_OPTIONROM_ABSENT;
436 436
437 437 	list_create(&hca->ih_hdl_list, sizeof (ibdma_hdl_impl_t),
438 438 	    offsetof(ibdma_hdl_impl_t, ih_node));
439 439 	rw_exit(&hca->ih_iou_rwlock);
440 440
441 441 	/*
442 442 	 * It would be better to not open, but IBTL is setup to only allow
443 443 	 * certain managers to get async call backs if not open.
444 444 	 */
445 445 	status = ibt_open_hca(ibdma->ms_ibt_hdl, guid, &hca->ih_ibt_hdl);
446 446 	if (status != IBT_SUCCESS) {
447 447 		cmn_err(CE_NOTE, "hca_init() IBT open failed (%d)",
448 448 		    status);
449 449
450 450 		list_destroy(&hca->ih_hdl_list);
451 451 		rw_destroy(&hca->ih_iou_rwlock);
452 452 		kmem_free(hca, sizeof (ibdma_hca_t) +
453 453 		    (hca_attr.hca_nports-1)*sizeof (ibdma_port_t));
454 454 		return (NULL);
455 455 	}
456 456
457 457 	/*
458 458 	 * Register with the IB Management Framework and setup MAD call-back.
459 459 	 */
460 460 	for (ndx = 0; ndx < hca->ih_nports; ndx++) {
461 461 		port = &hca->ih_port[ndx];
462 462 		port->ip_hcap = hca;
463 463 		port->ip_ibmf_reg.ir_ci_guid = hca->ih_iou_guid;
464 464 		port->ip_ibmf_reg.ir_port_num = ndx + 1;
465 465 		port->ip_ibmf_reg.ir_client_class = DEV_MGT_AGENT;
466 466
467 467 		status = ibmf_register(&port->ip_ibmf_reg, IBMF_VERSION,
468 468 		    0, NULL, NULL, &port->ip_ibmf_hdl, &port->ip_ibmf_caps);
469 469 		if (status != IBMF_SUCCESS) {
470 470 			cmn_err(CE_NOTE, "hca_init, IBMF register failed (%d)",
471 471 			    status);
			/* NULL hdl tells ibdma_hca_fini to skip this port */
472 472 			port->ip_ibmf_hdl = NULL;
473 473 			ibdma_hca_fini(hca);
474 474 			return (NULL);
475 475 		}
476 476
477 477 		status = ibmf_setup_async_cb(port->ip_ibmf_hdl,
478 478 		    IBMF_QP_HANDLE_DEFAULT, ibdma_mad_recv_cb, port, 0);
479 479 		if (status != IBMF_SUCCESS) {
480 480 			cmn_err(CE_NOTE, "hca_init, IBMF cb setup failed (%d)",
481 481 			    status);
482 482 			ibdma_hca_fini(hca);
483 483 			return (NULL);
484 484 		}
485 485
		/* advertise the IsDeviceManagementSupported port capability */
486 486 		status = ibt_modify_port_byguid(hca->ih_iou_guid,
487 487 		    ndx+1, IBT_PORT_SET_DEVMGT, 0);
488 488 		if (status != IBT_SUCCESS) {
489 489 			cmn_err(CE_NOTE, "hca_init, IBT modify port caps"
490 490 			    " error (%d)", status);
491 491 			ibdma_hca_fini(hca);
492 492 			return (NULL);
493 493 		}
494 494 	}
495 495 	return (hca);
496 496 }
497 497
498 498 /*
499 499 * ibdma_hca_fini()
500 500 */
/*
 * Release all per-HCA state: free any leaked client handles, clear the
 * DevMgt port capability and un-register each port from the IBMF,
 * close the HCA, then free the variable-length ibdma_hca_t itself.
 * Caller must hold ms_hca_list_lock; hca must already be off the list.
 */
501 501 static void
502 502 ibdma_hca_fini(ibdma_hca_t *hca)
503 503 {
504 504 	int status;
505 505 	int ndx;
506 506 	ibdma_port_t *port;
507 507 	ibdma_hdl_impl_t *hdl;
508 508 	ibdma_hdl_impl_t *hdl_next;
509 509
510 510 	ASSERT(mutex_owned(&ibdma->ms_hca_list_lock));
511 511 	ASSERT(hca != NULL);
512 512
513 513 	rw_enter(&hca->ih_iou_rwlock, RW_WRITER);
514 514
515 515 	/*
516 516 	 * All handles should have been de-registered, but release
517 517 	 * any that are outstanding.
518 518 	 */
519 519 	hdl = list_head(&hca->ih_hdl_list);
520 520 	while (hdl != NULL) {
521 521 		hdl_next = list_next(&hca->ih_hdl_list, hdl);
522 522 		list_remove(&hca->ih_hdl_list, hdl);
523 523 		cmn_err(CE_NOTE, "hca_fini, unexpected ibdma user handle"
524 524 		    " exists");
525 525 		kmem_free(hdl, sizeof (*hdl));
526 526 		hdl = hdl_next;
527 527 	}
528 528 	list_destroy(&hca->ih_hdl_list);
529 529
530 530 	/*
531 531 	 * Un-register with the IBMF.
532 532 	 */
533 533 	for (ndx = 0; ndx < hca->ih_nports; ndx++) {
534 534 		port = &hca->ih_port[ndx];
535 535 		port->ip_hcap = NULL;
536 536
		/* best-effort teardown: failures are logged, not fatal */
537 537 		status = ibt_modify_port_byguid(hca->ih_iou_guid,
538 538 		    ndx+1, IBT_PORT_RESET_DEVMGT, 0);
539 539 		if (status != IBT_SUCCESS)
540 540 			cmn_err(CE_NOTE, "hca_fini, IBT modify port caps"
541 541 			    " error (%d)", status);
542 542
		/* port never completed ibmf_register(); nothing to undo */
543 543 		if (port->ip_ibmf_hdl == NULL)
544 544 			continue;
545 545
546 546 		status = ibmf_tear_down_async_cb(port->ip_ibmf_hdl,
547 547 		    IBMF_QP_HANDLE_DEFAULT, 0);
548 548 		if (status != IBMF_SUCCESS)
549 549 			cmn_err(CE_NOTE, "hca_fini, IBMF tear down cb"
550 550 			    " error (%d)", status);
551 551
552 552 		status = ibmf_unregister(&port->ip_ibmf_hdl, 0);
553 553 		if (status != IBMF_SUCCESS)
554 554 			cmn_err(CE_NOTE, "hca_fini, IBMF un-register"
555 555 			    " error (%d)", status);
556 556 		port->ip_ibmf_hdl = NULL;
557 557 	}
558 558
559 559 	status = ibt_close_hca(hca->ih_ibt_hdl);
560 560 	if (status != IBT_SUCCESS)
561 561 		cmn_err(CE_NOTE, "hca_fini close error (%d)", status);
562 562
563 563 	rw_exit(&hca->ih_iou_rwlock);
564 564 	rw_destroy(&hca->ih_iou_rwlock);
565 565 	kmem_free(hca, sizeof (ibdma_hca_t) +
566 566 	    (hca->ih_nports-1) * sizeof (ibdma_port_t));
567 567 }
568 568
569 569 /* DM IBMF MAD handlers */
570 570 /*
571 571 * ibdma_create_resp_mad()
572 572 */
/*
 * Allocate the send buffer for a MAD response and lay out its header /
 * class-header / class-data pointers.  The received MAD is copied
 * wholesale into the send buffer so the caller only needs to patch the
 * response-specific fields (method, status, payload).
 */
573 573 static void
574 574 ibdma_create_resp_mad(ibmf_msg_t *msgp)
575 575 {
576 576 	/*
577 577 	 * Allocate send buffer fix up hdr for response.
578 578 	 */
579 579 	msgp->im_msgbufs_send.im_bufs_mad_hdr =
580 580 	    kmem_zalloc(IBDMA_MAD_SIZE, KM_SLEEP);
581 581
582 582 	msgp->im_msgbufs_send.im_bufs_cl_hdr = (uchar_t *)
583 583 	    msgp->im_msgbufs_send.im_bufs_mad_hdr + sizeof (ib_mad_hdr_t);
584 584 	msgp->im_msgbufs_send.im_bufs_cl_hdr_len = IBDMA_DM_MAD_HDR_SIZE;
585 585 	msgp->im_msgbufs_send.im_bufs_cl_data =
586 586 	    ((char *)msgp->im_msgbufs_send.im_bufs_cl_hdr +
587 587 	    IBDMA_DM_MAD_HDR_SIZE);
588 588 	msgp->im_msgbufs_send.im_bufs_cl_data_len =
589 589 	    IBDMA_MAD_SIZE - sizeof (ib_mad_hdr_t) - IBDMA_DM_MAD_HDR_SIZE;
	/* seed the whole response with the request MAD contents */
590 590 	(void) memcpy(msgp->im_msgbufs_send.im_bufs_mad_hdr,
591 591 	    msgp->im_msgbufs_recv.im_bufs_mad_hdr, IBDMA_MAD_SIZE);
592 592
593 593 	/*
594 594 	 * We may want to support a GRH since this is a GMP; not
595 595 	 * required for current SRP device manager platforms.
596 596 	 */
597 597 #if 0
598 598 	if (msgp->im_msg_flags & IBMF_MSG_FLAGS_GLOBAL_ADDRESS) {
599 599 		ib_gid_t temp = msgp->im_global_addr.ig_recver_gid;
600 600
601 601 		msgp->im_global_addr.ig_recver_gid =
602 602 		    msgp->im_global_addr.ig_sender_gid;
603 603 		msgp->im_global_addr.ig_sender_gid = temp;
604 604 	}
605 605 #endif
606 606 }
607 607
608 608 /*
609 609 * ibdma_mad_send_cb()
610 610 */
611 611 /* ARGSUSED */
/*
 * MAD send-completion callback: free the response buffer allocated by
 * ibdma_create_resp_mad() and release the IBMF message.
 */
612 612 static void
613 613 ibdma_mad_send_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msgp, void *arg)
614 614 {
615 615 	/*
616 616 	 * Just free the buffers and release the message.
617 617 	 */
618 618 	if (msgp->im_msgbufs_send.im_bufs_mad_hdr != NULL) {
619 619 		kmem_free(msgp->im_msgbufs_send.im_bufs_mad_hdr,
620 620 		    IBDMA_MAD_SIZE);
621 621 		msgp->im_msgbufs_send.im_bufs_mad_hdr = NULL;
622 622 	}
623 623 	if (ibmf_free_msg(ibmf_hdl, &msgp) != IBMF_SUCCESS) {
624 624 		cmn_err(CE_NOTE, "mad_send_cb, IBMF message free error");
625 625 	}
626 626 }
627 627
628 628 /*
629 629 * ibdma_mad_recv_cb()
630 630 */
631 631 static void
632 632 ibdma_mad_recv_cb(ibmf_handle_t ibmf_hdl, ibmf_msg_t *msgp, void *args)
633 633 {
634 634 int status;
635 635 ib_mad_hdr_t *in_mad;
636 636 ib_mad_hdr_t *out_mad;
637 637 ibdma_port_t *port = args;
638 638
639 639 ASSERT(msgp != NULL);
640 640 ASSERT(port != NULL);
641 641
642 642 if (msgp->im_msg_status != IBMF_SUCCESS) {
643 643 cmn_err(CE_NOTE, "mad_recv_cb, bad MAD receive status (%d)",
644 644 msgp->im_msg_status);
645 645 goto drop;
646 646 }
647 647
648 648 in_mad = msgp->im_msgbufs_recv.im_bufs_mad_hdr;
649 649
650 650 if (in_mad->MgmtClass != MAD_MGMT_CLASS_DEV_MGT) {
651 651 #ifdef DEBUG_IBDMA
652 652 cmn_err(CE_NOTE, "mad_recv_cb, MAD not of Dev Mgmt Class");
653 653 #endif
654 654 goto drop;
655 655 }
656 656
657 657 ibdma_create_resp_mad(msgp);
658 658 out_mad = msgp->im_msgbufs_send.im_bufs_mad_hdr;
659 659
660 660 out_mad->R_Method = IB_DM_DEVMGT_METHOD_GET_RESP;
661 661 out_mad->Status = 0;
662 662
663 663 if (in_mad->R_Method == MAD_METHOD_SET) {
664 664 #ifdef DEBUG_IBDMA
665 665 cmn_err(CE_NOTE, "mad_recv_cb, no attributes supported"
666 666 " for set");
667 667 #endif
668 668 out_mad->Status = MAD_STATUS_UNSUPP_METHOD_ATTR;
669 669 goto send_resp;
670 670 }
671 671
672 672 if (in_mad->R_Method != MAD_METHOD_GET) {
673 673 #ifdef DEBUG_IBDMA
674 674 cmn_err(CE_NOTE, "mad_recv_cb, no attributes supported"
675 675 " for set");
676 676 #endif
677 677 out_mad->Status = MAD_STATUS_UNSUPP_METHOD;
678 678 goto send_resp;
679 679 }
680 680
681 681 /*
682 682 * Process a GET method.
683 683 */
684 684 switch (b2h16(in_mad->AttributeID)) {
685 685
686 686 case IB_DM_ATTR_CLASSPORTINFO:
687 687 ibdma_get_class_portinfo(msgp);
688 688 break;
689 689
690 690 case IB_DM_ATTR_IO_UNITINFO:
691 691 ibdma_get_io_unitinfo(port->ip_hcap, msgp);
692 692 break;
693 693
694 694 case IB_DM_ATTR_IOC_CTRL_PROFILE:
695 695 ibdma_get_ioc_profile(port->ip_hcap, msgp);
696 696 break;
697 697
698 698 case IB_DM_ATTR_SERVICE_ENTRIES:
699 699 ibdma_get_ioc_services(port->ip_hcap, msgp);
700 700 break;
701 701
702 702 default:
703 703 out_mad->Status = MAD_STATUS_UNSUPP_METHOD_ATTR;
704 704 break;
705 705 }
706 706
707 707 send_resp:
708 708 status = ibmf_msg_transport(ibmf_hdl, IBMF_QP_HANDLE_DEFAULT,
709 709 msgp, NULL, ibdma_mad_send_cb, NULL, 0);
710 710 if (status != IBMF_SUCCESS) {
711 711 cmn_err(CE_NOTE, "mad_recv_cb, send error (%d)", status);
712 712 ibdma_mad_send_cb(ibmf_hdl, msgp, NULL);
713 713 }
714 714 return;
715 715
716 716 drop:
717 717 status = ibmf_free_msg(ibmf_hdl, &msgp);
718 718 if (status != IBMF_SUCCESS) {
719 719 cmn_err(CE_NOTE, "mad_recv_cb, error dropping (%d)",
720 720 status);
721 721 }
722 722 }
723 723
724 724 /*
725 725 * ibdma_get_class_portinfo()
726 726 */
/*
 * Fill the response class data with a DM ClassPortInfo: base/class
 * versions and the response-time value; all other fields zero.
 */
727 727 static void
728 728 ibdma_get_class_portinfo(ibmf_msg_t *msg)
729 729 {
730 730 	ib_mad_classportinfo_t *cpip;
731 731
732 732 	cpip = (ib_mad_classportinfo_t *)msg->im_msgbufs_send.im_bufs_cl_data;
733 733 	bzero(cpip, sizeof (*cpip));
734 734 	cpip->BaseVersion = MAD_CLASS_BASE_VERS_1;
735 735 	cpip->ClassVersion = IB_DM_CLASS_VERSION_1;
736 736 	cpip->RespTimeValue = h2b32(IBDMA_DM_RESP_TIME);
737 737 }
738 738
739 739 /*
740 740 * ibdma_get_io_unitinfo()
741 741 */
/*
 * Copy the HCA's I/O Unit info (change id, slot count, controller
 * list) into the response class data, under the IOU read lock.
 */
742 742 static void
743 743 ibdma_get_io_unitinfo(ibdma_hca_t *hca, ibmf_msg_t *msg)
744 744 {
745 745 	ib_dm_io_unitinfo_t *uip;
746 746
747 747 	uip = (ib_dm_io_unitinfo_t *)msg->im_msgbufs_send.im_bufs_cl_data;
748 748 	rw_enter(&hca->ih_iou_rwlock, RW_READER);
749 749 	bcopy(&hca->ih_iou, uip, sizeof (ib_dm_io_unitinfo_t));
750 750 	rw_exit(&hca->ih_iou_rwlock);
751 751 }
752 752
753 753 /*
754 754 * ibdma_get_ioc_profile()
755 755 */
/*
 * Return the IOC controller profile for the DM (1-based) slot encoded
 * in the request's AttributeModifier.  Sets MAD status
 * INVALID_FIELD for an out-of-range slot and NORESP for an empty one.
 */
756 756 static void
757 757 ibdma_get_ioc_profile(ibdma_hca_t *hca, ibmf_msg_t *msg)
758 758 {
759 759 	ib_dm_ioc_ctrl_profile_t *iocp;
760 760 	uint32_t slot;
761 761
762 762 	ASSERT(msg != NULL);
763 763
764 764 	slot = b2h32(msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeModifier);
765 765 	iocp = (ib_dm_ioc_ctrl_profile_t *)
766 766 	    msg->im_msgbufs_send.im_bufs_cl_data;
767 767 	if (slot == 0 || slot > IBDMA_MAX_IOC) {
768 768 		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
769 769 		    MAD_STATUS_INVALID_FIELD;
770 770 		return;
771 771 	}
772 772
	/* convert DM 1-based slot to 0-based index */
773 773 	slot--;
774 774 	rw_enter(&hca->ih_iou_rwlock, RW_READER);
775 775 	if (ibdma_get_ioc_state(hca, slot) == IBDMA_IOC_PRESENT) {
776 776 		bcopy(&hca->ih_ioc[slot].ii_profile, iocp,
777 777 		    sizeof (ib_dm_ioc_ctrl_profile_t));
778 778 	} else {
779 779 		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
780 780 		    IB_DM_DEVMGT_MAD_STAT_NORESP;
781 781 	}
782 782 	rw_exit(&hca->ih_iou_rwlock);
783 783 }
784 784
785 785 /*
786 786 * ibdma_get_ioc_services()
787 787 */
/*
 * Return a range of service entries for one IOC.  The request's
 * AttributeModifier packs the DM 1-based slot in the upper 16 bits and
 * the first/last service indices in the low two bytes.  Rejects empty
 * slots (NORESP) and invalid slot/range combinations (INVALID_FIELD);
 * at most 5 entries (hi - low <= 4) may be requested per the DM class.
 */
788 788 static void
789 789 ibdma_get_ioc_services(ibdma_hca_t *hca, ibmf_msg_t *msg)
790 790 {
791 791 	ib_dm_srv_t *to_svcp;
792 792 	ib_dm_srv_t *from_svcp;
793 793 	uint32_t slot;
794 794 	uint8_t hi;
795 795 	uint8_t low;
796 796
797 797 	ASSERT(msg != NULL);
798 798
	/* unpack modifier: [31:16] slot, [15:8] last entry, [7:0] first */
799 799 	slot = b2h32(msg->im_msgbufs_recv.im_bufs_mad_hdr->AttributeModifier);
800 800 	hi = (slot >> 8) & 0x00FF;
801 801 	low = slot & 0x00FF;
802 802 	slot = (slot >> 16) & 0x0FFFF;
803 803 	if (slot == 0 || slot > IBDMA_MAX_IOC) {
804 804 		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
805 805 		    MAD_STATUS_INVALID_FIELD;
806 806 		return;
807 807 	}
808 808
	/* convert DM 1-based slot to 0-based index */
809 809 	slot--;
810 810
811 811 	rw_enter(&hca->ih_iou_rwlock, RW_READER);
812 812 	if (ibdma_get_ioc_state(hca, slot) != IBDMA_IOC_PRESENT) {
813 813 		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
814 814 		    IB_DM_DEVMGT_MAD_STAT_NORESP;
815 815 		rw_exit(&hca->ih_iou_rwlock);
816 816 		return;
817 817 	}
818 818
819 819 	if ((low > hi) || (hi - low > 4)) {
820 820 		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
821 821 		    MAD_STATUS_INVALID_FIELD;
822 822 		rw_exit(&hca->ih_iou_rwlock);
823 823 		return;
824 824 	}
825 825
826 826 	if (hi > hca->ih_ioc[slot].ii_profile.ioc_service_entries) {
827 827 		msg->im_msgbufs_send.im_bufs_mad_hdr->Status =
828 828 		    MAD_STATUS_INVALID_FIELD;
829 829 		rw_exit(&hca->ih_iou_rwlock);
830 830 		return;
831 831 	}
832 832
833 833 	to_svcp = (ib_dm_srv_t *)msg->im_msgbufs_send.im_bufs_cl_data;
834 834 	from_svcp = hca->ih_ioc[slot].ii_srvcs + low;
835 835 	bcopy(from_svcp, to_svcp, sizeof (ib_dm_srv_t) * (hi - low + 1));
836 836 	rw_exit(&hca->ih_iou_rwlock);
837 837 }
838 838
839 839
840 840 /*
841 841 * Client API internal helpers
842 842 */
843 843
844 844 /*
845 845 * ibdma_hdl_to_ioc()
846 846 */
/*
 * Validate a client handle: look up its I/O Unit by GUID and confirm
 * the handle is on that HCA's registered-handle list.  Returns the
 * handle on success, NULL otherwise.  Caller must hold
 * ms_hca_list_lock.
 */
847 847 ibdma_hdl_impl_t *
848 848 ibdma_get_hdl_impl(ibdma_hdl_t hdl)
849 849 {
850 850 	ibdma_hca_t *hca;
851 851 	ibdma_hdl_impl_t *hdl_tmp = hdl;
852 852 	ibdma_hdl_impl_t *hdl_impl;
853 853
854 854 	ASSERT(mutex_owned(&ibdma->ms_hca_list_lock));
855 855
856 856 	if (hdl_tmp == NULL) {
857 857 		cmn_err(CE_NOTE, "get_hdl_impl, NULL handle");
858 858 		return (NULL);
859 859 	}
860 860
861 861 	hca = ibdma_find_hca(hdl_tmp->ih_iou_guid);
862 862 	if (hca == NULL) {
863 863 		cmn_err(CE_NOTE, "get_hdl_impl, invalid handle, bad IOU");
864 864 		return (NULL);
865 865 	}
866 866
867 867 	hdl_impl = list_head(&hca->ih_hdl_list);
868 868 	while (hdl_impl != NULL) {
869 869 		if (hdl_impl == hdl_tmp) {
870 870 			break;
871 871 		}
872 872 		hdl_impl = list_next(&hca->ih_hdl_list, hdl_impl);
873 873 	}
874 874 	return (hdl_impl);
875 875 }
876 876
877 877 /*
878 878 * ibdma_set_ioc_state()
879 879 *
880 880 * slot should be 0 based (not DM 1 based slot).
881 881 *
882 882 * I/O Unit write lock should be held outside of this function.
883 883 */
/*
 * Update the 4-bit state nibble for a controller slot in the IOU
 * controller list (two slots packed per byte: even slot in the high
 * nibble, odd slot in the low nibble) and bump the big-endian IOU
 * change id so remote DM consumers notice the change.
 */
884 884 static void
885 885 ibdma_set_ioc_state(ibdma_hca_t *hca, int slot, ibdma_ioc_state_t state)
886 886 {
887 887 	uint8_t cur;
888 888 	uint16_t id;
889 889
890 890 	cur = hca->ih_iou.iou_ctrl_list[slot >> 1];
891 891 	if (slot & 1) {
892 892 		cur = (cur & 0xF0) | state;
893 893 	} else {
894 894 		cur = (cur & 0x0F) | (state << 4);
895 895 	}
896 896 	hca->ih_iou.iou_ctrl_list[slot >> 1] = cur;
	/* change id is stored big-endian: convert, increment, convert back */
897 897 	id = b2h16(hca->ih_iou.iou_changeid);
898 898 	id++;
899 899 	hca->ih_iou.iou_changeid = h2b16(id);
900 900 #ifdef DEBUG_IBDMA
901 901 	cmn_err(CE_NOTE, "set_ioc_state, slot offset(%d), value(%d)",
902 902 	    slot, hca->ih_iou.iou_ctrl_list[slot >> 1]);
903 903 #endif
904 904 }
905 905
906 906 /*
907 907 * ibdma_get_ioc_state()
908 908 *
909 909 * slot should be 0 based (not DM 1 based slot).
910 910 *
911 911 * I/O Unit read lock should be held outside of this function.
912 912 */
/*
 * Extract the 4-bit state nibble for a 0-based controller slot from
 * the packed IOU controller list; returns 0xFF for an out-of-range
 * slot.
 */
913 913 static ibdma_ioc_state_t
914 914 ibdma_get_ioc_state(ibdma_hca_t *hca, int slot)
915 915 {
916 916 	uint8_t cur;
917 917
918 918 	if (slot >= IBDMA_MAX_IOC)
919 919 		return (0xFF);
920 920
921 921 	cur = hca->ih_iou.iou_ctrl_list[slot >> 1];
	/* odd slots live in the low nibble, even slots in the high nibble */
922 922 	cur = slot & 1 ? cur & 0x0F : cur >> 4;
923 923 	return (cur);
924 924 }
925 925
926 926 /* CLIENT API Implementation */
927 927 /*
928 928 * ibdma_ioc_register()
929 929 *
930 930 */
/*
 * Client API: register an I/O controller with the I/O Unit identified
 * by iou_guid.  Finds a free slot (rejecting a duplicate ioc_guid),
 * takes private copies of the profile and service entries, marks the
 * slot present, and returns an opaque handle for later unregistration.
 * Returns NULL on bad parameters, unknown IOU, duplicate IOC, or a
 * full I/O Unit.  On success the caller's profile is updated with the
 * assigned slot (low bits of ioc_vendorid).
 */
931 931 ibdma_hdl_t
932 932 ibdma_ioc_register(ib_guid_t iou_guid, ib_dm_ioc_ctrl_profile_t *profile,
933 933     ib_dm_srv_t *services)
934 934 {
935 935 	int free_slot = -1;
936 936 	int svc_entries;
937 937 	int slot;
938 938 	ibdma_hca_t *hca;
939 939 	ibdma_hdl_impl_t *hdl;
940 940
941 941 	if (profile == NULL || services == NULL) {
942 942 		cmn_err(CE_NOTE, "ioc_register, bad parameter");
943 943 		return (NULL);
944 944 	}
945 945
946 946 	svc_entries = profile->ioc_service_entries;
947 947 	if (svc_entries == 0) {
948 948 		cmn_err(CE_NOTE, "ioc_register, bad profile no service");
949 949 		return (NULL);
950 950 	}
951 951
952 952 	/*
953 953 	 * Find the associated I/O Unit.
954 954 	 */
955 955 	mutex_enter(&ibdma->ms_hca_list_lock);
956 956 	hca = ibdma_find_hca(iou_guid);
957 957 	if (hca == NULL) {
958 958 		mutex_exit(&ibdma->ms_hca_list_lock);
959 959 		cmn_err(CE_NOTE, "ioc_register, bad I/O Unit GUID (0x%llx)",
960 960 		    (u_longlong_t)iou_guid);
961 961 		return (NULL);
962 962 	}
963 963
	/* one pass: remember first free slot, reject duplicate IOC GUID */
964 964 	rw_enter(&hca->ih_iou_rwlock, RW_WRITER);
965 965 	for (slot = 0; slot < IBDMA_MAX_IOC; slot++) {
966 966 		if (hca->ih_ioc[slot].ii_inuse == 0) {
967 967 			if (free_slot == -1) {
968 968 				free_slot = slot;
969 969 			}
970 970 			continue;
971 971 		}
972 972
973 973 		if (profile->ioc_guid ==
974 974 		    hca->ih_ioc[slot].ii_profile.ioc_guid) {
975 975 			rw_exit(&hca->ih_iou_rwlock);
976 976 			mutex_exit(&ibdma->ms_hca_list_lock);
977 977 #ifdef DEBUG_IBDMA
978 978 			cmn_err(CE_NOTE, "ioc_register, IOC previously"
979 979 			    " registered");
980 980 #endif
981 981 			return (NULL);
982 982 		}
983 983 	}
984 984
985 985 	if (free_slot < 0) {
986 986 		rw_exit(&hca->ih_iou_rwlock);
987 987 		cmn_err(CE_NOTE, "ioc_register, error - I/O Unit full");
988 988 		return (NULL);
989 989 	}
990 990 #ifdef DEBUG_IBDMA
991 991 	cmn_err(CE_NOTE, "ibdma_ioc_register, assigned to 0 based slot (%d)",
992 992 	    free_slot);
993 993 #endif
994 994
995 995 	hca->ih_ioc[free_slot].ii_inuse = 1;
996 996 	hca->ih_ioc[free_slot].ii_slot = free_slot;
997 997 	hca->ih_ioc[free_slot].ii_hcap = hca;
998 998
999 999 	/*
1000 1000 	 * Allocate local copy of profile and services.
1001 1001 	 */
1002 1002 	hca->ih_ioc[free_slot].ii_srvcs =
1003 1003 	    kmem_zalloc(sizeof (ib_dm_srv_t) * svc_entries, KM_SLEEP);
1004 1004 	bcopy(profile, &hca->ih_ioc[free_slot].ii_profile,
1005 1005 	    sizeof (ib_dm_ioc_ctrl_profile_t));
1006 1006 	bcopy(services, hca->ih_ioc[free_slot].ii_srvcs,
1007 1007 	    sizeof (ib_dm_srv_t) * svc_entries);
1008 1008
1009 1009 	/*
1010 1010 	 * Update the profile copy with the I/O controller slot assigned.
1011 1011 	 * The slot occupies the lower 8 biths of the vendor ID/slot 32bit
1012 1012 	 * field.
1013 1013 	 */
1014 1014 	profile->ioc_vendorid |= h2b32(free_slot);
1015 1015
1016 1016 	ibdma_set_ioc_state(hca, free_slot, IBDMA_IOC_PRESENT);
1017 1017
1018 1018 	hdl = kmem_alloc(sizeof (*hdl), KM_SLEEP);
1019 1019 	hdl->ih_iou_guid = hca->ih_iou_guid;
1020 1020 	hdl->ih_ioc_ndx = (uint8_t)free_slot;
1021 1021 	list_insert_tail(&hca->ih_hdl_list, hdl);
1022 1022
1023 1023 	rw_exit(&hca->ih_iou_rwlock);
1024 1024 	mutex_exit(&ibdma->ms_hca_list_lock);
1025 1025
1026 1026 	return ((ibdma_hdl_t)hdl);
1027 1027 }
1028 1028
1029 1029 /*
1030 1030 * ibdma_ioc_unregister()
1031 1031 *
1032 1032 */
1033 1033 ibdma_status_t
1034 1034 ibdma_ioc_unregister(ibdma_hdl_t hdl)
1035 1035 {
1036 1036 ibdma_ioc_t *ioc;
1037 1037 ibdma_hca_t *hca;
1038 1038 int slot;
1039 1039 ibdma_hdl_impl_t *hdl_tmp = hdl;
1040 1040 ibdma_hdl_impl_t *hdl_impl;
1041 1041
1042 1042 if (hdl == NULL) {
1043 1043 cmn_err(CE_NOTE, "ioc_unregister, NULL handle");
1044 1044 return (IBDMA_BAD_PARAM);
1045 1045 }
1046 1046
1047 1047 mutex_enter(&ibdma->ms_hca_list_lock);
1048 1048 hca = ibdma_find_hca(hdl_tmp->ih_iou_guid);
1049 1049 if (hca == NULL) {
1050 1050 cmn_err(CE_NOTE, "ioc_unregsiter, invalid handle, IOU"
1051 1051 " not found");
1052 1052 mutex_exit(&ibdma->ms_hca_list_lock);
1053 1053 return (IBDMA_BAD_PARAM);
1054 1054 }
1055 1055
1056 1056 hdl_impl = list_head(&hca->ih_hdl_list);
1057 1057 while (hdl_impl != NULL) {
1058 1058 if (hdl_impl == hdl_tmp) {
1059 1059 break;
1060 1060 }
1061 1061 hdl_impl = list_next(&hca->ih_hdl_list, hdl_impl);
1062 1062 }
1063 1063
1064 1064 if (hdl_impl == NULL) {
1065 1065 cmn_err(CE_NOTE, "ioc_unregsiter, invalid handle, not found");
1066 1066 mutex_exit(&ibdma->ms_hca_list_lock);
1067 1067 return (IBDMA_BAD_PARAM);
1068 1068 }
1069 1069
1070 1070 list_remove(&hca->ih_hdl_list, hdl_impl);
1071 1071
1072 1072 if (hdl_impl->ih_ioc_ndx >= IBDMA_MAX_IOC) {
1073 1073 cmn_err(CE_NOTE, "ioc_unregister, corrupted handle");
1074 1074 kmem_free(hdl_impl, sizeof (*hdl_impl));
1075 1075 mutex_exit(&ibdma->ms_hca_list_lock);
1076 1076 return (IBDMA_BAD_PARAM);
1077 1077 }
1078 1078 ioc = &hca->ih_ioc[hdl_impl->ih_ioc_ndx];
1079 1079 kmem_free(hdl_impl, sizeof (*hdl_impl));
1080 1080
1081 1081 if (ioc->ii_slot > IBDMA_MAX_IOC) {
1082 1082 cmn_err(CE_NOTE, "ioc_unregister, IOC corrupted, bad"
1083 1083 " slot in IOC");
1084 1084 mutex_exit(&ibdma->ms_hca_list_lock);
1085 1085 return (IBDMA_BAD_PARAM);
1086 1086 }
1087 1087
1088 1088 rw_enter(&ioc->ii_hcap->ih_iou_rwlock, RW_WRITER);
1089 1089 if (ioc->ii_inuse == 0) {
1090 1090 rw_exit(&ioc->ii_hcap->ih_iou_rwlock);
1091 1091 mutex_exit(&ibdma->ms_hca_list_lock);
1092 1092 cmn_err(CE_NOTE, "ioc_unregister, slot not in use (%d)",
1093 1093 ioc->ii_slot+1);
1094 1094 return (IBDMA_BAD_PARAM);
1095 1095 }
1096 1096
1097 1097 ASSERT(ioc->ii_srvcs != NULL);
1098 1098
1099 1099 slot = ioc->ii_slot;
1100 1100 hca = ioc->ii_hcap;
1101 1101 kmem_free(ioc->ii_srvcs, sizeof (ib_dm_srv_t) *
1102 1102 ioc->ii_profile.ioc_service_entries);
1103 1103 bzero(ioc, sizeof (ibdma_ioc_t));
1104 1104 ibdma_set_ioc_state(hca, slot, IBDMA_IOC_NOT_INSTALLED);
1105 1105
1106 1106 rw_exit(&hca->ih_iou_rwlock);
1107 1107 mutex_exit(&ibdma->ms_hca_list_lock);
1108 1108
1109 1109 return (IBDMA_SUCCESS);
1110 1110 }
1111 1111
1112 1112 /*
1113 1113 * ibdma_ioc_update()
1114 1114 *
1115 1115 */
1116 1116 ibdma_status_t
1117 1117 ibdma_ioc_update(ibdma_hdl_t hdl, ib_dm_ioc_ctrl_profile_t *profile,
1118 1118 ib_dm_srv_t *services)
1119 1119 {
1120 1120 ibdma_ioc_t *ioc;
1121 1121 ibdma_hca_t *hca;
1122 1122 ibdma_hdl_impl_t *hdl_tmp = hdl;
1123 1123 ibdma_hdl_impl_t *hdl_impl;
1124 1124
1125 1125 if (hdl == NULL) {
1126 1126 cmn_err(CE_NOTE, "ioc_update, NULL handle");
1127 1127 return (IBDMA_BAD_PARAM);
1128 1128 }
1129 1129
1130 1130 if (profile == NULL || services == NULL) {
1131 1131 cmn_err(CE_NOTE, "ioc_update, NULL parameter");
1132 1132 return (IBDMA_BAD_PARAM);
1133 1133 }
1134 1134
1135 1135 mutex_enter(&ibdma->ms_hca_list_lock);
1136 1136 hca = ibdma_find_hca(hdl_tmp->ih_iou_guid);
1137 1137 if (hca == NULL) {
1138 1138 cmn_err(CE_NOTE, "ioc_update, invalid handle, IOU not found");
1139 1139 mutex_exit(&ibdma->ms_hca_list_lock);
1140 1140 return (IBDMA_BAD_PARAM);
1141 1141 }
1142 1142
1143 1143 hdl_impl = list_head(&hca->ih_hdl_list);
1144 1144 while (hdl_impl != NULL) {
1145 1145 if (hdl_impl == hdl_tmp) {
1146 1146 break;
1147 1147 }
1148 1148 hdl_impl = list_next(&hca->ih_hdl_list, hdl_impl);
1149 1149 }
1150 1150
1151 1151 if (hdl_impl == NULL) {
1152 1152 cmn_err(CE_NOTE, "ioc_update, invalid handle, not found");
1153 1153 mutex_exit(&ibdma->ms_hca_list_lock);
1154 1154 return (IBDMA_BAD_PARAM);
1155 1155 }
1156 1156
1157 1157 if (hdl_impl->ih_ioc_ndx >= IBDMA_MAX_IOC) {
1158 1158 cmn_err(CE_NOTE, "ioc_update, corrupted handle");
1159 1159 mutex_exit(&ibdma->ms_hca_list_lock);
1160 1160 return (IBDMA_BAD_PARAM);
1161 1161 }
1162 1162 ioc = &hca->ih_ioc[hdl_impl->ih_ioc_ndx];
1163 1163
1164 1164 if (ioc->ii_slot >= IBDMA_MAX_IOC || ioc->ii_hcap == NULL) {
1165 1165 cmn_err(CE_NOTE, "ioc_update, bad handle (%p)",
1166 1166 (void *)hdl);
1167 1167 mutex_exit(&ibdma->ms_hca_list_lock);
1168 1168 return (IBDMA_BAD_PARAM);
1169 1169 }
1170 1170
1171 1171 rw_enter(&ioc->ii_hcap->ih_iou_rwlock, RW_WRITER);
1172 1172 if (ioc->ii_inuse == 0) {
1173 1173 rw_exit(&ioc->ii_hcap->ih_iou_rwlock);
1174 1174 mutex_exit(&ibdma->ms_hca_list_lock);
1175 1175 cmn_err(CE_NOTE, "ioc_udate slot not in use (%d)",
1176 1176 ioc->ii_slot+1);
1177 1177 return (IBDMA_BAD_PARAM);
1178 1178 }
1179 1179
1180 1180 ASSERT(ioc->ii_srvcs != NULL);
1181 1181
1182 1182 kmem_free(ioc->ii_srvcs, ioc->ii_profile.ioc_service_entries *
1183 1183 sizeof (ib_dm_srv_t));
1184 1184 ioc->ii_srvcs = kmem_zalloc(profile->ioc_service_entries *
1185 1185 sizeof (ib_dm_srv_t), KM_SLEEP);
1186 1186
1187 1187 bcopy(profile, &ioc->ii_profile, sizeof (ib_dm_ioc_ctrl_profile_t));
1188 1188 bcopy(services, ioc->ii_srvcs, sizeof (ib_dm_srv_t) *
1189 1189 profile->ioc_service_entries);
1190 1190 /*
1191 1191 * Update the profile copy with the I/O controller slot assigned.
1192 1192 * The slot occupies the lower 8 biths of the vendor ID/slot 32bit
1193 1193 * field.
1194 1194 */
1195 1195 profile->ioc_vendorid |= h2b32(ioc->ii_slot);
1196 1196 ibdma_set_ioc_state(ioc->ii_hcap, ioc->ii_slot, IBDMA_IOC_PRESENT);
1197 1197 rw_exit(&ioc->ii_hcap->ih_iou_rwlock);
1198 1198 mutex_exit(&ibdma->ms_hca_list_lock);
1199 1199
1200 1200 return (IBDMA_SUCCESS);
1201 1201 }
↓ open down ↓ |
1120 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX