7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/common/io/comstar/port/srpt/srpt_ioc.c
+++ new/usr/src/uts/common/io/comstar/port/srpt/srpt_ioc.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * I/O Controller functions for the Solaris COMSTAR SCSI RDMA Protocol
28 28 * Target (SRPT) port provider.
29 29 */
30 30
31 31 #include <sys/types.h>
32 32 #include <sys/ddi.h>
33 33 #include <sys/types.h>
34 34 #include <sys/sunddi.h>
35 35 #include <sys/atomic.h>
36 36 #include <sys/sysmacros.h>
37 37 #include <sys/ib/ibtl/ibti.h>
38 38 #include <sys/sdt.h>
39 39
40 40 #include "srp.h"
41 41 #include "srpt_impl.h"
42 42 #include "srpt_ioc.h"
43 43 #include "srpt_stp.h"
44 44 #include "srpt_ch.h"
45 45 #include "srpt_common.h"
46 46
47 47 /*
48 48 * srpt_ioc_srq_size - Tunable parameter that specifies the number
49 49 * of receive WQ entries that can be posted to the IOC shared
50 50 * receive queue.
51 51 */
52 52 uint32_t srpt_ioc_srq_size = SRPT_DEFAULT_IOC_SRQ_SIZE;
53 53 extern uint16_t srpt_send_msg_depth;
54 54 extern uint32_t srpt_iu_size;
55 55 extern boolean_t srpt_enable_by_default;
56 56
/*
 * IOC profile capabilities mask must be big-endian.
 *
 * The two preprocessor arms declare the same one-byte bitfield layout
 * for little-endian (_BIT_FIELDS_LTOH) and big-endian (_BIT_FIELDS_HTOL)
 * hosts, so the byte has an identical wire image either way.
 * NOTE(review): the bit names (st/sf, rt/rf, wt/wf, at/af) presumably
 * map to the send / RDMA-read / RDMA-write / atomic "to IOC" and
 * "from IOC" capability bits of the I/O controller profile
 * OperationCapabilityMask -- confirm against the SRP specification.
 */
typedef struct srpt_ioc_opcap_bits_s {
#if defined(_BIT_FIELDS_LTOH)
	uint8_t		af:1,
			at:1,
			wf:1,
			wt:1,
			rf:1,
			rt:1,
			sf:1,
			st:1;
#elif defined(_BIT_FIELDS_HTOL)
	uint8_t		st:1,
			sf:1,
			rt:1,
			rf:1,
			wt:1,
			wf:1,
			at:1,
			af:1;
#else
#error One of _BIT_FIELDS_LTOH or _BIT_FIELDS_HTOL must be defined
#endif
} srpt_ioc_opcap_bits_t;

/* View the capability bits either as individual flags or as one byte. */
typedef union {
	srpt_ioc_opcap_bits_t	bits;
	uint8_t			mask;
} srpt_ioc_opcap_mask_t;
86 86
87 87 /*
88 88 * vmem arena variables - values derived from iSER
89 89 */
90 90 #define SRPT_MR_QUANTSIZE 0x400 /* 1K */
91 91 #define SRPT_MIN_CHUNKSIZE 0x100000 /* 1MB */
92 92
93 93 /* use less memory on 32-bit kernels as it's much more constrained */
94 94 #ifdef _LP64
95 95 #define SRPT_BUF_MR_CHUNKSIZE 0x1000000 /* 16MB */
96 96 #define SRPT_BUF_POOL_MAX 0x40000000 /* 1GB */
97 97 #else
98 98 #define SRPT_BUF_MR_CHUNKSIZE 0x400000 /* 4MB */
99 99 #define SRPT_BUF_POOL_MAX 0x4000000 /* 64MB */
100 100 #endif
101 101
102 102 static ibt_mr_flags_t srpt_dbuf_mr_flags =
103 103 IBT_MR_ENABLE_LOCAL_WRITE | IBT_MR_ENABLE_REMOTE_WRITE |
104 104 IBT_MR_ENABLE_REMOTE_READ;
105 105
106 106 void srpt_ioc_ib_async_hdlr(void *clnt, ibt_hca_hdl_t hdl,
107 107 ibt_async_code_t code, ibt_async_event_t *event);
108 108
109 109 static struct ibt_clnt_modinfo_s srpt_ibt_modinfo = {
110 110 IBTI_V_CURR,
111 111 IBT_STORAGE_DEV,
112 112 srpt_ioc_ib_async_hdlr,
113 113 NULL,
114 114 "srpt"
115 115 };
116 116
117 117 static srpt_ioc_t *srpt_ioc_init(ib_guid_t guid);
118 118 static void srpt_ioc_fini(srpt_ioc_t *ioc);
119 119 static boolean_t srpt_check_hca_cfg_enabled(ib_guid_t hca_guid);
120 120
121 121 static srpt_vmem_pool_t *srpt_vmem_create(const char *name, srpt_ioc_t *ioc,
122 122 ib_memlen_t chunksize, uint64_t maxsize, ibt_mr_flags_t flags);
123 123 static void *srpt_vmem_alloc(srpt_vmem_pool_t *vm_pool, size_t size);
124 124 static int srpt_vmem_mr_compare(const void *a, const void *b);
125 125 static srpt_mr_t *srpt_vmem_chunk_alloc(srpt_vmem_pool_t *ioc,
126 126 ib_memlen_t chunksize);
127 127 static void srpt_vmem_destroy(srpt_vmem_pool_t *vm_pool);
128 128 static void srpt_vmem_free(srpt_vmem_pool_t *vm_pool, void *vaddr, size_t size);
129 129 static srpt_mr_t *srpt_reg_mem(srpt_vmem_pool_t *vm_pool, ib_vaddr_t vaddr,
130 130 ib_memlen_t len);
131 131 static void srpt_vmem_chunk_free(srpt_vmem_pool_t *vm_pool, srpt_mr_t *mr);
132 132 static void srpt_dereg_mem(srpt_ioc_t *ioc, srpt_mr_t *mr);
133 133 static int srpt_vmem_mr(srpt_vmem_pool_t *vm_pool, void *vaddr, size_t size,
134 134 srpt_mr_t *mr);
135 135
136 136 /*
137 137 * srpt_ioc_attach() - I/O Controller attach
138 138 *
139 139 * Attach to IBTF and initialize I/O controllers. The srpt_ctxt->sc_rwlock
140 140 * should be held outside of this call.
141 141 */
142 142 int
143 143 srpt_ioc_attach()
144 144 {
145 145 int status;
146 146 int hca_cnt;
147 147 int hca_ndx;
148 148 ib_guid_t *guid;
149 149
150 150 ASSERT(srpt_ctxt != NULL);
151 151
152 152 /*
153 153 * Attach to IBTF and initialize a list of IB devices. Each
154 154 * HCA will be represented by an I/O Controller.
155 155 */
156 156 status = ibt_attach(&srpt_ibt_modinfo, srpt_ctxt->sc_dip,
157 157 srpt_ctxt, &srpt_ctxt->sc_ibt_hdl);
158 158 if (status != DDI_SUCCESS) {
159 159 SRPT_DPRINTF_L1("ioc_attach, ibt_attach failed (0x%x)",
160 160 status);
161 161 return (DDI_FAILURE);
162 162 }
163 163
164 164 hca_cnt = ibt_get_hca_list(&guid);
165 165 if (hca_cnt < 1) {
166 166 /*
167 167 * not a fatal error. Service will be up and
168 168 * waiting for ATTACH events.
169 169 */
170 170 SRPT_DPRINTF_L2("ioc_attach, no HCA found");
171 171 return (DDI_SUCCESS);
172 172 }
173 173
174 174 for (hca_ndx = 0; hca_ndx < hca_cnt; hca_ndx++) {
175 175 SRPT_DPRINTF_L2("ioc_attach, attaching HCA %016llx",
176 176 (u_longlong_t)guid[hca_ndx]);
177 177 srpt_ioc_attach_hca(guid[hca_ndx], B_FALSE);
178 178 }
179 179
180 180 ibt_free_hca_list(guid, hca_cnt);
181 181 SRPT_DPRINTF_L3("ioc_attach, added %d I/O Controller(s)",
182 182 srpt_ctxt->sc_num_iocs);
183 183 return (DDI_SUCCESS);
184 184 }
185 185
186 186 /*
187 187 * Initialize I/O Controllers. sprt_ctxt->sc_rwlock must be locked by the
188 188 * caller.
189 189 *
190 190 * 'checked' indicates no need to lookup the hca in the HCA configuration
191 191 * list.
192 192 */
193 193 void
194 194 srpt_ioc_attach_hca(ib_guid_t hca_guid, boolean_t checked)
195 195 {
196 196 boolean_t enable_hca = B_TRUE;
197 197 srpt_ioc_t *ioc;
198 198
199 199 if (!checked) {
200 200 enable_hca = srpt_check_hca_cfg_enabled(hca_guid);
201 201
202 202 if (!enable_hca) {
203 203 /* nothing to do */
204 204 SRPT_DPRINTF_L2(
205 205 "ioc_attach_hca, HCA %016llx disabled "
206 206 "by srpt config",
207 207 (u_longlong_t)hca_guid);
208 208 return;
209 209 }
210 210 }
211 211
212 212 SRPT_DPRINTF_L2("ioc_attach_hca, adding I/O"
213 213 " Controller (%016llx)", (u_longlong_t)hca_guid);
214 214
215 215 ioc = srpt_ioc_init(hca_guid);
216 216 if (ioc == NULL) {
217 217 /*
218 218 * IOC already exists or an error occurred. Already
219 219 * logged by srpt_ioc_init()
220 220 */
221 221 return;
222 222 }
223 223
224 224 /*
225 225 * Create the COMSTAR SRP Target for this IOC. If this fails,
226 226 * remove the IOC.
227 227 */
228 228 rw_enter(&ioc->ioc_rwlock, RW_WRITER);
229 229 ioc->ioc_tgt_port = srpt_stp_alloc_port(ioc, ioc->ioc_guid);
230 230 if (ioc->ioc_tgt_port == NULL) {
231 231 SRPT_DPRINTF_L1("ioc_attach_hca: alloc SCSI"
232 232 " Target Port error on GUID(%016llx)",
233 233 (u_longlong_t)ioc->ioc_guid);
234 234 rw_exit(&ioc->ioc_rwlock);
235 235 srpt_ioc_fini(ioc);
236 236 return;
237 237 }
238 238 rw_exit(&ioc->ioc_rwlock);
239 239
240 240 /*
241 241 * New HCA added with default SCSI Target Port, SRP service
242 242 * will be started when SCSI Target Port is brought
243 243 * on-line by STMF.
244 244 */
245 245 list_insert_tail(&srpt_ctxt->sc_ioc_list, ioc);
246 246 SRPT_DPRINTF_L2("ioc_attach_hca, I/O Controller ibt HCA hdl (%p)",
247 247 (void *)ioc->ioc_ibt_hdl);
248 248
249 249 srpt_ctxt->sc_num_iocs++;
250 250 }
251 251
252 252 /*
253 253 * srpt_check_hca_cfg_enabled()
254 254 *
255 255 * Function to check the configuration for the enabled status of a given
256 256 * HCA. Returns B_TRUE if SRPT services should be activated for this HCA,
257 257 * B_FALSE if it should be disabled.
258 258 */
259 259 static boolean_t
260 260 srpt_check_hca_cfg_enabled(ib_guid_t hca_guid)
261 261 {
262 262 int status;
263 263 char buf[32];
264 264 nvlist_t *hcanv;
265 265 boolean_t enable_hca;
266 266
267 267 enable_hca = srpt_enable_by_default;
268 268
269 269 SRPT_FORMAT_HCAKEY(buf, sizeof (buf), (u_longlong_t)hca_guid);
270 270
271 271 if (srpt_ctxt->sc_cfg_hca_nv != NULL) {
272 272 status = nvlist_lookup_nvlist(srpt_ctxt->sc_cfg_hca_nv,
273 273 buf, &hcanv);
274 274 if (status == 0) {
275 275 SRPT_DPRINTF_L3("check_hca_cfg, found guid %s", buf);
276 276 (void) nvlist_lookup_boolean_value(hcanv,
277 277 SRPT_PROP_ENABLED, &enable_hca);
278 278 } else {
279 279 SRPT_DPRINTF_L3("check_hca_cfg, did not find guid %s",
280 280 buf);
281 281 }
282 282 }
283 283
284 284 return (enable_hca);
285 285 }
286 286
287 287 /*
288 288 * srpt_ioc_update()
289 289 *
290 290 * Using the configuration nvlist, enables or disables SRP services
291 291 * the provided HCAs. srpt_ctxt->sc_rwlock should be held outside of this call.
292 292 */
293 293 void
294 294 srpt_ioc_update(void)
295 295 {
296 296 boolean_t enabled;
297 297 nvpair_t *nvp = NULL;
298 298 uint64_t hca_guid;
299 299 nvlist_t *nvl;
300 300 nvlist_t *cfg = srpt_ctxt->sc_cfg_hca_nv;
301 301
302 302 if (cfg == NULL) {
303 303 SRPT_DPRINTF_L2("ioc_update, no configuration data");
304 304 return;
305 305 }
306 306
307 307 while ((nvp = nvlist_next_nvpair(cfg, nvp)) != NULL) {
308 308 enabled = srpt_enable_by_default;
309 309
310 310 if ((nvpair_value_nvlist(nvp, &nvl)) != 0) {
311 311 SRPT_DPRINTF_L2("ioc_update, did not find an nvlist");
312 312 continue;
313 313 }
314 314
315 315 if ((nvlist_lookup_uint64(nvl, SRPT_PROP_GUID, &hca_guid))
316 316 != 0) {
317 317 SRPT_DPRINTF_L2("ioc_update, did not find a guid");
318 318 continue;
319 319 }
320 320
321 321 (void) nvlist_lookup_boolean_value(nvl, SRPT_PROP_ENABLED,
322 322 &enabled);
323 323
324 324 if (enabled) {
325 325 SRPT_DPRINTF_L2("ioc_update, enabling guid %016llx",
326 326 (u_longlong_t)hca_guid);
327 327 srpt_ioc_attach_hca(hca_guid, B_TRUE);
328 328 } else {
329 329 SRPT_DPRINTF_L2("ioc_update, disabling guid %016llx",
330 330 (u_longlong_t)hca_guid);
331 331 srpt_ioc_detach_hca(hca_guid);
332 332 }
333 333 }
334 334 }
335 335
336 336 /*
337 337 * srpt_ioc_detach() - I/O Controller detach
338 338 *
339 339 * srpt_ctxt->sc_rwlock should be held outside of this call.
340 340 */
341 341 void
342 342 srpt_ioc_detach()
343 343 {
344 344 srpt_ioc_t *ioc;
345 345
346 346 /*
347 347 * All SRP targets must be destroyed before calling this
348 348 * function.
349 349 */
350 350 while ((ioc = list_head(&srpt_ctxt->sc_ioc_list)) != NULL) {
351 351 SRPT_DPRINTF_L2("ioc_detach, removing I/O Controller(%p)"
352 352 " (%016llx), ibt_hdl(%p)",
353 353 (void *)ioc,
354 354 ioc ? (u_longlong_t)ioc->ioc_guid : 0x0ll,
355 355 (void *)ioc->ioc_ibt_hdl);
356 356
357 357 list_remove(&srpt_ctxt->sc_ioc_list, ioc);
358 358 srpt_ioc_fini(ioc);
359 359 srpt_ctxt->sc_num_iocs--;
360 360 }
361 361
362 362 srpt_ctxt->sc_ibt_hdl = NULL;
363 363 }
364 364
/*
 * srpt_ioc_detach_hca()
 *
 * Stop SRP Target services on this HCA
 *
 * Note that this is not entirely synchronous with srpt_ioc_attach_hca()
 * in that we don't need to check the configuration to know whether to
 * disable an HCA. We get here either because the IB framework has told
 * us the HCA has been detached, or because the administrator has explicitly
 * disabled this HCA.
 *
 * Must be called with srpt_ctxt->sc_rwlock locked as RW_WRITER.
 */
void
srpt_ioc_detach_hca(ib_guid_t hca_guid)
{
	srpt_ioc_t		*ioc;
	srpt_target_port_t	*tgt;
	stmf_status_t		stmf_status = STMF_SUCCESS;

	ioc = srpt_ioc_get_locked(hca_guid);
	if (ioc == NULL) {
		/* doesn't exist, nothing to do */
		return;
	}

	rw_enter(&ioc->ioc_rwlock, RW_WRITER);
	tgt = ioc->ioc_tgt_port;

	if (tgt != NULL) {
		/*
		 * Take the SCSI target port down through STMF before
		 * tearing the IOC down; the port is only freed once
		 * STMF confirms the destroy succeeded.
		 */
		stmf_status = srpt_stp_destroy_port(tgt);
		if (stmf_status == STMF_SUCCESS) {
			ioc->ioc_tgt_port = NULL;
			(void) srpt_stp_free_port(tgt);
		}
	}

	rw_exit(&ioc->ioc_rwlock);

	if (stmf_status != STMF_SUCCESS) {
		/*
		 * should never happen -- leave the IOC on the list so
		 * it is not freed while STMF still references the port
		 */
		return;
	}

	/* Unlink and destroy the IOC now that the target port is gone. */
	list_remove(&srpt_ctxt->sc_ioc_list, ioc);
	srpt_ctxt->sc_num_iocs--;

	srpt_ioc_fini(ioc);
	SRPT_DPRINTF_L2("ioc_detach_hca, HCA %016llx detached",
	    (u_longlong_t)hca_guid);
}
416 416
/*
 * srpt_ioc_init() - I/O Controller initialization
 *
 * Creates and initializes the per-HCA srpt_ioc_t: opens the HCA,
 * allocates a protection domain and a shared receive queue (SRQ),
 * allocates the SRP IU receive-buffer pool (registered as one memory
 * region and pre-posted to the SRQ), creates the dbuf vmem arena and
 * the STMF data-buffer store.
 *
 * Returns the new ioc on success; NULL if the HCA cannot be queried,
 * an ioc for this GUID already exists, or any setup step fails (all
 * partially-created resources are unwound via the goto chain below).
 *
 * Requires srpt_ctxt->rw_lock be held outside of call.
 */
static srpt_ioc_t *
srpt_ioc_init(ib_guid_t guid)
{
	ibt_status_t		status;
	srpt_ioc_t		*ioc;
	ibt_hca_attr_t		hca_attr;
	uint_t			iu_ndx;
	uint_t			err_ndx;
	ibt_mr_attr_t		mr_attr;
	ibt_mr_desc_t		mr_desc;
	srpt_iu_t		*iu;
	ibt_srq_sizes_t		srq_attr;
	char			namebuf[32];
	size_t			iu_offset;
	uint_t			srq_sz;

	status = ibt_query_hca_byguid(guid, &hca_attr);
	if (status != IBT_SUCCESS) {
		SRPT_DPRINTF_L1("ioc_init, HCA query error (%d)",
		    status);
		return (NULL);
	}

	/* Refuse to create a second ioc for the same HCA GUID. */
	ioc = srpt_ioc_get_locked(guid);
	if (ioc != NULL) {
		SRPT_DPRINTF_L2("ioc_init, HCA already exists");
		return (NULL);
	}

	ioc = kmem_zalloc(sizeof (srpt_ioc_t), KM_SLEEP);

	rw_init(&ioc->ioc_rwlock, NULL, RW_DRIVER, NULL);
	rw_enter(&ioc->ioc_rwlock, RW_WRITER);

	/* Cache the HCA attributes for later capability checks. */
	bcopy(&hca_attr, &ioc->ioc_attr, sizeof (ibt_hca_attr_t));

	SRPT_DPRINTF_L2("ioc_init, HCA max mr=%d, mrlen=%lld",
	    hca_attr.hca_max_memr, (u_longlong_t)hca_attr.hca_max_memr_len);
	ioc->ioc_guid = guid;

	status = ibt_open_hca(srpt_ctxt->sc_ibt_hdl, guid, &ioc->ioc_ibt_hdl);
	if (status != IBT_SUCCESS) {
		SRPT_DPRINTF_L1("ioc_init, IBT open failed (%d)", status);
		goto hca_open_err;
	}

	status = ibt_alloc_pd(ioc->ioc_ibt_hdl, IBT_PD_NO_FLAGS,
	    &ioc->ioc_pd_hdl);
	if (status != IBT_SUCCESS) {
		SRPT_DPRINTF_L1("ioc_init, IBT create PD failed (%d)", status);
		goto pd_alloc_err;
	}

	/*
	 * We require hardware support for SRQs. We use a common SRQ to
	 * reduce channel memory consumption.
	 */
	if ((ioc->ioc_attr.hca_flags & IBT_HCA_SRQ) == 0) {
		SRPT_DPRINTF_L0(
		    "ioc_init, no SRQ capability, HCA not supported");
		goto srq_alloc_err;
	}

	SRPT_DPRINTF_L3("ioc_init, Using shared receive queues, max srq work"
	    " queue size(%d), def size = %d", ioc->ioc_attr.hca_max_srqs_sz,
	    srpt_ioc_srq_size);
	/*
	 * Clamp the tunable to the HCA limit; the -1 keeps the request
	 * strictly below the reported maximum.
	 * NOTE(review): only srq_wr_sz and srq_sgl_sz of srq_attr are
	 * initialized here -- presumably ibt_alloc_srq() ignores the
	 * remaining fields; confirm against the IBTF interface.
	 */
	srq_sz = srq_attr.srq_wr_sz = min(srpt_ioc_srq_size,
	    ioc->ioc_attr.hca_max_srqs_sz) - 1;
	srq_attr.srq_sgl_sz = 1;

	status = ibt_alloc_srq(ioc->ioc_ibt_hdl, IBT_SRQ_NO_FLAGS,
	    ioc->ioc_pd_hdl, &srq_attr, &ioc->ioc_srq_hdl,
	    &ioc->ioc_srq_attr);
	if (status != IBT_SUCCESS) {
		SRPT_DPRINTF_L1("ioc_init, IBT create SRQ failed(%d)", status);
		goto srq_alloc_err;
	}

	SRPT_DPRINTF_L2("ioc_init, Using SRQ size(%d), MAX SG size(%d)",
	    srq_sz, 1);

	/* Let SRQ completion processing map back to this ioc. */
	ibt_set_srq_private(ioc->ioc_srq_hdl, ioc);

	/*
	 * Allocate a pool of SRP IU message buffers and post them to
	 * the I/O Controller SRQ. We let the SRQ manage the free IU
	 * messages.
	 */
	ioc->ioc_num_iu_entries = srq_sz;

	ioc->ioc_iu_pool = kmem_zalloc(sizeof (srpt_iu_t) *
	    ioc->ioc_num_iu_entries, KM_SLEEP);

	ioc->ioc_iu_bufs = kmem_alloc(srpt_iu_size *
	    ioc->ioc_num_iu_entries, KM_SLEEP);

	/*
	 * NOTE(review): KM_SLEEP allocations do not return NULL, so
	 * this check appears to be defensive only.
	 */
	if ((ioc->ioc_iu_pool == NULL) || (ioc->ioc_iu_bufs == NULL)) {
		SRPT_DPRINTF_L1("ioc_init, failed to allocate SRQ IUs");
		goto srq_iu_alloc_err;
	}

	/* Register the entire IU buffer area as one memory region. */
	mr_attr.mr_vaddr = (ib_vaddr_t)(uintptr_t)ioc->ioc_iu_bufs;
	mr_attr.mr_len = srpt_iu_size * ioc->ioc_num_iu_entries;
	mr_attr.mr_as = NULL;
	mr_attr.mr_flags = IBT_MR_ENABLE_LOCAL_WRITE;

	status = ibt_register_mr(ioc->ioc_ibt_hdl, ioc->ioc_pd_hdl,
	    &mr_attr, &ioc->ioc_iu_mr_hdl, &mr_desc);
	if (status != IBT_SUCCESS) {
		SRPT_DPRINTF_L1("ioc_init, IU buffer pool MR err(%d)",
		    status);
		goto srq_iu_alloc_err;
	}

	/* Carve the region into per-IU slices and pre-post each one. */
	for (iu_ndx = 0, iu = ioc->ioc_iu_pool; iu_ndx <
	    ioc->ioc_num_iu_entries; iu_ndx++, iu++) {

		iu_offset = (iu_ndx * srpt_iu_size);
		iu->iu_buf = (void *)((uintptr_t)ioc->ioc_iu_bufs + iu_offset);

		mutex_init(&iu->iu_lock, NULL, MUTEX_DRIVER, NULL);

		/* Scatter/gather entry covering just this IU's slice. */
		iu->iu_sge.ds_va = mr_desc.md_vaddr + iu_offset;
		iu->iu_sge.ds_key = mr_desc.md_lkey;
		iu->iu_sge.ds_len = srpt_iu_size;
		iu->iu_ioc = ioc;
		iu->iu_pool_ndx = iu_ndx;

		status = srpt_ioc_post_recv_iu(ioc, &ioc->ioc_iu_pool[iu_ndx]);
		if (status != IBT_SUCCESS) {
			SRPT_DPRINTF_L1("ioc_init, SRQ IU post err(%d)",
			    status);
			goto srq_iu_post_err;
		}
	}

	/*
	 * Initialize the dbuf vmem arena
	 */
	(void) snprintf(namebuf, sizeof (namebuf),
	    "srpt_buf_pool_%16llX", (u_longlong_t)guid);
	ioc->ioc_dbuf_pool = srpt_vmem_create(namebuf, ioc,
	    SRPT_BUF_MR_CHUNKSIZE, SRPT_BUF_POOL_MAX, srpt_dbuf_mr_flags);

	if (ioc->ioc_dbuf_pool == NULL) {
		goto stmf_db_alloc_err;
	}

	/*
	 * Allocate the I/O Controller STMF data buffer allocator. The
	 * data store will span all targets associated with this IOC.
	 */
	ioc->ioc_stmf_ds = stmf_alloc(STMF_STRUCT_DBUF_STORE, 0, 0);
	if (ioc->ioc_stmf_ds == NULL) {
		SRPT_DPRINTF_L1("ioc_attach, STMF DBUF alloc failure for IOC");
		goto stmf_db_alloc_err;
	}
	ioc->ioc_stmf_ds->ds_alloc_data_buf = &srpt_ioc_ds_alloc_dbuf;
	ioc->ioc_stmf_ds->ds_free_data_buf = &srpt_ioc_ds_free_dbuf;
	ioc->ioc_stmf_ds->ds_port_private = ioc;

	rw_exit(&ioc->ioc_rwlock);
	return (ioc);

	/*
	 * Error unwind: each label below intentionally falls through to
	 * the next so everything allocated before the failure point is
	 * released in reverse order of creation.
	 */
stmf_db_alloc_err:
	if (ioc->ioc_dbuf_pool != NULL) {
		srpt_vmem_destroy(ioc->ioc_dbuf_pool);
	}

srq_iu_post_err:
	if (ioc->ioc_iu_mr_hdl != NULL) {
		status = ibt_deregister_mr(ioc->ioc_ibt_hdl,
		    ioc->ioc_iu_mr_hdl);
		if (status != IBT_SUCCESS) {
			SRPT_DPRINTF_L1("ioc_init, error deregistering"
			    " memory region (%d)", status);
		}
	}
	/* Only destroy the mutexes initialized so far (0 .. iu_ndx-1). */
	for (err_ndx = 0, iu = ioc->ioc_iu_pool; err_ndx < iu_ndx;
	    err_ndx++, iu++) {
		mutex_destroy(&iu->iu_lock);
	}

srq_iu_alloc_err:
	if (ioc->ioc_iu_bufs != NULL) {
		kmem_free(ioc->ioc_iu_bufs, srpt_iu_size *
		    ioc->ioc_num_iu_entries);
	}
	if (ioc->ioc_iu_pool != NULL) {
		kmem_free(ioc->ioc_iu_pool,
		    sizeof (srpt_iu_t) * ioc->ioc_num_iu_entries);
	}
	if (ioc->ioc_srq_hdl != NULL) {
		status = ibt_free_srq(ioc->ioc_srq_hdl);
		if (status != IBT_SUCCESS) {
			SRPT_DPRINTF_L1("ioc_init, error freeing SRQ (%d)",
			    status);
		}

	}

srq_alloc_err:
	status = ibt_free_pd(ioc->ioc_ibt_hdl, ioc->ioc_pd_hdl);
	if (status != IBT_SUCCESS) {
		SRPT_DPRINTF_L1("ioc_init, free PD error (%d)", status);
	}

pd_alloc_err:
	status = ibt_close_hca(ioc->ioc_ibt_hdl);
	if (status != IBT_SUCCESS) {
		SRPT_DPRINTF_L1("ioc_init, close ioc error (%d)", status);
	}

hca_open_err:
	rw_exit(&ioc->ioc_rwlock);
	rw_destroy(&ioc->ioc_rwlock);
	kmem_free(ioc, sizeof (*ioc));
	return (NULL);
}
641 641
/*
 * srpt_ioc_fini() - I/O Controller Cleanup
 *
 * Releases, in reverse order of creation, everything srpt_ioc_init()
 * set up: STMF data store, SRQ, IU memory region and buffer pool, dbuf
 * vmem arena, PD and the HCA handle, then frees the ioc itself.
 *
 * Requires srpt_ctxt->sc_rwlock be held outside of call.
 */
static void
srpt_ioc_fini(srpt_ioc_t *ioc)
{
	int		status;
	int		ndx;

	/*
	 * Note driver flows will have already taken all SRP
	 * services running on the I/O Controller off-line.
	 */
	ASSERT(ioc->ioc_tgt_port == NULL);
	rw_enter(&ioc->ioc_rwlock, RW_WRITER);
	/* If the HCA was never opened there are no IB resources to free. */
	if (ioc->ioc_ibt_hdl != NULL) {
		if (ioc->ioc_stmf_ds != NULL) {
			stmf_free(ioc->ioc_stmf_ds);
		}

		if (ioc->ioc_srq_hdl != NULL) {
			SRPT_DPRINTF_L4("ioc_fini, freeing SRQ");
			status = ibt_free_srq(ioc->ioc_srq_hdl);
			if (status != IBT_SUCCESS) {
				SRPT_DPRINTF_L1("ioc_fini, free SRQ"
				    " error (%d)", status);
			}
		}

		if (ioc->ioc_iu_mr_hdl != NULL) {
			status = ibt_deregister_mr(
			    ioc->ioc_ibt_hdl, ioc->ioc_iu_mr_hdl);
			if (status != IBT_SUCCESS) {
				SRPT_DPRINTF_L1("ioc_fini, error deregistering"
				    " memory region (%d)", status);
			}
		}

		if (ioc->ioc_iu_bufs != NULL) {
			kmem_free(ioc->ioc_iu_bufs, srpt_iu_size *
			    ioc->ioc_num_iu_entries);
		}

		if (ioc->ioc_iu_pool != NULL) {
			SRPT_DPRINTF_L4("ioc_fini, freeing IU entries");
			/*
			 * NOTE(review): ndx is a signed int compared
			 * against the (presumably unsigned)
			 * ioc_num_iu_entries -- harmless at realistic
			 * pool sizes, but worth confirming the type.
			 */
			for (ndx = 0; ndx < ioc->ioc_num_iu_entries; ndx++) {
				mutex_destroy(&ioc->ioc_iu_pool[ndx].iu_lock);
			}

			SRPT_DPRINTF_L4("ioc_fini, free IU pool struct");
			kmem_free(ioc->ioc_iu_pool,
			    sizeof (srpt_iu_t) * (ioc->ioc_num_iu_entries));
			ioc->ioc_iu_pool = NULL;
			ioc->ioc_num_iu_entries = 0;
		}

		if (ioc->ioc_dbuf_pool != NULL) {
			srpt_vmem_destroy(ioc->ioc_dbuf_pool);
		}

		if (ioc->ioc_pd_hdl != NULL) {
			status = ibt_free_pd(ioc->ioc_ibt_hdl,
			    ioc->ioc_pd_hdl);
			if (status != IBT_SUCCESS) {
				SRPT_DPRINTF_L1("ioc_fini, free PD"
				    " error (%d)", status);
			}
		}

		status = ibt_close_hca(ioc->ioc_ibt_hdl);
		if (status != IBT_SUCCESS) {
			SRPT_DPRINTF_L1(
			    "ioc_fini, close ioc error (%d)", status);
		}
	}
	rw_exit(&ioc->ioc_rwlock);
	rw_destroy(&ioc->ioc_rwlock);
	kmem_free(ioc, sizeof (srpt_ioc_t));
}
723 723
724 724 /*
725 725 * srpt_ioc_port_active() - I/O Controller port active
726 726 */
727 727 static void
728 728 srpt_ioc_port_active(ibt_async_event_t *event)
729 729 {
730 730 ibt_status_t status;
731 731 srpt_ioc_t *ioc;
732 732 srpt_target_port_t *tgt = NULL;
733 733 boolean_t online_target = B_FALSE;
734 734 stmf_change_status_t cstatus;
735 735
736 736 ASSERT(event != NULL);
737 737
738 738 SRPT_DPRINTF_L3("ioc_port_active event handler, invoked");
739 739
740 740 /*
741 741 * Find the HCA in question and if the HCA has completed
742 742 * initialization, and the SRP Target service for the
743 743 * the I/O Controller exists, then bind this port.
744 744 */
745 745 ioc = srpt_ioc_get(event->ev_hca_guid);
746 746
747 747 if (ioc == NULL) {
748 748 SRPT_DPRINTF_L2("ioc_port_active, I/O Controller not"
749 749 " active");
750 750 return;
751 751 }
752 752
753 753 tgt = ioc->ioc_tgt_port;
754 754 if (tgt == NULL) {
755 755 SRPT_DPRINTF_L2("ioc_port_active, no I/O Controller target"
756 756 " undefined");
757 757 return;
758 758 }
759 759
760 760
761 761 /*
762 762 * We take the target lock here to serialize this operation
763 763 * with any STMF initiated target state transitions. If
764 764 * SRP is off-line then the service handle is NULL.
765 765 */
766 766 mutex_enter(&tgt->tp_lock);
767 767
768 768 if (tgt->tp_ibt_svc_hdl != NULL) {
769 769 status = srpt_ioc_svc_bind(tgt, event->ev_port);
770 770 if ((status != IBT_SUCCESS) &&
771 771 (status != IBT_HCA_PORT_NOT_ACTIVE)) {
772 772 SRPT_DPRINTF_L1("ioc_port_active, bind failed (%d)",
773 773 status);
774 774 }
775 775 } else {
776 776 /* if we were offline because of no ports, try onlining now */
777 777 if ((tgt->tp_num_active_ports == 0) &&
778 778 (tgt->tp_requested_state != tgt->tp_state) &&
779 779 (tgt->tp_requested_state == SRPT_TGT_STATE_ONLINE)) {
780 780 online_target = B_TRUE;
781 781 cstatus.st_completion_status = STMF_SUCCESS;
782 782 cstatus.st_additional_info = "port active";
783 783 }
784 784 }
785 785
786 786 mutex_exit(&tgt->tp_lock);
787 787
788 788 if (online_target) {
789 789 stmf_status_t ret;
790 790
791 791 ret = stmf_ctl(STMF_CMD_LPORT_ONLINE, tgt->tp_lport, &cstatus);
792 792
793 793 if (ret == STMF_SUCCESS) {
794 794 SRPT_DPRINTF_L1("ioc_port_active, port %d active, "
795 795 "target %016llx online requested", event->ev_port,
796 796 (u_longlong_t)ioc->ioc_guid);
797 797 } else if (ret != STMF_ALREADY) {
798 798 SRPT_DPRINTF_L1("ioc_port_active, port %d active, "
799 799 "target %016llx failed online request: %d",
800 800 event->ev_port, (u_longlong_t)ioc->ioc_guid,
801 801 (int)ret);
802 802 }
803 803 }
804 804 }
805 805
/*
 * srpt_ioc_port_down()
 *
 * Handles an IBT_ERROR_PORT_DOWN event: disconnects every channel
 * logged in through the downed port and, if no active ports remain,
 * asks STMF to take the target off-line.
 */
static void
srpt_ioc_port_down(ibt_async_event_t *event)
{
	srpt_ioc_t		*ioc;
	srpt_target_port_t	*tgt;
	srpt_channel_t		*ch;
	srpt_channel_t		*next_ch;
	boolean_t		offline_target = B_FALSE;
	stmf_change_status_t	cstatus;

	SRPT_DPRINTF_L3("ioc_port_down event handler, invoked");

	/*
	 * Find the HCA in question and if the HCA has completed
	 * initialization, and the SRP Target service for the
	 * the I/O Controller exists, then logout initiators
	 * through this port.
	 */
	ioc = srpt_ioc_get(event->ev_hca_guid);

	if (ioc == NULL) {
		SRPT_DPRINTF_L2("ioc_port_down, I/O Controller not"
		    " active");
		return;
	}

	/*
	 * We only have one target now, but we could go through all
	 * SCSI target ports if more are added.
	 */
	tgt = ioc->ioc_tgt_port;
	if (tgt == NULL) {
		SRPT_DPRINTF_L2("ioc_port_down, no I/O Controller target"
		    " undefined");
		return;
	}
	mutex_enter(&tgt->tp_lock);

	/*
	 * For all channel's logged in through this port, initiate a
	 * disconnect.  next_ch is captured before the disconnect so
	 * the walk survives the channel leaving the list.
	 */
	mutex_enter(&tgt->tp_ch_list_lock);
	ch = list_head(&tgt->tp_ch_list);
	while (ch != NULL) {
		next_ch = list_next(&tgt->tp_ch_list, ch);
		if (ch->ch_session && (ch->ch_session->ss_hw_port ==
		    event->ev_port)) {
			srpt_ch_disconnect(ch);
		}
		ch = next_ch;
	}
	mutex_exit(&tgt->tp_ch_list_lock);

	tgt->tp_num_active_ports--;

	/* if we have no active ports, take the target offline */
	if ((tgt->tp_num_active_ports == 0) &&
	    (tgt->tp_state == SRPT_TGT_STATE_ONLINE)) {
		cstatus.st_completion_status = STMF_SUCCESS;
		cstatus.st_additional_info = "no ports active";
		offline_target = B_TRUE;
	}

	mutex_exit(&tgt->tp_lock);

	/* The STMF offline request is issued outside of tp_lock. */
	if (offline_target) {
		stmf_status_t	ret;

		ret = stmf_ctl(STMF_CMD_LPORT_OFFLINE, tgt->tp_lport, &cstatus);

		if (ret == STMF_SUCCESS) {
			SRPT_DPRINTF_L1("ioc_port_down, port %d down, target "
			    "%016llx offline requested", event->ev_port,
			    (u_longlong_t)ioc->ioc_guid);
		} else if (ret != STMF_ALREADY) {
			SRPT_DPRINTF_L1("ioc_port_down, port %d down, target "
			    "%016llx failed offline request: %d",
			    event->ev_port,
			    (u_longlong_t)ioc->ioc_guid, (int)ret);
		}
	}
}
892 892
893 893 /*
894 894 * srpt_ioc_ib_async_hdlr - I/O Controller IB asynchronous events
895 895 */
896 896 /* ARGSUSED */
897 897 void
898 898 srpt_ioc_ib_async_hdlr(void *clnt, ibt_hca_hdl_t hdl,
899 899 ibt_async_code_t code, ibt_async_event_t *event)
900 900 {
901 901 srpt_channel_t *ch;
902 902
903 903 switch (code) {
904 904 case IBT_EVENT_PORT_UP:
905 905 srpt_ioc_port_active(event);
906 906 break;
907 907
908 908 case IBT_ERROR_PORT_DOWN:
909 909 srpt_ioc_port_down(event);
910 910 break;
911 911
912 912 case IBT_HCA_ATTACH_EVENT:
913 913 SRPT_DPRINTF_L2(
914 914 "ib_async_hdlr, received attach event for HCA 0x%016llx",
915 915 (u_longlong_t)event->ev_hca_guid);
916 916
917 917 rw_enter(&srpt_ctxt->sc_rwlock, RW_WRITER);
918 918 srpt_ioc_attach_hca(event->ev_hca_guid, B_FALSE);
919 919 rw_exit(&srpt_ctxt->sc_rwlock);
920 920
921 921 break;
922 922
923 923 case IBT_HCA_DETACH_EVENT:
924 924 SRPT_DPRINTF_L1(
925 925 "ioc_iob_async_hdlr, received HCA_DETACH_EVENT for "
926 926 "HCA 0x%016llx",
927 927 (u_longlong_t)event->ev_hca_guid);
928 928
929 929 rw_enter(&srpt_ctxt->sc_rwlock, RW_WRITER);
930 930 srpt_ioc_detach_hca(event->ev_hca_guid);
931 931 rw_exit(&srpt_ctxt->sc_rwlock);
932 932
933 933 break;
934 934
935 935 case IBT_EVENT_EMPTY_CHAN:
936 936 /* Channel in ERROR state is now empty */
937 937 ch = (srpt_channel_t *)ibt_get_chan_private(event->ev_chan_hdl);
938 938 SRPT_DPRINTF_L3(
939 939 "ioc_iob_async_hdlr, received empty channel error on %p",
940 940 (void *)ch);
941 941 break;
942 942
943 943 default:
944 944 SRPT_DPRINTF_L2("ioc_ib_async_hdlr, event not "
945 945 "handled (%d)", code);
946 946 break;
947 947 }
948 948 }
949 949
/*
 * srpt_ioc_svc_bind()
 *
 * Bind the SCSI target port's IBT service to the current GID of the
 * specified HCA port so SRP initiators can connect through it.
 * Returns IBT_SUCCESS on success (or when the port was already bound
 * to the same GID), otherwise an IBT error status.
 */
ibt_status_t
srpt_ioc_svc_bind(srpt_target_port_t *tgt, uint_t portnum)
{
	ibt_status_t		status;
	srpt_hw_port_t		*port;
	ibt_hca_portinfo_t	*portinfo;
	uint_t			qportinfo_sz;
	uint_t			qportnum;
	ib_gid_t		new_gid;
	srpt_ioc_t		*ioc;
	srpt_session_t		sess;

	ASSERT(tgt != NULL);
	ASSERT(tgt->tp_ioc != NULL);
	ioc = tgt->tp_ioc;

	/* The target must already have registered its IBT service. */
	if (tgt->tp_ibt_svc_hdl == NULL) {
		SRPT_DPRINTF_L2("ioc_svc_bind, NULL SCSI target port"
		    " service");
		return (IBT_INVALID_PARAM);
	}

	/* HCA port numbers are 1-based. */
	if (portnum == 0 || portnum > tgt->tp_nports) {
		SRPT_DPRINTF_L2("ioc_svc_bind, bad port (%d)", portnum);
		return (IBT_INVALID_PARAM);
	}
	status = ibt_query_hca_ports(ioc->ioc_ibt_hdl, portnum,
	    &portinfo, &qportnum, &qportinfo_sz);
	if (status != IBT_SUCCESS) {
		SRPT_DPRINTF_L1("ioc_svc_bind, query port error (%d)",
		    portnum);
		return (IBT_INVALID_PARAM);
	}

	ASSERT(portinfo != NULL);

	/*
	 * If port is not active do nothing, caller should attempt to bind
	 * after the port goes active.
	 */
	if (portinfo->p_linkstate != IBT_PORT_ACTIVE) {
		SRPT_DPRINTF_L2("ioc_svc_bind, port %d not in active state",
		    portnum);
		ibt_free_portinfo(portinfo, qportinfo_sz);
		return (IBT_HCA_PORT_NOT_ACTIVE);
	}

	port = &tgt->tp_hw_port[portnum-1];
	/* Only the first entry of the port's GID table is used. */
	new_gid = portinfo->p_sgid_tbl[0];
	ibt_free_portinfo(portinfo, qportinfo_sz);

	/*
	 * If previously bound and the port GID has changed,
	 * unbind the old GID.
	 */
	if (port->hwp_bind_hdl != NULL) {
		if (new_gid.gid_guid != port->hwp_gid.gid_guid ||
		    new_gid.gid_prefix != port->hwp_gid.gid_prefix) {
			SRPT_DPRINTF_L2("ioc_svc_bind, unregister current"
			    " bind");
			(void) ibt_unbind_service(tgt->tp_ibt_svc_hdl,
			    port->hwp_bind_hdl);
			port->hwp_bind_hdl = NULL;
		} else {
			SRPT_DPRINTF_L2("ioc_svc_bind, port %d already bound",
			    portnum);
		}
	}

	/* bind the new port GID */
	if (port->hwp_bind_hdl == NULL) {
		SRPT_DPRINTF_L2("ioc_svc_bind, bind service, %016llx:%016llx",
		    (u_longlong_t)new_gid.gid_prefix,
		    (u_longlong_t)new_gid.gid_guid);

		/*
		 * Pass SCSI Target Port as CM private data, the target will
		 * always exist while this service is bound.
		 */
		status = ibt_bind_service(tgt->tp_ibt_svc_hdl, new_gid, NULL,
		    tgt, &port->hwp_bind_hdl);
		if (status != IBT_SUCCESS && status != IBT_CM_SERVICE_EXISTS) {
			SRPT_DPRINTF_L1("ioc_svc_bind, bind error (%d)",
			    status);
			return (status);
		}
		port->hwp_gid.gid_prefix = new_gid.gid_prefix;
		port->hwp_gid.gid_guid = new_gid.gid_guid;
	}

	/*
	 * port is now active
	 * NOTE(review): this counter is incremented even on the
	 * "already bound" path above — confirm callers only invoke
	 * this routine on a port state transition, otherwise active
	 * ports could be double-counted.
	 */
	tgt->tp_num_active_ports++;

	/* setting up a transient structure for the dtrace probe. */
	bzero(&sess, sizeof (srpt_session_t));
	ALIAS_STR(sess.ss_t_gid, new_gid.gid_prefix, new_gid.gid_guid);
	EUI_STR(sess.ss_t_name, tgt->tp_ibt_svc_id);

	DTRACE_SRP_1(service__up, srpt_session_t, &sess);

	return (IBT_SUCCESS);
}
1055 1055
1056 1056 /*
1057 1057 * srpt_ioc_svc_unbind()
1058 1058 */
1059 1059 void
1060 1060 srpt_ioc_svc_unbind(srpt_target_port_t *tgt, uint_t portnum)
1061 1061 {
1062 1062 srpt_hw_port_t *port;
1063 1063 srpt_session_t sess;
1064 1064 ibt_status_t ret;
1065 1065
1066 1066 if (tgt == NULL) {
1067 1067 SRPT_DPRINTF_L2("ioc_svc_unbind, SCSI target does not exist");
1068 1068 return;
1069 1069 }
1070 1070
1071 1071 if (portnum == 0 || portnum > tgt->tp_nports) {
1072 1072 SRPT_DPRINTF_L2("ioc_svc_unbind, bad port (%d)", portnum);
1073 1073 return;
1074 1074 }
1075 1075 port = &tgt->tp_hw_port[portnum-1];
1076 1076
1077 1077 /* setting up a transient structure for the dtrace probe. */
1078 1078 bzero(&sess, sizeof (srpt_session_t));
1079 1079 ALIAS_STR(sess.ss_t_gid, port->hwp_gid.gid_prefix,
1080 1080 port->hwp_gid.gid_guid);
1081 1081 EUI_STR(sess.ss_t_name, tgt->tp_ibt_svc_id);
1082 1082
1083 1083 DTRACE_SRP_1(service__down, srpt_session_t, &sess);
1084 1084
1085 1085 if (tgt->tp_ibt_svc_hdl != NULL && port->hwp_bind_hdl != NULL) {
1086 1086 SRPT_DPRINTF_L2("ioc_svc_unbind, unregister current bind");
1087 1087 ret = ibt_unbind_service(tgt->tp_ibt_svc_hdl,
1088 1088 port->hwp_bind_hdl);
1089 1089 if (ret != IBT_SUCCESS) {
1090 1090 SRPT_DPRINTF_L1(
1091 1091 "ioc_svc_unbind, unregister port %d failed: %d",
1092 1092 portnum, ret);
1093 1093 } else {
1094 1094 port->hwp_bind_hdl = NULL;
1095 1095 port->hwp_gid.gid_prefix = 0;
1096 1096 port->hwp_gid.gid_guid = 0;
1097 1097 }
1098 1098 }
1099 1099 }
1100 1100
1101 1101 /*
1102 1102 * srpt_ioc_svc_unbind_all()
1103 1103 */
1104 1104 void
1105 1105 srpt_ioc_svc_unbind_all(srpt_target_port_t *tgt)
1106 1106 {
1107 1107 uint_t portnum;
1108 1108
1109 1109 if (tgt == NULL) {
1110 1110 SRPT_DPRINTF_L2("ioc_svc_unbind_all, NULL SCSI target port"
1111 1111 " specified");
1112 1112 return;
1113 1113 }
1114 1114 for (portnum = 1; portnum <= tgt->tp_nports; portnum++) {
1115 1115 srpt_ioc_svc_unbind(tgt, portnum);
1116 1116 }
1117 1117 }
1118 1118
1119 1119 /*
1120 1120 * srpt_ioc_get_locked()
1121 1121 *
1122 1122 * Requires srpt_ctxt->rw_lock be held outside of call.
1123 1123 */
1124 1124 srpt_ioc_t *
1125 1125 srpt_ioc_get_locked(ib_guid_t guid)
1126 1126 {
1127 1127 srpt_ioc_t *ioc;
1128 1128
1129 1129 ioc = list_head(&srpt_ctxt->sc_ioc_list);
1130 1130 while (ioc != NULL) {
1131 1131 if (ioc->ioc_guid == guid) {
1132 1132 break;
1133 1133 }
1134 1134 ioc = list_next(&srpt_ctxt->sc_ioc_list, ioc);
1135 1135 }
1136 1136 return (ioc);
1137 1137 }
1138 1138
1139 1139 /*
1140 1140 * srpt_ioc_get()
1141 1141 */
1142 1142 srpt_ioc_t *
1143 1143 srpt_ioc_get(ib_guid_t guid)
1144 1144 {
1145 1145 srpt_ioc_t *ioc;
1146 1146
1147 1147 rw_enter(&srpt_ctxt->sc_rwlock, RW_READER);
1148 1148 ioc = srpt_ioc_get_locked(guid);
1149 1149 rw_exit(&srpt_ctxt->sc_rwlock);
1150 1150 return (ioc);
1151 1151 }
1152 1152
1153 1153 /*
1154 1154 * srpt_ioc_post_recv_iu()
1155 1155 */
1156 1156 ibt_status_t
1157 1157 srpt_ioc_post_recv_iu(srpt_ioc_t *ioc, srpt_iu_t *iu)
1158 1158 {
1159 1159 ibt_status_t status;
1160 1160 ibt_recv_wr_t wr;
1161 1161 uint_t posted;
1162 1162
1163 1163 ASSERT(ioc != NULL);
1164 1164 ASSERT(iu != NULL);
1165 1165
1166 1166 wr.wr_id = (ibt_wrid_t)(uintptr_t)iu;
1167 1167 wr.wr_nds = 1;
1168 1168 wr.wr_sgl = &iu->iu_sge;
1169 1169 posted = 0;
1170 1170
1171 1171 status = ibt_post_srq(ioc->ioc_srq_hdl, &wr, 1, &posted);
1172 1172 if (status != IBT_SUCCESS) {
1173 1173 SRPT_DPRINTF_L2("ioc_post_recv_iu, post error (%d)",
1174 1174 status);
1175 1175 }
1176 1176 return (status);
1177 1177 }
1178 1178
/*
 * srpt_ioc_repost_recv_iu()
 *
 * Reset a completed IU to its pristine state and post it back to the
 * IOC's shared receive queue for reuse.  If the IU was associated
 * with a channel, bump that channel's request-limit delta.
 *
 * Caller must hold iu->iu_lock (asserted below).
 */
void
srpt_ioc_repost_recv_iu(srpt_ioc_t *ioc, srpt_iu_t *iu)
{
	srpt_channel_t		*ch;
	ibt_status_t		status;

	ASSERT(iu != NULL);
	ASSERT(mutex_owned(&iu->iu_lock));

	/*
	 * Some additional sanity checks while in debug state, all STMF
	 * related task activities should be complete prior to returning
	 * this IU to the available pool.
	 */
	ASSERT(iu->iu_stmf_task == NULL);
	ASSERT(iu->iu_sq_posted_cnt == 0);

	/* Remember the owning channel, then scrub all per-request state. */
	ch = iu->iu_ch;
	iu->iu_ch = NULL;
	iu->iu_num_rdescs = 0;
	iu->iu_rdescs = NULL;
	iu->iu_tot_xfer_len = 0;
	iu->iu_tag = 0;
	iu->iu_flags = 0;
	iu->iu_sq_posted_cnt = 0;

	status = srpt_ioc_post_recv_iu(ioc, iu);

	if (status != IBT_SUCCESS) {
		/*
		 * Very bad, we should initiate a shutdown of the I/O
		 * Controller here, off-lining any targets associated
		 * with this I/O Controller (and therefore disconnecting
		 * any logins that remain).
		 *
		 * In practice this should never happen so we put
		 * the code near the bottom of the implementation list.
		 */
		SRPT_DPRINTF_L0("ioc_repost_recv_iu, error RX IU (%d)",
		    status);
		ASSERT(0);
	} else if (ch != NULL) {
		/*
		 * Presumably returns a request credit to the initiator
		 * via SRP flow control — confirm against srpt_ch usage
		 * of ch_req_lim_delta.
		 */
		atomic_inc_32(&ch->ch_req_lim_delta);
	}
}
↓ open down ↓ |
1226 lines elided |
↑ open up ↑ |
1227 1227
/*
 * srpt_ioc_init_profile()
 *
 * Populate the I/O Controller profile and service record that are
 * reported in response to Device Management queries.  Multi-byte
 * fields are converted to big-endian wire order via the h2b*()
 * helpers.
 *
 * SRP I/O Controller serialization lock must be held when this
 * routine is invoked.
 */
void
srpt_ioc_init_profile(srpt_ioc_t *ioc)
{
	/*
	 * Double braces: the union's first member is an aggregate, so
	 * full bracing keeps -Wmissing-braces quiet.
	 */
	srpt_ioc_opcap_mask_t	capmask = {{0}};

	ASSERT(ioc != NULL);

	ioc->ioc_profile.ioc_guid = h2b64(ioc->ioc_guid);
	(void) memcpy(ioc->ioc_profile.ioc_id_string,
	    "Solaris SRP Target 0.9a", 23);

	/*
	 * Note vendor ID and subsystem ID are 24 bit values. Low order
	 * 8 bits in vendor ID field is slot and is initialized to zero.
	 * Low order 8 bits of subsystem ID is a reserved field and
	 * initialized to zero.
	 */
	ioc->ioc_profile.ioc_vendorid =
	    h2b32((uint32_t)(ioc->ioc_attr.hca_vendor_id << 8));
	ioc->ioc_profile.ioc_deviceid =
	    h2b32((uint32_t)ioc->ioc_attr.hca_device_id);
	ioc->ioc_profile.ioc_device_ver =
	    h2b16((uint16_t)ioc->ioc_attr.hca_version_id);
	ioc->ioc_profile.ioc_subsys_vendorid =
	    h2b32((uint32_t)(ioc->ioc_attr.hca_vendor_id << 8));
	ioc->ioc_profile.ioc_subsys_id = h2b32(0);
	ioc->ioc_profile.ioc_io_class = h2b16(SRP_REV_16A_IO_CLASS);
	ioc->ioc_profile.ioc_io_subclass = h2b16(SRP_IO_SUBCLASS);
	ioc->ioc_profile.ioc_protocol = h2b16(SRP_PROTOCOL);
	ioc->ioc_profile.ioc_protocol_ver = h2b16(SRP_PROTOCOL_VERSION);
	ioc->ioc_profile.ioc_send_msg_qdepth = h2b16(srpt_send_msg_depth);
	/*
	 * NOTE(review): assigned without a byte swap, unlike the
	 * neighboring fields — presumably a single-byte field; confirm
	 * against the profile structure definition.
	 */
	ioc->ioc_profile.ioc_rdma_read_qdepth =
	    ioc->ioc_attr.hca_max_rdma_out_chan;
	ioc->ioc_profile.ioc_send_msg_sz = h2b32(srpt_iu_size);
	ioc->ioc_profile.ioc_rdma_xfer_sz = h2b32(SRPT_DEFAULT_MAX_RDMA_SIZE);

	capmask.bits.st = 1;	/* Messages can be sent to IOC */
	capmask.bits.sf = 1;	/* Messages can be sent from IOC */
	capmask.bits.rf = 1;	/* RDMA Reads can be sent from IOC */
	capmask.bits.wf = 1;	/* RDMA Writes can be sent from IOC */
	ioc->ioc_profile.ioc_ctrl_opcap_mask = capmask.mask;

	/*
	 * We currently only have one target, but if we had a list we would
	 * go through that list and only count those that are ONLINE when
	 * setting the services count and entries.
	 */
	if (ioc->ioc_tgt_port->tp_srp_enabled) {
		ioc->ioc_profile.ioc_service_entries = 1;
		ioc->ioc_svc.srv_id = h2b64(ioc->ioc_guid);
		(void) snprintf((char *)ioc->ioc_svc.srv_name,
		    IB_DM_MAX_SVC_NAME_LEN, "SRP.T10:%016llx",
		    (u_longlong_t)ioc->ioc_guid);
	} else {
		ioc->ioc_profile.ioc_service_entries = 0;
		ioc->ioc_svc.srv_id = 0;
	}
}
1292 1292
1293 1293 /*
1294 1294 * srpt_ioc_ds_alloc_dbuf()
1295 1295 */
1296 1296 /* ARGSUSED */
1297 1297 stmf_data_buf_t *
1298 1298 srpt_ioc_ds_alloc_dbuf(struct scsi_task *task, uint32_t size,
1299 1299 uint32_t *pminsize, uint32_t flags)
1300 1300 {
1301 1301 srpt_iu_t *iu;
1302 1302 srpt_ioc_t *ioc;
1303 1303 srpt_ds_dbuf_t *dbuf;
1304 1304 stmf_data_buf_t *stmf_dbuf;
1305 1305 void *buf;
1306 1306 srpt_mr_t mr;
1307 1307
1308 1308 ASSERT(task != NULL);
1309 1309 iu = task->task_port_private;
1310 1310 ioc = iu->iu_ioc;
1311 1311
1312 1312 SRPT_DPRINTF_L4("ioc_ds_alloc_dbuf, invoked ioc(%p)"
1313 1313 " size(%d), flags(%x)",
1314 1314 (void *)ioc, size, flags);
1315 1315
1316 1316 buf = srpt_vmem_alloc(ioc->ioc_dbuf_pool, size);
1317 1317 if (buf == NULL) {
1318 1318 return (NULL);
1319 1319 }
1320 1320
1321 1321 if (srpt_vmem_mr(ioc->ioc_dbuf_pool, buf, size, &mr) != 0) {
1322 1322 goto stmf_alloc_err;
1323 1323 }
1324 1324
1325 1325 stmf_dbuf = stmf_alloc(STMF_STRUCT_DATA_BUF, sizeof (srpt_ds_dbuf_t),
1326 1326 0);
1327 1327 if (stmf_dbuf == NULL) {
1328 1328 SRPT_DPRINTF_L2("ioc_ds_alloc_dbuf, stmf_alloc failed");
1329 1329 goto stmf_alloc_err;
1330 1330 }
1331 1331
1332 1332 dbuf = stmf_dbuf->db_port_private;
1333 1333 dbuf->db_stmf_buf = stmf_dbuf;
1334 1334 dbuf->db_mr_hdl = mr.mr_hdl;
1335 1335 dbuf->db_ioc = ioc;
1336 1336 dbuf->db_sge.ds_va = mr.mr_va;
1337 1337 dbuf->db_sge.ds_key = mr.mr_lkey;
1338 1338 dbuf->db_sge.ds_len = size;
1339 1339
1340 1340 stmf_dbuf->db_buf_size = size;
1341 1341 stmf_dbuf->db_data_size = size;
1342 1342 stmf_dbuf->db_relative_offset = 0;
1343 1343 stmf_dbuf->db_flags = 0;
1344 1344 stmf_dbuf->db_xfer_status = 0;
1345 1345 stmf_dbuf->db_sglist_length = 1;
1346 1346 stmf_dbuf->db_sglist[0].seg_addr = buf;
1347 1347 stmf_dbuf->db_sglist[0].seg_length = size;
1348 1348
1349 1349 return (stmf_dbuf);
1350 1350
1351 1351 buf_mr_err:
1352 1352 stmf_free(stmf_dbuf);
1353 1353
1354 1354 stmf_alloc_err:
1355 1355 srpt_vmem_free(ioc->ioc_dbuf_pool, buf, size);
1356 1356
1357 1357 return (NULL);
1358 1358 }
1359 1359
1360 1360 void
1361 1361 srpt_ioc_ds_free_dbuf(struct stmf_dbuf_store *ds,
1362 1362 stmf_data_buf_t *dbuf)
1363 1363 {
1364 1364 srpt_ioc_t *ioc;
1365 1365
1366 1366 SRPT_DPRINTF_L4("ioc_ds_free_dbuf, invoked buf (%p)",
1367 1367 (void *)dbuf);
1368 1368 ioc = ds->ds_port_private;
1369 1369
1370 1370 srpt_vmem_free(ioc->ioc_dbuf_pool, dbuf->db_sglist[0].seg_addr,
1371 1371 dbuf->db_buf_size);
1372 1372 stmf_free(dbuf);
1373 1373 }
1374 1374
1375 1375 /* Memory arena routines */
1376 1376
/*
 * srpt_vmem_create()
 *
 * Create an HCA-registered memory pool: allocate an initial chunk of
 * "chunksize" bytes, register it with "flags", and seed a vmem arena
 * with it.  The pool may later grow up to "maxsize" bytes (see
 * srpt_vmem_alloc()).
 */
static srpt_vmem_pool_t *
srpt_vmem_create(const char *name, srpt_ioc_t *ioc, ib_memlen_t chunksize,
    uint64_t maxsize, ibt_mr_flags_t flags)
{
	srpt_mr_t		*chunk;
	srpt_vmem_pool_t	*result;

	ASSERT(chunksize <= maxsize);

	result = kmem_zalloc(sizeof (srpt_vmem_pool_t), KM_SLEEP);

	result->svp_ioc = ioc;
	result->svp_chunksize = chunksize;
	result->svp_max_size = maxsize;
	result->svp_flags = flags;

	rw_init(&result->svp_lock, NULL, RW_DRIVER, NULL);
	/* Chunks are kept sorted by starting VA for srpt_vmem_mr(). */
	avl_create(&result->svp_mr_list, srpt_vmem_mr_compare,
	    sizeof (srpt_mr_t), offsetof(srpt_mr_t, mr_avl));

	/*
	 * NOTE(review): srpt_vmem_chunk_alloc() allocates KM_NOSLEEP
	 * internally and can return NULL, which would be dereferenced
	 * below — confirm callers cannot hit (or tolerate) an
	 * initial-chunk allocation failure.
	 */
	chunk = srpt_vmem_chunk_alloc(result, chunksize);

	avl_add(&result->svp_mr_list, chunk);
	result->svp_total_size = chunksize;

	result->svp_vmem = vmem_create(name,
	    (void*)(uintptr_t)chunk->mr_va,
	    (size_t)chunk->mr_len, SRPT_MR_QUANTSIZE,
	    NULL, NULL, NULL, 0, VM_SLEEP);

	return (result);
}
1409 1409
1410 1410 static void
1411 1411 srpt_vmem_destroy(srpt_vmem_pool_t *vm_pool)
1412 1412 {
1413 1413 srpt_mr_t *chunk;
1414 1414 srpt_mr_t *next;
1415 1415
1416 1416 rw_enter(&vm_pool->svp_lock, RW_WRITER);
1417 1417 vmem_destroy(vm_pool->svp_vmem);
1418 1418
1419 1419 chunk = avl_first(&vm_pool->svp_mr_list);
1420 1420
1421 1421 while (chunk != NULL) {
1422 1422 next = AVL_NEXT(&vm_pool->svp_mr_list, chunk);
1423 1423 avl_remove(&vm_pool->svp_mr_list, chunk);
1424 1424 srpt_vmem_chunk_free(vm_pool, chunk);
1425 1425 chunk = next;
1426 1426 }
1427 1427
1428 1428 avl_destroy(&vm_pool->svp_mr_list);
1429 1429
1430 1430 rw_exit(&vm_pool->svp_lock);
1431 1431 rw_destroy(&vm_pool->svp_lock);
1432 1432
1433 1433 kmem_free(vm_pool, sizeof (srpt_vmem_pool_t));
1434 1434 }
1435 1435
/*
 * srpt_vmem_alloc()
 *
 * Allocate "size" bytes from the pool's vmem arena.  If the arena is
 * exhausted, attempt to grow it by one more registered chunk (capped
 * at svp_max_size) and retry the allocation once.  Never sleeps;
 * returns NULL on failure.
 */
static void *
srpt_vmem_alloc(srpt_vmem_pool_t *vm_pool, size_t size)
{
	void		*result;
	srpt_mr_t	*next;
	ib_memlen_t	chunklen;

	ASSERT(vm_pool != NULL);

	/* Fast path: satisfy the request from the existing arena. */
	result = vmem_alloc(vm_pool->svp_vmem, size,
	    VM_NOSLEEP | VM_FIRSTFIT);

	if (result != NULL) {
		/* memory successfully allocated */
		return (result);
	}

	/* need more vmem */
	rw_enter(&vm_pool->svp_lock, RW_WRITER);
	chunklen = vm_pool->svp_chunksize;

	if (vm_pool->svp_total_size >= vm_pool->svp_max_size) {
		/* no more room to alloc */
		rw_exit(&vm_pool->svp_lock);
		return (NULL);
	}

	/* Clamp growth so the pool never exceeds its maximum size. */
	if ((vm_pool->svp_total_size + chunklen) > vm_pool->svp_max_size) {
		chunklen = vm_pool->svp_max_size - vm_pool->svp_total_size;
	}

	next = srpt_vmem_chunk_alloc(vm_pool, chunklen);
	if (next != NULL) {
		/*
		 * Note that the size of the chunk we got
		 * may not be the size we requested. Use the
		 * length returned in the chunk itself.
		 */
		if (vmem_add(vm_pool->svp_vmem, (void*)(uintptr_t)next->mr_va,
		    next->mr_len, VM_NOSLEEP) == NULL) {
			srpt_vmem_chunk_free(vm_pool, next);
			SRPT_DPRINTF_L2("vmem_add failed");
		} else {
			vm_pool->svp_total_size += next->mr_len;
			avl_add(&vm_pool->svp_mr_list, next);
		}
	}

	rw_exit(&vm_pool->svp_lock);

	/* Retry once; NULL here means the grow attempt failed too. */
	result = vmem_alloc(vm_pool->svp_vmem, size, VM_NOSLEEP | VM_FIRSTFIT);

	return (result);
}
1490 1490
/*
 * srpt_vmem_free()
 *
 * Return a buffer obtained from srpt_vmem_alloc() to the pool's vmem
 * arena.  Caller must pass the same size used at allocation time.
 */
static void
srpt_vmem_free(srpt_vmem_pool_t *vm_pool, void *vaddr, size_t size)
{
	vmem_free(vm_pool->svp_vmem, vaddr, size);
}
1496 1496
1497 1497 static int
1498 1498 srpt_vmem_mr(srpt_vmem_pool_t *vm_pool, void *vaddr, size_t size,
1499 1499 srpt_mr_t *mr)
1500 1500 {
1501 1501 avl_index_t where;
1502 1502 ib_vaddr_t mrva = (ib_vaddr_t)(uintptr_t)vaddr;
1503 1503 srpt_mr_t chunk;
1504 1504 srpt_mr_t *nearest;
1505 1505 ib_vaddr_t chunk_end;
1506 1506 int status = DDI_FAILURE;
1507 1507
1508 1508 rw_enter(&vm_pool->svp_lock, RW_READER);
1509 1509
1510 1510 chunk.mr_va = mrva;
1511 1511 nearest = avl_find(&vm_pool->svp_mr_list, &chunk, &where);
1512 1512
1513 1513 if (nearest == NULL) {
1514 1514 nearest = avl_nearest(&vm_pool->svp_mr_list, where,
1515 1515 AVL_BEFORE);
1516 1516 }
1517 1517
1518 1518 if (nearest != NULL) {
1519 1519 /* Verify this chunk contains the specified address range */
1520 1520 ASSERT(nearest->mr_va <= mrva);
1521 1521
1522 1522 chunk_end = nearest->mr_va + nearest->mr_len;
1523 1523 if (chunk_end >= mrva + size) {
1524 1524 mr->mr_hdl = nearest->mr_hdl;
1525 1525 mr->mr_va = mrva;
1526 1526 mr->mr_len = size;
1527 1527 mr->mr_lkey = nearest->mr_lkey;
1528 1528 mr->mr_rkey = nearest->mr_rkey;
1529 1529 status = DDI_SUCCESS;
1530 1530 }
1531 1531 }
1532 1532
1533 1533 rw_exit(&vm_pool->svp_lock);
1534 1534 return (status);
1535 1535 }
1536 1536
1537 1537 static srpt_mr_t *
1538 1538 srpt_vmem_chunk_alloc(srpt_vmem_pool_t *vm_pool, ib_memlen_t chunksize)
1539 1539 {
1540 1540 void *chunk = NULL;
1541 1541 srpt_mr_t *result = NULL;
1542 1542
1543 1543 while ((chunk == NULL) && (chunksize >= SRPT_MIN_CHUNKSIZE)) {
1544 1544 chunk = kmem_alloc(chunksize, KM_NOSLEEP);
1545 1545 if (chunk == NULL) {
1546 1546 SRPT_DPRINTF_L2("srpt_vmem_chunk_alloc: "
1547 1547 "failed to alloc chunk of %d, trying %d",
1548 1548 (int)chunksize, (int)chunksize/2);
1549 1549 chunksize /= 2;
1550 1550 }
1551 1551 }
1552 1552
1553 1553 if (chunk != NULL) {
1554 1554 result = srpt_reg_mem(vm_pool, (ib_vaddr_t)(uintptr_t)chunk,
1555 1555 chunksize);
1556 1556 if (result == NULL) {
1557 1557 SRPT_DPRINTF_L2("srpt_vmem_chunk_alloc: "
1558 1558 "chunk registration failed");
1559 1559 kmem_free(chunk, chunksize);
1560 1560 }
1561 1561 }
1562 1562
1563 1563 return (result);
1564 1564 }
1565 1565
1566 1566 static void
1567 1567 srpt_vmem_chunk_free(srpt_vmem_pool_t *vm_pool, srpt_mr_t *mr)
1568 1568 {
1569 1569 void *chunk = (void *)(uintptr_t)mr->mr_va;
1570 1570 ib_memlen_t chunksize = mr->mr_len;
1571 1571
1572 1572 srpt_dereg_mem(vm_pool->svp_ioc, mr);
1573 1573 kmem_free(chunk, chunksize);
1574 1574 }
1575 1575
1576 1576 static srpt_mr_t *
1577 1577 srpt_reg_mem(srpt_vmem_pool_t *vm_pool, ib_vaddr_t vaddr, ib_memlen_t len)
1578 1578 {
1579 1579 srpt_mr_t *result = NULL;
1580 1580 ibt_mr_attr_t mr_attr;
1581 1581 ibt_mr_desc_t mr_desc;
1582 1582 ibt_status_t status;
1583 1583 srpt_ioc_t *ioc = vm_pool->svp_ioc;
1584 1584
1585 1585 result = kmem_zalloc(sizeof (srpt_mr_t), KM_NOSLEEP);
1586 1586 if (result == NULL) {
1587 1587 SRPT_DPRINTF_L2("srpt_reg_mem: failed to allocate");
1588 1588 return (NULL);
1589 1589 }
1590 1590
1591 1591 bzero(&mr_attr, sizeof (ibt_mr_attr_t));
1592 1592 bzero(&mr_desc, sizeof (ibt_mr_desc_t));
1593 1593
1594 1594 mr_attr.mr_vaddr = vaddr;
1595 1595 mr_attr.mr_len = len;
1596 1596 mr_attr.mr_as = NULL;
1597 1597 mr_attr.mr_flags = vm_pool->svp_flags;
1598 1598
1599 1599 status = ibt_register_mr(ioc->ioc_ibt_hdl, ioc->ioc_pd_hdl,
1600 1600 &mr_attr, &result->mr_hdl, &mr_desc);
1601 1601 if (status != IBT_SUCCESS) {
1602 1602 SRPT_DPRINTF_L2("srpt_reg_mem: ibt_register_mr "
1603 1603 "failed %d", status);
1604 1604 kmem_free(result, sizeof (srpt_mr_t));
1605 1605 return (NULL);
1606 1606 }
1607 1607
1608 1608 result->mr_va = mr_attr.mr_vaddr;
1609 1609 result->mr_len = mr_attr.mr_len;
1610 1610 result->mr_lkey = mr_desc.md_lkey;
1611 1611 result->mr_rkey = mr_desc.md_rkey;
1612 1612
1613 1613 return (result);
1614 1614 }
1615 1615
1616 1616 static void
1617 1617 srpt_dereg_mem(srpt_ioc_t *ioc, srpt_mr_t *mr)
1618 1618 {
1619 1619 ibt_status_t status;
1620 1620
1621 1621 status = ibt_deregister_mr(ioc->ioc_ibt_hdl, mr->mr_hdl);
1622 1622 if (status != IBT_SUCCESS) {
1623 1623 SRPT_DPRINTF_L1("srpt_dereg_mem, error deregistering MR (%d)",
1624 1624 status);
1625 1625 }
1626 1626 kmem_free(mr, sizeof (srpt_mr_t));
1627 1627 }
1628 1628
1629 1629 static int
1630 1630 srpt_vmem_mr_compare(const void *a, const void *b)
1631 1631 {
1632 1632 srpt_mr_t *mr1 = (srpt_mr_t *)a;
1633 1633 srpt_mr_t *mr2 = (srpt_mr_t *)b;
1634 1634
1635 1635 /* sort and match by virtual address */
1636 1636 if (mr1->mr_va < mr2->mr_va) {
1637 1637 return (-1);
1638 1638 } else if (mr1->mr_va > mr2->mr_va) {
1639 1639 return (1);
1640 1640 }
1641 1641
1642 1642 return (0);
1643 1643 }
↓ open down ↓ |
396 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX