Print this page
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/idm/idm.c
+++ new/usr/src/uts/common/io/idm/idm.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 #include <sys/cpuvar.h>
26 26 #include <sys/conf.h>
27 27 #include <sys/file.h>
28 28 #include <sys/ddi.h>
29 29 #include <sys/sunddi.h>
30 30 #include <sys/modctl.h>
31 31
32 32 #include <sys/socket.h>
33 33 #include <sys/strsubr.h>
34 34 #include <sys/sysmacros.h>
35 35
36 36 #include <sys/socketvar.h>
37 37 #include <netinet/in.h>
38 38
39 39 #include <sys/idm/idm.h>
40 40 #include <sys/idm/idm_so.h>
41 41
42 42 #define IDM_NAME_VERSION "iSCSI Data Mover"
↓ open down ↓ |
42 lines elided |
↑ open up ↑ |
43 43
44 44 extern struct mod_ops mod_miscops;
45 45 extern struct mod_ops mod_miscops;
46 46
47 47 static struct modlmisc modlmisc = {
48 48 &mod_miscops, /* Type of module */
49 49 IDM_NAME_VERSION
50 50 };
51 51
52 52 static struct modlinkage modlinkage = {
53 - MODREV_1, (void *)&modlmisc, NULL
53 + MODREV_1, { (void *)&modlmisc, NULL }
54 54 };
55 55
56 56 extern void idm_wd_thread(void *arg);
57 57
58 58 static int _idm_init(void);
59 59 static int _idm_fini(void);
60 60 static void idm_buf_bind_in_locked(idm_task_t *idt, idm_buf_t *buf);
61 61 static void idm_buf_bind_out_locked(idm_task_t *idt, idm_buf_t *buf);
62 62 static void idm_buf_unbind_in_locked(idm_task_t *idt, idm_buf_t *buf);
63 63 static void idm_buf_unbind_out_locked(idm_task_t *idt, idm_buf_t *buf);
64 64 static void idm_task_abort_one(idm_conn_t *ic, idm_task_t *idt,
65 65 idm_abort_type_t abort_type);
66 66 static void idm_task_aborted(idm_task_t *idt, idm_status_t status);
67 67 static idm_pdu_t *idm_pdu_alloc_common(uint_t hdrlen, uint_t datalen,
68 68 int sleepflag);
69 69
70 70 boolean_t idm_conn_logging = 0;
71 71 boolean_t idm_svc_logging = 0;
72 72 #ifdef DEBUG
73 73 boolean_t idm_pattern_checking = 1;
74 74 #else
75 75 boolean_t idm_pattern_checking = 0;
76 76 #endif
77 77
78 78 /*
79 79 * Potential tuneable for the maximum number of tasks. Default to
80 80 * IDM_TASKIDS_MAX
81 81 */
82 82
83 83 uint32_t idm_max_taskids = IDM_TASKIDS_MAX;
84 84
85 85 /*
86 86 * Global list of transport handles
87 87 * These are listed in preferential order, so we can simply take the
88 88 * first "it_conn_is_capable" hit. Note also that the order maps to
89 89 * the order of the idm_transport_type_t list.
90 90 */
91 91 idm_transport_t idm_transport_list[] = {
92 92
93 93 /* iSER on InfiniBand transport handle */
94 94 {IDM_TRANSPORT_TYPE_ISER, /* type */
95 95 "/devices/ib/iser@0:iser", /* device path */
96 96 NULL, /* LDI handle */
97 97 NULL, /* transport ops */
98 98 NULL}, /* transport caps */
99 99
100 100 /* IDM native sockets transport handle */
101 101 {IDM_TRANSPORT_TYPE_SOCKETS, /* type */
102 102 NULL, /* device path */
103 103 NULL, /* LDI handle */
104 104 NULL, /* transport ops */
105 105 NULL} /* transport caps */
106 106
107 107 };
108 108
109 109 int
110 110 _init(void)
111 111 {
112 112 int rc;
113 113
114 114 if ((rc = _idm_init()) != 0) {
115 115 return (rc);
116 116 }
117 117
118 118 return (mod_install(&modlinkage));
119 119 }
120 120
121 121 int
122 122 _fini(void)
123 123 {
124 124 int rc;
125 125
126 126 if ((rc = _idm_fini()) != 0) {
127 127 return (rc);
128 128 }
129 129
130 130 if ((rc = mod_remove(&modlinkage)) != 0) {
131 131 return (rc);
132 132 }
133 133
134 134 return (rc);
135 135 }
136 136
137 137 int
138 138 _info(struct modinfo *modinfop)
139 139 {
140 140 return (mod_info(&modlinkage, modinfop));
141 141 }
142 142
143 143 /*
144 144 * idm_transport_register()
145 145 *
146 146 * Provides a mechanism for an IDM transport driver to register its
147 147 * transport ops and caps with the IDM kernel module. Invoked during
148 148 * a transport driver's attach routine.
149 149 */
150 150 idm_status_t
151 151 idm_transport_register(idm_transport_attr_t *attr)
152 152 {
153 153 ASSERT(attr->it_ops != NULL);
154 154 ASSERT(attr->it_caps != NULL);
155 155
156 156 switch (attr->type) {
157 157 /* All known non-native transports here; for now, iSER */
158 158 case IDM_TRANSPORT_TYPE_ISER:
159 159 idm_transport_list[attr->type].it_ops = attr->it_ops;
160 160 idm_transport_list[attr->type].it_caps = attr->it_caps;
161 161 return (IDM_STATUS_SUCCESS);
162 162
163 163 default:
164 164 cmn_err(CE_NOTE, "idm: unknown transport type (0x%x) in "
165 165 "idm_transport_register", attr->type);
166 166 return (IDM_STATUS_SUCCESS);
167 167 }
168 168 }
169 169
170 170 /*
171 171 * idm_ini_conn_create
172 172 *
173 173 * This function is invoked by the iSCSI layer to create a connection context.
174 174 * This does not actually establish the socket connection.
175 175 *
176 176 * cr - Connection request parameters
177 177 * new_con - Output parameter that contains the new request if successful
178 178 *
179 179 */
180 180 idm_status_t
181 181 idm_ini_conn_create(idm_conn_req_t *cr, idm_conn_t **new_con)
182 182 {
183 183 idm_transport_t *it;
184 184 idm_conn_t *ic;
185 185 int rc;
186 186
187 187 it = idm_transport_lookup(cr);
188 188
189 189 retry:
190 190 ic = idm_conn_create_common(CONN_TYPE_INI, it->it_type,
191 191 &cr->icr_conn_ops);
192 192
193 193 bcopy(&cr->cr_ini_dst_addr, &ic->ic_ini_dst_addr,
194 194 sizeof (cr->cr_ini_dst_addr));
195 195
196 196 /* create the transport-specific connection components */
197 197 rc = it->it_ops->it_ini_conn_create(cr, ic);
198 198 if (rc != IDM_STATUS_SUCCESS) {
199 199 /* cleanup the failed connection */
200 200 idm_conn_destroy_common(ic);
201 201
202 202 /*
203 203 * It is possible for an IB client to connect to
204 204 * an ethernet-only client via an IB-eth gateway.
205 205 * Therefore, if we are attempting to use iSER and
206 206 * fail, retry with sockets before ultimately
207 207 * failing the connection.
208 208 */
209 209 if (it->it_type == IDM_TRANSPORT_TYPE_ISER) {
210 210 it = &idm_transport_list[IDM_TRANSPORT_TYPE_SOCKETS];
211 211 goto retry;
212 212 }
213 213
214 214 return (IDM_STATUS_FAIL);
215 215 }
216 216
217 217 *new_con = ic;
218 218
219 219 mutex_enter(&idm.idm_global_mutex);
220 220 list_insert_tail(&idm.idm_ini_conn_list, ic);
221 221 mutex_exit(&idm.idm_global_mutex);
222 222
223 223 return (IDM_STATUS_SUCCESS);
224 224 }
225 225
226 226 /*
227 227 * idm_ini_conn_destroy
228 228 *
229 229 * Releases any resources associated with the connection. This is the
230 230 * complement to idm_ini_conn_create.
231 231 * ic - idm_conn_t structure representing the relevant connection
232 232 *
233 233 */
234 234 void
235 235 idm_ini_conn_destroy_task(void *ic_void)
236 236 {
237 237 idm_conn_t *ic = ic_void;
238 238
239 239 ic->ic_transport_ops->it_ini_conn_destroy(ic);
240 240 idm_conn_destroy_common(ic);
241 241 }
242 242
243 243 void
244 244 idm_ini_conn_destroy(idm_conn_t *ic)
245 245 {
246 246 /*
247 247 * It's reasonable for the initiator to call idm_ini_conn_destroy
248 248 * from within the context of the CN_CONNECT_DESTROY notification.
249 249 * That's a problem since we want to destroy the taskq for the
250 250 * state machine associated with the connection. Remove the
251 251 * connection from the list right away then handle the remaining
252 252 * work via the idm_global_taskq.
253 253 */
254 254 mutex_enter(&idm.idm_global_mutex);
255 255 list_remove(&idm.idm_ini_conn_list, ic);
256 256 mutex_exit(&idm.idm_global_mutex);
257 257
258 258 if (taskq_dispatch(idm.idm_global_taskq,
259 259 &idm_ini_conn_destroy_task, ic, TQ_SLEEP) == NULL) {
260 260 cmn_err(CE_WARN,
261 261 "idm_ini_conn_destroy: Couldn't dispatch task");
262 262 }
263 263 }
264 264
265 265 /*
266 266 * idm_ini_conn_connect
267 267 *
268 268 * Establish connection to the remote system identified in idm_conn_t.
269 269 * The connection parameters including the remote IP address were established
270 270 * in the call to idm_ini_conn_create. The IDM state machine will
271 271 * perform client notifications as necessary to prompt the initiator through
272 272 * the login process. IDM also keeps a timer running so that if the login
273 273 * process doesn't complete in a timely manner it will fail.
274 274 *
275 275 * ic - idm_conn_t structure representing the relevant connection
276 276 *
277 277 * Returns success if the connection was established, otherwise some kind
278 278 * of meaningful error code.
279 279 *
 280  280   * Upon return the login has either failed or is logging in (FFP)
281 281 */
282 282 idm_status_t
283 283 idm_ini_conn_connect(idm_conn_t *ic)
284 284 {
285 285 idm_status_t rc;
286 286
287 287 rc = idm_conn_sm_init(ic);
288 288 if (rc != IDM_STATUS_SUCCESS) {
289 289 return (ic->ic_conn_sm_status);
290 290 }
291 291
292 292 /* Hold connection until we return */
293 293 idm_conn_hold(ic);
294 294
295 295 /* Kick state machine */
296 296 idm_conn_event(ic, CE_CONNECT_REQ, NULL);
297 297
298 298 /* Wait for login flag */
299 299 mutex_enter(&ic->ic_state_mutex);
300 300 while (!(ic->ic_state_flags & CF_LOGIN_READY) &&
301 301 !(ic->ic_state_flags & CF_ERROR)) {
302 302 cv_wait(&ic->ic_state_cv, &ic->ic_state_mutex);
303 303 }
304 304
305 305 /*
306 306 * The CN_READY_TO_LOGIN and/or the CN_CONNECT_FAIL call to
307 307 * idm_notify_client has already been generated by the idm conn
308 308 * state machine. If connection fails any time after this
309 309 * check, we will detect it in iscsi_login.
310 310 */
311 311 if (ic->ic_state_flags & CF_ERROR) {
312 312 rc = ic->ic_conn_sm_status;
313 313 }
314 314 mutex_exit(&ic->ic_state_mutex);
315 315 idm_conn_rele(ic);
316 316
317 317 return (rc);
318 318 }
319 319
320 320 /*
321 321 * idm_ini_conn_disconnect
322 322 *
323 323 * Forces a connection (previously established using idm_ini_conn_connect)
324 324 * to perform a controlled shutdown, cleaning up any outstanding requests.
325 325 *
326 326 * ic - idm_conn_t structure representing the relevant connection
327 327 *
328 328 * This is asynchronous and will return before the connection is properly
329 329 * shutdown
330 330 */
331 331 /* ARGSUSED */
332 332 void
333 333 idm_ini_conn_disconnect(idm_conn_t *ic)
334 334 {
335 335 idm_conn_event(ic, CE_TRANSPORT_FAIL, NULL);
336 336 }
337 337
338 338 /*
339 339 * idm_ini_conn_disconnect_wait
340 340 *
341 341 * Forces a connection (previously established using idm_ini_conn_connect)
342 342 * to perform a controlled shutdown. Blocks until the connection is
343 343 * disconnected.
344 344 *
345 345 * ic - idm_conn_t structure representing the relevant connection
346 346 */
347 347 /* ARGSUSED */
348 348 void
349 349 idm_ini_conn_disconnect_sync(idm_conn_t *ic)
350 350 {
351 351 mutex_enter(&ic->ic_state_mutex);
352 352 if ((ic->ic_state != CS_S9_INIT_ERROR) &&
353 353 (ic->ic_state != CS_S11_COMPLETE)) {
354 354 idm_conn_event_locked(ic, CE_TRANSPORT_FAIL, NULL, CT_NONE);
355 355 while ((ic->ic_state != CS_S9_INIT_ERROR) &&
356 356 (ic->ic_state != CS_S11_COMPLETE))
357 357 cv_wait(&ic->ic_state_cv, &ic->ic_state_mutex);
358 358 }
359 359 mutex_exit(&ic->ic_state_mutex);
360 360 }
361 361
362 362 /*
363 363 * idm_tgt_svc_create
364 364 *
365 365 * The target calls this service to obtain a service context for each available
366 366 * transport, starting a service of each type related to the IP address and port
367 367 * passed. The idm_svc_req_t contains the service parameters.
368 368 */
369 369 idm_status_t
370 370 idm_tgt_svc_create(idm_svc_req_t *sr, idm_svc_t **new_svc)
371 371 {
372 372 idm_transport_type_t type;
373 373 idm_transport_t *it;
374 374 idm_svc_t *is;
375 375 int rc;
376 376
377 377 *new_svc = NULL;
378 378 is = kmem_zalloc(sizeof (idm_svc_t), KM_SLEEP);
379 379
380 380 /* Initialize transport-agnostic components of the service handle */
381 381 is->is_svc_req = *sr;
382 382 mutex_init(&is->is_mutex, NULL, MUTEX_DEFAULT, NULL);
383 383 cv_init(&is->is_cv, NULL, CV_DEFAULT, NULL);
384 384 mutex_init(&is->is_count_mutex, NULL, MUTEX_DEFAULT, NULL);
385 385 cv_init(&is->is_count_cv, NULL, CV_DEFAULT, NULL);
386 386 idm_refcnt_init(&is->is_refcnt, is);
387 387
388 388 /*
389 389 * Make sure all available transports are setup. We call this now
390 390 * instead of at initialization time in case IB has become available
391 391 * since we started (hotplug, etc).
392 392 */
393 393 idm_transport_setup(sr->sr_li, B_FALSE);
394 394
395 395 /*
396 396 * Loop through the transports, configuring the transport-specific
397 397 * components of each one.
398 398 */
399 399 for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
400 400
401 401 it = &idm_transport_list[type];
402 402 /*
403 403 * If it_ops is NULL then the transport is unconfigured
404 404 * and we shouldn't try to start the service.
405 405 */
406 406 if (it->it_ops == NULL) {
407 407 continue;
408 408 }
409 409
410 410 rc = it->it_ops->it_tgt_svc_create(sr, is);
411 411 if (rc != IDM_STATUS_SUCCESS) {
412 412 /* Teardown any configured services */
413 413 while (type--) {
414 414 it = &idm_transport_list[type];
415 415 if (it->it_ops == NULL) {
416 416 continue;
417 417 }
418 418 it->it_ops->it_tgt_svc_destroy(is);
419 419 }
420 420 /* Free the svc context and return */
421 421 kmem_free(is, sizeof (idm_svc_t));
422 422 return (rc);
423 423 }
424 424 }
425 425
426 426 *new_svc = is;
427 427
428 428 mutex_enter(&idm.idm_global_mutex);
429 429 list_insert_tail(&idm.idm_tgt_svc_list, is);
430 430 mutex_exit(&idm.idm_global_mutex);
431 431
432 432 return (IDM_STATUS_SUCCESS);
433 433 }
434 434
435 435 /*
436 436 * idm_tgt_svc_destroy
437 437 *
438 438 * is - idm_svc_t returned by the call to idm_tgt_svc_create
439 439 *
440 440 * Cleanup any resources associated with the idm_svc_t.
441 441 */
442 442 void
443 443 idm_tgt_svc_destroy(idm_svc_t *is)
444 444 {
445 445 idm_transport_type_t type;
446 446 idm_transport_t *it;
447 447
448 448 mutex_enter(&idm.idm_global_mutex);
449 449 /* remove this service from the global list */
450 450 list_remove(&idm.idm_tgt_svc_list, is);
451 451 /* wakeup any waiters for service change */
452 452 cv_broadcast(&idm.idm_tgt_svc_cv);
453 453 mutex_exit(&idm.idm_global_mutex);
454 454
455 455 /* teardown each transport-specific service */
456 456 for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
457 457 it = &idm_transport_list[type];
458 458 if (it->it_ops == NULL) {
459 459 continue;
460 460 }
461 461
462 462 it->it_ops->it_tgt_svc_destroy(is);
463 463 }
464 464
465 465 /* tear down the svc resources */
466 466 idm_refcnt_destroy(&is->is_refcnt);
467 467 cv_destroy(&is->is_count_cv);
468 468 mutex_destroy(&is->is_count_mutex);
469 469 cv_destroy(&is->is_cv);
470 470 mutex_destroy(&is->is_mutex);
471 471
472 472 /* free the svc handle */
473 473 kmem_free(is, sizeof (idm_svc_t));
474 474 }
475 475
476 476 void
477 477 idm_tgt_svc_hold(idm_svc_t *is)
478 478 {
479 479 idm_refcnt_hold(&is->is_refcnt);
480 480 }
481 481
482 482 void
483 483 idm_tgt_svc_rele_and_destroy(idm_svc_t *is)
484 484 {
485 485 idm_refcnt_rele_and_destroy(&is->is_refcnt,
486 486 (idm_refcnt_cb_t *)&idm_tgt_svc_destroy);
487 487 }
488 488
489 489 /*
490 490 * idm_tgt_svc_online
491 491 *
492 492 * is - idm_svc_t returned by the call to idm_tgt_svc_create
493 493 *
494 494 * Online each transport service, as we want this target to be accessible
495 495 * via any configured transport.
496 496 *
497 497 * When the initiator establishes a new connection to the target, IDM will
498 498 * call the "new connect" callback defined in the idm_svc_req_t structure
499 499 * and it will pass an idm_conn_t structure representing that new connection.
500 500 */
501 501 idm_status_t
502 502 idm_tgt_svc_online(idm_svc_t *is)
503 503 {
504 504
505 505 idm_transport_type_t type, last_type;
506 506 idm_transport_t *it;
507 507 int rc = IDM_STATUS_SUCCESS;
508 508
509 509 mutex_enter(&is->is_mutex);
510 510 if (is->is_online == 0) {
511 511 /* Walk through each of the transports and online them */
512 512 for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
513 513 it = &idm_transport_list[type];
514 514 if (it->it_ops == NULL) {
515 515 /* transport is not registered */
516 516 continue;
517 517 }
518 518
519 519 mutex_exit(&is->is_mutex);
520 520 rc = it->it_ops->it_tgt_svc_online(is);
521 521 mutex_enter(&is->is_mutex);
522 522 if (rc != IDM_STATUS_SUCCESS) {
523 523 last_type = type;
524 524 break;
525 525 }
526 526 }
527 527 if (rc != IDM_STATUS_SUCCESS) {
528 528 /*
529 529 * The last transport failed to online.
530 530 * Offline any transport onlined above and
531 531 * do not online the target.
532 532 */
533 533 for (type = 0; type < last_type; type++) {
534 534 it = &idm_transport_list[type];
535 535 if (it->it_ops == NULL) {
536 536 /* transport is not registered */
537 537 continue;
538 538 }
539 539
540 540 mutex_exit(&is->is_mutex);
541 541 it->it_ops->it_tgt_svc_offline(is);
542 542 mutex_enter(&is->is_mutex);
543 543 }
544 544 } else {
545 545 /* Target service now online */
546 546 is->is_online = 1;
547 547 }
548 548 } else {
549 549 /* Target service already online, just bump the count */
550 550 is->is_online++;
551 551 }
552 552 mutex_exit(&is->is_mutex);
553 553
554 554 return (rc);
555 555 }
556 556
557 557 /*
558 558 * idm_tgt_svc_offline
559 559 *
560 560 * is - idm_svc_t returned by the call to idm_tgt_svc_create
561 561 *
562 562 * Shutdown any online target services.
563 563 */
564 564 void
565 565 idm_tgt_svc_offline(idm_svc_t *is)
566 566 {
567 567 idm_transport_type_t type;
568 568 idm_transport_t *it;
569 569
570 570 mutex_enter(&is->is_mutex);
571 571 is->is_online--;
572 572 if (is->is_online == 0) {
573 573 /* Walk through each of the transports and offline them */
574 574 for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
575 575 it = &idm_transport_list[type];
576 576 if (it->it_ops == NULL) {
577 577 /* transport is not registered */
578 578 continue;
579 579 }
580 580
581 581 mutex_exit(&is->is_mutex);
582 582 it->it_ops->it_tgt_svc_offline(is);
583 583 mutex_enter(&is->is_mutex);
584 584 }
585 585 }
586 586 mutex_exit(&is->is_mutex);
587 587 }
588 588
589 589 /*
590 590 * idm_tgt_svc_lookup
591 591 *
592 592 * Lookup a service instance listening on the specified port
593 593 */
594 594
595 595 idm_svc_t *
596 596 idm_tgt_svc_lookup(uint16_t port)
597 597 {
598 598 idm_svc_t *result;
599 599
600 600 retry:
601 601 mutex_enter(&idm.idm_global_mutex);
602 602 for (result = list_head(&idm.idm_tgt_svc_list);
603 603 result != NULL;
604 604 result = list_next(&idm.idm_tgt_svc_list, result)) {
605 605 if (result->is_svc_req.sr_port == port) {
606 606 if (result->is_online == 0) {
607 607 /*
608 608 * A service exists on this port, but it
609 609 * is going away, wait for it to cleanup.
610 610 */
611 611 cv_wait(&idm.idm_tgt_svc_cv,
612 612 &idm.idm_global_mutex);
613 613 mutex_exit(&idm.idm_global_mutex);
614 614 goto retry;
615 615 }
616 616 idm_tgt_svc_hold(result);
617 617 mutex_exit(&idm.idm_global_mutex);
618 618 return (result);
619 619 }
620 620 }
621 621 mutex_exit(&idm.idm_global_mutex);
622 622
623 623 return (NULL);
624 624 }
625 625
626 626 /*
627 627 * idm_negotiate_key_values()
628 628 * Give IDM level a chance to negotiate any login parameters it should own.
629 629 * -- leave unhandled parameters alone on request_nvl
630 630 * -- move all handled parameters to response_nvl with an appropriate response
631 631 * -- also add an entry to negotiated_nvl for any accepted parameters
632 632 */
633 633 kv_status_t
634 634 idm_negotiate_key_values(idm_conn_t *ic, nvlist_t *request_nvl,
635 635 nvlist_t *response_nvl, nvlist_t *negotiated_nvl)
636 636 {
637 637 ASSERT(ic->ic_transport_ops != NULL);
638 638 return (ic->ic_transport_ops->it_negotiate_key_values(ic,
639 639 request_nvl, response_nvl, negotiated_nvl));
640 640 }
641 641
642 642 /*
643 643 * idm_notice_key_values()
644 644 * Activate at the IDM level any parameters that have been negotiated.
645 645 * Passes the set of key value pairs to the transport for activation.
646 646 * This will be invoked as the connection is entering full-feature mode.
647 647 */
648 648 void
649 649 idm_notice_key_values(idm_conn_t *ic, nvlist_t *negotiated_nvl)
650 650 {
651 651 ASSERT(ic->ic_transport_ops != NULL);
652 652 ic->ic_transport_ops->it_notice_key_values(ic, negotiated_nvl);
653 653 }
654 654
655 655 /*
656 656 * idm_declare_key_values()
657 657 * Activate an operational set of declarative parameters from the config_nvl,
658 658 * and return the selected values in the outgoing_nvl.
659 659 */
660 660 kv_status_t
661 661 idm_declare_key_values(idm_conn_t *ic, nvlist_t *config_nvl,
662 662 nvlist_t *outgoing_nvl)
663 663 {
664 664 ASSERT(ic->ic_transport_ops != NULL);
665 665 return (ic->ic_transport_ops->it_declare_key_values(ic, config_nvl,
666 666 outgoing_nvl));
667 667 }
668 668
669 669 /*
670 670 * idm_buf_tx_to_ini
671 671 *
672 672 * This is IDM's implementation of the 'Put_Data' operational primitive.
673 673 *
674 674 * This function is invoked by a target iSCSI layer to request its local
675 675 * Datamover layer to transmit the Data-In PDU to the peer iSCSI layer
676 676 * on the remote iSCSI node. The I/O buffer represented by 'idb' is
677 677 * transferred to the initiator associated with task 'idt'. The connection
678 678 * info, contents of the Data-In PDU header, the DataDescriptorIn, BHS,
679 679 * and the callback (idb->idb_buf_cb) at transfer completion are
680 680 * provided as input.
681 681 *
682 682 * This data transfer takes place transparently to the remote iSCSI layer,
683 683 * i.e. without its participation.
684 684 *
685 685 * Using sockets, IDM implements the data transfer by segmenting the data
686 686 * buffer into appropriately sized iSCSI PDUs and transmitting them to the
687 687 * initiator. iSER performs the transfer using RDMA write.
688 688 *
689 689 */
690 690 idm_status_t
691 691 idm_buf_tx_to_ini(idm_task_t *idt, idm_buf_t *idb,
692 692 uint32_t offset, uint32_t xfer_len,
693 693 idm_buf_cb_t idb_buf_cb, void *cb_arg)
694 694 {
695 695 idm_status_t rc;
696 696
697 697 idb->idb_bufoffset = offset;
698 698 idb->idb_xfer_len = xfer_len;
699 699 idb->idb_buf_cb = idb_buf_cb;
700 700 idb->idb_cb_arg = cb_arg;
701 701 gethrestime(&idb->idb_xfer_start);
702 702
703 703 /*
704 704 * Buffer should not contain the pattern. If the pattern is
705 705 * present then we've been asked to transmit initialized data
706 706 */
707 707 IDM_BUFPAT_CHECK(idb, xfer_len, BP_CHECK_ASSERT);
708 708
709 709 mutex_enter(&idt->idt_mutex);
710 710 switch (idt->idt_state) {
711 711 case TASK_ACTIVE:
712 712 idt->idt_tx_to_ini_start++;
713 713 idm_task_hold(idt);
714 714 idm_buf_bind_in_locked(idt, idb);
715 715 idb->idb_in_transport = B_TRUE;
716 716 rc = (*idt->idt_ic->ic_transport_ops->it_buf_tx_to_ini)
717 717 (idt, idb);
718 718 return (rc);
719 719
720 720 case TASK_SUSPENDING:
721 721 case TASK_SUSPENDED:
722 722 /*
723 723 * Bind buffer but don't start a transfer since the task
724 724 * is suspended
725 725 */
726 726 idm_buf_bind_in_locked(idt, idb);
727 727 mutex_exit(&idt->idt_mutex);
728 728 return (IDM_STATUS_SUCCESS);
729 729
730 730 case TASK_ABORTING:
731 731 case TASK_ABORTED:
732 732 /*
733 733 * Once the task is aborted, any buffers added to the
734 734 * idt_inbufv will never get cleaned up, so just return
735 735 * SUCCESS. The buffer should get cleaned up by the
736 736 * client or framework once task_aborted has completed.
737 737 */
738 738 mutex_exit(&idt->idt_mutex);
739 739 return (IDM_STATUS_SUCCESS);
740 740
741 741 default:
742 742 ASSERT(0);
743 743 break;
744 744 }
745 745 mutex_exit(&idt->idt_mutex);
746 746
747 747 return (IDM_STATUS_FAIL);
748 748 }
749 749
750 750 /*
751 751 * idm_buf_rx_from_ini
752 752 *
753 753 * This is IDM's implementation of the 'Get_Data' operational primitive.
754 754 *
755 755 * This function is invoked by a target iSCSI layer to request its local
756 756 * Datamover layer to retrieve certain data identified by the R2T PDU from the
757 757 * peer iSCSI layer on the remote node. The retrieved Data-Out PDU will be
758 758 * mapped to the respective buffer by the task tags (ITT & TTT).
759 759 * The connection information, contents of an R2T PDU, DataDescriptor, BHS, and
 760  760   * the callback (idb->idb_buf_cb) notification for data transfer completion
 761  761   * are provided as input.
762 762 *
 763  763   * When an iSCSI node sends an R2T PDU to its local Datamover layer, the
 764  764   * local and remote Datamover layers transparently bring
765 765 * about the data transfer requested by the R2T PDU, without the participation
766 766 * of the iSCSI layers.
767 767 *
768 768 * Using sockets, IDM transmits an R2T PDU for each buffer and the rx_data_out()
769 769 * assembles the Data-Out PDUs into the buffer. iSER uses RDMA read.
770 770 *
771 771 */
772 772 idm_status_t
773 773 idm_buf_rx_from_ini(idm_task_t *idt, idm_buf_t *idb,
774 774 uint32_t offset, uint32_t xfer_len,
775 775 idm_buf_cb_t idb_buf_cb, void *cb_arg)
776 776 {
777 777 idm_status_t rc;
778 778
779 779 idb->idb_bufoffset = offset;
780 780 idb->idb_xfer_len = xfer_len;
781 781 idb->idb_buf_cb = idb_buf_cb;
782 782 idb->idb_cb_arg = cb_arg;
783 783 gethrestime(&idb->idb_xfer_start);
784 784
785 785 /*
786 786 * "In" buf list is for "Data In" PDU's, "Out" buf list is for
787 787 * "Data Out" PDU's
788 788 */
789 789 mutex_enter(&idt->idt_mutex);
790 790 switch (idt->idt_state) {
791 791 case TASK_ACTIVE:
792 792 idt->idt_rx_from_ini_start++;
793 793 idm_task_hold(idt);
794 794 idm_buf_bind_out_locked(idt, idb);
795 795 idb->idb_in_transport = B_TRUE;
796 796 rc = (*idt->idt_ic->ic_transport_ops->it_buf_rx_from_ini)
797 797 (idt, idb);
798 798 return (rc);
799 799 case TASK_SUSPENDING:
800 800 case TASK_SUSPENDED:
801 801 case TASK_ABORTING:
802 802 case TASK_ABORTED:
803 803 /*
804 804 * Bind buffer but don't start a transfer since the task
805 805 * is suspended
806 806 */
807 807 idm_buf_bind_out_locked(idt, idb);
808 808 mutex_exit(&idt->idt_mutex);
809 809 return (IDM_STATUS_SUCCESS);
810 810 default:
811 811 ASSERT(0);
812 812 break;
813 813 }
814 814 mutex_exit(&idt->idt_mutex);
815 815
816 816 return (IDM_STATUS_FAIL);
817 817 }
818 818
819 819 /*
820 820 * idm_buf_tx_to_ini_done
821 821 *
822 822 * The transport calls this after it has completed a transfer requested by
823 823 * a call to transport_buf_tx_to_ini
824 824 *
825 825 * Caller holds idt->idt_mutex, idt->idt_mutex is released before returning.
826 826 * idt may be freed after the call to idb->idb_buf_cb.
827 827 */
828 828 void
829 829 idm_buf_tx_to_ini_done(idm_task_t *idt, idm_buf_t *idb, idm_status_t status)
830 830 {
831 831 ASSERT(mutex_owned(&idt->idt_mutex));
832 832 idb->idb_in_transport = B_FALSE;
833 833 idb->idb_tx_thread = B_FALSE;
834 834 idt->idt_tx_to_ini_done++;
835 835 gethrestime(&idb->idb_xfer_done);
836 836
837 837 /*
838 838 * idm_refcnt_rele may cause TASK_SUSPENDING --> TASK_SUSPENDED or
839 839 * TASK_ABORTING --> TASK_ABORTED transistion if the refcount goes
840 840 * to 0.
841 841 */
842 842 idm_task_rele(idt);
843 843 idb->idb_status = status;
844 844
845 845 switch (idt->idt_state) {
846 846 case TASK_ACTIVE:
847 847 idt->idt_ic->ic_timestamp = ddi_get_lbolt();
848 848 idm_buf_unbind_in_locked(idt, idb);
849 849 mutex_exit(&idt->idt_mutex);
850 850 (*idb->idb_buf_cb)(idb, status);
851 851 return;
852 852 case TASK_SUSPENDING:
853 853 case TASK_SUSPENDED:
854 854 case TASK_ABORTING:
855 855 case TASK_ABORTED:
856 856 /*
857 857 * To keep things simple we will ignore the case where the
858 858 * transfer was successful and leave all buffers bound to the
859 859 * task. This allows us to also ignore the case where we've
860 860 * been asked to abort a task but the last transfer of the
861 861 * task has completed. IDM has no idea whether this was, in
862 862 * fact, the last transfer of the task so it would be difficult
863 863 * to handle this case. Everything should get sorted out again
864 864 * after task reassignment is complete.
865 865 *
866 866 * In the case of TASK_ABORTING we could conceivably call the
867 867 * buffer callback here but the timing of when the client's
868 868 * client_task_aborted callback is invoked vs. when the client's
869 869 * buffer callback gets invoked gets sticky. We don't want
870 870 * the client to here from us again after the call to
871 871 * client_task_aborted() but we don't want to give it a bunch
872 872 * of failed buffer transfers until we've called
873 873 * client_task_aborted(). Instead we'll just leave all the
874 874 * buffers bound and allow the client to cleanup.
875 875 */
876 876 break;
877 877 default:
878 878 ASSERT(0);
879 879 }
880 880 mutex_exit(&idt->idt_mutex);
881 881 }
882 882
883 883 /*
884 884 * idm_buf_rx_from_ini_done
885 885 *
886 886 * The transport calls this after it has completed a transfer requested by
 887  887   * a call to transport_buf_rx_from_ini
888 888 *
889 889 * Caller holds idt->idt_mutex, idt->idt_mutex is released before returning.
890 890 * idt may be freed after the call to idb->idb_buf_cb.
891 891 */
/*
 * Completion path for a "RX from initiator" buffer transfer (SCSI Write
 * data landing on the target side).  Caller must hold idt->idt_mutex;
 * this function exits the mutex before returning.  In the TASK_ACTIVE
 * case the buffer is unbound and the client's buffer callback is invoked;
 * note idt may be freed once idm_task_rele() drops the last reference,
 * so idt must not be touched after the callback path returns.
 */
void
idm_buf_rx_from_ini_done(idm_task_t *idt, idm_buf_t *idb, idm_status_t status)
{
	ASSERT(mutex_owned(&idt->idt_mutex));
	idb->idb_in_transport = B_FALSE;
	idt->idt_rx_from_ini_done++;
	gethrestime(&idb->idb_xfer_done);

	/*
	 * idm_refcnt_rele may cause TASK_SUSPENDING --> TASK_SUSPENDED or
	 * TASK_ABORTING --> TASK_ABORTED transition if the refcount goes
	 * to 0.
	 */
	idm_task_rele(idt);
	idb->idb_status = status;

	if (status == IDM_STATUS_SUCCESS) {
		/*
		 * Buffer should not contain the pattern.  If it does then
		 * we did not get the data from the remote host.
		 */
		IDM_BUFPAT_CHECK(idb, idb->idb_xfer_len, BP_CHECK_ASSERT);
	}

	switch (idt->idt_state) {
	case TASK_ACTIVE:
		/* Normal completion: unbind and notify the client */
		idt->idt_ic->ic_timestamp = ddi_get_lbolt();
		idm_buf_unbind_out_locked(idt, idb);
		mutex_exit(&idt->idt_mutex);
		(*idb->idb_buf_cb)(idb, status);
		return;
	case TASK_SUSPENDING:
	case TASK_SUSPENDED:
	case TASK_ABORTING:
	case TASK_ABORTED:
		/*
		 * To keep things simple we will ignore the case where the
		 * transfer was successful and leave all buffers bound to the
		 * task.  This allows us to also ignore the case where we've
		 * been asked to abort a task but the last transfer of the
		 * task has completed.  IDM has no idea whether this was, in
		 * fact, the last transfer of the task so it would be difficult
		 * to handle this case.  Everything should get sorted out again
		 * after task reassignment is complete.
		 *
		 * In the case of TASK_ABORTING we could conceivably call the
		 * buffer callback here but the timing of when the client's
		 * client_task_aborted callback is invoked vs. when the client's
		 * buffer callback gets invoked gets sticky.  We don't want
		 * the client to hear from us again after the call to
		 * client_task_aborted() but we don't want to give it a bunch
		 * of failed buffer transfers until we've called
		 * client_task_aborted().  Instead we'll just leave all the
		 * buffers bound and allow the client to cleanup.
		 */
		break;
	default:
		ASSERT(0);
	}
	mutex_exit(&idt->idt_mutex);
}
953 953
/*
 * idm_buf_alloc
 *
 * Allocates a buffer handle and registers it for use with the transport
 * layer. If a buffer is not passed on bufptr, the buffer will be allocated
 * as well as the handle.
 *
 * ic		- connection on which the buffer will be transferred
 * bufptr	- allocate memory for buffer if NULL, else assign to buffer
 * buflen	- length of buffer
 *
 * Returns idm_buf_t handle if successful, otherwise NULL (either the
 * connection is not in full-feature mode or an allocation/registration
 * step failed).  On success the handle carries a hold on the connection,
 * released in idm_buf_free().
 */
idm_buf_t *
idm_buf_alloc(idm_conn_t *ic, void *bufptr, uint64_t buflen)
{
	idm_buf_t	*buf = NULL;
	int		rc;

	ASSERT(ic != NULL);
	ASSERT(idm.idm_buf_cache != NULL);
	ASSERT(buflen > 0);

	/* Don't allocate new buffers if we are not in FFP */
	mutex_enter(&ic->ic_state_mutex);
	if (!ic->ic_ffp) {
		mutex_exit(&ic->ic_state_mutex);
		return (NULL);
	}

	/* Hold the connection while the buffer is registered with it */
	idm_conn_hold(ic);
	mutex_exit(&ic->ic_state_mutex);

	buf = kmem_cache_alloc(idm.idm_buf_cache, KM_NOSLEEP);
	if (buf == NULL) {
		idm_conn_rele(ic);
		return (NULL);
	}

	buf->idb_ic = ic;
	buf->idb_buflen = buflen;
	buf->idb_exp_offset = 0;
	buf->idb_bufoffset = 0;
	buf->idb_xfer_len = 0;
	buf->idb_magic = IDM_BUF_MAGIC;
	buf->idb_in_transport = B_FALSE;
	buf->idb_bufbcopy = B_FALSE;

	/*
	 * If bufptr is NULL, we have an implicit request to allocate
	 * memory for this IDM buffer handle and register it for use
	 * with the transport. To simplify this, and to give more freedom
	 * to the transport layer for it's own buffer management, both of
	 * these actions will take place in the transport layer.
	 * If bufptr is set, then the caller has allocated memory (or more
	 * likely it's been passed from an upper layer), and we need only
	 * register the buffer for use with the transport layer.
	 */
	if (bufptr == NULL) {
		/*
		 * Allocate a buffer from the transport layer (which
		 * will also register the buffer for use).
		 */
		rc = ic->ic_transport_ops->it_buf_alloc(buf, buflen);
		if (rc != 0) {
			/* Unwind the hold and the handle allocation */
			idm_conn_rele(ic);
			kmem_cache_free(idm.idm_buf_cache, buf);
			return (NULL);
		}
		/* Set the bufalloc'd flag */
		buf->idb_bufalloc = B_TRUE;
	} else {
		/*
		 * For large transfers, Set the passed bufptr into
		 * the buf handle, and register the handle with the
		 * transport layer. As memory registration with the
		 * transport layer is a time/cpu intensive operation,
		 * for small transfers (up to a pre-defined bcopy
		 * threshold), use pre-registered memory buffers
		 * and bcopy data at the appropriate time.
		 */
		buf->idb_buf = bufptr;

		rc = ic->ic_transport_ops->it_buf_setup(buf);
		if (rc != 0) {
			/* Unwind the hold and the handle allocation */
			idm_conn_rele(ic);
			kmem_cache_free(idm.idm_buf_cache, buf);
			return (NULL);
		}
		/*
		 * The transport layer is now expected to set the idb_bufalloc
		 * correctly to indicate if resources have been allocated.
		 */
	}

	IDM_BUFPAT_SET(buf);

	return (buf);
}
1054 1054
1055 1055 /*
1056 1056 * idm_buf_free
1057 1057 *
1058 1058 * Release a buffer handle along with the associated buffer that was allocated
1059 1059 * or assigned with idm_buf_alloc
1060 1060 */
1061 1061 void
1062 1062 idm_buf_free(idm_buf_t *buf)
1063 1063 {
1064 1064 idm_conn_t *ic = buf->idb_ic;
1065 1065
1066 1066
1067 1067 buf->idb_task_binding = NULL;
1068 1068
1069 1069 if (buf->idb_bufalloc) {
1070 1070 ic->ic_transport_ops->it_buf_free(buf);
1071 1071 } else {
1072 1072 ic->ic_transport_ops->it_buf_teardown(buf);
1073 1073 }
1074 1074 kmem_cache_free(idm.idm_buf_cache, buf);
1075 1075 idm_conn_rele(ic);
1076 1076 }
1077 1077
1078 1078 /*
1079 1079 * idm_buf_bind_in
1080 1080 *
1081 1081 * This function associates a buffer with a task. This is only for use by the
1082 1082 * iSCSI initiator that will have only one buffer per transfer direction
1083 1083 *
1084 1084 */
1085 1085 void
1086 1086 idm_buf_bind_in(idm_task_t *idt, idm_buf_t *buf)
1087 1087 {
1088 1088 mutex_enter(&idt->idt_mutex);
1089 1089 idm_buf_bind_in_locked(idt, buf);
1090 1090 mutex_exit(&idt->idt_mutex);
1091 1091 }
1092 1092
1093 1093 static void
1094 1094 idm_buf_bind_in_locked(idm_task_t *idt, idm_buf_t *buf)
1095 1095 {
1096 1096 buf->idb_task_binding = idt;
1097 1097 buf->idb_ic = idt->idt_ic;
1098 1098 idm_listbuf_insert(&idt->idt_inbufv, buf);
1099 1099 }
1100 1100
1101 1101 void
1102 1102 idm_buf_bind_out(idm_task_t *idt, idm_buf_t *buf)
1103 1103 {
1104 1104 /*
1105 1105 * For small transfers, the iSER transport delegates the IDM
1106 1106 * layer to bcopy the SCSI Write data for faster IOPS.
1107 1107 */
1108 1108 if (buf->idb_bufbcopy == B_TRUE) {
1109 1109
1110 1110 bcopy(buf->idb_bufptr, buf->idb_buf, buf->idb_buflen);
1111 1111 }
1112 1112 mutex_enter(&idt->idt_mutex);
1113 1113 idm_buf_bind_out_locked(idt, buf);
1114 1114 mutex_exit(&idt->idt_mutex);
1115 1115 }
1116 1116
1117 1117 static void
1118 1118 idm_buf_bind_out_locked(idm_task_t *idt, idm_buf_t *buf)
1119 1119 {
1120 1120 buf->idb_task_binding = idt;
1121 1121 buf->idb_ic = idt->idt_ic;
1122 1122 idm_listbuf_insert(&idt->idt_outbufv, buf);
1123 1123 }
1124 1124
1125 1125 void
1126 1126 idm_buf_unbind_in(idm_task_t *idt, idm_buf_t *buf)
1127 1127 {
1128 1128 /*
1129 1129 * For small transfers, the iSER transport delegates the IDM
1130 1130 * layer to bcopy the SCSI Read data into the read buufer
1131 1131 * for faster IOPS.
1132 1132 */
1133 1133 if (buf->idb_bufbcopy == B_TRUE) {
1134 1134 bcopy(buf->idb_buf, buf->idb_bufptr, buf->idb_buflen);
1135 1135 }
1136 1136 mutex_enter(&idt->idt_mutex);
1137 1137 idm_buf_unbind_in_locked(idt, buf);
1138 1138 mutex_exit(&idt->idt_mutex);
1139 1139 }
1140 1140
1141 1141 static void
1142 1142 idm_buf_unbind_in_locked(idm_task_t *idt, idm_buf_t *buf)
1143 1143 {
1144 1144 list_remove(&idt->idt_inbufv, buf);
1145 1145 }
1146 1146
1147 1147 void
1148 1148 idm_buf_unbind_out(idm_task_t *idt, idm_buf_t *buf)
1149 1149 {
1150 1150 mutex_enter(&idt->idt_mutex);
1151 1151 idm_buf_unbind_out_locked(idt, buf);
1152 1152 mutex_exit(&idt->idt_mutex);
1153 1153 }
1154 1154
1155 1155 static void
1156 1156 idm_buf_unbind_out_locked(idm_task_t *idt, idm_buf_t *buf)
1157 1157 {
1158 1158 list_remove(&idt->idt_outbufv, buf);
1159 1159 }
1160 1160
1161 1161 /*
1162 1162 * idm_buf_find() will lookup the idm_buf_t based on the relative offset in the
1163 1163 * iSCSI PDU
1164 1164 */
1165 1165 idm_buf_t *
1166 1166 idm_buf_find(void *lbuf, size_t data_offset)
1167 1167 {
1168 1168 idm_buf_t *idb;
1169 1169 list_t *lst = (list_t *)lbuf;
1170 1170
1171 1171 /* iterate through the list to find the buffer */
1172 1172 for (idb = list_head(lst); idb != NULL; idb = list_next(lst, idb)) {
1173 1173
1174 1174 ASSERT((idb->idb_ic->ic_conn_type == CONN_TYPE_TGT) ||
1175 1175 (idb->idb_bufoffset == 0));
1176 1176
1177 1177 if ((data_offset >= idb->idb_bufoffset) &&
1178 1178 (data_offset < (idb->idb_bufoffset + idb->idb_buflen))) {
1179 1179
1180 1180 return (idb);
1181 1181 }
1182 1182 }
1183 1183
1184 1184 return (NULL);
1185 1185 }
1186 1186
1187 1187 void
1188 1188 idm_bufpat_set(idm_buf_t *idb)
1189 1189 {
1190 1190 idm_bufpat_t *bufpat;
1191 1191 int len, i;
1192 1192
1193 1193 len = idb->idb_buflen;
1194 1194 len = (len / sizeof (idm_bufpat_t)) * sizeof (idm_bufpat_t);
1195 1195
1196 1196 bufpat = idb->idb_buf;
1197 1197 for (i = 0; i < len; i += sizeof (idm_bufpat_t)) {
1198 1198 bufpat->bufpat_idb = idb;
1199 1199 bufpat->bufpat_bufmagic = IDM_BUF_MAGIC;
1200 1200 bufpat->bufpat_offset = i;
1201 1201 bufpat++;
1202 1202 }
1203 1203 }
1204 1204
/*
 * Scan a buffer for the debug pattern laid down by idm_bufpat_set().
 * Returns B_TRUE if any pattern record is still present (meaning that
 * region was never overwritten by transfer data).  BP_CHECK_QUICK only
 * examines the first record; otherwise up to check_len bytes (rounded
 * down to whole records) are scanned.  BP_CHECK_ASSERT panics (DEBUG)
 * on a match.
 */
boolean_t
idm_bufpat_check(idm_buf_t *idb, int check_len, idm_bufpat_check_type_t type)
{
	idm_bufpat_t	*bufpat;
	int		len, i;

	len = (type == BP_CHECK_QUICK) ? sizeof (idm_bufpat_t) : check_len;
	len = (len / sizeof (idm_bufpat_t)) * sizeof (idm_bufpat_t);
	ASSERT(len <= idb->idb_buflen);
	bufpat = idb->idb_buf;

	/*
	 * Don't check the pattern in buffers that came from outside IDM
	 * (these will be buffers from the initiator that we opted not
	 * to double-buffer)
	 */
	if (!idb->idb_bufalloc)
		return (B_FALSE);

	/*
	 * Return true if we find the pattern anywhere in the buffer
	 */
	for (i = 0; i < len; i += sizeof (idm_bufpat_t)) {
		if (BUFPAT_MATCH(bufpat, idb)) {
			IDM_CONN_LOG(CE_WARN, "idm_bufpat_check found: "
			    "idb %p bufpat %p "
			    "bufpat_idb=%p bufmagic=%08x offset=%08x",
			    (void *)idb, (void *)bufpat, bufpat->bufpat_idb,
			    bufpat->bufpat_bufmagic, bufpat->bufpat_offset);
			DTRACE_PROBE2(bufpat__pattern__found,
			    idm_buf_t *, idb, idm_bufpat_t *, bufpat);
			if (type == BP_CHECK_ASSERT) {
				ASSERT(0);
			}
			return (B_TRUE);
		}
		bufpat++;
	}

	return (B_FALSE);
}
1246 1246
/*
 * idm_task_alloc
 *
 * This function will allocate a idm_task_t structure. A task tag is also
 * generated and saved in idt_tt. The task is not active.
 *
 * Returns NULL if the connection is not in full-feature mode or the
 * cache allocation fails.  On success the task carries a hold on the
 * connection, released in idm_task_free().
 */
idm_task_t *
idm_task_alloc(idm_conn_t *ic)
{
	idm_task_t	*idt;

	ASSERT(ic != NULL);

	/*
	 * Don't allocate new tasks if we are not in FFP.  This first
	 * check is an unlocked fast-path optimization; the authoritative
	 * re-check happens under ic_state_mutex below.
	 */
	if (!ic->ic_ffp) {
		return (NULL);
	}
	idt = kmem_cache_alloc(idm.idm_task_cache, KM_NOSLEEP);
	if (idt == NULL) {
		return (NULL);
	}

	/* Cached tasks must come back with empty buffer lists */
	ASSERT(list_is_empty(&idt->idt_inbufv));
	ASSERT(list_is_empty(&idt->idt_outbufv));

	/* Re-check FFP under the state lock before taking the hold */
	mutex_enter(&ic->ic_state_mutex);
	if (!ic->ic_ffp) {
		mutex_exit(&ic->ic_state_mutex);
		kmem_cache_free(idm.idm_task_cache, idt);
		return (NULL);
	}
	idm_conn_hold(ic);
	mutex_exit(&ic->ic_state_mutex);

	idt->idt_state = TASK_IDLE;
	idt->idt_ic = ic;
	idt->idt_private = NULL;
	idt->idt_exp_datasn = 0;
	idt->idt_exp_rttsn = 0;
	idt->idt_flags = 0;
	return (idt);
}
1289 1289
1290 1290 /*
1291 1291 * idm_task_start
1292 1292 *
1293 1293 * Mark the task active and initialize some stats. The caller
1294 1294 * sets up the idm_task_t structure with a prior call to idm_task_alloc().
1295 1295 * The task service does not function as a task/work engine, it is the
1296 1296 * responsibility of the initiator to start the data transfer and free the
1297 1297 * resources.
1298 1298 */
1299 1299 void
1300 1300 idm_task_start(idm_task_t *idt, uintptr_t handle)
1301 1301 {
1302 1302 ASSERT(idt != NULL);
1303 1303
1304 1304 /* mark the task as ACTIVE */
1305 1305 idt->idt_state = TASK_ACTIVE;
1306 1306 idt->idt_client_handle = handle;
1307 1307 idt->idt_tx_to_ini_start = idt->idt_tx_to_ini_done =
1308 1308 idt->idt_rx_from_ini_start = idt->idt_rx_from_ini_done =
1309 1309 idt->idt_tx_bytes = idt->idt_rx_bytes = 0;
1310 1310 }
1311 1311
/*
 * idm_task_done
 *
 * This function sets the state to indicate that the task is no longer
 * active, then drains and resets the task's reference count so the task
 * can be safely freed or reused.
 */
void
idm_task_done(idm_task_t *idt)
{
	ASSERT(idt != NULL);

	mutex_enter(&idt->idt_mutex);
	idt->idt_state = TASK_IDLE;
	mutex_exit(&idt->idt_mutex);

	/*
	 * Although unlikely it is possible for a reference to come in after
	 * the client has decided the task is over but before we've marked
	 * the task idle. One specific unavoidable scenario is the case where
	 * received PDU with the matching ITT/TTT results in a successful
	 * lookup of this task. We are at the mercy of the remote node in
	 * that case so we need to handle it. Now that the task state
	 * has changed no more references will occur so a simple call to
	 * idm_refcnt_wait_ref should deal with the situation.
	 */
	idm_refcnt_wait_ref(&idt->idt_refcnt);
	idm_refcnt_reset(&idt->idt_refcnt);
}
1339 1339
/*
 * idm_task_free
 *
 * This function will free the Task Tag and the memory allocated for the task
 * idm_task_done should be called prior to this call (the refcount must
 * already be zero and the task idle, per the ASSERTs below).  Releases
 * the connection hold taken in idm_task_alloc().
 */
void
idm_task_free(idm_task_t *idt)
{
	idm_conn_t *ic;

	ASSERT(idt != NULL);
	ASSERT(idt->idt_refcnt.ir_refcnt == 0);
	ASSERT(idt->idt_state == TASK_IDLE);

	ic = idt->idt_ic;

	/*
	 * It's possible for items to still be in the idt_inbufv list if
	 * they were added after idm_free_task_rsrc was called. We rely on
	 * STMF to free all buffers associated with the task however STMF
	 * doesn't know that we have this reference to the buffers.
	 * Use list_create so that we don't end up with stale references
	 * to these buffers.
	 */
	list_create(&idt->idt_inbufv, sizeof (idm_buf_t),
	    offsetof(idm_buf_t, idb_buflink));
	list_create(&idt->idt_outbufv, sizeof (idm_buf_t),
	    offsetof(idm_buf_t, idb_buflink));

	kmem_cache_free(idm.idm_task_cache, idt);

	idm_conn_rele(ic);
}
1374 1374
/*
 * idm_task_find_common
 * common code for idm_task_find() and idm_task_find_and_complete()
 *
 * Looks up a task by tag in the global task-ID table (read-locked for
 * the duration), validates that it is ACTIVE and belongs to this
 * connection, and returns it with a hold taken (caller must
 * idm_task_rele()).  If 'complete' is B_TRUE the task is atomically
 * moved to TASK_COMPLETE so it can no longer be found or aborted.
 * Returns NULL if the tag is out of range or the task doesn't match.
 */
/*ARGSUSED*/
static idm_task_t *
idm_task_find_common(idm_conn_t *ic, uint32_t itt, uint32_t ttt,
    boolean_t complete)
{
	uint32_t	tt, client_handle;
	idm_task_t	*idt;

	/*
	 * Must match both itt and ttt. The table is indexed by itt
	 * for initiator connections and ttt for target connections.
	 */
	if (IDM_CONN_ISTGT(ic)) {
		tt = ttt;
		client_handle = itt;
	} else {
		tt = itt;
		client_handle = ttt;
	}

	rw_enter(&idm.idm_taskid_table_lock, RW_READER);
	if (tt >= idm.idm_taskid_max) {
		rw_exit(&idm.idm_taskid_table_lock);
		return (NULL);
	}

	idt = idm.idm_taskid_table[tt];

	if (idt != NULL) {
		mutex_enter(&idt->idt_mutex);
		if ((idt->idt_state != TASK_ACTIVE) ||
		    (idt->idt_ic != ic) ||
		    (IDM_CONN_ISTGT(ic) &&
		    (idt->idt_client_handle != client_handle))) {
			/*
			 * Task doesn't match or task is aborting and
			 * we don't want any more references.
			 */
			if ((idt->idt_ic != ic) &&
			    (idt->idt_state == TASK_ACTIVE) &&
			    (IDM_CONN_ISINI(ic) || idt->idt_client_handle ==
			    client_handle)) {
				/* Otherwise-valid task on another conn */
				IDM_CONN_LOG(CE_WARN,
				    "idm_task_find: wrong connection %p != %p",
				    (void *)ic, (void *)idt->idt_ic);
			}
			mutex_exit(&idt->idt_mutex);
			rw_exit(&idm.idm_taskid_table_lock);
			return (NULL);
		}
		idm_task_hold(idt);
		/*
		 * Set the task state to TASK_COMPLETE so it can no longer
		 * be found or aborted.
		 */
		if (B_TRUE == complete)
			idt->idt_state = TASK_COMPLETE;
		mutex_exit(&idt->idt_mutex);
	}
	rw_exit(&idm.idm_taskid_table_lock);

	return (idt);
}
1442 1442
1443 1443 /*
1444 1444 * This function looks up a task by task tag.
1445 1445 */
1446 1446 idm_task_t *
1447 1447 idm_task_find(idm_conn_t *ic, uint32_t itt, uint32_t ttt)
1448 1448 {
1449 1449 return (idm_task_find_common(ic, itt, ttt, B_FALSE));
1450 1450 }
1451 1451
1452 1452 /*
1453 1453 * This function looks up a task by task tag. If found, the task state
1454 1454 * is atomically set to TASK_COMPLETE so it can longer be found or aborted.
1455 1455 */
1456 1456 idm_task_t *
1457 1457 idm_task_find_and_complete(idm_conn_t *ic, uint32_t itt, uint32_t ttt)
1458 1458 {
1459 1459 return (idm_task_find_common(ic, itt, ttt, B_TRUE));
1460 1460 }
1461 1461
/*
 * idm_task_find_by_handle
 *
 * This function looks up a task by the client-private idt_client_handle.
 * On a match the task is returned via its idt_private pointer with a
 * hold taken (caller presumably releases it with idm_task_rele() —
 * TODO confirm against callers).
 *
 * This function should NEVER be called in the performance path. It is
 * intended strictly for error recovery/task management (it linearly
 * scans the entire task-ID table under the reader lock).
 */
/*ARGSUSED*/
void *
idm_task_find_by_handle(idm_conn_t *ic, uintptr_t handle)
{
	idm_task_t	*idt = NULL;
	int		idx = 0;

	rw_enter(&idm.idm_taskid_table_lock, RW_READER);

	for (idx = 0; idx < idm.idm_taskid_max; idx++) {
		idt = idm.idm_taskid_table[idx];

		if (idt == NULL)
			continue;

		mutex_enter(&idt->idt_mutex);

		if (idt->idt_state != TASK_ACTIVE) {
			/*
			 * Task is either in suspend, abort, or already
			 * complete.
			 */
			mutex_exit(&idt->idt_mutex);
			continue;
		}

		if (idt->idt_client_handle == handle) {
			idm_task_hold(idt);
			mutex_exit(&idt->idt_mutex);
			break;
		}

		mutex_exit(&idt->idt_mutex);
	}

	rw_exit(&idm.idm_taskid_table_lock);

	/* Scanning off the end of the table means no match was found */
	if ((idt == NULL) || (idx == idm.idm_taskid_max))
		return (NULL);

	return (idt->idt_private);
}
1512 1512
/*
 * Take a hold on a task's reference count; keeps the task from
 * completing its suspend/abort transition until released.
 */
void
idm_task_hold(idm_task_t *idt)
{
	idm_refcnt_hold(&idt->idt_refcnt);
}
1518 1518
/*
 * Release a hold taken with idm_task_hold().  Dropping the last
 * reference may trigger the pending suspend/abort completion callback.
 */
void
idm_task_rele(idm_task_t *idt)
{
	idm_refcnt_rele(&idt->idt_refcnt);
}
1524 1524
/*
 * Abort one task (idt != NULL) or every task on the connection
 * (idt == NULL).  In the single-task case the task mutex is acquired
 * here and released by idm_task_abort_one().
 */
void
idm_task_abort(idm_conn_t *ic, idm_task_t *idt, idm_abort_type_t abort_type)
{
	idm_task_t	*task;
	int		idx;

	/*
	 * Passing NULL as the task indicates that all tasks
	 * for this connection should be aborted.
	 */
	if (idt == NULL) {
		/*
		 * Only the connection state machine should ask for
		 * all tasks to abort and this should never happen in FFP.
		 */
		ASSERT(!ic->ic_ffp);
		rw_enter(&idm.idm_taskid_table_lock, RW_READER);
		for (idx = 0; idx < idm.idm_taskid_max; idx++) {
			task = idm.idm_taskid_table[idx];
			if (task == NULL)
				continue;
			mutex_enter(&task->idt_mutex);
			if ((task->idt_state != TASK_IDLE) &&
			    (task->idt_state != TASK_COMPLETE) &&
			    (task->idt_ic == ic)) {
				/*
				 * Drop the table lock across the abort
				 * (idm_task_abort_one releases the task
				 * mutex), then re-take it to continue
				 * the scan.
				 */
				rw_exit(&idm.idm_taskid_table_lock);
				idm_task_abort_one(ic, task, abort_type);
				rw_enter(&idm.idm_taskid_table_lock, RW_READER);
			} else
				mutex_exit(&task->idt_mutex);
		}
		rw_exit(&idm.idm_taskid_table_lock);
	} else {
		mutex_enter(&idt->idt_mutex);
		idm_task_abort_one(ic, idt, abort_type);
	}
}
1562 1562
/*
 * Reference-count drain callback for task suspend/abort: invoked once
 * all outstanding task references are released.  Completes the
 * SUSPENDING -> SUSPENDED or ABORTING -> ABORTED transition and then
 * notifies the client via idm_task_aborted().
 */
static void
idm_task_abort_unref_cb(void *ref)
{
	idm_task_t *idt = ref;

	mutex_enter(&idt->idt_mutex);
	switch (idt->idt_state) {
	case TASK_SUSPENDING:
		idt->idt_state = TASK_SUSPENDED;
		mutex_exit(&idt->idt_mutex);
		idm_task_aborted(idt, IDM_STATUS_SUSPENDED);
		return;
	case TASK_ABORTING:
		idt->idt_state = TASK_ABORTED;
		mutex_exit(&idt->idt_mutex);
		idm_task_aborted(idt, IDM_STATUS_ABORTED);
		return;
	default:
		/* No other state should be draining references here */
		mutex_exit(&idt->idt_mutex);
		ASSERT(0);
		break;
	}
}
1586 1586
/*
 * Abort the idm task.
 * Caller must hold the task mutex, which will be released before return
 */
static void
idm_task_abort_one(idm_conn_t *ic, idm_task_t *idt, idm_abort_type_t abort_type)
{
	/* Caller must hold the task mutex */
	ASSERT(mutex_owned(&idt->idt_mutex));
	switch (idt->idt_state) {
	case TASK_ACTIVE:
		switch (abort_type) {
		case AT_INTERNAL_SUSPEND:
			/* Call transport to release any resources */
			idt->idt_state = TASK_SUSPENDING;
			mutex_exit(&idt->idt_mutex);
			ic->ic_transport_ops->it_free_task_rsrc(idt);

			/*
			 * Wait for outstanding references.  When all
			 * references are released the callback will call
			 * idm_task_aborted().
			 */
			idm_refcnt_async_wait_ref(&idt->idt_refcnt,
			    &idm_task_abort_unref_cb);
			return;
		case AT_INTERNAL_ABORT:
		case AT_TASK_MGMT_ABORT:
			idt->idt_state = TASK_ABORTING;
			mutex_exit(&idt->idt_mutex);
			ic->ic_transport_ops->it_free_task_rsrc(idt);

			/*
			 * Wait for outstanding references.  When all
			 * references are released the callback will call
			 * idm_task_aborted().
			 */
			idm_refcnt_async_wait_ref(&idt->idt_refcnt,
			    &idm_task_abort_unref_cb);
			return;
		default:
			ASSERT(0);
		}
		break;
	case TASK_SUSPENDING:
		/* Already called transport_free_task_rsrc(); */
		switch (abort_type) {
		case AT_INTERNAL_SUSPEND:
			/* Already doing it */
			break;
		case AT_INTERNAL_ABORT:
		case AT_TASK_MGMT_ABORT:
			/* Escalate the pending suspend to an abort */
			idt->idt_state = TASK_ABORTING;
			break;
		default:
			ASSERT(0);
		}
		break;
	case TASK_SUSPENDED:
		/* Already called transport_free_task_rsrc(); */
		switch (abort_type) {
		case AT_INTERNAL_SUSPEND:
			/* Already doing it */
			break;
		case AT_INTERNAL_ABORT:
		case AT_TASK_MGMT_ABORT:
			idt->idt_state = TASK_ABORTING;
			mutex_exit(&idt->idt_mutex);

			/*
			 * We could probably call idm_task_aborted directly
			 * here but we may be holding the conn lock.  It's
			 * easier to just switch contexts.  Even though
			 * we shouldn't really have any references we'll
			 * set the state to TASK_ABORTING instead of
			 * TASK_ABORTED so we can use the same code path.
			 */
			idm_refcnt_async_wait_ref(&idt->idt_refcnt,
			    &idm_task_abort_unref_cb);
			return;
		default:
			ASSERT(0);
		}
		break;
	case TASK_ABORTING:
	case TASK_ABORTED:
		switch (abort_type) {
		case AT_INTERNAL_SUSPEND:
			/* We're already past this point... */
		case AT_INTERNAL_ABORT:
		case AT_TASK_MGMT_ABORT:
			/* Already doing it */
			break;
		default:
			ASSERT(0);
		}
		break;
	case TASK_COMPLETE:
		/*
		 * In this case, let it go.  The status has already been
		 * sent (which may or may not get successfully transmitted)
		 * and we don't want to end up in a race between completing
		 * the status PDU and marking the task suspended.
		 */
		break;
	default:
		ASSERT(0);
	}
	mutex_exit(&idt->idt_mutex);
}
1697 1697
1698 1698 static void
1699 1699 idm_task_aborted(idm_task_t *idt, idm_status_t status)
1700 1700 {
1701 1701 (*idt->idt_ic->ic_conn_ops.icb_task_aborted)(idt, status);
1702 1702 }
1703 1703
/*
 * idm_pdu_tx
 *
 * This is IDM's implementation of the 'Send_Control' operational primitive.
 * This function is invoked by an initiator iSCSI layer requesting the transfer
 * of a iSCSI command PDU or a target iSCSI layer requesting the transfer of a
 * iSCSI response PDU. The PDU will be transmitted as-is by the local Datamover
 * layer to the peer iSCSI layer in the remote iSCSI node. The connection info
 * and iSCSI PDU-specific qualifiers namely BHS, AHS, DataDescriptor and Size
 * are provided as input.
 *
 * In full-feature mode SCSI PDUs are forwarded directly; everything else
 * is routed through the connection state machine as a TX PDU event.
 */
void
idm_pdu_tx(idm_pdu_t *pdu)
{
	idm_conn_t *ic = pdu->isp_ic;
	iscsi_async_evt_hdr_t *async_evt;

	/*
	 * If we are in full-featured mode then route SCSI-related
	 * commands to the appropriate function vector without checking
	 * the connection state. We will only be in full-feature mode
	 * when we are in an acceptable state for SCSI PDU's.
	 *
	 * We also need to ensure that there are no PDU events outstanding
	 * on the state machine. Any non-SCSI PDU's received in full-feature
	 * mode will result in PDU events and until these have been handled
	 * we need to route all PDU's through the state machine as PDU
	 * events to maintain ordering.
	 *
	 * Note that IDM cannot enter FFP mode until it processes in
	 * its state machine the last xmit of the login process.
	 * Hence, checking the IDM_PDU_LOGIN_TX flag here would be
	 * superfluous.
	 */
	mutex_enter(&ic->ic_state_mutex);
	if (ic->ic_ffp && (ic->ic_pdu_events == 0)) {
		mutex_exit(&ic->ic_state_mutex);
		switch (IDM_PDU_OPCODE(pdu)) {
		case ISCSI_OP_SCSI_RSP:
			/* Target only */
			DTRACE_ISCSI_2(scsi__response, idm_conn_t *, ic,
			    iscsi_scsi_rsp_hdr_t *,
			    (iscsi_scsi_rsp_hdr_t *)pdu->isp_hdr);
			idm_pdu_tx_forward(ic, pdu);
			return;
		case ISCSI_OP_SCSI_TASK_MGT_RSP:
			/*
			 * Target only
			 *
			 * NOTE(review): this probe casts the header to
			 * iscsi_text_rsp_hdr_t * while the state-machine
			 * path below uses iscsi_scsi_task_mgt_rsp_hdr_t *
			 * for the same task__response probe — looks like a
			 * wrong cast; confirm against the SDT probe
			 * definition before changing.
			 */
			DTRACE_ISCSI_2(task__response, idm_conn_t *, ic,
			    iscsi_text_rsp_hdr_t *,
			    (iscsi_text_rsp_hdr_t *)pdu->isp_hdr);
			idm_pdu_tx_forward(ic, pdu);
			return;
		case ISCSI_OP_SCSI_DATA_RSP:
			/* Target only */
			DTRACE_ISCSI_2(data__send, idm_conn_t *, ic,
			    iscsi_data_rsp_hdr_t *,
			    (iscsi_data_rsp_hdr_t *)pdu->isp_hdr);
			idm_pdu_tx_forward(ic, pdu);
			return;
		case ISCSI_OP_RTT_RSP:
			/* Target only */
			DTRACE_ISCSI_2(data__request, idm_conn_t *, ic,
			    iscsi_rtt_hdr_t *,
			    (iscsi_rtt_hdr_t *)pdu->isp_hdr);
			idm_pdu_tx_forward(ic, pdu);
			return;
		case ISCSI_OP_NOOP_IN:
			/* Target only */
			DTRACE_ISCSI_2(nop__send, idm_conn_t *, ic,
			    iscsi_nop_in_hdr_t *,
			    (iscsi_nop_in_hdr_t *)pdu->isp_hdr);
			idm_pdu_tx_forward(ic, pdu);
			return;
		case ISCSI_OP_TEXT_RSP:
			/* Target only */
			DTRACE_ISCSI_2(text__response, idm_conn_t *, ic,
			    iscsi_text_rsp_hdr_t *,
			    (iscsi_text_rsp_hdr_t *)pdu->isp_hdr);
			idm_pdu_tx_forward(ic, pdu);
			return;
		case ISCSI_OP_TEXT_CMD:
		case ISCSI_OP_NOOP_OUT:
		case ISCSI_OP_SCSI_CMD:
		case ISCSI_OP_SCSI_DATA:
		case ISCSI_OP_SCSI_TASK_MGT_MSG:
			/* Initiator only */
			idm_pdu_tx_forward(ic, pdu);
			return;
		default:
			break;
		}

		/* Non-SCSI PDU: fall through to the state-machine path */
		mutex_enter(&ic->ic_state_mutex);
	}

	/*
	 * Any PDU's processed outside of full-feature mode and non-SCSI
	 * PDU's in full-feature mode are handled by generating an
	 * event to the connection state machine.  The state machine
	 * will validate the PDU against the current state and either
	 * transmit the PDU if the opcode is allowed or handle an
	 * error if the PDU is not allowed.
	 *
	 * This code-path will also generate any events that are implied
	 * by the PDU opcode.  For example a "login response" with success
	 * status generates a CE_LOGOUT_SUCCESS_SND event.
	 */
	switch (IDM_PDU_OPCODE(pdu)) {
	case ISCSI_OP_LOGIN_CMD:
		idm_conn_tx_pdu_event(ic, CE_LOGIN_SND, (uintptr_t)pdu);
		break;
	case ISCSI_OP_LOGIN_RSP:
		DTRACE_ISCSI_2(login__response, idm_conn_t *, ic,
		    iscsi_login_rsp_hdr_t *,
		    (iscsi_login_rsp_hdr_t *)pdu->isp_hdr);
		idm_parse_login_rsp(ic, pdu, /* Is RX */ B_FALSE);
		break;
	case ISCSI_OP_LOGOUT_CMD:
		idm_parse_logout_req(ic, pdu, /* Is RX */ B_FALSE);
		break;
	case ISCSI_OP_LOGOUT_RSP:
		DTRACE_ISCSI_2(logout__response, idm_conn_t *, ic,
		    iscsi_logout_rsp_hdr_t *,
		    (iscsi_logout_rsp_hdr_t *)pdu->isp_hdr);
		idm_parse_logout_rsp(ic, pdu, /* Is RX */ B_FALSE);
		break;
	case ISCSI_OP_ASYNC_EVENT:
		DTRACE_ISCSI_2(async__send, idm_conn_t *, ic,
		    iscsi_async_evt_hdr_t *,
		    (iscsi_async_evt_hdr_t *)pdu->isp_hdr);
		/* Dispatch on the async event code within the PDU header */
		async_evt = (iscsi_async_evt_hdr_t *)pdu->isp_hdr;
		switch (async_evt->async_event) {
		case ISCSI_ASYNC_EVENT_REQUEST_LOGOUT:
			idm_conn_tx_pdu_event(ic, CE_ASYNC_LOGOUT_SND,
			    (uintptr_t)pdu);
			break;
		case ISCSI_ASYNC_EVENT_DROPPING_CONNECTION:
			idm_conn_tx_pdu_event(ic, CE_ASYNC_DROP_CONN_SND,
			    (uintptr_t)pdu);
			break;
		case ISCSI_ASYNC_EVENT_DROPPING_ALL_CONNECTIONS:
			idm_conn_tx_pdu_event(ic, CE_ASYNC_DROP_ALL_CONN_SND,
			    (uintptr_t)pdu);
			break;
		case ISCSI_ASYNC_EVENT_SCSI_EVENT:
		case ISCSI_ASYNC_EVENT_PARAM_NEGOTIATION:
		default:
			idm_conn_tx_pdu_event(ic, CE_MISC_TX,
			    (uintptr_t)pdu);
			break;
		}
		break;
	case ISCSI_OP_SCSI_RSP:
		/* Target only */
		DTRACE_ISCSI_2(scsi__response, idm_conn_t *, ic,
		    iscsi_scsi_rsp_hdr_t *,
		    (iscsi_scsi_rsp_hdr_t *)pdu->isp_hdr);
		idm_conn_tx_pdu_event(ic, CE_MISC_TX, (uintptr_t)pdu);
		break;
	case ISCSI_OP_SCSI_TASK_MGT_RSP:
		/* Target only */
		DTRACE_ISCSI_2(task__response, idm_conn_t *, ic,
		    iscsi_scsi_task_mgt_rsp_hdr_t *,
		    (iscsi_scsi_task_mgt_rsp_hdr_t *)pdu->isp_hdr);
		idm_conn_tx_pdu_event(ic, CE_MISC_TX, (uintptr_t)pdu);
		break;
	case ISCSI_OP_SCSI_DATA_RSP:
		/* Target only */
		DTRACE_ISCSI_2(data__send, idm_conn_t *, ic,
		    iscsi_data_rsp_hdr_t *,
		    (iscsi_data_rsp_hdr_t *)pdu->isp_hdr);
		idm_conn_tx_pdu_event(ic, CE_MISC_TX, (uintptr_t)pdu);
		break;
	case ISCSI_OP_RTT_RSP:
		/* Target only */
		DTRACE_ISCSI_2(data__request, idm_conn_t *, ic,
		    iscsi_rtt_hdr_t *,
		    (iscsi_rtt_hdr_t *)pdu->isp_hdr);
		idm_conn_tx_pdu_event(ic, CE_MISC_TX, (uintptr_t)pdu);
		break;
	case ISCSI_OP_NOOP_IN:
		/* Target only */
		DTRACE_ISCSI_2(nop__send, idm_conn_t *, ic,
		    iscsi_nop_in_hdr_t *,
		    (iscsi_nop_in_hdr_t *)pdu->isp_hdr);
		idm_conn_tx_pdu_event(ic, CE_MISC_TX, (uintptr_t)pdu);
		break;
	case ISCSI_OP_TEXT_RSP:
		/* Target only */
		DTRACE_ISCSI_2(text__response, idm_conn_t *, ic,
		    iscsi_text_rsp_hdr_t *,
		    (iscsi_text_rsp_hdr_t *)pdu->isp_hdr);
		idm_conn_tx_pdu_event(ic, CE_MISC_TX, (uintptr_t)pdu);
		break;
	/* Initiator only */
	case ISCSI_OP_SCSI_CMD:
	case ISCSI_OP_SCSI_TASK_MGT_MSG:
	case ISCSI_OP_SCSI_DATA:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT_CMD:
	case ISCSI_OP_SNACK_CMD:
	case ISCSI_OP_REJECT_MSG:
	default:
		/*
		 * Connection state machine will validate these PDU's against
		 * the current state.  A PDU not allowed in the current
		 * state will cause a protocol error.
		 */
		idm_conn_tx_pdu_event(ic, CE_MISC_TX, (uintptr_t)pdu);
		break;
	}
	mutex_exit(&ic->ic_state_mutex);
}
1918 1918
1919 1919 /*
1920 1920 * Common allocation of a PDU along with memory for header and data.
1921 1921 */
1922 1922 static idm_pdu_t *
1923 1923 idm_pdu_alloc_common(uint_t hdrlen, uint_t datalen, int sleepflag)
1924 1924 {
1925 1925 idm_pdu_t *result;
1926 1926
1927 1927 /*
1928 1928 * IDM clients should cache these structures for performance
1929 1929 * critical paths. We can't cache effectively in IDM because we
1930 1930 * don't know the correct header and data size.
1931 1931 *
1932 1932 * Valid header length is assumed to be hdrlen and valid data
1933 1933 * length is assumed to be datalen. isp_hdrlen and isp_datalen
1934 1934 * can be adjusted after the PDU is returned if necessary.
1935 1935 */
1936 1936 result = kmem_zalloc(sizeof (idm_pdu_t) + hdrlen + datalen, sleepflag);
1937 1937 if (result != NULL) {
1938 1938 /* For idm_pdu_free sanity check */
1939 1939 result->isp_flags |= IDM_PDU_ALLOC;
1940 1940 /* pointer arithmetic */
1941 1941 result->isp_hdr = (iscsi_hdr_t *)(result + 1);
1942 1942 result->isp_hdrlen = hdrlen;
1943 1943 result->isp_hdrbuflen = hdrlen;
1944 1944 result->isp_transport_hdrlen = 0;
1945 1945 if (datalen != 0)
1946 1946 result->isp_data = (uint8_t *)result->isp_hdr + hdrlen;
1947 1947 result->isp_datalen = datalen;
1948 1948 result->isp_databuflen = datalen;
1949 1949 result->isp_magic = IDM_PDU_MAGIC;
1950 1950 }
1951 1951
1952 1952 return (result);
1953 1953 }
1954 1954
1955 1955 /*
1956 1956 * Typical idm_pdu_alloc invocation, will block for resources.
1957 1957 */
1958 1958 idm_pdu_t *
1959 1959 idm_pdu_alloc(uint_t hdrlen, uint_t datalen)
1960 1960 {
1961 1961 return (idm_pdu_alloc_common(hdrlen, datalen, KM_SLEEP));
1962 1962 }
1963 1963
1964 1964 /*
1965 1965 * Non-blocking idm_pdu_alloc implementation, returns NULL if resources
1966 1966 * are not available. Needed for transport-layer allocations which may
1967 1967 * be invoking in interrupt context.
1968 1968 */
1969 1969 idm_pdu_t *
1970 1970 idm_pdu_alloc_nosleep(uint_t hdrlen, uint_t datalen)
1971 1971 {
1972 1972 return (idm_pdu_alloc_common(hdrlen, datalen, KM_NOSLEEP));
1973 1973 }
1974 1974
1975 1975 /*
1976 1976 * Free a PDU previously allocated with idm_pdu_alloc() including any
1977 1977 * header and data space allocated as part of the original request.
1978 1978 * Additional memory regions referenced by subsequent modification of
1979 1979 * the isp_hdr and/or isp_data fields will not be freed.
1980 1980 */
1981 1981 void
1982 1982 idm_pdu_free(idm_pdu_t *pdu)
1983 1983 {
1984 1984 /* Make sure the structure was allocated using idm_pdu_alloc() */
1985 1985 ASSERT(pdu->isp_flags & IDM_PDU_ALLOC);
1986 1986 kmem_free(pdu,
1987 1987 sizeof (idm_pdu_t) + pdu->isp_hdrbuflen + pdu->isp_databuflen);
1988 1988 }
1989 1989
1990 1990 /*
1991 1991 * Initialize the connection, private and callback fields in a PDU.
1992 1992 */
1993 1993 void
1994 1994 idm_pdu_init(idm_pdu_t *pdu, idm_conn_t *ic, void *private, idm_pdu_cb_t *cb)
1995 1995 {
1996 1996 /*
1997 1997 * idm_pdu_complete() will call idm_pdu_free if the callback is
1998 1998 * NULL. This will only work if the PDU was originally allocated
1999 1999 * with idm_pdu_alloc().
2000 2000 */
2001 2001 ASSERT((pdu->isp_flags & IDM_PDU_ALLOC) ||
2002 2002 (cb != NULL));
2003 2003 pdu->isp_magic = IDM_PDU_MAGIC;
2004 2004 pdu->isp_ic = ic;
2005 2005 pdu->isp_private = private;
2006 2006 pdu->isp_callback = cb;
2007 2007 }
2008 2008
2009 2009 /*
2010 2010 * Initialize the header and header length field. This function should
2011 2011 * not be used to adjust the header length in a buffer allocated via
 * idm_pdu_alloc since it overwrites the existing header pointer.
2013 2013 */
2014 2014 void
2015 2015 idm_pdu_init_hdr(idm_pdu_t *pdu, uint8_t *hdr, uint_t hdrlen)
2016 2016 {
2017 2017 pdu->isp_hdr = (iscsi_hdr_t *)((void *)hdr);
2018 2018 pdu->isp_hdrlen = hdrlen;
2019 2019 }
2020 2020
2021 2021 /*
2022 2022 * Initialize the data and data length fields. This function should
2023 2023 * not be used to adjust the data length of a buffer allocated via
2024 2024 * idm_pdu_alloc since it overwrites the existing data pointer.
2025 2025 */
2026 2026 void
2027 2027 idm_pdu_init_data(idm_pdu_t *pdu, uint8_t *data, uint_t datalen)
2028 2028 {
2029 2029 pdu->isp_data = data;
2030 2030 pdu->isp_datalen = datalen;
2031 2031 }
2032 2032
2033 2033 void
2034 2034 idm_pdu_complete(idm_pdu_t *pdu, idm_status_t status)
2035 2035 {
2036 2036 if (pdu->isp_callback) {
2037 2037 pdu->isp_status = status;
2038 2038 (*pdu->isp_callback)(pdu, status);
2039 2039 } else {
2040 2040 idm_pdu_free(pdu);
2041 2041 }
2042 2042 }
2043 2043
2044 2044 /*
2045 2045 * State machine auditing
2046 2046 */
2047 2047
2048 2048 void
2049 2049 idm_sm_audit_init(sm_audit_buf_t *audit_buf)
2050 2050 {
2051 2051 bzero(audit_buf, sizeof (sm_audit_buf_t));
2052 2052 audit_buf->sab_max_index = SM_AUDIT_BUF_MAX_REC - 1;
2053 2053 }
2054 2054
2055 2055 static
2056 2056 sm_audit_record_t *
2057 2057 idm_sm_audit_common(sm_audit_buf_t *audit_buf, sm_audit_record_type_t r_type,
2058 2058 sm_audit_sm_type_t sm_type,
2059 2059 int current_state)
2060 2060 {
2061 2061 sm_audit_record_t *sar;
2062 2062
2063 2063 sar = audit_buf->sab_records;
2064 2064 sar += audit_buf->sab_index;
2065 2065 audit_buf->sab_index++;
2066 2066 audit_buf->sab_index &= audit_buf->sab_max_index;
2067 2067
2068 2068 sar->sar_type = r_type;
2069 2069 gethrestime(&sar->sar_timestamp);
2070 2070 sar->sar_sm_type = sm_type;
2071 2071 sar->sar_state = current_state;
2072 2072
2073 2073 return (sar);
2074 2074 }
2075 2075
2076 2076 void
2077 2077 idm_sm_audit_event(sm_audit_buf_t *audit_buf,
2078 2078 sm_audit_sm_type_t sm_type, int current_state,
2079 2079 int event, uintptr_t event_info)
2080 2080 {
2081 2081 sm_audit_record_t *sar;
2082 2082
2083 2083 sar = idm_sm_audit_common(audit_buf, SAR_STATE_EVENT,
2084 2084 sm_type, current_state);
2085 2085 sar->sar_event = event;
2086 2086 sar->sar_event_info = event_info;
2087 2087 }
2088 2088
2089 2089 void
2090 2090 idm_sm_audit_state_change(sm_audit_buf_t *audit_buf,
2091 2091 sm_audit_sm_type_t sm_type, int current_state, int new_state)
2092 2092 {
2093 2093 sm_audit_record_t *sar;
2094 2094
2095 2095 sar = idm_sm_audit_common(audit_buf, SAR_STATE_CHANGE,
2096 2096 sm_type, current_state);
2097 2097 sar->sar_new_state = new_state;
2098 2098 }
2099 2099
2100 2100
2101 2101 /*
2102 2102 * Object reference tracking
2103 2103 */
2104 2104
2105 2105 void
2106 2106 idm_refcnt_init(idm_refcnt_t *refcnt, void *referenced_obj)
2107 2107 {
2108 2108 bzero(refcnt, sizeof (*refcnt));
2109 2109 idm_refcnt_reset(refcnt);
2110 2110 refcnt->ir_referenced_obj = referenced_obj;
2111 2111 bzero(&refcnt->ir_audit_buf, sizeof (refcnt_audit_buf_t));
2112 2112 refcnt->ir_audit_buf.anb_max_index = REFCNT_AUDIT_BUF_MAX_REC - 1;
2113 2113 mutex_init(&refcnt->ir_mutex, NULL, MUTEX_DEFAULT, NULL);
2114 2114 cv_init(&refcnt->ir_cv, NULL, CV_DEFAULT, NULL);
2115 2115 }
2116 2116
/*
 * Tear down a reference counter set up by idm_refcnt_init().  The
 * reference count must already be zero.
 */
void
idm_refcnt_destroy(idm_refcnt_t *refcnt)
{
	/*
	 * Grab the mutex to ensure there are no other lingering threads
	 * holding the mutex before we destroy it (e.g. idm_refcnt_rele
	 * just after the refcnt goes to zero if ir_waiting ==
	 * REF_WAIT_ASYNC)
	 */
	mutex_enter(&refcnt->ir_mutex);
	ASSERT(refcnt->ir_refcnt == 0);
	cv_destroy(&refcnt->ir_cv);
	mutex_destroy(&refcnt->ir_mutex);
}
2130 2130
2131 2131 void
2132 2132 idm_refcnt_reset(idm_refcnt_t *refcnt)
2133 2133 {
2134 2134 refcnt->ir_waiting = REF_NOWAIT;
2135 2135 refcnt->ir_refcnt = 0;
2136 2136 }
2137 2137
2138 2138 void
2139 2139 idm_refcnt_hold(idm_refcnt_t *refcnt)
2140 2140 {
2141 2141 /*
2142 2142 * Nothing should take a hold on an object after a call to
2143 2143 * idm_refcnt_wait_ref or idm_refcnd_async_wait_ref
2144 2144 */
2145 2145 ASSERT(refcnt->ir_waiting == REF_NOWAIT);
2146 2146
2147 2147 mutex_enter(&refcnt->ir_mutex);
2148 2148 refcnt->ir_refcnt++;
2149 2149 REFCNT_AUDIT(refcnt);
2150 2150 mutex_exit(&refcnt->ir_mutex);
2151 2151 }
2152 2152
2153 2153 static void
2154 2154 idm_refcnt_unref_task(void *refcnt_void)
2155 2155 {
2156 2156 idm_refcnt_t *refcnt = refcnt_void;
2157 2157
2158 2158 REFCNT_AUDIT(refcnt);
2159 2159 (*refcnt->ir_cb)(refcnt->ir_referenced_obj);
2160 2160 }
2161 2161
/*
 * Drop a reference.  If the count reaches zero and a waiter is
 * registered, wake it: a REF_WAIT_SYNC waiter (idm_refcnt_wait_ref) is
 * signalled on ir_cv; a REF_WAIT_ASYNC waiter gets its callback run
 * from the global taskq via idm_refcnt_unref_task.
 */
void
idm_refcnt_rele(idm_refcnt_t *refcnt)
{
	mutex_enter(&refcnt->ir_mutex);
	ASSERT(refcnt->ir_refcnt > 0);
	refcnt->ir_refcnt--;
	REFCNT_AUDIT(refcnt);
	if (refcnt->ir_waiting == REF_NOWAIT) {
		/* No one is waiting on this object */
		mutex_exit(&refcnt->ir_mutex);
		return;
	}

	/*
	 * Someone is waiting for this object to go idle so check if
	 * refcnt is 0.  Waiting on an object then later grabbing another
	 * reference is not allowed so we don't need to handle that case.
	 */
	if (refcnt->ir_refcnt == 0) {
		if (refcnt->ir_waiting == REF_WAIT_ASYNC) {
			/* Run the callback from the taskq, not inline */
			if (taskq_dispatch(idm.idm_global_taskq,
			    &idm_refcnt_unref_task, refcnt, TQ_SLEEP) == NULL) {
				cmn_err(CE_WARN,
				    "idm_refcnt_rele: Couldn't dispatch task");
			}
		} else if (refcnt->ir_waiting == REF_WAIT_SYNC) {
			cv_signal(&refcnt->ir_cv);
		}
	}
	mutex_exit(&refcnt->ir_mutex);
}
2193 2193
2194 2194 void
2195 2195 idm_refcnt_rele_and_destroy(idm_refcnt_t *refcnt, idm_refcnt_cb_t *cb_func)
2196 2196 {
2197 2197 mutex_enter(&refcnt->ir_mutex);
2198 2198 ASSERT(refcnt->ir_refcnt > 0);
2199 2199 refcnt->ir_refcnt--;
2200 2200 REFCNT_AUDIT(refcnt);
2201 2201
2202 2202 /*
2203 2203 * Someone is waiting for this object to go idle so check if
2204 2204 * refcnt is 0. Waiting on an object then later grabbing another
2205 2205 * reference is not allowed so we don't need to handle that case.
2206 2206 */
2207 2207 if (refcnt->ir_refcnt == 0) {
2208 2208 refcnt->ir_cb = cb_func;
2209 2209 refcnt->ir_waiting = REF_WAIT_ASYNC;
2210 2210 if (taskq_dispatch(idm.idm_global_taskq,
2211 2211 &idm_refcnt_unref_task, refcnt, TQ_SLEEP) == NULL) {
2212 2212 cmn_err(CE_WARN,
2213 2213 "idm_refcnt_rele: Couldn't dispatch task");
2214 2214 }
2215 2215 }
2216 2216 mutex_exit(&refcnt->ir_mutex);
2217 2217 }
2218 2218
2219 2219 void
2220 2220 idm_refcnt_wait_ref(idm_refcnt_t *refcnt)
2221 2221 {
2222 2222 mutex_enter(&refcnt->ir_mutex);
2223 2223 refcnt->ir_waiting = REF_WAIT_SYNC;
2224 2224 REFCNT_AUDIT(refcnt);
2225 2225 while (refcnt->ir_refcnt != 0)
2226 2226 cv_wait(&refcnt->ir_cv, &refcnt->ir_mutex);
2227 2227 mutex_exit(&refcnt->ir_mutex);
2228 2228 }
2229 2229
2230 2230 void
2231 2231 idm_refcnt_async_wait_ref(idm_refcnt_t *refcnt, idm_refcnt_cb_t *cb_func)
2232 2232 {
2233 2233 mutex_enter(&refcnt->ir_mutex);
2234 2234 refcnt->ir_waiting = REF_WAIT_ASYNC;
2235 2235 refcnt->ir_cb = cb_func;
2236 2236 REFCNT_AUDIT(refcnt);
2237 2237 /*
2238 2238 * It's possible we don't have any references. To make things easier
2239 2239 * on the caller use a taskq to call the callback instead of
2240 2240 * calling it synchronously
2241 2241 */
2242 2242 if (refcnt->ir_refcnt == 0) {
2243 2243 if (taskq_dispatch(idm.idm_global_taskq,
2244 2244 &idm_refcnt_unref_task, refcnt, TQ_SLEEP) == NULL) {
2245 2245 cmn_err(CE_WARN,
2246 2246 "idm_refcnt_async_wait_ref: "
2247 2247 "Couldn't dispatch task");
2248 2248 }
2249 2249 }
2250 2250 mutex_exit(&refcnt->ir_mutex);
2251 2251 }
2252 2252
2253 2253 void
2254 2254 idm_refcnt_destroy_unref_obj(idm_refcnt_t *refcnt,
2255 2255 idm_refcnt_cb_t *cb_func)
2256 2256 {
2257 2257 mutex_enter(&refcnt->ir_mutex);
2258 2258 if (refcnt->ir_refcnt == 0) {
2259 2259 mutex_exit(&refcnt->ir_mutex);
2260 2260 (*cb_func)(refcnt->ir_referenced_obj);
2261 2261 return;
2262 2262 }
2263 2263 mutex_exit(&refcnt->ir_mutex);
2264 2264 }
2265 2265
2266 2266 void
2267 2267 idm_conn_hold(idm_conn_t *ic)
2268 2268 {
2269 2269 idm_refcnt_hold(&ic->ic_refcnt);
2270 2270 }
2271 2271
2272 2272 void
2273 2273 idm_conn_rele(idm_conn_t *ic)
2274 2274 {
2275 2275 idm_refcnt_rele(&ic->ic_refcnt);
2276 2276 }
2277 2277
2278 2278 void
2279 2279 idm_conn_set_target_name(idm_conn_t *ic, char *target_name)
2280 2280 {
2281 2281 (void) strlcpy(ic->ic_target_name, target_name, ISCSI_MAX_NAME_LEN + 1);
2282 2282 }
2283 2283
2284 2284 void
2285 2285 idm_conn_set_initiator_name(idm_conn_t *ic, char *initiator_name)
2286 2286 {
2287 2287 (void) strlcpy(ic->ic_initiator_name, initiator_name,
2288 2288 ISCSI_MAX_NAME_LEN + 1);
2289 2289 }
2290 2290
2291 2291 void
2292 2292 idm_conn_set_isid(idm_conn_t *ic, uint8_t isid[ISCSI_ISID_LEN])
2293 2293 {
2294 2294 (void) snprintf(ic->ic_isid, ISCSI_MAX_ISID_LEN + 1,
2295 2295 "%02x%02x%02x%02x%02x%02x",
2296 2296 isid[0], isid[1], isid[2], isid[3], isid[4], isid[5]);
2297 2297 }
2298 2298
2299 2299 static int
2300 2300 _idm_init(void)
2301 2301 {
2302 2302 /* Initialize the rwlock for the taskid table */
2303 2303 rw_init(&idm.idm_taskid_table_lock, NULL, RW_DRIVER, NULL);
2304 2304
2305 2305 /* Initialize the global mutex and taskq */
2306 2306 mutex_init(&idm.idm_global_mutex, NULL, MUTEX_DEFAULT, NULL);
2307 2307
2308 2308 cv_init(&idm.idm_tgt_svc_cv, NULL, CV_DEFAULT, NULL);
2309 2309 cv_init(&idm.idm_wd_cv, NULL, CV_DEFAULT, NULL);
2310 2310
2311 2311 /*
2312 2312 * The maximum allocation needs to be high here since there can be
2313 2313 * many concurrent tasks using the global taskq.
2314 2314 */
2315 2315 idm.idm_global_taskq = taskq_create("idm_global_taskq", 1, minclsyspri,
2316 2316 128, 16384, TASKQ_PREPOPULATE);
2317 2317 if (idm.idm_global_taskq == NULL) {
2318 2318 cv_destroy(&idm.idm_wd_cv);
2319 2319 cv_destroy(&idm.idm_tgt_svc_cv);
2320 2320 mutex_destroy(&idm.idm_global_mutex);
2321 2321 rw_destroy(&idm.idm_taskid_table_lock);
2322 2322 return (ENOMEM);
2323 2323 }
2324 2324
2325 2325 /* Start watchdog thread */
2326 2326 idm.idm_wd_thread = thread_create(NULL, 0,
2327 2327 idm_wd_thread, NULL, 0, &p0, TS_RUN, minclsyspri);
2328 2328 if (idm.idm_wd_thread == NULL) {
2329 2329 /* Couldn't create the watchdog thread */
2330 2330 taskq_destroy(idm.idm_global_taskq);
2331 2331 cv_destroy(&idm.idm_wd_cv);
2332 2332 cv_destroy(&idm.idm_tgt_svc_cv);
2333 2333 mutex_destroy(&idm.idm_global_mutex);
2334 2334 rw_destroy(&idm.idm_taskid_table_lock);
2335 2335 return (ENOMEM);
2336 2336 }
2337 2337
2338 2338 /* Pause until the watchdog thread is running */
2339 2339 mutex_enter(&idm.idm_global_mutex);
2340 2340 while (!idm.idm_wd_thread_running)
2341 2341 cv_wait(&idm.idm_wd_cv, &idm.idm_global_mutex);
2342 2342 mutex_exit(&idm.idm_global_mutex);
2343 2343
2344 2344 /*
2345 2345 * Allocate the task ID table and set "next" to 0.
2346 2346 */
2347 2347
2348 2348 idm.idm_taskid_max = idm_max_taskids;
2349 2349 idm.idm_taskid_table = (idm_task_t **)
2350 2350 kmem_zalloc(idm.idm_taskid_max * sizeof (idm_task_t *), KM_SLEEP);
2351 2351 idm.idm_taskid_next = 0;
2352 2352
2353 2353 /* Create the global buffer and task kmem caches */
2354 2354 idm.idm_buf_cache = kmem_cache_create("idm_buf_cache",
2355 2355 sizeof (idm_buf_t), 8, NULL, NULL, NULL, NULL, NULL, KM_SLEEP);
2356 2356
2357 2357 /*
2358 2358 * Note, we're explicitly allocating an additional iSER header-
2359 2359 * sized chunk for each of these elements. See idm_task_constructor().
2360 2360 */
2361 2361 idm.idm_task_cache = kmem_cache_create("idm_task_cache",
2362 2362 sizeof (idm_task_t) + IDM_TRANSPORT_HEADER_LENGTH, 8,
2363 2363 &idm_task_constructor, &idm_task_destructor,
2364 2364 NULL, NULL, NULL, KM_SLEEP);
2365 2365
2366 2366 /* Create the service and connection context lists */
2367 2367 list_create(&idm.idm_tgt_svc_list, sizeof (idm_svc_t),
2368 2368 offsetof(idm_svc_t, is_list_node));
2369 2369 list_create(&idm.idm_tgt_conn_list, sizeof (idm_conn_t),
2370 2370 offsetof(idm_conn_t, ic_list_node));
2371 2371 list_create(&idm.idm_ini_conn_list, sizeof (idm_conn_t),
2372 2372 offsetof(idm_conn_t, ic_list_node));
2373 2373
2374 2374 /* Initialize the native sockets transport */
2375 2375 idm_so_init(&idm_transport_list[IDM_TRANSPORT_TYPE_SOCKETS]);
2376 2376
2377 2377 /* Create connection ID pool */
2378 2378 (void) idm_idpool_create(&idm.idm_conn_id_pool);
2379 2379
2380 2380 return (DDI_SUCCESS);
2381 2381 }
2382 2382
/*
 * Module teardown, inverse of _idm_init().  Refuses (EBUSY) while any
 * connection or service context still exists.  Teardown order matters:
 * the watchdog thread is stopped and joined before the taskq, caches
 * and locks it may reference are destroyed.
 */
static int
_idm_fini(void)
{
	/* Fail if any service or connection contexts are still live */
	if (!list_is_empty(&idm.idm_ini_conn_list) ||
	    !list_is_empty(&idm.idm_tgt_conn_list) ||
	    !list_is_empty(&idm.idm_tgt_svc_list)) {
		return (EBUSY);
	}

	/* Ask the watchdog thread to exit and wake it up */
	mutex_enter(&idm.idm_global_mutex);
	idm.idm_wd_thread_running = B_FALSE;
	cv_signal(&idm.idm_wd_cv);
	mutex_exit(&idm.idm_global_mutex);

	/* Wait for the watchdog thread to finish before tearing down */
	thread_join(idm.idm_wd_thread_did);

	idm_idpool_destroy(&idm.idm_conn_id_pool);

	/* Close any LDI handles we have open on transport drivers */
	mutex_enter(&idm.idm_global_mutex);
	idm_transport_teardown();
	mutex_exit(&idm.idm_global_mutex);

	/* Teardown the native sockets transport */
	idm_so_fini();

	/* Release lists, caches, the taskid table, then the locks/CVs */
	list_destroy(&idm.idm_ini_conn_list);
	list_destroy(&idm.idm_tgt_conn_list);
	list_destroy(&idm.idm_tgt_svc_list);
	kmem_cache_destroy(idm.idm_task_cache);
	kmem_cache_destroy(idm.idm_buf_cache);
	kmem_free(idm.idm_taskid_table,
	    idm.idm_taskid_max * sizeof (idm_task_t *));
	mutex_destroy(&idm.idm_global_mutex);
	cv_destroy(&idm.idm_wd_cv);
	cv_destroy(&idm.idm_tgt_svc_cv);
	rw_destroy(&idm.idm_taskid_table_lock);

	return (0);
}
↓ open down ↓ |
2359 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX