1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. 23 */ 24 /* 25 * Copyright 2012, Nexenta Systems, Inc. All rights reserved. 26 * Copyright (c) 2013 by Delphix. All rights reserved. 27 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved. 28 */ 29 30 #include <sys/conf.h> 31 #include <sys/file.h> 32 #include <sys/ddi.h> 33 #include <sys/sunddi.h> 34 #include <sys/modctl.h> 35 #include <sys/scsi/scsi.h> 36 #include <sys/scsi/generic/persist.h> 37 #include <sys/scsi/impl/scsi_reset_notify.h> 38 #include <sys/disp.h> 39 #include <sys/byteorder.h> 40 #include <sys/atomic.h> 41 #include <sys/ethernet.h> 42 #include <sys/sdt.h> 43 #include <sys/nvpair.h> 44 #include <sys/zone.h> 45 #include <sys/id_space.h> 46 47 #include <sys/stmf.h> 48 #include <sys/lpif.h> 49 #include <sys/portif.h> 50 #include <sys/stmf_ioctl.h> 51 #include <sys/pppt_ic_if.h> 52 53 #include "stmf_impl.h" 54 #include "lun_map.h" 55 #include "stmf_state.h" 56 #include "stmf_stats.h" 57 58 /* 59 * Lock order: 60 * stmf_state_lock --> ilport_lock/iss_lockp --> ilu_task_lock 61 */ 62 63 static uint64_t stmf_session_counter = 0; 64 static uint16_t stmf_rtpid_counter = 0; 65 /* start messages at 1 */ 66 static uint64_t stmf_proxy_msg_id = 1; 67 #define MSG_ID_TM_BIT 0x8000000000000000 68 #define ALIGNED_TO_8BYTE_BOUNDARY(i) (((i) + 7) & ~7) 69 70 /* 71 * When stmf_io_deadman_enabled is set to B_TRUE, we check that finishing up 72 * I/O operations on an offlining LU doesn't take longer than stmf_io_deadman 73 * seconds. If it does, we trigger a panic to inform the user of hung I/O 74 * blocking us for too long. 75 */ 76 boolean_t stmf_io_deadman_enabled = B_TRUE; 77 int stmf_io_deadman = 1000; /* seconds */ 78 79 struct stmf_svc_clocks; 80 81 static int stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 82 static int stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 83 static int stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, 84 void **result); 85 static int stmf_open(dev_t *devp, int flag, int otype, cred_t *credp); 86 static int stmf_close(dev_t dev, int flag, int otype, cred_t *credp); 87 static int stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode, 88 cred_t *credp, int *rval); 89 static int stmf_get_stmf_state(stmf_state_desc_t *std); 90 static int stmf_set_stmf_state(stmf_state_desc_t *std); 91 static void stmf_abort_task_offline(scsi_task_t *task, int offline_lu, 92 char *info); 93 static int stmf_set_alua_state(stmf_alua_state_desc_t *alua_state); 94 static void stmf_get_alua_state(stmf_alua_state_desc_t *alua_state); 95 96 static void stmf_task_audit(stmf_i_scsi_task_t *itask, 97 task_audit_event_t te, uint32_t cmd_or_iof, stmf_data_buf_t *dbuf); 98 99 static boolean_t stmf_base16_str_to_binary(char *c, int dplen, uint8_t *dp); 100 static char stmf_ctoi(char c); 101 stmf_xfer_data_t *stmf_prepare_tpgs_data(uint8_t ilu_alua); 102 void stmf_svc_init(); 103 stmf_status_t stmf_svc_fini(); 104 void stmf_svc(void *arg); 105 static void stmf_wait_ilu_tasks_finish(stmf_i_lu_t *ilu); 106 void stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info); 107 static void stmf_svc_kill_obj_requests(void *obj); 108 static void stmf_svc_timeout(struct stmf_svc_clocks *); 109 void stmf_check_freetask(); 110 void stmf_abort_target_reset(scsi_task_t *task); 111 stmf_status_t stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task, 112 int target_reset); 113 void stmf_target_reset_poll(struct scsi_task *task); 114 void stmf_handle_lun_reset(scsi_task_t *task); 115 void stmf_handle_target_reset(scsi_task_t *task); 116 void stmf_xd_to_dbuf(stmf_data_buf_t *dbuf, int set_rel_off); 117 int stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token, 118 uint32_t *err_ret); 119 int stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi); 120 int stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out, 121 uint32_t *err_ret); 122 void stmf_delete_ppd(stmf_pp_data_t *ppd); 123 void stmf_delete_all_ppds(); 124 void stmf_trace_clear(); 125 void stmf_worker_init(); 126 stmf_status_t stmf_worker_fini(); 127 void stmf_worker_mgmt(); 128 void stmf_worker_task(void *arg); 129 static void stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss); 130 static stmf_status_t stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg, 131 uint32_t type); 132 static stmf_status_t stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg); 133 static stmf_status_t stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg); 134 static stmf_status_t stmf_ic_rx_status(stmf_ic_status_msg_t *msg); 135 static stmf_status_t stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg); 136 void stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s); 137 138 /* pppt modhandle */ 139 ddi_modhandle_t pppt_mod; 140 141 /* pppt modload imported functions */ 142 stmf_ic_reg_port_msg_alloc_func_t ic_reg_port_msg_alloc; 143 stmf_ic_dereg_port_msg_alloc_func_t ic_dereg_port_msg_alloc; 144 stmf_ic_reg_lun_msg_alloc_func_t ic_reg_lun_msg_alloc; 145 stmf_ic_dereg_lun_msg_alloc_func_t ic_dereg_lun_msg_alloc; 146 stmf_ic_lun_active_msg_alloc_func_t ic_lun_active_msg_alloc; 147 stmf_ic_scsi_cmd_msg_alloc_func_t ic_scsi_cmd_msg_alloc; 148 stmf_ic_scsi_data_xfer_done_msg_alloc_func_t ic_scsi_data_xfer_done_msg_alloc; 149 stmf_ic_session_create_msg_alloc_func_t ic_session_reg_msg_alloc; 150 stmf_ic_session_destroy_msg_alloc_func_t ic_session_dereg_msg_alloc; 151 stmf_ic_tx_msg_func_t ic_tx_msg; 152 stmf_ic_msg_free_func_t ic_msg_free; 153 154 static void stmf_itl_task_start(stmf_i_scsi_task_t *itask); 155 static void stmf_itl_lu_new_task(stmf_i_scsi_task_t *itask); 156 static void stmf_itl_task_done(stmf_i_scsi_task_t *itask); 157 158 static void stmf_lport_xfer_start(stmf_i_scsi_task_t *itask, 159 stmf_data_buf_t *dbuf); 160 static void stmf_lport_xfer_done(stmf_i_scsi_task_t *itask, 161 stmf_data_buf_t *dbuf); 162 163 static void stmf_update_kstat_lu_q(scsi_task_t *, void()); 164 static void stmf_update_kstat_lport_q(scsi_task_t *, void()); 165 static void stmf_update_kstat_lu_io(scsi_task_t *, stmf_data_buf_t *); 166 static void stmf_update_kstat_lport_io(scsi_task_t *, stmf_data_buf_t *); 167 168 static int stmf_irport_compare(const void *void_irport1, 169 const void *void_irport2); 170 static stmf_i_remote_port_t *stmf_irport_create(scsi_devid_desc_t *rport_devid); 171 static void stmf_irport_destroy(stmf_i_remote_port_t *irport); 172 static stmf_i_remote_port_t *stmf_irport_register( 173 scsi_devid_desc_t *rport_devid); 174 static stmf_i_remote_port_t *stmf_irport_lookup_locked( 175 scsi_devid_desc_t *rport_devid); 176 static void stmf_irport_deregister(stmf_i_remote_port_t *irport); 177 178 extern struct mod_ops mod_driverops; 179 180 /* =====[ Tunables ]===== */ 181 /* Internal tracing */ 182 volatile int stmf_trace_on = 1; 183 volatile int stmf_trace_buf_size = (1 * 1024 * 1024); 184 /* 185 * The reason default task timeout is 75 is because we want the 186 * host to timeout 1st and mostly host timeout is 60 seconds. 187 */ 188 volatile int stmf_default_task_timeout = 75; 189 /* 190 * Setting this to one means, you are responsible for config load and keeping 191 * things in sync with persistent database. 192 */ 193 volatile int stmf_allow_modunload = 0; 194 195 volatile int stmf_max_nworkers = 256; 196 volatile int stmf_min_nworkers = 4; 197 volatile int stmf_worker_scale_down_delay = 20; 198 199 /* === [ Debugging and fault injection ] === */ 200 #ifdef DEBUG 201 volatile uint32_t stmf_drop_task_counter = 0; 202 volatile uint32_t stmf_drop_buf_counter = 0; 203 204 #endif 205 206 stmf_state_t stmf_state; 207 static stmf_lu_t *dlun0; 208 209 static uint8_t stmf_first_zero[] = 210 { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff }; 211 static uint8_t stmf_first_one[] = 212 { 0xff, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 }; 213 214 static kmutex_t trace_buf_lock; 215 static int trace_buf_size; 216 static int trace_buf_curndx; 217 caddr_t stmf_trace_buf; 218 219 static enum { 220 STMF_WORKERS_DISABLED = 0, 221 STMF_WORKERS_ENABLING, 222 STMF_WORKERS_ENABLED 223 } stmf_workers_state = STMF_WORKERS_DISABLED; 224 static int stmf_i_max_nworkers; 225 static int stmf_i_min_nworkers; 226 static int stmf_nworkers_cur; /* # of workers currently running */ 227 static int stmf_nworkers_needed; /* # of workers need to be running */ 228 static int stmf_worker_sel_counter = 0; 229 static uint32_t stmf_cur_ntasks = 0; 230 static clock_t stmf_wm_last = 0; 231 /* 232 * This is equal to stmf_nworkers_cur while we are increasing # workers and 233 * stmf_nworkers_needed while we are decreasing the worker count. 234 */ 235 static int stmf_nworkers_accepting_cmds; 236 static stmf_worker_t *stmf_workers = NULL; 237 static clock_t stmf_worker_mgmt_delay = 2; 238 static clock_t stmf_worker_scale_down_timer = 0; 239 static int stmf_worker_scale_down_qd = 0; 240 241 static struct cb_ops stmf_cb_ops = { 242 stmf_open, /* open */ 243 stmf_close, /* close */ 244 nodev, /* strategy */ 245 nodev, /* print */ 246 nodev, /* dump */ 247 nodev, /* read */ 248 nodev, /* write */ 249 stmf_ioctl, /* ioctl */ 250 nodev, /* devmap */ 251 nodev, /* mmap */ 252 nodev, /* segmap */ 253 nochpoll, /* chpoll */ 254 ddi_prop_op, /* cb_prop_op */ 255 0, /* streamtab */ 256 D_NEW | D_MP, /* cb_flag */ 257 CB_REV, /* rev */ 258 nodev, /* aread */ 259 nodev /* awrite */ 260 }; 261 262 static struct dev_ops stmf_ops = { 263 DEVO_REV, 264 0, 265 stmf_getinfo, 266 nulldev, /* identify */ 267 nulldev, /* probe */ 268 stmf_attach, 269 stmf_detach, 270 nodev, /* reset */ 271 &stmf_cb_ops, 272 NULL, /* bus_ops */ 273 NULL /* power */ 274 }; 275 276 #define STMF_NAME "COMSTAR STMF" 277 #define STMF_MODULE_NAME "stmf" 278 279 static struct modldrv modldrv = { 280 &mod_driverops, 281 STMF_NAME, 282 &stmf_ops 283 }; 284 285 static struct modlinkage modlinkage = { 286 MODREV_1, 287 { &modldrv, NULL } 288 }; 289 290 int 291 _init(void) 292 { 293 int ret; 294 295 ret = mod_install(&modlinkage); 296 if (ret) 297 return (ret); 298 stmf_trace_buf = kmem_zalloc(stmf_trace_buf_size, KM_SLEEP); 299 trace_buf_size = stmf_trace_buf_size; 300 trace_buf_curndx = 0; 301 mutex_init(&trace_buf_lock, NULL, MUTEX_DRIVER, 0); 302 bzero(&stmf_state, sizeof (stmf_state_t)); 303 /* STMF service is off by default */ 304 stmf_state.stmf_service_running = 0; 305 /* default lu/lport states are online */ 306 stmf_state.stmf_default_lu_state = STMF_STATE_ONLINE; 307 stmf_state.stmf_default_lport_state = STMF_STATE_ONLINE; 308 mutex_init(&stmf_state.stmf_lock, NULL, MUTEX_DRIVER, NULL); 309 cv_init(&stmf_state.stmf_cv, NULL, CV_DRIVER, NULL); 310 stmf_session_counter = (uint64_t)ddi_get_lbolt(); 311 avl_create(&stmf_state.stmf_irportlist, 312 stmf_irport_compare, sizeof (stmf_i_remote_port_t), 313 offsetof(stmf_i_remote_port_t, irport_ln)); 314 stmf_state.stmf_ilport_inst_space = 315 id_space_create("lport-instances", 0, MAX_ILPORT); 316 stmf_state.stmf_irport_inst_space = 317 id_space_create("rport-instances", 0, MAX_IRPORT); 318 stmf_view_init(); 319 stmf_svc_init(); 320 stmf_dlun_init(); 321 return (ret); 322 } 323 324 int 325 _fini(void) 326 { 327 int ret; 328 stmf_i_remote_port_t *irport; 329 void *avl_dest_cookie = NULL; 330 331 if (stmf_state.stmf_service_running) 332 return (EBUSY); 333 if ((!stmf_allow_modunload) && 334 (stmf_state.stmf_config_state != STMF_CONFIG_NONE)) { 335 return (EBUSY); 336 } 337 if (stmf_state.stmf_nlps || stmf_state.stmf_npps) { 338 return (EBUSY); 339 } 340 if (stmf_dlun_fini() != STMF_SUCCESS) 341 return (EBUSY); 342 if (stmf_worker_fini() != STMF_SUCCESS) { 343 stmf_dlun_init(); 344 return (EBUSY); 345 } 346 if (stmf_svc_fini() != STMF_SUCCESS) { 347 stmf_dlun_init(); 348 stmf_worker_init(); 349 return (EBUSY); 350 } 351 352 ret = mod_remove(&modlinkage); 353 if (ret) { 354 stmf_svc_init(); 355 stmf_dlun_init(); 356 stmf_worker_init(); 357 return (ret); 358 } 359 360 stmf_view_clear_config(); 361 362 while ((irport = avl_destroy_nodes(&stmf_state.stmf_irportlist, 363 &avl_dest_cookie)) != NULL) 364 stmf_irport_destroy(irport); 365 avl_destroy(&stmf_state.stmf_irportlist); 366 id_space_destroy(stmf_state.stmf_ilport_inst_space); 367 id_space_destroy(stmf_state.stmf_irport_inst_space); 368 369 kmem_free(stmf_trace_buf, stmf_trace_buf_size); 370 mutex_destroy(&trace_buf_lock); 371 mutex_destroy(&stmf_state.stmf_lock); 372 cv_destroy(&stmf_state.stmf_cv); 373 return (ret); 374 } 375 376 int 377 _info(struct modinfo *modinfop) 378 { 379 return (mod_info(&modlinkage, modinfop)); 380 } 381 382 /* ARGSUSED */ 383 static int 384 stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result) 385 { 386 switch (cmd) { 387 case DDI_INFO_DEVT2DEVINFO: 388 *result = stmf_state.stmf_dip; 389 break; 390 case DDI_INFO_DEVT2INSTANCE: 391 *result = 392 (void *)(uintptr_t)ddi_get_instance(stmf_state.stmf_dip); 393 break; 394 default: 395 return (DDI_FAILURE); 396 } 397 398 return (DDI_SUCCESS); 399 } 400 401 static int 402 stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 403 { 404 switch (cmd) { 405 case DDI_ATTACH: 406 stmf_state.stmf_dip = dip; 407 408 if (ddi_create_minor_node(dip, "admin", S_IFCHR, 0, 409 DDI_NT_STMF, 0) != DDI_SUCCESS) { 410 break; 411 } 412 ddi_report_dev(dip); 413 return (DDI_SUCCESS); 414 } 415 416 return (DDI_FAILURE); 417 } 418 419 static int 420 stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 421 { 422 switch (cmd) { 423 case DDI_DETACH: 424 ddi_remove_minor_node(dip, 0); 425 return (DDI_SUCCESS); 426 } 427 428 return (DDI_FAILURE); 429 } 430 431 /* ARGSUSED */ 432 static int 433 stmf_open(dev_t *devp, int flag, int otype, cred_t *credp) 434 { 435 mutex_enter(&stmf_state.stmf_lock); 436 if (stmf_state.stmf_exclusive_open) { 437 mutex_exit(&stmf_state.stmf_lock); 438 return (EBUSY); 439 } 440 if (flag & FEXCL) { 441 if (stmf_state.stmf_opened) { 442 mutex_exit(&stmf_state.stmf_lock); 443 return (EBUSY); 444 } 445 stmf_state.stmf_exclusive_open = 1; 446 } 447 stmf_state.stmf_opened = 1; 448 mutex_exit(&stmf_state.stmf_lock); 449 return (0); 450 } 451 452 /* ARGSUSED */ 453 static int 454 stmf_close(dev_t dev, int flag, int otype, cred_t *credp) 455 { 456 mutex_enter(&stmf_state.stmf_lock); 457 stmf_state.stmf_opened = 0; 458 if (stmf_state.stmf_exclusive_open && 459 (stmf_state.stmf_config_state != STMF_CONFIG_INIT_DONE)) { 460 stmf_state.stmf_config_state = STMF_CONFIG_NONE; 461 stmf_delete_all_ppds(); 462 stmf_view_clear_config(); 463 stmf_view_init(); 464 } 465 stmf_state.stmf_exclusive_open = 0; 466 mutex_exit(&stmf_state.stmf_lock); 467 return (0); 468 } 469 470 int 471 stmf_copyin_iocdata(intptr_t data, int mode, stmf_iocdata_t **iocd, 472 void **ibuf, void **obuf) 473 { 474 int ret; 475 476 *ibuf = NULL; 477 *obuf = NULL; 478 *iocd = kmem_zalloc(sizeof (stmf_iocdata_t), KM_SLEEP); 479 480 ret = ddi_copyin((void *)data, *iocd, sizeof (stmf_iocdata_t), mode); 481 if (ret) 482 return (EFAULT); 483 if ((*iocd)->stmf_version != STMF_VERSION_1) { 484 ret = EINVAL; 485 goto copyin_iocdata_done; 486 } 487 if ((*iocd)->stmf_ibuf_size) { 488 *ibuf = kmem_zalloc((*iocd)->stmf_ibuf_size, KM_SLEEP); 489 ret = ddi_copyin((void *)((unsigned long)(*iocd)->stmf_ibuf), 490 *ibuf, (*iocd)->stmf_ibuf_size, mode); 491 } 492 if ((*iocd)->stmf_obuf_size) 493 *obuf = kmem_zalloc((*iocd)->stmf_obuf_size, KM_SLEEP); 494 495 if (ret == 0) 496 return (0); 497 ret = EFAULT; 498 copyin_iocdata_done:; 499 if (*obuf) { 500 kmem_free(*obuf, (*iocd)->stmf_obuf_size); 501 *obuf = NULL; 502 } 503 if (*ibuf) { 504 kmem_free(*ibuf, (*iocd)->stmf_ibuf_size); 505 *ibuf = NULL; 506 } 507 kmem_free(*iocd, sizeof (stmf_iocdata_t)); 508 return (ret); 509 } 510 511 int 512 stmf_copyout_iocdata(intptr_t data, int mode, stmf_iocdata_t *iocd, void *obuf) 513 { 514 int ret; 515 516 if (iocd->stmf_obuf_size) { 517 ret = ddi_copyout(obuf, (void *)(unsigned long)iocd->stmf_obuf, 518 iocd->stmf_obuf_size, mode); 519 if (ret) 520 return (EFAULT); 521 } 522 ret = ddi_copyout(iocd, (void *)data, sizeof (stmf_iocdata_t), mode); 523 if (ret) 524 return (EFAULT); 525 return (0); 526 } 527 528 /* ARGSUSED */ 529 static int 530 stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode, 531 cred_t *credp, int *rval) 532 { 533 stmf_iocdata_t *iocd; 534 void *ibuf = NULL, *obuf = NULL; 535 slist_lu_t *luid_list; 536 slist_target_port_t *lportid_list; 537 stmf_i_lu_t *ilu; 538 stmf_i_local_port_t *ilport; 539 stmf_i_scsi_session_t *iss; 540 slist_scsi_session_t *iss_list; 541 sioc_lu_props_t *lup; 542 sioc_target_port_props_t *lportp; 543 stmf_ppioctl_data_t *ppi, *ppi_out = NULL; 544 uint64_t *ppi_token = NULL; 545 uint8_t *p_id, *id; 546 stmf_state_desc_t *std; 547 stmf_status_t ctl_ret; 548 stmf_state_change_info_t ssi; 549 int ret = 0; 550 uint32_t n; 551 int i; 552 stmf_group_op_data_t *grp_entry; 553 stmf_group_name_t *grpname; 554 stmf_view_op_entry_t *ve; 555 stmf_id_type_t idtype; 556 stmf_id_data_t *id_entry; 557 stmf_id_list_t *id_list; 558 stmf_view_entry_t *view_entry; 559 stmf_set_props_t *stmf_set_props; 560 uint32_t veid; 561 if ((cmd & 0xff000000) != STMF_IOCTL) { 562 return (ENOTTY); 563 } 564 565 if (drv_priv(credp) != 0) { 566 return (EPERM); 567 } 568 569 ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf); 570 if (ret) 571 return (ret); 572 iocd->stmf_error = 0; 573 574 switch (cmd) { 575 case STMF_IOCTL_LU_LIST: 576 /* retrieves both registered/unregistered */ 577 mutex_enter(&stmf_state.stmf_lock); 578 id_list = &stmf_state.stmf_luid_list; 579 n = min(id_list->id_count, 580 (iocd->stmf_obuf_size)/sizeof (slist_lu_t)); 581 iocd->stmf_obuf_max_nentries = id_list->id_count; 582 luid_list = (slist_lu_t *)obuf; 583 id_entry = id_list->idl_head; 584 for (i = 0; i < n; i++) { 585 bcopy(id_entry->id_data, luid_list[i].lu_guid, 16); 586 id_entry = id_entry->id_next; 587 } 588 589 n = iocd->stmf_obuf_size/sizeof (slist_lu_t); 590 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) { 591 id = (uint8_t *)ilu->ilu_lu->lu_id; 592 if (stmf_lookup_id(id_list, 16, id + 4) == NULL) { 593 iocd->stmf_obuf_max_nentries++; 594 if (i < n) { 595 bcopy(id + 4, luid_list[i].lu_guid, 596 sizeof (slist_lu_t)); 597 i++; 598 } 599 } 600 } 601 iocd->stmf_obuf_nentries = i; 602 mutex_exit(&stmf_state.stmf_lock); 603 break; 604 605 case STMF_IOCTL_REG_LU_LIST: 606 mutex_enter(&stmf_state.stmf_lock); 607 iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlus; 608 n = min(stmf_state.stmf_nlus, 609 (iocd->stmf_obuf_size)/sizeof (slist_lu_t)); 610 iocd->stmf_obuf_nentries = n; 611 ilu = stmf_state.stmf_ilulist; 612 luid_list = (slist_lu_t *)obuf; 613 for (i = 0; i < n; i++) { 614 uint8_t *id; 615 id = (uint8_t *)ilu->ilu_lu->lu_id; 616 bcopy(id + 4, luid_list[i].lu_guid, 16); 617 ilu = ilu->ilu_next; 618 } 619 mutex_exit(&stmf_state.stmf_lock); 620 break; 621 622 case STMF_IOCTL_VE_LU_LIST: 623 mutex_enter(&stmf_state.stmf_lock); 624 id_list = &stmf_state.stmf_luid_list; 625 n = min(id_list->id_count, 626 (iocd->stmf_obuf_size)/sizeof (slist_lu_t)); 627 iocd->stmf_obuf_max_nentries = id_list->id_count; 628 iocd->stmf_obuf_nentries = n; 629 luid_list = (slist_lu_t *)obuf; 630 id_entry = id_list->idl_head; 631 for (i = 0; i < n; i++) { 632 bcopy(id_entry->id_data, luid_list[i].lu_guid, 16); 633 id_entry = id_entry->id_next; 634 } 635 mutex_exit(&stmf_state.stmf_lock); 636 break; 637 638 case STMF_IOCTL_TARGET_PORT_LIST: 639 mutex_enter(&stmf_state.stmf_lock); 640 iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlports; 641 n = min(stmf_state.stmf_nlports, 642 (iocd->stmf_obuf_size)/sizeof (slist_target_port_t)); 643 iocd->stmf_obuf_nentries = n; 644 ilport = stmf_state.stmf_ilportlist; 645 lportid_list = (slist_target_port_t *)obuf; 646 for (i = 0; i < n; i++) { 647 uint8_t *id; 648 id = (uint8_t *)ilport->ilport_lport->lport_id; 649 bcopy(id, lportid_list[i].target, id[3] + 4); 650 ilport = ilport->ilport_next; 651 } 652 mutex_exit(&stmf_state.stmf_lock); 653 break; 654 655 case STMF_IOCTL_SESSION_LIST: 656 p_id = (uint8_t *)ibuf; 657 if ((p_id == NULL) || (iocd->stmf_ibuf_size < 4) || 658 (iocd->stmf_ibuf_size < (p_id[3] + 4))) { 659 ret = EINVAL; 660 break; 661 } 662 mutex_enter(&stmf_state.stmf_lock); 663 for (ilport = stmf_state.stmf_ilportlist; ilport; ilport = 664 ilport->ilport_next) { 665 uint8_t *id; 666 id = (uint8_t *)ilport->ilport_lport->lport_id; 667 if ((p_id[3] == id[3]) && 668 (bcmp(p_id + 4, id + 4, id[3]) == 0)) { 669 break; 670 } 671 } 672 if (ilport == NULL) { 673 mutex_exit(&stmf_state.stmf_lock); 674 ret = ENOENT; 675 break; 676 } 677 iocd->stmf_obuf_max_nentries = ilport->ilport_nsessions; 678 n = min(ilport->ilport_nsessions, 679 (iocd->stmf_obuf_size)/sizeof (slist_scsi_session_t)); 680 iocd->stmf_obuf_nentries = n; 681 iss = ilport->ilport_ss_list; 682 iss_list = (slist_scsi_session_t *)obuf; 683 for (i = 0; i < n; i++) { 684 uint8_t *id; 685 id = (uint8_t *)iss->iss_ss->ss_rport_id; 686 bcopy(id, iss_list[i].initiator, id[3] + 4); 687 iss_list[i].creation_time = (uint32_t) 688 iss->iss_creation_time; 689 if (iss->iss_ss->ss_rport_alias) { 690 (void) strncpy(iss_list[i].alias, 691 iss->iss_ss->ss_rport_alias, 255); 692 iss_list[i].alias[255] = 0; 693 } else { 694 iss_list[i].alias[0] = 0; 695 } 696 iss = iss->iss_next; 697 } 698 mutex_exit(&stmf_state.stmf_lock); 699 break; 700 701 case STMF_IOCTL_GET_LU_PROPERTIES: 702 p_id = (uint8_t *)ibuf; 703 if ((iocd->stmf_ibuf_size < 16) || 704 (iocd->stmf_obuf_size < sizeof (sioc_lu_props_t)) || 705 (p_id[0] == 0)) { 706 ret = EINVAL; 707 break; 708 } 709 mutex_enter(&stmf_state.stmf_lock); 710 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) { 711 if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0) 712 break; 713 } 714 if (ilu == NULL) { 715 mutex_exit(&stmf_state.stmf_lock); 716 ret = ENOENT; 717 break; 718 } 719 lup = (sioc_lu_props_t *)obuf; 720 bcopy(ilu->ilu_lu->lu_id->ident, lup->lu_guid, 16); 721 lup->lu_state = ilu->ilu_state & 0x0f; 722 lup->lu_present = 1; /* XXX */ 723 (void) strncpy(lup->lu_provider_name, 724 ilu->ilu_lu->lu_lp->lp_name, 255); 725 lup->lu_provider_name[254] = 0; 726 if (ilu->ilu_lu->lu_alias) { 727 (void) strncpy(lup->lu_alias, 728 ilu->ilu_lu->lu_alias, 255); 729 lup->lu_alias[255] = 0; 730 } else { 731 lup->lu_alias[0] = 0; 732 } 733 mutex_exit(&stmf_state.stmf_lock); 734 break; 735 736 case STMF_IOCTL_GET_TARGET_PORT_PROPERTIES: 737 p_id = (uint8_t *)ibuf; 738 if ((p_id == NULL) || 739 (iocd->stmf_ibuf_size < (p_id[3] + 4)) || 740 (iocd->stmf_obuf_size < 741 sizeof (sioc_target_port_props_t))) { 742 ret = EINVAL; 743 break; 744 } 745 mutex_enter(&stmf_state.stmf_lock); 746 for (ilport = stmf_state.stmf_ilportlist; ilport; 747 ilport = ilport->ilport_next) { 748 uint8_t *id; 749 id = (uint8_t *)ilport->ilport_lport->lport_id; 750 if ((p_id[3] == id[3]) && 751 (bcmp(p_id+4, id+4, id[3]) == 0)) 752 break; 753 } 754 if (ilport == NULL) { 755 mutex_exit(&stmf_state.stmf_lock); 756 ret = ENOENT; 757 break; 758 } 759 lportp = (sioc_target_port_props_t *)obuf; 760 bcopy(ilport->ilport_lport->lport_id, lportp->tgt_id, 761 ilport->ilport_lport->lport_id->ident_length + 4); 762 lportp->tgt_state = ilport->ilport_state & 0x0f; 763 lportp->tgt_present = 1; /* XXX */ 764 (void) strncpy(lportp->tgt_provider_name, 765 ilport->ilport_lport->lport_pp->pp_name, 255); 766 lportp->tgt_provider_name[254] = 0; 767 if (ilport->ilport_lport->lport_alias) { 768 (void) strncpy(lportp->tgt_alias, 769 ilport->ilport_lport->lport_alias, 255); 770 lportp->tgt_alias[255] = 0; 771 } else { 772 lportp->tgt_alias[0] = 0; 773 } 774 mutex_exit(&stmf_state.stmf_lock); 775 break; 776 777 case STMF_IOCTL_SET_STMF_STATE: 778 if ((ibuf == NULL) || 779 (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) { 780 ret = EINVAL; 781 break; 782 } 783 ret = stmf_set_stmf_state((stmf_state_desc_t *)ibuf); 784 break; 785 786 case STMF_IOCTL_GET_STMF_STATE: 787 if ((obuf == NULL) || 788 (iocd->stmf_obuf_size < sizeof (stmf_state_desc_t))) { 789 ret = EINVAL; 790 break; 791 } 792 ret = stmf_get_stmf_state((stmf_state_desc_t *)obuf); 793 break; 794 795 case STMF_IOCTL_SET_ALUA_STATE: 796 if ((ibuf == NULL) || 797 (iocd->stmf_ibuf_size < sizeof (stmf_alua_state_desc_t))) { 798 ret = EINVAL; 799 break; 800 } 801 ret = stmf_set_alua_state((stmf_alua_state_desc_t *)ibuf); 802 break; 803 804 case STMF_IOCTL_GET_ALUA_STATE: 805 if ((obuf == NULL) || 806 (iocd->stmf_obuf_size < sizeof (stmf_alua_state_desc_t))) { 807 ret = EINVAL; 808 break; 809 } 810 stmf_get_alua_state((stmf_alua_state_desc_t *)obuf); 811 break; 812 813 case STMF_IOCTL_SET_LU_STATE: 814 ssi.st_rflags = STMF_RFLAG_USER_REQUEST; 815 ssi.st_additional_info = NULL; 816 std = (stmf_state_desc_t *)ibuf; 817 if ((ibuf == NULL) || 818 (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) { 819 ret = EINVAL; 820 break; 821 } 822 p_id = std->ident; 823 mutex_enter(&stmf_state.stmf_lock); 824 if (stmf_state.stmf_inventory_locked) { 825 mutex_exit(&stmf_state.stmf_lock); 826 ret = EBUSY; 827 break; 828 } 829 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) { 830 if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0) 831 break; 832 } 833 if (ilu == NULL) { 834 mutex_exit(&stmf_state.stmf_lock); 835 ret = ENOENT; 836 break; 837 } 838 stmf_state.stmf_inventory_locked = 1; 839 mutex_exit(&stmf_state.stmf_lock); 840 cmd = (std->state == STMF_STATE_ONLINE) ? STMF_CMD_LU_ONLINE : 841 STMF_CMD_LU_OFFLINE; 842 ctl_ret = stmf_ctl(cmd, (void *)ilu->ilu_lu, &ssi); 843 if (ctl_ret == STMF_ALREADY) 844 ret = 0; 845 else if (ctl_ret == STMF_BUSY) 846 ret = EBUSY; 847 else if (ctl_ret != STMF_SUCCESS) 848 ret = EIO; 849 mutex_enter(&stmf_state.stmf_lock); 850 stmf_state.stmf_inventory_locked = 0; 851 mutex_exit(&stmf_state.stmf_lock); 852 break; 853 854 case STMF_IOCTL_SET_STMF_PROPS: 855 if ((ibuf == NULL) || 856 (iocd->stmf_ibuf_size < sizeof (stmf_set_props_t))) { 857 ret = EINVAL; 858 break; 859 } 860 stmf_set_props = (stmf_set_props_t *)ibuf; 861 mutex_enter(&stmf_state.stmf_lock); 862 if ((stmf_set_props->default_lu_state_value == 863 STMF_STATE_OFFLINE) || 864 (stmf_set_props->default_lu_state_value == 865 STMF_STATE_ONLINE)) { 866 stmf_state.stmf_default_lu_state = 867 stmf_set_props->default_lu_state_value; 868 } 869 if ((stmf_set_props->default_target_state_value == 870 STMF_STATE_OFFLINE) || 871 (stmf_set_props->default_target_state_value == 872 STMF_STATE_ONLINE)) { 873 stmf_state.stmf_default_lport_state = 874 stmf_set_props->default_target_state_value; 875 } 876 877 mutex_exit(&stmf_state.stmf_lock); 878 break; 879 880 case STMF_IOCTL_SET_TARGET_PORT_STATE: 881 ssi.st_rflags = STMF_RFLAG_USER_REQUEST; 882 ssi.st_additional_info = NULL; 883 std = (stmf_state_desc_t *)ibuf; 884 if ((ibuf == NULL) || 885 (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) { 886 ret = EINVAL; 887 break; 888 } 889 p_id = std->ident; 890 mutex_enter(&stmf_state.stmf_lock); 891 if (stmf_state.stmf_inventory_locked) { 892 mutex_exit(&stmf_state.stmf_lock); 893 ret = EBUSY; 894 break; 895 } 896 for (ilport = stmf_state.stmf_ilportlist; ilport; 897 ilport = ilport->ilport_next) { 898 uint8_t *id; 899 id = (uint8_t *)ilport->ilport_lport->lport_id; 900 if ((id[3] == p_id[3]) && 901 (bcmp(id+4, p_id+4, id[3]) == 0)) { 902 break; 903 } 904 } 905 if (ilport == NULL) { 906 mutex_exit(&stmf_state.stmf_lock); 907 ret = ENOENT; 908 break; 909 } 910 stmf_state.stmf_inventory_locked = 1; 911 mutex_exit(&stmf_state.stmf_lock); 912 cmd = (std->state == STMF_STATE_ONLINE) ? 913 STMF_CMD_LPORT_ONLINE : STMF_CMD_LPORT_OFFLINE; 914 ctl_ret = stmf_ctl(cmd, (void *)ilport->ilport_lport, &ssi); 915 if (ctl_ret == STMF_ALREADY) 916 ret = 0; 917 else if (ctl_ret == STMF_BUSY) 918 ret = EBUSY; 919 else if (ctl_ret != STMF_SUCCESS) 920 ret = EIO; 921 mutex_enter(&stmf_state.stmf_lock); 922 stmf_state.stmf_inventory_locked = 0; 923 mutex_exit(&stmf_state.stmf_lock); 924 break; 925 926 case STMF_IOCTL_ADD_HG_ENTRY: 927 idtype = STMF_ID_TYPE_HOST; 928 /* FALLTHROUGH */ 929 case STMF_IOCTL_ADD_TG_ENTRY: 930 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 931 ret = EACCES; 932 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 933 break; 934 } 935 if (cmd == STMF_IOCTL_ADD_TG_ENTRY) { 936 idtype = STMF_ID_TYPE_TARGET; 937 } 938 grp_entry = (stmf_group_op_data_t *)ibuf; 939 if ((ibuf == NULL) || 940 (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) { 941 ret = EINVAL; 942 break; 943 } 944 if (grp_entry->group.name[0] == '*') { 945 ret = EINVAL; 946 break; /* not allowed */ 947 } 948 mutex_enter(&stmf_state.stmf_lock); 949 ret = stmf_add_group_member(grp_entry->group.name, 950 grp_entry->group.name_size, 951 grp_entry->ident + 4, 952 grp_entry->ident[3], 953 idtype, 954 &iocd->stmf_error); 955 mutex_exit(&stmf_state.stmf_lock); 956 break; 957 case STMF_IOCTL_REMOVE_HG_ENTRY: 958 idtype = STMF_ID_TYPE_HOST; 959 /* FALLTHROUGH */ 960 case STMF_IOCTL_REMOVE_TG_ENTRY: 961 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 962 ret = EACCES; 963 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 964 break; 965 } 966 if (cmd == STMF_IOCTL_REMOVE_TG_ENTRY) { 967 idtype = STMF_ID_TYPE_TARGET; 968 } 969 grp_entry = (stmf_group_op_data_t *)ibuf; 970 if ((ibuf == NULL) || 971 (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) { 972 ret = EINVAL; 973 break; 974 } 975 if (grp_entry->group.name[0] == '*') { 976 ret = EINVAL; 977 break; /* not allowed */ 978 } 979 mutex_enter(&stmf_state.stmf_lock); 980 ret = stmf_remove_group_member(grp_entry->group.name, 981 grp_entry->group.name_size, 982 grp_entry->ident + 4, 983 grp_entry->ident[3], 984 idtype, 985 &iocd->stmf_error); 986 mutex_exit(&stmf_state.stmf_lock); 987 break; 988 case STMF_IOCTL_CREATE_HOST_GROUP: 989 idtype = STMF_ID_TYPE_HOST_GROUP; 990 /* FALLTHROUGH */ 991 case STMF_IOCTL_CREATE_TARGET_GROUP: 992 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 993 ret = EACCES; 994 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 995 break; 996 } 997 grpname = (stmf_group_name_t *)ibuf; 998 999 if (cmd == STMF_IOCTL_CREATE_TARGET_GROUP) 1000 idtype = STMF_ID_TYPE_TARGET_GROUP; 1001 if ((ibuf == NULL) || 1002 (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) { 1003 ret = EINVAL; 1004 break; 1005 } 1006 if (grpname->name[0] == '*') { 1007 ret = EINVAL; 1008 break; /* not allowed */ 1009 } 1010 mutex_enter(&stmf_state.stmf_lock); 1011 ret = stmf_add_group(grpname->name, 1012 grpname->name_size, idtype, &iocd->stmf_error); 1013 mutex_exit(&stmf_state.stmf_lock); 1014 break; 1015 case STMF_IOCTL_REMOVE_HOST_GROUP: 1016 idtype = STMF_ID_TYPE_HOST_GROUP; 1017 /* FALLTHROUGH */ 1018 case STMF_IOCTL_REMOVE_TARGET_GROUP: 1019 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 1020 ret = EACCES; 1021 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 1022 break; 1023 } 1024 grpname = (stmf_group_name_t *)ibuf; 1025 if (cmd == STMF_IOCTL_REMOVE_TARGET_GROUP) 1026 idtype = STMF_ID_TYPE_TARGET_GROUP; 1027 if ((ibuf == NULL) || 1028 (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) { 1029 ret = EINVAL; 1030 break; 1031 } 1032 if (grpname->name[0] == '*') { 1033 ret = EINVAL; 1034 break; /* not allowed */ 1035 } 1036 mutex_enter(&stmf_state.stmf_lock); 1037 ret = stmf_remove_group(grpname->name, 1038 grpname->name_size, idtype, &iocd->stmf_error); 1039 mutex_exit(&stmf_state.stmf_lock); 1040 break; 1041 case STMF_IOCTL_VALIDATE_VIEW: 1042 case STMF_IOCTL_ADD_VIEW_ENTRY: 1043 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 1044 ret = EACCES; 1045 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 1046 break; 1047 } 1048 ve = (stmf_view_op_entry_t *)ibuf; 1049 if ((ibuf == NULL) || 1050 (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) { 1051 ret = EINVAL; 1052 break; 1053 } 1054 if (!ve->ve_lu_number_valid) 1055 ve->ve_lu_nbr[2] = 0xFF; 1056 if (ve->ve_all_hosts) { 1057 ve->ve_host_group.name[0] = '*'; 1058 ve->ve_host_group.name_size = 1; 1059 } 1060 if (ve->ve_all_targets) { 1061 ve->ve_target_group.name[0] = '*'; 1062 ve->ve_target_group.name_size = 1; 1063 } 1064 if (ve->ve_ndx_valid) 1065 veid = ve->ve_ndx; 1066 else 1067 veid = 0xffffffff; 1068 mutex_enter(&stmf_state.stmf_lock); 1069 if (cmd == STMF_IOCTL_ADD_VIEW_ENTRY) { 1070 ret = stmf_add_ve(ve->ve_host_group.name, 1071 ve->ve_host_group.name_size, 1072 ve->ve_target_group.name, 1073 ve->ve_target_group.name_size, 1074 ve->ve_guid, 1075 &veid, 1076 ve->ve_lu_nbr, 1077 &iocd->stmf_error); 1078 } else { /* STMF_IOCTL_VALIDATE_VIEW */ 1079 ret = stmf_validate_lun_ve(ve->ve_host_group.name, 1080 ve->ve_host_group.name_size, 1081 ve->ve_target_group.name, 1082 ve->ve_target_group.name_size, 1083 ve->ve_lu_nbr, 1084 &iocd->stmf_error); 1085 } 1086 mutex_exit(&stmf_state.stmf_lock); 1087 if (ret == 0 && 1088 (!ve->ve_ndx_valid || !ve->ve_lu_number_valid) && 1089 iocd->stmf_obuf_size >= sizeof (stmf_view_op_entry_t)) { 1090 stmf_view_op_entry_t *ve_ret = 1091 (stmf_view_op_entry_t *)obuf; 1092 iocd->stmf_obuf_nentries = 1; 1093 iocd->stmf_obuf_max_nentries = 1; 1094 if (!ve->ve_ndx_valid) { 1095 ve_ret->ve_ndx = veid; 1096 ve_ret->ve_ndx_valid = 1; 1097 } 1098 if (!ve->ve_lu_number_valid) { 1099 ve_ret->ve_lu_number_valid = 1; 1100 bcopy(ve->ve_lu_nbr, ve_ret->ve_lu_nbr, 8); 1101 } 1102 } 1103 break; 1104 case STMF_IOCTL_REMOVE_VIEW_ENTRY: 1105 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 1106 ret = EACCES; 1107 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 1108 break; 1109 } 1110 ve = (stmf_view_op_entry_t *)ibuf; 1111 if ((ibuf == NULL) || 1112 (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) { 1113 ret = EINVAL; 1114 break; 1115 } 1116 if (!ve->ve_ndx_valid) { 1117 ret = EINVAL; 1118 break; 1119 } 1120 mutex_enter(&stmf_state.stmf_lock); 1121 ret = stmf_remove_ve_by_id(ve->ve_guid, ve->ve_ndx, 1122 &iocd->stmf_error); 1123 mutex_exit(&stmf_state.stmf_lock); 1124 break; 1125 case STMF_IOCTL_GET_HG_LIST: 1126 id_list = &stmf_state.stmf_hg_list; 1127 /* FALLTHROUGH */ 1128 case STMF_IOCTL_GET_TG_LIST: 1129 if (cmd == STMF_IOCTL_GET_TG_LIST) 1130 id_list = &stmf_state.stmf_tg_list; 1131 mutex_enter(&stmf_state.stmf_lock); 1132 iocd->stmf_obuf_max_nentries = id_list->id_count; 1133 n = min(id_list->id_count, 1134 (iocd->stmf_obuf_size)/sizeof (stmf_group_name_t)); 1135 iocd->stmf_obuf_nentries = n; 1136 id_entry = id_list->idl_head; 1137 grpname = (stmf_group_name_t *)obuf; 1138 for (i = 0; i < n; i++) { 1139 if (id_entry->id_data[0] == '*') { 1140 if (iocd->stmf_obuf_nentries > 0) { 1141 iocd->stmf_obuf_nentries--; 1142 } 1143 id_entry = id_entry->id_next; 1144 continue; 1145 } 1146 grpname->name_size = id_entry->id_data_size; 1147 bcopy(id_entry->id_data, grpname->name, 1148 id_entry->id_data_size); 1149 grpname++; 1150 id_entry = id_entry->id_next; 1151 } 1152 mutex_exit(&stmf_state.stmf_lock); 1153 break; 1154 case STMF_IOCTL_GET_HG_ENTRIES: 1155 id_list = &stmf_state.stmf_hg_list; 1156 /* FALLTHROUGH */ 1157 case STMF_IOCTL_GET_TG_ENTRIES: 1158 grpname = (stmf_group_name_t *)ibuf; 1159 if ((ibuf == NULL) || 1160 (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) { 1161 ret = EINVAL; 1162 break; 1163 } 1164 if (cmd == STMF_IOCTL_GET_TG_ENTRIES) { 1165 id_list = &stmf_state.stmf_tg_list; 1166 } 1167 mutex_enter(&stmf_state.stmf_lock); 1168 id_entry = stmf_lookup_id(id_list, grpname->name_size, 1169 grpname->name); 1170 if (!id_entry) 1171 ret = ENODEV; 1172 else { 1173 stmf_ge_ident_t *grp_entry; 1174 id_list = (stmf_id_list_t *)id_entry->id_impl_specific; 1175 iocd->stmf_obuf_max_nentries = id_list->id_count; 1176 n = min(id_list->id_count, 1177 iocd->stmf_obuf_size/sizeof (stmf_ge_ident_t)); 1178 iocd->stmf_obuf_nentries = n; 1179 id_entry = id_list->idl_head; 1180 grp_entry = (stmf_ge_ident_t *)obuf; 1181 for (i = 0; i < n; i++) { 1182 bcopy(id_entry->id_data, grp_entry->ident, 1183 id_entry->id_data_size); 1184 grp_entry->ident_size = id_entry->id_data_size; 1185 id_entry = id_entry->id_next; 1186 grp_entry++; 1187 } 1188 } 1189 mutex_exit(&stmf_state.stmf_lock); 1190 break; 1191 1192 case STMF_IOCTL_GET_VE_LIST: 1193 n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t); 1194 mutex_enter(&stmf_state.stmf_lock); 1195 ve = (stmf_view_op_entry_t *)obuf; 1196 for (id_entry = stmf_state.stmf_luid_list.idl_head; 1197 id_entry; id_entry = id_entry->id_next) { 1198 for (view_entry = (stmf_view_entry_t *) 1199 id_entry->id_impl_specific; view_entry; 1200 view_entry = view_entry->ve_next) { 1201 iocd->stmf_obuf_max_nentries++; 1202 if (iocd->stmf_obuf_nentries >= n) 1203 continue; 1204 ve->ve_ndx_valid = 1; 1205 ve->ve_ndx = view_entry->ve_id; 1206 ve->ve_lu_number_valid = 1; 1207 bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8); 1208 bcopy(view_entry->ve_luid->id_data, ve->ve_guid, 1209 view_entry->ve_luid->id_data_size); 1210 if (view_entry->ve_hg->id_data[0] == '*') { 1211 ve->ve_all_hosts = 1; 1212 } else { 1213 bcopy(view_entry->ve_hg->id_data, 1214 ve->ve_host_group.name, 1215 view_entry->ve_hg->id_data_size); 1216 ve->ve_host_group.name_size = 1217 view_entry->ve_hg->id_data_size; 1218 } 1219 1220 if (view_entry->ve_tg->id_data[0] == '*') { 1221 ve->ve_all_targets = 1; 1222 } else { 1223 bcopy(view_entry->ve_tg->id_data, 1224 ve->ve_target_group.name, 1225 view_entry->ve_tg->id_data_size); 1226 ve->ve_target_group.name_size = 1227 view_entry->ve_tg->id_data_size; 1228 } 1229 ve++; 1230 iocd->stmf_obuf_nentries++; 1231 } 1232 } 1233 mutex_exit(&stmf_state.stmf_lock); 1234 break; 1235 1236 case STMF_IOCTL_LU_VE_LIST: 1237 p_id = (uint8_t *)ibuf; 1238 if ((iocd->stmf_ibuf_size != 16) || 1239 (iocd->stmf_obuf_size < sizeof (stmf_view_op_entry_t))) { 1240 ret = EINVAL; 1241 break; 1242 } 1243 1244 n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t); 1245 mutex_enter(&stmf_state.stmf_lock); 1246 ve = (stmf_view_op_entry_t *)obuf; 1247 for (id_entry = stmf_state.stmf_luid_list.idl_head; 1248 id_entry; id_entry = id_entry->id_next) { 1249 if (bcmp(id_entry->id_data, p_id, 16) != 0) 1250 continue; 1251 for (view_entry = (stmf_view_entry_t *) 1252 id_entry->id_impl_specific; view_entry; 1253 view_entry = view_entry->ve_next) { 1254 iocd->stmf_obuf_max_nentries++; 1255 if (iocd->stmf_obuf_nentries >= n) 1256 continue; 1257 ve->ve_ndx_valid = 1; 1258 ve->ve_ndx = view_entry->ve_id; 1259 ve->ve_lu_number_valid = 1; 1260 bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8); 1261 bcopy(view_entry->ve_luid->id_data, ve->ve_guid, 1262 view_entry->ve_luid->id_data_size); 1263 if (view_entry->ve_hg->id_data[0] == '*') { 1264 ve->ve_all_hosts = 1; 1265 } else { 1266 bcopy(view_entry->ve_hg->id_data, 1267 ve->ve_host_group.name, 1268 view_entry->ve_hg->id_data_size); 1269 ve->ve_host_group.name_size = 1270 view_entry->ve_hg->id_data_size; 1271 } 1272 1273 if (view_entry->ve_tg->id_data[0] == '*') { 1274 ve->ve_all_targets = 1; 1275 } else { 1276 bcopy(view_entry->ve_tg->id_data, 1277 ve->ve_target_group.name, 1278 view_entry->ve_tg->id_data_size); 1279 ve->ve_target_group.name_size = 1280 view_entry->ve_tg->id_data_size; 1281 } 1282 ve++; 1283 iocd->stmf_obuf_nentries++; 1284 } 1285 break; 1286 } 1287 mutex_exit(&stmf_state.stmf_lock); 1288 break; 1289 1290 case STMF_IOCTL_LOAD_PP_DATA: 1291 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 1292 ret = EACCES; 1293 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 1294 break; 1295 } 1296 ppi = (stmf_ppioctl_data_t *)ibuf; 1297 if ((ppi == NULL) || 1298 (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) { 1299 ret = EINVAL; 1300 break; 1301 } 1302 /* returned token */ 1303 ppi_token = (uint64_t *)obuf; 1304 if ((ppi_token == NULL) || 1305 (iocd->stmf_obuf_size < sizeof (uint64_t))) { 1306 ret = EINVAL; 1307 break; 1308 } 1309 ret = stmf_load_ppd_ioctl(ppi, ppi_token, &iocd->stmf_error); 1310 break; 1311 1312 case STMF_IOCTL_GET_PP_DATA: 1313 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 1314 ret = EACCES; 1315 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 1316 break; 1317 } 1318 ppi = (stmf_ppioctl_data_t *)ibuf; 1319 if (ppi == NULL || 1320 (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) { 1321 ret = EINVAL; 1322 break; 1323 } 1324 ppi_out = (stmf_ppioctl_data_t *)obuf; 1325 if ((ppi_out == NULL) || 1326 (iocd->stmf_obuf_size < sizeof (stmf_ppioctl_data_t))) { 1327 ret = EINVAL; 1328 break; 1329 } 1330 ret = stmf_get_ppd_ioctl(ppi, ppi_out, &iocd->stmf_error); 1331 break; 1332 1333 case STMF_IOCTL_CLEAR_PP_DATA: 1334 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) { 1335 ret = EACCES; 1336 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT; 1337 break; 1338 } 1339 ppi = (stmf_ppioctl_data_t *)ibuf; 1340 if ((ppi == NULL) || 1341 (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) { 1342 ret = EINVAL; 1343 break; 1344 } 1345 ret = stmf_delete_ppd_ioctl(ppi); 1346 break; 1347 1348 case STMF_IOCTL_CLEAR_TRACE: 1349 stmf_trace_clear(); 1350 break; 1351 1352 case STMF_IOCTL_ADD_TRACE: 1353 if (iocd->stmf_ibuf_size && ibuf) { 1354 ((uint8_t *)ibuf)[iocd->stmf_ibuf_size - 1] = 0; 1355 stmf_trace("\nstradm", "%s\n", ibuf); 1356 } 1357 break; 1358 1359 case STMF_IOCTL_GET_TRACE_POSITION: 1360 if (obuf && (iocd->stmf_obuf_size > 3)) { 1361 mutex_enter(&trace_buf_lock); 1362 *((int *)obuf) = trace_buf_curndx; 1363 mutex_exit(&trace_buf_lock); 1364 } else { 1365 ret = EINVAL; 1366 } 1367 break; 1368 1369 case STMF_IOCTL_GET_TRACE: 1370 if ((iocd->stmf_obuf_size == 0) || (iocd->stmf_ibuf_size < 4)) { 1371 ret = EINVAL; 1372 break; 1373 } 1374 i = *((int *)ibuf); 1375 if ((i > trace_buf_size) || ((i + iocd->stmf_obuf_size) > 1376 trace_buf_size)) { 1377 ret = EINVAL; 1378 break; 1379 } 1380 mutex_enter(&trace_buf_lock); 1381 bcopy(stmf_trace_buf + i, obuf, iocd->stmf_obuf_size); 1382 mutex_exit(&trace_buf_lock); 1383 break; 1384 1385 default: 1386 ret = ENOTTY; 1387 } 1388 1389 if (ret == 0) { 1390 ret = stmf_copyout_iocdata(data, mode, iocd, obuf); 1391 } else if (iocd->stmf_error) { 1392 (void) stmf_copyout_iocdata(data, mode, iocd, obuf); 1393 } 1394 if (obuf) { 1395 kmem_free(obuf, iocd->stmf_obuf_size); 1396 obuf = NULL; 1397 } 1398 if (ibuf) { 1399 kmem_free(ibuf, iocd->stmf_ibuf_size); 1400 ibuf = NULL; 1401 } 1402 kmem_free(iocd, sizeof (stmf_iocdata_t)); 1403 return (ret); 1404 } 1405 1406 static int 1407 stmf_get_service_state() 1408 { 1409 stmf_i_local_port_t *ilport; 1410 stmf_i_lu_t *ilu; 1411 int online = 0; 1412 int offline = 0; 1413 int onlining = 0; 1414 int offlining = 0; 1415 1416 ASSERT(mutex_owned(&stmf_state.stmf_lock)); 1417 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL; 1418 ilport = ilport->ilport_next) { 1419 if (ilport->ilport_state == STMF_STATE_OFFLINE) 1420 offline++; 1421 else if (ilport->ilport_state == STMF_STATE_ONLINE) 1422 online++; 1423 else if (ilport->ilport_state == STMF_STATE_ONLINING) 1424 onlining++; 1425 else if (ilport->ilport_state == STMF_STATE_OFFLINING) 1426 offlining++; 1427 } 1428 1429 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; 1430 ilu = ilu->ilu_next) { 1431 if (ilu->ilu_state == STMF_STATE_OFFLINE) 1432 offline++; 1433 else if (ilu->ilu_state == STMF_STATE_ONLINE) 1434 online++; 1435 else if (ilu->ilu_state == STMF_STATE_ONLINING) 1436 onlining++; 1437 else if (ilu->ilu_state == STMF_STATE_OFFLINING) 1438 offlining++; 1439 } 1440 1441 if (stmf_state.stmf_service_running) { 1442 if (onlining) 1443 return (STMF_STATE_ONLINING); 1444 else 1445 return (STMF_STATE_ONLINE); 1446 } 1447 1448 if (offlining) { 1449 return (STMF_STATE_OFFLINING); 1450 } 1451 1452 return (STMF_STATE_OFFLINE); 1453 } 1454 1455 static int 1456 stmf_set_stmf_state(stmf_state_desc_t *std) 1457 { 1458 stmf_i_local_port_t *ilport; 1459 stmf_i_lu_t *ilu; 1460 stmf_state_change_info_t ssi; 1461 int svc_state; 1462 1463 ssi.st_rflags = STMF_RFLAG_USER_REQUEST; 1464 ssi.st_additional_info = NULL; 1465 1466 mutex_enter(&stmf_state.stmf_lock); 1467 if (!stmf_state.stmf_exclusive_open) { 1468 mutex_exit(&stmf_state.stmf_lock); 1469 return (EACCES); 1470 } 1471 1472 if (stmf_state.stmf_inventory_locked) { 1473 mutex_exit(&stmf_state.stmf_lock); 1474 return (EBUSY); 1475 } 1476 1477 if ((std->state != STMF_STATE_ONLINE) && 1478 (std->state != STMF_STATE_OFFLINE)) { 1479 mutex_exit(&stmf_state.stmf_lock); 1480 return (EINVAL); 1481 } 1482 1483 svc_state = stmf_get_service_state(); 1484 if ((svc_state == STMF_STATE_OFFLINING) || 1485 (svc_state == STMF_STATE_ONLINING)) { 1486 mutex_exit(&stmf_state.stmf_lock); 1487 return (EBUSY); 1488 } 1489 1490 if (svc_state == STMF_STATE_OFFLINE) { 1491 if (std->config_state == STMF_CONFIG_INIT) { 1492 if (std->state != STMF_STATE_OFFLINE) { 1493 mutex_exit(&stmf_state.stmf_lock); 1494 return (EINVAL); 1495 } 1496 stmf_state.stmf_config_state = STMF_CONFIG_INIT; 1497 stmf_delete_all_ppds(); 1498 stmf_view_clear_config(); 1499 stmf_view_init(); 1500 mutex_exit(&stmf_state.stmf_lock); 1501 return (0); 1502 } 1503 if ((stmf_state.stmf_config_state == STMF_CONFIG_INIT) || 1504 (stmf_state.stmf_config_state == STMF_CONFIG_NONE)) { 1505 if (std->config_state != STMF_CONFIG_INIT_DONE) { 1506 mutex_exit(&stmf_state.stmf_lock); 1507 return (EINVAL); 1508 } 1509 stmf_state.stmf_config_state = STMF_CONFIG_INIT_DONE; 1510 } 1511 if (std->state == STMF_STATE_OFFLINE) { 1512 mutex_exit(&stmf_state.stmf_lock); 1513 return (0); 1514 } 1515 if (stmf_state.stmf_config_state == STMF_CONFIG_INIT) { 1516 mutex_exit(&stmf_state.stmf_lock); 1517 return (EINVAL); 1518 } 1519 stmf_state.stmf_inventory_locked = 1; 1520 stmf_state.stmf_service_running = 1; 1521 mutex_exit(&stmf_state.stmf_lock); 1522 1523 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL; 1524 ilport = ilport->ilport_next) { 1525 if (stmf_state.stmf_default_lport_state != 1526 STMF_STATE_ONLINE) 1527 continue; 1528 (void) stmf_ctl(STMF_CMD_LPORT_ONLINE, 1529 ilport->ilport_lport, &ssi); 1530 } 1531 1532 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; 1533 ilu = ilu->ilu_next) { 1534 if (stmf_state.stmf_default_lu_state != 1535 STMF_STATE_ONLINE) 1536 continue; 1537 (void) stmf_ctl(STMF_CMD_LU_ONLINE, ilu->ilu_lu, &ssi); 1538 } 1539 mutex_enter(&stmf_state.stmf_lock); 1540 stmf_state.stmf_inventory_locked = 0; 1541 mutex_exit(&stmf_state.stmf_lock); 1542 return (0); 1543 } 1544 1545 /* svc_state is STMF_STATE_ONLINE here */ 1546 if ((std->state != STMF_STATE_OFFLINE) || 1547 (std->config_state == STMF_CONFIG_INIT)) { 1548 mutex_exit(&stmf_state.stmf_lock); 1549 return (EACCES); 1550 } 1551 1552 stmf_state.stmf_inventory_locked = 1; 1553 stmf_state.stmf_service_running = 0; 1554 1555 mutex_exit(&stmf_state.stmf_lock); 1556 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL; 1557 ilport = ilport->ilport_next) { 1558 if (ilport->ilport_state != STMF_STATE_ONLINE) 1559 continue; 1560 (void) stmf_ctl(STMF_CMD_LPORT_OFFLINE, 1561 ilport->ilport_lport, &ssi); 1562 } 1563 1564 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; 1565 ilu = ilu->ilu_next) { 1566 if (ilu->ilu_state != STMF_STATE_ONLINE) 1567 continue; 1568 (void) stmf_ctl(STMF_CMD_LU_OFFLINE, ilu->ilu_lu, &ssi); 1569 } 1570 mutex_enter(&stmf_state.stmf_lock); 1571 stmf_state.stmf_inventory_locked = 0; 1572 mutex_exit(&stmf_state.stmf_lock); 1573 return (0); 1574 } 1575 1576 static int 1577 stmf_get_stmf_state(stmf_state_desc_t *std) 1578 { 1579 mutex_enter(&stmf_state.stmf_lock); 1580 std->state = stmf_get_service_state(); 1581 std->config_state = stmf_state.stmf_config_state; 1582 mutex_exit(&stmf_state.stmf_lock); 1583 1584 return (0); 1585 } 1586 1587 /* 1588 * handles registration message from pppt for a logical unit 1589 */ 1590 stmf_status_t 1591 stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg, uint32_t type) 1592 { 1593 stmf_i_lu_provider_t *ilp; 1594 stmf_lu_provider_t *lp; 1595 mutex_enter(&stmf_state.stmf_lock); 1596 for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) { 1597 if (strcmp(msg->icrl_lu_provider_name, 1598 ilp->ilp_lp->lp_name) == 0) { 1599 lp = ilp->ilp_lp; 1600 mutex_exit(&stmf_state.stmf_lock); 1601 lp->lp_proxy_msg(msg->icrl_lun_id, msg->icrl_cb_arg, 1602 msg->icrl_cb_arg_len, type); 1603 return (STMF_SUCCESS); 1604 } 1605 } 1606 mutex_exit(&stmf_state.stmf_lock); 1607 return (STMF_SUCCESS); 1608 } 1609 1610 /* 1611 * handles de-registration message from pppt for a logical unit 1612 */ 1613 stmf_status_t 1614 stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg) 1615 { 1616 stmf_i_lu_provider_t *ilp; 1617 stmf_lu_provider_t *lp; 1618 mutex_enter(&stmf_state.stmf_lock); 1619 for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) { 1620 if (strcmp(msg->icrl_lu_provider_name, 1621 ilp->ilp_lp->lp_name) == 0) { 1622 lp = ilp->ilp_lp; 1623 mutex_exit(&stmf_state.stmf_lock); 1624 lp->lp_proxy_msg(msg->icrl_lun_id, NULL, 0, 1625 STMF_MSG_LU_DEREGISTER); 1626 return (STMF_SUCCESS); 1627 } 1628 } 1629 mutex_exit(&stmf_state.stmf_lock); 1630 return (STMF_SUCCESS); 1631 } 1632 1633 /* 1634 * helper function to find a task that matches a task_msgid 1635 */ 1636 scsi_task_t * 1637 find_task_from_msgid(uint8_t *lu_id, stmf_ic_msgid_t task_msgid) 1638 { 1639 stmf_i_lu_t *ilu; 1640 stmf_i_scsi_task_t *itask; 1641 1642 mutex_enter(&stmf_state.stmf_lock); 1643 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) { 1644 if (bcmp(lu_id, ilu->ilu_lu->lu_id->ident, 16) == 0) { 1645 break; 1646 } 1647 } 1648 1649 if (ilu == NULL) { 1650 mutex_exit(&stmf_state.stmf_lock); 1651 return (NULL); 1652 } 1653 1654 mutex_enter(&ilu->ilu_task_lock); 1655 for (itask = ilu->ilu_tasks; itask != NULL; 1656 itask = itask->itask_lu_next) { 1657 if (itask->itask_flags & (ITASK_IN_FREE_LIST | 1658 ITASK_BEING_ABORTED)) { 1659 continue; 1660 } 1661 if (itask->itask_proxy_msg_id == task_msgid) { 1662 break; 1663 } 1664 } 1665 mutex_exit(&ilu->ilu_task_lock); 1666 mutex_exit(&stmf_state.stmf_lock); 1667 1668 if (itask != NULL) { 1669 return (itask->itask_task); 1670 } else { 1671 /* task not found. Likely already aborted. */ 1672 return (NULL); 1673 } 1674 } 1675 1676 /* 1677 * message received from pppt/ic 1678 */ 1679 stmf_status_t 1680 stmf_msg_rx(stmf_ic_msg_t *msg) 1681 { 1682 mutex_enter(&stmf_state.stmf_lock); 1683 if (stmf_state.stmf_alua_state != 1) { 1684 mutex_exit(&stmf_state.stmf_lock); 1685 cmn_err(CE_WARN, "stmf alua state is disabled"); 1686 ic_msg_free(msg); 1687 return (STMF_FAILURE); 1688 } 1689 mutex_exit(&stmf_state.stmf_lock); 1690 1691 switch (msg->icm_msg_type) { 1692 case STMF_ICM_REGISTER_LUN: 1693 (void) stmf_ic_lu_reg( 1694 (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg, 1695 STMF_MSG_LU_REGISTER); 1696 break; 1697 case STMF_ICM_LUN_ACTIVE: 1698 (void) stmf_ic_lu_reg( 1699 (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg, 1700 STMF_MSG_LU_ACTIVE); 1701 break; 1702 case STMF_ICM_DEREGISTER_LUN: 1703 (void) stmf_ic_lu_dereg( 1704 (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg); 1705 break; 1706 case STMF_ICM_SCSI_DATA: 1707 (void) stmf_ic_rx_scsi_data( 1708 (stmf_ic_scsi_data_msg_t *)msg->icm_msg); 1709 break; 1710 case STMF_ICM_SCSI_STATUS: 1711 (void) stmf_ic_rx_scsi_status( 1712 (stmf_ic_scsi_status_msg_t *)msg->icm_msg); 1713 break; 1714 case STMF_ICM_STATUS: 1715 (void) stmf_ic_rx_status( 1716 (stmf_ic_status_msg_t *)msg->icm_msg); 1717 break; 1718 default: 1719 cmn_err(CE_WARN, "unknown message received %d", 1720 msg->icm_msg_type); 1721 ic_msg_free(msg); 1722 return (STMF_FAILURE); 1723 } 1724 ic_msg_free(msg); 1725 return (STMF_SUCCESS); 1726 } 1727 1728 stmf_status_t 1729 stmf_ic_rx_status(stmf_ic_status_msg_t *msg) 1730 { 1731 stmf_i_local_port_t *ilport; 1732 1733 if (msg->ics_msg_type != STMF_ICM_REGISTER_PROXY_PORT) { 1734 /* for now, ignore other message status */ 1735 return (STMF_SUCCESS); 1736 } 1737 1738 if (msg->ics_status != STMF_SUCCESS) { 1739 return (STMF_SUCCESS); 1740 } 1741 1742 mutex_enter(&stmf_state.stmf_lock); 1743 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL; 1744 ilport = ilport->ilport_next) { 1745 if (msg->ics_msgid == ilport->ilport_reg_msgid) { 1746 ilport->ilport_proxy_registered = 1; 1747 break; 1748 } 1749 } 1750 mutex_exit(&stmf_state.stmf_lock); 1751 return (STMF_SUCCESS); 1752 } 1753 1754 /* 1755 * handles scsi status message from pppt 1756 */ 1757 stmf_status_t 1758 stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg) 1759 { 1760 scsi_task_t *task; 1761 1762 /* is this a task management command */ 1763 if (msg->icss_task_msgid & MSG_ID_TM_BIT) { 1764 return (STMF_SUCCESS); 1765 } 1766 1767 task = find_task_from_msgid(msg->icss_lun_id, msg->icss_task_msgid); 1768 1769 if (task == NULL) { 1770 return (STMF_SUCCESS); 1771 } 1772 1773 task->task_scsi_status = msg->icss_status; 1774 task->task_sense_data = msg->icss_sense; 1775 task->task_sense_length = msg->icss_sense_len; 1776 (void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE); 1777 1778 return (STMF_SUCCESS); 1779 } 1780 1781 /* 1782 * handles scsi data message from pppt 1783 */ 1784 stmf_status_t 1785 stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg) 1786 { 1787 stmf_i_scsi_task_t *itask; 1788 scsi_task_t *task; 1789 stmf_xfer_data_t *xd = NULL; 1790 stmf_data_buf_t *dbuf; 1791 uint32_t sz, minsz, xd_sz, asz; 1792 1793 /* is this a task management command */ 1794 if (msg->icsd_task_msgid & MSG_ID_TM_BIT) { 1795 return (STMF_SUCCESS); 1796 } 1797 1798 task = find_task_from_msgid(msg->icsd_lun_id, msg->icsd_task_msgid); 1799 if (task == NULL) { 1800 stmf_ic_msg_t *ic_xfer_done_msg = NULL; 1801 static uint64_t data_msg_id; 1802 stmf_status_t ic_ret = STMF_FAILURE; 1803 mutex_enter(&stmf_state.stmf_lock); 1804 data_msg_id = stmf_proxy_msg_id++; 1805 mutex_exit(&stmf_state.stmf_lock); 1806 /* 1807 * send xfer done status to pppt 1808 * for now, set the session id to 0 as we cannot 1809 * ascertain it since we cannot find the task 1810 */ 1811 ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc( 1812 msg->icsd_task_msgid, 0, STMF_FAILURE, data_msg_id); 1813 if (ic_xfer_done_msg) { 1814 ic_ret = ic_tx_msg(ic_xfer_done_msg); 1815 if (ic_ret != STMF_IC_MSG_SUCCESS) { 1816 cmn_err(CE_WARN, "unable to xmit proxy msg"); 1817 } 1818 } 1819 return (STMF_FAILURE); 1820 } 1821 1822 itask = (stmf_i_scsi_task_t *)task->task_stmf_private; 1823 dbuf = itask->itask_proxy_dbuf; 1824 1825 task->task_cmd_xfer_length += msg->icsd_data_len; 1826 1827 if (task->task_additional_flags & 1828 TASK_AF_NO_EXPECTED_XFER_LENGTH) { 1829 task->task_expected_xfer_length = 1830 task->task_cmd_xfer_length; 1831 } 1832 1833 sz = min(task->task_expected_xfer_length, 1834 task->task_cmd_xfer_length); 1835 1836 xd_sz = msg->icsd_data_len; 1837 asz = xd_sz + sizeof (*xd) - 4; 1838 xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP); 1839 1840 if (xd == NULL) { 1841 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 1842 STMF_ALLOC_FAILURE, NULL); 1843 return (STMF_FAILURE); 1844 } 1845 1846 xd->alloc_size = asz; 1847 xd->size_left = xd_sz; 1848 bcopy(msg->icsd_data, xd->buf, xd_sz); 1849 1850 sz = min(sz, xd->size_left); 1851 xd->size_left = sz; 1852 minsz = min(512, sz); 1853 1854 if (dbuf == NULL) 1855 dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0); 1856 if (dbuf == NULL) { 1857 kmem_free(xd, xd->alloc_size); 1858 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 1859 STMF_ALLOC_FAILURE, NULL); 1860 return (STMF_FAILURE); 1861 } 1862 dbuf->db_lu_private = xd; 1863 dbuf->db_relative_offset = task->task_nbytes_transferred; 1864 stmf_xd_to_dbuf(dbuf, 0); 1865 1866 dbuf->db_flags = DB_DIRECTION_TO_RPORT; 1867 (void) stmf_xfer_data(task, dbuf, 0); 1868 return (STMF_SUCCESS); 1869 } 1870 1871 stmf_status_t 1872 stmf_proxy_scsi_cmd(scsi_task_t *task, stmf_data_buf_t *dbuf) 1873 { 1874 stmf_i_scsi_task_t *itask = 1875 (stmf_i_scsi_task_t *)task->task_stmf_private; 1876 stmf_i_local_port_t *ilport = 1877 (stmf_i_local_port_t *)task->task_lport->lport_stmf_private; 1878 stmf_ic_msg_t *ic_cmd_msg; 1879 stmf_ic_msg_status_t ic_ret; 1880 stmf_status_t ret = STMF_FAILURE; 1881 1882 if (stmf_state.stmf_alua_state != 1) { 1883 cmn_err(CE_WARN, "stmf alua state is disabled"); 1884 return (STMF_FAILURE); 1885 } 1886 1887 if (ilport->ilport_proxy_registered == 0) { 1888 return (STMF_FAILURE); 1889 } 1890 1891 mutex_enter(&stmf_state.stmf_lock); 1892 itask->itask_proxy_msg_id = stmf_proxy_msg_id++; 1893 mutex_exit(&stmf_state.stmf_lock); 1894 itask->itask_proxy_dbuf = dbuf; 1895 1896 /* 1897 * stmf will now take over the task handling for this task 1898 * but it still needs to be treated differently from other 1899 * default handled tasks, hence the ITASK_PROXY_TASK. 1900 * If this is a task management function, we're really just 1901 * duping the command to the peer. Set the TM bit so that 1902 * we can recognize this on return since we won't be completing 1903 * the proxied task in that case. 1904 */ 1905 if (task->task_mgmt_function) { 1906 itask->itask_proxy_msg_id |= MSG_ID_TM_BIT; 1907 } else { 1908 uint32_t new, old; 1909 do { 1910 new = old = itask->itask_flags; 1911 if (new & ITASK_BEING_ABORTED) 1912 return (STMF_FAILURE); 1913 new |= ITASK_DEFAULT_HANDLING | ITASK_PROXY_TASK; 1914 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 1915 } 1916 if (dbuf) { 1917 ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id, 1918 task, dbuf->db_data_size, dbuf->db_sglist[0].seg_addr, 1919 itask->itask_proxy_msg_id); 1920 } else { 1921 ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id, 1922 task, 0, NULL, itask->itask_proxy_msg_id); 1923 } 1924 if (ic_cmd_msg) { 1925 ic_ret = ic_tx_msg(ic_cmd_msg); 1926 if (ic_ret == STMF_IC_MSG_SUCCESS) { 1927 ret = STMF_SUCCESS; 1928 } 1929 } 1930 return (ret); 1931 } 1932 1933 1934 stmf_status_t 1935 pppt_modload() 1936 { 1937 int error; 1938 1939 if (pppt_mod == NULL && ((pppt_mod = 1940 ddi_modopen("drv/pppt", KRTLD_MODE_FIRST, &error)) == NULL)) { 1941 cmn_err(CE_WARN, "Unable to load pppt"); 1942 return (STMF_FAILURE); 1943 } 1944 1945 if (ic_reg_port_msg_alloc == NULL && ((ic_reg_port_msg_alloc = 1946 (stmf_ic_reg_port_msg_alloc_func_t) 1947 ddi_modsym(pppt_mod, "stmf_ic_reg_port_msg_alloc", 1948 &error)) == NULL)) { 1949 cmn_err(CE_WARN, 1950 "Unable to find symbol - stmf_ic_reg_port_msg_alloc"); 1951 return (STMF_FAILURE); 1952 } 1953 1954 1955 if (ic_dereg_port_msg_alloc == NULL && ((ic_dereg_port_msg_alloc = 1956 (stmf_ic_dereg_port_msg_alloc_func_t) 1957 ddi_modsym(pppt_mod, "stmf_ic_dereg_port_msg_alloc", 1958 &error)) == NULL)) { 1959 cmn_err(CE_WARN, 1960 "Unable to find symbol - stmf_ic_dereg_port_msg_alloc"); 1961 return (STMF_FAILURE); 1962 } 1963 1964 if (ic_reg_lun_msg_alloc == NULL && ((ic_reg_lun_msg_alloc = 1965 (stmf_ic_reg_lun_msg_alloc_func_t) 1966 ddi_modsym(pppt_mod, "stmf_ic_reg_lun_msg_alloc", 1967 &error)) == NULL)) { 1968 cmn_err(CE_WARN, 1969 "Unable to find symbol - stmf_ic_reg_lun_msg_alloc"); 1970 return (STMF_FAILURE); 1971 } 1972 1973 if (ic_lun_active_msg_alloc == NULL && ((ic_lun_active_msg_alloc = 1974 (stmf_ic_lun_active_msg_alloc_func_t) 1975 ddi_modsym(pppt_mod, "stmf_ic_lun_active_msg_alloc", 1976 &error)) == NULL)) { 1977 cmn_err(CE_WARN, 1978 "Unable to find symbol - stmf_ic_lun_active_msg_alloc"); 1979 return (STMF_FAILURE); 1980 } 1981 1982 if (ic_dereg_lun_msg_alloc == NULL && ((ic_dereg_lun_msg_alloc = 1983 (stmf_ic_dereg_lun_msg_alloc_func_t) 1984 ddi_modsym(pppt_mod, "stmf_ic_dereg_lun_msg_alloc", 1985 &error)) == NULL)) { 1986 cmn_err(CE_WARN, 1987 "Unable to find symbol - stmf_ic_dereg_lun_msg_alloc"); 1988 return (STMF_FAILURE); 1989 } 1990 1991 if (ic_scsi_cmd_msg_alloc == NULL && ((ic_scsi_cmd_msg_alloc = 1992 (stmf_ic_scsi_cmd_msg_alloc_func_t) 1993 ddi_modsym(pppt_mod, "stmf_ic_scsi_cmd_msg_alloc", 1994 &error)) == NULL)) { 1995 cmn_err(CE_WARN, 1996 "Unable to find symbol - stmf_ic_scsi_cmd_msg_alloc"); 1997 return (STMF_FAILURE); 1998 } 1999 2000 if (ic_scsi_data_xfer_done_msg_alloc == NULL && 2001 ((ic_scsi_data_xfer_done_msg_alloc = 2002 (stmf_ic_scsi_data_xfer_done_msg_alloc_func_t) 2003 ddi_modsym(pppt_mod, "stmf_ic_scsi_data_xfer_done_msg_alloc", 2004 &error)) == NULL)) { 2005 cmn_err(CE_WARN, 2006 "Unable to find symbol -" 2007 "stmf_ic_scsi_data_xfer_done_msg_alloc"); 2008 return (STMF_FAILURE); 2009 } 2010 2011 if (ic_session_reg_msg_alloc == NULL && 2012 ((ic_session_reg_msg_alloc = 2013 (stmf_ic_session_create_msg_alloc_func_t) 2014 ddi_modsym(pppt_mod, "stmf_ic_session_create_msg_alloc", 2015 &error)) == NULL)) { 2016 cmn_err(CE_WARN, 2017 "Unable to find symbol -" 2018 "stmf_ic_session_create_msg_alloc"); 2019 return (STMF_FAILURE); 2020 } 2021 2022 if (ic_session_dereg_msg_alloc == NULL && 2023 ((ic_session_dereg_msg_alloc = 2024 (stmf_ic_session_destroy_msg_alloc_func_t) 2025 ddi_modsym(pppt_mod, "stmf_ic_session_destroy_msg_alloc", 2026 &error)) == NULL)) { 2027 cmn_err(CE_WARN, 2028 "Unable to find symbol -" 2029 "stmf_ic_session_destroy_msg_alloc"); 2030 return (STMF_FAILURE); 2031 } 2032 2033 if (ic_tx_msg == NULL && ((ic_tx_msg = 2034 (stmf_ic_tx_msg_func_t)ddi_modsym(pppt_mod, "stmf_ic_tx_msg", 2035 &error)) == NULL)) { 2036 cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_tx_msg"); 2037 return (STMF_FAILURE); 2038 } 2039 2040 if (ic_msg_free == NULL && ((ic_msg_free = 2041 (stmf_ic_msg_free_func_t)ddi_modsym(pppt_mod, "stmf_ic_msg_free", 2042 &error)) == NULL)) { 2043 cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_msg_free"); 2044 return (STMF_FAILURE); 2045 } 2046 return (STMF_SUCCESS); 2047 } 2048 2049 static void 2050 stmf_get_alua_state(stmf_alua_state_desc_t *alua_state) 2051 { 2052 mutex_enter(&stmf_state.stmf_lock); 2053 alua_state->alua_node = stmf_state.stmf_alua_node; 2054 alua_state->alua_state = stmf_state.stmf_alua_state; 2055 mutex_exit(&stmf_state.stmf_lock); 2056 } 2057 2058 2059 static int 2060 stmf_set_alua_state(stmf_alua_state_desc_t *alua_state) 2061 { 2062 stmf_i_local_port_t *ilport; 2063 stmf_i_lu_t *ilu; 2064 stmf_lu_t *lu; 2065 stmf_ic_msg_status_t ic_ret; 2066 stmf_ic_msg_t *ic_reg_lun, *ic_reg_port; 2067 stmf_local_port_t *lport; 2068 int ret = 0; 2069 2070 if (alua_state->alua_state > 1 || alua_state->alua_node > 1) { 2071 return (EINVAL); 2072 } 2073 2074 mutex_enter(&stmf_state.stmf_lock); 2075 if (alua_state->alua_state == 1) { 2076 if (pppt_modload() == STMF_FAILURE) { 2077 ret = EIO; 2078 goto err; 2079 } 2080 if (alua_state->alua_node != 0) { 2081 /* reset existing rtpids to new base */ 2082 stmf_rtpid_counter = 255; 2083 } 2084 stmf_state.stmf_alua_node = alua_state->alua_node; 2085 stmf_state.stmf_alua_state = 1; 2086 /* register existing local ports with ppp */ 2087 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL; 2088 ilport = ilport->ilport_next) { 2089 /* skip standby ports and non-alua participants */ 2090 if (ilport->ilport_standby == 1 || 2091 ilport->ilport_alua == 0) { 2092 continue; 2093 } 2094 if (alua_state->alua_node != 0) { 2095 ilport->ilport_rtpid = 2096 atomic_inc_16_nv(&stmf_rtpid_counter); 2097 } 2098 lport = ilport->ilport_lport; 2099 ic_reg_port = ic_reg_port_msg_alloc( 2100 lport->lport_id, ilport->ilport_rtpid, 2101 0, NULL, stmf_proxy_msg_id); 2102 if (ic_reg_port) { 2103 ic_ret = ic_tx_msg(ic_reg_port); 2104 if (ic_ret == STMF_IC_MSG_SUCCESS) { 2105 ilport->ilport_reg_msgid = 2106 stmf_proxy_msg_id++; 2107 } else { 2108 cmn_err(CE_WARN, 2109 "error on port registration " 2110 "port - %s", 2111 ilport->ilport_kstat_tgt_name); 2112 } 2113 } 2114 } 2115 /* register existing logical units */ 2116 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; 2117 ilu = ilu->ilu_next) { 2118 if (ilu->ilu_access != STMF_LU_ACTIVE) { 2119 continue; 2120 } 2121 /* register with proxy module */ 2122 lu = ilu->ilu_lu; 2123 if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 && 2124 lu->lu_lp->lp_alua_support) { 2125 ilu->ilu_alua = 1; 2126 /* allocate the register message */ 2127 ic_reg_lun = ic_reg_lun_msg_alloc( 2128 lu->lu_id->ident, lu->lu_lp->lp_name, 2129 lu->lu_proxy_reg_arg_len, 2130 (uint8_t *)lu->lu_proxy_reg_arg, 2131 stmf_proxy_msg_id); 2132 /* send the message */ 2133 if (ic_reg_lun) { 2134 ic_ret = ic_tx_msg(ic_reg_lun); 2135 if (ic_ret == STMF_IC_MSG_SUCCESS) { 2136 stmf_proxy_msg_id++; 2137 } 2138 } 2139 } 2140 } 2141 } else { 2142 stmf_state.stmf_alua_state = 0; 2143 } 2144 2145 err: 2146 mutex_exit(&stmf_state.stmf_lock); 2147 return (ret); 2148 } 2149 2150 2151 typedef struct { 2152 void *bp; /* back pointer from internal struct to main struct */ 2153 int alloc_size; 2154 } __istmf_t; 2155 2156 typedef struct { 2157 __istmf_t *fp; /* Framework private */ 2158 void *cp; /* Caller private */ 2159 void *ss; /* struct specific */ 2160 } __stmf_t; 2161 2162 static struct { 2163 int shared; 2164 int fw_private; 2165 } stmf_sizes[] = { { 0, 0 }, 2166 { GET_STRUCT_SIZE(stmf_lu_provider_t), 2167 GET_STRUCT_SIZE(stmf_i_lu_provider_t) }, 2168 { GET_STRUCT_SIZE(stmf_port_provider_t), 2169 GET_STRUCT_SIZE(stmf_i_port_provider_t) }, 2170 { GET_STRUCT_SIZE(stmf_local_port_t), 2171 GET_STRUCT_SIZE(stmf_i_local_port_t) }, 2172 { GET_STRUCT_SIZE(stmf_lu_t), 2173 GET_STRUCT_SIZE(stmf_i_lu_t) }, 2174 { GET_STRUCT_SIZE(stmf_scsi_session_t), 2175 GET_STRUCT_SIZE(stmf_i_scsi_session_t) }, 2176 { GET_STRUCT_SIZE(scsi_task_t), 2177 GET_STRUCT_SIZE(stmf_i_scsi_task_t) }, 2178 { GET_STRUCT_SIZE(stmf_data_buf_t), 2179 GET_STRUCT_SIZE(__istmf_t) }, 2180 { GET_STRUCT_SIZE(stmf_dbuf_store_t), 2181 GET_STRUCT_SIZE(__istmf_t) } 2182 2183 }; 2184 2185 void * 2186 stmf_alloc(stmf_struct_id_t struct_id, int additional_size, int flags) 2187 { 2188 int stmf_size; 2189 int kmem_flag; 2190 __stmf_t *sh; 2191 2192 if ((struct_id == 0) || (struct_id >= STMF_MAX_STRUCT_IDS)) 2193 return (NULL); 2194 2195 if ((curthread->t_flag & T_INTR_THREAD) || (flags & AF_FORCE_NOSLEEP)) { 2196 kmem_flag = KM_NOSLEEP; 2197 } else { 2198 kmem_flag = KM_SLEEP; 2199 } 2200 2201 additional_size = (additional_size + 7) & (~7); 2202 stmf_size = stmf_sizes[struct_id].shared + 2203 stmf_sizes[struct_id].fw_private + additional_size; 2204 2205 if (flags & AF_DONTZERO) 2206 sh = (__stmf_t *)kmem_alloc(stmf_size, kmem_flag); 2207 else 2208 sh = (__stmf_t *)kmem_zalloc(stmf_size, kmem_flag); 2209 2210 if (sh == NULL) 2211 return (NULL); 2212 2213 /* 2214 * In principle, the implementation inside stmf_alloc should not 2215 * be changed anyway. But the original order of framework private 2216 * data and caller private data does not support sglist in the caller 2217 * private data. 2218 * To work around this, the memory segments of framework private 2219 * data and caller private data are re-ordered here. 2220 * A better solution is to provide a specific interface to allocate 2221 * the sglist, then we will not need this workaround any more. 2222 * But before the new interface is available, the memory segment 2223 * ordering should be kept as is. 2224 */ 2225 sh->cp = GET_BYTE_OFFSET(sh, stmf_sizes[struct_id].shared); 2226 sh->fp = (__istmf_t *)GET_BYTE_OFFSET(sh, 2227 stmf_sizes[struct_id].shared + additional_size); 2228 2229 sh->fp->bp = sh; 2230 /* Just store the total size instead of storing additional size */ 2231 sh->fp->alloc_size = stmf_size; 2232 2233 return (sh); 2234 } 2235 2236 void 2237 stmf_free(void *ptr) 2238 { 2239 __stmf_t *sh = (__stmf_t *)ptr; 2240 2241 /* 2242 * So far we dont need any struct specific processing. If such 2243 * a need ever arises, then store the struct id in the framework 2244 * private section and get it here as sh->fp->struct_id. 2245 */ 2246 kmem_free(ptr, sh->fp->alloc_size); 2247 } 2248 2249 /* 2250 * Given a pointer to stmf_lu_t, verifies if this lu is registered with the 2251 * framework and returns a pointer to framework private data for the lu. 2252 * Returns NULL if the lu was not found. 2253 */ 2254 stmf_i_lu_t * 2255 stmf_lookup_lu(stmf_lu_t *lu) 2256 { 2257 stmf_i_lu_t *ilu; 2258 ASSERT(mutex_owned(&stmf_state.stmf_lock)); 2259 2260 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) { 2261 if (ilu->ilu_lu == lu) 2262 return (ilu); 2263 } 2264 return (NULL); 2265 } 2266 2267 /* 2268 * Given a pointer to stmf_local_port_t, verifies if this lport is registered 2269 * with the framework and returns a pointer to framework private data for 2270 * the lport. 2271 * Returns NULL if the lport was not found. 2272 */ 2273 stmf_i_local_port_t * 2274 stmf_lookup_lport(stmf_local_port_t *lport) 2275 { 2276 stmf_i_local_port_t *ilport; 2277 ASSERT(mutex_owned(&stmf_state.stmf_lock)); 2278 2279 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL; 2280 ilport = ilport->ilport_next) { 2281 if (ilport->ilport_lport == lport) 2282 return (ilport); 2283 } 2284 return (NULL); 2285 } 2286 2287 stmf_status_t 2288 stmf_register_lu_provider(stmf_lu_provider_t *lp) 2289 { 2290 stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private; 2291 stmf_pp_data_t *ppd; 2292 uint32_t cb_flags; 2293 2294 if (lp->lp_lpif_rev != LPIF_REV_1 && lp->lp_lpif_rev != LPIF_REV_2) 2295 return (STMF_FAILURE); 2296 2297 mutex_enter(&stmf_state.stmf_lock); 2298 ilp->ilp_next = stmf_state.stmf_ilplist; 2299 stmf_state.stmf_ilplist = ilp; 2300 stmf_state.stmf_nlps++; 2301 2302 /* See if we need to do a callback */ 2303 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) { 2304 if (strcmp(ppd->ppd_name, lp->lp_name) == 0) { 2305 break; 2306 } 2307 } 2308 if ((ppd == NULL) || (ppd->ppd_nv == NULL)) { 2309 goto rlp_bail_out; 2310 } 2311 ilp->ilp_ppd = ppd; 2312 ppd->ppd_provider = ilp; 2313 if (lp->lp_cb == NULL) 2314 goto rlp_bail_out; 2315 ilp->ilp_cb_in_progress = 1; 2316 cb_flags = STMF_PCB_PREG_COMPLETE; 2317 if (stmf_state.stmf_config_state == STMF_CONFIG_INIT) 2318 cb_flags |= STMF_PCB_STMF_ONLINING; 2319 mutex_exit(&stmf_state.stmf_lock); 2320 lp->lp_cb(lp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags); 2321 mutex_enter(&stmf_state.stmf_lock); 2322 ilp->ilp_cb_in_progress = 0; 2323 2324 rlp_bail_out: 2325 mutex_exit(&stmf_state.stmf_lock); 2326 2327 return (STMF_SUCCESS); 2328 } 2329 2330 stmf_status_t 2331 stmf_deregister_lu_provider(stmf_lu_provider_t *lp) 2332 { 2333 stmf_i_lu_provider_t **ppilp; 2334 stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private; 2335 2336 mutex_enter(&stmf_state.stmf_lock); 2337 if (ilp->ilp_nlus || ilp->ilp_cb_in_progress) { 2338 mutex_exit(&stmf_state.stmf_lock); 2339 return (STMF_BUSY); 2340 } 2341 for (ppilp = &stmf_state.stmf_ilplist; *ppilp != NULL; 2342 ppilp = &((*ppilp)->ilp_next)) { 2343 if (*ppilp == ilp) { 2344 *ppilp = ilp->ilp_next; 2345 stmf_state.stmf_nlps--; 2346 if (ilp->ilp_ppd) { 2347 ilp->ilp_ppd->ppd_provider = NULL; 2348 ilp->ilp_ppd = NULL; 2349 } 2350 mutex_exit(&stmf_state.stmf_lock); 2351 return (STMF_SUCCESS); 2352 } 2353 } 2354 mutex_exit(&stmf_state.stmf_lock); 2355 return (STMF_NOT_FOUND); 2356 } 2357 2358 stmf_status_t 2359 stmf_register_port_provider(stmf_port_provider_t *pp) 2360 { 2361 stmf_i_port_provider_t *ipp = 2362 (stmf_i_port_provider_t *)pp->pp_stmf_private; 2363 stmf_pp_data_t *ppd; 2364 uint32_t cb_flags; 2365 2366 if (pp->pp_portif_rev != PORTIF_REV_1) 2367 return (STMF_FAILURE); 2368 2369 mutex_enter(&stmf_state.stmf_lock); 2370 ipp->ipp_next = stmf_state.stmf_ipplist; 2371 stmf_state.stmf_ipplist = ipp; 2372 stmf_state.stmf_npps++; 2373 /* See if we need to do a callback */ 2374 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) { 2375 if (strcmp(ppd->ppd_name, pp->pp_name) == 0) { 2376 break; 2377 } 2378 } 2379 if ((ppd == NULL) || (ppd->ppd_nv == NULL)) { 2380 goto rpp_bail_out; 2381 } 2382 ipp->ipp_ppd = ppd; 2383 ppd->ppd_provider = ipp; 2384 if (pp->pp_cb == NULL) 2385 goto rpp_bail_out; 2386 ipp->ipp_cb_in_progress = 1; 2387 cb_flags = STMF_PCB_PREG_COMPLETE; 2388 if (stmf_state.stmf_config_state == STMF_CONFIG_INIT) 2389 cb_flags |= STMF_PCB_STMF_ONLINING; 2390 mutex_exit(&stmf_state.stmf_lock); 2391 pp->pp_cb(pp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags); 2392 mutex_enter(&stmf_state.stmf_lock); 2393 ipp->ipp_cb_in_progress = 0; 2394 2395 rpp_bail_out: 2396 mutex_exit(&stmf_state.stmf_lock); 2397 2398 return (STMF_SUCCESS); 2399 } 2400 2401 stmf_status_t 2402 stmf_deregister_port_provider(stmf_port_provider_t *pp) 2403 { 2404 stmf_i_port_provider_t *ipp = 2405 (stmf_i_port_provider_t *)pp->pp_stmf_private; 2406 stmf_i_port_provider_t **ppipp; 2407 2408 mutex_enter(&stmf_state.stmf_lock); 2409 if (ipp->ipp_npps || ipp->ipp_cb_in_progress) { 2410 mutex_exit(&stmf_state.stmf_lock); 2411 return (STMF_BUSY); 2412 } 2413 for (ppipp = &stmf_state.stmf_ipplist; *ppipp != NULL; 2414 ppipp = &((*ppipp)->ipp_next)) { 2415 if (*ppipp == ipp) { 2416 *ppipp = ipp->ipp_next; 2417 stmf_state.stmf_npps--; 2418 if (ipp->ipp_ppd) { 2419 ipp->ipp_ppd->ppd_provider = NULL; 2420 ipp->ipp_ppd = NULL; 2421 } 2422 mutex_exit(&stmf_state.stmf_lock); 2423 return (STMF_SUCCESS); 2424 } 2425 } 2426 mutex_exit(&stmf_state.stmf_lock); 2427 return (STMF_NOT_FOUND); 2428 } 2429 2430 int 2431 stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token, 2432 uint32_t *err_ret) 2433 { 2434 stmf_i_port_provider_t *ipp; 2435 stmf_i_lu_provider_t *ilp; 2436 stmf_pp_data_t *ppd; 2437 nvlist_t *nv; 2438 int s; 2439 int ret; 2440 2441 *err_ret = 0; 2442 2443 if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) { 2444 return (EINVAL); 2445 } 2446 2447 mutex_enter(&stmf_state.stmf_lock); 2448 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) { 2449 if (ppi->ppi_lu_provider) { 2450 if (!ppd->ppd_lu_provider) 2451 continue; 2452 } else if (ppi->ppi_port_provider) { 2453 if (!ppd->ppd_port_provider) 2454 continue; 2455 } 2456 if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0) 2457 break; 2458 } 2459 2460 if (ppd == NULL) { 2461 /* New provider */ 2462 s = strlen(ppi->ppi_name); 2463 if (s > 254) { 2464 mutex_exit(&stmf_state.stmf_lock); 2465 return (EINVAL); 2466 } 2467 s += sizeof (stmf_pp_data_t) - 7; 2468 2469 ppd = kmem_zalloc(s, KM_NOSLEEP); 2470 if (ppd == NULL) { 2471 mutex_exit(&stmf_state.stmf_lock); 2472 return (ENOMEM); 2473 } 2474 ppd->ppd_alloc_size = s; 2475 (void) strcpy(ppd->ppd_name, ppi->ppi_name); 2476 2477 /* See if this provider already exists */ 2478 if (ppi->ppi_lu_provider) { 2479 ppd->ppd_lu_provider = 1; 2480 for (ilp = stmf_state.stmf_ilplist; ilp != NULL; 2481 ilp = ilp->ilp_next) { 2482 if (strcmp(ppi->ppi_name, 2483 ilp->ilp_lp->lp_name) == 0) { 2484 ppd->ppd_provider = ilp; 2485 ilp->ilp_ppd = ppd; 2486 break; 2487 } 2488 } 2489 } else { 2490 ppd->ppd_port_provider = 1; 2491 for (ipp = stmf_state.stmf_ipplist; ipp != NULL; 2492 ipp = ipp->ipp_next) { 2493 if (strcmp(ppi->ppi_name, 2494 ipp->ipp_pp->pp_name) == 0) { 2495 ppd->ppd_provider = ipp; 2496 ipp->ipp_ppd = ppd; 2497 break; 2498 } 2499 } 2500 } 2501 2502 /* Link this ppd in */ 2503 ppd->ppd_next = stmf_state.stmf_ppdlist; 2504 stmf_state.stmf_ppdlist = ppd; 2505 } 2506 2507 /* 2508 * User is requesting that the token be checked. 2509 * If there was another set after the user's get 2510 * it's an error 2511 */ 2512 if (ppi->ppi_token_valid) { 2513 if (ppi->ppi_token != ppd->ppd_token) { 2514 *err_ret = STMF_IOCERR_PPD_UPDATED; 2515 mutex_exit(&stmf_state.stmf_lock); 2516 return (EINVAL); 2517 } 2518 } 2519 2520 if ((ret = nvlist_unpack((char *)ppi->ppi_data, 2521 (size_t)ppi->ppi_data_size, &nv, KM_NOSLEEP)) != 0) { 2522 mutex_exit(&stmf_state.stmf_lock); 2523 return (ret); 2524 } 2525 2526 /* Free any existing lists and add this one to the ppd */ 2527 nvlist_free(ppd->ppd_nv); 2528 ppd->ppd_nv = nv; 2529 2530 /* set the token for writes */ 2531 ppd->ppd_token++; 2532 /* return token to caller */ 2533 if (ppi_token) { 2534 *ppi_token = ppd->ppd_token; 2535 } 2536 2537 /* If there is a provider registered, do the notifications */ 2538 if (ppd->ppd_provider) { 2539 uint32_t cb_flags = 0; 2540 2541 if (stmf_state.stmf_config_state == STMF_CONFIG_INIT) 2542 cb_flags |= STMF_PCB_STMF_ONLINING; 2543 if (ppi->ppi_lu_provider) { 2544 ilp = (stmf_i_lu_provider_t *)ppd->ppd_provider; 2545 if (ilp->ilp_lp->lp_cb == NULL) 2546 goto bail_out; 2547 ilp->ilp_cb_in_progress = 1; 2548 mutex_exit(&stmf_state.stmf_lock); 2549 ilp->ilp_lp->lp_cb(ilp->ilp_lp, 2550 STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags); 2551 mutex_enter(&stmf_state.stmf_lock); 2552 ilp->ilp_cb_in_progress = 0; 2553 } else { 2554 ipp = (stmf_i_port_provider_t *)ppd->ppd_provider; 2555 if (ipp->ipp_pp->pp_cb == NULL) 2556 goto bail_out; 2557 ipp->ipp_cb_in_progress = 1; 2558 mutex_exit(&stmf_state.stmf_lock); 2559 ipp->ipp_pp->pp_cb(ipp->ipp_pp, 2560 STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags); 2561 mutex_enter(&stmf_state.stmf_lock); 2562 ipp->ipp_cb_in_progress = 0; 2563 } 2564 } 2565 2566 bail_out: 2567 mutex_exit(&stmf_state.stmf_lock); 2568 2569 return (0); 2570 } 2571 2572 void 2573 stmf_delete_ppd(stmf_pp_data_t *ppd) 2574 { 2575 stmf_pp_data_t **pppd; 2576 2577 ASSERT(mutex_owned(&stmf_state.stmf_lock)); 2578 if (ppd->ppd_provider) { 2579 if (ppd->ppd_lu_provider) { 2580 ((stmf_i_lu_provider_t *) 2581 ppd->ppd_provider)->ilp_ppd = NULL; 2582 } else { 2583 ((stmf_i_port_provider_t *) 2584 ppd->ppd_provider)->ipp_ppd = NULL; 2585 } 2586 ppd->ppd_provider = NULL; 2587 } 2588 2589 for (pppd = &stmf_state.stmf_ppdlist; *pppd != NULL; 2590 pppd = &((*pppd)->ppd_next)) { 2591 if (*pppd == ppd) 2592 break; 2593 } 2594 2595 if (*pppd == NULL) 2596 return; 2597 2598 *pppd = ppd->ppd_next; 2599 nvlist_free(ppd->ppd_nv); 2600 2601 kmem_free(ppd, ppd->ppd_alloc_size); 2602 } 2603 2604 int 2605 stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi) 2606 { 2607 stmf_pp_data_t *ppd; 2608 int ret = ENOENT; 2609 2610 if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) { 2611 return (EINVAL); 2612 } 2613 2614 mutex_enter(&stmf_state.stmf_lock); 2615 2616 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) { 2617 if (ppi->ppi_lu_provider) { 2618 if (!ppd->ppd_lu_provider) 2619 continue; 2620 } else if (ppi->ppi_port_provider) { 2621 if (!ppd->ppd_port_provider) 2622 continue; 2623 } 2624 if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0) 2625 break; 2626 } 2627 2628 if (ppd) { 2629 ret = 0; 2630 stmf_delete_ppd(ppd); 2631 } 2632 mutex_exit(&stmf_state.stmf_lock); 2633 2634 return (ret); 2635 } 2636 2637 int 2638 stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out, 2639 uint32_t *err_ret) 2640 { 2641 stmf_pp_data_t *ppd; 2642 size_t req_size; 2643 int ret = ENOENT; 2644 char *bufp = (char *)ppi_out->ppi_data; 2645 2646 if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) { 2647 return (EINVAL); 2648 } 2649 2650 mutex_enter(&stmf_state.stmf_lock); 2651 2652 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) { 2653 if (ppi->ppi_lu_provider) { 2654 if (!ppd->ppd_lu_provider) 2655 continue; 2656 } else if (ppi->ppi_port_provider) { 2657 if (!ppd->ppd_port_provider) 2658 continue; 2659 } 2660 if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0) 2661 break; 2662 } 2663 2664 if (ppd && ppd->ppd_nv) { 2665 ppi_out->ppi_token = ppd->ppd_token; 2666 if ((ret = nvlist_size(ppd->ppd_nv, &req_size, 2667 NV_ENCODE_XDR)) != 0) { 2668 goto done; 2669 } 2670 ppi_out->ppi_data_size = req_size; 2671 if (req_size > ppi->ppi_data_size) { 2672 *err_ret = STMF_IOCERR_INSUFFICIENT_BUF; 2673 ret = EINVAL; 2674 goto done; 2675 } 2676 2677 if ((ret = nvlist_pack(ppd->ppd_nv, &bufp, &req_size, 2678 NV_ENCODE_XDR, 0)) != 0) { 2679 goto done; 2680 } 2681 ret = 0; 2682 } 2683 2684 done: 2685 mutex_exit(&stmf_state.stmf_lock); 2686 2687 return (ret); 2688 } 2689 2690 void 2691 stmf_delete_all_ppds() 2692 { 2693 stmf_pp_data_t *ppd, *nppd; 2694 2695 ASSERT(mutex_owned(&stmf_state.stmf_lock)); 2696 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = nppd) { 2697 nppd = ppd->ppd_next; 2698 stmf_delete_ppd(ppd); 2699 } 2700 } 2701 2702 /* 2703 * 16 is the max string length of a protocol_ident, increase 2704 * the size if needed. 2705 */ 2706 #define STMF_KSTAT_LU_SZ (STMF_GUID_INPUT + 1 + 256) 2707 #define STMF_KSTAT_TGT_SZ (256 * 2 + 16) 2708 2709 /* 2710 * This array matches the Protocol Identifier in stmf_ioctl.h 2711 */ 2712 #define MAX_PROTO_STR_LEN 32 2713 2714 char *protocol_ident[PROTOCOL_ANY] = { 2715 "Fibre Channel", 2716 "Parallel SCSI", 2717 "SSA", 2718 "IEEE_1394", 2719 "SRP", 2720 "iSCSI", 2721 "SAS", 2722 "ADT", 2723 "ATAPI", 2724 "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN" 2725 }; 2726 2727 /* 2728 * Update the lun wait/run queue count 2729 */ 2730 static void 2731 stmf_update_kstat_lu_q(scsi_task_t *task, void func()) 2732 { 2733 stmf_i_lu_t *ilu; 2734 kstat_io_t *kip; 2735 2736 if (task->task_lu == dlun0) 2737 return; 2738 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 2739 if (ilu != NULL && ilu->ilu_kstat_io != NULL) { 2740 kip = KSTAT_IO_PTR(ilu->ilu_kstat_io); 2741 if (kip != NULL) { 2742 func(kip); 2743 } 2744 } 2745 } 2746 2747 /* 2748 * Update the target(lport) wait/run queue count 2749 */ 2750 static void 2751 stmf_update_kstat_lport_q(scsi_task_t *task, void func()) 2752 { 2753 stmf_i_local_port_t *ilp; 2754 kstat_io_t *kip; 2755 2756 ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private; 2757 if (ilp != NULL && ilp->ilport_kstat_io != NULL) { 2758 kip = KSTAT_IO_PTR(ilp->ilport_kstat_io); 2759 if (kip != NULL) { 2760 mutex_enter(ilp->ilport_kstat_io->ks_lock); 2761 func(kip); 2762 mutex_exit(ilp->ilport_kstat_io->ks_lock); 2763 } 2764 } 2765 } 2766 2767 static void 2768 stmf_update_kstat_lport_io(scsi_task_t *task, stmf_data_buf_t *dbuf) 2769 { 2770 stmf_i_local_port_t *ilp; 2771 kstat_io_t *kip; 2772 2773 ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private; 2774 if (ilp != NULL && ilp->ilport_kstat_io != NULL) { 2775 kip = KSTAT_IO_PTR(ilp->ilport_kstat_io); 2776 if (kip != NULL) { 2777 mutex_enter(ilp->ilport_kstat_io->ks_lock); 2778 STMF_UPDATE_KSTAT_IO(kip, dbuf); 2779 mutex_exit(ilp->ilport_kstat_io->ks_lock); 2780 } 2781 } 2782 } 2783 2784 static void 2785 stmf_update_kstat_lu_io(scsi_task_t *task, stmf_data_buf_t *dbuf) 2786 { 2787 stmf_i_lu_t *ilu; 2788 kstat_io_t *kip; 2789 2790 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 2791 if (ilu != NULL && ilu->ilu_kstat_io != NULL) { 2792 kip = KSTAT_IO_PTR(ilu->ilu_kstat_io); 2793 if (kip != NULL) { 2794 mutex_enter(ilu->ilu_kstat_io->ks_lock); 2795 STMF_UPDATE_KSTAT_IO(kip, dbuf); 2796 mutex_exit(ilu->ilu_kstat_io->ks_lock); 2797 } 2798 } 2799 } 2800 2801 static void 2802 stmf_create_kstat_lu(stmf_i_lu_t *ilu) 2803 { 2804 char ks_nm[KSTAT_STRLEN]; 2805 stmf_kstat_lu_info_t *ks_lu; 2806 2807 /* create kstat lun info */ 2808 ks_lu = (stmf_kstat_lu_info_t *)kmem_zalloc(STMF_KSTAT_LU_SZ, 2809 KM_NOSLEEP); 2810 if (ks_lu == NULL) { 2811 cmn_err(CE_WARN, "STMF: kmem_zalloc failed"); 2812 return; 2813 } 2814 2815 bzero(ks_nm, sizeof (ks_nm)); 2816 (void) sprintf(ks_nm, "stmf_lu_%"PRIxPTR"", (uintptr_t)ilu); 2817 if ((ilu->ilu_kstat_info = kstat_create(STMF_MODULE_NAME, 0, 2818 ks_nm, "misc", KSTAT_TYPE_NAMED, 2819 sizeof (stmf_kstat_lu_info_t) / sizeof (kstat_named_t), 2820 KSTAT_FLAG_VIRTUAL)) == NULL) { 2821 kmem_free(ks_lu, STMF_KSTAT_LU_SZ); 2822 cmn_err(CE_WARN, "STMF: kstat_create lu failed"); 2823 return; 2824 } 2825 2826 ilu->ilu_kstat_info->ks_data_size = STMF_KSTAT_LU_SZ; 2827 ilu->ilu_kstat_info->ks_data = ks_lu; 2828 2829 kstat_named_init(&ks_lu->i_lun_guid, "lun-guid", 2830 KSTAT_DATA_STRING); 2831 kstat_named_init(&ks_lu->i_lun_alias, "lun-alias", 2832 KSTAT_DATA_STRING); 2833 2834 /* convert guid to hex string */ 2835 int i; 2836 uint8_t *p = ilu->ilu_lu->lu_id->ident; 2837 bzero(ilu->ilu_ascii_hex_guid, sizeof (ilu->ilu_ascii_hex_guid)); 2838 for (i = 0; i < STMF_GUID_INPUT / 2; i++) { 2839 (void) sprintf(&ilu->ilu_ascii_hex_guid[i * 2], "%02x", p[i]); 2840 } 2841 kstat_named_setstr(&ks_lu->i_lun_guid, 2842 (const char *)ilu->ilu_ascii_hex_guid); 2843 kstat_named_setstr(&ks_lu->i_lun_alias, 2844 (const char *)ilu->ilu_lu->lu_alias); 2845 kstat_install(ilu->ilu_kstat_info); 2846 2847 /* create kstat lun io */ 2848 bzero(ks_nm, sizeof (ks_nm)); 2849 (void) sprintf(ks_nm, "stmf_lu_io_%"PRIxPTR"", (uintptr_t)ilu); 2850 if ((ilu->ilu_kstat_io = kstat_create(STMF_MODULE_NAME, 0, 2851 ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) { 2852 cmn_err(CE_WARN, "STMF: kstat_create lu_io failed"); 2853 return; 2854 } 2855 mutex_init(&ilu->ilu_kstat_lock, NULL, MUTEX_DRIVER, 0); 2856 ilu->ilu_kstat_io->ks_lock = &ilu->ilu_kstat_lock; 2857 kstat_install(ilu->ilu_kstat_io); 2858 } 2859 2860 static void 2861 stmf_create_kstat_lport(stmf_i_local_port_t *ilport) 2862 { 2863 char ks_nm[KSTAT_STRLEN]; 2864 stmf_kstat_tgt_info_t *ks_tgt; 2865 int id, len; 2866 2867 /* create kstat lport info */ 2868 ks_tgt = (stmf_kstat_tgt_info_t *)kmem_zalloc(STMF_KSTAT_TGT_SZ, 2869 KM_NOSLEEP); 2870 if (ks_tgt == NULL) { 2871 cmn_err(CE_WARN, "STMF: kmem_zalloc failed"); 2872 return; 2873 } 2874 2875 bzero(ks_nm, sizeof (ks_nm)); 2876 (void) sprintf(ks_nm, "stmf_tgt_%"PRIxPTR"", (uintptr_t)ilport); 2877 if ((ilport->ilport_kstat_info = kstat_create(STMF_MODULE_NAME, 2878 0, ks_nm, "misc", KSTAT_TYPE_NAMED, 2879 sizeof (stmf_kstat_tgt_info_t) / sizeof (kstat_named_t), 2880 KSTAT_FLAG_VIRTUAL)) == NULL) { 2881 kmem_free(ks_tgt, STMF_KSTAT_TGT_SZ); 2882 cmn_err(CE_WARN, "STMF: kstat_create target failed"); 2883 return; 2884 } 2885 2886 ilport->ilport_kstat_info->ks_data_size = STMF_KSTAT_TGT_SZ; 2887 ilport->ilport_kstat_info->ks_data = ks_tgt; 2888 2889 kstat_named_init(&ks_tgt->i_tgt_name, "target-name", 2890 KSTAT_DATA_STRING); 2891 kstat_named_init(&ks_tgt->i_tgt_alias, "target-alias", 2892 KSTAT_DATA_STRING); 2893 kstat_named_init(&ks_tgt->i_protocol, "protocol", 2894 KSTAT_DATA_STRING); 2895 2896 /* ident might not be null terminated */ 2897 len = ilport->ilport_lport->lport_id->ident_length; 2898 bcopy(ilport->ilport_lport->lport_id->ident, 2899 ilport->ilport_kstat_tgt_name, len); 2900 ilport->ilport_kstat_tgt_name[len + 1] = NULL; 2901 kstat_named_setstr(&ks_tgt->i_tgt_name, 2902 (const char *)ilport->ilport_kstat_tgt_name); 2903 kstat_named_setstr(&ks_tgt->i_tgt_alias, 2904 (const char *)ilport->ilport_lport->lport_alias); 2905 /* protocol */ 2906 if ((id = ilport->ilport_lport->lport_id->protocol_id) > PROTOCOL_ANY) { 2907 cmn_err(CE_WARN, "STMF: protocol_id out of bound"); 2908 id = PROTOCOL_ANY; 2909 } 2910 kstat_named_setstr(&ks_tgt->i_protocol, protocol_ident[id]); 2911 kstat_install(ilport->ilport_kstat_info); 2912 2913 /* create kstat lport io */ 2914 bzero(ks_nm, sizeof (ks_nm)); 2915 (void) sprintf(ks_nm, "stmf_tgt_io_%"PRIxPTR"", (uintptr_t)ilport); 2916 if ((ilport->ilport_kstat_io = kstat_create(STMF_MODULE_NAME, 0, 2917 ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) { 2918 cmn_err(CE_WARN, "STMF: kstat_create target_io failed"); 2919 return; 2920 } 2921 mutex_init(&ilport->ilport_kstat_lock, NULL, MUTEX_DRIVER, 0); 2922 ilport->ilport_kstat_io->ks_lock = &ilport->ilport_kstat_lock; 2923 kstat_install(ilport->ilport_kstat_io); 2924 } 2925 2926 /* 2927 * set the asymmetric access state for a logical unit 2928 * caller is responsible for establishing SCSI unit attention on 2929 * state change 2930 */ 2931 stmf_status_t 2932 stmf_set_lu_access(stmf_lu_t *lu, uint8_t access_state) 2933 { 2934 stmf_i_lu_t *ilu; 2935 uint8_t *p1, *p2; 2936 2937 if ((access_state != STMF_LU_STANDBY) && 2938 (access_state != STMF_LU_ACTIVE)) { 2939 return (STMF_INVALID_ARG); 2940 } 2941 2942 p1 = &lu->lu_id->ident[0]; 2943 mutex_enter(&stmf_state.stmf_lock); 2944 if (stmf_state.stmf_inventory_locked) { 2945 mutex_exit(&stmf_state.stmf_lock); 2946 return (STMF_BUSY); 2947 } 2948 2949 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) { 2950 p2 = &ilu->ilu_lu->lu_id->ident[0]; 2951 if (bcmp(p1, p2, 16) == 0) { 2952 break; 2953 } 2954 } 2955 2956 if (!ilu) { 2957 ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 2958 } else { 2959 /* 2960 * We're changing access state on an existing logical unit 2961 * Send the proxy registration message for this logical unit 2962 * if we're in alua mode. 2963 * If the requested state is STMF_LU_ACTIVE, we want to register 2964 * this logical unit. 2965 * If the requested state is STMF_LU_STANDBY, we're going to 2966 * abort all tasks for this logical unit. 2967 */ 2968 if (stmf_state.stmf_alua_state == 1 && 2969 access_state == STMF_LU_ACTIVE) { 2970 stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS; 2971 stmf_ic_msg_t *ic_reg_lun; 2972 if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 && 2973 lu->lu_lp->lp_alua_support) { 2974 ilu->ilu_alua = 1; 2975 /* allocate the register message */ 2976 ic_reg_lun = ic_lun_active_msg_alloc(p1, 2977 lu->lu_lp->lp_name, 2978 lu->lu_proxy_reg_arg_len, 2979 (uint8_t *)lu->lu_proxy_reg_arg, 2980 stmf_proxy_msg_id); 2981 /* send the message */ 2982 if (ic_reg_lun) { 2983 ic_ret = ic_tx_msg(ic_reg_lun); 2984 if (ic_ret == STMF_IC_MSG_SUCCESS) { 2985 stmf_proxy_msg_id++; 2986 } 2987 } 2988 } 2989 } else if (stmf_state.stmf_alua_state == 1 && 2990 access_state == STMF_LU_STANDBY) { 2991 /* abort all tasks for this lu */ 2992 stmf_task_lu_killall(lu, NULL, STMF_ABORTED); 2993 } 2994 } 2995 2996 ilu->ilu_access = access_state; 2997 2998 mutex_exit(&stmf_state.stmf_lock); 2999 return (STMF_SUCCESS); 3000 } 3001 3002 3003 stmf_status_t 3004 stmf_register_lu(stmf_lu_t *lu) 3005 { 3006 stmf_i_lu_t *ilu; 3007 uint8_t *p1, *p2; 3008 stmf_state_change_info_t ssci; 3009 stmf_id_data_t *luid; 3010 3011 if ((lu->lu_id->ident_type != ID_TYPE_NAA) || 3012 (lu->lu_id->ident_length != 16) || 3013 ((lu->lu_id->ident[0] & 0xf0) != 0x60)) { 3014 return (STMF_INVALID_ARG); 3015 } 3016 p1 = &lu->lu_id->ident[0]; 3017 mutex_enter(&stmf_state.stmf_lock); 3018 if (stmf_state.stmf_inventory_locked) { 3019 mutex_exit(&stmf_state.stmf_lock); 3020 return (STMF_BUSY); 3021 } 3022 3023 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) { 3024 p2 = &ilu->ilu_lu->lu_id->ident[0]; 3025 if (bcmp(p1, p2, 16) == 0) { 3026 mutex_exit(&stmf_state.stmf_lock); 3027 return (STMF_ALREADY); 3028 } 3029 } 3030 3031 ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 3032 luid = stmf_lookup_id(&stmf_state.stmf_luid_list, 3033 lu->lu_id->ident_length, lu->lu_id->ident); 3034 if (luid) { 3035 luid->id_pt_to_object = (void *)ilu; 3036 ilu->ilu_luid = luid; 3037 } 3038 ilu->ilu_alias = NULL; 3039 3040 ilu->ilu_next = stmf_state.stmf_ilulist; 3041 ilu->ilu_prev = NULL; 3042 if (ilu->ilu_next) 3043 ilu->ilu_next->ilu_prev = ilu; 3044 stmf_state.stmf_ilulist = ilu; 3045 stmf_state.stmf_nlus++; 3046 if (lu->lu_lp) { 3047 ((stmf_i_lu_provider_t *) 3048 (lu->lu_lp->lp_stmf_private))->ilp_nlus++; 3049 } 3050 ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1; 3051 STMF_EVENT_ALLOC_HANDLE(ilu->ilu_event_hdl); 3052 cv_init(&ilu->ilu_offline_pending_cv, NULL, CV_DRIVER, NULL); 3053 stmf_create_kstat_lu(ilu); 3054 /* 3055 * register with proxy module if available and logical unit 3056 * is in active state 3057 */ 3058 if (stmf_state.stmf_alua_state == 1 && 3059 ilu->ilu_access == STMF_LU_ACTIVE) { 3060 stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS; 3061 stmf_ic_msg_t *ic_reg_lun; 3062 if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 && 3063 lu->lu_lp->lp_alua_support) { 3064 ilu->ilu_alua = 1; 3065 /* allocate the register message */ 3066 ic_reg_lun = ic_reg_lun_msg_alloc(p1, 3067 lu->lu_lp->lp_name, lu->lu_proxy_reg_arg_len, 3068 (uint8_t *)lu->lu_proxy_reg_arg, stmf_proxy_msg_id); 3069 /* send the message */ 3070 if (ic_reg_lun) { 3071 ic_ret = ic_tx_msg(ic_reg_lun); 3072 if (ic_ret == STMF_IC_MSG_SUCCESS) { 3073 stmf_proxy_msg_id++; 3074 } 3075 } 3076 } 3077 } 3078 mutex_exit(&stmf_state.stmf_lock); 3079 3080 /* check the default state for lu */ 3081 if (stmf_state.stmf_default_lu_state == STMF_STATE_OFFLINE) { 3082 ilu->ilu_prev_state = STMF_STATE_OFFLINE; 3083 } else { 3084 ilu->ilu_prev_state = STMF_STATE_ONLINE; 3085 if (stmf_state.stmf_service_running) { 3086 ssci.st_rflags = 0; 3087 ssci.st_additional_info = NULL; 3088 (void) stmf_ctl(STMF_CMD_LU_ONLINE, lu, &ssci); 3089 } 3090 } 3091 3092 /* XXX: Generate event */ 3093 return (STMF_SUCCESS); 3094 } 3095 3096 stmf_status_t 3097 stmf_deregister_lu(stmf_lu_t *lu) 3098 { 3099 stmf_i_lu_t *ilu; 3100 3101 mutex_enter(&stmf_state.stmf_lock); 3102 if (stmf_state.stmf_inventory_locked) { 3103 mutex_exit(&stmf_state.stmf_lock); 3104 return (STMF_BUSY); 3105 } 3106 ilu = stmf_lookup_lu(lu); 3107 if (ilu == NULL) { 3108 mutex_exit(&stmf_state.stmf_lock); 3109 return (STMF_INVALID_ARG); 3110 } 3111 if (ilu->ilu_state == STMF_STATE_OFFLINE) { 3112 ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free); 3113 while (ilu->ilu_flags & ILU_STALL_DEREGISTER) { 3114 cv_wait(&stmf_state.stmf_cv, &stmf_state.stmf_lock); 3115 } 3116 if (ilu->ilu_ntasks) { 3117 stmf_i_scsi_task_t *itask, *nitask; 3118 3119 nitask = ilu->ilu_tasks; 3120 do { 3121 itask = nitask; 3122 nitask = itask->itask_lu_next; 3123 lu->lu_task_free(itask->itask_task); 3124 stmf_free(itask->itask_task); 3125 } while (nitask != NULL); 3126 3127 ilu->ilu_tasks = ilu->ilu_free_tasks = NULL; 3128 ilu->ilu_ntasks = ilu->ilu_ntasks_free = 0; 3129 } 3130 /* de-register with proxy if available */ 3131 if (ilu->ilu_access == STMF_LU_ACTIVE && 3132 stmf_state.stmf_alua_state == 1) { 3133 /* de-register with proxy module */ 3134 stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS; 3135 stmf_ic_msg_t *ic_dereg_lun; 3136 if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 && 3137 lu->lu_lp->lp_alua_support) { 3138 ilu->ilu_alua = 1; 3139 /* allocate the de-register message */ 3140 ic_dereg_lun = ic_dereg_lun_msg_alloc( 3141 lu->lu_id->ident, lu->lu_lp->lp_name, 0, 3142 NULL, stmf_proxy_msg_id); 3143 /* send the message */ 3144 if (ic_dereg_lun) { 3145 ic_ret = ic_tx_msg(ic_dereg_lun); 3146 if (ic_ret == STMF_IC_MSG_SUCCESS) { 3147 stmf_proxy_msg_id++; 3148 } 3149 } 3150 } 3151 } 3152 3153 if (ilu->ilu_next) 3154 ilu->ilu_next->ilu_prev = ilu->ilu_prev; 3155 if (ilu->ilu_prev) 3156 ilu->ilu_prev->ilu_next = ilu->ilu_next; 3157 else 3158 stmf_state.stmf_ilulist = ilu->ilu_next; 3159 stmf_state.stmf_nlus--; 3160 3161 if (ilu == stmf_state.stmf_svc_ilu_draining) { 3162 stmf_state.stmf_svc_ilu_draining = ilu->ilu_next; 3163 } 3164 if (ilu == stmf_state.stmf_svc_ilu_timing) { 3165 stmf_state.stmf_svc_ilu_timing = ilu->ilu_next; 3166 } 3167 if (lu->lu_lp) { 3168 ((stmf_i_lu_provider_t *) 3169 (lu->lu_lp->lp_stmf_private))->ilp_nlus--; 3170 } 3171 if (ilu->ilu_luid) { 3172 ((stmf_id_data_t *)ilu->ilu_luid)->id_pt_to_object = 3173 NULL; 3174 ilu->ilu_luid = NULL; 3175 } 3176 STMF_EVENT_FREE_HANDLE(ilu->ilu_event_hdl); 3177 } else { 3178 mutex_exit(&stmf_state.stmf_lock); 3179 return (STMF_BUSY); 3180 } 3181 if (ilu->ilu_kstat_info) { 3182 kmem_free(ilu->ilu_kstat_info->ks_data, STMF_KSTAT_LU_SZ); 3183 kstat_delete(ilu->ilu_kstat_info); 3184 } 3185 if (ilu->ilu_kstat_io) { 3186 kstat_delete(ilu->ilu_kstat_io); 3187 mutex_destroy(&ilu->ilu_kstat_lock); 3188 } 3189 cv_destroy(&ilu->ilu_offline_pending_cv); 3190 mutex_exit(&stmf_state.stmf_lock); 3191 return (STMF_SUCCESS); 3192 } 3193 3194 void 3195 stmf_set_port_standby(stmf_local_port_t *lport, uint16_t rtpid) 3196 { 3197 stmf_i_local_port_t *ilport = 3198 (stmf_i_local_port_t *)lport->lport_stmf_private; 3199 ilport->ilport_rtpid = rtpid; 3200 ilport->ilport_standby = 1; 3201 } 3202 3203 void 3204 stmf_set_port_alua(stmf_local_port_t *lport) 3205 { 3206 stmf_i_local_port_t *ilport = 3207 (stmf_i_local_port_t *)lport->lport_stmf_private; 3208 ilport->ilport_alua = 1; 3209 } 3210 3211 stmf_status_t 3212 stmf_register_local_port(stmf_local_port_t *lport) 3213 { 3214 stmf_i_local_port_t *ilport; 3215 stmf_state_change_info_t ssci; 3216 int start_workers = 0; 3217 3218 mutex_enter(&stmf_state.stmf_lock); 3219 if (stmf_state.stmf_inventory_locked) { 3220 mutex_exit(&stmf_state.stmf_lock); 3221 return (STMF_BUSY); 3222 } 3223 ilport = (stmf_i_local_port_t *)lport->lport_stmf_private; 3224 rw_init(&ilport->ilport_lock, NULL, RW_DRIVER, NULL); 3225 3226 ilport->ilport_instance = 3227 id_alloc_nosleep(stmf_state.stmf_ilport_inst_space); 3228 if (ilport->ilport_instance == -1) { 3229 mutex_exit(&stmf_state.stmf_lock); 3230 return (STMF_FAILURE); 3231 } 3232 ilport->ilport_next = stmf_state.stmf_ilportlist; 3233 ilport->ilport_prev = NULL; 3234 if (ilport->ilport_next) 3235 ilport->ilport_next->ilport_prev = ilport; 3236 stmf_state.stmf_ilportlist = ilport; 3237 stmf_state.stmf_nlports++; 3238 if (lport->lport_pp) { 3239 ((stmf_i_port_provider_t *) 3240 (lport->lport_pp->pp_stmf_private))->ipp_npps++; 3241 } 3242 ilport->ilport_tg = 3243 stmf_lookup_group_for_target(lport->lport_id->ident, 3244 lport->lport_id->ident_length); 3245 3246 /* 3247 * rtpid will/must be set if this is a standby port 3248 * only register ports that are not standby (proxy) ports 3249 * and ports that are alua participants (ilport_alua == 1) 3250 */ 3251 if (ilport->ilport_standby == 0) { 3252 ilport->ilport_rtpid = atomic_inc_16_nv(&stmf_rtpid_counter); 3253 } 3254 3255 if (stmf_state.stmf_alua_state == 1 && 3256 ilport->ilport_standby == 0 && 3257 ilport->ilport_alua == 1) { 3258 stmf_ic_msg_t *ic_reg_port; 3259 stmf_ic_msg_status_t ic_ret; 3260 stmf_local_port_t *lport; 3261 lport = ilport->ilport_lport; 3262 ic_reg_port = ic_reg_port_msg_alloc( 3263 lport->lport_id, ilport->ilport_rtpid, 3264 0, NULL, stmf_proxy_msg_id); 3265 if (ic_reg_port) { 3266 ic_ret = ic_tx_msg(ic_reg_port); 3267 if (ic_ret == STMF_IC_MSG_SUCCESS) { 3268 ilport->ilport_reg_msgid = stmf_proxy_msg_id++; 3269 } else { 3270 cmn_err(CE_WARN, "error on port registration " 3271 "port - %s", ilport->ilport_kstat_tgt_name); 3272 } 3273 } 3274 } 3275 STMF_EVENT_ALLOC_HANDLE(ilport->ilport_event_hdl); 3276 stmf_create_kstat_lport(ilport); 3277 if (stmf_workers_state == STMF_WORKERS_DISABLED) { 3278 stmf_workers_state = STMF_WORKERS_ENABLING; 3279 start_workers = 1; 3280 } 3281 mutex_exit(&stmf_state.stmf_lock); 3282 3283 if (start_workers) 3284 stmf_worker_init(); 3285 3286 /* the default state of LPORT */ 3287 3288 if (stmf_state.stmf_default_lport_state == STMF_STATE_OFFLINE) { 3289 ilport->ilport_prev_state = STMF_STATE_OFFLINE; 3290 } else { 3291 ilport->ilport_prev_state = STMF_STATE_ONLINE; 3292 if (stmf_state.stmf_service_running) { 3293 ssci.st_rflags = 0; 3294 ssci.st_additional_info = NULL; 3295 (void) stmf_ctl(STMF_CMD_LPORT_ONLINE, lport, &ssci); 3296 } 3297 } 3298 3299 /* XXX: Generate event */ 3300 return (STMF_SUCCESS); 3301 } 3302 3303 stmf_status_t 3304 stmf_deregister_local_port(stmf_local_port_t *lport) 3305 { 3306 stmf_i_local_port_t *ilport; 3307 3308 mutex_enter(&stmf_state.stmf_lock); 3309 if (stmf_state.stmf_inventory_locked) { 3310 mutex_exit(&stmf_state.stmf_lock); 3311 return (STMF_BUSY); 3312 } 3313 3314 /* dequeue all object requests from active queue */ 3315 stmf_svc_kill_obj_requests(lport); 3316 3317 ilport = (stmf_i_local_port_t *)lport->lport_stmf_private; 3318 3319 /* 3320 * deregister ports that are not standby (proxy) 3321 */ 3322 if (stmf_state.stmf_alua_state == 1 && 3323 ilport->ilport_standby == 0 && 3324 ilport->ilport_alua == 1) { 3325 stmf_ic_msg_t *ic_dereg_port; 3326 stmf_ic_msg_status_t ic_ret; 3327 ic_dereg_port = ic_dereg_port_msg_alloc( 3328 lport->lport_id, 0, NULL, stmf_proxy_msg_id); 3329 if (ic_dereg_port) { 3330 ic_ret = ic_tx_msg(ic_dereg_port); 3331 if (ic_ret == STMF_IC_MSG_SUCCESS) { 3332 stmf_proxy_msg_id++; 3333 } 3334 } 3335 } 3336 3337 if (ilport->ilport_nsessions == 0) { 3338 if (ilport->ilport_next) 3339 ilport->ilport_next->ilport_prev = ilport->ilport_prev; 3340 if (ilport->ilport_prev) 3341 ilport->ilport_prev->ilport_next = ilport->ilport_next; 3342 else 3343 stmf_state.stmf_ilportlist = ilport->ilport_next; 3344 id_free(stmf_state.stmf_ilport_inst_space, 3345 ilport->ilport_instance); 3346 rw_destroy(&ilport->ilport_lock); 3347 stmf_state.stmf_nlports--; 3348 if (lport->lport_pp) { 3349 ((stmf_i_port_provider_t *) 3350 (lport->lport_pp->pp_stmf_private))->ipp_npps--; 3351 } 3352 ilport->ilport_tg = NULL; 3353 STMF_EVENT_FREE_HANDLE(ilport->ilport_event_hdl); 3354 } else { 3355 mutex_exit(&stmf_state.stmf_lock); 3356 return (STMF_BUSY); 3357 } 3358 if (ilport->ilport_kstat_info) { 3359 kmem_free(ilport->ilport_kstat_info->ks_data, 3360 STMF_KSTAT_TGT_SZ); 3361 kstat_delete(ilport->ilport_kstat_info); 3362 } 3363 if (ilport->ilport_kstat_io) { 3364 kstat_delete(ilport->ilport_kstat_io); 3365 mutex_destroy(&ilport->ilport_kstat_lock); 3366 } 3367 mutex_exit(&stmf_state.stmf_lock); 3368 return (STMF_SUCCESS); 3369 } 3370 3371 /* 3372 * Rport id/instance mappings remain valid until STMF is unloaded 3373 */ 3374 static int 3375 stmf_irport_compare(const void *void_irport1, const void *void_irport2) 3376 { 3377 const stmf_i_remote_port_t *irport1 = void_irport1; 3378 const stmf_i_remote_port_t *irport2 = void_irport2; 3379 int result; 3380 3381 /* Sort by code set then ident */ 3382 if (irport1->irport_id->code_set < 3383 irport2->irport_id->code_set) { 3384 return (-1); 3385 } else if (irport1->irport_id->code_set > 3386 irport2->irport_id->code_set) { 3387 return (1); 3388 } 3389 3390 /* Next by ident length */ 3391 if (irport1->irport_id->ident_length < 3392 irport2->irport_id->ident_length) { 3393 return (-1); 3394 } else if (irport1->irport_id->ident_length > 3395 irport2->irport_id->ident_length) { 3396 return (1); 3397 } 3398 3399 /* Code set and ident length both match, now compare idents */ 3400 result = memcmp(irport1->irport_id->ident, 3401 irport2->irport_id->ident, 3402 irport1->irport_id->ident_length); 3403 3404 if (result < 0) { 3405 return (-1); 3406 } else if (result > 0) { 3407 return (1); 3408 } 3409 3410 return (0); 3411 } 3412 3413 static stmf_i_remote_port_t * 3414 stmf_irport_create(scsi_devid_desc_t *rport_devid) 3415 { 3416 int alloc_len; 3417 stmf_i_remote_port_t *irport; 3418 3419 /* 3420 * Lookup will bump the refcnt if there's an existing rport 3421 * context for this identifier. 3422 */ 3423 ASSERT(mutex_owned(&stmf_state.stmf_lock)); 3424 3425 alloc_len = sizeof (*irport) + sizeof (scsi_devid_desc_t) + 3426 rport_devid->ident_length - 1; 3427 irport = kmem_zalloc(alloc_len, KM_NOSLEEP); 3428 if (irport == NULL) { 3429 return (NULL); 3430 } 3431 3432 irport->irport_instance = 3433 id_alloc_nosleep(stmf_state.stmf_irport_inst_space); 3434 if (irport->irport_instance == -1) { 3435 kmem_free(irport, alloc_len); 3436 return (NULL); 3437 } 3438 3439 irport->irport_id = 3440 (struct scsi_devid_desc *)(irport + 1); /* Ptr. Arith. */ 3441 bcopy(rport_devid, irport->irport_id, 3442 sizeof (scsi_devid_desc_t) + rport_devid->ident_length - 1); 3443 irport->irport_refcnt = 1; 3444 mutex_init(&irport->irport_mutex, NULL, MUTEX_DEFAULT, NULL); 3445 3446 return (irport); 3447 } 3448 3449 static void 3450 stmf_irport_destroy(stmf_i_remote_port_t *irport) 3451 { 3452 id_free(stmf_state.stmf_irport_inst_space, irport->irport_instance); 3453 mutex_destroy(&irport->irport_mutex); 3454 kmem_free(irport, sizeof (*irport) + sizeof (scsi_devid_desc_t) + 3455 irport->irport_id->ident_length - 1); 3456 } 3457 3458 static stmf_i_remote_port_t * 3459 stmf_irport_register(scsi_devid_desc_t *rport_devid) 3460 { 3461 stmf_i_remote_port_t *irport; 3462 3463 mutex_enter(&stmf_state.stmf_lock); 3464 3465 /* 3466 * Lookup will bump the refcnt if there's an existing rport 3467 * context for this identifier. 3468 */ 3469 if ((irport = stmf_irport_lookup_locked(rport_devid)) != NULL) { 3470 mutex_exit(&stmf_state.stmf_lock); 3471 return (irport); 3472 } 3473 3474 irport = stmf_irport_create(rport_devid); 3475 if (irport == NULL) { 3476 mutex_exit(&stmf_state.stmf_lock); 3477 return (NULL); 3478 } 3479 3480 avl_add(&stmf_state.stmf_irportlist, irport); 3481 mutex_exit(&stmf_state.stmf_lock); 3482 3483 return (irport); 3484 } 3485 3486 static stmf_i_remote_port_t * 3487 stmf_irport_lookup_locked(scsi_devid_desc_t *rport_devid) 3488 { 3489 stmf_i_remote_port_t *irport; 3490 stmf_i_remote_port_t tmp_irport; 3491 3492 ASSERT(mutex_owned(&stmf_state.stmf_lock)); 3493 tmp_irport.irport_id = rport_devid; 3494 irport = avl_find(&stmf_state.stmf_irportlist, &tmp_irport, NULL); 3495 if (irport != NULL) { 3496 mutex_enter(&irport->irport_mutex); 3497 irport->irport_refcnt++; 3498 mutex_exit(&irport->irport_mutex); 3499 } 3500 3501 return (irport); 3502 } 3503 3504 static void 3505 stmf_irport_deregister(stmf_i_remote_port_t *irport) 3506 { 3507 /* 3508 * If we were actually going to remove unreferenced remote ports 3509 * we would want to acquire stmf_state.stmf_lock before getting 3510 * the irport mutex. 3511 * 3512 * Instead we're just going to leave it there even if unreferenced. 3513 */ 3514 mutex_enter(&irport->irport_mutex); 3515 irport->irport_refcnt--; 3516 mutex_exit(&irport->irport_mutex); 3517 } 3518 3519 /* 3520 * Port provider has to make sure that register/deregister session and 3521 * port are serialized calls. 3522 */ 3523 stmf_status_t 3524 stmf_register_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss) 3525 { 3526 stmf_i_scsi_session_t *iss; 3527 stmf_i_local_port_t *ilport = (stmf_i_local_port_t *) 3528 lport->lport_stmf_private; 3529 uint8_t lun[8]; 3530 3531 /* 3532 * Port state has to be online to register a scsi session. It is 3533 * possible that we started an offline operation and a new SCSI 3534 * session started at the same time (in that case also we are going 3535 * to fail the registeration). But any other state is simply 3536 * a bad port provider implementation. 3537 */ 3538 if (ilport->ilport_state != STMF_STATE_ONLINE) { 3539 if (ilport->ilport_state != STMF_STATE_OFFLINING) { 3540 stmf_trace(lport->lport_alias, "Port is trying to " 3541 "register a session while the state is neither " 3542 "online nor offlining"); 3543 } 3544 return (STMF_FAILURE); 3545 } 3546 bzero(lun, 8); 3547 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private; 3548 if ((iss->iss_irport = stmf_irport_register(ss->ss_rport_id)) == NULL) { 3549 stmf_trace(lport->lport_alias, "Could not register " 3550 "remote port during session registration"); 3551 return (STMF_FAILURE); 3552 } 3553 3554 iss->iss_flags |= ISS_BEING_CREATED; 3555 3556 if (ss->ss_rport == NULL) { 3557 iss->iss_flags |= ISS_NULL_TPTID; 3558 ss->ss_rport = stmf_scsilib_devid_to_remote_port( 3559 ss->ss_rport_id); 3560 if (ss->ss_rport == NULL) { 3561 iss->iss_flags &= ~(ISS_NULL_TPTID | ISS_BEING_CREATED); 3562 stmf_trace(lport->lport_alias, "Device id to " 3563 "remote port conversion failed"); 3564 return (STMF_FAILURE); 3565 } 3566 } else { 3567 if (!stmf_scsilib_tptid_validate(ss->ss_rport->rport_tptid, 3568 ss->ss_rport->rport_tptid_sz, NULL)) { 3569 iss->iss_flags &= ~ISS_BEING_CREATED; 3570 stmf_trace(lport->lport_alias, "Remote port " 3571 "transport id validation failed"); 3572 return (STMF_FAILURE); 3573 } 3574 } 3575 3576 /* sessions use the ilport_lock. No separate lock is required */ 3577 iss->iss_lockp = &ilport->ilport_lock; 3578 3579 if (iss->iss_sm != NULL) 3580 cmn_err(CE_PANIC, "create lun map called with non NULL map"); 3581 iss->iss_sm = (stmf_lun_map_t *)kmem_zalloc(sizeof (stmf_lun_map_t), 3582 KM_SLEEP); 3583 3584 mutex_enter(&stmf_state.stmf_lock); 3585 rw_enter(&ilport->ilport_lock, RW_WRITER); 3586 (void) stmf_session_create_lun_map(ilport, iss); 3587 ilport->ilport_nsessions++; 3588 iss->iss_next = ilport->ilport_ss_list; 3589 ilport->ilport_ss_list = iss; 3590 rw_exit(&ilport->ilport_lock); 3591 mutex_exit(&stmf_state.stmf_lock); 3592 3593 iss->iss_creation_time = ddi_get_time(); 3594 ss->ss_session_id = atomic_inc_64_nv(&stmf_session_counter); 3595 iss->iss_flags &= ~ISS_BEING_CREATED; 3596 /* XXX should we remove ISS_LUN_INVENTORY_CHANGED on new session? */ 3597 iss->iss_flags &= ~ISS_LUN_INVENTORY_CHANGED; 3598 DTRACE_PROBE2(session__online, stmf_local_port_t *, lport, 3599 stmf_scsi_session_t *, ss); 3600 return (STMF_SUCCESS); 3601 } 3602 3603 void 3604 stmf_deregister_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss) 3605 { 3606 stmf_i_local_port_t *ilport = (stmf_i_local_port_t *) 3607 lport->lport_stmf_private; 3608 stmf_i_scsi_session_t *iss, **ppss; 3609 int found = 0; 3610 stmf_ic_msg_t *ic_session_dereg; 3611 stmf_status_t ic_ret = STMF_FAILURE; 3612 3613 DTRACE_PROBE2(session__offline, stmf_local_port_t *, lport, 3614 stmf_scsi_session_t *, ss); 3615 3616 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private; 3617 if (ss->ss_rport_alias) { 3618 ss->ss_rport_alias = NULL; 3619 } 3620 3621 try_dereg_ss_again: 3622 mutex_enter(&stmf_state.stmf_lock); 3623 atomic_and_32(&iss->iss_flags, 3624 ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS)); 3625 if (iss->iss_flags & ISS_EVENT_ACTIVE) { 3626 mutex_exit(&stmf_state.stmf_lock); 3627 delay(1); 3628 goto try_dereg_ss_again; 3629 } 3630 3631 /* dereg proxy session if not standby port */ 3632 if (stmf_state.stmf_alua_state == 1 && 3633 ilport->ilport_standby == 0 && 3634 ilport->ilport_alua == 1) { 3635 ic_session_dereg = ic_session_dereg_msg_alloc( 3636 ss, stmf_proxy_msg_id); 3637 if (ic_session_dereg) { 3638 ic_ret = ic_tx_msg(ic_session_dereg); 3639 if (ic_ret == STMF_IC_MSG_SUCCESS) { 3640 stmf_proxy_msg_id++; 3641 } 3642 } 3643 } 3644 3645 rw_enter(&ilport->ilport_lock, RW_WRITER); 3646 for (ppss = &ilport->ilport_ss_list; *ppss != NULL; 3647 ppss = &((*ppss)->iss_next)) { 3648 if (iss == (*ppss)) { 3649 *ppss = (*ppss)->iss_next; 3650 found = 1; 3651 break; 3652 } 3653 } 3654 if (!found) { 3655 cmn_err(CE_PANIC, "Deregister session called for non existent" 3656 " session"); 3657 } 3658 ilport->ilport_nsessions--; 3659 3660 stmf_irport_deregister(iss->iss_irport); 3661 (void) stmf_session_destroy_lun_map(ilport, iss); 3662 rw_exit(&ilport->ilport_lock); 3663 mutex_exit(&stmf_state.stmf_lock); 3664 3665 if (iss->iss_flags & ISS_NULL_TPTID) { 3666 stmf_remote_port_free(ss->ss_rport); 3667 } 3668 } 3669 3670 stmf_i_scsi_session_t * 3671 stmf_session_id_to_issptr(uint64_t session_id, int stay_locked) 3672 { 3673 stmf_i_local_port_t *ilport; 3674 stmf_i_scsi_session_t *iss; 3675 3676 mutex_enter(&stmf_state.stmf_lock); 3677 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL; 3678 ilport = ilport->ilport_next) { 3679 rw_enter(&ilport->ilport_lock, RW_WRITER); 3680 for (iss = ilport->ilport_ss_list; iss != NULL; 3681 iss = iss->iss_next) { 3682 if (iss->iss_ss->ss_session_id == session_id) { 3683 if (!stay_locked) 3684 rw_exit(&ilport->ilport_lock); 3685 mutex_exit(&stmf_state.stmf_lock); 3686 return (iss); 3687 } 3688 } 3689 rw_exit(&ilport->ilport_lock); 3690 } 3691 mutex_exit(&stmf_state.stmf_lock); 3692 return (NULL); 3693 } 3694 3695 void 3696 stmf_release_itl_handle(stmf_lu_t *lu, stmf_itl_data_t *itl) 3697 { 3698 stmf_itl_data_t **itlpp; 3699 stmf_i_lu_t *ilu; 3700 3701 ASSERT(itl->itl_flags & STMF_ITL_BEING_TERMINATED); 3702 3703 ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 3704 mutex_enter(&ilu->ilu_task_lock); 3705 for (itlpp = &ilu->ilu_itl_list; (*itlpp) != NULL; 3706 itlpp = &(*itlpp)->itl_next) { 3707 if ((*itlpp) == itl) 3708 break; 3709 } 3710 ASSERT((*itlpp) != NULL); 3711 *itlpp = itl->itl_next; 3712 mutex_exit(&ilu->ilu_task_lock); 3713 lu->lu_abort(lu, STMF_LU_ITL_HANDLE_REMOVED, itl->itl_handle, 3714 (uint32_t)itl->itl_hdlrm_reason); 3715 3716 kmem_free(itl, sizeof (*itl)); 3717 } 3718 3719 stmf_status_t 3720 stmf_register_itl_handle(stmf_lu_t *lu, uint8_t *lun, 3721 stmf_scsi_session_t *ss, uint64_t session_id, void *itl_handle) 3722 { 3723 stmf_itl_data_t *itl; 3724 stmf_i_scsi_session_t *iss; 3725 stmf_lun_map_ent_t *lun_map_ent; 3726 stmf_i_lu_t *ilu; 3727 uint16_t n; 3728 3729 ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 3730 if (ss == NULL) { 3731 iss = stmf_session_id_to_issptr(session_id, 1); 3732 if (iss == NULL) 3733 return (STMF_NOT_FOUND); 3734 } else { 3735 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private; 3736 } 3737 3738 mutex_enter(&stmf_state.stmf_lock); 3739 rw_enter(iss->iss_lockp, RW_WRITER); 3740 n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8)); 3741 lun_map_ent = (stmf_lun_map_ent_t *) 3742 stmf_get_ent_from_map(iss->iss_sm, n); 3743 if ((lun_map_ent == NULL) || (lun_map_ent->ent_lu != lu)) { 3744 rw_exit(iss->iss_lockp); 3745 mutex_exit(&stmf_state.stmf_lock); 3746 return (STMF_NOT_FOUND); 3747 } 3748 if (lun_map_ent->ent_itl_datap != NULL) { 3749 rw_exit(iss->iss_lockp); 3750 mutex_exit(&stmf_state.stmf_lock); 3751 return (STMF_ALREADY); 3752 } 3753 3754 itl = (stmf_itl_data_t *)kmem_zalloc(sizeof (*itl), KM_NOSLEEP); 3755 if (itl == NULL) { 3756 rw_exit(iss->iss_lockp); 3757 mutex_exit(&stmf_state.stmf_lock); 3758 return (STMF_ALLOC_FAILURE); 3759 } 3760 3761 itl->itl_ilu = ilu; 3762 itl->itl_session = iss; 3763 itl->itl_counter = 1; 3764 itl->itl_lun = n; 3765 itl->itl_handle = itl_handle; 3766 3767 mutex_enter(&ilu->ilu_task_lock); 3768 itl->itl_next = ilu->ilu_itl_list; 3769 ilu->ilu_itl_list = itl; 3770 mutex_exit(&ilu->ilu_task_lock); 3771 lun_map_ent->ent_itl_datap = itl; 3772 rw_exit(iss->iss_lockp); 3773 mutex_exit(&stmf_state.stmf_lock); 3774 3775 return (STMF_SUCCESS); 3776 } 3777 3778 void 3779 stmf_do_itl_dereg(stmf_lu_t *lu, stmf_itl_data_t *itl, uint8_t hdlrm_reason) 3780 { 3781 uint8_t old, new; 3782 3783 do { 3784 old = new = itl->itl_flags; 3785 if (old & STMF_ITL_BEING_TERMINATED) 3786 return; 3787 new |= STMF_ITL_BEING_TERMINATED; 3788 } while (atomic_cas_8(&itl->itl_flags, old, new) != old); 3789 itl->itl_hdlrm_reason = hdlrm_reason; 3790 3791 ASSERT(itl->itl_counter); 3792 3793 if (atomic_dec_32_nv(&itl->itl_counter)) 3794 return; 3795 3796 stmf_release_itl_handle(lu, itl); 3797 } 3798 3799 stmf_status_t 3800 stmf_deregister_all_lu_itl_handles(stmf_lu_t *lu) 3801 { 3802 stmf_i_lu_t *ilu; 3803 stmf_i_local_port_t *ilport; 3804 stmf_i_scsi_session_t *iss; 3805 stmf_lun_map_t *lm; 3806 stmf_lun_map_ent_t *ent; 3807 uint32_t nmaps, nu; 3808 stmf_itl_data_t **itl_list; 3809 int i; 3810 3811 ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 3812 3813 dereg_itl_start:; 3814 nmaps = ilu->ilu_ref_cnt; 3815 if (nmaps == 0) 3816 return (STMF_NOT_FOUND); 3817 itl_list = (stmf_itl_data_t **)kmem_zalloc( 3818 nmaps * sizeof (stmf_itl_data_t *), KM_SLEEP); 3819 mutex_enter(&stmf_state.stmf_lock); 3820 if (nmaps != ilu->ilu_ref_cnt) { 3821 /* Something changed, start all over */ 3822 mutex_exit(&stmf_state.stmf_lock); 3823 kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *)); 3824 goto dereg_itl_start; 3825 } 3826 nu = 0; 3827 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL; 3828 ilport = ilport->ilport_next) { 3829 rw_enter(&ilport->ilport_lock, RW_WRITER); 3830 for (iss = ilport->ilport_ss_list; iss != NULL; 3831 iss = iss->iss_next) { 3832 lm = iss->iss_sm; 3833 if (!lm) 3834 continue; 3835 for (i = 0; i < lm->lm_nentries; i++) { 3836 if (lm->lm_plus[i] == NULL) 3837 continue; 3838 ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 3839 if ((ent->ent_lu == lu) && 3840 (ent->ent_itl_datap)) { 3841 itl_list[nu++] = ent->ent_itl_datap; 3842 ent->ent_itl_datap = NULL; 3843 if (nu == nmaps) { 3844 rw_exit(&ilport->ilport_lock); 3845 goto dai_scan_done; 3846 } 3847 } 3848 } /* lun table for a session */ 3849 } /* sessions */ 3850 rw_exit(&ilport->ilport_lock); 3851 } /* ports */ 3852 3853 dai_scan_done: 3854 mutex_exit(&stmf_state.stmf_lock); 3855 3856 for (i = 0; i < nu; i++) { 3857 stmf_do_itl_dereg(lu, itl_list[i], 3858 STMF_ITL_REASON_DEREG_REQUEST); 3859 } 3860 kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *)); 3861 3862 return (STMF_SUCCESS); 3863 } 3864 3865 stmf_status_t 3866 stmf_get_itl_handle(stmf_lu_t *lu, uint8_t *lun, stmf_scsi_session_t *ss, 3867 uint64_t session_id, void **itl_handle_retp) 3868 { 3869 stmf_i_scsi_session_t *iss; 3870 stmf_lun_map_ent_t *ent; 3871 stmf_lun_map_t *lm; 3872 stmf_status_t ret; 3873 int i; 3874 uint16_t n; 3875 3876 if (ss == NULL) { 3877 iss = stmf_session_id_to_issptr(session_id, 1); 3878 if (iss == NULL) 3879 return (STMF_NOT_FOUND); 3880 } else { 3881 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private; 3882 rw_enter(iss->iss_lockp, RW_WRITER); 3883 } 3884 3885 ent = NULL; 3886 if (lun == NULL) { 3887 lm = iss->iss_sm; 3888 for (i = 0; i < lm->lm_nentries; i++) { 3889 if (lm->lm_plus[i] == NULL) 3890 continue; 3891 ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 3892 if (ent->ent_lu == lu) 3893 break; 3894 } 3895 } else { 3896 n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8)); 3897 ent = (stmf_lun_map_ent_t *) 3898 stmf_get_ent_from_map(iss->iss_sm, n); 3899 if (lu && (ent->ent_lu != lu)) 3900 ent = NULL; 3901 } 3902 if (ent && ent->ent_itl_datap) { 3903 *itl_handle_retp = ent->ent_itl_datap->itl_handle; 3904 ret = STMF_SUCCESS; 3905 } else { 3906 ret = STMF_NOT_FOUND; 3907 } 3908 3909 rw_exit(iss->iss_lockp); 3910 return (ret); 3911 } 3912 3913 stmf_data_buf_t * 3914 stmf_alloc_dbuf(scsi_task_t *task, uint32_t size, uint32_t *pminsize, 3915 uint32_t flags) 3916 { 3917 stmf_i_scsi_task_t *itask = 3918 (stmf_i_scsi_task_t *)task->task_stmf_private; 3919 stmf_local_port_t *lport = task->task_lport; 3920 stmf_data_buf_t *dbuf; 3921 uint8_t ndx; 3922 3923 ndx = stmf_first_zero[itask->itask_allocated_buf_map]; 3924 if (ndx == 0xff) 3925 return (NULL); 3926 dbuf = itask->itask_dbufs[ndx] = lport->lport_ds->ds_alloc_data_buf( 3927 task, size, pminsize, flags); 3928 if (dbuf) { 3929 task->task_cur_nbufs++; 3930 itask->itask_allocated_buf_map |= (1 << ndx); 3931 dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE; 3932 dbuf->db_handle = ndx; 3933 return (dbuf); 3934 } 3935 3936 return (NULL); 3937 } 3938 3939 stmf_status_t 3940 stmf_setup_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t flags) 3941 { 3942 stmf_i_scsi_task_t *itask = 3943 (stmf_i_scsi_task_t *)task->task_stmf_private; 3944 stmf_local_port_t *lport = task->task_lport; 3945 uint8_t ndx; 3946 stmf_status_t ret; 3947 3948 ASSERT(task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF); 3949 ASSERT(lport->lport_ds->ds_setup_dbuf != NULL); 3950 ASSERT(dbuf->db_flags & DB_LU_DATA_BUF); 3951 3952 if ((task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF) == 0) 3953 return (STMF_FAILURE); 3954 if (lport->lport_ds->ds_setup_dbuf == NULL) 3955 return (STMF_FAILURE); 3956 3957 ndx = stmf_first_zero[itask->itask_allocated_buf_map]; 3958 if (ndx == 0xff) 3959 return (STMF_FAILURE); 3960 ret = lport->lport_ds->ds_setup_dbuf(task, dbuf, flags); 3961 if (ret == STMF_FAILURE) 3962 return (STMF_FAILURE); 3963 itask->itask_dbufs[ndx] = dbuf; 3964 task->task_cur_nbufs++; 3965 itask->itask_allocated_buf_map |= (1 << ndx); 3966 dbuf->db_handle = ndx; 3967 3968 return (STMF_SUCCESS); 3969 } 3970 3971 void 3972 stmf_teardown_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf) 3973 { 3974 stmf_i_scsi_task_t *itask = 3975 (stmf_i_scsi_task_t *)task->task_stmf_private; 3976 stmf_local_port_t *lport = task->task_lport; 3977 3978 ASSERT(task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF); 3979 ASSERT(lport->lport_ds->ds_teardown_dbuf != NULL); 3980 ASSERT(dbuf->db_flags & DB_LU_DATA_BUF); 3981 3982 itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle); 3983 task->task_cur_nbufs--; 3984 lport->lport_ds->ds_teardown_dbuf(lport->lport_ds, dbuf); 3985 } 3986 3987 void 3988 stmf_free_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf) 3989 { 3990 stmf_i_scsi_task_t *itask = 3991 (stmf_i_scsi_task_t *)task->task_stmf_private; 3992 stmf_local_port_t *lport = task->task_lport; 3993 3994 itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle); 3995 task->task_cur_nbufs--; 3996 lport->lport_ds->ds_free_data_buf(lport->lport_ds, dbuf); 3997 } 3998 3999 stmf_data_buf_t * 4000 stmf_handle_to_buf(scsi_task_t *task, uint8_t h) 4001 { 4002 stmf_i_scsi_task_t *itask; 4003 4004 itask = (stmf_i_scsi_task_t *)task->task_stmf_private; 4005 if (h > 3) 4006 return (NULL); 4007 return (itask->itask_dbufs[h]); 4008 } 4009 4010 /* ARGSUSED */ 4011 struct scsi_task * 4012 stmf_task_alloc(struct stmf_local_port *lport, stmf_scsi_session_t *ss, 4013 uint8_t *lun, uint16_t cdb_length_in, uint16_t ext_id) 4014 { 4015 stmf_lu_t *lu; 4016 stmf_i_scsi_session_t *iss; 4017 stmf_i_lu_t *ilu; 4018 stmf_i_scsi_task_t *itask; 4019 stmf_i_scsi_task_t **ppitask; 4020 scsi_task_t *task; 4021 uint8_t *l; 4022 stmf_lun_map_ent_t *lun_map_ent; 4023 uint16_t cdb_length; 4024 uint16_t luNbr; 4025 uint8_t new_task = 0; 4026 4027 /* 4028 * We allocate 7 extra bytes for CDB to provide a cdb pointer which 4029 * is guaranteed to be 8 byte aligned. Some LU providers like OSD 4030 * depend upon this alignment. 4031 */ 4032 if (cdb_length_in >= 16) 4033 cdb_length = cdb_length_in + 7; 4034 else 4035 cdb_length = 16 + 7; 4036 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private; 4037 luNbr = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8)); 4038 rw_enter(iss->iss_lockp, RW_READER); 4039 lun_map_ent = 4040 (stmf_lun_map_ent_t *)stmf_get_ent_from_map(iss->iss_sm, luNbr); 4041 if (!lun_map_ent) { 4042 lu = dlun0; 4043 } else { 4044 lu = lun_map_ent->ent_lu; 4045 } 4046 ilu = lu->lu_stmf_private; 4047 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 4048 rw_exit(iss->iss_lockp); 4049 return (NULL); 4050 } 4051 ASSERT(lu == dlun0 || (ilu->ilu_state != STMF_STATE_OFFLINING && 4052 ilu->ilu_state != STMF_STATE_OFFLINE)); 4053 do { 4054 if (ilu->ilu_free_tasks == NULL) { 4055 new_task = 1; 4056 break; 4057 } 4058 mutex_enter(&ilu->ilu_task_lock); 4059 for (ppitask = &ilu->ilu_free_tasks; (*ppitask != NULL) && 4060 ((*ppitask)->itask_cdb_buf_size < cdb_length); 4061 ppitask = &((*ppitask)->itask_lu_free_next)) 4062 ; 4063 if (*ppitask) { 4064 itask = *ppitask; 4065 *ppitask = (*ppitask)->itask_lu_free_next; 4066 ilu->ilu_ntasks_free--; 4067 if (ilu->ilu_ntasks_free < ilu->ilu_ntasks_min_free) 4068 ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free; 4069 } else { 4070 new_task = 1; 4071 } 4072 mutex_exit(&ilu->ilu_task_lock); 4073 /* CONSTCOND */ 4074 } while (0); 4075 4076 if (!new_task) { 4077 /* 4078 * Save the task_cdb pointer and zero per cmd fields. 4079 * We know the task_cdb_length is large enough by task 4080 * selection process above. 4081 */ 4082 uint8_t *save_cdb; 4083 uintptr_t t_start, t_end; 4084 4085 task = itask->itask_task; 4086 save_cdb = task->task_cdb; /* save */ 4087 t_start = (uintptr_t)&task->task_flags; 4088 t_end = (uintptr_t)&task->task_extended_cmd; 4089 bzero((void *)t_start, (size_t)(t_end - t_start)); 4090 task->task_cdb = save_cdb; /* restore */ 4091 itask->itask_ncmds = 0; 4092 } else { 4093 task = (scsi_task_t *)stmf_alloc(STMF_STRUCT_SCSI_TASK, 4094 cdb_length, AF_FORCE_NOSLEEP); 4095 if (task == NULL) { 4096 rw_exit(iss->iss_lockp); 4097 return (NULL); 4098 } 4099 task->task_lu = lu; 4100 l = task->task_lun_no; 4101 l[0] = lun[0]; 4102 l[1] = lun[1]; 4103 l[2] = lun[2]; 4104 l[3] = lun[3]; 4105 l[4] = lun[4]; 4106 l[5] = lun[5]; 4107 l[6] = lun[6]; 4108 l[7] = lun[7]; 4109 task->task_cdb = (uint8_t *)task->task_port_private; 4110 if ((ulong_t)(task->task_cdb) & 7ul) { 4111 task->task_cdb = (uint8_t *)(((ulong_t) 4112 (task->task_cdb) + 7ul) & ~(7ul)); 4113 } 4114 itask = (stmf_i_scsi_task_t *)task->task_stmf_private; 4115 itask->itask_cdb_buf_size = cdb_length; 4116 mutex_init(&itask->itask_audit_mutex, NULL, MUTEX_DRIVER, NULL); 4117 } 4118 task->task_session = ss; 4119 task->task_lport = lport; 4120 task->task_cdb_length = cdb_length_in; 4121 itask->itask_flags = ITASK_IN_TRANSITION; 4122 itask->itask_waitq_time = 0; 4123 itask->itask_lu_read_time = itask->itask_lu_write_time = 0; 4124 itask->itask_lport_read_time = itask->itask_lport_write_time = 0; 4125 itask->itask_read_xfer = itask->itask_write_xfer = 0; 4126 itask->itask_audit_index = 0; 4127 4128 if (new_task) { 4129 if (lu->lu_task_alloc(task) != STMF_SUCCESS) { 4130 rw_exit(iss->iss_lockp); 4131 stmf_free(task); 4132 return (NULL); 4133 } 4134 mutex_enter(&ilu->ilu_task_lock); 4135 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 4136 mutex_exit(&ilu->ilu_task_lock); 4137 rw_exit(iss->iss_lockp); 4138 stmf_free(task); 4139 return (NULL); 4140 } 4141 itask->itask_lu_next = ilu->ilu_tasks; 4142 if (ilu->ilu_tasks) 4143 ilu->ilu_tasks->itask_lu_prev = itask; 4144 ilu->ilu_tasks = itask; 4145 /* kmem_zalloc automatically makes itask->itask_lu_prev NULL */ 4146 ilu->ilu_ntasks++; 4147 mutex_exit(&ilu->ilu_task_lock); 4148 } 4149 4150 itask->itask_ilu_task_cntr = ilu->ilu_cur_task_cntr; 4151 atomic_inc_32(itask->itask_ilu_task_cntr); 4152 itask->itask_start_time = ddi_get_lbolt(); 4153 4154 if ((lun_map_ent != NULL) && ((itask->itask_itl_datap = 4155 lun_map_ent->ent_itl_datap) != NULL)) { 4156 atomic_inc_32(&itask->itask_itl_datap->itl_counter); 4157 task->task_lu_itl_handle = itask->itask_itl_datap->itl_handle; 4158 } else { 4159 itask->itask_itl_datap = NULL; 4160 task->task_lu_itl_handle = NULL; 4161 } 4162 4163 rw_exit(iss->iss_lockp); 4164 return (task); 4165 } 4166 4167 static void 4168 stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss) 4169 { 4170 stmf_i_scsi_task_t *itask = 4171 (stmf_i_scsi_task_t *)task->task_stmf_private; 4172 stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 4173 4174 ASSERT(rw_lock_held(iss->iss_lockp)); 4175 itask->itask_flags = ITASK_IN_FREE_LIST; 4176 itask->itask_proxy_msg_id = 0; 4177 mutex_enter(&ilu->ilu_task_lock); 4178 itask->itask_lu_free_next = ilu->ilu_free_tasks; 4179 ilu->ilu_free_tasks = itask; 4180 ilu->ilu_ntasks_free++; 4181 if (ilu->ilu_ntasks == ilu->ilu_ntasks_free) 4182 cv_signal(&ilu->ilu_offline_pending_cv); 4183 mutex_exit(&ilu->ilu_task_lock); 4184 atomic_dec_32(itask->itask_ilu_task_cntr); 4185 } 4186 4187 void 4188 stmf_task_lu_check_freelist(stmf_i_lu_t *ilu) 4189 { 4190 uint32_t num_to_release, ndx; 4191 stmf_i_scsi_task_t *itask; 4192 stmf_lu_t *lu = ilu->ilu_lu; 4193 4194 ASSERT(ilu->ilu_ntasks_min_free <= ilu->ilu_ntasks_free); 4195 4196 /* free half of the minimal free of the free tasks */ 4197 num_to_release = (ilu->ilu_ntasks_min_free + 1) / 2; 4198 if (!num_to_release) { 4199 return; 4200 } 4201 for (ndx = 0; ndx < num_to_release; ndx++) { 4202 mutex_enter(&ilu->ilu_task_lock); 4203 itask = ilu->ilu_free_tasks; 4204 if (itask == NULL) { 4205 mutex_exit(&ilu->ilu_task_lock); 4206 break; 4207 } 4208 ilu->ilu_free_tasks = itask->itask_lu_free_next; 4209 ilu->ilu_ntasks_free--; 4210 mutex_exit(&ilu->ilu_task_lock); 4211 4212 lu->lu_task_free(itask->itask_task); 4213 mutex_enter(&ilu->ilu_task_lock); 4214 if (itask->itask_lu_next) 4215 itask->itask_lu_next->itask_lu_prev = 4216 itask->itask_lu_prev; 4217 if (itask->itask_lu_prev) 4218 itask->itask_lu_prev->itask_lu_next = 4219 itask->itask_lu_next; 4220 else 4221 ilu->ilu_tasks = itask->itask_lu_next; 4222 4223 ilu->ilu_ntasks--; 4224 mutex_exit(&ilu->ilu_task_lock); 4225 stmf_free(itask->itask_task); 4226 } 4227 } 4228 4229 /* 4230 * Called with stmf_lock held 4231 */ 4232 void 4233 stmf_check_freetask() 4234 { 4235 stmf_i_lu_t *ilu; 4236 clock_t endtime = ddi_get_lbolt() + drv_usectohz(10000); 4237 4238 /* stmf_svc_ilu_draining may get changed after stmf_lock is released */ 4239 while ((ilu = stmf_state.stmf_svc_ilu_draining) != NULL) { 4240 stmf_state.stmf_svc_ilu_draining = ilu->ilu_next; 4241 if (!ilu->ilu_ntasks_min_free) { 4242 ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free; 4243 continue; 4244 } 4245 ilu->ilu_flags |= ILU_STALL_DEREGISTER; 4246 mutex_exit(&stmf_state.stmf_lock); 4247 stmf_task_lu_check_freelist(ilu); 4248 /* 4249 * we do not care about the accuracy of 4250 * ilu_ntasks_min_free, so we don't lock here 4251 */ 4252 ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free; 4253 mutex_enter(&stmf_state.stmf_lock); 4254 ilu->ilu_flags &= ~ILU_STALL_DEREGISTER; 4255 cv_broadcast(&stmf_state.stmf_cv); 4256 if (ddi_get_lbolt() >= endtime) 4257 break; 4258 } 4259 } 4260 4261 void 4262 stmf_do_ilu_timeouts(stmf_i_lu_t *ilu) 4263 { 4264 clock_t l = ddi_get_lbolt(); 4265 clock_t ps = drv_usectohz(1000000); 4266 stmf_i_scsi_task_t *itask; 4267 scsi_task_t *task; 4268 uint32_t to; 4269 4270 mutex_enter(&ilu->ilu_task_lock); 4271 for (itask = ilu->ilu_tasks; itask != NULL; 4272 itask = itask->itask_lu_next) { 4273 if (itask->itask_flags & (ITASK_IN_FREE_LIST | 4274 ITASK_BEING_ABORTED)) { 4275 continue; 4276 } 4277 task = itask->itask_task; 4278 if (task->task_timeout == 0) 4279 to = stmf_default_task_timeout; 4280 else 4281 to = task->task_timeout; 4282 if ((itask->itask_start_time + (to * ps)) > l) 4283 continue; 4284 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 4285 STMF_TIMEOUT, NULL); 4286 } 4287 mutex_exit(&ilu->ilu_task_lock); 4288 } 4289 4290 /* 4291 * Called with stmf_lock held 4292 */ 4293 void 4294 stmf_check_ilu_timing() 4295 { 4296 stmf_i_lu_t *ilu; 4297 clock_t endtime = ddi_get_lbolt() + drv_usectohz(10000); 4298 4299 /* stmf_svc_ilu_timing may get changed after stmf_lock is released */ 4300 while ((ilu = stmf_state.stmf_svc_ilu_timing) != NULL) { 4301 stmf_state.stmf_svc_ilu_timing = ilu->ilu_next; 4302 if (ilu->ilu_cur_task_cntr == (&ilu->ilu_task_cntr1)) { 4303 if (ilu->ilu_task_cntr2 == 0) { 4304 ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr2; 4305 continue; 4306 } 4307 } else { 4308 if (ilu->ilu_task_cntr1 == 0) { 4309 ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1; 4310 continue; 4311 } 4312 } 4313 /* 4314 * If we are here then it means that there is some slowdown 4315 * in tasks on this lu. We need to check. 4316 */ 4317 ilu->ilu_flags |= ILU_STALL_DEREGISTER; 4318 mutex_exit(&stmf_state.stmf_lock); 4319 stmf_do_ilu_timeouts(ilu); 4320 mutex_enter(&stmf_state.stmf_lock); 4321 ilu->ilu_flags &= ~ILU_STALL_DEREGISTER; 4322 cv_broadcast(&stmf_state.stmf_cv); 4323 if (ddi_get_lbolt() >= endtime) 4324 break; 4325 } 4326 } 4327 4328 /* 4329 * Kills all tasks on a lu except tm_task 4330 */ 4331 void 4332 stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s) 4333 { 4334 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 4335 stmf_i_scsi_task_t *itask; 4336 4337 mutex_enter(&ilu->ilu_task_lock); 4338 4339 for (itask = ilu->ilu_tasks; itask != NULL; 4340 itask = itask->itask_lu_next) { 4341 if (itask->itask_flags & ITASK_IN_FREE_LIST) 4342 continue; 4343 if (itask->itask_task == tm_task) 4344 continue; 4345 stmf_abort(STMF_QUEUE_TASK_ABORT, itask->itask_task, s, NULL); 4346 } 4347 mutex_exit(&ilu->ilu_task_lock); 4348 } 4349 4350 void 4351 stmf_free_task_bufs(stmf_i_scsi_task_t *itask, stmf_local_port_t *lport) 4352 { 4353 int i; 4354 uint8_t map; 4355 4356 if ((map = itask->itask_allocated_buf_map) == 0) 4357 return; 4358 for (i = 0; i < 4; i++) { 4359 if (map & 1) { 4360 stmf_data_buf_t *dbuf; 4361 4362 dbuf = itask->itask_dbufs[i]; 4363 if (dbuf->db_xfer_start_timestamp) { 4364 stmf_lport_xfer_done(itask, dbuf); 4365 } 4366 if (dbuf->db_flags & DB_LU_DATA_BUF) { 4367 /* 4368 * LU needs to clean up buffer. 4369 * LU is required to free the buffer 4370 * in the xfer_done handler. 4371 */ 4372 scsi_task_t *task = itask->itask_task; 4373 stmf_lu_t *lu = task->task_lu; 4374 4375 lu->lu_dbuf_free(task, dbuf); 4376 ASSERT(((itask->itask_allocated_buf_map>>i) 4377 & 1) == 0); /* must be gone */ 4378 } else { 4379 ASSERT(dbuf->db_lu_private == NULL); 4380 dbuf->db_lu_private = NULL; 4381 lport->lport_ds->ds_free_data_buf( 4382 lport->lport_ds, dbuf); 4383 } 4384 } 4385 map >>= 1; 4386 } 4387 itask->itask_allocated_buf_map = 0; 4388 } 4389 4390 void 4391 stmf_task_free(scsi_task_t *task) 4392 { 4393 stmf_local_port_t *lport = task->task_lport; 4394 stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *) 4395 task->task_stmf_private; 4396 stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *) 4397 task->task_session->ss_stmf_private; 4398 4399 stmf_task_audit(itask, TE_TASK_FREE, CMD_OR_IOF_NA, NULL); 4400 4401 stmf_free_task_bufs(itask, lport); 4402 stmf_itl_task_done(itask); 4403 DTRACE_PROBE2(stmf__task__end, scsi_task_t *, task, 4404 hrtime_t, 4405 itask->itask_done_timestamp - itask->itask_start_timestamp); 4406 if (itask->itask_itl_datap) { 4407 if (atomic_dec_32_nv(&itask->itask_itl_datap->itl_counter) == 4408 0) { 4409 stmf_release_itl_handle(task->task_lu, 4410 itask->itask_itl_datap); 4411 } 4412 } 4413 4414 rw_enter(iss->iss_lockp, RW_READER); 4415 lport->lport_task_free(task); 4416 if (itask->itask_worker) { 4417 atomic_dec_32(&stmf_cur_ntasks); 4418 atomic_dec_32(&itask->itask_worker->worker_ref_count); 4419 } 4420 /* 4421 * After calling stmf_task_lu_free, the task pointer can no longer 4422 * be trusted. 4423 */ 4424 stmf_task_lu_free(task, iss); 4425 rw_exit(iss->iss_lockp); 4426 } 4427 4428 void 4429 stmf_post_task(scsi_task_t *task, stmf_data_buf_t *dbuf) 4430 { 4431 stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *) 4432 task->task_stmf_private; 4433 stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 4434 int nv; 4435 uint32_t old, new; 4436 uint32_t ct; 4437 stmf_worker_t *w, *w1; 4438 uint8_t tm; 4439 4440 if (task->task_max_nbufs > 4) 4441 task->task_max_nbufs = 4; 4442 task->task_cur_nbufs = 0; 4443 /* Latest value of currently running tasks */ 4444 ct = atomic_inc_32_nv(&stmf_cur_ntasks); 4445 4446 /* Select the next worker using round robin */ 4447 nv = (int)atomic_inc_32_nv((uint32_t *)&stmf_worker_sel_counter); 4448 if (nv >= stmf_nworkers_accepting_cmds) { 4449 int s = nv; 4450 do { 4451 nv -= stmf_nworkers_accepting_cmds; 4452 } while (nv >= stmf_nworkers_accepting_cmds); 4453 if (nv < 0) 4454 nv = 0; 4455 /* Its ok if this cas fails */ 4456 (void) atomic_cas_32((uint32_t *)&stmf_worker_sel_counter, 4457 s, nv); 4458 } 4459 w = &stmf_workers[nv]; 4460 4461 /* 4462 * A worker can be pinned by interrupt. So select the next one 4463 * if it has lower load. 4464 */ 4465 if ((nv + 1) >= stmf_nworkers_accepting_cmds) { 4466 w1 = stmf_workers; 4467 } else { 4468 w1 = &stmf_workers[nv + 1]; 4469 } 4470 if (w1->worker_queue_depth < w->worker_queue_depth) 4471 w = w1; 4472 4473 mutex_enter(&w->worker_lock); 4474 if (((w->worker_flags & STMF_WORKER_STARTED) == 0) || 4475 (w->worker_flags & STMF_WORKER_TERMINATE)) { 4476 /* 4477 * Maybe we are in the middle of a change. Just go to 4478 * the 1st worker. 4479 */ 4480 mutex_exit(&w->worker_lock); 4481 w = stmf_workers; 4482 mutex_enter(&w->worker_lock); 4483 } 4484 itask->itask_worker = w; 4485 /* 4486 * Track max system load inside the worker as we already have the 4487 * worker lock (no point implementing another lock). The service 4488 * thread will do the comparisons and figure out the max overall 4489 * system load. 4490 */ 4491 if (w->worker_max_sys_qdepth_pu < ct) 4492 w->worker_max_sys_qdepth_pu = ct; 4493 4494 do { 4495 old = new = itask->itask_flags; 4496 new |= ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE; 4497 if (task->task_mgmt_function) { 4498 tm = task->task_mgmt_function; 4499 if ((tm == TM_TARGET_RESET) || 4500 (tm == TM_TARGET_COLD_RESET) || 4501 (tm == TM_TARGET_WARM_RESET)) { 4502 new |= ITASK_DEFAULT_HANDLING; 4503 } 4504 } else if (task->task_cdb[0] == SCMD_REPORT_LUNS) { 4505 new |= ITASK_DEFAULT_HANDLING; 4506 } 4507 new &= ~ITASK_IN_TRANSITION; 4508 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4509 4510 stmf_itl_task_start(itask); 4511 4512 itask->itask_worker_next = NULL; 4513 if (w->worker_task_tail) { 4514 w->worker_task_tail->itask_worker_next = itask; 4515 } else { 4516 w->worker_task_head = itask; 4517 } 4518 w->worker_task_tail = itask; 4519 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) { 4520 w->worker_max_qdepth_pu = w->worker_queue_depth; 4521 } 4522 /* Measure task waitq time */ 4523 itask->itask_waitq_enter_timestamp = gethrtime(); 4524 atomic_inc_32(&w->worker_ref_count); 4525 itask->itask_cmd_stack[0] = ITASK_CMD_NEW_TASK; 4526 itask->itask_ncmds = 1; 4527 stmf_task_audit(itask, TE_TASK_START, CMD_OR_IOF_NA, dbuf); 4528 if (dbuf) { 4529 itask->itask_allocated_buf_map = 1; 4530 itask->itask_dbufs[0] = dbuf; 4531 dbuf->db_handle = 0; 4532 } else { 4533 itask->itask_allocated_buf_map = 0; 4534 itask->itask_dbufs[0] = NULL; 4535 } 4536 4537 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) { 4538 w->worker_signal_timestamp = gethrtime(); 4539 DTRACE_PROBE2(worker__signal, stmf_worker_t *, w, 4540 scsi_task_t *, task); 4541 cv_signal(&w->worker_cv); 4542 } 4543 mutex_exit(&w->worker_lock); 4544 4545 /* 4546 * This can only happen if during stmf_task_alloc(), ILU_RESET_ACTIVE 4547 * was set between checking of ILU_RESET_ACTIVE and clearing of the 4548 * ITASK_IN_FREE_LIST flag. Take care of these "sneaked-in" tasks here. 4549 */ 4550 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 4551 stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ABORTED, NULL); 4552 } 4553 } 4554 4555 static void 4556 stmf_task_audit(stmf_i_scsi_task_t *itask, 4557 task_audit_event_t te, uint32_t cmd_or_iof, stmf_data_buf_t *dbuf) 4558 { 4559 stmf_task_audit_rec_t *ar; 4560 4561 mutex_enter(&itask->itask_audit_mutex); 4562 ar = &itask->itask_audit_records[itask->itask_audit_index++]; 4563 itask->itask_audit_index &= (ITASK_TASK_AUDIT_DEPTH - 1); 4564 ar->ta_event = te; 4565 ar->ta_cmd_or_iof = cmd_or_iof; 4566 ar->ta_itask_flags = itask->itask_flags; 4567 ar->ta_dbuf = dbuf; 4568 gethrestime(&ar->ta_timestamp); 4569 mutex_exit(&itask->itask_audit_mutex); 4570 } 4571 4572 4573 /* 4574 * ++++++++++++++ ABORT LOGIC ++++++++++++++++++++ 4575 * Once ITASK_BEING_ABORTED is set, ITASK_KNOWN_TO_LU can be reset already 4576 * i.e. before ITASK_BEING_ABORTED being set. But if it was not, it cannot 4577 * be reset until the LU explicitly calls stmf_task_lu_aborted(). Of course 4578 * the LU will make this call only if we call the LU's abort entry point. 4579 * we will only call that entry point if ITASK_KNOWN_TO_LU was set. 4580 * 4581 * Same logic applies for the port. 4582 * 4583 * Also ITASK_BEING_ABORTED will not be allowed to set if both KNOWN_TO_LU 4584 * and KNOWN_TO_TGT_PORT are reset. 4585 * 4586 * +++++++++++++++++++++++++++++++++++++++++++++++ 4587 */ 4588 4589 stmf_status_t 4590 stmf_xfer_data(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t ioflags) 4591 { 4592 stmf_status_t ret = STMF_SUCCESS; 4593 4594 stmf_i_scsi_task_t *itask = 4595 (stmf_i_scsi_task_t *)task->task_stmf_private; 4596 4597 stmf_task_audit(itask, TE_XFER_START, ioflags, dbuf); 4598 4599 if (ioflags & STMF_IOF_LU_DONE) { 4600 uint32_t new, old; 4601 do { 4602 new = old = itask->itask_flags; 4603 if (new & ITASK_BEING_ABORTED) 4604 return (STMF_ABORTED); 4605 new &= ~ITASK_KNOWN_TO_LU; 4606 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4607 } 4608 if (itask->itask_flags & ITASK_BEING_ABORTED) 4609 return (STMF_ABORTED); 4610 #ifdef DEBUG 4611 if (!(ioflags & STMF_IOF_STATS_ONLY) && stmf_drop_buf_counter > 0) { 4612 if (atomic_dec_32_nv(&stmf_drop_buf_counter) == 1) 4613 return (STMF_SUCCESS); 4614 } 4615 #endif 4616 4617 stmf_update_kstat_lu_io(task, dbuf); 4618 stmf_update_kstat_lport_io(task, dbuf); 4619 stmf_lport_xfer_start(itask, dbuf); 4620 if (ioflags & STMF_IOF_STATS_ONLY) { 4621 stmf_lport_xfer_done(itask, dbuf); 4622 return (STMF_SUCCESS); 4623 } 4624 4625 dbuf->db_flags |= DB_LPORT_XFER_ACTIVE; 4626 ret = task->task_lport->lport_xfer_data(task, dbuf, ioflags); 4627 4628 /* 4629 * Port provider may have already called the buffer callback in 4630 * which case dbuf->db_xfer_start_timestamp will be 0. 4631 */ 4632 if (ret != STMF_SUCCESS) { 4633 dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE; 4634 if (dbuf->db_xfer_start_timestamp != 0) 4635 stmf_lport_xfer_done(itask, dbuf); 4636 } 4637 4638 return (ret); 4639 } 4640 4641 void 4642 stmf_data_xfer_done(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t iof) 4643 { 4644 stmf_i_scsi_task_t *itask = 4645 (stmf_i_scsi_task_t *)task->task_stmf_private; 4646 stmf_i_local_port_t *ilport; 4647 stmf_worker_t *w = itask->itask_worker; 4648 uint32_t new, old; 4649 uint8_t update_queue_flags, free_it, queue_it; 4650 4651 stmf_lport_xfer_done(itask, dbuf); 4652 4653 stmf_task_audit(itask, TE_XFER_DONE, iof, dbuf); 4654 4655 /* Guard against unexpected completions from the lport */ 4656 if (dbuf->db_flags & DB_LPORT_XFER_ACTIVE) { 4657 dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE; 4658 } else { 4659 /* 4660 * This should never happen. 4661 */ 4662 ilport = task->task_lport->lport_stmf_private; 4663 ilport->ilport_unexpected_comp++; 4664 cmn_err(CE_PANIC, "Unexpected xfer completion task %p dbuf %p", 4665 (void *)task, (void *)dbuf); 4666 return; 4667 } 4668 4669 mutex_enter(&w->worker_lock); 4670 do { 4671 new = old = itask->itask_flags; 4672 if (old & ITASK_BEING_ABORTED) { 4673 mutex_exit(&w->worker_lock); 4674 return; 4675 } 4676 free_it = 0; 4677 if (iof & STMF_IOF_LPORT_DONE) { 4678 new &= ~ITASK_KNOWN_TO_TGT_PORT; 4679 task->task_completion_status = dbuf->db_xfer_status; 4680 free_it = 1; 4681 } 4682 /* 4683 * If the task is known to LU then queue it. But if 4684 * it is already queued (multiple completions) then 4685 * just update the buffer information by grabbing the 4686 * worker lock. If the task is not known to LU, 4687 * completed/aborted, then see if we need to 4688 * free this task. 4689 */ 4690 if (old & ITASK_KNOWN_TO_LU) { 4691 free_it = 0; 4692 update_queue_flags = 1; 4693 if (old & ITASK_IN_WORKER_QUEUE) { 4694 queue_it = 0; 4695 } else { 4696 queue_it = 1; 4697 new |= ITASK_IN_WORKER_QUEUE; 4698 } 4699 } else { 4700 update_queue_flags = 0; 4701 queue_it = 0; 4702 } 4703 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4704 4705 if (update_queue_flags) { 4706 uint8_t cmd = (dbuf->db_handle << 5) | ITASK_CMD_DATA_XFER_DONE; 4707 4708 ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS); 4709 itask->itask_cmd_stack[itask->itask_ncmds++] = cmd; 4710 if (queue_it) { 4711 itask->itask_worker_next = NULL; 4712 if (w->worker_task_tail) { 4713 w->worker_task_tail->itask_worker_next = itask; 4714 } else { 4715 w->worker_task_head = itask; 4716 } 4717 w->worker_task_tail = itask; 4718 /* Measure task waitq time */ 4719 itask->itask_waitq_enter_timestamp = gethrtime(); 4720 if (++(w->worker_queue_depth) > 4721 w->worker_max_qdepth_pu) { 4722 w->worker_max_qdepth_pu = w->worker_queue_depth; 4723 } 4724 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) 4725 cv_signal(&w->worker_cv); 4726 } 4727 } 4728 mutex_exit(&w->worker_lock); 4729 4730 if (free_it) { 4731 if ((itask->itask_flags & (ITASK_KNOWN_TO_LU | 4732 ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE | 4733 ITASK_BEING_ABORTED)) == 0) { 4734 stmf_task_free(task); 4735 } 4736 } 4737 } 4738 4739 stmf_status_t 4740 stmf_send_scsi_status(scsi_task_t *task, uint32_t ioflags) 4741 { 4742 DTRACE_PROBE1(scsi__send__status, scsi_task_t *, task); 4743 4744 stmf_i_scsi_task_t *itask = 4745 (stmf_i_scsi_task_t *)task->task_stmf_private; 4746 4747 stmf_task_audit(itask, TE_SEND_STATUS, ioflags, NULL); 4748 4749 if (ioflags & STMF_IOF_LU_DONE) { 4750 uint32_t new, old; 4751 do { 4752 new = old = itask->itask_flags; 4753 if (new & ITASK_BEING_ABORTED) 4754 return (STMF_ABORTED); 4755 new &= ~ITASK_KNOWN_TO_LU; 4756 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4757 } 4758 4759 if (!(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT)) { 4760 return (STMF_SUCCESS); 4761 } 4762 4763 if (itask->itask_flags & ITASK_BEING_ABORTED) 4764 return (STMF_ABORTED); 4765 4766 if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) { 4767 task->task_status_ctrl = 0; 4768 task->task_resid = 0; 4769 } else if (task->task_cmd_xfer_length > 4770 task->task_expected_xfer_length) { 4771 task->task_status_ctrl = TASK_SCTRL_OVER; 4772 task->task_resid = task->task_cmd_xfer_length - 4773 task->task_expected_xfer_length; 4774 } else if (task->task_nbytes_transferred < 4775 task->task_expected_xfer_length) { 4776 task->task_status_ctrl = TASK_SCTRL_UNDER; 4777 task->task_resid = task->task_expected_xfer_length - 4778 task->task_nbytes_transferred; 4779 } else { 4780 task->task_status_ctrl = 0; 4781 task->task_resid = 0; 4782 } 4783 return (task->task_lport->lport_send_status(task, ioflags)); 4784 } 4785 4786 void 4787 stmf_send_status_done(scsi_task_t *task, stmf_status_t s, uint32_t iof) 4788 { 4789 stmf_i_scsi_task_t *itask = 4790 (stmf_i_scsi_task_t *)task->task_stmf_private; 4791 stmf_worker_t *w = itask->itask_worker; 4792 uint32_t new, old; 4793 uint8_t free_it, queue_it; 4794 4795 stmf_task_audit(itask, TE_SEND_STATUS_DONE, iof, NULL); 4796 4797 mutex_enter(&w->worker_lock); 4798 do { 4799 new = old = itask->itask_flags; 4800 if (old & ITASK_BEING_ABORTED) { 4801 mutex_exit(&w->worker_lock); 4802 return; 4803 } 4804 free_it = 0; 4805 if (iof & STMF_IOF_LPORT_DONE) { 4806 new &= ~ITASK_KNOWN_TO_TGT_PORT; 4807 free_it = 1; 4808 } 4809 /* 4810 * If the task is known to LU then queue it. But if 4811 * it is already queued (multiple completions) then 4812 * just update the buffer information by grabbing the 4813 * worker lock. If the task is not known to LU, 4814 * completed/aborted, then see if we need to 4815 * free this task. 4816 */ 4817 if (old & ITASK_KNOWN_TO_LU) { 4818 free_it = 0; 4819 queue_it = 1; 4820 if (old & ITASK_IN_WORKER_QUEUE) { 4821 cmn_err(CE_PANIC, "status completion received" 4822 " when task is already in worker queue " 4823 " task = %p", (void *)task); 4824 } 4825 new |= ITASK_IN_WORKER_QUEUE; 4826 } else { 4827 queue_it = 0; 4828 } 4829 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4830 task->task_completion_status = s; 4831 4832 4833 if (queue_it) { 4834 ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS); 4835 itask->itask_cmd_stack[itask->itask_ncmds++] = 4836 ITASK_CMD_STATUS_DONE; 4837 itask->itask_worker_next = NULL; 4838 if (w->worker_task_tail) { 4839 w->worker_task_tail->itask_worker_next = itask; 4840 } else { 4841 w->worker_task_head = itask; 4842 } 4843 w->worker_task_tail = itask; 4844 /* Measure task waitq time */ 4845 itask->itask_waitq_enter_timestamp = gethrtime(); 4846 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) { 4847 w->worker_max_qdepth_pu = w->worker_queue_depth; 4848 } 4849 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) 4850 cv_signal(&w->worker_cv); 4851 } 4852 mutex_exit(&w->worker_lock); 4853 4854 if (free_it) { 4855 if ((itask->itask_flags & (ITASK_KNOWN_TO_LU | 4856 ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE | 4857 ITASK_BEING_ABORTED)) == 0) { 4858 stmf_task_free(task); 4859 } else { 4860 cmn_err(CE_PANIC, "LU is done with the task but LPORT " 4861 " is not done, itask %p itask_flags %x", 4862 (void *)itask, itask->itask_flags); 4863 } 4864 } 4865 } 4866 4867 void 4868 stmf_task_lu_done(scsi_task_t *task) 4869 { 4870 stmf_i_scsi_task_t *itask = 4871 (stmf_i_scsi_task_t *)task->task_stmf_private; 4872 stmf_worker_t *w = itask->itask_worker; 4873 uint32_t new, old; 4874 4875 mutex_enter(&w->worker_lock); 4876 do { 4877 new = old = itask->itask_flags; 4878 if (old & ITASK_BEING_ABORTED) { 4879 mutex_exit(&w->worker_lock); 4880 return; 4881 } 4882 if (old & ITASK_IN_WORKER_QUEUE) { 4883 cmn_err(CE_PANIC, "task_lu_done received" 4884 " when task is in worker queue " 4885 " task = %p", (void *)task); 4886 } 4887 new &= ~ITASK_KNOWN_TO_LU; 4888 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4889 4890 mutex_exit(&w->worker_lock); 4891 4892 if ((itask->itask_flags & (ITASK_KNOWN_TO_LU | 4893 ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE | 4894 ITASK_BEING_ABORTED)) == 0) { 4895 stmf_task_free(task); 4896 } else { 4897 cmn_err(CE_PANIC, "stmf_lu_done should be the last stage but " 4898 " the task is still not done, task = %p", (void *)task); 4899 } 4900 } 4901 4902 void 4903 stmf_queue_task_for_abort(scsi_task_t *task, stmf_status_t s) 4904 { 4905 stmf_i_scsi_task_t *itask = 4906 (stmf_i_scsi_task_t *)task->task_stmf_private; 4907 stmf_worker_t *w; 4908 uint32_t old, new; 4909 4910 stmf_task_audit(itask, TE_TASK_ABORT, CMD_OR_IOF_NA, NULL); 4911 4912 do { 4913 old = new = itask->itask_flags; 4914 if ((old & ITASK_BEING_ABORTED) || 4915 ((old & (ITASK_KNOWN_TO_TGT_PORT | 4916 ITASK_KNOWN_TO_LU)) == 0)) { 4917 return; 4918 } 4919 new |= ITASK_BEING_ABORTED; 4920 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4921 task->task_completion_status = s; 4922 itask->itask_start_time = ddi_get_lbolt(); 4923 4924 if (((w = itask->itask_worker) == NULL) || 4925 (itask->itask_flags & ITASK_IN_TRANSITION)) { 4926 return; 4927 } 4928 4929 /* Queue it and get out */ 4930 mutex_enter(&w->worker_lock); 4931 if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) { 4932 mutex_exit(&w->worker_lock); 4933 return; 4934 } 4935 atomic_or_32(&itask->itask_flags, ITASK_IN_WORKER_QUEUE); 4936 itask->itask_worker_next = NULL; 4937 if (w->worker_task_tail) { 4938 w->worker_task_tail->itask_worker_next = itask; 4939 } else { 4940 w->worker_task_head = itask; 4941 } 4942 w->worker_task_tail = itask; 4943 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) { 4944 w->worker_max_qdepth_pu = w->worker_queue_depth; 4945 } 4946 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) 4947 cv_signal(&w->worker_cv); 4948 mutex_exit(&w->worker_lock); 4949 } 4950 4951 void 4952 stmf_abort(int abort_cmd, scsi_task_t *task, stmf_status_t s, void *arg) 4953 { 4954 stmf_i_scsi_task_t *itask = NULL; 4955 uint32_t old, new, f, rf; 4956 4957 DTRACE_PROBE2(scsi__task__abort, scsi_task_t *, task, 4958 stmf_status_t, s); 4959 4960 switch (abort_cmd) { 4961 case STMF_QUEUE_ABORT_LU: 4962 stmf_task_lu_killall((stmf_lu_t *)arg, task, s); 4963 return; 4964 case STMF_QUEUE_TASK_ABORT: 4965 stmf_queue_task_for_abort(task, s); 4966 return; 4967 case STMF_REQUEUE_TASK_ABORT_LPORT: 4968 rf = ITASK_TGT_PORT_ABORT_CALLED; 4969 f = ITASK_KNOWN_TO_TGT_PORT; 4970 break; 4971 case STMF_REQUEUE_TASK_ABORT_LU: 4972 rf = ITASK_LU_ABORT_CALLED; 4973 f = ITASK_KNOWN_TO_LU; 4974 break; 4975 default: 4976 return; 4977 } 4978 itask = (stmf_i_scsi_task_t *)task->task_stmf_private; 4979 f |= ITASK_BEING_ABORTED | rf; 4980 do { 4981 old = new = itask->itask_flags; 4982 if ((old & f) != f) { 4983 return; 4984 } 4985 new &= ~rf; 4986 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 4987 } 4988 4989 void 4990 stmf_task_lu_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof) 4991 { 4992 char info[STMF_CHANGE_INFO_LEN]; 4993 stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task); 4994 unsigned long long st; 4995 4996 stmf_task_audit(itask, TE_TASK_LU_ABORTED, iof, NULL); 4997 4998 st = s; /* gcc fix */ 4999 if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) { 5000 (void) snprintf(info, sizeof (info), 5001 "task %p, lu failed to abort ret=%llx", (void *)task, st); 5002 } else if ((iof & STMF_IOF_LU_DONE) == 0) { 5003 (void) snprintf(info, sizeof (info), 5004 "Task aborted but LU is not finished, task =" 5005 "%p, s=%llx, iof=%x", (void *)task, st, iof); 5006 } else { 5007 /* 5008 * LU abort successfully 5009 */ 5010 atomic_and_32(&itask->itask_flags, ~ITASK_KNOWN_TO_LU); 5011 return; 5012 } 5013 5014 stmf_abort_task_offline(task, 1, info); 5015 } 5016 5017 void 5018 stmf_task_lport_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof) 5019 { 5020 char info[STMF_CHANGE_INFO_LEN]; 5021 stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task); 5022 unsigned long long st; 5023 uint32_t old, new; 5024 5025 stmf_task_audit(itask, TE_TASK_LPORT_ABORTED, iof, NULL); 5026 5027 st = s; 5028 if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) { 5029 (void) snprintf(info, sizeof (info), 5030 "task %p, tgt port failed to abort ret=%llx", (void *)task, 5031 st); 5032 } else if ((iof & STMF_IOF_LPORT_DONE) == 0) { 5033 (void) snprintf(info, sizeof (info), 5034 "Task aborted but tgt port is not finished, " 5035 "task=%p, s=%llx, iof=%x", (void *)task, st, iof); 5036 } else { 5037 /* 5038 * LPORT abort successfully 5039 */ 5040 do { 5041 old = new = itask->itask_flags; 5042 if (!(old & ITASK_KNOWN_TO_TGT_PORT)) 5043 return; 5044 new &= ~ITASK_KNOWN_TO_TGT_PORT; 5045 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 5046 return; 5047 } 5048 5049 stmf_abort_task_offline(task, 0, info); 5050 } 5051 5052 stmf_status_t 5053 stmf_task_poll_lu(scsi_task_t *task, uint32_t timeout) 5054 { 5055 stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *) 5056 task->task_stmf_private; 5057 stmf_worker_t *w = itask->itask_worker; 5058 int i; 5059 5060 ASSERT(itask->itask_flags & ITASK_KNOWN_TO_LU); 5061 mutex_enter(&w->worker_lock); 5062 if (itask->itask_ncmds >= ITASK_MAX_NCMDS) { 5063 mutex_exit(&w->worker_lock); 5064 return (STMF_BUSY); 5065 } 5066 for (i = 0; i < itask->itask_ncmds; i++) { 5067 if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LU) { 5068 mutex_exit(&w->worker_lock); 5069 return (STMF_SUCCESS); 5070 } 5071 } 5072 itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LU; 5073 if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) { 5074 itask->itask_poll_timeout = ddi_get_lbolt() + 1; 5075 } else { 5076 clock_t t = drv_usectohz(timeout * 1000); 5077 if (t == 0) 5078 t = 1; 5079 itask->itask_poll_timeout = ddi_get_lbolt() + t; 5080 } 5081 if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) { 5082 itask->itask_worker_next = NULL; 5083 if (w->worker_task_tail) { 5084 w->worker_task_tail->itask_worker_next = itask; 5085 } else { 5086 w->worker_task_head = itask; 5087 } 5088 w->worker_task_tail = itask; 5089 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) { 5090 w->worker_max_qdepth_pu = w->worker_queue_depth; 5091 } 5092 atomic_or_32(&itask->itask_flags, ITASK_IN_WORKER_QUEUE); 5093 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) 5094 cv_signal(&w->worker_cv); 5095 } 5096 mutex_exit(&w->worker_lock); 5097 return (STMF_SUCCESS); 5098 } 5099 5100 stmf_status_t 5101 stmf_task_poll_lport(scsi_task_t *task, uint32_t timeout) 5102 { 5103 stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *) 5104 task->task_stmf_private; 5105 stmf_worker_t *w = itask->itask_worker; 5106 int i; 5107 5108 ASSERT(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT); 5109 mutex_enter(&w->worker_lock); 5110 if (itask->itask_ncmds >= ITASK_MAX_NCMDS) { 5111 mutex_exit(&w->worker_lock); 5112 return (STMF_BUSY); 5113 } 5114 for (i = 0; i < itask->itask_ncmds; i++) { 5115 if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LPORT) { 5116 mutex_exit(&w->worker_lock); 5117 return (STMF_SUCCESS); 5118 } 5119 } 5120 itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LPORT; 5121 if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) { 5122 itask->itask_poll_timeout = ddi_get_lbolt() + 1; 5123 } else { 5124 clock_t t = drv_usectohz(timeout * 1000); 5125 if (t == 0) 5126 t = 1; 5127 itask->itask_poll_timeout = ddi_get_lbolt() + t; 5128 } 5129 if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) { 5130 itask->itask_worker_next = NULL; 5131 if (w->worker_task_tail) { 5132 w->worker_task_tail->itask_worker_next = itask; 5133 } else { 5134 w->worker_task_head = itask; 5135 } 5136 w->worker_task_tail = itask; 5137 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) { 5138 w->worker_max_qdepth_pu = w->worker_queue_depth; 5139 } 5140 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) 5141 cv_signal(&w->worker_cv); 5142 } 5143 mutex_exit(&w->worker_lock); 5144 return (STMF_SUCCESS); 5145 } 5146 5147 void 5148 stmf_do_task_abort(scsi_task_t *task) 5149 { 5150 stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task); 5151 stmf_lu_t *lu; 5152 stmf_local_port_t *lport; 5153 unsigned long long ret; 5154 uint32_t old, new; 5155 uint8_t call_lu_abort, call_port_abort; 5156 char info[STMF_CHANGE_INFO_LEN]; 5157 5158 lu = task->task_lu; 5159 lport = task->task_lport; 5160 do { 5161 old = new = itask->itask_flags; 5162 if ((old & (ITASK_KNOWN_TO_LU | ITASK_LU_ABORT_CALLED)) == 5163 ITASK_KNOWN_TO_LU) { 5164 new |= ITASK_LU_ABORT_CALLED; 5165 call_lu_abort = 1; 5166 } else { 5167 call_lu_abort = 0; 5168 } 5169 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 5170 5171 if (call_lu_abort) { 5172 if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0) { 5173 ret = lu->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0); 5174 } else { 5175 ret = dlun0->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0); 5176 } 5177 if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) { 5178 stmf_task_lu_aborted(task, ret, STMF_IOF_LU_DONE); 5179 } else if (ret == STMF_BUSY) { 5180 atomic_and_32(&itask->itask_flags, 5181 ~ITASK_LU_ABORT_CALLED); 5182 } else if (ret != STMF_SUCCESS) { 5183 (void) snprintf(info, sizeof (info), 5184 "Abort failed by LU %p, ret %llx", (void *)lu, ret); 5185 stmf_abort_task_offline(task, 1, info); 5186 } 5187 } else if (itask->itask_flags & ITASK_KNOWN_TO_LU) { 5188 if (ddi_get_lbolt() > (itask->itask_start_time + 5189 STMF_SEC2TICK(lu->lu_abort_timeout? 5190 lu->lu_abort_timeout : ITASK_DEFAULT_ABORT_TIMEOUT))) { 5191 (void) snprintf(info, sizeof (info), 5192 "lu abort timed out"); 5193 stmf_abort_task_offline(itask->itask_task, 1, info); 5194 } 5195 } 5196 5197 do { 5198 old = new = itask->itask_flags; 5199 if ((old & (ITASK_KNOWN_TO_TGT_PORT | 5200 ITASK_TGT_PORT_ABORT_CALLED)) == ITASK_KNOWN_TO_TGT_PORT) { 5201 new |= ITASK_TGT_PORT_ABORT_CALLED; 5202 call_port_abort = 1; 5203 } else { 5204 call_port_abort = 0; 5205 } 5206 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 5207 if (call_port_abort) { 5208 ret = lport->lport_abort(lport, STMF_LPORT_ABORT_TASK, task, 0); 5209 if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) { 5210 stmf_task_lport_aborted(task, ret, STMF_IOF_LPORT_DONE); 5211 } else if (ret == STMF_BUSY) { 5212 atomic_and_32(&itask->itask_flags, 5213 ~ITASK_TGT_PORT_ABORT_CALLED); 5214 } else if (ret != STMF_SUCCESS) { 5215 (void) snprintf(info, sizeof (info), 5216 "Abort failed by tgt port %p ret %llx", 5217 (void *)lport, ret); 5218 stmf_abort_task_offline(task, 0, info); 5219 } 5220 } else if (itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT) { 5221 if (ddi_get_lbolt() > (itask->itask_start_time + 5222 STMF_SEC2TICK(lport->lport_abort_timeout? 5223 lport->lport_abort_timeout : 5224 ITASK_DEFAULT_ABORT_TIMEOUT))) { 5225 (void) snprintf(info, sizeof (info), 5226 "lport abort timed out"); 5227 stmf_abort_task_offline(itask->itask_task, 0, info); 5228 } 5229 } 5230 } 5231 5232 stmf_status_t 5233 stmf_ctl(int cmd, void *obj, void *arg) 5234 { 5235 stmf_status_t ret; 5236 stmf_i_lu_t *ilu; 5237 stmf_i_local_port_t *ilport; 5238 stmf_state_change_info_t *ssci = (stmf_state_change_info_t *)arg; 5239 5240 mutex_enter(&stmf_state.stmf_lock); 5241 ret = STMF_INVALID_ARG; 5242 if (cmd & STMF_CMD_LU_OP) { 5243 ilu = stmf_lookup_lu((stmf_lu_t *)obj); 5244 if (ilu == NULL) { 5245 goto stmf_ctl_lock_exit; 5246 } 5247 DTRACE_PROBE3(lu__state__change, 5248 stmf_lu_t *, ilu->ilu_lu, 5249 int, cmd, stmf_state_change_info_t *, ssci); 5250 } else if (cmd & STMF_CMD_LPORT_OP) { 5251 ilport = stmf_lookup_lport((stmf_local_port_t *)obj); 5252 if (ilport == NULL) { 5253 goto stmf_ctl_lock_exit; 5254 } 5255 DTRACE_PROBE3(lport__state__change, 5256 stmf_local_port_t *, ilport->ilport_lport, 5257 int, cmd, stmf_state_change_info_t *, ssci); 5258 } else { 5259 goto stmf_ctl_lock_exit; 5260 } 5261 5262 switch (cmd) { 5263 case STMF_CMD_LU_ONLINE: 5264 switch (ilu->ilu_state) { 5265 case STMF_STATE_OFFLINE: 5266 ret = STMF_SUCCESS; 5267 break; 5268 case STMF_STATE_ONLINE: 5269 case STMF_STATE_ONLINING: 5270 ret = STMF_ALREADY; 5271 break; 5272 case STMF_STATE_OFFLINING: 5273 ret = STMF_BUSY; 5274 break; 5275 default: 5276 ret = STMF_BADSTATE; 5277 break; 5278 } 5279 if (ret != STMF_SUCCESS) 5280 goto stmf_ctl_lock_exit; 5281 5282 ilu->ilu_state = STMF_STATE_ONLINING; 5283 mutex_exit(&stmf_state.stmf_lock); 5284 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg); 5285 break; 5286 5287 case STMF_CMD_LU_ONLINE_COMPLETE: 5288 if (ilu->ilu_state != STMF_STATE_ONLINING) { 5289 ret = STMF_BADSTATE; 5290 goto stmf_ctl_lock_exit; 5291 } 5292 if (((stmf_change_status_t *)arg)->st_completion_status == 5293 STMF_SUCCESS) { 5294 ilu->ilu_state = STMF_STATE_ONLINE; 5295 mutex_exit(&stmf_state.stmf_lock); 5296 ((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj, 5297 STMF_ACK_LU_ONLINE_COMPLETE, arg); 5298 mutex_enter(&stmf_state.stmf_lock); 5299 stmf_add_lu_to_active_sessions((stmf_lu_t *)obj); 5300 } else { 5301 /* XXX: should throw a meesage an record more data */ 5302 ilu->ilu_state = STMF_STATE_OFFLINE; 5303 } 5304 ret = STMF_SUCCESS; 5305 goto stmf_ctl_lock_exit; 5306 5307 case STMF_CMD_LU_OFFLINE: 5308 switch (ilu->ilu_state) { 5309 case STMF_STATE_ONLINE: 5310 ret = STMF_SUCCESS; 5311 break; 5312 case STMF_STATE_OFFLINE: 5313 case STMF_STATE_OFFLINING: 5314 ret = STMF_ALREADY; 5315 break; 5316 case STMF_STATE_ONLINING: 5317 ret = STMF_BUSY; 5318 break; 5319 default: 5320 ret = STMF_BADSTATE; 5321 break; 5322 } 5323 if (ret != STMF_SUCCESS) 5324 goto stmf_ctl_lock_exit; 5325 ilu->ilu_state = STMF_STATE_OFFLINING; 5326 mutex_exit(&stmf_state.stmf_lock); 5327 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg); 5328 break; 5329 5330 case STMF_CMD_LU_OFFLINE_COMPLETE: 5331 if (ilu->ilu_state != STMF_STATE_OFFLINING) { 5332 ret = STMF_BADSTATE; 5333 goto stmf_ctl_lock_exit; 5334 } 5335 if (((stmf_change_status_t *)arg)->st_completion_status == 5336 STMF_SUCCESS) { 5337 ilu->ilu_state = STMF_STATE_OFFLINE; 5338 mutex_exit(&stmf_state.stmf_lock); 5339 ((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj, 5340 STMF_ACK_LU_OFFLINE_COMPLETE, arg); 5341 mutex_enter(&stmf_state.stmf_lock); 5342 } else { 5343 ilu->ilu_state = STMF_STATE_ONLINE; 5344 stmf_add_lu_to_active_sessions((stmf_lu_t *)obj); 5345 } 5346 mutex_exit(&stmf_state.stmf_lock); 5347 break; 5348 5349 /* 5350 * LPORT_ONLINE/OFFLINE has nothing to do with link offline/online. 5351 * It's related with hardware disable/enable. 5352 */ 5353 case STMF_CMD_LPORT_ONLINE: 5354 switch (ilport->ilport_state) { 5355 case STMF_STATE_OFFLINE: 5356 ret = STMF_SUCCESS; 5357 break; 5358 case STMF_STATE_ONLINE: 5359 case STMF_STATE_ONLINING: 5360 ret = STMF_ALREADY; 5361 break; 5362 case STMF_STATE_OFFLINING: 5363 ret = STMF_BUSY; 5364 break; 5365 default: 5366 ret = STMF_BADSTATE; 5367 break; 5368 } 5369 if (ret != STMF_SUCCESS) 5370 goto stmf_ctl_lock_exit; 5371 5372 /* 5373 * Only user request can recover the port from the 5374 * FORCED_OFFLINE state 5375 */ 5376 if (ilport->ilport_flags & ILPORT_FORCED_OFFLINE) { 5377 if (!(ssci->st_rflags & STMF_RFLAG_USER_REQUEST)) { 5378 ret = STMF_FAILURE; 5379 goto stmf_ctl_lock_exit; 5380 } 5381 } 5382 5383 /* 5384 * Avoid too frequent request to online 5385 */ 5386 if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) { 5387 ilport->ilport_online_times = 0; 5388 ilport->ilport_avg_interval = 0; 5389 } 5390 if ((ilport->ilport_avg_interval < STMF_AVG_ONLINE_INTERVAL) && 5391 (ilport->ilport_online_times >= 4)) { 5392 ret = STMF_FAILURE; 5393 ilport->ilport_flags |= ILPORT_FORCED_OFFLINE; 5394 stmf_trace(NULL, "stmf_ctl: too frequent request to " 5395 "online the port"); 5396 cmn_err(CE_WARN, "stmf_ctl: too frequent request to " 5397 "online the port, set FORCED_OFFLINE now"); 5398 goto stmf_ctl_lock_exit; 5399 } 5400 if (ilport->ilport_online_times > 0) { 5401 if (ilport->ilport_online_times == 1) { 5402 ilport->ilport_avg_interval = ddi_get_lbolt() - 5403 ilport->ilport_last_online_clock; 5404 } else { 5405 ilport->ilport_avg_interval = 5406 (ilport->ilport_avg_interval + 5407 ddi_get_lbolt() - 5408 ilport->ilport_last_online_clock) >> 1; 5409 } 5410 } 5411 ilport->ilport_last_online_clock = ddi_get_lbolt(); 5412 ilport->ilport_online_times++; 5413 5414 /* 5415 * Submit online service request 5416 */ 5417 ilport->ilport_flags &= ~ILPORT_FORCED_OFFLINE; 5418 ilport->ilport_state = STMF_STATE_ONLINING; 5419 mutex_exit(&stmf_state.stmf_lock); 5420 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg); 5421 break; 5422 5423 case STMF_CMD_LPORT_ONLINE_COMPLETE: 5424 if (ilport->ilport_state != STMF_STATE_ONLINING) { 5425 ret = STMF_BADSTATE; 5426 goto stmf_ctl_lock_exit; 5427 } 5428 if (((stmf_change_status_t *)arg)->st_completion_status == 5429 STMF_SUCCESS) { 5430 ilport->ilport_state = STMF_STATE_ONLINE; 5431 mutex_exit(&stmf_state.stmf_lock); 5432 ((stmf_local_port_t *)obj)->lport_ctl( 5433 (stmf_local_port_t *)obj, 5434 STMF_ACK_LPORT_ONLINE_COMPLETE, arg); 5435 mutex_enter(&stmf_state.stmf_lock); 5436 } else { 5437 ilport->ilport_state = STMF_STATE_OFFLINE; 5438 } 5439 ret = STMF_SUCCESS; 5440 goto stmf_ctl_lock_exit; 5441 5442 case STMF_CMD_LPORT_OFFLINE: 5443 switch (ilport->ilport_state) { 5444 case STMF_STATE_ONLINE: 5445 ret = STMF_SUCCESS; 5446 break; 5447 case STMF_STATE_OFFLINE: 5448 case STMF_STATE_OFFLINING: 5449 ret = STMF_ALREADY; 5450 break; 5451 case STMF_STATE_ONLINING: 5452 ret = STMF_BUSY; 5453 break; 5454 default: 5455 ret = STMF_BADSTATE; 5456 break; 5457 } 5458 if (ret != STMF_SUCCESS) 5459 goto stmf_ctl_lock_exit; 5460 5461 ilport->ilport_state = STMF_STATE_OFFLINING; 5462 mutex_exit(&stmf_state.stmf_lock); 5463 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg); 5464 break; 5465 5466 case STMF_CMD_LPORT_OFFLINE_COMPLETE: 5467 if (ilport->ilport_state != STMF_STATE_OFFLINING) { 5468 ret = STMF_BADSTATE; 5469 goto stmf_ctl_lock_exit; 5470 } 5471 if (((stmf_change_status_t *)arg)->st_completion_status == 5472 STMF_SUCCESS) { 5473 ilport->ilport_state = STMF_STATE_OFFLINE; 5474 mutex_exit(&stmf_state.stmf_lock); 5475 ((stmf_local_port_t *)obj)->lport_ctl( 5476 (stmf_local_port_t *)obj, 5477 STMF_ACK_LPORT_OFFLINE_COMPLETE, arg); 5478 mutex_enter(&stmf_state.stmf_lock); 5479 } else { 5480 ilport->ilport_state = STMF_STATE_ONLINE; 5481 } 5482 mutex_exit(&stmf_state.stmf_lock); 5483 break; 5484 5485 default: 5486 cmn_err(CE_WARN, "Invalid ctl cmd received %x", cmd); 5487 ret = STMF_INVALID_ARG; 5488 goto stmf_ctl_lock_exit; 5489 } 5490 5491 return (STMF_SUCCESS); 5492 5493 stmf_ctl_lock_exit:; 5494 mutex_exit(&stmf_state.stmf_lock); 5495 return (ret); 5496 } 5497 5498 /* ARGSUSED */ 5499 stmf_status_t 5500 stmf_info_impl(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf, 5501 uint32_t *bufsizep) 5502 { 5503 return (STMF_NOT_SUPPORTED); 5504 } 5505 5506 /* ARGSUSED */ 5507 stmf_status_t 5508 stmf_info(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf, 5509 uint32_t *bufsizep) 5510 { 5511 uint32_t cl = SI_GET_CLASS(cmd); 5512 5513 if (cl == SI_STMF) { 5514 return (stmf_info_impl(cmd, arg1, arg2, buf, bufsizep)); 5515 } 5516 if (cl == SI_LPORT) { 5517 return (((stmf_local_port_t *)arg1)->lport_info(cmd, arg1, 5518 arg2, buf, bufsizep)); 5519 } else if (cl == SI_LU) { 5520 return (((stmf_lu_t *)arg1)->lu_info(cmd, arg1, arg2, buf, 5521 bufsizep)); 5522 } 5523 5524 return (STMF_NOT_SUPPORTED); 5525 } 5526 5527 /* 5528 * Used by port providers. pwwn is 8 byte wwn, sdid is the devid used by 5529 * stmf to register local ports. The ident should have 20 bytes in buffer 5530 * space to convert the wwn to "wwn.xxxxxxxxxxxxxxxx" string. 5531 */ 5532 void 5533 stmf_wwn_to_devid_desc(scsi_devid_desc_t *sdid, uint8_t *wwn, 5534 uint8_t protocol_id) 5535 { 5536 char wwn_str[20+1]; 5537 5538 sdid->protocol_id = protocol_id; 5539 sdid->piv = 1; 5540 sdid->code_set = CODE_SET_ASCII; 5541 sdid->association = ID_IS_TARGET_PORT; 5542 sdid->ident_length = 20; 5543 /* Convert wwn value to "wwn.XXXXXXXXXXXXXXXX" format */ 5544 (void) snprintf(wwn_str, sizeof (wwn_str), 5545 "wwn.%02X%02X%02X%02X%02X%02X%02X%02X", 5546 wwn[0], wwn[1], wwn[2], wwn[3], wwn[4], wwn[5], wwn[6], wwn[7]); 5547 bcopy(wwn_str, (char *)sdid->ident, 20); 5548 } 5549 5550 5551 stmf_xfer_data_t * 5552 stmf_prepare_tpgs_data(uint8_t ilu_alua) 5553 { 5554 stmf_xfer_data_t *xd; 5555 stmf_i_local_port_t *ilport; 5556 uint8_t *p; 5557 uint32_t sz, asz, nports = 0, nports_standby = 0; 5558 5559 mutex_enter(&stmf_state.stmf_lock); 5560 /* check if any ports are standby and create second group */ 5561 for (ilport = stmf_state.stmf_ilportlist; ilport; 5562 ilport = ilport->ilport_next) { 5563 if (ilport->ilport_standby == 1) { 5564 nports_standby++; 5565 } else { 5566 nports++; 5567 } 5568 } 5569 5570 /* The spec only allows for 255 ports to be reported per group */ 5571 nports = min(nports, 255); 5572 nports_standby = min(nports_standby, 255); 5573 sz = (nports * 4) + 12; 5574 if (nports_standby && ilu_alua) { 5575 sz += (nports_standby * 4) + 8; 5576 } 5577 asz = sz + sizeof (*xd) - 4; 5578 xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP); 5579 if (xd == NULL) { 5580 mutex_exit(&stmf_state.stmf_lock); 5581 return (NULL); 5582 } 5583 xd->alloc_size = asz; 5584 xd->size_left = sz; 5585 5586 p = xd->buf; 5587 5588 *((uint32_t *)p) = BE_32(sz - 4); 5589 p += 4; 5590 p[0] = 0x80; /* PREF */ 5591 p[1] = 5; /* AO_SUP, S_SUP */ 5592 if (stmf_state.stmf_alua_node == 1) { 5593 p[3] = 1; /* Group 1 */ 5594 } else { 5595 p[3] = 0; /* Group 0 */ 5596 } 5597 p[7] = nports & 0xff; 5598 p += 8; 5599 for (ilport = stmf_state.stmf_ilportlist; ilport; 5600 ilport = ilport->ilport_next) { 5601 if (ilport->ilport_standby == 1) { 5602 continue; 5603 } 5604 ((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid); 5605 p += 4; 5606 } 5607 if (nports_standby && ilu_alua) { 5608 p[0] = 0x02; /* Non PREF, Standby */ 5609 p[1] = 5; /* AO_SUP, S_SUP */ 5610 if (stmf_state.stmf_alua_node == 1) { 5611 p[3] = 0; /* Group 0 */ 5612 } else { 5613 p[3] = 1; /* Group 1 */ 5614 } 5615 p[7] = nports_standby & 0xff; 5616 p += 8; 5617 for (ilport = stmf_state.stmf_ilportlist; ilport; 5618 ilport = ilport->ilport_next) { 5619 if (ilport->ilport_standby == 0) { 5620 continue; 5621 } 5622 ((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid); 5623 p += 4; 5624 } 5625 } 5626 5627 mutex_exit(&stmf_state.stmf_lock); 5628 5629 return (xd); 5630 } 5631 5632 struct scsi_devid_desc * 5633 stmf_scsilib_get_devid_desc(uint16_t rtpid) 5634 { 5635 scsi_devid_desc_t *devid = NULL; 5636 stmf_i_local_port_t *ilport; 5637 5638 mutex_enter(&stmf_state.stmf_lock); 5639 5640 for (ilport = stmf_state.stmf_ilportlist; ilport; 5641 ilport = ilport->ilport_next) { 5642 if (ilport->ilport_rtpid == rtpid) { 5643 scsi_devid_desc_t *id = ilport->ilport_lport->lport_id; 5644 uint32_t id_sz = sizeof (scsi_devid_desc_t) + 5645 id->ident_length; 5646 devid = (scsi_devid_desc_t *)kmem_zalloc(id_sz, 5647 KM_NOSLEEP); 5648 if (devid != NULL) { 5649 bcopy(id, devid, id_sz); 5650 } 5651 break; 5652 } 5653 } 5654 5655 mutex_exit(&stmf_state.stmf_lock); 5656 return (devid); 5657 } 5658 5659 uint16_t 5660 stmf_scsilib_get_lport_rtid(struct scsi_devid_desc *devid) 5661 { 5662 stmf_i_local_port_t *ilport; 5663 scsi_devid_desc_t *id; 5664 uint16_t rtpid = 0; 5665 5666 mutex_enter(&stmf_state.stmf_lock); 5667 for (ilport = stmf_state.stmf_ilportlist; ilport; 5668 ilport = ilport->ilport_next) { 5669 id = ilport->ilport_lport->lport_id; 5670 if ((devid->ident_length == id->ident_length) && 5671 (memcmp(devid->ident, id->ident, id->ident_length) == 0)) { 5672 rtpid = ilport->ilport_rtpid; 5673 break; 5674 } 5675 } 5676 mutex_exit(&stmf_state.stmf_lock); 5677 return (rtpid); 5678 } 5679 5680 static uint16_t stmf_lu_id_gen_number = 0; 5681 5682 stmf_status_t 5683 stmf_scsilib_uniq_lu_id(uint32_t company_id, scsi_devid_desc_t *lu_id) 5684 { 5685 return (stmf_scsilib_uniq_lu_id2(company_id, 0, lu_id)); 5686 } 5687 5688 stmf_status_t 5689 stmf_scsilib_uniq_lu_id2(uint32_t company_id, uint32_t host_id, 5690 scsi_devid_desc_t *lu_id) 5691 { 5692 uint8_t *p; 5693 struct timeval32 timestamp32; 5694 uint32_t *t = (uint32_t *)×tamp32; 5695 struct ether_addr mac; 5696 uint8_t *e = (uint8_t *)&mac; 5697 int hid = (int)host_id; 5698 uint16_t gen_number; 5699 5700 if (company_id == COMPANY_ID_NONE) 5701 company_id = COMPANY_ID_SUN; 5702 5703 if (lu_id->ident_length != 0x10) 5704 return (STMF_INVALID_ARG); 5705 5706 p = (uint8_t *)lu_id; 5707 5708 gen_number = atomic_inc_16_nv(&stmf_lu_id_gen_number); 5709 5710 p[0] = 0xf1; p[1] = 3; p[2] = 0; p[3] = 0x10; 5711 p[4] = ((company_id >> 20) & 0xf) | 0x60; 5712 p[5] = (company_id >> 12) & 0xff; 5713 p[6] = (company_id >> 4) & 0xff; 5714 p[7] = (company_id << 4) & 0xf0; 5715 if (hid == 0 && !localetheraddr((struct ether_addr *)NULL, &mac)) { 5716 hid = BE_32((int)zone_get_hostid(NULL)); 5717 } 5718 if (hid != 0) { 5719 e[0] = (hid >> 24) & 0xff; 5720 e[1] = (hid >> 16) & 0xff; 5721 e[2] = (hid >> 8) & 0xff; 5722 e[3] = hid & 0xff; 5723 e[4] = e[5] = 0; 5724 } 5725 bcopy(e, p+8, 6); 5726 uniqtime32(×tamp32); 5727 *t = BE_32(*t); 5728 bcopy(t, p+14, 4); 5729 p[18] = (gen_number >> 8) & 0xff; 5730 p[19] = gen_number & 0xff; 5731 5732 return (STMF_SUCCESS); 5733 } 5734 5735 /* 5736 * saa is sense key, ASC, ASCQ 5737 */ 5738 void 5739 stmf_scsilib_send_status(scsi_task_t *task, uint8_t st, uint32_t saa) 5740 { 5741 uint8_t sd[18]; 5742 task->task_scsi_status = st; 5743 if (st == 2) { 5744 bzero(sd, 18); 5745 sd[0] = 0x70; 5746 sd[2] = (saa >> 16) & 0xf; 5747 sd[7] = 10; 5748 sd[12] = (saa >> 8) & 0xff; 5749 sd[13] = saa & 0xff; 5750 task->task_sense_data = sd; 5751 task->task_sense_length = 18; 5752 } else { 5753 task->task_sense_data = NULL; 5754 task->task_sense_length = 0; 5755 } 5756 (void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE); 5757 } 5758 5759 uint32_t 5760 stmf_scsilib_prepare_vpd_page83(scsi_task_t *task, uint8_t *page, 5761 uint32_t page_len, uint8_t byte0, uint32_t vpd_mask) 5762 { 5763 uint8_t *p = NULL; 5764 uint8_t small_buf[32]; 5765 uint32_t sz = 0; 5766 uint32_t n = 4; 5767 uint32_t m = 0; 5768 uint32_t last_bit = 0; 5769 5770 if (page_len < 4) 5771 return (0); 5772 if (page_len > 65535) 5773 page_len = 65535; 5774 5775 page[0] = byte0; 5776 page[1] = 0x83; 5777 5778 /* CONSTCOND */ 5779 while (1) { 5780 m += sz; 5781 if (sz && (page_len > n)) { 5782 uint32_t copysz; 5783 copysz = page_len > (n + sz) ? sz : page_len - n; 5784 bcopy(p, page + n, copysz); 5785 n += copysz; 5786 } 5787 vpd_mask &= ~last_bit; 5788 if (vpd_mask == 0) 5789 break; 5790 5791 if (vpd_mask & STMF_VPD_LU_ID) { 5792 last_bit = STMF_VPD_LU_ID; 5793 sz = task->task_lu->lu_id->ident_length + 4; 5794 p = (uint8_t *)task->task_lu->lu_id; 5795 continue; 5796 } else if (vpd_mask & STMF_VPD_TARGET_ID) { 5797 last_bit = STMF_VPD_TARGET_ID; 5798 sz = task->task_lport->lport_id->ident_length + 4; 5799 p = (uint8_t *)task->task_lport->lport_id; 5800 continue; 5801 } else if (vpd_mask & STMF_VPD_TP_GROUP) { 5802 stmf_i_local_port_t *ilport; 5803 last_bit = STMF_VPD_TP_GROUP; 5804 p = small_buf; 5805 bzero(p, 8); 5806 p[0] = 1; 5807 p[1] = 0x15; 5808 p[3] = 4; 5809 ilport = (stmf_i_local_port_t *) 5810 task->task_lport->lport_stmf_private; 5811 /* 5812 * If we're in alua mode, group 1 contains all alua 5813 * participating ports and all standby ports 5814 * > 255. Otherwise, if we're in alua mode, any local 5815 * ports (non standby/pppt) are also in group 1 if the 5816 * alua node is 1. Otherwise the group is 0. 5817 */ 5818 if ((stmf_state.stmf_alua_state && 5819 (ilport->ilport_alua || ilport->ilport_standby) && 5820 ilport->ilport_rtpid > 255) || 5821 (stmf_state.stmf_alua_node == 1 && 5822 ilport->ilport_standby != 1)) { 5823 p[7] = 1; /* Group 1 */ 5824 } 5825 sz = 8; 5826 continue; 5827 } else if (vpd_mask & STMF_VPD_RELATIVE_TP_ID) { 5828 stmf_i_local_port_t *ilport; 5829 5830 last_bit = STMF_VPD_RELATIVE_TP_ID; 5831 p = small_buf; 5832 bzero(p, 8); 5833 p[0] = 1; 5834 p[1] = 0x14; 5835 p[3] = 4; 5836 ilport = (stmf_i_local_port_t *) 5837 task->task_lport->lport_stmf_private; 5838 p[6] = (ilport->ilport_rtpid >> 8) & 0xff; 5839 p[7] = ilport->ilport_rtpid & 0xff; 5840 sz = 8; 5841 continue; 5842 } else { 5843 cmn_err(CE_WARN, "Invalid vpd_mask"); 5844 break; 5845 } 5846 } 5847 5848 page[2] = (m >> 8) & 0xff; 5849 page[3] = m & 0xff; 5850 5851 return (n); 5852 } 5853 5854 void 5855 stmf_scsilib_handle_report_tpgs(scsi_task_t *task, stmf_data_buf_t *dbuf) 5856 { 5857 stmf_i_scsi_task_t *itask = 5858 (stmf_i_scsi_task_t *)task->task_stmf_private; 5859 stmf_i_lu_t *ilu = 5860 (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 5861 stmf_xfer_data_t *xd; 5862 uint32_t sz, minsz; 5863 5864 itask->itask_flags |= ITASK_DEFAULT_HANDLING; 5865 task->task_cmd_xfer_length = 5866 ((((uint32_t)task->task_cdb[6]) << 24) | 5867 (((uint32_t)task->task_cdb[7]) << 16) | 5868 (((uint32_t)task->task_cdb[8]) << 8) | 5869 ((uint32_t)task->task_cdb[9])); 5870 5871 if (task->task_additional_flags & 5872 TASK_AF_NO_EXPECTED_XFER_LENGTH) { 5873 task->task_expected_xfer_length = 5874 task->task_cmd_xfer_length; 5875 } 5876 5877 if (task->task_cmd_xfer_length == 0) { 5878 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 5879 return; 5880 } 5881 if (task->task_cmd_xfer_length < 4) { 5882 stmf_scsilib_send_status(task, STATUS_CHECK, 5883 STMF_SAA_INVALID_FIELD_IN_CDB); 5884 return; 5885 } 5886 5887 sz = min(task->task_expected_xfer_length, 5888 task->task_cmd_xfer_length); 5889 5890 xd = stmf_prepare_tpgs_data(ilu->ilu_alua); 5891 5892 if (xd == NULL) { 5893 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 5894 STMF_ALLOC_FAILURE, NULL); 5895 return; 5896 } 5897 5898 sz = min(sz, xd->size_left); 5899 xd->size_left = sz; 5900 minsz = min(512, sz); 5901 5902 if (dbuf == NULL) 5903 dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0); 5904 if (dbuf == NULL) { 5905 kmem_free(xd, xd->alloc_size); 5906 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 5907 STMF_ALLOC_FAILURE, NULL); 5908 return; 5909 } 5910 dbuf->db_lu_private = xd; 5911 stmf_xd_to_dbuf(dbuf, 1); 5912 5913 dbuf->db_flags = DB_DIRECTION_TO_RPORT; 5914 (void) stmf_xfer_data(task, dbuf, 0); 5915 5916 } 5917 5918 void 5919 stmf_scsilib_handle_task_mgmt(scsi_task_t *task) 5920 { 5921 5922 switch (task->task_mgmt_function) { 5923 /* 5924 * For now we will abort all I/Os on the LU in case of ABORT_TASK_SET 5925 * and ABORT_TASK. But unlike LUN_RESET we will not reset LU state 5926 * in these cases. This needs to be changed to abort only the required 5927 * set. 5928 */ 5929 case TM_ABORT_TASK: 5930 case TM_ABORT_TASK_SET: 5931 case TM_CLEAR_TASK_SET: 5932 case TM_LUN_RESET: 5933 stmf_handle_lun_reset(task); 5934 /* issue the reset to the proxy node as well */ 5935 if (stmf_state.stmf_alua_state == 1) { 5936 (void) stmf_proxy_scsi_cmd(task, NULL); 5937 } 5938 return; 5939 case TM_TARGET_RESET: 5940 case TM_TARGET_COLD_RESET: 5941 case TM_TARGET_WARM_RESET: 5942 stmf_handle_target_reset(task); 5943 return; 5944 default: 5945 /* We dont support this task mgmt function */ 5946 stmf_scsilib_send_status(task, STATUS_CHECK, 5947 STMF_SAA_INVALID_FIELD_IN_CMD_IU); 5948 return; 5949 } 5950 } 5951 5952 void 5953 stmf_handle_lun_reset(scsi_task_t *task) 5954 { 5955 stmf_i_scsi_task_t *itask; 5956 stmf_i_lu_t *ilu; 5957 5958 itask = (stmf_i_scsi_task_t *)task->task_stmf_private; 5959 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 5960 5961 /* 5962 * To sync with target reset, grab this lock. The LU is not going 5963 * anywhere as there is atleast one task pending (this task). 5964 */ 5965 mutex_enter(&stmf_state.stmf_lock); 5966 5967 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 5968 mutex_exit(&stmf_state.stmf_lock); 5969 stmf_scsilib_send_status(task, STATUS_CHECK, 5970 STMF_SAA_OPERATION_IN_PROGRESS); 5971 return; 5972 } 5973 atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE); 5974 mutex_exit(&stmf_state.stmf_lock); 5975 5976 /* 5977 * Mark this task as the one causing LU reset so that we know who 5978 * was responsible for setting the ILU_RESET_ACTIVE. In case this 5979 * task itself gets aborted, we will clear ILU_RESET_ACTIVE. 5980 */ 5981 itask->itask_flags |= ITASK_DEFAULT_HANDLING | ITASK_CAUSING_LU_RESET; 5982 5983 /* Initiatiate abort on all commands on this LU except this one */ 5984 stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED, task->task_lu); 5985 5986 /* Start polling on this task */ 5987 if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT) 5988 != STMF_SUCCESS) { 5989 stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE, 5990 NULL); 5991 return; 5992 } 5993 } 5994 5995 void 5996 stmf_handle_target_reset(scsi_task_t *task) 5997 { 5998 stmf_i_scsi_task_t *itask; 5999 stmf_i_lu_t *ilu; 6000 stmf_i_scsi_session_t *iss; 6001 stmf_lun_map_t *lm; 6002 stmf_lun_map_ent_t *lm_ent; 6003 int i, lf; 6004 6005 itask = (stmf_i_scsi_task_t *)task->task_stmf_private; 6006 iss = (stmf_i_scsi_session_t *)task->task_session->ss_stmf_private; 6007 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 6008 6009 /* 6010 * To sync with LUN reset, grab this lock. The session is not going 6011 * anywhere as there is atleast one task pending (this task). 6012 */ 6013 mutex_enter(&stmf_state.stmf_lock); 6014 6015 /* Grab the session lock as a writer to prevent any changes in it */ 6016 rw_enter(iss->iss_lockp, RW_WRITER); 6017 6018 if (iss->iss_flags & ISS_RESET_ACTIVE) { 6019 rw_exit(iss->iss_lockp); 6020 mutex_exit(&stmf_state.stmf_lock); 6021 stmf_scsilib_send_status(task, STATUS_CHECK, 6022 STMF_SAA_OPERATION_IN_PROGRESS); 6023 return; 6024 } 6025 atomic_or_32(&iss->iss_flags, ISS_RESET_ACTIVE); 6026 6027 /* 6028 * Now go through each LUN in this session and make sure all of them 6029 * can be reset. 6030 */ 6031 lm = iss->iss_sm; 6032 for (i = 0, lf = 0; i < lm->lm_nentries; i++) { 6033 if (lm->lm_plus[i] == NULL) 6034 continue; 6035 lf++; 6036 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 6037 ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private); 6038 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 6039 atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE); 6040 rw_exit(iss->iss_lockp); 6041 mutex_exit(&stmf_state.stmf_lock); 6042 stmf_scsilib_send_status(task, STATUS_CHECK, 6043 STMF_SAA_OPERATION_IN_PROGRESS); 6044 return; 6045 } 6046 } 6047 if (lf == 0) { 6048 /* No luns in this session */ 6049 atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE); 6050 rw_exit(iss->iss_lockp); 6051 mutex_exit(&stmf_state.stmf_lock); 6052 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 6053 return; 6054 } 6055 6056 /* ok, start the damage */ 6057 itask->itask_flags |= ITASK_DEFAULT_HANDLING | 6058 ITASK_CAUSING_TARGET_RESET; 6059 for (i = 0; i < lm->lm_nentries; i++) { 6060 if (lm->lm_plus[i] == NULL) 6061 continue; 6062 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 6063 ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private); 6064 atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE); 6065 } 6066 6067 for (i = 0; i < lm->lm_nentries; i++) { 6068 if (lm->lm_plus[i] == NULL) 6069 continue; 6070 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 6071 stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED, 6072 lm_ent->ent_lu); 6073 } 6074 6075 rw_exit(iss->iss_lockp); 6076 mutex_exit(&stmf_state.stmf_lock); 6077 6078 /* Start polling on this task */ 6079 if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT) 6080 != STMF_SUCCESS) { 6081 stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE, 6082 NULL); 6083 return; 6084 } 6085 } 6086 6087 int 6088 stmf_handle_cmd_during_ic(stmf_i_scsi_task_t *itask) 6089 { 6090 scsi_task_t *task = itask->itask_task; 6091 stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *) 6092 task->task_session->ss_stmf_private; 6093 6094 rw_enter(iss->iss_lockp, RW_WRITER); 6095 if (((iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) == 0) || 6096 (task->task_cdb[0] == SCMD_INQUIRY)) { 6097 rw_exit(iss->iss_lockp); 6098 return (0); 6099 } 6100 atomic_and_32(&iss->iss_flags, 6101 ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS)); 6102 rw_exit(iss->iss_lockp); 6103 6104 if (task->task_cdb[0] == SCMD_REPORT_LUNS) { 6105 return (0); 6106 } 6107 stmf_scsilib_send_status(task, STATUS_CHECK, 6108 STMF_SAA_REPORT_LUN_DATA_HAS_CHANGED); 6109 return (1); 6110 } 6111 6112 void 6113 stmf_worker_init() 6114 { 6115 uint32_t i; 6116 6117 /* Make local copy of global tunables */ 6118 stmf_i_max_nworkers = stmf_max_nworkers; 6119 stmf_i_min_nworkers = stmf_min_nworkers; 6120 6121 ASSERT(stmf_workers == NULL); 6122 if (stmf_i_min_nworkers < 4) { 6123 stmf_i_min_nworkers = 4; 6124 } 6125 if (stmf_i_max_nworkers < stmf_i_min_nworkers) { 6126 stmf_i_max_nworkers = stmf_i_min_nworkers; 6127 } 6128 stmf_workers = (stmf_worker_t *)kmem_zalloc( 6129 sizeof (stmf_worker_t) * stmf_i_max_nworkers, KM_SLEEP); 6130 for (i = 0; i < stmf_i_max_nworkers; i++) { 6131 stmf_worker_t *w = &stmf_workers[i]; 6132 mutex_init(&w->worker_lock, NULL, MUTEX_DRIVER, NULL); 6133 cv_init(&w->worker_cv, NULL, CV_DRIVER, NULL); 6134 } 6135 stmf_worker_mgmt_delay = drv_usectohz(20 * 1000); 6136 stmf_workers_state = STMF_WORKERS_ENABLED; 6137 6138 /* Workers will be started by stmf_worker_mgmt() */ 6139 6140 /* Lets wait for atleast one worker to start */ 6141 while (stmf_nworkers_cur == 0) 6142 delay(drv_usectohz(20 * 1000)); 6143 stmf_worker_mgmt_delay = drv_usectohz(3 * 1000 * 1000); 6144 } 6145 6146 stmf_status_t 6147 stmf_worker_fini() 6148 { 6149 int i; 6150 clock_t sb; 6151 6152 if (stmf_workers_state == STMF_WORKERS_DISABLED) 6153 return (STMF_SUCCESS); 6154 ASSERT(stmf_workers); 6155 stmf_workers_state = STMF_WORKERS_DISABLED; 6156 stmf_worker_mgmt_delay = drv_usectohz(20 * 1000); 6157 cv_signal(&stmf_state.stmf_cv); 6158 6159 sb = ddi_get_lbolt() + drv_usectohz(10 * 1000 * 1000); 6160 /* Wait for all the threads to die */ 6161 while (stmf_nworkers_cur != 0) { 6162 if (ddi_get_lbolt() > sb) { 6163 stmf_workers_state = STMF_WORKERS_ENABLED; 6164 return (STMF_BUSY); 6165 } 6166 delay(drv_usectohz(100 * 1000)); 6167 } 6168 for (i = 0; i < stmf_i_max_nworkers; i++) { 6169 stmf_worker_t *w = &stmf_workers[i]; 6170 mutex_destroy(&w->worker_lock); 6171 cv_destroy(&w->worker_cv); 6172 } 6173 kmem_free(stmf_workers, sizeof (stmf_worker_t) * stmf_i_max_nworkers); 6174 stmf_workers = NULL; 6175 6176 return (STMF_SUCCESS); 6177 } 6178 6179 void 6180 stmf_worker_task(void *arg) 6181 { 6182 stmf_worker_t *w; 6183 stmf_i_scsi_session_t *iss; 6184 scsi_task_t *task; 6185 stmf_i_scsi_task_t *itask; 6186 stmf_data_buf_t *dbuf; 6187 stmf_lu_t *lu; 6188 clock_t wait_timer = 0; 6189 clock_t wait_ticks, wait_delta = 0; 6190 uint32_t old, new; 6191 uint8_t curcmd; 6192 uint8_t abort_free; 6193 uint8_t wait_queue; 6194 uint8_t dec_qdepth; 6195 6196 w = (stmf_worker_t *)arg; 6197 wait_ticks = drv_usectohz(10000); 6198 6199 DTRACE_PROBE1(worker__create, stmf_worker_t, w); 6200 mutex_enter(&w->worker_lock); 6201 w->worker_flags |= STMF_WORKER_STARTED | STMF_WORKER_ACTIVE; 6202 stmf_worker_loop:; 6203 if ((w->worker_ref_count == 0) && 6204 (w->worker_flags & STMF_WORKER_TERMINATE)) { 6205 w->worker_flags &= ~(STMF_WORKER_STARTED | 6206 STMF_WORKER_ACTIVE | STMF_WORKER_TERMINATE); 6207 w->worker_tid = NULL; 6208 mutex_exit(&w->worker_lock); 6209 DTRACE_PROBE1(worker__destroy, stmf_worker_t, w); 6210 thread_exit(); 6211 } 6212 /* CONSTCOND */ 6213 while (1) { 6214 dec_qdepth = 0; 6215 if (wait_timer && (ddi_get_lbolt() >= wait_timer)) { 6216 wait_timer = 0; 6217 wait_delta = 0; 6218 if (w->worker_wait_head) { 6219 ASSERT(w->worker_wait_tail); 6220 if (w->worker_task_head == NULL) 6221 w->worker_task_head = 6222 w->worker_wait_head; 6223 else 6224 w->worker_task_tail->itask_worker_next = 6225 w->worker_wait_head; 6226 w->worker_task_tail = w->worker_wait_tail; 6227 w->worker_wait_head = w->worker_wait_tail = 6228 NULL; 6229 } 6230 } 6231 if ((itask = w->worker_task_head) == NULL) { 6232 break; 6233 } 6234 task = itask->itask_task; 6235 DTRACE_PROBE2(worker__active, stmf_worker_t, w, 6236 scsi_task_t *, task); 6237 w->worker_task_head = itask->itask_worker_next; 6238 if (w->worker_task_head == NULL) 6239 w->worker_task_tail = NULL; 6240 6241 wait_queue = 0; 6242 abort_free = 0; 6243 if (itask->itask_ncmds > 0) { 6244 curcmd = itask->itask_cmd_stack[itask->itask_ncmds - 1]; 6245 } else { 6246 ASSERT(itask->itask_flags & ITASK_BEING_ABORTED); 6247 } 6248 do { 6249 old = itask->itask_flags; 6250 if (old & ITASK_BEING_ABORTED) { 6251 itask->itask_ncmds = 1; 6252 curcmd = itask->itask_cmd_stack[0] = 6253 ITASK_CMD_ABORT; 6254 goto out_itask_flag_loop; 6255 } else if ((curcmd & ITASK_CMD_MASK) == 6256 ITASK_CMD_NEW_TASK) { 6257 /* 6258 * set ITASK_KSTAT_IN_RUNQ, this flag 6259 * will not reset until task completed 6260 */ 6261 new = old | ITASK_KNOWN_TO_LU | 6262 ITASK_KSTAT_IN_RUNQ; 6263 } else { 6264 goto out_itask_flag_loop; 6265 } 6266 } while (atomic_cas_32(&itask->itask_flags, old, new) != old); 6267 6268 out_itask_flag_loop: 6269 6270 /* 6271 * Decide if this task needs to go to a queue and/or if 6272 * we can decrement the itask_cmd_stack. 6273 */ 6274 if (curcmd == ITASK_CMD_ABORT) { 6275 if (itask->itask_flags & (ITASK_KNOWN_TO_LU | 6276 ITASK_KNOWN_TO_TGT_PORT)) { 6277 wait_queue = 1; 6278 } else { 6279 abort_free = 1; 6280 } 6281 } else if ((curcmd & ITASK_CMD_POLL) && 6282 (itask->itask_poll_timeout > ddi_get_lbolt())) { 6283 wait_queue = 1; 6284 } 6285 6286 if (wait_queue) { 6287 itask->itask_worker_next = NULL; 6288 if (w->worker_wait_tail) { 6289 w->worker_wait_tail->itask_worker_next = itask; 6290 } else { 6291 w->worker_wait_head = itask; 6292 } 6293 w->worker_wait_tail = itask; 6294 if (wait_timer == 0) { 6295 wait_timer = ddi_get_lbolt() + wait_ticks; 6296 wait_delta = wait_ticks; 6297 } 6298 } else if ((--(itask->itask_ncmds)) != 0) { 6299 itask->itask_worker_next = NULL; 6300 if (w->worker_task_tail) { 6301 w->worker_task_tail->itask_worker_next = itask; 6302 } else { 6303 w->worker_task_head = itask; 6304 } 6305 w->worker_task_tail = itask; 6306 } else { 6307 atomic_and_32(&itask->itask_flags, 6308 ~ITASK_IN_WORKER_QUEUE); 6309 /* 6310 * This is where the queue depth should go down by 6311 * one but we delay that on purpose to account for 6312 * the call into the provider. The actual decrement 6313 * happens after the worker has done its job. 6314 */ 6315 dec_qdepth = 1; 6316 itask->itask_waitq_time += 6317 gethrtime() - itask->itask_waitq_enter_timestamp; 6318 } 6319 6320 /* We made it here means we are going to call LU */ 6321 if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0) 6322 lu = task->task_lu; 6323 else 6324 lu = dlun0; 6325 dbuf = itask->itask_dbufs[ITASK_CMD_BUF_NDX(curcmd)]; 6326 mutex_exit(&w->worker_lock); 6327 curcmd &= ITASK_CMD_MASK; 6328 stmf_task_audit(itask, TE_PROCESS_CMD, curcmd, dbuf); 6329 switch (curcmd) { 6330 case ITASK_CMD_NEW_TASK: 6331 iss = (stmf_i_scsi_session_t *) 6332 task->task_session->ss_stmf_private; 6333 stmf_itl_lu_new_task(itask); 6334 if (iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) { 6335 if (stmf_handle_cmd_during_ic(itask)) 6336 break; 6337 } 6338 #ifdef DEBUG 6339 if (stmf_drop_task_counter > 0) { 6340 if (atomic_dec_32_nv(&stmf_drop_task_counter) 6341 == 1) 6342 break; 6343 } 6344 #endif 6345 DTRACE_PROBE1(scsi__task__start, scsi_task_t *, task); 6346 lu->lu_new_task(task, dbuf); 6347 break; 6348 case ITASK_CMD_DATA_XFER_DONE: 6349 lu->lu_dbuf_xfer_done(task, dbuf); 6350 break; 6351 case ITASK_CMD_STATUS_DONE: 6352 lu->lu_send_status_done(task); 6353 break; 6354 case ITASK_CMD_ABORT: 6355 if (abort_free) { 6356 stmf_task_free(task); 6357 } else { 6358 stmf_do_task_abort(task); 6359 } 6360 break; 6361 case ITASK_CMD_POLL_LU: 6362 if (!wait_queue) { 6363 lu->lu_task_poll(task); 6364 } 6365 break; 6366 case ITASK_CMD_POLL_LPORT: 6367 if (!wait_queue) 6368 task->task_lport->lport_task_poll(task); 6369 break; 6370 case ITASK_CMD_SEND_STATUS: 6371 /* case ITASK_CMD_XFER_DATA: */ 6372 break; 6373 } 6374 mutex_enter(&w->worker_lock); 6375 if (dec_qdepth) { 6376 w->worker_queue_depth--; 6377 } 6378 } 6379 if ((w->worker_flags & STMF_WORKER_TERMINATE) && (wait_timer == 0)) { 6380 if (w->worker_ref_count == 0) 6381 goto stmf_worker_loop; 6382 else { 6383 wait_timer = ddi_get_lbolt() + 1; 6384 wait_delta = 1; 6385 } 6386 } 6387 w->worker_flags &= ~STMF_WORKER_ACTIVE; 6388 if (wait_timer) { 6389 DTRACE_PROBE1(worker__timed__sleep, stmf_worker_t, w); 6390 (void) cv_reltimedwait(&w->worker_cv, &w->worker_lock, 6391 wait_delta, TR_CLOCK_TICK); 6392 } else { 6393 DTRACE_PROBE1(worker__sleep, stmf_worker_t, w); 6394 cv_wait(&w->worker_cv, &w->worker_lock); 6395 } 6396 DTRACE_PROBE1(worker__wakeup, stmf_worker_t, w); 6397 w->worker_flags |= STMF_WORKER_ACTIVE; 6398 goto stmf_worker_loop; 6399 } 6400 6401 void 6402 stmf_worker_mgmt() 6403 { 6404 int i; 6405 int workers_needed; 6406 uint32_t qd; 6407 clock_t tps, d = 0; 6408 uint32_t cur_max_ntasks = 0; 6409 stmf_worker_t *w; 6410 6411 /* Check if we are trying to increase the # of threads */ 6412 for (i = stmf_nworkers_cur; i < stmf_nworkers_needed; i++) { 6413 if (stmf_workers[i].worker_flags & STMF_WORKER_STARTED) { 6414 stmf_nworkers_cur++; 6415 stmf_nworkers_accepting_cmds++; 6416 } else { 6417 /* Wait for transition to complete */ 6418 return; 6419 } 6420 } 6421 /* Check if we are trying to decrease the # of workers */ 6422 for (i = (stmf_nworkers_cur - 1); i >= stmf_nworkers_needed; i--) { 6423 if ((stmf_workers[i].worker_flags & STMF_WORKER_STARTED) == 0) { 6424 stmf_nworkers_cur--; 6425 /* 6426 * stmf_nworkers_accepting_cmds has already been 6427 * updated by the request to reduce the # of workers. 6428 */ 6429 } else { 6430 /* Wait for transition to complete */ 6431 return; 6432 } 6433 } 6434 /* Check if we are being asked to quit */ 6435 if (stmf_workers_state != STMF_WORKERS_ENABLED) { 6436 if (stmf_nworkers_cur) { 6437 workers_needed = 0; 6438 goto worker_mgmt_trigger_change; 6439 } 6440 return; 6441 } 6442 /* Check if we are starting */ 6443 if (stmf_nworkers_cur < stmf_i_min_nworkers) { 6444 workers_needed = stmf_i_min_nworkers; 6445 goto worker_mgmt_trigger_change; 6446 } 6447 6448 tps = drv_usectohz(1 * 1000 * 1000); 6449 if ((stmf_wm_last != 0) && 6450 ((d = ddi_get_lbolt() - stmf_wm_last) > tps)) { 6451 qd = 0; 6452 for (i = 0; i < stmf_nworkers_accepting_cmds; i++) { 6453 qd += stmf_workers[i].worker_max_qdepth_pu; 6454 stmf_workers[i].worker_max_qdepth_pu = 0; 6455 if (stmf_workers[i].worker_max_sys_qdepth_pu > 6456 cur_max_ntasks) { 6457 cur_max_ntasks = 6458 stmf_workers[i].worker_max_sys_qdepth_pu; 6459 } 6460 stmf_workers[i].worker_max_sys_qdepth_pu = 0; 6461 } 6462 } 6463 stmf_wm_last = ddi_get_lbolt(); 6464 if (d <= tps) { 6465 /* still ramping up */ 6466 return; 6467 } 6468 /* max qdepth cannot be more than max tasks */ 6469 if (qd > cur_max_ntasks) 6470 qd = cur_max_ntasks; 6471 6472 /* See if we have more workers */ 6473 if (qd < stmf_nworkers_accepting_cmds) { 6474 /* 6475 * Since we dont reduce the worker count right away, monitor 6476 * the highest load during the scale_down_delay. 6477 */ 6478 if (qd > stmf_worker_scale_down_qd) 6479 stmf_worker_scale_down_qd = qd; 6480 if (stmf_worker_scale_down_timer == 0) { 6481 stmf_worker_scale_down_timer = ddi_get_lbolt() + 6482 drv_usectohz(stmf_worker_scale_down_delay * 6483 1000 * 1000); 6484 return; 6485 } 6486 if (ddi_get_lbolt() < stmf_worker_scale_down_timer) { 6487 return; 6488 } 6489 /* Its time to reduce the workers */ 6490 if (stmf_worker_scale_down_qd < stmf_i_min_nworkers) 6491 stmf_worker_scale_down_qd = stmf_i_min_nworkers; 6492 if (stmf_worker_scale_down_qd > stmf_i_max_nworkers) 6493 stmf_worker_scale_down_qd = stmf_i_max_nworkers; 6494 if (stmf_worker_scale_down_qd == stmf_nworkers_cur) 6495 return; 6496 workers_needed = stmf_worker_scale_down_qd; 6497 stmf_worker_scale_down_qd = 0; 6498 goto worker_mgmt_trigger_change; 6499 } 6500 stmf_worker_scale_down_qd = 0; 6501 stmf_worker_scale_down_timer = 0; 6502 if (qd > stmf_i_max_nworkers) 6503 qd = stmf_i_max_nworkers; 6504 if (qd < stmf_i_min_nworkers) 6505 qd = stmf_i_min_nworkers; 6506 if (qd == stmf_nworkers_cur) 6507 return; 6508 workers_needed = qd; 6509 goto worker_mgmt_trigger_change; 6510 6511 /* NOTREACHED */ 6512 return; 6513 6514 worker_mgmt_trigger_change: 6515 ASSERT(workers_needed != stmf_nworkers_cur); 6516 if (workers_needed > stmf_nworkers_cur) { 6517 stmf_nworkers_needed = workers_needed; 6518 for (i = stmf_nworkers_cur; i < workers_needed; i++) { 6519 w = &stmf_workers[i]; 6520 w->worker_tid = thread_create(NULL, 0, stmf_worker_task, 6521 (void *)&stmf_workers[i], 0, &p0, TS_RUN, 6522 minclsyspri); 6523 } 6524 return; 6525 } 6526 /* At this point we know that we are decreasing the # of workers */ 6527 stmf_nworkers_accepting_cmds = workers_needed; 6528 stmf_nworkers_needed = workers_needed; 6529 /* Signal the workers that its time to quit */ 6530 for (i = (stmf_nworkers_cur - 1); i >= stmf_nworkers_needed; i--) { 6531 w = &stmf_workers[i]; 6532 ASSERT(w && (w->worker_flags & STMF_WORKER_STARTED)); 6533 mutex_enter(&w->worker_lock); 6534 w->worker_flags |= STMF_WORKER_TERMINATE; 6535 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) 6536 cv_signal(&w->worker_cv); 6537 mutex_exit(&w->worker_lock); 6538 } 6539 } 6540 6541 /* 6542 * Fills out a dbuf from stmf_xfer_data_t (contained in the db_lu_private). 6543 * If all the data has been filled out, frees the xd and makes 6544 * db_lu_private NULL. 6545 */ 6546 void 6547 stmf_xd_to_dbuf(stmf_data_buf_t *dbuf, int set_rel_off) 6548 { 6549 stmf_xfer_data_t *xd; 6550 uint8_t *p; 6551 int i; 6552 uint32_t s; 6553 6554 xd = (stmf_xfer_data_t *)dbuf->db_lu_private; 6555 dbuf->db_data_size = 0; 6556 if (set_rel_off) 6557 dbuf->db_relative_offset = xd->size_done; 6558 for (i = 0; i < dbuf->db_sglist_length; i++) { 6559 s = min(xd->size_left, dbuf->db_sglist[i].seg_length); 6560 p = &xd->buf[xd->size_done]; 6561 bcopy(p, dbuf->db_sglist[i].seg_addr, s); 6562 xd->size_left -= s; 6563 xd->size_done += s; 6564 dbuf->db_data_size += s; 6565 if (xd->size_left == 0) { 6566 kmem_free(xd, xd->alloc_size); 6567 dbuf->db_lu_private = NULL; 6568 return; 6569 } 6570 } 6571 } 6572 6573 /* ARGSUSED */ 6574 stmf_status_t 6575 stmf_dlun0_task_alloc(scsi_task_t *task) 6576 { 6577 return (STMF_SUCCESS); 6578 } 6579 6580 void 6581 stmf_dlun0_new_task(scsi_task_t *task, stmf_data_buf_t *dbuf) 6582 { 6583 uint8_t *cdbp = (uint8_t *)&task->task_cdb[0]; 6584 stmf_i_scsi_session_t *iss; 6585 uint32_t sz, minsz; 6586 uint8_t *p; 6587 stmf_xfer_data_t *xd; 6588 uint8_t inq_page_length = 31; 6589 6590 if (task->task_mgmt_function) { 6591 stmf_scsilib_handle_task_mgmt(task); 6592 return; 6593 } 6594 6595 switch (cdbp[0]) { 6596 case SCMD_INQUIRY: 6597 /* 6598 * Basic protocol checks. In addition, only reply to 6599 * standard inquiry. Otherwise, the LU provider needs 6600 * to respond. 6601 */ 6602 6603 if (cdbp[2] || (cdbp[1] & 1) || cdbp[5]) { 6604 stmf_scsilib_send_status(task, STATUS_CHECK, 6605 STMF_SAA_INVALID_FIELD_IN_CDB); 6606 return; 6607 } 6608 6609 task->task_cmd_xfer_length = 6610 (((uint32_t)cdbp[3]) << 8) | cdbp[4]; 6611 6612 if (task->task_additional_flags & 6613 TASK_AF_NO_EXPECTED_XFER_LENGTH) { 6614 task->task_expected_xfer_length = 6615 task->task_cmd_xfer_length; 6616 } 6617 6618 sz = min(task->task_expected_xfer_length, 6619 min(36, task->task_cmd_xfer_length)); 6620 minsz = 36; 6621 6622 if (sz == 0) { 6623 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 6624 return; 6625 } 6626 6627 if (dbuf && (dbuf->db_sglist[0].seg_length < 36)) { 6628 /* 6629 * Ignore any preallocated dbuf if the size is less 6630 * than 36. It will be freed during the task_free. 6631 */ 6632 dbuf = NULL; 6633 } 6634 if (dbuf == NULL) 6635 dbuf = stmf_alloc_dbuf(task, minsz, &minsz, 0); 6636 if ((dbuf == NULL) || (dbuf->db_sglist[0].seg_length < sz)) { 6637 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 6638 STMF_ALLOC_FAILURE, NULL); 6639 return; 6640 } 6641 dbuf->db_lu_private = NULL; 6642 6643 p = dbuf->db_sglist[0].seg_addr; 6644 6645 /* 6646 * Standard inquiry handling only. 6647 */ 6648 6649 bzero(p, inq_page_length + 5); 6650 6651 p[0] = DPQ_SUPPORTED | DTYPE_UNKNOWN; 6652 p[2] = 5; 6653 p[3] = 0x12; 6654 p[4] = inq_page_length; 6655 p[6] = 0x80; 6656 6657 (void) strncpy((char *)p+8, "SUN ", 8); 6658 (void) strncpy((char *)p+16, "COMSTAR ", 16); 6659 (void) strncpy((char *)p+32, "1.0 ", 4); 6660 6661 dbuf->db_data_size = sz; 6662 dbuf->db_relative_offset = 0; 6663 dbuf->db_flags = DB_DIRECTION_TO_RPORT; 6664 (void) stmf_xfer_data(task, dbuf, 0); 6665 6666 return; 6667 6668 case SCMD_REPORT_LUNS: 6669 task->task_cmd_xfer_length = 6670 ((((uint32_t)task->task_cdb[6]) << 24) | 6671 (((uint32_t)task->task_cdb[7]) << 16) | 6672 (((uint32_t)task->task_cdb[8]) << 8) | 6673 ((uint32_t)task->task_cdb[9])); 6674 6675 if (task->task_additional_flags & 6676 TASK_AF_NO_EXPECTED_XFER_LENGTH) { 6677 task->task_expected_xfer_length = 6678 task->task_cmd_xfer_length; 6679 } 6680 6681 sz = min(task->task_expected_xfer_length, 6682 task->task_cmd_xfer_length); 6683 6684 if (sz < 16) { 6685 stmf_scsilib_send_status(task, STATUS_CHECK, 6686 STMF_SAA_INVALID_FIELD_IN_CDB); 6687 return; 6688 } 6689 6690 iss = (stmf_i_scsi_session_t *) 6691 task->task_session->ss_stmf_private; 6692 rw_enter(iss->iss_lockp, RW_WRITER); 6693 xd = stmf_session_prepare_report_lun_data(iss->iss_sm); 6694 rw_exit(iss->iss_lockp); 6695 6696 if (xd == NULL) { 6697 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 6698 STMF_ALLOC_FAILURE, NULL); 6699 return; 6700 } 6701 6702 sz = min(sz, xd->size_left); 6703 xd->size_left = sz; 6704 minsz = min(512, sz); 6705 6706 if (dbuf == NULL) 6707 dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0); 6708 if (dbuf == NULL) { 6709 kmem_free(xd, xd->alloc_size); 6710 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 6711 STMF_ALLOC_FAILURE, NULL); 6712 return; 6713 } 6714 dbuf->db_lu_private = xd; 6715 stmf_xd_to_dbuf(dbuf, 1); 6716 6717 atomic_and_32(&iss->iss_flags, 6718 ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS)); 6719 dbuf->db_flags = DB_DIRECTION_TO_RPORT; 6720 (void) stmf_xfer_data(task, dbuf, 0); 6721 return; 6722 } 6723 6724 stmf_scsilib_send_status(task, STATUS_CHECK, STMF_SAA_INVALID_OPCODE); 6725 } 6726 6727 void 6728 stmf_dlun0_dbuf_done(scsi_task_t *task, stmf_data_buf_t *dbuf) 6729 { 6730 stmf_i_scsi_task_t *itask = 6731 (stmf_i_scsi_task_t *)task->task_stmf_private; 6732 6733 if (dbuf->db_xfer_status != STMF_SUCCESS) { 6734 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 6735 dbuf->db_xfer_status, NULL); 6736 return; 6737 } 6738 task->task_nbytes_transferred += dbuf->db_data_size; 6739 if (dbuf->db_lu_private) { 6740 /* There is more */ 6741 stmf_xd_to_dbuf(dbuf, 1); 6742 (void) stmf_xfer_data(task, dbuf, 0); 6743 return; 6744 } 6745 6746 stmf_free_dbuf(task, dbuf); 6747 /* 6748 * If this is a proxy task, it will need to be completed from the 6749 * proxy port provider. This message lets pppt know that the xfer 6750 * is complete. When we receive the status from pppt, we will 6751 * then relay that status back to the lport. 6752 */ 6753 if (itask->itask_flags & ITASK_PROXY_TASK) { 6754 stmf_ic_msg_t *ic_xfer_done_msg = NULL; 6755 stmf_status_t ic_ret = STMF_FAILURE; 6756 uint64_t session_msg_id; 6757 mutex_enter(&stmf_state.stmf_lock); 6758 session_msg_id = stmf_proxy_msg_id++; 6759 mutex_exit(&stmf_state.stmf_lock); 6760 /* send xfer done status to pppt */ 6761 ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc( 6762 itask->itask_proxy_msg_id, 6763 task->task_session->ss_session_id, 6764 STMF_SUCCESS, session_msg_id); 6765 if (ic_xfer_done_msg) { 6766 ic_ret = ic_tx_msg(ic_xfer_done_msg); 6767 if (ic_ret != STMF_IC_MSG_SUCCESS) { 6768 cmn_err(CE_WARN, "unable to xmit session msg"); 6769 } 6770 } 6771 /* task will be completed from pppt */ 6772 return; 6773 } 6774 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 6775 } 6776 6777 /* ARGSUSED */ 6778 void 6779 stmf_dlun0_status_done(scsi_task_t *task) 6780 { 6781 } 6782 6783 /* ARGSUSED */ 6784 void 6785 stmf_dlun0_task_free(scsi_task_t *task) 6786 { 6787 } 6788 6789 /* ARGSUSED */ 6790 stmf_status_t 6791 stmf_dlun0_abort(struct stmf_lu *lu, int abort_cmd, void *arg, uint32_t flags) 6792 { 6793 scsi_task_t *task = (scsi_task_t *)arg; 6794 stmf_i_scsi_task_t *itask = 6795 (stmf_i_scsi_task_t *)task->task_stmf_private; 6796 stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 6797 int i; 6798 uint8_t map; 6799 6800 if ((task->task_mgmt_function) && (itask->itask_flags & 6801 (ITASK_CAUSING_LU_RESET | ITASK_CAUSING_TARGET_RESET))) { 6802 switch (task->task_mgmt_function) { 6803 case TM_ABORT_TASK: 6804 case TM_ABORT_TASK_SET: 6805 case TM_CLEAR_TASK_SET: 6806 case TM_LUN_RESET: 6807 atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE); 6808 break; 6809 case TM_TARGET_RESET: 6810 case TM_TARGET_COLD_RESET: 6811 case TM_TARGET_WARM_RESET: 6812 stmf_abort_target_reset(task); 6813 break; 6814 } 6815 return (STMF_ABORT_SUCCESS); 6816 } 6817 6818 /* 6819 * OK so its not a task mgmt. Make sure we free any xd sitting 6820 * inside any dbuf. 6821 */ 6822 if ((map = itask->itask_allocated_buf_map) != 0) { 6823 for (i = 0; i < 4; i++) { 6824 if ((map & 1) && 6825 ((itask->itask_dbufs[i])->db_lu_private)) { 6826 stmf_xfer_data_t *xd; 6827 stmf_data_buf_t *dbuf; 6828 6829 dbuf = itask->itask_dbufs[i]; 6830 xd = (stmf_xfer_data_t *)dbuf->db_lu_private; 6831 dbuf->db_lu_private = NULL; 6832 kmem_free(xd, xd->alloc_size); 6833 } 6834 map >>= 1; 6835 } 6836 } 6837 return (STMF_ABORT_SUCCESS); 6838 } 6839 6840 void 6841 stmf_dlun0_task_poll(struct scsi_task *task) 6842 { 6843 /* Right now we only do this for handling task management functions */ 6844 ASSERT(task->task_mgmt_function); 6845 6846 switch (task->task_mgmt_function) { 6847 case TM_ABORT_TASK: 6848 case TM_ABORT_TASK_SET: 6849 case TM_CLEAR_TASK_SET: 6850 case TM_LUN_RESET: 6851 (void) stmf_lun_reset_poll(task->task_lu, task, 0); 6852 return; 6853 case TM_TARGET_RESET: 6854 case TM_TARGET_COLD_RESET: 6855 case TM_TARGET_WARM_RESET: 6856 stmf_target_reset_poll(task); 6857 return; 6858 } 6859 } 6860 6861 /* ARGSUSED */ 6862 void 6863 stmf_dlun0_ctl(struct stmf_lu *lu, int cmd, void *arg) 6864 { 6865 /* This function will never be called */ 6866 cmn_err(CE_WARN, "stmf_dlun0_ctl called with cmd %x", cmd); 6867 } 6868 6869 void 6870 stmf_dlun_init() 6871 { 6872 stmf_i_lu_t *ilu; 6873 6874 dlun0 = stmf_alloc(STMF_STRUCT_STMF_LU, 0, 0); 6875 dlun0->lu_task_alloc = stmf_dlun0_task_alloc; 6876 dlun0->lu_new_task = stmf_dlun0_new_task; 6877 dlun0->lu_dbuf_xfer_done = stmf_dlun0_dbuf_done; 6878 dlun0->lu_send_status_done = stmf_dlun0_status_done; 6879 dlun0->lu_task_free = stmf_dlun0_task_free; 6880 dlun0->lu_abort = stmf_dlun0_abort; 6881 dlun0->lu_task_poll = stmf_dlun0_task_poll; 6882 dlun0->lu_ctl = stmf_dlun0_ctl; 6883 6884 ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private; 6885 ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1; 6886 } 6887 6888 stmf_status_t 6889 stmf_dlun_fini() 6890 { 6891 stmf_i_lu_t *ilu; 6892 6893 ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private; 6894 6895 ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free); 6896 if (ilu->ilu_ntasks) { 6897 stmf_i_scsi_task_t *itask, *nitask; 6898 6899 nitask = ilu->ilu_tasks; 6900 do { 6901 itask = nitask; 6902 nitask = itask->itask_lu_next; 6903 dlun0->lu_task_free(itask->itask_task); 6904 stmf_free(itask->itask_task); 6905 } while (nitask != NULL); 6906 6907 } 6908 stmf_free(dlun0); 6909 return (STMF_SUCCESS); 6910 } 6911 6912 void 6913 stmf_abort_target_reset(scsi_task_t *task) 6914 { 6915 stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *) 6916 task->task_session->ss_stmf_private; 6917 stmf_lun_map_t *lm; 6918 stmf_lun_map_ent_t *lm_ent; 6919 stmf_i_lu_t *ilu; 6920 int i; 6921 6922 rw_enter(iss->iss_lockp, RW_READER); 6923 lm = iss->iss_sm; 6924 for (i = 0; i < lm->lm_nentries; i++) { 6925 if (lm->lm_plus[i] == NULL) 6926 continue; 6927 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 6928 ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private; 6929 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 6930 atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE); 6931 } 6932 } 6933 atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE); 6934 rw_exit(iss->iss_lockp); 6935 } 6936 6937 /* 6938 * The return value is only used by function managing target reset. 6939 */ 6940 stmf_status_t 6941 stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task, int target_reset) 6942 { 6943 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 6944 int ntasks_pending; 6945 6946 ntasks_pending = ilu->ilu_ntasks - ilu->ilu_ntasks_free; 6947 /* 6948 * This function is also used during Target reset. The idea is that 6949 * once all the commands are aborted, call the LU's reset entry 6950 * point (abort entry point with a reset flag). But if this Task 6951 * mgmt is running on this LU then all the tasks cannot be aborted. 6952 * one task (this task) will still be running which is OK. 6953 */ 6954 if ((ntasks_pending == 0) || ((task->task_lu == lu) && 6955 (ntasks_pending == 1))) { 6956 stmf_status_t ret; 6957 6958 if ((task->task_mgmt_function == TM_LUN_RESET) || 6959 (task->task_mgmt_function == TM_TARGET_RESET) || 6960 (task->task_mgmt_function == TM_TARGET_WARM_RESET) || 6961 (task->task_mgmt_function == TM_TARGET_COLD_RESET)) { 6962 ret = lu->lu_abort(lu, STMF_LU_RESET_STATE, task, 0); 6963 } else { 6964 ret = STMF_SUCCESS; 6965 } 6966 if (ret == STMF_SUCCESS) { 6967 atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE); 6968 } 6969 if (target_reset) { 6970 return (ret); 6971 } 6972 if (ret == STMF_SUCCESS) { 6973 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 6974 return (ret); 6975 } 6976 if (ret != STMF_BUSY) { 6977 stmf_abort(STMF_QUEUE_TASK_ABORT, task, ret, NULL); 6978 return (ret); 6979 } 6980 } 6981 6982 if (target_reset) { 6983 /* Tell target reset polling code that we are not done */ 6984 return (STMF_BUSY); 6985 } 6986 6987 if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT) 6988 != STMF_SUCCESS) { 6989 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 6990 STMF_ALLOC_FAILURE, NULL); 6991 return (STMF_SUCCESS); 6992 } 6993 6994 return (STMF_SUCCESS); 6995 } 6996 6997 void 6998 stmf_target_reset_poll(struct scsi_task *task) 6999 { 7000 stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *) 7001 task->task_session->ss_stmf_private; 7002 stmf_lun_map_t *lm; 7003 stmf_lun_map_ent_t *lm_ent; 7004 stmf_i_lu_t *ilu; 7005 stmf_status_t ret; 7006 int i; 7007 int not_done = 0; 7008 7009 ASSERT(iss->iss_flags & ISS_RESET_ACTIVE); 7010 7011 rw_enter(iss->iss_lockp, RW_READER); 7012 lm = iss->iss_sm; 7013 for (i = 0; i < lm->lm_nentries; i++) { 7014 if (lm->lm_plus[i] == NULL) 7015 continue; 7016 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i]; 7017 ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private; 7018 if (ilu->ilu_flags & ILU_RESET_ACTIVE) { 7019 rw_exit(iss->iss_lockp); 7020 ret = stmf_lun_reset_poll(lm_ent->ent_lu, task, 1); 7021 rw_enter(iss->iss_lockp, RW_READER); 7022 if (ret == STMF_SUCCESS) 7023 continue; 7024 not_done = 1; 7025 if (ret != STMF_BUSY) { 7026 rw_exit(iss->iss_lockp); 7027 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 7028 STMF_ABORTED, NULL); 7029 return; 7030 } 7031 } 7032 } 7033 rw_exit(iss->iss_lockp); 7034 7035 if (not_done) { 7036 if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT) 7037 != STMF_SUCCESS) { 7038 stmf_abort(STMF_QUEUE_TASK_ABORT, task, 7039 STMF_ALLOC_FAILURE, NULL); 7040 return; 7041 } 7042 return; 7043 } 7044 7045 atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE); 7046 7047 stmf_scsilib_send_status(task, STATUS_GOOD, 0); 7048 } 7049 7050 stmf_status_t 7051 stmf_lu_add_event(stmf_lu_t *lu, int eventid) 7052 { 7053 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 7054 7055 if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) { 7056 return (STMF_INVALID_ARG); 7057 } 7058 7059 STMF_EVENT_ADD(ilu->ilu_event_hdl, eventid); 7060 return (STMF_SUCCESS); 7061 } 7062 7063 stmf_status_t 7064 stmf_lu_remove_event(stmf_lu_t *lu, int eventid) 7065 { 7066 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 7067 7068 if (eventid == STMF_EVENT_ALL) { 7069 STMF_EVENT_CLEAR_ALL(ilu->ilu_event_hdl); 7070 return (STMF_SUCCESS); 7071 } 7072 7073 if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) { 7074 return (STMF_INVALID_ARG); 7075 } 7076 7077 STMF_EVENT_REMOVE(ilu->ilu_event_hdl, eventid); 7078 return (STMF_SUCCESS); 7079 } 7080 7081 stmf_status_t 7082 stmf_lport_add_event(stmf_local_port_t *lport, int eventid) 7083 { 7084 stmf_i_local_port_t *ilport = 7085 (stmf_i_local_port_t *)lport->lport_stmf_private; 7086 7087 if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) { 7088 return (STMF_INVALID_ARG); 7089 } 7090 7091 STMF_EVENT_ADD(ilport->ilport_event_hdl, eventid); 7092 return (STMF_SUCCESS); 7093 } 7094 7095 stmf_status_t 7096 stmf_lport_remove_event(stmf_local_port_t *lport, int eventid) 7097 { 7098 stmf_i_local_port_t *ilport = 7099 (stmf_i_local_port_t *)lport->lport_stmf_private; 7100 7101 if (eventid == STMF_EVENT_ALL) { 7102 STMF_EVENT_CLEAR_ALL(ilport->ilport_event_hdl); 7103 return (STMF_SUCCESS); 7104 } 7105 7106 if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) { 7107 return (STMF_INVALID_ARG); 7108 } 7109 7110 STMF_EVENT_REMOVE(ilport->ilport_event_hdl, eventid); 7111 return (STMF_SUCCESS); 7112 } 7113 7114 void 7115 stmf_generate_lu_event(stmf_i_lu_t *ilu, int eventid, void *arg, uint32_t flags) 7116 { 7117 if (STMF_EVENT_ENABLED(ilu->ilu_event_hdl, eventid) && 7118 (ilu->ilu_lu->lu_event_handler != NULL)) { 7119 ilu->ilu_lu->lu_event_handler(ilu->ilu_lu, eventid, arg, flags); 7120 } 7121 } 7122 7123 void 7124 stmf_generate_lport_event(stmf_i_local_port_t *ilport, int eventid, void *arg, 7125 uint32_t flags) 7126 { 7127 if (STMF_EVENT_ENABLED(ilport->ilport_event_hdl, eventid) && 7128 (ilport->ilport_lport->lport_event_handler != NULL)) { 7129 ilport->ilport_lport->lport_event_handler( 7130 ilport->ilport_lport, eventid, arg, flags); 7131 } 7132 } 7133 7134 /* 7135 * With the possibility of having multiple itl sessions pointing to the 7136 * same itl_kstat_info, the ilu_kstat_lock mutex is used to synchronize 7137 * the kstat update of the ilu_kstat_io, itl_kstat_taskq and itl_kstat_lu_xfer 7138 * statistics. 7139 */ 7140 void 7141 stmf_itl_task_start(stmf_i_scsi_task_t *itask) 7142 { 7143 stmf_itl_data_t *itl = itask->itask_itl_datap; 7144 scsi_task_t *task = itask->itask_task; 7145 stmf_i_lu_t *ilu; 7146 7147 if (itl == NULL || task->task_lu == dlun0) 7148 return; 7149 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 7150 itask->itask_start_timestamp = gethrtime(); 7151 if (ilu->ilu_kstat_io != NULL) { 7152 mutex_enter(ilu->ilu_kstat_io->ks_lock); 7153 stmf_update_kstat_lu_q(itask->itask_task, kstat_waitq_enter); 7154 mutex_exit(ilu->ilu_kstat_io->ks_lock); 7155 } 7156 7157 stmf_update_kstat_lport_q(itask->itask_task, kstat_waitq_enter); 7158 } 7159 7160 void 7161 stmf_itl_lu_new_task(stmf_i_scsi_task_t *itask) 7162 { 7163 stmf_itl_data_t *itl = itask->itask_itl_datap; 7164 scsi_task_t *task = itask->itask_task; 7165 stmf_i_lu_t *ilu; 7166 7167 if (itl == NULL || task->task_lu == dlun0) 7168 return; 7169 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 7170 if (ilu->ilu_kstat_io != NULL) { 7171 mutex_enter(ilu->ilu_kstat_io->ks_lock); 7172 stmf_update_kstat_lu_q(itask->itask_task, kstat_waitq_to_runq); 7173 mutex_exit(ilu->ilu_kstat_io->ks_lock); 7174 } 7175 7176 stmf_update_kstat_lport_q(itask->itask_task, kstat_waitq_to_runq); 7177 } 7178 7179 void 7180 stmf_itl_task_done(stmf_i_scsi_task_t *itask) 7181 { 7182 stmf_itl_data_t *itl = itask->itask_itl_datap; 7183 scsi_task_t *task = itask->itask_task; 7184 stmf_i_lu_t *ilu; 7185 7186 itask->itask_done_timestamp = gethrtime(); 7187 7188 if (itl == NULL || task->task_lu == dlun0) 7189 return; 7190 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private; 7191 7192 if (ilu->ilu_kstat_io == NULL) 7193 return; 7194 7195 mutex_enter(ilu->ilu_kstat_io->ks_lock); 7196 7197 if (itask->itask_flags & ITASK_KSTAT_IN_RUNQ) { 7198 stmf_update_kstat_lu_q(task, kstat_runq_exit); 7199 mutex_exit(ilu->ilu_kstat_io->ks_lock); 7200 stmf_update_kstat_lport_q(task, kstat_runq_exit); 7201 } else { 7202 stmf_update_kstat_lu_q(task, kstat_waitq_exit); 7203 mutex_exit(ilu->ilu_kstat_io->ks_lock); 7204 stmf_update_kstat_lport_q(task, kstat_waitq_exit); 7205 } 7206 } 7207 7208 static void 7209 stmf_lport_xfer_start(stmf_i_scsi_task_t *itask, stmf_data_buf_t *dbuf) 7210 { 7211 stmf_itl_data_t *itl = itask->itask_itl_datap; 7212 7213 if (itl == NULL) 7214 return; 7215 7216 DTRACE_PROBE2(scsi__xfer__start, scsi_task_t *, itask->itask_task, 7217 stmf_data_buf_t *, dbuf); 7218 7219 dbuf->db_xfer_start_timestamp = gethrtime(); 7220 } 7221 7222 static void 7223 stmf_lport_xfer_done(stmf_i_scsi_task_t *itask, stmf_data_buf_t *dbuf) 7224 { 7225 stmf_itl_data_t *itl = itask->itask_itl_datap; 7226 hrtime_t elapsed_time; 7227 uint64_t xfer_size; 7228 7229 if (itl == NULL) 7230 return; 7231 7232 xfer_size = (dbuf->db_xfer_status == STMF_SUCCESS) ? 7233 dbuf->db_data_size : 0; 7234 7235 elapsed_time = gethrtime() - dbuf->db_xfer_start_timestamp; 7236 if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) { 7237 atomic_add_64((uint64_t *)&itask->itask_lport_read_time, 7238 elapsed_time); 7239 atomic_add_64((uint64_t *)&itask->itask_read_xfer, 7240 xfer_size); 7241 } else { 7242 atomic_add_64((uint64_t *)&itask->itask_lport_write_time, 7243 elapsed_time); 7244 atomic_add_64((uint64_t *)&itask->itask_write_xfer, 7245 xfer_size); 7246 } 7247 7248 DTRACE_PROBE3(scsi__xfer__end, scsi_task_t *, itask->itask_task, 7249 stmf_data_buf_t *, dbuf, hrtime_t, elapsed_time); 7250 7251 dbuf->db_xfer_start_timestamp = 0; 7252 } 7253 7254 void 7255 stmf_svc_init() 7256 { 7257 if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED) 7258 return; 7259 stmf_state.stmf_svc_tailp = &stmf_state.stmf_svc_active; 7260 stmf_state.stmf_svc_taskq = ddi_taskq_create(0, "STMF_SVC_TASKQ", 1, 7261 TASKQ_DEFAULTPRI, 0); 7262 (void) ddi_taskq_dispatch(stmf_state.stmf_svc_taskq, 7263 stmf_svc, 0, DDI_SLEEP); 7264 } 7265 7266 stmf_status_t 7267 stmf_svc_fini() 7268 { 7269 uint32_t i; 7270 7271 mutex_enter(&stmf_state.stmf_lock); 7272 if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED) { 7273 stmf_state.stmf_svc_flags |= STMF_SVC_TERMINATE; 7274 cv_signal(&stmf_state.stmf_cv); 7275 } 7276 mutex_exit(&stmf_state.stmf_lock); 7277 7278 /* Wait for 5 seconds */ 7279 for (i = 0; i < 500; i++) { 7280 if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED) 7281 delay(drv_usectohz(10000)); 7282 else 7283 break; 7284 } 7285 if (i == 500) 7286 return (STMF_BUSY); 7287 7288 ddi_taskq_destroy(stmf_state.stmf_svc_taskq); 7289 7290 return (STMF_SUCCESS); 7291 } 7292 7293 struct stmf_svc_clocks { 7294 clock_t drain_start, drain_next; 7295 clock_t timing_start, timing_next; 7296 clock_t worker_delay; 7297 }; 7298 7299 /* ARGSUSED */ 7300 void 7301 stmf_svc(void *arg) 7302 { 7303 stmf_svc_req_t *req; 7304 stmf_lu_t *lu; 7305 stmf_i_lu_t *ilu; 7306 stmf_local_port_t *lport; 7307 struct stmf_svc_clocks clks = { 0 }; 7308 7309 mutex_enter(&stmf_state.stmf_lock); 7310 stmf_state.stmf_svc_flags |= STMF_SVC_STARTED | STMF_SVC_ACTIVE; 7311 7312 while (!(stmf_state.stmf_svc_flags & STMF_SVC_TERMINATE)) { 7313 if (stmf_state.stmf_svc_active == NULL) { 7314 stmf_svc_timeout(&clks); 7315 continue; 7316 } 7317 7318 /* 7319 * Pop the front request from the active list. After this, 7320 * the request will no longer be referenced by global state, 7321 * so it should be safe to access it without holding the 7322 * stmf state lock. 7323 */ 7324 req = stmf_state.stmf_svc_active; 7325 stmf_state.stmf_svc_active = req->svc_next; 7326 7327 if (stmf_state.stmf_svc_active == NULL) 7328 stmf_state.stmf_svc_tailp = &stmf_state.stmf_svc_active; 7329 7330 switch (req->svc_cmd) { 7331 case STMF_CMD_LPORT_ONLINE: 7332 /* Fallthrough */ 7333 case STMF_CMD_LPORT_OFFLINE: 7334 mutex_exit(&stmf_state.stmf_lock); 7335 lport = (stmf_local_port_t *)req->svc_obj; 7336 lport->lport_ctl(lport, req->svc_cmd, &req->svc_info); 7337 break; 7338 case STMF_CMD_LU_ONLINE: 7339 mutex_exit(&stmf_state.stmf_lock); 7340 lu = (stmf_lu_t *)req->svc_obj; 7341 lu->lu_ctl(lu, req->svc_cmd, &req->svc_info); 7342 break; 7343 case STMF_CMD_LU_OFFLINE: 7344 /* Remove all mappings of this LU */ 7345 stmf_session_lu_unmapall((stmf_lu_t *)req->svc_obj); 7346 /* Kill all the pending I/Os for this LU */ 7347 mutex_exit(&stmf_state.stmf_lock); 7348 stmf_task_lu_killall((stmf_lu_t *)req->svc_obj, NULL, 7349 STMF_ABORTED); 7350 lu = (stmf_lu_t *)req->svc_obj; 7351 ilu = (stmf_i_lu_t *)lu->lu_stmf_private; 7352 stmf_wait_ilu_tasks_finish(ilu); 7353 lu->lu_ctl(lu, req->svc_cmd, &req->svc_info); 7354 break; 7355 default: 7356 cmn_err(CE_PANIC, "stmf_svc: unknown cmd %d", 7357 req->svc_cmd); 7358 } 7359 7360 kmem_free(req, req->svc_req_alloc_size); 7361 mutex_enter(&stmf_state.stmf_lock); 7362 } 7363 7364 stmf_state.stmf_svc_flags &= ~(STMF_SVC_STARTED | STMF_SVC_ACTIVE); 7365 mutex_exit(&stmf_state.stmf_lock); 7366 } 7367 7368 static void 7369 stmf_svc_timeout(struct stmf_svc_clocks *clks) 7370 { 7371 clock_t td; 7372 stmf_i_local_port_t *ilport, *next_ilport; 7373 stmf_i_scsi_session_t *iss; 7374 7375 ASSERT(mutex_owned(&stmf_state.stmf_lock)); 7376 7377 td = drv_usectohz(20000); 7378 7379 /* Do timeouts */ 7380 if (stmf_state.stmf_nlus && 7381 ((!clks->timing_next) || (ddi_get_lbolt() >= clks->timing_next))) { 7382 if (!stmf_state.stmf_svc_ilu_timing) { 7383 /* we are starting a new round */ 7384 stmf_state.stmf_svc_ilu_timing = 7385 stmf_state.stmf_ilulist; 7386 clks->timing_start = ddi_get_lbolt(); 7387 } 7388 7389 stmf_check_ilu_timing(); 7390 if (!stmf_state.stmf_svc_ilu_timing) { 7391 /* we finished a complete round */ 7392 clks->timing_next = 7393 clks->timing_start + drv_usectohz(5*1000*1000); 7394 } else { 7395 /* we still have some ilu items to check */ 7396 clks->timing_next = 7397 ddi_get_lbolt() + drv_usectohz(1*1000*1000); 7398 } 7399 7400 if (stmf_state.stmf_svc_active) 7401 return; 7402 } 7403 7404 /* Check if there are free tasks to clear */ 7405 if (stmf_state.stmf_nlus && 7406 ((!clks->drain_next) || (ddi_get_lbolt() >= clks->drain_next))) { 7407 if (!stmf_state.stmf_svc_ilu_draining) { 7408 /* we are starting a new round */ 7409 stmf_state.stmf_svc_ilu_draining = 7410 stmf_state.stmf_ilulist; 7411 clks->drain_start = ddi_get_lbolt(); 7412 } 7413 7414 stmf_check_freetask(); 7415 if (!stmf_state.stmf_svc_ilu_draining) { 7416 /* we finished a complete round */ 7417 clks->drain_next = 7418 clks->drain_start + drv_usectohz(10*1000*1000); 7419 } else { 7420 /* we still have some ilu items to check */ 7421 clks->drain_next = 7422 ddi_get_lbolt() + drv_usectohz(1*1000*1000); 7423 } 7424 7425 if (stmf_state.stmf_svc_active) 7426 return; 7427 } 7428 7429 /* Check if we need to run worker_mgmt */ 7430 if (ddi_get_lbolt() > clks->worker_delay) { 7431 stmf_worker_mgmt(); 7432 clks->worker_delay = ddi_get_lbolt() + 7433 stmf_worker_mgmt_delay; 7434 } 7435 7436 /* Check if any active session got its 1st LUN */ 7437 if (stmf_state.stmf_process_initial_luns) { 7438 int stmf_level = 0; 7439 int port_level; 7440 7441 for (ilport = stmf_state.stmf_ilportlist; ilport; 7442 ilport = next_ilport) { 7443 int ilport_lock_held; 7444 next_ilport = ilport->ilport_next; 7445 7446 if ((ilport->ilport_flags & 7447 ILPORT_SS_GOT_INITIAL_LUNS) == 0) 7448 continue; 7449 7450 port_level = 0; 7451 rw_enter(&ilport->ilport_lock, RW_READER); 7452 ilport_lock_held = 1; 7453 7454 for (iss = ilport->ilport_ss_list; iss; 7455 iss = iss->iss_next) { 7456 if ((iss->iss_flags & 7457 ISS_GOT_INITIAL_LUNS) == 0) 7458 continue; 7459 7460 port_level++; 7461 stmf_level++; 7462 atomic_and_32(&iss->iss_flags, 7463 ~ISS_GOT_INITIAL_LUNS); 7464 atomic_or_32(&iss->iss_flags, 7465 ISS_EVENT_ACTIVE); 7466 rw_exit(&ilport->ilport_lock); 7467 ilport_lock_held = 0; 7468 mutex_exit(&stmf_state.stmf_lock); 7469 stmf_generate_lport_event(ilport, 7470 LPORT_EVENT_INITIAL_LUN_MAPPED, 7471 iss->iss_ss, 0); 7472 atomic_and_32(&iss->iss_flags, 7473 ~ISS_EVENT_ACTIVE); 7474 mutex_enter(&stmf_state.stmf_lock); 7475 /* 7476 * scan all the ilports again as the 7477 * ilport list might have changed. 7478 */ 7479 next_ilport = stmf_state.stmf_ilportlist; 7480 break; 7481 } 7482 7483 if (port_level == 0) 7484 atomic_and_32(&ilport->ilport_flags, 7485 ~ILPORT_SS_GOT_INITIAL_LUNS); 7486 /* drop the lock if we are holding it. */ 7487 if (ilport_lock_held == 1) 7488 rw_exit(&ilport->ilport_lock); 7489 7490 /* Max 4 session at a time */ 7491 if (stmf_level >= 4) 7492 break; 7493 } 7494 7495 if (stmf_level == 0) 7496 stmf_state.stmf_process_initial_luns = 0; 7497 } 7498 7499 stmf_state.stmf_svc_flags &= ~STMF_SVC_ACTIVE; 7500 (void) cv_reltimedwait(&stmf_state.stmf_cv, 7501 &stmf_state.stmf_lock, td, TR_CLOCK_TICK); 7502 stmf_state.stmf_svc_flags |= STMF_SVC_ACTIVE; 7503 } 7504 7505 /* 7506 * Waits for ongoing I/O tasks to finish on an LU in preparation for 7507 * the LU's offlining. The LU should already be in an Offlining state 7508 * (otherwise I/O to the LU might never end). There is an additional 7509 * enforcement of this via a deadman timer check. 7510 */ 7511 static void 7512 stmf_wait_ilu_tasks_finish(stmf_i_lu_t *ilu) 7513 { 7514 clock_t start, now, deadline; 7515 7516 start = now = ddi_get_lbolt(); 7517 deadline = start + drv_usectohz(stmf_io_deadman * 1000000llu); 7518 mutex_enter(&ilu->ilu_task_lock); 7519 while (ilu->ilu_ntasks != ilu->ilu_ntasks_free) { 7520 (void) cv_timedwait(&ilu->ilu_offline_pending_cv, 7521 &ilu->ilu_task_lock, deadline); 7522 now = ddi_get_lbolt(); 7523 if (now > deadline) { 7524 if (stmf_io_deadman_enabled) { 7525 cmn_err(CE_PANIC, "stmf_svc: I/O deadman hit " 7526 "on STMF_CMD_LU_OFFLINE after %d seconds", 7527 stmf_io_deadman); 7528 } else { 7529 /* keep on spinning */ 7530 deadline = now + drv_usectohz(stmf_io_deadman * 7531 1000000llu); 7532 } 7533 } 7534 } 7535 mutex_exit(&ilu->ilu_task_lock); 7536 DTRACE_PROBE1(deadman__timeout__wait, clock_t, now - start); 7537 } 7538 7539 void 7540 stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info) 7541 { 7542 stmf_svc_req_t *req; 7543 int s; 7544 7545 ASSERT(!mutex_owned(&stmf_state.stmf_lock)); 7546 s = sizeof (stmf_svc_req_t); 7547 if (info->st_additional_info) { 7548 s += strlen(info->st_additional_info) + 1; 7549 } 7550 req = kmem_zalloc(s, KM_SLEEP); 7551 7552 req->svc_cmd = cmd; 7553 req->svc_obj = obj; 7554 req->svc_info.st_rflags = info->st_rflags; 7555 if (info->st_additional_info) { 7556 req->svc_info.st_additional_info = (char *)(GET_BYTE_OFFSET(req, 7557 sizeof (stmf_svc_req_t))); 7558 (void) strcpy(req->svc_info.st_additional_info, 7559 info->st_additional_info); 7560 } 7561 req->svc_req_alloc_size = s; 7562 req->svc_next = NULL; 7563 7564 mutex_enter(&stmf_state.stmf_lock); 7565 *stmf_state.stmf_svc_tailp = req; 7566 stmf_state.stmf_svc_tailp = &req->svc_next; 7567 if ((stmf_state.stmf_svc_flags & STMF_SVC_ACTIVE) == 0) { 7568 cv_signal(&stmf_state.stmf_cv); 7569 } 7570 mutex_exit(&stmf_state.stmf_lock); 7571 } 7572 7573 static void 7574 stmf_svc_kill_obj_requests(void *obj) 7575 { 7576 stmf_svc_req_t *prev_req = NULL; 7577 stmf_svc_req_t *next_req; 7578 stmf_svc_req_t *req; 7579 7580 ASSERT(mutex_owned(&stmf_state.stmf_lock)); 7581 7582 for (req = stmf_state.stmf_svc_active; req != NULL; req = next_req) { 7583 next_req = req->svc_next; 7584 7585 if (req->svc_obj == obj) { 7586 if (prev_req != NULL) 7587 prev_req->svc_next = next_req; 7588 else 7589 stmf_state.stmf_svc_active = next_req; 7590 7591 if (next_req == NULL) 7592 stmf_state.stmf_svc_tailp = (prev_req != NULL) ? 7593 &prev_req->svc_next : 7594 &stmf_state.stmf_svc_active; 7595 7596 kmem_free(req, req->svc_req_alloc_size); 7597 } else { 7598 prev_req = req; 7599 } 7600 } 7601 } 7602 7603 void 7604 stmf_trace(caddr_t ident, const char *fmt, ...) 7605 { 7606 va_list args; 7607 char tbuf[160]; 7608 int len; 7609 7610 if (!stmf_trace_on) 7611 return; 7612 len = snprintf(tbuf, 158, "%s:%07lu: ", ident ? ident : "", 7613 ddi_get_lbolt()); 7614 va_start(args, fmt); 7615 len += vsnprintf(tbuf + len, 158 - len, fmt, args); 7616 va_end(args); 7617 7618 if (len > 158) { 7619 len = 158; 7620 } 7621 tbuf[len++] = '\n'; 7622 tbuf[len] = 0; 7623 7624 mutex_enter(&trace_buf_lock); 7625 bcopy(tbuf, &stmf_trace_buf[trace_buf_curndx], len+1); 7626 trace_buf_curndx += len; 7627 if (trace_buf_curndx > (trace_buf_size - 320)) 7628 trace_buf_curndx = 0; 7629 mutex_exit(&trace_buf_lock); 7630 } 7631 7632 void 7633 stmf_trace_clear() 7634 { 7635 if (!stmf_trace_on) 7636 return; 7637 mutex_enter(&trace_buf_lock); 7638 trace_buf_curndx = 0; 7639 if (trace_buf_size > 0) 7640 stmf_trace_buf[0] = 0; 7641 mutex_exit(&trace_buf_lock); 7642 } 7643 7644 static void 7645 stmf_abort_task_offline(scsi_task_t *task, int offline_lu, char *info) 7646 { 7647 stmf_state_change_info_t change_info; 7648 void *ctl_private; 7649 uint32_t ctl_cmd; 7650 int msg = 0; 7651 7652 stmf_trace("FROM STMF", "abort_task_offline called for %s: %s", 7653 offline_lu ? "LU" : "LPORT", info ? info : "no additional info"); 7654 change_info.st_additional_info = info; 7655 if (offline_lu) { 7656 change_info.st_rflags = STMF_RFLAG_RESET | 7657 STMF_RFLAG_LU_ABORT; 7658 ctl_private = task->task_lu; 7659 if (((stmf_i_lu_t *) 7660 task->task_lu->lu_stmf_private)->ilu_state == 7661 STMF_STATE_ONLINE) { 7662 msg = 1; 7663 } 7664 ctl_cmd = STMF_CMD_LU_OFFLINE; 7665 } else { 7666 change_info.st_rflags = STMF_RFLAG_RESET | 7667 STMF_RFLAG_LPORT_ABORT; 7668 ctl_private = task->task_lport; 7669 if (((stmf_i_local_port_t *) 7670 task->task_lport->lport_stmf_private)->ilport_state == 7671 STMF_STATE_ONLINE) { 7672 msg = 1; 7673 } 7674 ctl_cmd = STMF_CMD_LPORT_OFFLINE; 7675 } 7676 7677 if (msg) { 7678 stmf_trace(0, "Calling stmf_ctl to offline %s : %s", 7679 offline_lu ? "LU" : "LPORT", info ? info : 7680 "<no additional info>"); 7681 } 7682 (void) stmf_ctl(ctl_cmd, ctl_private, &change_info); 7683 } 7684 7685 static char 7686 stmf_ctoi(char c) 7687 { 7688 if ((c >= '0') && (c <= '9')) 7689 c -= '0'; 7690 else if ((c >= 'A') && (c <= 'F')) 7691 c = c - 'A' + 10; 7692 else if ((c >= 'a') && (c <= 'f')) 7693 c = c - 'a' + 10; 7694 else 7695 c = -1; 7696 return (c); 7697 } 7698 7699 /* Convert from Hex value in ASCII format to the equivalent bytes */ 7700 static boolean_t 7701 stmf_base16_str_to_binary(char *c, int dplen, uint8_t *dp) 7702 { 7703 int ii; 7704 7705 for (ii = 0; ii < dplen; ii++) { 7706 char nibble1, nibble2; 7707 char enc_char = *c++; 7708 nibble1 = stmf_ctoi(enc_char); 7709 7710 enc_char = *c++; 7711 nibble2 = stmf_ctoi(enc_char); 7712 if (nibble1 == -1 || nibble2 == -1) 7713 return (B_FALSE); 7714 7715 dp[ii] = (nibble1 << 4) | nibble2; 7716 } 7717 return (B_TRUE); 7718 } 7719 7720 boolean_t 7721 stmf_scsilib_tptid_validate(scsi_transport_id_t *tptid, uint32_t total_sz, 7722 uint16_t *tptid_sz) 7723 { 7724 uint16_t tpd_len = SCSI_TPTID_SIZE; 7725 7726 if (tptid_sz) 7727 *tptid_sz = 0; 7728 if (total_sz < sizeof (scsi_transport_id_t)) 7729 return (B_FALSE); 7730 7731 switch (tptid->protocol_id) { 7732 7733 case PROTOCOL_FIBRE_CHANNEL: 7734 /* FC Transport ID validation checks. SPC3 rev23, Table 284 */ 7735 if (total_sz < tpd_len || tptid->format_code != 0) 7736 return (B_FALSE); 7737 break; 7738 7739 case PROTOCOL_iSCSI: 7740 { 7741 iscsi_transport_id_t *iscsiid; 7742 uint16_t adn_len, name_len; 7743 7744 /* Check for valid format code, SPC3 rev 23 Table 288 */ 7745 if ((total_sz < tpd_len) || 7746 (tptid->format_code != 0 && tptid->format_code != 1)) 7747 return (B_FALSE); 7748 7749 iscsiid = (iscsi_transport_id_t *)tptid; 7750 adn_len = READ_SCSI16(iscsiid->add_len, uint16_t); 7751 tpd_len = sizeof (iscsi_transport_id_t) + adn_len - 1; 7752 7753 /* 7754 * iSCSI Transport ID validation checks. 7755 * As per SPC3 rev 23 Section 7.5.4.6 and Table 289 & Table 290 7756 */ 7757 if (adn_len < 20 || (adn_len % 4 != 0)) 7758 return (B_FALSE); 7759 7760 name_len = strnlen(iscsiid->iscsi_name, adn_len); 7761 if (name_len == 0 || name_len >= adn_len) 7762 return (B_FALSE); 7763 7764 /* If the format_code is 1 check for ISID seperator */ 7765 if ((tptid->format_code == 1) && (strstr(iscsiid->iscsi_name, 7766 SCSI_TPTID_ISCSI_ISID_SEPERATOR) == NULL)) 7767 return (B_FALSE); 7768 7769 } 7770 break; 7771 7772 case PROTOCOL_SRP: 7773 /* SRP Transport ID validation checks. SPC3 rev23, Table 287 */ 7774 if (total_sz < tpd_len || tptid->format_code != 0) 7775 return (B_FALSE); 7776 break; 7777 7778 case PROTOCOL_PARALLEL_SCSI: 7779 case PROTOCOL_SSA: 7780 case PROTOCOL_IEEE_1394: 7781 case PROTOCOL_SAS: 7782 case PROTOCOL_ADT: 7783 case PROTOCOL_ATAPI: 7784 default: 7785 { 7786 stmf_dflt_scsi_tptid_t *dflttpd; 7787 7788 tpd_len = sizeof (stmf_dflt_scsi_tptid_t); 7789 if (total_sz < tpd_len) 7790 return (B_FALSE); 7791 dflttpd = (stmf_dflt_scsi_tptid_t *)tptid; 7792 tpd_len = tpd_len + SCSI_READ16(&dflttpd->ident_len) - 1; 7793 if (total_sz < tpd_len) 7794 return (B_FALSE); 7795 } 7796 break; 7797 } 7798 if (tptid_sz) 7799 *tptid_sz = tpd_len; 7800 return (B_TRUE); 7801 } 7802 7803 boolean_t 7804 stmf_scsilib_tptid_compare(scsi_transport_id_t *tpd1, 7805 scsi_transport_id_t *tpd2) 7806 { 7807 if ((tpd1->protocol_id != tpd2->protocol_id) || 7808 (tpd1->format_code != tpd2->format_code)) 7809 return (B_FALSE); 7810 7811 switch (tpd1->protocol_id) { 7812 7813 case PROTOCOL_iSCSI: 7814 { 7815 iscsi_transport_id_t *iscsitpd1, *iscsitpd2; 7816 uint16_t len; 7817 7818 iscsitpd1 = (iscsi_transport_id_t *)tpd1; 7819 iscsitpd2 = (iscsi_transport_id_t *)tpd2; 7820 len = SCSI_READ16(&iscsitpd1->add_len); 7821 if ((memcmp(iscsitpd1->add_len, iscsitpd2->add_len, 2) != 0) || 7822 (memcmp(iscsitpd1->iscsi_name, iscsitpd2->iscsi_name, len) 7823 != 0)) 7824 return (B_FALSE); 7825 } 7826 break; 7827 7828 case PROTOCOL_SRP: 7829 { 7830 scsi_srp_transport_id_t *srptpd1, *srptpd2; 7831 7832 srptpd1 = (scsi_srp_transport_id_t *)tpd1; 7833 srptpd2 = (scsi_srp_transport_id_t *)tpd2; 7834 if (memcmp(srptpd1->srp_name, srptpd2->srp_name, 7835 sizeof (srptpd1->srp_name)) != 0) 7836 return (B_FALSE); 7837 } 7838 break; 7839 7840 case PROTOCOL_FIBRE_CHANNEL: 7841 { 7842 scsi_fc_transport_id_t *fctpd1, *fctpd2; 7843 7844 fctpd1 = (scsi_fc_transport_id_t *)tpd1; 7845 fctpd2 = (scsi_fc_transport_id_t *)tpd2; 7846 if (memcmp(fctpd1->port_name, fctpd2->port_name, 7847 sizeof (fctpd1->port_name)) != 0) 7848 return (B_FALSE); 7849 } 7850 break; 7851 7852 case PROTOCOL_PARALLEL_SCSI: 7853 case PROTOCOL_SSA: 7854 case PROTOCOL_IEEE_1394: 7855 case PROTOCOL_SAS: 7856 case PROTOCOL_ADT: 7857 case PROTOCOL_ATAPI: 7858 default: 7859 { 7860 stmf_dflt_scsi_tptid_t *dflt1, *dflt2; 7861 uint16_t len; 7862 7863 dflt1 = (stmf_dflt_scsi_tptid_t *)tpd1; 7864 dflt2 = (stmf_dflt_scsi_tptid_t *)tpd2; 7865 len = SCSI_READ16(&dflt1->ident_len); 7866 if ((memcmp(dflt1->ident_len, dflt2->ident_len, 2) != 0) || 7867 (memcmp(dflt1->ident, dflt2->ident, len) != 0)) 7868 return (B_FALSE); 7869 } 7870 break; 7871 } 7872 return (B_TRUE); 7873 } 7874 7875 /* 7876 * Changes devid_desc to corresponding TransportID format 7877 * Returns :- pointer to stmf_remote_port_t 7878 * Note :- Allocates continous memory for stmf_remote_port_t and TransportID, 7879 * This memory need to be freed when this remote_port is no longer 7880 * used. 7881 */ 7882 stmf_remote_port_t * 7883 stmf_scsilib_devid_to_remote_port(scsi_devid_desc_t *devid) 7884 { 7885 struct scsi_fc_transport_id *fc_tpd; 7886 struct iscsi_transport_id *iscsi_tpd; 7887 struct scsi_srp_transport_id *srp_tpd; 7888 struct stmf_dflt_scsi_tptid *dflt_tpd; 7889 uint16_t ident_len, sz = 0; 7890 stmf_remote_port_t *rpt = NULL; 7891 7892 ident_len = devid->ident_length; 7893 ASSERT(ident_len); 7894 switch (devid->protocol_id) { 7895 case PROTOCOL_FIBRE_CHANNEL: 7896 sz = sizeof (scsi_fc_transport_id_t); 7897 rpt = stmf_remote_port_alloc(sz); 7898 rpt->rport_tptid->format_code = 0; 7899 rpt->rport_tptid->protocol_id = devid->protocol_id; 7900 fc_tpd = (scsi_fc_transport_id_t *)rpt->rport_tptid; 7901 /* 7902 * convert from "wwn.xxxxxxxxxxxxxxxx" to 8-byte binary 7903 * skip first 4 byte for "wwn." 7904 */ 7905 ASSERT(strncmp("wwn.", (char *)devid->ident, 4) == 0); 7906 if ((ident_len < SCSI_TPTID_FC_PORT_NAME_SIZE * 2 + 4) || 7907 !stmf_base16_str_to_binary((char *)devid->ident + 4, 7908 SCSI_TPTID_FC_PORT_NAME_SIZE, fc_tpd->port_name)) 7909 goto devid_to_remote_port_fail; 7910 break; 7911 7912 case PROTOCOL_iSCSI: 7913 sz = ALIGNED_TO_8BYTE_BOUNDARY(sizeof (iscsi_transport_id_t) + 7914 ident_len - 1); 7915 rpt = stmf_remote_port_alloc(sz); 7916 rpt->rport_tptid->format_code = 0; 7917 rpt->rport_tptid->protocol_id = devid->protocol_id; 7918 iscsi_tpd = (iscsi_transport_id_t *)rpt->rport_tptid; 7919 SCSI_WRITE16(iscsi_tpd->add_len, ident_len); 7920 (void) memcpy(iscsi_tpd->iscsi_name, devid->ident, ident_len); 7921 break; 7922 7923 case PROTOCOL_SRP: 7924 sz = sizeof (scsi_srp_transport_id_t); 7925 rpt = stmf_remote_port_alloc(sz); 7926 rpt->rport_tptid->format_code = 0; 7927 rpt->rport_tptid->protocol_id = devid->protocol_id; 7928 srp_tpd = (scsi_srp_transport_id_t *)rpt->rport_tptid; 7929 /* 7930 * convert from "eui.xxxxxxxxxxxxxxx" to 8-byte binary 7931 * skip first 4 byte for "eui." 7932 * Assume 8-byte initiator-extension part of srp_name is NOT 7933 * stored in devid and hence will be set as zero 7934 */ 7935 ASSERT(strncmp("eui.", (char *)devid->ident, 4) == 0); 7936 if ((ident_len < (SCSI_TPTID_SRP_PORT_NAME_LEN - 8) * 2 + 4) || 7937 !stmf_base16_str_to_binary((char *)devid->ident+4, 7938 SCSI_TPTID_SRP_PORT_NAME_LEN, srp_tpd->srp_name)) 7939 goto devid_to_remote_port_fail; 7940 break; 7941 7942 case PROTOCOL_PARALLEL_SCSI: 7943 case PROTOCOL_SSA: 7944 case PROTOCOL_IEEE_1394: 7945 case PROTOCOL_SAS: 7946 case PROTOCOL_ADT: 7947 case PROTOCOL_ATAPI: 7948 default : 7949 ident_len = devid->ident_length; 7950 sz = ALIGNED_TO_8BYTE_BOUNDARY(sizeof (stmf_dflt_scsi_tptid_t) + 7951 ident_len - 1); 7952 rpt = stmf_remote_port_alloc(sz); 7953 rpt->rport_tptid->format_code = 0; 7954 rpt->rport_tptid->protocol_id = devid->protocol_id; 7955 dflt_tpd = (stmf_dflt_scsi_tptid_t *)rpt->rport_tptid; 7956 SCSI_WRITE16(dflt_tpd->ident_len, ident_len); 7957 (void) memcpy(dflt_tpd->ident, devid->ident, ident_len); 7958 break; 7959 } 7960 return (rpt); 7961 7962 devid_to_remote_port_fail: 7963 stmf_remote_port_free(rpt); 7964 return (NULL); 7965 7966 } 7967 7968 stmf_remote_port_t * 7969 stmf_remote_port_alloc(uint16_t tptid_sz) { 7970 stmf_remote_port_t *rpt; 7971 rpt = (stmf_remote_port_t *)kmem_zalloc( 7972 sizeof (stmf_remote_port_t) + tptid_sz, KM_SLEEP); 7973 rpt->rport_tptid_sz = tptid_sz; 7974 rpt->rport_tptid = (scsi_transport_id_t *)(rpt + 1); 7975 return (rpt); 7976 } 7977 7978 void 7979 stmf_remote_port_free(stmf_remote_port_t *rpt) 7980 { 7981 /* 7982 * Note: stmf_scsilib_devid_to_remote_port() function allocates 7983 * remote port structures for all transports in the same way, So 7984 * it is safe to deallocate it in a protocol independent manner. 7985 * If any of the allocation method changes, corresponding changes 7986 * need to be made here too. 7987 */ 7988 kmem_free(rpt, sizeof (stmf_remote_port_t) + rpt->rport_tptid_sz); 7989 }