1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  23  */
  24 /*
  25  * Copyright 2012, Nexenta Systems, Inc. All rights reserved.
  26  * Copyright (c) 2013 by Delphix. All rights reserved.
  27  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
  28  */
  29 
  30 #include <sys/conf.h>
  31 #include <sys/file.h>
  32 #include <sys/ddi.h>
  33 #include <sys/sunddi.h>
  34 #include <sys/modctl.h>
  35 #include <sys/scsi/scsi.h>
  36 #include <sys/scsi/generic/persist.h>
  37 #include <sys/scsi/impl/scsi_reset_notify.h>
  38 #include <sys/disp.h>
  39 #include <sys/byteorder.h>
  40 #include <sys/atomic.h>
  41 #include <sys/ethernet.h>
  42 #include <sys/sdt.h>
  43 #include <sys/nvpair.h>
  44 #include <sys/zone.h>
  45 #include <sys/id_space.h>
  46 
  47 #include <sys/stmf.h>
  48 #include <sys/lpif.h>
  49 #include <sys/portif.h>
  50 #include <sys/stmf_ioctl.h>
  51 #include <sys/pppt_ic_if.h>
  52 
  53 #include "stmf_impl.h"
  54 #include "lun_map.h"
  55 #include "stmf_state.h"
  56 #include "stmf_stats.h"
  57 
  58 /*
  59  * Lock order:
  60  * stmf_state_lock --> ilport_lock/iss_lockp --> ilu_task_lock
  61  */
  62 
  63 static uint64_t stmf_session_counter = 0;
  64 static uint16_t stmf_rtpid_counter = 0;
  65 /* start messages at 1 */
  66 static uint64_t stmf_proxy_msg_id = 1;
  67 #define MSG_ID_TM_BIT   0x8000000000000000
  68 #define ALIGNED_TO_8BYTE_BOUNDARY(i)    (((i) + 7) & ~7)
  69 
  70 /*
  71  * When stmf_io_deadman_enabled is set to B_TRUE, we check that finishing up
  72  * I/O operations on an offlining LU doesn't take longer than stmf_io_deadman
  73  * seconds. If it does, we trigger a panic to inform the user of hung I/O
  74  * blocking us for too long.
  75  */
  76 boolean_t stmf_io_deadman_enabled = B_TRUE;
  77 int stmf_io_deadman = 1000;                     /* seconds */
  78 
  79 struct stmf_svc_clocks;
  80 
  81 static int stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
  82 static int stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
  83 static int stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
  84         void **result);
  85 static int stmf_open(dev_t *devp, int flag, int otype, cred_t *credp);
  86 static int stmf_close(dev_t dev, int flag, int otype, cred_t *credp);
  87 static int stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
  88         cred_t *credp, int *rval);
  89 static int stmf_get_stmf_state(stmf_state_desc_t *std);
  90 static int stmf_set_stmf_state(stmf_state_desc_t *std);
  91 static void stmf_abort_task_offline(scsi_task_t *task, int offline_lu,
  92     char *info);
  93 static int stmf_set_alua_state(stmf_alua_state_desc_t *alua_state);
  94 static void stmf_get_alua_state(stmf_alua_state_desc_t *alua_state);
  95 
  96 static void stmf_task_audit(stmf_i_scsi_task_t *itask,
  97     task_audit_event_t te, uint32_t cmd_or_iof, stmf_data_buf_t *dbuf);
  98 
  99 static boolean_t stmf_base16_str_to_binary(char *c, int dplen, uint8_t *dp);
 100 static char stmf_ctoi(char c);
 101 stmf_xfer_data_t *stmf_prepare_tpgs_data(uint8_t ilu_alua);
 102 void stmf_svc_init();
 103 stmf_status_t stmf_svc_fini();
 104 void stmf_svc(void *arg);
 105 static void stmf_wait_ilu_tasks_finish(stmf_i_lu_t *ilu);
 106 void stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info);
 107 static void stmf_svc_kill_obj_requests(void *obj);
 108 static void stmf_svc_timeout(struct stmf_svc_clocks *);
 109 void stmf_check_freetask();
 110 void stmf_abort_target_reset(scsi_task_t *task);
 111 stmf_status_t stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task,
 112                                                         int target_reset);
 113 void stmf_target_reset_poll(struct scsi_task *task);
 114 void stmf_handle_lun_reset(scsi_task_t *task);
 115 void stmf_handle_target_reset(scsi_task_t *task);
 116 void stmf_xd_to_dbuf(stmf_data_buf_t *dbuf, int set_rel_off);
 117 int stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token,
 118     uint32_t *err_ret);
 119 int stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi);
 120 int stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out,
 121     uint32_t *err_ret);
 122 void stmf_delete_ppd(stmf_pp_data_t *ppd);
 123 void stmf_delete_all_ppds();
 124 void stmf_trace_clear();
 125 void stmf_worker_init();
 126 stmf_status_t stmf_worker_fini();
 127 void stmf_worker_mgmt();
 128 void stmf_worker_task(void *arg);
 129 static void stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss);
 130 static stmf_status_t stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg,
 131     uint32_t type);
 132 static stmf_status_t stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg);
 133 static stmf_status_t stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg);
 134 static stmf_status_t stmf_ic_rx_status(stmf_ic_status_msg_t *msg);
 135 static stmf_status_t stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg);
 136 void stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s);
 137 
 138 /* pppt modhandle */
 139 ddi_modhandle_t pppt_mod;
 140 
 141 /* pppt modload imported functions */
 142 stmf_ic_reg_port_msg_alloc_func_t ic_reg_port_msg_alloc;
 143 stmf_ic_dereg_port_msg_alloc_func_t ic_dereg_port_msg_alloc;
 144 stmf_ic_reg_lun_msg_alloc_func_t ic_reg_lun_msg_alloc;
 145 stmf_ic_dereg_lun_msg_alloc_func_t ic_dereg_lun_msg_alloc;
 146 stmf_ic_lun_active_msg_alloc_func_t ic_lun_active_msg_alloc;
 147 stmf_ic_scsi_cmd_msg_alloc_func_t ic_scsi_cmd_msg_alloc;
 148 stmf_ic_scsi_data_xfer_done_msg_alloc_func_t ic_scsi_data_xfer_done_msg_alloc;
 149 stmf_ic_session_create_msg_alloc_func_t ic_session_reg_msg_alloc;
 150 stmf_ic_session_destroy_msg_alloc_func_t ic_session_dereg_msg_alloc;
 151 stmf_ic_tx_msg_func_t ic_tx_msg;
 152 stmf_ic_msg_free_func_t ic_msg_free;
 153 
 154 static void stmf_itl_task_start(stmf_i_scsi_task_t *itask);
 155 static void stmf_itl_lu_new_task(stmf_i_scsi_task_t *itask);
 156 static void stmf_itl_task_done(stmf_i_scsi_task_t *itask);
 157 
 158 static void stmf_lport_xfer_start(stmf_i_scsi_task_t *itask,
 159     stmf_data_buf_t *dbuf);
 160 static void stmf_lport_xfer_done(stmf_i_scsi_task_t *itask,
 161     stmf_data_buf_t *dbuf);
 162 
 163 static void stmf_update_kstat_lu_q(scsi_task_t *, void());
 164 static void stmf_update_kstat_lport_q(scsi_task_t *, void());
 165 static void stmf_update_kstat_lu_io(scsi_task_t *, stmf_data_buf_t *);
 166 static void stmf_update_kstat_lport_io(scsi_task_t *, stmf_data_buf_t *);
 167 
 168 static int stmf_irport_compare(const void *void_irport1,
 169     const void *void_irport2);
 170 static stmf_i_remote_port_t *stmf_irport_create(scsi_devid_desc_t *rport_devid);
 171 static void stmf_irport_destroy(stmf_i_remote_port_t *irport);
 172 static stmf_i_remote_port_t *stmf_irport_register(
 173     scsi_devid_desc_t *rport_devid);
 174 static stmf_i_remote_port_t *stmf_irport_lookup_locked(
 175     scsi_devid_desc_t *rport_devid);
 176 static void stmf_irport_deregister(stmf_i_remote_port_t *irport);
 177 
 178 static void stmf_teardown_itl_kstats(stmf_i_itl_kstat_t *ks);
 179 static void stmf_delete_itl_kstat_by_lport(char *);
 180 static void stmf_delete_itl_kstat_by_guid(char *);
 181 static int stmf_itl_kstat_compare(const void*, const void*);
 182 static stmf_i_itl_kstat_t *stmf_itl_kstat_lookup(char *kstat_nm);
 183 static stmf_i_itl_kstat_t *stmf_itl_kstat_create(stmf_itl_data_t *itl,
 184     char *nm, scsi_devid_desc_t *lport, scsi_devid_desc_t *lun);
 185 
 186 extern struct mod_ops mod_driverops;
 187 
 188 /* =====[ Tunables ]===== */
 189 /* Internal tracing */
 190 volatile int    stmf_trace_on = 1;
 191 volatile int    stmf_trace_buf_size = (1 * 1024 * 1024);
 192 /*
 193  * The reason default task timeout is 75 is because we want the
 194  * host to timeout 1st and mostly host timeout is 60 seconds.
 195  */
 196 volatile int    stmf_default_task_timeout = 75;
 197 /*
 198  * Setting this to one means, you are responsible for config load and keeping
 199  * things in sync with persistent database.
 200  */
 201 volatile int    stmf_allow_modunload = 0;
 202 
 203 volatile int stmf_max_nworkers = 256;
 204 volatile int stmf_min_nworkers = 4;
 205 volatile int stmf_worker_scale_down_delay = 20;
 206 
 207 /* === [ Debugging and fault injection ] === */
 208 #ifdef  DEBUG
 209 volatile int stmf_drop_task_counter = 0;
 210 volatile int stmf_drop_buf_counter = 0;
 211 
 212 #endif
 213 
 214 stmf_state_t            stmf_state;
 215 static stmf_lu_t        *dlun0;
 216 
 217 static uint8_t stmf_first_zero[] =
 218         { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
 219 static uint8_t stmf_first_one[] =
 220         { 0xff, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 };
 221 
 222 static kmutex_t trace_buf_lock;
 223 static int      trace_buf_size;
 224 static int      trace_buf_curndx;
 225 caddr_t stmf_trace_buf;
 226 
 227 static enum {
 228         STMF_WORKERS_DISABLED = 0,
 229         STMF_WORKERS_ENABLING,
 230         STMF_WORKERS_ENABLED
 231 } stmf_workers_state = STMF_WORKERS_DISABLED;
 232 static int stmf_i_max_nworkers;
 233 static int stmf_i_min_nworkers;
 234 static int stmf_nworkers_cur;           /* # of workers currently running */
 235 static int stmf_nworkers_needed;        /* # of workers need to be running */
 236 static int stmf_worker_sel_counter = 0;
 237 static uint32_t stmf_cur_ntasks = 0;
 238 static clock_t stmf_wm_last = 0;
 239 /*
 240  * This is equal to stmf_nworkers_cur while we are increasing # workers and
 241  * stmf_nworkers_needed while we are decreasing the worker count.
 242  */
 243 static int stmf_nworkers_accepting_cmds;
 244 static stmf_worker_t *stmf_workers = NULL;
 245 static clock_t stmf_worker_mgmt_delay = 2;
 246 static clock_t stmf_worker_scale_down_timer = 0;
 247 static int stmf_worker_scale_down_qd = 0;
 248 
 249 static struct cb_ops stmf_cb_ops = {
 250         stmf_open,                      /* open */
 251         stmf_close,                     /* close */
 252         nodev,                          /* strategy */
 253         nodev,                          /* print */
 254         nodev,                          /* dump */
 255         nodev,                          /* read */
 256         nodev,                          /* write */
 257         stmf_ioctl,                     /* ioctl */
 258         nodev,                          /* devmap */
 259         nodev,                          /* mmap */
 260         nodev,                          /* segmap */
 261         nochpoll,                       /* chpoll */
 262         ddi_prop_op,                    /* cb_prop_op */
 263         0,                              /* streamtab */
 264         D_NEW | D_MP,                   /* cb_flag */
 265         CB_REV,                         /* rev */
 266         nodev,                          /* aread */
 267         nodev                           /* awrite */
 268 };
 269 
 270 static struct dev_ops stmf_ops = {
 271         DEVO_REV,
 272         0,
 273         stmf_getinfo,
 274         nulldev,                /* identify */
 275         nulldev,                /* probe */
 276         stmf_attach,
 277         stmf_detach,
 278         nodev,                  /* reset */
 279         &stmf_cb_ops,
 280         NULL,                   /* bus_ops */
 281         NULL                    /* power */
 282 };
 283 
 284 #define STMF_NAME               "COMSTAR STMF"
 285 #define STMF_MODULE_NAME        "stmf"
 286 
 287 static struct modldrv modldrv = {
 288         &mod_driverops,
 289         STMF_NAME,
 290         &stmf_ops
 291 };
 292 
 293 static struct modlinkage modlinkage = {
 294         MODREV_1,
 295         &modldrv,
 296         NULL
 297 };
 298 
 299 int
 300 _init(void)
 301 {
 302         int ret;
 303 
 304         ret = mod_install(&modlinkage);
 305         if (ret)
 306                 return (ret);
 307         stmf_trace_buf = kmem_zalloc(stmf_trace_buf_size, KM_SLEEP);
 308         trace_buf_size = stmf_trace_buf_size;
 309         trace_buf_curndx = 0;
 310         mutex_init(&trace_buf_lock, NULL, MUTEX_DRIVER, 0);
 311         bzero(&stmf_state, sizeof (stmf_state_t));
 312         /* STMF service is off by default */
 313         stmf_state.stmf_service_running = 0;
 314         /* default lu/lport states are online */
 315         stmf_state.stmf_default_lu_state = STMF_STATE_ONLINE;
 316         stmf_state.stmf_default_lport_state = STMF_STATE_ONLINE;
 317         mutex_init(&stmf_state.stmf_lock, NULL, MUTEX_DRIVER, NULL);
 318         cv_init(&stmf_state.stmf_cv, NULL, CV_DRIVER, NULL);
 319         stmf_session_counter = (uint64_t)ddi_get_lbolt();
 320         avl_create(&stmf_state.stmf_irportlist,
 321             stmf_irport_compare, sizeof (stmf_i_remote_port_t),
 322             offsetof(stmf_i_remote_port_t, irport_ln));
 323         stmf_state.stmf_ilport_inst_space =
 324             id_space_create("lport-instances", 0, MAX_ILPORT);
 325         stmf_state.stmf_irport_inst_space =
 326             id_space_create("rport-instances", 0, MAX_IRPORT);
 327         avl_create(&stmf_state.stmf_itl_kstat_list,
 328             stmf_itl_kstat_compare, sizeof (stmf_i_itl_kstat_t),
 329             offsetof(stmf_i_itl_kstat_t, iitl_kstat_ln));
 330         stmf_view_init();
 331         stmf_svc_init();
 332         stmf_dlun_init();
 333         return (ret);
 334 }
 335 
 336 int
 337 _fini(void)
 338 {
 339         int ret;
 340         stmf_i_remote_port_t    *irport;
 341         stmf_i_itl_kstat_t      *ks_itl;
 342         void                    *avl_dest_cookie = NULL;
 343 
 344         if (stmf_state.stmf_service_running)
 345                 return (EBUSY);
 346         if ((!stmf_allow_modunload) &&
 347             (stmf_state.stmf_config_state != STMF_CONFIG_NONE)) {
 348                 return (EBUSY);
 349         }
 350         if (stmf_state.stmf_nlps || stmf_state.stmf_npps) {
 351                 return (EBUSY);
 352         }
 353         if (stmf_dlun_fini() != STMF_SUCCESS)
 354                 return (EBUSY);
 355         if (stmf_worker_fini() != STMF_SUCCESS) {
 356                 stmf_dlun_init();
 357                 return (EBUSY);
 358         }
 359         if (stmf_svc_fini() != STMF_SUCCESS) {
 360                 stmf_dlun_init();
 361                 stmf_worker_init();
 362                 return (EBUSY);
 363         }
 364 
 365         ret = mod_remove(&modlinkage);
 366         if (ret) {
 367                 stmf_svc_init();
 368                 stmf_dlun_init();
 369                 stmf_worker_init();
 370                 return (ret);
 371         }
 372 
 373         stmf_view_clear_config();
 374 
 375         while ((irport = avl_destroy_nodes(&stmf_state.stmf_irportlist,
 376             &avl_dest_cookie)) != NULL)
 377                 stmf_irport_destroy(irport);
 378         avl_destroy(&stmf_state.stmf_irportlist);
 379         id_space_destroy(stmf_state.stmf_ilport_inst_space);
 380         id_space_destroy(stmf_state.stmf_irport_inst_space);
 381 
 382         avl_dest_cookie = NULL;
 383         while ((ks_itl = avl_destroy_nodes(&stmf_state.stmf_itl_kstat_list,
 384             &avl_dest_cookie)) != NULL) {
 385                 stmf_teardown_itl_kstats(ks_itl);
 386                 kmem_free(ks_itl, sizeof (ks_itl));
 387         }
 388         avl_destroy(&stmf_state.stmf_itl_kstat_list);
 389 
 390         kmem_free(stmf_trace_buf, stmf_trace_buf_size);
 391         mutex_destroy(&trace_buf_lock);
 392         mutex_destroy(&stmf_state.stmf_lock);
 393         cv_destroy(&stmf_state.stmf_cv);
 394         return (ret);
 395 }
 396 
 397 int
 398 _info(struct modinfo *modinfop)
 399 {
 400         return (mod_info(&modlinkage, modinfop));
 401 }
 402 
 403 /* ARGSUSED */
 404 static int
 405 stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
 406 {
 407         switch (cmd) {
 408         case DDI_INFO_DEVT2DEVINFO:
 409                 *result = stmf_state.stmf_dip;
 410                 break;
 411         case DDI_INFO_DEVT2INSTANCE:
 412                 *result =
 413                     (void *)(uintptr_t)ddi_get_instance(stmf_state.stmf_dip);
 414                 break;
 415         default:
 416                 return (DDI_FAILURE);
 417         }
 418 
 419         return (DDI_SUCCESS);
 420 }
 421 
 422 static int
 423 stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
 424 {
 425         switch (cmd) {
 426         case DDI_ATTACH:
 427                 stmf_state.stmf_dip = dip;
 428 
 429                 if (ddi_create_minor_node(dip, "admin", S_IFCHR, 0,
 430                     DDI_NT_STMF, 0) != DDI_SUCCESS) {
 431                         break;
 432                 }
 433                 ddi_report_dev(dip);
 434                 return (DDI_SUCCESS);
 435         }
 436 
 437         return (DDI_FAILURE);
 438 }
 439 
 440 static int
 441 stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
 442 {
 443         switch (cmd) {
 444         case DDI_DETACH:
 445                 ddi_remove_minor_node(dip, 0);
 446                 return (DDI_SUCCESS);
 447         }
 448 
 449         return (DDI_FAILURE);
 450 }
 451 
 452 /* ARGSUSED */
 453 static int
 454 stmf_open(dev_t *devp, int flag, int otype, cred_t *credp)
 455 {
 456         mutex_enter(&stmf_state.stmf_lock);
 457         if (stmf_state.stmf_exclusive_open) {
 458                 mutex_exit(&stmf_state.stmf_lock);
 459                 return (EBUSY);
 460         }
 461         if (flag & FEXCL) {
 462                 if (stmf_state.stmf_opened) {
 463                         mutex_exit(&stmf_state.stmf_lock);
 464                         return (EBUSY);
 465                 }
 466                 stmf_state.stmf_exclusive_open = 1;
 467         }
 468         stmf_state.stmf_opened = 1;
 469         mutex_exit(&stmf_state.stmf_lock);
 470         return (0);
 471 }
 472 
 473 /* ARGSUSED */
 474 static int
 475 stmf_close(dev_t dev, int flag, int otype, cred_t *credp)
 476 {
 477         mutex_enter(&stmf_state.stmf_lock);
 478         stmf_state.stmf_opened = 0;
 479         if (stmf_state.stmf_exclusive_open &&
 480             (stmf_state.stmf_config_state != STMF_CONFIG_INIT_DONE)) {
 481                 stmf_state.stmf_config_state = STMF_CONFIG_NONE;
 482                 stmf_delete_all_ppds();
 483                 stmf_view_clear_config();
 484                 stmf_view_init();
 485         }
 486         stmf_state.stmf_exclusive_open = 0;
 487         mutex_exit(&stmf_state.stmf_lock);
 488         return (0);
 489 }
 490 
 491 int
 492 stmf_copyin_iocdata(intptr_t data, int mode, stmf_iocdata_t **iocd,
 493                                                 void **ibuf, void **obuf)
 494 {
 495         int ret;
 496 
 497         *ibuf = NULL;
 498         *obuf = NULL;
 499         *iocd = kmem_zalloc(sizeof (stmf_iocdata_t), KM_SLEEP);
 500 
 501         ret = ddi_copyin((void *)data, *iocd, sizeof (stmf_iocdata_t), mode);
 502         if (ret)
 503                 return (EFAULT);
 504         if ((*iocd)->stmf_version != STMF_VERSION_1) {
 505                 ret = EINVAL;
 506                 goto copyin_iocdata_done;
 507         }
 508         if ((*iocd)->stmf_ibuf_size) {
 509                 *ibuf = kmem_zalloc((*iocd)->stmf_ibuf_size, KM_SLEEP);
 510                 ret = ddi_copyin((void *)((unsigned long)(*iocd)->stmf_ibuf),
 511                     *ibuf, (*iocd)->stmf_ibuf_size, mode);
 512         }
 513         if ((*iocd)->stmf_obuf_size)
 514                 *obuf = kmem_zalloc((*iocd)->stmf_obuf_size, KM_SLEEP);
 515 
 516         if (ret == 0)
 517                 return (0);
 518         ret = EFAULT;
 519 copyin_iocdata_done:;
 520         if (*obuf) {
 521                 kmem_free(*obuf, (*iocd)->stmf_obuf_size);
 522                 *obuf = NULL;
 523         }
 524         if (*ibuf) {
 525                 kmem_free(*ibuf, (*iocd)->stmf_ibuf_size);
 526                 *ibuf = NULL;
 527         }
 528         kmem_free(*iocd, sizeof (stmf_iocdata_t));
 529         return (ret);
 530 }
 531 
 532 int
 533 stmf_copyout_iocdata(intptr_t data, int mode, stmf_iocdata_t *iocd, void *obuf)
 534 {
 535         int ret;
 536 
 537         if (iocd->stmf_obuf_size) {
 538                 ret = ddi_copyout(obuf, (void *)(unsigned long)iocd->stmf_obuf,
 539                     iocd->stmf_obuf_size, mode);
 540                 if (ret)
 541                         return (EFAULT);
 542         }
 543         ret = ddi_copyout(iocd, (void *)data, sizeof (stmf_iocdata_t), mode);
 544         if (ret)
 545                 return (EFAULT);
 546         return (0);
 547 }
 548 
 549 /* ARGSUSED */
 550 static int
 551 stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
 552         cred_t *credp, int *rval)
 553 {
 554         stmf_iocdata_t *iocd;
 555         void *ibuf = NULL, *obuf = NULL;
 556         slist_lu_t *luid_list;
 557         slist_target_port_t *lportid_list;
 558         stmf_i_lu_t *ilu;
 559         stmf_i_local_port_t *ilport;
 560         stmf_i_scsi_session_t *iss;
 561         slist_scsi_session_t *iss_list;
 562         sioc_lu_props_t *lup;
 563         sioc_target_port_props_t *lportp;
 564         stmf_ppioctl_data_t *ppi, *ppi_out = NULL;
 565         uint64_t *ppi_token = NULL;
 566         uint8_t *p_id, *id;
 567         stmf_state_desc_t *std;
 568         stmf_status_t ctl_ret;
 569         stmf_state_change_info_t ssi;
 570         int ret = 0;
 571         uint32_t n;
 572         int i;
 573         stmf_group_op_data_t *grp_entry;
 574         stmf_group_name_t *grpname;
 575         stmf_view_op_entry_t *ve;
 576         stmf_id_type_t idtype;
 577         stmf_id_data_t *id_entry;
 578         stmf_id_list_t  *id_list;
 579         stmf_view_entry_t *view_entry;
 580         stmf_set_props_t *stmf_set_props;
 581         uint32_t        veid;
 582         if ((cmd & 0xff000000) != STMF_IOCTL) {
 583                 return (ENOTTY);
 584         }
 585 
 586         if (drv_priv(credp) != 0) {
 587                 return (EPERM);
 588         }
 589 
 590         ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
 591         if (ret)
 592                 return (ret);
 593         iocd->stmf_error = 0;
 594 
 595         switch (cmd) {
 596         case STMF_IOCTL_LU_LIST:
 597                 /* retrieves both registered/unregistered */
 598                 mutex_enter(&stmf_state.stmf_lock);
 599                 id_list = &stmf_state.stmf_luid_list;
 600                 n = min(id_list->id_count,
 601                     (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
 602                 iocd->stmf_obuf_max_nentries = id_list->id_count;
 603                 luid_list = (slist_lu_t *)obuf;
 604                 id_entry = id_list->idl_head;
 605                 for (i = 0; i < n; i++) {
 606                         bcopy(id_entry->id_data, luid_list[i].lu_guid, 16);
 607                         id_entry = id_entry->id_next;
 608                 }
 609 
 610                 n = iocd->stmf_obuf_size/sizeof (slist_lu_t);
 611                 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
 612                         id = (uint8_t *)ilu->ilu_lu->lu_id;
 613                         if (stmf_lookup_id(id_list, 16, id + 4) == NULL) {
 614                                 iocd->stmf_obuf_max_nentries++;
 615                                 if (i < n) {
 616                                         bcopy(id + 4, luid_list[i].lu_guid,
 617                                             sizeof (slist_lu_t));
 618                                         i++;
 619                                 }
 620                         }
 621                 }
 622                 iocd->stmf_obuf_nentries = i;
 623                 mutex_exit(&stmf_state.stmf_lock);
 624                 break;
 625 
 626         case STMF_IOCTL_REG_LU_LIST:
 627                 mutex_enter(&stmf_state.stmf_lock);
 628                 iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlus;
 629                 n = min(stmf_state.stmf_nlus,
 630                     (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
 631                 iocd->stmf_obuf_nentries = n;
 632                 ilu = stmf_state.stmf_ilulist;
 633                 luid_list = (slist_lu_t *)obuf;
 634                 for (i = 0; i < n; i++) {
 635                         uint8_t *id;
 636                         id = (uint8_t *)ilu->ilu_lu->lu_id;
 637                         bcopy(id + 4, luid_list[i].lu_guid, 16);
 638                         ilu = ilu->ilu_next;
 639                 }
 640                 mutex_exit(&stmf_state.stmf_lock);
 641                 break;
 642 
 643         case STMF_IOCTL_VE_LU_LIST:
 644                 mutex_enter(&stmf_state.stmf_lock);
 645                 id_list = &stmf_state.stmf_luid_list;
 646                 n = min(id_list->id_count,
 647                     (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
 648                 iocd->stmf_obuf_max_nentries = id_list->id_count;
 649                 iocd->stmf_obuf_nentries = n;
 650                 luid_list = (slist_lu_t *)obuf;
 651                 id_entry = id_list->idl_head;
 652                 for (i = 0; i < n; i++) {
 653                         bcopy(id_entry->id_data, luid_list[i].lu_guid, 16);
 654                         id_entry = id_entry->id_next;
 655                 }
 656                 mutex_exit(&stmf_state.stmf_lock);
 657                 break;
 658 
 659         case STMF_IOCTL_TARGET_PORT_LIST:
 660                 mutex_enter(&stmf_state.stmf_lock);
 661                 iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlports;
 662                 n = min(stmf_state.stmf_nlports,
 663                     (iocd->stmf_obuf_size)/sizeof (slist_target_port_t));
 664                 iocd->stmf_obuf_nentries = n;
 665                 ilport = stmf_state.stmf_ilportlist;
 666                 lportid_list = (slist_target_port_t *)obuf;
 667                 for (i = 0; i < n; i++) {
 668                         uint8_t *id;
 669                         id = (uint8_t *)ilport->ilport_lport->lport_id;
 670                         bcopy(id, lportid_list[i].target, id[3] + 4);
 671                         ilport = ilport->ilport_next;
 672                 }
 673                 mutex_exit(&stmf_state.stmf_lock);
 674                 break;
 675 
 676         case STMF_IOCTL_SESSION_LIST:
 677                 p_id = (uint8_t *)ibuf;
 678                 if ((p_id == NULL) || (iocd->stmf_ibuf_size < 4) ||
 679                     (iocd->stmf_ibuf_size < (p_id[3] + 4))) {
 680                         ret = EINVAL;
 681                         break;
 682                 }
 683                 mutex_enter(&stmf_state.stmf_lock);
 684                 for (ilport = stmf_state.stmf_ilportlist; ilport; ilport =
 685                     ilport->ilport_next) {
 686                         uint8_t *id;
 687                         id = (uint8_t *)ilport->ilport_lport->lport_id;
 688                         if ((p_id[3] == id[3]) &&
 689                             (bcmp(p_id + 4, id + 4, id[3]) == 0)) {
 690                                 break;
 691                         }
 692                 }
 693                 if (ilport == NULL) {
 694                         mutex_exit(&stmf_state.stmf_lock);
 695                         ret = ENOENT;
 696                         break;
 697                 }
 698                 iocd->stmf_obuf_max_nentries = ilport->ilport_nsessions;
 699                 n = min(ilport->ilport_nsessions,
 700                     (iocd->stmf_obuf_size)/sizeof (slist_scsi_session_t));
 701                 iocd->stmf_obuf_nentries = n;
 702                 iss = ilport->ilport_ss_list;
 703                 iss_list = (slist_scsi_session_t *)obuf;
 704                 for (i = 0; i < n; i++) {
 705                         uint8_t *id;
 706                         id = (uint8_t *)iss->iss_ss->ss_rport_id;
 707                         bcopy(id, iss_list[i].initiator, id[3] + 4);
 708                         iss_list[i].creation_time = (uint32_t)
 709                             iss->iss_creation_time;
 710                         if (iss->iss_ss->ss_rport_alias) {
 711                                 (void) strncpy(iss_list[i].alias,
 712                                     iss->iss_ss->ss_rport_alias, 255);
 713                                 iss_list[i].alias[255] = 0;
 714                         } else {
 715                                 iss_list[i].alias[0] = 0;
 716                         }
 717                         iss = iss->iss_next;
 718                 }
 719                 mutex_exit(&stmf_state.stmf_lock);
 720                 break;
 721 
 722         case STMF_IOCTL_GET_LU_PROPERTIES:
 723                 p_id = (uint8_t *)ibuf;
 724                 if ((iocd->stmf_ibuf_size < 16) ||
 725                     (iocd->stmf_obuf_size < sizeof (sioc_lu_props_t)) ||
 726                     (p_id[0] == 0)) {
 727                         ret = EINVAL;
 728                         break;
 729                 }
 730                 mutex_enter(&stmf_state.stmf_lock);
 731                 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
 732                         if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0)
 733                                 break;
 734                 }
 735                 if (ilu == NULL) {
 736                         mutex_exit(&stmf_state.stmf_lock);
 737                         ret = ENOENT;
 738                         break;
 739                 }
 740                 lup = (sioc_lu_props_t *)obuf;
 741                 bcopy(ilu->ilu_lu->lu_id->ident, lup->lu_guid, 16);
 742                 lup->lu_state = ilu->ilu_state & 0x0f;
 743                 lup->lu_present = 1; /* XXX */
 744                 (void) strncpy(lup->lu_provider_name,
 745                     ilu->ilu_lu->lu_lp->lp_name, 255);
 746                 lup->lu_provider_name[254] = 0;
 747                 if (ilu->ilu_lu->lu_alias) {
 748                         (void) strncpy(lup->lu_alias,
 749                             ilu->ilu_lu->lu_alias, 255);
 750                         lup->lu_alias[255] = 0;
 751                 } else {
 752                         lup->lu_alias[0] = 0;
 753                 }
 754                 mutex_exit(&stmf_state.stmf_lock);
 755                 break;
 756 
 757         case STMF_IOCTL_GET_TARGET_PORT_PROPERTIES:
 758                 p_id = (uint8_t *)ibuf;
 759                 if ((p_id == NULL) ||
 760                     (iocd->stmf_ibuf_size < (p_id[3] + 4)) ||
 761                     (iocd->stmf_obuf_size <
 762                     sizeof (sioc_target_port_props_t))) {
 763                         ret = EINVAL;
 764                         break;
 765                 }
 766                 mutex_enter(&stmf_state.stmf_lock);
 767                 for (ilport = stmf_state.stmf_ilportlist; ilport;
 768                     ilport = ilport->ilport_next) {
 769                         uint8_t *id;
 770                         id = (uint8_t *)ilport->ilport_lport->lport_id;
 771                         if ((p_id[3] == id[3]) &&
 772                             (bcmp(p_id+4, id+4, id[3]) == 0))
 773                                 break;
 774                 }
 775                 if (ilport == NULL) {
 776                         mutex_exit(&stmf_state.stmf_lock);
 777                         ret = ENOENT;
 778                         break;
 779                 }
 780                 lportp = (sioc_target_port_props_t *)obuf;
 781                 bcopy(ilport->ilport_lport->lport_id, lportp->tgt_id,
 782                     ilport->ilport_lport->lport_id->ident_length + 4);
 783                 lportp->tgt_state = ilport->ilport_state & 0x0f;
 784                 lportp->tgt_present = 1; /* XXX */
 785                 (void) strncpy(lportp->tgt_provider_name,
 786                     ilport->ilport_lport->lport_pp->pp_name, 255);
 787                 lportp->tgt_provider_name[254] = 0;
 788                 if (ilport->ilport_lport->lport_alias) {
 789                         (void) strncpy(lportp->tgt_alias,
 790                             ilport->ilport_lport->lport_alias, 255);
 791                         lportp->tgt_alias[255] = 0;
 792                 } else {
 793                         lportp->tgt_alias[0] = 0;
 794                 }
 795                 mutex_exit(&stmf_state.stmf_lock);
 796                 break;
 797 
 798         case STMF_IOCTL_SET_STMF_STATE:
 799                 if ((ibuf == NULL) ||
 800                     (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
 801                         ret = EINVAL;
 802                         break;
 803                 }
 804                 ret = stmf_set_stmf_state((stmf_state_desc_t *)ibuf);
 805                 break;
 806 
 807         case STMF_IOCTL_GET_STMF_STATE:
 808                 if ((obuf == NULL) ||
 809                     (iocd->stmf_obuf_size < sizeof (stmf_state_desc_t))) {
 810                         ret = EINVAL;
 811                         break;
 812                 }
 813                 ret = stmf_get_stmf_state((stmf_state_desc_t *)obuf);
 814                 break;
 815 
 816         case STMF_IOCTL_SET_ALUA_STATE:
 817                 if ((ibuf == NULL) ||
 818                     (iocd->stmf_ibuf_size < sizeof (stmf_alua_state_desc_t))) {
 819                         ret = EINVAL;
 820                         break;
 821                 }
 822                 ret = stmf_set_alua_state((stmf_alua_state_desc_t *)ibuf);
 823                 break;
 824 
 825         case STMF_IOCTL_GET_ALUA_STATE:
 826                 if ((obuf == NULL) ||
 827                     (iocd->stmf_obuf_size < sizeof (stmf_alua_state_desc_t))) {
 828                         ret = EINVAL;
 829                         break;
 830                 }
 831                 stmf_get_alua_state((stmf_alua_state_desc_t *)obuf);
 832                 break;
 833 
 834         case STMF_IOCTL_SET_LU_STATE:
 835                 ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
 836                 ssi.st_additional_info = NULL;
 837                 std = (stmf_state_desc_t *)ibuf;
 838                 if ((ibuf == NULL) ||
 839                     (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
 840                         ret = EINVAL;
 841                         break;
 842                 }
 843                 p_id = std->ident;
 844                 mutex_enter(&stmf_state.stmf_lock);
 845                 if (stmf_state.stmf_inventory_locked) {
 846                         mutex_exit(&stmf_state.stmf_lock);
 847                         ret = EBUSY;
 848                         break;
 849                 }
 850                 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
 851                         if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0)
 852                                 break;
 853                 }
 854                 if (ilu == NULL) {
 855                         mutex_exit(&stmf_state.stmf_lock);
 856                         ret = ENOENT;
 857                         break;
 858                 }
 859                 stmf_state.stmf_inventory_locked = 1;
 860                 mutex_exit(&stmf_state.stmf_lock);
 861                 cmd = (std->state == STMF_STATE_ONLINE) ? STMF_CMD_LU_ONLINE :
 862                     STMF_CMD_LU_OFFLINE;
 863                 ctl_ret = stmf_ctl(cmd, (void *)ilu->ilu_lu, &ssi);
 864                 if (ctl_ret == STMF_ALREADY)
 865                         ret = 0;
 866                 else if (ctl_ret == STMF_BUSY)
 867                         ret = EBUSY;
 868                 else if (ctl_ret != STMF_SUCCESS)
 869                         ret = EIO;
 870                 mutex_enter(&stmf_state.stmf_lock);
 871                 stmf_state.stmf_inventory_locked = 0;
 872                 mutex_exit(&stmf_state.stmf_lock);
 873                 break;
 874 
 875         case STMF_IOCTL_SET_STMF_PROPS:
 876                 if ((ibuf == NULL) ||
 877                     (iocd->stmf_ibuf_size < sizeof (stmf_set_props_t))) {
 878                         ret = EINVAL;
 879                         break;
 880                 }
 881                 stmf_set_props = (stmf_set_props_t *)ibuf;
 882                 mutex_enter(&stmf_state.stmf_lock);
 883                 if ((stmf_set_props->default_lu_state_value ==
 884                     STMF_STATE_OFFLINE) ||
 885                     (stmf_set_props->default_lu_state_value ==
 886                     STMF_STATE_ONLINE)) {
 887                         stmf_state.stmf_default_lu_state =
 888                             stmf_set_props->default_lu_state_value;
 889                 }
 890                 if ((stmf_set_props->default_target_state_value ==
 891                     STMF_STATE_OFFLINE) ||
 892                     (stmf_set_props->default_target_state_value ==
 893                     STMF_STATE_ONLINE)) {
 894                         stmf_state.stmf_default_lport_state =
 895                             stmf_set_props->default_target_state_value;
 896                 }
 897 
 898                 mutex_exit(&stmf_state.stmf_lock);
 899                 break;
 900 
 901         case STMF_IOCTL_SET_TARGET_PORT_STATE:
 902                 ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
 903                 ssi.st_additional_info = NULL;
 904                 std = (stmf_state_desc_t *)ibuf;
 905                 if ((ibuf == NULL) ||
 906                     (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
 907                         ret = EINVAL;
 908                         break;
 909                 }
 910                 p_id = std->ident;
 911                 mutex_enter(&stmf_state.stmf_lock);
 912                 if (stmf_state.stmf_inventory_locked) {
 913                         mutex_exit(&stmf_state.stmf_lock);
 914                         ret = EBUSY;
 915                         break;
 916                 }
 917                 for (ilport = stmf_state.stmf_ilportlist; ilport;
 918                     ilport = ilport->ilport_next) {
 919                         uint8_t *id;
 920                         id = (uint8_t *)ilport->ilport_lport->lport_id;
 921                         if ((id[3] == p_id[3]) &&
 922                             (bcmp(id+4, p_id+4, id[3]) == 0)) {
 923                                 break;
 924                         }
 925                 }
 926                 if (ilport == NULL) {
 927                         mutex_exit(&stmf_state.stmf_lock);
 928                         ret = ENOENT;
 929                         break;
 930                 }
 931                 stmf_state.stmf_inventory_locked = 1;
 932                 mutex_exit(&stmf_state.stmf_lock);
 933                 cmd = (std->state == STMF_STATE_ONLINE) ?
 934                     STMF_CMD_LPORT_ONLINE : STMF_CMD_LPORT_OFFLINE;
 935                 ctl_ret = stmf_ctl(cmd, (void *)ilport->ilport_lport, &ssi);
 936                 if (ctl_ret == STMF_ALREADY)
 937                         ret = 0;
 938                 else if (ctl_ret == STMF_BUSY)
 939                         ret = EBUSY;
 940                 else if (ctl_ret != STMF_SUCCESS)
 941                         ret = EIO;
 942                 mutex_enter(&stmf_state.stmf_lock);
 943                 stmf_state.stmf_inventory_locked = 0;
 944                 mutex_exit(&stmf_state.stmf_lock);
 945                 break;
 946 
 947         case STMF_IOCTL_ADD_HG_ENTRY:
 948                 idtype = STMF_ID_TYPE_HOST;
 949                 /* FALLTHROUGH */
 950         case STMF_IOCTL_ADD_TG_ENTRY:
 951                 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
 952                         ret = EACCES;
 953                         iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
 954                         break;
 955                 }
 956                 if (cmd == STMF_IOCTL_ADD_TG_ENTRY) {
 957                         idtype = STMF_ID_TYPE_TARGET;
 958                 }
 959                 grp_entry = (stmf_group_op_data_t *)ibuf;
 960                 if ((ibuf == NULL) ||
 961                     (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) {
 962                         ret = EINVAL;
 963                         break;
 964                 }
 965                 if (grp_entry->group.name[0] == '*') {
 966                         ret = EINVAL;
 967                         break; /* not allowed */
 968                 }
 969                 mutex_enter(&stmf_state.stmf_lock);
 970                 ret = stmf_add_group_member(grp_entry->group.name,
 971                     grp_entry->group.name_size,
 972                     grp_entry->ident + 4,
 973                     grp_entry->ident[3],
 974                     idtype,
 975                     &iocd->stmf_error);
 976                 mutex_exit(&stmf_state.stmf_lock);
 977                 break;
 978         case STMF_IOCTL_REMOVE_HG_ENTRY:
 979                 idtype = STMF_ID_TYPE_HOST;
 980                 /* FALLTHROUGH */
 981         case STMF_IOCTL_REMOVE_TG_ENTRY:
 982                 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
 983                         ret = EACCES;
 984                         iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
 985                         break;
 986                 }
 987                 if (cmd == STMF_IOCTL_REMOVE_TG_ENTRY) {
 988                         idtype = STMF_ID_TYPE_TARGET;
 989                 }
 990                 grp_entry = (stmf_group_op_data_t *)ibuf;
 991                 if ((ibuf == NULL) ||
 992                     (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) {
 993                         ret = EINVAL;
 994                         break;
 995                 }
 996                 if (grp_entry->group.name[0] == '*') {
 997                         ret = EINVAL;
 998                         break; /* not allowed */
 999                 }
1000                 mutex_enter(&stmf_state.stmf_lock);
1001                 ret = stmf_remove_group_member(grp_entry->group.name,
1002                     grp_entry->group.name_size,
1003                     grp_entry->ident + 4,
1004                     grp_entry->ident[3],
1005                     idtype,
1006                     &iocd->stmf_error);
1007                 mutex_exit(&stmf_state.stmf_lock);
1008                 break;
1009         case STMF_IOCTL_CREATE_HOST_GROUP:
1010                 idtype = STMF_ID_TYPE_HOST_GROUP;
1011                 /* FALLTHROUGH */
1012         case STMF_IOCTL_CREATE_TARGET_GROUP:
1013                 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1014                         ret = EACCES;
1015                         iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1016                         break;
1017                 }
1018                 grpname = (stmf_group_name_t *)ibuf;
1019 
1020                 if (cmd == STMF_IOCTL_CREATE_TARGET_GROUP)
1021                         idtype = STMF_ID_TYPE_TARGET_GROUP;
1022                 if ((ibuf == NULL) ||
1023                     (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1024                         ret = EINVAL;
1025                         break;
1026                 }
1027                 if (grpname->name[0] == '*') {
1028                         ret = EINVAL;
1029                         break; /* not allowed */
1030                 }
1031                 mutex_enter(&stmf_state.stmf_lock);
1032                 ret = stmf_add_group(grpname->name,
1033                     grpname->name_size, idtype, &iocd->stmf_error);
1034                 mutex_exit(&stmf_state.stmf_lock);
1035                 break;
1036         case STMF_IOCTL_REMOVE_HOST_GROUP:
1037                 idtype = STMF_ID_TYPE_HOST_GROUP;
1038                 /* FALLTHROUGH */
1039         case STMF_IOCTL_REMOVE_TARGET_GROUP:
1040                 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1041                         ret = EACCES;
1042                         iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1043                         break;
1044                 }
1045                 grpname = (stmf_group_name_t *)ibuf;
1046                 if (cmd == STMF_IOCTL_REMOVE_TARGET_GROUP)
1047                         idtype = STMF_ID_TYPE_TARGET_GROUP;
1048                 if ((ibuf == NULL) ||
1049                     (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1050                         ret = EINVAL;
1051                         break;
1052                 }
1053                 if (grpname->name[0] == '*') {
1054                         ret = EINVAL;
1055                         break; /* not allowed */
1056                 }
1057                 mutex_enter(&stmf_state.stmf_lock);
1058                 ret = stmf_remove_group(grpname->name,
1059                     grpname->name_size, idtype, &iocd->stmf_error);
1060                 mutex_exit(&stmf_state.stmf_lock);
1061                 break;
1062         case STMF_IOCTL_VALIDATE_VIEW:
1063         case STMF_IOCTL_ADD_VIEW_ENTRY:
1064                 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1065                         ret = EACCES;
1066                         iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1067                         break;
1068                 }
1069                 ve = (stmf_view_op_entry_t *)ibuf;
1070                 if ((ibuf == NULL) ||
1071                     (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) {
1072                         ret = EINVAL;
1073                         break;
1074                 }
1075                 if (!ve->ve_lu_number_valid)
1076                         ve->ve_lu_nbr[2] = 0xFF;
1077                 if (ve->ve_all_hosts) {
1078                         ve->ve_host_group.name[0] = '*';
1079                         ve->ve_host_group.name_size = 1;
1080                 }
1081                 if (ve->ve_all_targets) {
1082                         ve->ve_target_group.name[0] = '*';
1083                         ve->ve_target_group.name_size = 1;
1084                 }
1085                 if (ve->ve_ndx_valid)
1086                         veid = ve->ve_ndx;
1087                 else
1088                         veid = 0xffffffff;
1089                 mutex_enter(&stmf_state.stmf_lock);
1090                 if (cmd == STMF_IOCTL_ADD_VIEW_ENTRY) {
1091                         ret = stmf_add_ve(ve->ve_host_group.name,
1092                             ve->ve_host_group.name_size,
1093                             ve->ve_target_group.name,
1094                             ve->ve_target_group.name_size,
1095                             ve->ve_guid,
1096                             &veid,
1097                             ve->ve_lu_nbr,
1098                             &iocd->stmf_error);
1099                 } else {  /* STMF_IOCTL_VALIDATE_VIEW */
1100                         ret = stmf_validate_lun_ve(ve->ve_host_group.name,
1101                             ve->ve_host_group.name_size,
1102                             ve->ve_target_group.name,
1103                             ve->ve_target_group.name_size,
1104                             ve->ve_lu_nbr,
1105                             &iocd->stmf_error);
1106                 }
1107                 mutex_exit(&stmf_state.stmf_lock);
1108                 if (ret == 0 &&
1109                     (!ve->ve_ndx_valid || !ve->ve_lu_number_valid) &&
1110                     iocd->stmf_obuf_size >= sizeof (stmf_view_op_entry_t)) {
1111                         stmf_view_op_entry_t *ve_ret =
1112                             (stmf_view_op_entry_t *)obuf;
1113                         iocd->stmf_obuf_nentries = 1;
1114                         iocd->stmf_obuf_max_nentries = 1;
1115                         if (!ve->ve_ndx_valid) {
1116                                 ve_ret->ve_ndx = veid;
1117                                 ve_ret->ve_ndx_valid = 1;
1118                         }
1119                         if (!ve->ve_lu_number_valid) {
1120                                 ve_ret->ve_lu_number_valid = 1;
1121                                 bcopy(ve->ve_lu_nbr, ve_ret->ve_lu_nbr, 8);
1122                         }
1123                 }
1124                 break;
1125         case STMF_IOCTL_REMOVE_VIEW_ENTRY:
1126                 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1127                         ret = EACCES;
1128                         iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1129                         break;
1130                 }
1131                 ve = (stmf_view_op_entry_t *)ibuf;
1132                 if ((ibuf == NULL) ||
1133                     (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) {
1134                         ret = EINVAL;
1135                         break;
1136                 }
1137                 if (!ve->ve_ndx_valid) {
1138                         ret = EINVAL;
1139                         break;
1140                 }
1141                 mutex_enter(&stmf_state.stmf_lock);
1142                 ret = stmf_remove_ve_by_id(ve->ve_guid, ve->ve_ndx,
1143                     &iocd->stmf_error);
1144                 mutex_exit(&stmf_state.stmf_lock);
1145                 break;
1146         case STMF_IOCTL_GET_HG_LIST:
1147                 id_list = &stmf_state.stmf_hg_list;
1148                 /* FALLTHROUGH */
1149         case STMF_IOCTL_GET_TG_LIST:
1150                 if (cmd == STMF_IOCTL_GET_TG_LIST)
1151                         id_list = &stmf_state.stmf_tg_list;
1152                 mutex_enter(&stmf_state.stmf_lock);
1153                 iocd->stmf_obuf_max_nentries = id_list->id_count;
1154                 n = min(id_list->id_count,
1155                     (iocd->stmf_obuf_size)/sizeof (stmf_group_name_t));
1156                 iocd->stmf_obuf_nentries = n;
1157                 id_entry = id_list->idl_head;
1158                 grpname = (stmf_group_name_t *)obuf;
1159                 for (i = 0; i < n; i++) {
1160                         if (id_entry->id_data[0] == '*') {
1161                                 if (iocd->stmf_obuf_nentries > 0) {
1162                                         iocd->stmf_obuf_nentries--;
1163                                 }
1164                                 id_entry = id_entry->id_next;
1165                                 continue;
1166                         }
1167                         grpname->name_size = id_entry->id_data_size;
1168                         bcopy(id_entry->id_data, grpname->name,
1169                             id_entry->id_data_size);
1170                         grpname++;
1171                         id_entry = id_entry->id_next;
1172                 }
1173                 mutex_exit(&stmf_state.stmf_lock);
1174                 break;
1175         case STMF_IOCTL_GET_HG_ENTRIES:
1176                 id_list = &stmf_state.stmf_hg_list;
1177                 /* FALLTHROUGH */
1178         case STMF_IOCTL_GET_TG_ENTRIES:
1179                 grpname = (stmf_group_name_t *)ibuf;
1180                 if ((ibuf == NULL) ||
1181                     (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1182                         ret = EINVAL;
1183                         break;
1184                 }
1185                 if (cmd == STMF_IOCTL_GET_TG_ENTRIES) {
1186                         id_list = &stmf_state.stmf_tg_list;
1187                 }
1188                 mutex_enter(&stmf_state.stmf_lock);
1189                 id_entry = stmf_lookup_id(id_list, grpname->name_size,
1190                     grpname->name);
1191                 if (!id_entry)
1192                         ret = ENODEV;
1193                 else {
1194                         stmf_ge_ident_t *grp_entry;
1195                         id_list = (stmf_id_list_t *)id_entry->id_impl_specific;
1196                         iocd->stmf_obuf_max_nentries = id_list->id_count;
1197                         n = min(id_list->id_count,
1198                             iocd->stmf_obuf_size/sizeof (stmf_ge_ident_t));
1199                         iocd->stmf_obuf_nentries = n;
1200                         id_entry = id_list->idl_head;
1201                         grp_entry = (stmf_ge_ident_t *)obuf;
1202                         for (i = 0; i < n; i++) {
1203                                 bcopy(id_entry->id_data, grp_entry->ident,
1204                                     id_entry->id_data_size);
1205                                 grp_entry->ident_size = id_entry->id_data_size;
1206                                 id_entry = id_entry->id_next;
1207                                 grp_entry++;
1208                         }
1209                 }
1210                 mutex_exit(&stmf_state.stmf_lock);
1211                 break;
1212 
1213         case STMF_IOCTL_GET_VE_LIST:
1214                 n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t);
1215                 mutex_enter(&stmf_state.stmf_lock);
1216                 ve = (stmf_view_op_entry_t *)obuf;
1217                 for (id_entry = stmf_state.stmf_luid_list.idl_head;
1218                     id_entry; id_entry = id_entry->id_next) {
1219                         for (view_entry = (stmf_view_entry_t *)
1220                             id_entry->id_impl_specific; view_entry;
1221                             view_entry = view_entry->ve_next) {
1222                                 iocd->stmf_obuf_max_nentries++;
1223                                 if (iocd->stmf_obuf_nentries >= n)
1224                                         continue;
1225                                 ve->ve_ndx_valid = 1;
1226                                 ve->ve_ndx = view_entry->ve_id;
1227                                 ve->ve_lu_number_valid = 1;
1228                                 bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8);
1229                                 bcopy(view_entry->ve_luid->id_data, ve->ve_guid,
1230                                     view_entry->ve_luid->id_data_size);
1231                                 if (view_entry->ve_hg->id_data[0] == '*') {
1232                                         ve->ve_all_hosts = 1;
1233                                 } else {
1234                                         bcopy(view_entry->ve_hg->id_data,
1235                                             ve->ve_host_group.name,
1236                                             view_entry->ve_hg->id_data_size);
1237                                         ve->ve_host_group.name_size =
1238                                             view_entry->ve_hg->id_data_size;
1239                                 }
1240 
1241                                 if (view_entry->ve_tg->id_data[0] == '*') {
1242                                         ve->ve_all_targets = 1;
1243                                 } else {
1244                                         bcopy(view_entry->ve_tg->id_data,
1245                                             ve->ve_target_group.name,
1246                                             view_entry->ve_tg->id_data_size);
1247                                         ve->ve_target_group.name_size =
1248                                             view_entry->ve_tg->id_data_size;
1249                                 }
1250                                 ve++;
1251                                 iocd->stmf_obuf_nentries++;
1252                         }
1253                 }
1254                 mutex_exit(&stmf_state.stmf_lock);
1255                 break;
1256 
1257         case STMF_IOCTL_LU_VE_LIST:
1258                 p_id = (uint8_t *)ibuf;
1259                 if ((iocd->stmf_ibuf_size != 16) ||
1260                     (iocd->stmf_obuf_size < sizeof (stmf_view_op_entry_t))) {
1261                         ret = EINVAL;
1262                         break;
1263                 }
1264 
1265                 n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t);
1266                 mutex_enter(&stmf_state.stmf_lock);
1267                 ve = (stmf_view_op_entry_t *)obuf;
1268                 for (id_entry = stmf_state.stmf_luid_list.idl_head;
1269                     id_entry; id_entry = id_entry->id_next) {
1270                         if (bcmp(id_entry->id_data, p_id, 16) != 0)
1271                                 continue;
1272                         for (view_entry = (stmf_view_entry_t *)
1273                             id_entry->id_impl_specific; view_entry;
1274                             view_entry = view_entry->ve_next) {
1275                                 iocd->stmf_obuf_max_nentries++;
1276                                 if (iocd->stmf_obuf_nentries >= n)
1277                                         continue;
1278                                 ve->ve_ndx_valid = 1;
1279                                 ve->ve_ndx = view_entry->ve_id;
1280                                 ve->ve_lu_number_valid = 1;
1281                                 bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8);
1282                                 bcopy(view_entry->ve_luid->id_data, ve->ve_guid,
1283                                     view_entry->ve_luid->id_data_size);
1284                                 if (view_entry->ve_hg->id_data[0] == '*') {
1285                                         ve->ve_all_hosts = 1;
1286                                 } else {
1287                                         bcopy(view_entry->ve_hg->id_data,
1288                                             ve->ve_host_group.name,
1289                                             view_entry->ve_hg->id_data_size);
1290                                         ve->ve_host_group.name_size =
1291                                             view_entry->ve_hg->id_data_size;
1292                                 }
1293 
1294                                 if (view_entry->ve_tg->id_data[0] == '*') {
1295                                         ve->ve_all_targets = 1;
1296                                 } else {
1297                                         bcopy(view_entry->ve_tg->id_data,
1298                                             ve->ve_target_group.name,
1299                                             view_entry->ve_tg->id_data_size);
1300                                         ve->ve_target_group.name_size =
1301                                             view_entry->ve_tg->id_data_size;
1302                                 }
1303                                 ve++;
1304                                 iocd->stmf_obuf_nentries++;
1305                         }
1306                         break;
1307                 }
1308                 mutex_exit(&stmf_state.stmf_lock);
1309                 break;
1310 
1311         case STMF_IOCTL_LOAD_PP_DATA:
1312                 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1313                         ret = EACCES;
1314                         iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1315                         break;
1316                 }
1317                 ppi = (stmf_ppioctl_data_t *)ibuf;
1318                 if ((ppi == NULL) ||
1319                     (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1320                         ret = EINVAL;
1321                         break;
1322                 }
1323                 /* returned token */
1324                 ppi_token = (uint64_t *)obuf;
1325                 if ((ppi_token == NULL) ||
1326                     (iocd->stmf_obuf_size < sizeof (uint64_t))) {
1327                         ret = EINVAL;
1328                         break;
1329                 }
1330                 ret = stmf_load_ppd_ioctl(ppi, ppi_token, &iocd->stmf_error);
1331                 break;
1332 
1333         case STMF_IOCTL_GET_PP_DATA:
1334                 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1335                         ret = EACCES;
1336                         iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1337                         break;
1338                 }
1339                 ppi = (stmf_ppioctl_data_t *)ibuf;
1340                 if (ppi == NULL ||
1341                     (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1342                         ret = EINVAL;
1343                         break;
1344                 }
1345                 ppi_out = (stmf_ppioctl_data_t *)obuf;
1346                 if ((ppi_out == NULL) ||
1347                     (iocd->stmf_obuf_size < sizeof (stmf_ppioctl_data_t))) {
1348                         ret = EINVAL;
1349                         break;
1350                 }
1351                 ret = stmf_get_ppd_ioctl(ppi, ppi_out, &iocd->stmf_error);
1352                 break;
1353 
1354         case STMF_IOCTL_CLEAR_PP_DATA:
1355                 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1356                         ret = EACCES;
1357                         iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1358                         break;
1359                 }
1360                 ppi = (stmf_ppioctl_data_t *)ibuf;
1361                 if ((ppi == NULL) ||
1362                     (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1363                         ret = EINVAL;
1364                         break;
1365                 }
1366                 ret = stmf_delete_ppd_ioctl(ppi);
1367                 break;
1368 
1369         case STMF_IOCTL_CLEAR_TRACE:
1370                 stmf_trace_clear();
1371                 break;
1372 
1373         case STMF_IOCTL_ADD_TRACE:
1374                 if (iocd->stmf_ibuf_size && ibuf) {
1375                         ((uint8_t *)ibuf)[iocd->stmf_ibuf_size - 1] = 0;
1376                         stmf_trace("\nstradm", "%s\n", ibuf);
1377                 }
1378                 break;
1379 
1380         case STMF_IOCTL_GET_TRACE_POSITION:
1381                 if (obuf && (iocd->stmf_obuf_size > 3)) {
1382                         mutex_enter(&trace_buf_lock);
1383                         *((int *)obuf) = trace_buf_curndx;
1384                         mutex_exit(&trace_buf_lock);
1385                 } else {
1386                         ret = EINVAL;
1387                 }
1388                 break;
1389 
1390         case STMF_IOCTL_GET_TRACE:
1391                 if ((iocd->stmf_obuf_size == 0) || (iocd->stmf_ibuf_size < 4)) {
1392                         ret = EINVAL;
1393                         break;
1394                 }
1395                 i = *((int *)ibuf);
1396                 if ((i > trace_buf_size) || ((i + iocd->stmf_obuf_size) >
1397                     trace_buf_size)) {
1398                         ret = EINVAL;
1399                         break;
1400                 }
1401                 mutex_enter(&trace_buf_lock);
1402                 bcopy(stmf_trace_buf + i, obuf, iocd->stmf_obuf_size);
1403                 mutex_exit(&trace_buf_lock);
1404                 break;
1405 
1406         default:
1407                 ret = ENOTTY;
1408         }
1409 
1410         if (ret == 0) {
1411                 ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
1412         } else if (iocd->stmf_error) {
1413                 (void) stmf_copyout_iocdata(data, mode, iocd, obuf);
1414         }
1415         if (obuf) {
1416                 kmem_free(obuf, iocd->stmf_obuf_size);
1417                 obuf = NULL;
1418         }
1419         if (ibuf) {
1420                 kmem_free(ibuf, iocd->stmf_ibuf_size);
1421                 ibuf = NULL;
1422         }
1423         kmem_free(iocd, sizeof (stmf_iocdata_t));
1424         return (ret);
1425 }
1426 
1427 static int
1428 stmf_get_service_state()
1429 {
1430         stmf_i_local_port_t *ilport;
1431         stmf_i_lu_t *ilu;
1432         int online = 0;
1433         int offline = 0;
1434         int onlining = 0;
1435         int offlining = 0;
1436 
1437         ASSERT(mutex_owned(&stmf_state.stmf_lock));
1438         for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1439             ilport = ilport->ilport_next) {
1440                 if (ilport->ilport_state == STMF_STATE_OFFLINE)
1441                         offline++;
1442                 else if (ilport->ilport_state == STMF_STATE_ONLINE)
1443                         online++;
1444                 else if (ilport->ilport_state == STMF_STATE_ONLINING)
1445                         onlining++;
1446                 else if (ilport->ilport_state == STMF_STATE_OFFLINING)
1447                         offlining++;
1448         }
1449 
1450         for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
1451             ilu = ilu->ilu_next) {
1452                 if (ilu->ilu_state == STMF_STATE_OFFLINE)
1453                         offline++;
1454                 else if (ilu->ilu_state == STMF_STATE_ONLINE)
1455                         online++;
1456                 else if (ilu->ilu_state == STMF_STATE_ONLINING)
1457                         onlining++;
1458                 else if (ilu->ilu_state == STMF_STATE_OFFLINING)
1459                         offlining++;
1460         }
1461 
1462         if (stmf_state.stmf_service_running) {
1463                 if (onlining)
1464                         return (STMF_STATE_ONLINING);
1465                 else
1466                         return (STMF_STATE_ONLINE);
1467         }
1468 
1469         if (offlining) {
1470                 return (STMF_STATE_OFFLINING);
1471         }
1472 
1473         return (STMF_STATE_OFFLINE);
1474 }
1475 
1476 static int
1477 stmf_set_stmf_state(stmf_state_desc_t *std)
1478 {
1479         stmf_i_local_port_t *ilport;
1480         stmf_i_lu_t *ilu;
1481         stmf_state_change_info_t ssi;
1482         int svc_state;
1483 
1484         ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
1485         ssi.st_additional_info = NULL;
1486 
1487         mutex_enter(&stmf_state.stmf_lock);
1488         if (!stmf_state.stmf_exclusive_open) {
1489                 mutex_exit(&stmf_state.stmf_lock);
1490                 return (EACCES);
1491         }
1492 
1493         if (stmf_state.stmf_inventory_locked) {
1494                 mutex_exit(&stmf_state.stmf_lock);
1495                 return (EBUSY);
1496         }
1497 
1498         if ((std->state != STMF_STATE_ONLINE) &&
1499             (std->state != STMF_STATE_OFFLINE)) {
1500                 mutex_exit(&stmf_state.stmf_lock);
1501                 return (EINVAL);
1502         }
1503 
1504         svc_state = stmf_get_service_state();
1505         if ((svc_state == STMF_STATE_OFFLINING) ||
1506             (svc_state == STMF_STATE_ONLINING)) {
1507                 mutex_exit(&stmf_state.stmf_lock);
1508                 return (EBUSY);
1509         }
1510 
1511         if (svc_state == STMF_STATE_OFFLINE) {
1512                 if (std->config_state == STMF_CONFIG_INIT) {
1513                         if (std->state != STMF_STATE_OFFLINE) {
1514                                 mutex_exit(&stmf_state.stmf_lock);
1515                                 return (EINVAL);
1516                         }
1517                         stmf_state.stmf_config_state = STMF_CONFIG_INIT;
1518                         stmf_delete_all_ppds();
1519                         stmf_view_clear_config();
1520                         stmf_view_init();
1521                         mutex_exit(&stmf_state.stmf_lock);
1522                         return (0);
1523                 }
1524                 if ((stmf_state.stmf_config_state == STMF_CONFIG_INIT) ||
1525                     (stmf_state.stmf_config_state == STMF_CONFIG_NONE)) {
1526                         if (std->config_state != STMF_CONFIG_INIT_DONE) {
1527                                 mutex_exit(&stmf_state.stmf_lock);
1528                                 return (EINVAL);
1529                         }
1530                         stmf_state.stmf_config_state = STMF_CONFIG_INIT_DONE;
1531                 }
1532                 if (std->state == STMF_STATE_OFFLINE) {
1533                         mutex_exit(&stmf_state.stmf_lock);
1534                         return (0);
1535                 }
1536                 if (stmf_state.stmf_config_state == STMF_CONFIG_INIT) {
1537                         mutex_exit(&stmf_state.stmf_lock);
1538                         return (EINVAL);
1539                 }
1540                 stmf_state.stmf_inventory_locked = 1;
1541                 stmf_state.stmf_service_running = 1;
1542                 mutex_exit(&stmf_state.stmf_lock);
1543 
1544                 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1545                     ilport = ilport->ilport_next) {
1546                         if (stmf_state.stmf_default_lport_state !=
1547                             STMF_STATE_ONLINE)
1548                                 continue;
1549                         (void) stmf_ctl(STMF_CMD_LPORT_ONLINE,
1550                             ilport->ilport_lport, &ssi);
1551                 }
1552 
1553                 for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
1554                     ilu = ilu->ilu_next) {
1555                         if (stmf_state.stmf_default_lu_state !=
1556                             STMF_STATE_ONLINE)
1557                                 continue;
1558                         (void) stmf_ctl(STMF_CMD_LU_ONLINE, ilu->ilu_lu, &ssi);
1559                 }
1560                 mutex_enter(&stmf_state.stmf_lock);
1561                 stmf_state.stmf_inventory_locked = 0;
1562                 mutex_exit(&stmf_state.stmf_lock);
1563                 return (0);
1564         }
1565 
1566         /* svc_state is STMF_STATE_ONLINE here */
1567         if ((std->state != STMF_STATE_OFFLINE) ||
1568             (std->config_state == STMF_CONFIG_INIT)) {
1569                 mutex_exit(&stmf_state.stmf_lock);
1570                 return (EACCES);
1571         }
1572 
1573         stmf_state.stmf_inventory_locked = 1;
1574         stmf_state.stmf_service_running = 0;
1575 
1576         mutex_exit(&stmf_state.stmf_lock);
1577         for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1578             ilport = ilport->ilport_next) {
1579                 if (ilport->ilport_state != STMF_STATE_ONLINE)
1580                         continue;
1581                 (void) stmf_ctl(STMF_CMD_LPORT_OFFLINE,
1582                     ilport->ilport_lport, &ssi);
1583         }
1584 
1585         for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
1586             ilu = ilu->ilu_next) {
1587                 if (ilu->ilu_state != STMF_STATE_ONLINE)
1588                         continue;
1589                 (void) stmf_ctl(STMF_CMD_LU_OFFLINE, ilu->ilu_lu, &ssi);
1590         }
1591         mutex_enter(&stmf_state.stmf_lock);
1592         stmf_state.stmf_inventory_locked = 0;
1593         mutex_exit(&stmf_state.stmf_lock);
1594         return (0);
1595 }
1596 
1597 static int
1598 stmf_get_stmf_state(stmf_state_desc_t *std)
1599 {
1600         mutex_enter(&stmf_state.stmf_lock);
1601         std->state = stmf_get_service_state();
1602         std->config_state = stmf_state.stmf_config_state;
1603         mutex_exit(&stmf_state.stmf_lock);
1604 
1605         return (0);
1606 }
1607 /*
1608  * handles registration message from pppt for a logical unit
1609  */
1610 stmf_status_t
1611 stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg, uint32_t type)
1612 {
1613         stmf_i_lu_provider_t    *ilp;
1614         stmf_lu_provider_t      *lp;
1615         mutex_enter(&stmf_state.stmf_lock);
1616         for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
1617                 if (strcmp(msg->icrl_lu_provider_name,
1618                     ilp->ilp_lp->lp_name) == 0) {
1619                         lp = ilp->ilp_lp;
1620                         mutex_exit(&stmf_state.stmf_lock);
1621                         lp->lp_proxy_msg(msg->icrl_lun_id, msg->icrl_cb_arg,
1622                             msg->icrl_cb_arg_len, type);
1623                         return (STMF_SUCCESS);
1624                 }
1625         }
1626         mutex_exit(&stmf_state.stmf_lock);
1627         return (STMF_SUCCESS);
1628 }
1629 
1630 /*
1631  * handles de-registration message from pppt for a logical unit
1632  */
1633 stmf_status_t
1634 stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg)
1635 {
1636         stmf_i_lu_provider_t    *ilp;
1637         stmf_lu_provider_t      *lp;
1638         mutex_enter(&stmf_state.stmf_lock);
1639         for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
1640                 if (strcmp(msg->icrl_lu_provider_name,
1641                     ilp->ilp_lp->lp_name) == 0) {
1642                         lp = ilp->ilp_lp;
1643                         mutex_exit(&stmf_state.stmf_lock);
1644                         lp->lp_proxy_msg(msg->icrl_lun_id, NULL, 0,
1645                             STMF_MSG_LU_DEREGISTER);
1646                         return (STMF_SUCCESS);
1647                 }
1648         }
1649         mutex_exit(&stmf_state.stmf_lock);
1650         return (STMF_SUCCESS);
1651 }
1652 
1653 /*
1654  * helper function to find a task that matches a task_msgid
1655  */
1656 scsi_task_t *
1657 find_task_from_msgid(uint8_t *lu_id, stmf_ic_msgid_t task_msgid)
1658 {
1659         stmf_i_lu_t *ilu;
1660         stmf_i_scsi_task_t *itask;
1661 
1662         mutex_enter(&stmf_state.stmf_lock);
1663         for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
1664                 if (bcmp(lu_id, ilu->ilu_lu->lu_id->ident, 16) == 0) {
1665                         break;
1666                 }
1667         }
1668 
1669         if (ilu == NULL) {
1670                 mutex_exit(&stmf_state.stmf_lock);
1671                 return (NULL);
1672         }
1673 
1674         mutex_enter(&ilu->ilu_task_lock);
1675         for (itask = ilu->ilu_tasks; itask != NULL;
1676             itask = itask->itask_lu_next) {
1677                 if (itask->itask_flags & (ITASK_IN_FREE_LIST |
1678                     ITASK_BEING_ABORTED)) {
1679                         continue;
1680                 }
1681                 if (itask->itask_proxy_msg_id == task_msgid) {
1682                         break;
1683                 }
1684         }
1685         mutex_exit(&ilu->ilu_task_lock);
1686         mutex_exit(&stmf_state.stmf_lock);
1687 
1688         if (itask != NULL) {
1689                 return (itask->itask_task);
1690         } else {
1691                 /* task not found. Likely already aborted. */
1692                 return (NULL);
1693         }
1694 }
1695 
1696 /*
1697  * message received from pppt/ic
1698  */
1699 stmf_status_t
1700 stmf_msg_rx(stmf_ic_msg_t *msg)
1701 {
1702         mutex_enter(&stmf_state.stmf_lock);
1703         if (stmf_state.stmf_alua_state != 1) {
1704                 mutex_exit(&stmf_state.stmf_lock);
1705                 cmn_err(CE_WARN, "stmf alua state is disabled");
1706                 ic_msg_free(msg);
1707                 return (STMF_FAILURE);
1708         }
1709         mutex_exit(&stmf_state.stmf_lock);
1710 
1711         switch (msg->icm_msg_type) {
1712                 case STMF_ICM_REGISTER_LUN:
1713                         (void) stmf_ic_lu_reg(
1714                             (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
1715                             STMF_MSG_LU_REGISTER);
1716                         break;
1717                 case STMF_ICM_LUN_ACTIVE:
1718                         (void) stmf_ic_lu_reg(
1719                             (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
1720                             STMF_MSG_LU_ACTIVE);
1721                         break;
1722                 case STMF_ICM_DEREGISTER_LUN:
1723                         (void) stmf_ic_lu_dereg(
1724                             (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg);
1725                         break;
1726                 case STMF_ICM_SCSI_DATA:
1727                         (void) stmf_ic_rx_scsi_data(
1728                             (stmf_ic_scsi_data_msg_t *)msg->icm_msg);
1729                         break;
1730                 case STMF_ICM_SCSI_STATUS:
1731                         (void) stmf_ic_rx_scsi_status(
1732                             (stmf_ic_scsi_status_msg_t *)msg->icm_msg);
1733                         break;
1734                 case STMF_ICM_STATUS:
1735                         (void) stmf_ic_rx_status(
1736                             (stmf_ic_status_msg_t *)msg->icm_msg);
1737                         break;
1738                 default:
1739                         cmn_err(CE_WARN, "unknown message received %d",
1740                             msg->icm_msg_type);
1741                         ic_msg_free(msg);
1742                         return (STMF_FAILURE);
1743         }
1744         ic_msg_free(msg);
1745         return (STMF_SUCCESS);
1746 }
1747 
1748 stmf_status_t
1749 stmf_ic_rx_status(stmf_ic_status_msg_t *msg)
1750 {
1751         stmf_i_local_port_t *ilport;
1752 
1753         if (msg->ics_msg_type != STMF_ICM_REGISTER_PROXY_PORT) {
1754                 /* for now, ignore other message status */
1755                 return (STMF_SUCCESS);
1756         }
1757 
1758         if (msg->ics_status != STMF_SUCCESS) {
1759                 return (STMF_SUCCESS);
1760         }
1761 
1762         mutex_enter(&stmf_state.stmf_lock);
1763         for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1764             ilport = ilport->ilport_next) {
1765                 if (msg->ics_msgid == ilport->ilport_reg_msgid) {
1766                         ilport->ilport_proxy_registered = 1;
1767                         break;
1768                 }
1769         }
1770         mutex_exit(&stmf_state.stmf_lock);
1771         return (STMF_SUCCESS);
1772 }
1773 
1774 /*
1775  * handles scsi status message from pppt
1776  */
1777 stmf_status_t
1778 stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg)
1779 {
1780         scsi_task_t *task;
1781 
1782         /* is this a task management command */
1783         if (msg->icss_task_msgid & MSG_ID_TM_BIT) {
1784                 return (STMF_SUCCESS);
1785         }
1786 
1787         task = find_task_from_msgid(msg->icss_lun_id, msg->icss_task_msgid);
1788 
1789         if (task == NULL) {
1790                 return (STMF_SUCCESS);
1791         }
1792 
1793         task->task_scsi_status = msg->icss_status;
1794         task->task_sense_data = msg->icss_sense;
1795         task->task_sense_length = msg->icss_sense_len;
1796         (void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE);
1797 
1798         return (STMF_SUCCESS);
1799 }
1800 
1801 /*
1802  * handles scsi data message from pppt
1803  */
1804 stmf_status_t
1805 stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg)
1806 {
1807         stmf_i_scsi_task_t *itask;
1808         scsi_task_t *task;
1809         stmf_xfer_data_t *xd = NULL;
1810         stmf_data_buf_t *dbuf;
1811         uint32_t sz, minsz, xd_sz, asz;
1812 
1813         /* is this a task management command */
1814         if (msg->icsd_task_msgid & MSG_ID_TM_BIT) {
1815                 return (STMF_SUCCESS);
1816         }
1817 
1818         task = find_task_from_msgid(msg->icsd_lun_id, msg->icsd_task_msgid);
1819         if (task == NULL) {
1820                 stmf_ic_msg_t *ic_xfer_done_msg = NULL;
1821                 static uint64_t data_msg_id;
1822                 stmf_status_t ic_ret = STMF_FAILURE;
1823                 mutex_enter(&stmf_state.stmf_lock);
1824                 data_msg_id = stmf_proxy_msg_id++;
1825                 mutex_exit(&stmf_state.stmf_lock);
1826                 /*
1827                  * send xfer done status to pppt
1828                  * for now, set the session id to 0 as we cannot
1829                  * ascertain it since we cannot find the task
1830                  */
1831                 ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc(
1832                     msg->icsd_task_msgid, 0, STMF_FAILURE, data_msg_id);
1833                 if (ic_xfer_done_msg) {
1834                         ic_ret = ic_tx_msg(ic_xfer_done_msg);
1835                         if (ic_ret != STMF_IC_MSG_SUCCESS) {
1836                                 cmn_err(CE_WARN, "unable to xmit proxy msg");
1837                         }
1838                 }
1839                 return (STMF_FAILURE);
1840         }
1841 
1842         itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
1843         dbuf = itask->itask_proxy_dbuf;
1844 
1845         task->task_cmd_xfer_length += msg->icsd_data_len;
1846 
1847         if (task->task_additional_flags &
1848             TASK_AF_NO_EXPECTED_XFER_LENGTH) {
1849                 task->task_expected_xfer_length =
1850                     task->task_cmd_xfer_length;
1851         }
1852 
1853         sz = min(task->task_expected_xfer_length,
1854             task->task_cmd_xfer_length);
1855 
1856         xd_sz = msg->icsd_data_len;
1857         asz = xd_sz + sizeof (*xd) - 4;
1858         xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP);
1859 
1860         if (xd == NULL) {
1861                 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
1862                     STMF_ALLOC_FAILURE, NULL);
1863                 return (STMF_FAILURE);
1864         }
1865 
1866         xd->alloc_size = asz;
1867         xd->size_left = xd_sz;
1868         bcopy(msg->icsd_data, xd->buf, xd_sz);
1869 
1870         sz = min(sz, xd->size_left);
1871         xd->size_left = sz;
1872         minsz = min(512, sz);
1873 
1874         if (dbuf == NULL)
1875                 dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
1876         if (dbuf == NULL) {
1877                 kmem_free(xd, xd->alloc_size);
1878                 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
1879                     STMF_ALLOC_FAILURE, NULL);
1880                 return (STMF_FAILURE);
1881         }
1882         dbuf->db_lu_private = xd;
1883         dbuf->db_relative_offset = task->task_nbytes_transferred;
1884         stmf_xd_to_dbuf(dbuf, 0);
1885 
1886         dbuf->db_flags = DB_DIRECTION_TO_RPORT;
1887         (void) stmf_xfer_data(task, dbuf, 0);
1888         return (STMF_SUCCESS);
1889 }
1890 
1891 stmf_status_t
1892 stmf_proxy_scsi_cmd(scsi_task_t *task, stmf_data_buf_t *dbuf)
1893 {
1894         stmf_i_scsi_task_t *itask =
1895             (stmf_i_scsi_task_t *)task->task_stmf_private;
1896         stmf_i_local_port_t *ilport =
1897             (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
1898         stmf_ic_msg_t *ic_cmd_msg;
1899         stmf_ic_msg_status_t ic_ret;
1900         stmf_status_t ret = STMF_FAILURE;
1901 
1902         if (stmf_state.stmf_alua_state != 1) {
1903                 cmn_err(CE_WARN, "stmf alua state is disabled");
1904                 return (STMF_FAILURE);
1905         }
1906 
1907         if (ilport->ilport_proxy_registered == 0) {
1908                 return (STMF_FAILURE);
1909         }
1910 
1911         mutex_enter(&stmf_state.stmf_lock);
1912         itask->itask_proxy_msg_id = stmf_proxy_msg_id++;
1913         mutex_exit(&stmf_state.stmf_lock);
1914         itask->itask_proxy_dbuf = dbuf;
1915 
1916         /*
1917          * stmf will now take over the task handling for this task
1918          * but it still needs to be treated differently from other
1919          * default handled tasks, hence the ITASK_PROXY_TASK.
1920          * If this is a task management function, we're really just
1921          * duping the command to the peer. Set the TM bit so that
1922          * we can recognize this on return since we won't be completing
1923          * the proxied task in that case.
1924          */
1925         if (task->task_mgmt_function) {
1926                 itask->itask_proxy_msg_id |= MSG_ID_TM_BIT;
1927         } else {
1928                 uint32_t new, old;
1929                 do {
1930                         new = old = itask->itask_flags;
1931                         if (new & ITASK_BEING_ABORTED)
1932                                 return (STMF_FAILURE);
1933                         new |= ITASK_DEFAULT_HANDLING | ITASK_PROXY_TASK;
1934                 } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
1935         }
1936         if (dbuf) {
1937                 ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id,
1938                     task, dbuf->db_data_size, dbuf->db_sglist[0].seg_addr,
1939                     itask->itask_proxy_msg_id);
1940         } else {
1941                 ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id,
1942                     task, 0, NULL, itask->itask_proxy_msg_id);
1943         }
1944         if (ic_cmd_msg) {
1945                 ic_ret = ic_tx_msg(ic_cmd_msg);
1946                 if (ic_ret == STMF_IC_MSG_SUCCESS) {
1947                         ret = STMF_SUCCESS;
1948                 }
1949         }
1950         return (ret);
1951 }
1952 
1953 
1954 stmf_status_t
1955 pppt_modload()
1956 {
1957         int error;
1958 
1959         if (pppt_mod == NULL && ((pppt_mod =
1960             ddi_modopen("drv/pppt", KRTLD_MODE_FIRST, &error)) == NULL)) {
1961                 cmn_err(CE_WARN, "Unable to load pppt");
1962                 return (STMF_FAILURE);
1963         }
1964 
1965         if (ic_reg_port_msg_alloc == NULL && ((ic_reg_port_msg_alloc =
1966             (stmf_ic_reg_port_msg_alloc_func_t)
1967             ddi_modsym(pppt_mod, "stmf_ic_reg_port_msg_alloc",
1968             &error)) == NULL)) {
1969                 cmn_err(CE_WARN,
1970                     "Unable to find symbol - stmf_ic_reg_port_msg_alloc");
1971                 return (STMF_FAILURE);
1972         }
1973 
1974 
1975         if (ic_dereg_port_msg_alloc == NULL && ((ic_dereg_port_msg_alloc =
1976             (stmf_ic_dereg_port_msg_alloc_func_t)
1977             ddi_modsym(pppt_mod, "stmf_ic_dereg_port_msg_alloc",
1978             &error)) == NULL)) {
1979                 cmn_err(CE_WARN,
1980                     "Unable to find symbol - stmf_ic_dereg_port_msg_alloc");
1981                 return (STMF_FAILURE);
1982         }
1983 
1984         if (ic_reg_lun_msg_alloc == NULL && ((ic_reg_lun_msg_alloc =
1985             (stmf_ic_reg_lun_msg_alloc_func_t)
1986             ddi_modsym(pppt_mod, "stmf_ic_reg_lun_msg_alloc",
1987             &error)) == NULL)) {
1988                 cmn_err(CE_WARN,
1989                     "Unable to find symbol - stmf_ic_reg_lun_msg_alloc");
1990                 return (STMF_FAILURE);
1991         }
1992 
1993         if (ic_lun_active_msg_alloc == NULL && ((ic_lun_active_msg_alloc =
1994             (stmf_ic_lun_active_msg_alloc_func_t)
1995             ddi_modsym(pppt_mod, "stmf_ic_lun_active_msg_alloc",
1996             &error)) == NULL)) {
1997                 cmn_err(CE_WARN,
1998                     "Unable to find symbol - stmf_ic_lun_active_msg_alloc");
1999                 return (STMF_FAILURE);
2000         }
2001 
2002         if (ic_dereg_lun_msg_alloc == NULL && ((ic_dereg_lun_msg_alloc =
2003             (stmf_ic_dereg_lun_msg_alloc_func_t)
2004             ddi_modsym(pppt_mod, "stmf_ic_dereg_lun_msg_alloc",
2005             &error)) == NULL)) {
2006                 cmn_err(CE_WARN,
2007                     "Unable to find symbol - stmf_ic_dereg_lun_msg_alloc");
2008                 return (STMF_FAILURE);
2009         }
2010 
2011         if (ic_scsi_cmd_msg_alloc == NULL && ((ic_scsi_cmd_msg_alloc =
2012             (stmf_ic_scsi_cmd_msg_alloc_func_t)
2013             ddi_modsym(pppt_mod, "stmf_ic_scsi_cmd_msg_alloc",
2014             &error)) == NULL)) {
2015                 cmn_err(CE_WARN,
2016                     "Unable to find symbol - stmf_ic_scsi_cmd_msg_alloc");
2017                 return (STMF_FAILURE);
2018         }
2019 
2020         if (ic_scsi_data_xfer_done_msg_alloc == NULL &&
2021             ((ic_scsi_data_xfer_done_msg_alloc =
2022             (stmf_ic_scsi_data_xfer_done_msg_alloc_func_t)
2023             ddi_modsym(pppt_mod, "stmf_ic_scsi_data_xfer_done_msg_alloc",
2024             &error)) == NULL)) {
2025                 cmn_err(CE_WARN,
2026                     "Unable to find symbol -"
2027                     "stmf_ic_scsi_data_xfer_done_msg_alloc");
2028                 return (STMF_FAILURE);
2029         }
2030 
2031         if (ic_session_reg_msg_alloc == NULL &&
2032             ((ic_session_reg_msg_alloc =
2033             (stmf_ic_session_create_msg_alloc_func_t)
2034             ddi_modsym(pppt_mod, "stmf_ic_session_create_msg_alloc",
2035             &error)) == NULL)) {
2036                 cmn_err(CE_WARN,
2037                     "Unable to find symbol -"
2038                     "stmf_ic_session_create_msg_alloc");
2039                 return (STMF_FAILURE);
2040         }
2041 
2042         if (ic_session_dereg_msg_alloc == NULL &&
2043             ((ic_session_dereg_msg_alloc =
2044             (stmf_ic_session_destroy_msg_alloc_func_t)
2045             ddi_modsym(pppt_mod, "stmf_ic_session_destroy_msg_alloc",
2046             &error)) == NULL)) {
2047                 cmn_err(CE_WARN,
2048                     "Unable to find symbol -"
2049                     "stmf_ic_session_destroy_msg_alloc");
2050                 return (STMF_FAILURE);
2051         }
2052 
2053         if (ic_tx_msg == NULL && ((ic_tx_msg =
2054             (stmf_ic_tx_msg_func_t)ddi_modsym(pppt_mod, "stmf_ic_tx_msg",
2055             &error)) == NULL)) {
2056                 cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_tx_msg");
2057                 return (STMF_FAILURE);
2058         }
2059 
2060         if (ic_msg_free == NULL && ((ic_msg_free =
2061             (stmf_ic_msg_free_func_t)ddi_modsym(pppt_mod, "stmf_ic_msg_free",
2062             &error)) == NULL)) {
2063                 cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_msg_free");
2064                 return (STMF_FAILURE);
2065         }
2066         return (STMF_SUCCESS);
2067 }
2068 
2069 static void
2070 stmf_get_alua_state(stmf_alua_state_desc_t *alua_state)
2071 {
2072         mutex_enter(&stmf_state.stmf_lock);
2073         alua_state->alua_node = stmf_state.stmf_alua_node;
2074         alua_state->alua_state = stmf_state.stmf_alua_state;
2075         mutex_exit(&stmf_state.stmf_lock);
2076 }
2077 
2078 
2079 static int
2080 stmf_set_alua_state(stmf_alua_state_desc_t *alua_state)
2081 {
2082         stmf_i_local_port_t *ilport;
2083         stmf_i_lu_t *ilu;
2084         stmf_lu_t *lu;
2085         stmf_ic_msg_status_t ic_ret;
2086         stmf_ic_msg_t *ic_reg_lun, *ic_reg_port;
2087         stmf_local_port_t *lport;
2088         int ret = 0;
2089 
2090         if (alua_state->alua_state > 1 || alua_state->alua_node > 1) {
2091                 return (EINVAL);
2092         }
2093 
2094         mutex_enter(&stmf_state.stmf_lock);
2095         if (alua_state->alua_state == 1) {
2096                 if (pppt_modload() == STMF_FAILURE) {
2097                         ret = EIO;
2098                         goto err;
2099                 }
2100                 if (alua_state->alua_node != 0) {
2101                         /* reset existing rtpids to new base */
2102                         stmf_rtpid_counter = 255;
2103                 }
2104                 stmf_state.stmf_alua_node = alua_state->alua_node;
2105                 stmf_state.stmf_alua_state = 1;
2106                 /* register existing local ports with ppp */
2107                 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
2108                     ilport = ilport->ilport_next) {
2109                         /* skip standby ports and non-alua participants */
2110                         if (ilport->ilport_standby == 1 ||
2111                             ilport->ilport_alua == 0) {
2112                                 continue;
2113                         }
2114                         if (alua_state->alua_node != 0) {
2115                                 ilport->ilport_rtpid =
2116                                     atomic_add_16_nv(&stmf_rtpid_counter, 1);
2117                         }
2118                         lport = ilport->ilport_lport;
2119                         ic_reg_port = ic_reg_port_msg_alloc(
2120                             lport->lport_id, ilport->ilport_rtpid,
2121                             0, NULL, stmf_proxy_msg_id);
2122                         if (ic_reg_port) {
2123                                 ic_ret = ic_tx_msg(ic_reg_port);
2124                                 if (ic_ret == STMF_IC_MSG_SUCCESS) {
2125                                         ilport->ilport_reg_msgid =
2126                                             stmf_proxy_msg_id++;
2127                                 } else {
2128                                         cmn_err(CE_WARN,
2129                                             "error on port registration "
2130                                             "port - %s",
2131                                             ilport->ilport_kstat_tgt_name);
2132                                 }
2133                         }
2134                 }
2135                 /* register existing logical units */
2136                 for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
2137                     ilu = ilu->ilu_next) {
2138                         if (ilu->ilu_access != STMF_LU_ACTIVE) {
2139                                 continue;
2140                         }
2141                         /* register with proxy module */
2142                         lu = ilu->ilu_lu;
2143                         if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
2144                             lu->lu_lp->lp_alua_support) {
2145                                 ilu->ilu_alua = 1;
2146                                 /* allocate the register message */
2147                                 ic_reg_lun = ic_reg_lun_msg_alloc(
2148                                     lu->lu_id->ident, lu->lu_lp->lp_name,
2149                                     lu->lu_proxy_reg_arg_len,
2150                                     (uint8_t *)lu->lu_proxy_reg_arg,
2151                                     stmf_proxy_msg_id);
2152                                 /* send the message */
2153                                 if (ic_reg_lun) {
2154                                         ic_ret = ic_tx_msg(ic_reg_lun);
2155                                         if (ic_ret == STMF_IC_MSG_SUCCESS) {
2156                                                 stmf_proxy_msg_id++;
2157                                         }
2158                                 }
2159                         }
2160                 }
2161         } else {
2162                 stmf_state.stmf_alua_state = 0;
2163         }
2164 
2165 err:
2166         mutex_exit(&stmf_state.stmf_lock);
2167         return (ret);
2168 }
2169 
2170 
2171 typedef struct {
2172         void    *bp;    /* back pointer from internal struct to main struct */
2173         int     alloc_size;
2174 } __istmf_t;
2175 
2176 typedef struct {
2177         __istmf_t       *fp;    /* Framework private */
2178         void            *cp;    /* Caller private */
2179         void            *ss;    /* struct specific */
2180 } __stmf_t;
2181 
2182 static struct {
2183         int shared;
2184         int fw_private;
2185 } stmf_sizes[] = { { 0, 0 },
2186         { GET_STRUCT_SIZE(stmf_lu_provider_t),
2187                 GET_STRUCT_SIZE(stmf_i_lu_provider_t) },
2188         { GET_STRUCT_SIZE(stmf_port_provider_t),
2189                 GET_STRUCT_SIZE(stmf_i_port_provider_t) },
2190         { GET_STRUCT_SIZE(stmf_local_port_t),
2191                 GET_STRUCT_SIZE(stmf_i_local_port_t) },
2192         { GET_STRUCT_SIZE(stmf_lu_t),
2193                 GET_STRUCT_SIZE(stmf_i_lu_t) },
2194         { GET_STRUCT_SIZE(stmf_scsi_session_t),
2195                 GET_STRUCT_SIZE(stmf_i_scsi_session_t) },
2196         { GET_STRUCT_SIZE(scsi_task_t),
2197                 GET_STRUCT_SIZE(stmf_i_scsi_task_t) },
2198         { GET_STRUCT_SIZE(stmf_data_buf_t),
2199                 GET_STRUCT_SIZE(__istmf_t) },
2200         { GET_STRUCT_SIZE(stmf_dbuf_store_t),
2201                 GET_STRUCT_SIZE(__istmf_t) }
2202 
2203 };
2204 
2205 void *
2206 stmf_alloc(stmf_struct_id_t struct_id, int additional_size, int flags)
2207 {
2208         int stmf_size;
2209         int kmem_flag;
2210         __stmf_t *sh;
2211 
2212         if ((struct_id == 0) || (struct_id >= STMF_MAX_STRUCT_IDS))
2213                 return (NULL);
2214 
2215         if ((curthread->t_flag & T_INTR_THREAD) || (flags & AF_FORCE_NOSLEEP)) {
2216                 kmem_flag = KM_NOSLEEP;
2217         } else {
2218                 kmem_flag = KM_SLEEP;
2219         }
2220 
2221         additional_size = (additional_size + 7) & (~7);
2222         stmf_size = stmf_sizes[struct_id].shared +
2223             stmf_sizes[struct_id].fw_private + additional_size;
2224 
2225         if (flags & AF_DONTZERO)
2226                 sh = (__stmf_t *)kmem_alloc(stmf_size, kmem_flag);
2227         else
2228                 sh = (__stmf_t *)kmem_zalloc(stmf_size, kmem_flag);
2229 
2230         if (sh == NULL)
2231                 return (NULL);
2232 
2233         /*
2234          * In principle, the implementation inside stmf_alloc should not
2235          * be changed anyway. But the original order of framework private
2236          * data and caller private data does not support sglist in the caller
2237          * private data.
2238          * To work around this, the memory segments of framework private
2239          * data and caller private data are re-ordered here.
2240          * A better solution is to provide a specific interface to allocate
2241          * the sglist, then we will not need this workaround any more.
2242          * But before the new interface is available, the memory segment
2243          * ordering should be kept as is.
2244          */
2245         sh->cp = GET_BYTE_OFFSET(sh, stmf_sizes[struct_id].shared);
2246         sh->fp = (__istmf_t *)GET_BYTE_OFFSET(sh,
2247             stmf_sizes[struct_id].shared + additional_size);
2248 
2249         sh->fp->bp = sh;
2250         /* Just store the total size instead of storing additional size */
2251         sh->fp->alloc_size = stmf_size;
2252 
2253         return (sh);
2254 }
2255 
2256 void
2257 stmf_free(void *ptr)
2258 {
2259         __stmf_t *sh = (__stmf_t *)ptr;
2260 
2261         /*
2262          * So far we dont need any struct specific processing. If such
2263          * a need ever arises, then store the struct id in the framework
2264          * private section and get it here as sh->fp->struct_id.
2265          */
2266         kmem_free(ptr, sh->fp->alloc_size);
2267 }
2268 
2269 /*
2270  * Given a pointer to stmf_lu_t, verifies if this lu is registered with the
2271  * framework and returns a pointer to framework private data for the lu.
2272  * Returns NULL if the lu was not found.
2273  */
2274 stmf_i_lu_t *
2275 stmf_lookup_lu(stmf_lu_t *lu)
2276 {
2277         stmf_i_lu_t *ilu;
2278         ASSERT(mutex_owned(&stmf_state.stmf_lock));
2279 
2280         for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
2281                 if (ilu->ilu_lu == lu)
2282                         return (ilu);
2283         }
2284         return (NULL);
2285 }
2286 
2287 /*
2288  * Given a pointer to stmf_local_port_t, verifies if this lport is registered
2289  * with the framework and returns a pointer to framework private data for
2290  * the lport.
2291  * Returns NULL if the lport was not found.
2292  */
2293 stmf_i_local_port_t *
2294 stmf_lookup_lport(stmf_local_port_t *lport)
2295 {
2296         stmf_i_local_port_t *ilport;
2297         ASSERT(mutex_owned(&stmf_state.stmf_lock));
2298 
2299         for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
2300             ilport = ilport->ilport_next) {
2301                 if (ilport->ilport_lport == lport)
2302                         return (ilport);
2303         }
2304         return (NULL);
2305 }
2306 
2307 stmf_status_t
2308 stmf_register_lu_provider(stmf_lu_provider_t *lp)
2309 {
2310         stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private;
2311         stmf_pp_data_t *ppd;
2312         uint32_t cb_flags;
2313 
2314         if (lp->lp_lpif_rev != LPIF_REV_1 && lp->lp_lpif_rev != LPIF_REV_2)
2315                 return (STMF_FAILURE);
2316 
2317         mutex_enter(&stmf_state.stmf_lock);
2318         ilp->ilp_next = stmf_state.stmf_ilplist;
2319         stmf_state.stmf_ilplist = ilp;
2320         stmf_state.stmf_nlps++;
2321 
2322         /* See if we need to do a callback */
2323         for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2324                 if (strcmp(ppd->ppd_name, lp->lp_name) == 0) {
2325                         break;
2326                 }
2327         }
2328         if ((ppd == NULL) || (ppd->ppd_nv == NULL)) {
2329                 goto rlp_bail_out;
2330         }
2331         ilp->ilp_ppd = ppd;
2332         ppd->ppd_provider = ilp;
2333         if (lp->lp_cb == NULL)
2334                 goto rlp_bail_out;
2335         ilp->ilp_cb_in_progress = 1;
2336         cb_flags = STMF_PCB_PREG_COMPLETE;
2337         if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
2338                 cb_flags |= STMF_PCB_STMF_ONLINING;
2339         mutex_exit(&stmf_state.stmf_lock);
2340         lp->lp_cb(lp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
2341         mutex_enter(&stmf_state.stmf_lock);
2342         ilp->ilp_cb_in_progress = 0;
2343 
2344 rlp_bail_out:
2345         mutex_exit(&stmf_state.stmf_lock);
2346 
2347         return (STMF_SUCCESS);
2348 }
2349 
2350 stmf_status_t
2351 stmf_deregister_lu_provider(stmf_lu_provider_t *lp)
2352 {
2353         stmf_i_lu_provider_t    **ppilp;
2354         stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private;
2355 
2356         mutex_enter(&stmf_state.stmf_lock);
2357         if (ilp->ilp_nlus || ilp->ilp_cb_in_progress) {
2358                 mutex_exit(&stmf_state.stmf_lock);
2359                 return (STMF_BUSY);
2360         }
2361         for (ppilp = &stmf_state.stmf_ilplist; *ppilp != NULL;
2362             ppilp = &((*ppilp)->ilp_next)) {
2363                 if (*ppilp == ilp) {
2364                         *ppilp = ilp->ilp_next;
2365                         stmf_state.stmf_nlps--;
2366                         if (ilp->ilp_ppd) {
2367                                 ilp->ilp_ppd->ppd_provider = NULL;
2368                                 ilp->ilp_ppd = NULL;
2369                         }
2370                         mutex_exit(&stmf_state.stmf_lock);
2371                         return (STMF_SUCCESS);
2372                 }
2373         }
2374         mutex_exit(&stmf_state.stmf_lock);
2375         return (STMF_NOT_FOUND);
2376 }
2377 
2378 stmf_status_t
2379 stmf_register_port_provider(stmf_port_provider_t *pp)
2380 {
2381         stmf_i_port_provider_t *ipp =
2382             (stmf_i_port_provider_t *)pp->pp_stmf_private;
2383         stmf_pp_data_t *ppd;
2384         uint32_t cb_flags;
2385 
2386         if (pp->pp_portif_rev != PORTIF_REV_1)
2387                 return (STMF_FAILURE);
2388 
2389         mutex_enter(&stmf_state.stmf_lock);
2390         ipp->ipp_next = stmf_state.stmf_ipplist;
2391         stmf_state.stmf_ipplist = ipp;
2392         stmf_state.stmf_npps++;
2393         /* See if we need to do a callback */
2394         for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2395                 if (strcmp(ppd->ppd_name, pp->pp_name) == 0) {
2396                         break;
2397                 }
2398         }
2399         if ((ppd == NULL) || (ppd->ppd_nv == NULL)) {
2400                 goto rpp_bail_out;
2401         }
2402         ipp->ipp_ppd = ppd;
2403         ppd->ppd_provider = ipp;
2404         if (pp->pp_cb == NULL)
2405                 goto rpp_bail_out;
2406         ipp->ipp_cb_in_progress = 1;
2407         cb_flags = STMF_PCB_PREG_COMPLETE;
2408         if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
2409                 cb_flags |= STMF_PCB_STMF_ONLINING;
2410         mutex_exit(&stmf_state.stmf_lock);
2411         pp->pp_cb(pp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
2412         mutex_enter(&stmf_state.stmf_lock);
2413         ipp->ipp_cb_in_progress = 0;
2414 
2415 rpp_bail_out:
2416         mutex_exit(&stmf_state.stmf_lock);
2417 
2418         return (STMF_SUCCESS);
2419 }
2420 
2421 stmf_status_t
2422 stmf_deregister_port_provider(stmf_port_provider_t *pp)
2423 {
2424         stmf_i_port_provider_t *ipp =
2425             (stmf_i_port_provider_t *)pp->pp_stmf_private;
2426         stmf_i_port_provider_t **ppipp;
2427 
2428         mutex_enter(&stmf_state.stmf_lock);
2429         if (ipp->ipp_npps || ipp->ipp_cb_in_progress) {
2430                 mutex_exit(&stmf_state.stmf_lock);
2431                 return (STMF_BUSY);
2432         }
2433         for (ppipp = &stmf_state.stmf_ipplist; *ppipp != NULL;
2434             ppipp = &((*ppipp)->ipp_next)) {
2435                 if (*ppipp == ipp) {
2436                         *ppipp = ipp->ipp_next;
2437                         stmf_state.stmf_npps--;
2438                         if (ipp->ipp_ppd) {
2439                                 ipp->ipp_ppd->ppd_provider = NULL;
2440                                 ipp->ipp_ppd = NULL;
2441                         }
2442                         mutex_exit(&stmf_state.stmf_lock);
2443                         return (STMF_SUCCESS);
2444                 }
2445         }
2446         mutex_exit(&stmf_state.stmf_lock);
2447         return (STMF_NOT_FOUND);
2448 }
2449 
2450 int
2451 stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token,
2452     uint32_t *err_ret)
2453 {
2454         stmf_i_port_provider_t          *ipp;
2455         stmf_i_lu_provider_t            *ilp;
2456         stmf_pp_data_t                  *ppd;
2457         nvlist_t                        *nv;
2458         int                             s;
2459         int                             ret;
2460 
2461         *err_ret = 0;
2462 
2463         if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
2464                 return (EINVAL);
2465         }
2466 
2467         mutex_enter(&stmf_state.stmf_lock);
2468         for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2469                 if (ppi->ppi_lu_provider) {
2470                         if (!ppd->ppd_lu_provider)
2471                                 continue;
2472                 } else if (ppi->ppi_port_provider) {
2473                         if (!ppd->ppd_port_provider)
2474                                 continue;
2475                 }
2476                 if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
2477                         break;
2478         }
2479 
2480         if (ppd == NULL) {
2481                 /* New provider */
2482                 s = strlen(ppi->ppi_name);
2483                 if (s > 254) {
2484                         mutex_exit(&stmf_state.stmf_lock);
2485                         return (EINVAL);
2486                 }
2487                 s += sizeof (stmf_pp_data_t) - 7;
2488 
2489                 ppd = kmem_zalloc(s, KM_NOSLEEP);
2490                 if (ppd == NULL) {
2491                         mutex_exit(&stmf_state.stmf_lock);
2492                         return (ENOMEM);
2493                 }
2494                 ppd->ppd_alloc_size = s;
2495                 (void) strcpy(ppd->ppd_name, ppi->ppi_name);
2496 
2497                 /* See if this provider already exists */
2498                 if (ppi->ppi_lu_provider) {
2499                         ppd->ppd_lu_provider = 1;
2500                         for (ilp = stmf_state.stmf_ilplist; ilp != NULL;
2501                             ilp = ilp->ilp_next) {
2502                                 if (strcmp(ppi->ppi_name,
2503                                     ilp->ilp_lp->lp_name) == 0) {
2504                                         ppd->ppd_provider = ilp;
2505                                         ilp->ilp_ppd = ppd;
2506                                         break;
2507                                 }
2508                         }
2509                 } else {
2510                         ppd->ppd_port_provider = 1;
2511                         for (ipp = stmf_state.stmf_ipplist; ipp != NULL;
2512                             ipp = ipp->ipp_next) {
2513                                 if (strcmp(ppi->ppi_name,
2514                                     ipp->ipp_pp->pp_name) == 0) {
2515                                         ppd->ppd_provider = ipp;
2516                                         ipp->ipp_ppd = ppd;
2517                                         break;
2518                                 }
2519                         }
2520                 }
2521 
2522                 /* Link this ppd in */
2523                 ppd->ppd_next = stmf_state.stmf_ppdlist;
2524                 stmf_state.stmf_ppdlist = ppd;
2525         }
2526 
2527         /*
2528          * User is requesting that the token be checked.
2529          * If there was another set after the user's get
2530          * it's an error
2531          */
2532         if (ppi->ppi_token_valid) {
2533                 if (ppi->ppi_token != ppd->ppd_token) {
2534                         *err_ret = STMF_IOCERR_PPD_UPDATED;
2535                         mutex_exit(&stmf_state.stmf_lock);
2536                         return (EINVAL);
2537                 }
2538         }
2539 
2540         if ((ret = nvlist_unpack((char *)ppi->ppi_data,
2541             (size_t)ppi->ppi_data_size, &nv, KM_NOSLEEP)) != 0) {
2542                 mutex_exit(&stmf_state.stmf_lock);
2543                 return (ret);
2544         }
2545 
2546         /* Free any existing lists and add this one to the ppd */
2547         if (ppd->ppd_nv)
2548                 nvlist_free(ppd->ppd_nv);
2549         ppd->ppd_nv = nv;
2550 
2551         /* set the token for writes */
2552         ppd->ppd_token++;
2553         /* return token to caller */
2554         if (ppi_token) {
2555                 *ppi_token = ppd->ppd_token;
2556         }
2557 
2558         /* If there is a provider registered, do the notifications */
2559         if (ppd->ppd_provider) {
2560                 uint32_t cb_flags = 0;
2561 
2562                 if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
2563                         cb_flags |= STMF_PCB_STMF_ONLINING;
2564                 if (ppi->ppi_lu_provider) {
2565                         ilp = (stmf_i_lu_provider_t *)ppd->ppd_provider;
2566                         if (ilp->ilp_lp->lp_cb == NULL)
2567                                 goto bail_out;
2568                         ilp->ilp_cb_in_progress = 1;
2569                         mutex_exit(&stmf_state.stmf_lock);
2570                         ilp->ilp_lp->lp_cb(ilp->ilp_lp,
2571                             STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
2572                         mutex_enter(&stmf_state.stmf_lock);
2573                         ilp->ilp_cb_in_progress = 0;
2574                 } else {
2575                         ipp = (stmf_i_port_provider_t *)ppd->ppd_provider;
2576                         if (ipp->ipp_pp->pp_cb == NULL)
2577                                 goto bail_out;
2578                         ipp->ipp_cb_in_progress = 1;
2579                         mutex_exit(&stmf_state.stmf_lock);
2580                         ipp->ipp_pp->pp_cb(ipp->ipp_pp,
2581                             STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
2582                         mutex_enter(&stmf_state.stmf_lock);
2583                         ipp->ipp_cb_in_progress = 0;
2584                 }
2585         }
2586 
2587 bail_out:
2588         mutex_exit(&stmf_state.stmf_lock);
2589 
2590         return (0);
2591 }
2592 
2593 void
2594 stmf_delete_ppd(stmf_pp_data_t *ppd)
2595 {
2596         stmf_pp_data_t **pppd;
2597 
2598         ASSERT(mutex_owned(&stmf_state.stmf_lock));
2599         if (ppd->ppd_provider) {
2600                 if (ppd->ppd_lu_provider) {
2601                         ((stmf_i_lu_provider_t *)
2602                             ppd->ppd_provider)->ilp_ppd = NULL;
2603                 } else {
2604                         ((stmf_i_port_provider_t *)
2605                             ppd->ppd_provider)->ipp_ppd = NULL;
2606                 }
2607                 ppd->ppd_provider = NULL;
2608         }
2609 
2610         for (pppd = &stmf_state.stmf_ppdlist; *pppd != NULL;
2611             pppd = &((*pppd)->ppd_next)) {
2612                 if (*pppd == ppd)
2613                         break;
2614         }
2615 
2616         if (*pppd == NULL)
2617                 return;
2618 
2619         *pppd = ppd->ppd_next;
2620         if (ppd->ppd_nv)
2621                 nvlist_free(ppd->ppd_nv);
2622 
2623         kmem_free(ppd, ppd->ppd_alloc_size);
2624 }
2625 
2626 int
2627 stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi)
2628 {
2629         stmf_pp_data_t *ppd;
2630         int ret = ENOENT;
2631 
2632         if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
2633                 return (EINVAL);
2634         }
2635 
2636         mutex_enter(&stmf_state.stmf_lock);
2637 
2638         for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2639                 if (ppi->ppi_lu_provider) {
2640                         if (!ppd->ppd_lu_provider)
2641                                 continue;
2642                 } else if (ppi->ppi_port_provider) {
2643                         if (!ppd->ppd_port_provider)
2644                                 continue;
2645                 }
2646                 if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
2647                         break;
2648         }
2649 
2650         if (ppd) {
2651                 ret = 0;
2652                 stmf_delete_ppd(ppd);
2653         }
2654         mutex_exit(&stmf_state.stmf_lock);
2655 
2656         return (ret);
2657 }
2658 
2659 int
2660 stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out,
2661     uint32_t *err_ret)
2662 {
2663         stmf_pp_data_t *ppd;
2664         size_t req_size;
2665         int ret = ENOENT;
2666         char *bufp = (char *)ppi_out->ppi_data;
2667 
2668         if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
2669                 return (EINVAL);
2670         }
2671 
2672         mutex_enter(&stmf_state.stmf_lock);
2673 
2674         for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2675                 if (ppi->ppi_lu_provider) {
2676                         if (!ppd->ppd_lu_provider)
2677                                 continue;
2678                 } else if (ppi->ppi_port_provider) {
2679                         if (!ppd->ppd_port_provider)
2680                                 continue;
2681                 }
2682                 if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
2683                         break;
2684         }
2685 
2686         if (ppd && ppd->ppd_nv) {
2687                 ppi_out->ppi_token = ppd->ppd_token;
2688                 if ((ret = nvlist_size(ppd->ppd_nv, &req_size,
2689                     NV_ENCODE_XDR)) != 0) {
2690                         goto done;
2691                 }
2692                 ppi_out->ppi_data_size = req_size;
2693                 if (req_size > ppi->ppi_data_size) {
2694                         *err_ret = STMF_IOCERR_INSUFFICIENT_BUF;
2695                         ret = EINVAL;
2696                         goto done;
2697                 }
2698 
2699                 if ((ret = nvlist_pack(ppd->ppd_nv, &bufp, &req_size,
2700                     NV_ENCODE_XDR, 0)) != 0) {
2701                         goto done;
2702                 }
2703                 ret = 0;
2704         }
2705 
2706 done:
2707         mutex_exit(&stmf_state.stmf_lock);
2708 
2709         return (ret);
2710 }
2711 
2712 void
2713 stmf_delete_all_ppds()
2714 {
2715         stmf_pp_data_t *ppd, *nppd;
2716 
2717         ASSERT(mutex_owned(&stmf_state.stmf_lock));
2718         for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = nppd) {
2719                 nppd = ppd->ppd_next;
2720                 stmf_delete_ppd(ppd);
2721         }
2722 }
2723 
2724 /*
2725  * 16 is the max string length of a protocol_ident, increase
2726  * the size if needed.
2727  */
2728 #define STMF_KSTAT_LU_SZ        (STMF_GUID_INPUT + 1 + 256)
2729 #define STMF_KSTAT_TGT_SZ       (256 * 2 + 16)
2730 
2731 /*
2732  * This array matches the Protocol Identifier in stmf_ioctl.h
2733  */
2734 #define MAX_PROTO_STR_LEN       32
2735 
2736 char *protocol_ident[PROTOCOL_ANY] = {
2737         "Fibre Channel",
2738         "Parallel SCSI",
2739         "SSA",
2740         "IEEE_1394",
2741         "SRP",
2742         "iSCSI",
2743         "SAS",
2744         "ADT",
2745         "ATAPI",
2746         "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN"
2747 };
2748 
2749 /*
2750  * Update the lun wait/run queue count
2751  */
2752 static void
2753 stmf_update_kstat_lu_q(scsi_task_t *task, void func())
2754 {
2755         stmf_i_lu_t             *ilu;
2756         kstat_io_t              *kip;
2757 
2758         if (task->task_lu == dlun0)
2759                 return;
2760         ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
2761         if (ilu != NULL && ilu->ilu_kstat_io != NULL) {
2762                 kip = KSTAT_IO_PTR(ilu->ilu_kstat_io);
2763                 if (kip != NULL) {
2764                         func(kip);
2765                 }
2766         }
2767 }
2768 
2769 /*
2770  * Update the target(lport) wait/run queue count
2771  */
2772 static void
2773 stmf_update_kstat_lport_q(scsi_task_t *task, void func())
2774 {
2775         stmf_i_local_port_t     *ilp;
2776         kstat_io_t              *kip;
2777 
2778         ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
2779         if (ilp != NULL && ilp->ilport_kstat_io != NULL) {
2780                 kip = KSTAT_IO_PTR(ilp->ilport_kstat_io);
2781                 if (kip != NULL) {
2782                         mutex_enter(ilp->ilport_kstat_io->ks_lock);
2783                         func(kip);
2784                         mutex_exit(ilp->ilport_kstat_io->ks_lock);
2785                 }
2786         }
2787 }
2788 
2789 static void
2790 stmf_update_kstat_lport_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
2791 {
2792         stmf_i_local_port_t     *ilp;
2793         kstat_io_t              *kip;
2794 
2795         ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
2796         if (ilp != NULL && ilp->ilport_kstat_io != NULL) {
2797                 kip = KSTAT_IO_PTR(ilp->ilport_kstat_io);
2798                 if (kip != NULL) {
2799                         mutex_enter(ilp->ilport_kstat_io->ks_lock);
2800                         STMF_UPDATE_KSTAT_IO(kip, dbuf);
2801                         mutex_exit(ilp->ilport_kstat_io->ks_lock);
2802                 }
2803         }
2804 }
2805 
2806 static void
2807 stmf_update_kstat_lu_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
2808 {
2809         stmf_i_lu_t             *ilu;
2810         kstat_io_t              *kip;
2811 
2812         ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
2813         if (ilu != NULL && ilu->ilu_kstat_io != NULL) {
2814                 kip = KSTAT_IO_PTR(ilu->ilu_kstat_io);
2815                 if (kip != NULL) {
2816                         mutex_enter(ilu->ilu_kstat_io->ks_lock);
2817                         STMF_UPDATE_KSTAT_IO(kip, dbuf);
2818                         mutex_exit(ilu->ilu_kstat_io->ks_lock);
2819                 }
2820         }
2821 }
2822 
2823 static void
2824 stmf_create_kstat_lu(stmf_i_lu_t *ilu)
2825 {
2826         char                            ks_nm[KSTAT_STRLEN];
2827         stmf_kstat_lu_info_t            *ks_lu;
2828 
2829         /* create kstat lun info */
2830         ks_lu = (stmf_kstat_lu_info_t *)kmem_zalloc(STMF_KSTAT_LU_SZ,
2831             KM_NOSLEEP);
2832         if (ks_lu == NULL) {
2833                 cmn_err(CE_WARN, "STMF: kmem_zalloc failed");
2834                 return;
2835         }
2836 
2837         bzero(ks_nm, sizeof (ks_nm));
2838         (void) sprintf(ks_nm, "stmf_lu_%"PRIxPTR"", (uintptr_t)ilu);
2839         if ((ilu->ilu_kstat_info = kstat_create(STMF_MODULE_NAME, 0,
2840             ks_nm, "misc", KSTAT_TYPE_NAMED,
2841             sizeof (stmf_kstat_lu_info_t) / sizeof (kstat_named_t),
2842             KSTAT_FLAG_VIRTUAL)) == NULL) {
2843                 kmem_free(ks_lu, STMF_KSTAT_LU_SZ);
2844                 cmn_err(CE_WARN, "STMF: kstat_create lu failed");
2845                 return;
2846         }
2847 
2848         ilu->ilu_kstat_info->ks_data_size = STMF_KSTAT_LU_SZ;
2849         ilu->ilu_kstat_info->ks_data = ks_lu;
2850 
2851         kstat_named_init(&ks_lu->i_lun_guid, "lun-guid",
2852             KSTAT_DATA_STRING);
2853         kstat_named_init(&ks_lu->i_lun_alias, "lun-alias",
2854             KSTAT_DATA_STRING);
2855 
2856         /* convert guid to hex string */
2857         int             i;
2858         uint8_t         *p = ilu->ilu_lu->lu_id->ident;
2859         bzero(ilu->ilu_ascii_hex_guid, sizeof (ilu->ilu_ascii_hex_guid));
2860         for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
2861                 (void) sprintf(&ilu->ilu_ascii_hex_guid[i * 2], "%02x", p[i]);
2862         }
2863         kstat_named_setstr(&ks_lu->i_lun_guid,
2864             (const char *)ilu->ilu_ascii_hex_guid);
2865         kstat_named_setstr(&ks_lu->i_lun_alias,
2866             (const char *)ilu->ilu_lu->lu_alias);
2867         kstat_install(ilu->ilu_kstat_info);
2868 
2869         /* create kstat lun io */
2870         bzero(ks_nm, sizeof (ks_nm));
2871         (void) sprintf(ks_nm, "stmf_lu_io_%"PRIxPTR"", (uintptr_t)ilu);
2872         if ((ilu->ilu_kstat_io = kstat_create(STMF_MODULE_NAME, 0,
2873             ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
2874                 cmn_err(CE_WARN, "STMF: kstat_create lu_io failed");
2875                 return;
2876         }
2877         mutex_init(&ilu->ilu_kstat_lock, NULL, MUTEX_DRIVER, 0);
2878         ilu->ilu_kstat_io->ks_lock = &ilu->ilu_kstat_lock;
2879         kstat_install(ilu->ilu_kstat_io);
2880 }
2881 
2882 static void
2883 stmf_create_kstat_lport(stmf_i_local_port_t *ilport)
2884 {
2885         char                            ks_nm[KSTAT_STRLEN];
2886         stmf_kstat_tgt_info_t           *ks_tgt;
2887         int                             id, len;
2888 
2889         /* create kstat lport info */
2890         ks_tgt = (stmf_kstat_tgt_info_t *)kmem_zalloc(STMF_KSTAT_TGT_SZ,
2891             KM_NOSLEEP);
2892         if (ks_tgt == NULL) {
2893                 cmn_err(CE_WARN, "STMF: kmem_zalloc failed");
2894                 return;
2895         }
2896 
2897         bzero(ks_nm, sizeof (ks_nm));
2898         (void) sprintf(ks_nm, "stmf_tgt_%"PRIxPTR"", (uintptr_t)ilport);
2899         if ((ilport->ilport_kstat_info = kstat_create(STMF_MODULE_NAME,
2900             0, ks_nm, "misc", KSTAT_TYPE_NAMED,
2901             sizeof (stmf_kstat_tgt_info_t) / sizeof (kstat_named_t),
2902             KSTAT_FLAG_VIRTUAL)) == NULL) {
2903                 kmem_free(ks_tgt, STMF_KSTAT_TGT_SZ);
2904                 cmn_err(CE_WARN, "STMF: kstat_create target failed");
2905                 return;
2906         }
2907 
2908         ilport->ilport_kstat_info->ks_data_size = STMF_KSTAT_TGT_SZ;
2909         ilport->ilport_kstat_info->ks_data = ks_tgt;
2910 
2911         kstat_named_init(&ks_tgt->i_tgt_name, "target-name",
2912             KSTAT_DATA_STRING);
2913         kstat_named_init(&ks_tgt->i_tgt_alias, "target-alias",
2914             KSTAT_DATA_STRING);
2915         kstat_named_init(&ks_tgt->i_protocol, "protocol",
2916             KSTAT_DATA_STRING);
2917 
2918         /* ident might not be null terminated */
2919         len = ilport->ilport_lport->lport_id->ident_length;
2920         bcopy(ilport->ilport_lport->lport_id->ident,
2921             ilport->ilport_kstat_tgt_name, len);
2922         ilport->ilport_kstat_tgt_name[len + 1] = NULL;
2923         kstat_named_setstr(&ks_tgt->i_tgt_name,
2924             (const char *)ilport->ilport_kstat_tgt_name);
2925         kstat_named_setstr(&ks_tgt->i_tgt_alias,
2926             (const char *)ilport->ilport_lport->lport_alias);
2927         /* protocol */
2928         if ((id = ilport->ilport_lport->lport_id->protocol_id) > PROTOCOL_ANY) {
2929                 cmn_err(CE_WARN, "STMF: protocol_id out of bound");
2930                 id = PROTOCOL_ANY;
2931         }
2932         kstat_named_setstr(&ks_tgt->i_protocol, protocol_ident[id]);
2933         kstat_install(ilport->ilport_kstat_info);
2934 
2935         /* create kstat lport io */
2936         bzero(ks_nm, sizeof (ks_nm));
2937         (void) sprintf(ks_nm, "stmf_tgt_io_%"PRIxPTR"", (uintptr_t)ilport);
2938         if ((ilport->ilport_kstat_io = kstat_create(STMF_MODULE_NAME, 0,
2939             ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
2940                 cmn_err(CE_WARN, "STMF: kstat_create target_io failed");
2941                 return;
2942         }
2943         mutex_init(&ilport->ilport_kstat_lock, NULL, MUTEX_DRIVER, 0);
2944         ilport->ilport_kstat_io->ks_lock = &ilport->ilport_kstat_lock;
2945         kstat_install(ilport->ilport_kstat_io);
2946 }
2947 
2948 /*
2949  * set the asymmetric access state for a logical unit
2950  * caller is responsible for establishing SCSI unit attention on
2951  * state change
2952  */
2953 stmf_status_t
2954 stmf_set_lu_access(stmf_lu_t *lu, uint8_t access_state)
2955 {
2956         stmf_i_lu_t *ilu;
2957         uint8_t *p1, *p2;
2958 
2959         if ((access_state != STMF_LU_STANDBY) &&
2960             (access_state != STMF_LU_ACTIVE)) {
2961                 return (STMF_INVALID_ARG);
2962         }
2963 
2964         p1 = &lu->lu_id->ident[0];
2965         mutex_enter(&stmf_state.stmf_lock);
2966         if (stmf_state.stmf_inventory_locked) {
2967                 mutex_exit(&stmf_state.stmf_lock);
2968                 return (STMF_BUSY);
2969         }
2970 
2971         for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
2972                 p2 = &ilu->ilu_lu->lu_id->ident[0];
2973                 if (bcmp(p1, p2, 16) == 0) {
2974                         break;
2975                 }
2976         }
2977 
2978         if (!ilu) {
2979                 ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
2980         } else {
2981                 /*
2982                  * We're changing access state on an existing logical unit
2983                  * Send the proxy registration message for this logical unit
2984                  * if we're in alua mode.
2985                  * If the requested state is STMF_LU_ACTIVE, we want to register
2986                  * this logical unit.
2987                  * If the requested state is STMF_LU_STANDBY, we're going to
2988                  * abort all tasks for this logical unit.
2989                  */
2990                 if (stmf_state.stmf_alua_state == 1 &&
2991                     access_state == STMF_LU_ACTIVE) {
2992                         stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
2993                         stmf_ic_msg_t *ic_reg_lun;
2994                         if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
2995                             lu->lu_lp->lp_alua_support) {
2996                                 ilu->ilu_alua = 1;
2997                                 /* allocate the register message */
2998                                 ic_reg_lun = ic_lun_active_msg_alloc(p1,
2999                                     lu->lu_lp->lp_name,
3000                                     lu->lu_proxy_reg_arg_len,
3001                                     (uint8_t *)lu->lu_proxy_reg_arg,
3002                                     stmf_proxy_msg_id);
3003                                 /* send the message */
3004                                 if (ic_reg_lun) {
3005                                         ic_ret = ic_tx_msg(ic_reg_lun);
3006                                         if (ic_ret == STMF_IC_MSG_SUCCESS) {
3007                                                 stmf_proxy_msg_id++;
3008                                         }
3009                                 }
3010                         }
3011                 } else if (stmf_state.stmf_alua_state == 1 &&
3012                     access_state == STMF_LU_STANDBY) {
3013                         /* abort all tasks for this lu */
3014                         stmf_task_lu_killall(lu, NULL, STMF_ABORTED);
3015                 }
3016         }
3017 
3018         ilu->ilu_access = access_state;
3019 
3020         mutex_exit(&stmf_state.stmf_lock);
3021         return (STMF_SUCCESS);
3022 }
3023 
3024 
3025 stmf_status_t
3026 stmf_register_lu(stmf_lu_t *lu)
3027 {
3028         stmf_i_lu_t *ilu;
3029         uint8_t *p1, *p2;
3030         stmf_state_change_info_t ssci;
3031         stmf_id_data_t *luid;
3032 
3033         if ((lu->lu_id->ident_type != ID_TYPE_NAA) ||
3034             (lu->lu_id->ident_length != 16) ||
3035             ((lu->lu_id->ident[0] & 0xf0) != 0x60)) {
3036                 return (STMF_INVALID_ARG);
3037         }
3038         p1 = &lu->lu_id->ident[0];
3039         mutex_enter(&stmf_state.stmf_lock);
3040         if (stmf_state.stmf_inventory_locked) {
3041                 mutex_exit(&stmf_state.stmf_lock);
3042                 return (STMF_BUSY);
3043         }
3044 
3045         for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
3046                 p2 = &ilu->ilu_lu->lu_id->ident[0];
3047                 if (bcmp(p1, p2, 16) == 0) {
3048                         mutex_exit(&stmf_state.stmf_lock);
3049                         return (STMF_ALREADY);
3050                 }
3051         }
3052 
3053         ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
3054         luid = stmf_lookup_id(&stmf_state.stmf_luid_list,
3055             lu->lu_id->ident_length, lu->lu_id->ident);
3056         if (luid) {
3057                 luid->id_pt_to_object = (void *)ilu;
3058                 ilu->ilu_luid = luid;
3059         }
3060         ilu->ilu_alias = NULL;
3061 
3062         ilu->ilu_next = stmf_state.stmf_ilulist;
3063         ilu->ilu_prev = NULL;
3064         if (ilu->ilu_next)
3065                 ilu->ilu_next->ilu_prev = ilu;
3066         stmf_state.stmf_ilulist = ilu;
3067         stmf_state.stmf_nlus++;
3068         if (lu->lu_lp) {
3069                 ((stmf_i_lu_provider_t *)
3070                     (lu->lu_lp->lp_stmf_private))->ilp_nlus++;
3071         }
3072         ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
3073         STMF_EVENT_ALLOC_HANDLE(ilu->ilu_event_hdl);
3074         cv_init(&ilu->ilu_offline_pending_cv, NULL, CV_DRIVER, NULL);
3075         stmf_create_kstat_lu(ilu);
3076         /*
3077          * register with proxy module if available and logical unit
3078          * is in active state
3079          */
3080         if (stmf_state.stmf_alua_state == 1 &&
3081             ilu->ilu_access == STMF_LU_ACTIVE) {
3082                 stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
3083                 stmf_ic_msg_t *ic_reg_lun;
3084                 if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
3085                     lu->lu_lp->lp_alua_support) {
3086                         ilu->ilu_alua = 1;
3087                         /* allocate the register message */
3088                         ic_reg_lun = ic_reg_lun_msg_alloc(p1,
3089                             lu->lu_lp->lp_name, lu->lu_proxy_reg_arg_len,
3090                             (uint8_t *)lu->lu_proxy_reg_arg, stmf_proxy_msg_id);
3091                         /* send the message */
3092                         if (ic_reg_lun) {
3093                                 ic_ret = ic_tx_msg(ic_reg_lun);
3094                                 if (ic_ret == STMF_IC_MSG_SUCCESS) {
3095                                         stmf_proxy_msg_id++;
3096                                 }
3097                         }
3098                 }
3099         }
3100         mutex_exit(&stmf_state.stmf_lock);
3101 
3102         /*  check the default state for lu */
3103         if (stmf_state.stmf_default_lu_state == STMF_STATE_OFFLINE) {
3104                 ilu->ilu_prev_state = STMF_STATE_OFFLINE;
3105         } else {
3106                 ilu->ilu_prev_state = STMF_STATE_ONLINE;
3107                 if (stmf_state.stmf_service_running) {
3108                         ssci.st_rflags = 0;
3109                         ssci.st_additional_info = NULL;
3110                         (void) stmf_ctl(STMF_CMD_LU_ONLINE, lu, &ssci);
3111                 }
3112         }
3113 
3114         /* XXX: Generate event */
3115         return (STMF_SUCCESS);
3116 }
3117 
3118 stmf_status_t
3119 stmf_deregister_lu(stmf_lu_t *lu)
3120 {
3121         stmf_i_lu_t *ilu;
3122 
3123         mutex_enter(&stmf_state.stmf_lock);
3124         if (stmf_state.stmf_inventory_locked) {
3125                 mutex_exit(&stmf_state.stmf_lock);
3126                 return (STMF_BUSY);
3127         }
3128         ilu = stmf_lookup_lu(lu);
3129         if (ilu == NULL) {
3130                 mutex_exit(&stmf_state.stmf_lock);
3131                 return (STMF_INVALID_ARG);
3132         }
3133         if (ilu->ilu_state == STMF_STATE_OFFLINE) {
3134                 ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free);
3135                 while (ilu->ilu_flags & ILU_STALL_DEREGISTER) {
3136                         cv_wait(&stmf_state.stmf_cv, &stmf_state.stmf_lock);
3137                 }
3138                 if (ilu->ilu_ntasks) {
3139                         stmf_i_scsi_task_t *itask, *nitask;
3140 
3141                         nitask = ilu->ilu_tasks;
3142                         do {
3143                                 itask = nitask;
3144                                 nitask = itask->itask_lu_next;
3145                                 lu->lu_task_free(itask->itask_task);
3146                                 stmf_free(itask->itask_task);
3147                         } while (nitask != NULL);
3148 
3149                         ilu->ilu_tasks = ilu->ilu_free_tasks = NULL;
3150                         ilu->ilu_ntasks = ilu->ilu_ntasks_free = 0;
3151                 }
3152                 /* de-register with proxy if available */
3153                 if (ilu->ilu_access == STMF_LU_ACTIVE &&
3154                     stmf_state.stmf_alua_state == 1) {
3155                         /* de-register with proxy module */
3156                         stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
3157                         stmf_ic_msg_t *ic_dereg_lun;
3158                         if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
3159                             lu->lu_lp->lp_alua_support) {
3160                                 ilu->ilu_alua = 1;
3161                                 /* allocate the de-register message */
3162                                 ic_dereg_lun = ic_dereg_lun_msg_alloc(
3163                                     lu->lu_id->ident, lu->lu_lp->lp_name, 0,
3164                                     NULL, stmf_proxy_msg_id);
3165                                 /* send the message */
3166                                 if (ic_dereg_lun) {
3167                                         ic_ret = ic_tx_msg(ic_dereg_lun);
3168                                         if (ic_ret == STMF_IC_MSG_SUCCESS) {
3169                                                 stmf_proxy_msg_id++;
3170                                         }
3171                                 }
3172                         }
3173                 }
3174 
3175                 if (ilu->ilu_next)
3176                         ilu->ilu_next->ilu_prev = ilu->ilu_prev;
3177                 if (ilu->ilu_prev)
3178                         ilu->ilu_prev->ilu_next = ilu->ilu_next;
3179                 else
3180                         stmf_state.stmf_ilulist = ilu->ilu_next;
3181                 stmf_state.stmf_nlus--;
3182 
3183                 if (ilu == stmf_state.stmf_svc_ilu_draining) {
3184                         stmf_state.stmf_svc_ilu_draining = ilu->ilu_next;
3185                 }
3186                 if (ilu == stmf_state.stmf_svc_ilu_timing) {
3187                         stmf_state.stmf_svc_ilu_timing = ilu->ilu_next;
3188                 }
3189                 if (lu->lu_lp) {
3190                         ((stmf_i_lu_provider_t *)
3191                             (lu->lu_lp->lp_stmf_private))->ilp_nlus--;
3192                 }
3193                 if (ilu->ilu_luid) {
3194                         ((stmf_id_data_t *)ilu->ilu_luid)->id_pt_to_object =
3195                             NULL;
3196                         ilu->ilu_luid = NULL;
3197                 }
3198                 STMF_EVENT_FREE_HANDLE(ilu->ilu_event_hdl);
3199         } else {
3200                 mutex_exit(&stmf_state.stmf_lock);
3201                 return (STMF_BUSY);
3202         }
3203         if (ilu->ilu_kstat_info) {
3204                 kmem_free(ilu->ilu_kstat_info->ks_data,
3205                     ilu->ilu_kstat_info->ks_data_size);
3206                 kstat_delete(ilu->ilu_kstat_info);
3207         }
3208         if (ilu->ilu_kstat_io) {
3209                 kstat_delete(ilu->ilu_kstat_io);
3210                 mutex_destroy(&ilu->ilu_kstat_lock);
3211         }
3212         stmf_delete_itl_kstat_by_guid(ilu->ilu_ascii_hex_guid);
3213         cv_destroy(&ilu->ilu_offline_pending_cv);
3214         mutex_exit(&stmf_state.stmf_lock);
3215         return (STMF_SUCCESS);
3216 }
3217 
3218 void
3219 stmf_set_port_standby(stmf_local_port_t *lport, uint16_t rtpid)
3220 {
3221         stmf_i_local_port_t *ilport =
3222             (stmf_i_local_port_t *)lport->lport_stmf_private;
3223         ilport->ilport_rtpid = rtpid;
3224         ilport->ilport_standby = 1;
3225 }
3226 
3227 void
3228 stmf_set_port_alua(stmf_local_port_t *lport)
3229 {
3230         stmf_i_local_port_t *ilport =
3231             (stmf_i_local_port_t *)lport->lport_stmf_private;
3232         ilport->ilport_alua = 1;
3233 }
3234 
3235 stmf_status_t
3236 stmf_register_local_port(stmf_local_port_t *lport)
3237 {
3238         stmf_i_local_port_t *ilport;
3239         stmf_state_change_info_t ssci;
3240         int start_workers = 0;
3241 
3242         mutex_enter(&stmf_state.stmf_lock);
3243         if (stmf_state.stmf_inventory_locked) {
3244                 mutex_exit(&stmf_state.stmf_lock);
3245                 return (STMF_BUSY);
3246         }
3247         ilport = (stmf_i_local_port_t *)lport->lport_stmf_private;
3248         rw_init(&ilport->ilport_lock, NULL, RW_DRIVER, NULL);
3249 
3250         ilport->ilport_instance =
3251             id_alloc_nosleep(stmf_state.stmf_ilport_inst_space);
3252         if (ilport->ilport_instance == -1) {
3253                 mutex_exit(&stmf_state.stmf_lock);
3254                 return (STMF_FAILURE);
3255         }
3256         ilport->ilport_next = stmf_state.stmf_ilportlist;
3257         ilport->ilport_prev = NULL;
3258         if (ilport->ilport_next)
3259                 ilport->ilport_next->ilport_prev = ilport;
3260         stmf_state.stmf_ilportlist = ilport;
3261         stmf_state.stmf_nlports++;
3262         if (lport->lport_pp) {
3263                 ((stmf_i_port_provider_t *)
3264                     (lport->lport_pp->pp_stmf_private))->ipp_npps++;
3265         }
3266         ilport->ilport_tg =
3267             stmf_lookup_group_for_target(lport->lport_id->ident,
3268             lport->lport_id->ident_length);
3269 
3270         /*
3271          * rtpid will/must be set if this is a standby port
3272          * only register ports that are not standby (proxy) ports
3273          * and ports that are alua participants (ilport_alua == 1)
3274          */
3275         if (ilport->ilport_standby == 0) {
3276                 ilport->ilport_rtpid = atomic_add_16_nv(&stmf_rtpid_counter, 1);
3277         }
3278 
3279         if (stmf_state.stmf_alua_state == 1 &&
3280             ilport->ilport_standby == 0 &&
3281             ilport->ilport_alua == 1) {
3282                 stmf_ic_msg_t *ic_reg_port;
3283                 stmf_ic_msg_status_t ic_ret;
3284                 stmf_local_port_t *lport;
3285                 lport = ilport->ilport_lport;
3286                 ic_reg_port = ic_reg_port_msg_alloc(
3287                     lport->lport_id, ilport->ilport_rtpid,
3288                     0, NULL, stmf_proxy_msg_id);
3289                 if (ic_reg_port) {
3290                         ic_ret = ic_tx_msg(ic_reg_port);
3291                         if (ic_ret == STMF_IC_MSG_SUCCESS) {
3292                                 ilport->ilport_reg_msgid = stmf_proxy_msg_id++;
3293                         } else {
3294                                 cmn_err(CE_WARN, "error on port registration "
3295                                 "port - %s", ilport->ilport_kstat_tgt_name);
3296                         }
3297                 }
3298         }
3299         STMF_EVENT_ALLOC_HANDLE(ilport->ilport_event_hdl);
3300         stmf_create_kstat_lport(ilport);
3301         if (stmf_workers_state == STMF_WORKERS_DISABLED) {
3302                 stmf_workers_state = STMF_WORKERS_ENABLING;
3303                 start_workers = 1;
3304         }
3305         mutex_exit(&stmf_state.stmf_lock);
3306 
3307         if (start_workers)
3308                 stmf_worker_init();
3309 
3310         /*  the default state of LPORT */
3311 
3312         if (stmf_state.stmf_default_lport_state == STMF_STATE_OFFLINE) {
3313                 ilport->ilport_prev_state = STMF_STATE_OFFLINE;
3314         } else {
3315                 ilport->ilport_prev_state = STMF_STATE_ONLINE;
3316                 if (stmf_state.stmf_service_running) {
3317                         ssci.st_rflags = 0;
3318                         ssci.st_additional_info = NULL;
3319                         (void) stmf_ctl(STMF_CMD_LPORT_ONLINE, lport, &ssci);
3320                 }
3321         }
3322 
3323         /* XXX: Generate event */
3324         return (STMF_SUCCESS);
3325 }
3326 
3327 stmf_status_t
3328 stmf_deregister_local_port(stmf_local_port_t *lport)
3329 {
3330         stmf_i_local_port_t *ilport;
3331 
3332         mutex_enter(&stmf_state.stmf_lock);
3333         if (stmf_state.stmf_inventory_locked) {
3334                 mutex_exit(&stmf_state.stmf_lock);
3335                 return (STMF_BUSY);
3336         }
3337 
3338         /* dequeue all object requests from active queue */
3339         stmf_svc_kill_obj_requests(lport);
3340 
3341         ilport = (stmf_i_local_port_t *)lport->lport_stmf_private;
3342 
3343         /*
3344          * deregister ports that are not standby (proxy)
3345          */
3346         if (stmf_state.stmf_alua_state == 1 &&
3347             ilport->ilport_standby == 0 &&
3348             ilport->ilport_alua == 1) {
3349                 stmf_ic_msg_t *ic_dereg_port;
3350                 stmf_ic_msg_status_t ic_ret;
3351                 ic_dereg_port = ic_dereg_port_msg_alloc(
3352                     lport->lport_id, 0, NULL, stmf_proxy_msg_id);
3353                 if (ic_dereg_port) {
3354                         ic_ret = ic_tx_msg(ic_dereg_port);
3355                         if (ic_ret == STMF_IC_MSG_SUCCESS) {
3356                                 stmf_proxy_msg_id++;
3357                         }
3358                 }
3359         }
3360 
3361         if (ilport->ilport_nsessions == 0) {
3362                 if (ilport->ilport_next)
3363                         ilport->ilport_next->ilport_prev = ilport->ilport_prev;
3364                 if (ilport->ilport_prev)
3365                         ilport->ilport_prev->ilport_next = ilport->ilport_next;
3366                 else
3367                         stmf_state.stmf_ilportlist = ilport->ilport_next;
3368                 id_free(stmf_state.stmf_ilport_inst_space,
3369                     ilport->ilport_instance);
3370                 rw_destroy(&ilport->ilport_lock);
3371                 stmf_state.stmf_nlports--;
3372                 if (lport->lport_pp) {
3373                         ((stmf_i_port_provider_t *)
3374                             (lport->lport_pp->pp_stmf_private))->ipp_npps--;
3375                 }
3376                 ilport->ilport_tg = NULL;
3377                 STMF_EVENT_FREE_HANDLE(ilport->ilport_event_hdl);
3378         } else {
3379                 mutex_exit(&stmf_state.stmf_lock);
3380                 return (STMF_BUSY);
3381         }
3382         if (ilport->ilport_kstat_info) {
3383                 kmem_free(ilport->ilport_kstat_info->ks_data,
3384                     ilport->ilport_kstat_info->ks_data_size);
3385                 kstat_delete(ilport->ilport_kstat_info);
3386         }
3387         if (ilport->ilport_kstat_io) {
3388                 kstat_delete(ilport->ilport_kstat_io);
3389                 mutex_destroy(&ilport->ilport_kstat_lock);
3390         }
3391         stmf_delete_itl_kstat_by_lport(ilport->ilport_kstat_tgt_name);
3392         mutex_exit(&stmf_state.stmf_lock);
3393         return (STMF_SUCCESS);
3394 }
3395 
3396 /*
3397  * Rport id/instance mappings remain valid until STMF is unloaded
3398  */
3399 static int
3400 stmf_irport_compare(const void *void_irport1, const void *void_irport2)
3401 {
3402         const   stmf_i_remote_port_t    *irport1 = void_irport1;
3403         const   stmf_i_remote_port_t    *irport2 = void_irport2;
3404         int                     result;
3405 
3406         /* Sort by code set then ident */
3407         if (irport1->irport_id->code_set <
3408             irport2->irport_id->code_set) {
3409                 return (-1);
3410         } else if (irport1->irport_id->code_set >
3411             irport2->irport_id->code_set) {
3412                 return (1);
3413         }
3414 
3415         /* Next by ident length */
3416         if (irport1->irport_id->ident_length <
3417             irport2->irport_id->ident_length) {
3418                 return (-1);
3419         } else if (irport1->irport_id->ident_length >
3420             irport2->irport_id->ident_length) {
3421                 return (1);
3422         }
3423 
3424         /* Code set and ident length both match, now compare idents */
3425         result = memcmp(irport1->irport_id->ident,
3426             irport2->irport_id->ident,
3427             irport1->irport_id->ident_length);
3428 
3429         if (result < 0) {
3430                 return (-1);
3431         } else if (result > 0) {
3432                 return (1);
3433         }
3434 
3435         return (0);
3436 }
3437 
3438 static stmf_i_remote_port_t *
3439 stmf_irport_create(scsi_devid_desc_t *rport_devid)
3440 {
3441         int                     alloc_len;
3442         stmf_i_remote_port_t    *irport;
3443 
3444         /*
3445          * Lookup will bump the refcnt if there's an existing rport
3446          * context for this identifier.
3447          */
3448         ASSERT(mutex_owned(&stmf_state.stmf_lock));
3449 
3450         alloc_len = sizeof (*irport) + sizeof (scsi_devid_desc_t) +
3451             rport_devid->ident_length - 1;
3452         irport = kmem_zalloc(alloc_len, KM_NOSLEEP);
3453         if (irport == NULL) {
3454                 return (NULL);
3455         }
3456 
3457         irport->irport_instance =
3458             id_alloc_nosleep(stmf_state.stmf_irport_inst_space);
3459         if (irport->irport_instance == -1) {
3460                 kmem_free(irport, alloc_len);
3461                 return (NULL);
3462         }
3463 
3464         irport->irport_id =
3465             (struct scsi_devid_desc *)(irport + 1); /* Ptr. Arith. */
3466         bcopy(rport_devid, irport->irport_id,
3467             sizeof (scsi_devid_desc_t) + rport_devid->ident_length - 1);
3468         irport->irport_refcnt = 1;
3469         mutex_init(&irport->irport_mutex, NULL, MUTEX_DEFAULT, NULL);
3470 
3471         return (irport);
3472 }
3473 
3474 static void
3475 stmf_irport_destroy(stmf_i_remote_port_t *irport)
3476 {
3477         id_free(stmf_state.stmf_irport_inst_space, irport->irport_instance);
3478         mutex_destroy(&irport->irport_mutex);
3479         kmem_free(irport, sizeof (*irport) + sizeof (scsi_devid_desc_t) +
3480             irport->irport_id->ident_length - 1);
3481 }
3482 
3483 static stmf_i_remote_port_t *
3484 stmf_irport_register(scsi_devid_desc_t *rport_devid)
3485 {
3486         stmf_i_remote_port_t    *irport;
3487 
3488         mutex_enter(&stmf_state.stmf_lock);
3489 
3490         /*
3491          * Lookup will bump the refcnt if there's an existing rport
3492          * context for this identifier.
3493          */
3494         if ((irport = stmf_irport_lookup_locked(rport_devid)) != NULL) {
3495                 mutex_exit(&stmf_state.stmf_lock);
3496                 return (irport);
3497         }
3498 
3499         irport = stmf_irport_create(rport_devid);
3500         if (irport == NULL) {
3501                 mutex_exit(&stmf_state.stmf_lock);
3502                 return (NULL);
3503         }
3504 
3505         avl_add(&stmf_state.stmf_irportlist, irport);
3506         mutex_exit(&stmf_state.stmf_lock);
3507 
3508         return (irport);
3509 }
3510 
3511 static stmf_i_remote_port_t *
3512 stmf_irport_lookup_locked(scsi_devid_desc_t *rport_devid)
3513 {
3514         stmf_i_remote_port_t    *irport;
3515         stmf_i_remote_port_t    tmp_irport;
3516 
3517         ASSERT(mutex_owned(&stmf_state.stmf_lock));
3518         tmp_irport.irport_id = rport_devid;
3519         irport = avl_find(&stmf_state.stmf_irportlist, &tmp_irport, NULL);
3520         if (irport != NULL) {
3521                 mutex_enter(&irport->irport_mutex);
3522                 irport->irport_refcnt++;
3523                 mutex_exit(&irport->irport_mutex);
3524         }
3525 
3526         return (irport);
3527 }
3528 
3529 static void
3530 stmf_irport_deregister(stmf_i_remote_port_t *irport)
3531 {
3532         /*
3533          * If we were actually going to remove unreferenced remote ports
3534          * we would want to acquire stmf_state.stmf_lock before getting
3535          * the irport mutex.
3536          *
3537          * Instead we're just going to leave it there even if unreferenced.
3538          */
3539         mutex_enter(&irport->irport_mutex);
3540         irport->irport_refcnt--;
3541         mutex_exit(&irport->irport_mutex);
3542 }
3543 
3544 /*
3545  * Port provider has to make sure that register/deregister session and
3546  * port are serialized calls.
3547  */
3548 stmf_status_t
3549 stmf_register_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
3550 {
3551         stmf_i_scsi_session_t *iss;
3552         stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
3553             lport->lport_stmf_private;
3554         uint8_t         lun[8];
3555 
3556         /*
3557          * Port state has to be online to register a scsi session. It is
3558          * possible that we started an offline operation and a new SCSI
3559          * session started at the same time (in that case also we are going
3560          * to fail the registeration). But any other state is simply
3561          * a bad port provider implementation.
3562          */
3563         if (ilport->ilport_state != STMF_STATE_ONLINE) {
3564                 if (ilport->ilport_state != STMF_STATE_OFFLINING) {
3565                         stmf_trace(lport->lport_alias, "Port is trying to "
3566                             "register a session while the state is neither "
3567                             "online nor offlining");
3568                 }
3569                 return (STMF_FAILURE);
3570         }
3571         bzero(lun, 8);
3572         iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
3573         if ((iss->iss_irport = stmf_irport_register(ss->ss_rport_id)) == NULL) {
3574                 stmf_trace(lport->lport_alias, "Could not register "
3575                     "remote port during session registration");
3576                 return (STMF_FAILURE);
3577         }
3578 
3579         iss->iss_flags |= ISS_BEING_CREATED;
3580 
3581         if (ss->ss_rport == NULL) {
3582                 iss->iss_flags |= ISS_NULL_TPTID;
3583                 ss->ss_rport = stmf_scsilib_devid_to_remote_port(
3584                     ss->ss_rport_id);
3585                 if (ss->ss_rport == NULL) {
3586                         iss->iss_flags &= ~(ISS_NULL_TPTID | ISS_BEING_CREATED);
3587                         stmf_trace(lport->lport_alias, "Device id to "
3588                             "remote port conversion failed");
3589                         return (STMF_FAILURE);
3590                 }
3591         } else {
3592                 if (!stmf_scsilib_tptid_validate(ss->ss_rport->rport_tptid,
3593                     ss->ss_rport->rport_tptid_sz, NULL)) {
3594                         iss->iss_flags &= ~ISS_BEING_CREATED;
3595                         stmf_trace(lport->lport_alias, "Remote port "
3596                             "transport id validation failed");
3597                         return (STMF_FAILURE);
3598                 }
3599         }
3600 
3601         /* sessions use the ilport_lock. No separate lock is required */
3602         iss->iss_lockp = &ilport->ilport_lock;
3603 
3604         if (iss->iss_sm != NULL)
3605                 cmn_err(CE_PANIC, "create lun map called with non NULL map");
3606         iss->iss_sm = (stmf_lun_map_t *)kmem_zalloc(sizeof (stmf_lun_map_t),
3607             KM_SLEEP);
3608 
3609         mutex_enter(&stmf_state.stmf_lock);
3610         rw_enter(&ilport->ilport_lock, RW_WRITER);
3611         (void) stmf_session_create_lun_map(ilport, iss);
3612         ilport->ilport_nsessions++;
3613         iss->iss_next = ilport->ilport_ss_list;
3614         ilport->ilport_ss_list = iss;
3615         rw_exit(&ilport->ilport_lock);
3616         mutex_exit(&stmf_state.stmf_lock);
3617 
3618         iss->iss_creation_time = ddi_get_time();
3619         ss->ss_session_id = atomic_add_64_nv(&stmf_session_counter, 1);
3620         iss->iss_flags &= ~ISS_BEING_CREATED;
3621         /* XXX should we remove ISS_LUN_INVENTORY_CHANGED on new session? */
3622         iss->iss_flags &= ~ISS_LUN_INVENTORY_CHANGED;
3623         DTRACE_PROBE2(session__online, stmf_local_port_t *, lport,
3624             stmf_scsi_session_t *, ss);
3625         return (STMF_SUCCESS);
3626 }
3627 
3628 void
3629 stmf_deregister_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
3630 {
3631         stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
3632             lport->lport_stmf_private;
3633         stmf_i_scsi_session_t *iss, **ppss;
3634         int found = 0;
3635         stmf_ic_msg_t *ic_session_dereg;
3636         stmf_status_t ic_ret = STMF_FAILURE;
3637 
3638         DTRACE_PROBE2(session__offline, stmf_local_port_t *, lport,
3639             stmf_scsi_session_t *, ss);
3640 
3641         iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
3642         if (ss->ss_rport_alias) {
3643                 ss->ss_rport_alias = NULL;
3644         }
3645 
3646 try_dereg_ss_again:
3647         mutex_enter(&stmf_state.stmf_lock);
3648         atomic_and_32(&iss->iss_flags,
3649             ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
3650         if (iss->iss_flags & ISS_EVENT_ACTIVE) {
3651                 mutex_exit(&stmf_state.stmf_lock);
3652                 delay(1);
3653                 goto try_dereg_ss_again;
3654         }
3655 
3656         /* dereg proxy session if not standby port */
3657         if (stmf_state.stmf_alua_state == 1 &&
3658             ilport->ilport_standby == 0 &&
3659             ilport->ilport_alua == 1) {
3660                 ic_session_dereg = ic_session_dereg_msg_alloc(
3661                     ss, stmf_proxy_msg_id);
3662                 if (ic_session_dereg) {
3663                         ic_ret = ic_tx_msg(ic_session_dereg);
3664                         if (ic_ret == STMF_IC_MSG_SUCCESS) {
3665                                 stmf_proxy_msg_id++;
3666                         }
3667                 }
3668         }
3669 
3670         rw_enter(&ilport->ilport_lock, RW_WRITER);
3671         for (ppss = &ilport->ilport_ss_list; *ppss != NULL;
3672             ppss = &((*ppss)->iss_next)) {
3673                 if (iss == (*ppss)) {
3674                         *ppss = (*ppss)->iss_next;
3675                         found = 1;
3676                         break;
3677                 }
3678         }
3679         if (!found) {
3680                 cmn_err(CE_PANIC, "Deregister session called for non existent"
3681                     " session");
3682         }
3683         ilport->ilport_nsessions--;
3684 
3685         stmf_irport_deregister(iss->iss_irport);
3686         (void) stmf_session_destroy_lun_map(ilport, iss);
3687         rw_exit(&ilport->ilport_lock);
3688         mutex_exit(&stmf_state.stmf_lock);
3689 
3690         if (iss->iss_flags & ISS_NULL_TPTID) {
3691                 stmf_remote_port_free(ss->ss_rport);
3692         }
3693 }
3694 
3695 stmf_i_scsi_session_t *
3696 stmf_session_id_to_issptr(uint64_t session_id, int stay_locked)
3697 {
3698         stmf_i_local_port_t *ilport;
3699         stmf_i_scsi_session_t *iss;
3700 
3701         mutex_enter(&stmf_state.stmf_lock);
3702         for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
3703             ilport = ilport->ilport_next) {
3704                 rw_enter(&ilport->ilport_lock, RW_WRITER);
3705                 for (iss = ilport->ilport_ss_list; iss != NULL;
3706                     iss = iss->iss_next) {
3707                         if (iss->iss_ss->ss_session_id == session_id) {
3708                                 if (!stay_locked)
3709                                         rw_exit(&ilport->ilport_lock);
3710                                 mutex_exit(&stmf_state.stmf_lock);
3711                                 return (iss);
3712                         }
3713                 }
3714                 rw_exit(&ilport->ilport_lock);
3715         }
3716         mutex_exit(&stmf_state.stmf_lock);
3717         return (NULL);
3718 }
3719 
3720 #define MAX_ALIAS               128
3721 
3722 static int
3723 stmf_itl_kstat_compare(const void *itl_kstat_1, const void *itl_kstat_2)
3724 {
3725         const   stmf_i_itl_kstat_t      *kstat_nm1 = itl_kstat_1;
3726         const   stmf_i_itl_kstat_t      *kstat_nm2 = itl_kstat_2;
3727         int     ret;
3728 
3729         ret = strcmp(kstat_nm1->iitl_kstat_nm, kstat_nm2->iitl_kstat_nm);
3730         if (ret < 0) {
3731                 return (-1);
3732         } else if (ret > 0) {
3733                 return (1);
3734         }
3735         return (0);
3736 }
3737 
3738 static stmf_i_itl_kstat_t *
3739 stmf_itl_kstat_lookup(char *kstat_nm)
3740 {
3741         stmf_i_itl_kstat_t      tmp;
3742         stmf_i_itl_kstat_t      *itl_kstat;
3743 
3744         ASSERT(mutex_owned(&stmf_state.stmf_lock));
3745         (void) strcpy(tmp.iitl_kstat_nm, kstat_nm);
3746         itl_kstat = avl_find(&stmf_state.stmf_itl_kstat_list, &tmp, NULL);
3747         return (itl_kstat);
3748 }
3749 
3750 static void
3751 stmf_delete_itl_kstat_by_lport(char *tgt)
3752 {
3753         stmf_i_itl_kstat_t      *ks_itl, *next;
3754 
3755         ASSERT(mutex_owned(&stmf_state.stmf_lock));
3756         ks_itl = avl_first(&stmf_state.stmf_itl_kstat_list);
3757         for (; ks_itl != NULL; ks_itl = next) {
3758                 next = AVL_NEXT(&stmf_state.stmf_itl_kstat_list, ks_itl);
3759                 if (strcmp(ks_itl->iitl_kstat_lport, tgt) == 0) {
3760                         stmf_teardown_itl_kstats(ks_itl);
3761                         avl_remove(&stmf_state.stmf_itl_kstat_list, ks_itl);
3762                         kmem_free(ks_itl, sizeof (stmf_i_itl_kstat_t));
3763                 }
3764         }
3765 }
3766 
3767 static void
3768 stmf_delete_itl_kstat_by_guid(char *guid)
3769 {
3770         stmf_i_itl_kstat_t      *ks_itl, *next;
3771 
3772         ASSERT(mutex_owned(&stmf_state.stmf_lock));
3773         ks_itl = avl_first(&stmf_state.stmf_itl_kstat_list);
3774         for (; ks_itl != NULL; ks_itl = next) {
3775                 next = AVL_NEXT(&stmf_state.stmf_itl_kstat_list, ks_itl);
3776                 if (strcmp(ks_itl->iitl_kstat_guid, guid) == 0) {
3777                         stmf_teardown_itl_kstats(ks_itl);
3778                         avl_remove(&stmf_state.stmf_itl_kstat_list, ks_itl);
3779                         kmem_free(ks_itl, sizeof (stmf_i_itl_kstat_t));
3780                 }
3781         }
3782 }
3783 
3784 static stmf_i_itl_kstat_t *
3785 stmf_itl_kstat_create(stmf_itl_data_t *itl, char *nm,
3786     scsi_devid_desc_t *lport, scsi_devid_desc_t *lun)
3787 {
3788         stmf_i_itl_kstat_t      *ks_itl;
3789         int                     i, len;
3790 
3791         ASSERT(mutex_owned(&stmf_state.stmf_lock));
3792         if ((ks_itl = stmf_itl_kstat_lookup(nm)) != NULL)
3793                 return (ks_itl);
3794 
3795         len = sizeof (stmf_i_itl_kstat_t);
3796         ks_itl = kmem_zalloc(len, KM_NOSLEEP);
3797         if (ks_itl == NULL)
3798                 return (NULL);
3799 
3800         (void) strcpy(ks_itl->iitl_kstat_nm, nm);
3801         bcopy(lport->ident, ks_itl->iitl_kstat_lport, lport->ident_length);
3802         ks_itl->iitl_kstat_lport[lport->ident_length] = '\0';
3803         for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
3804                 (void) sprintf(&ks_itl->iitl_kstat_guid[i * 2], "%02x",
3805                     lun->ident[i]);
3806         }
3807         ks_itl->iitl_kstat_strbuf = itl->itl_kstat_strbuf;
3808         ks_itl->iitl_kstat_strbuflen = itl->itl_kstat_strbuflen;
3809         ks_itl->iitl_kstat_info = itl->itl_kstat_info;
3810         ks_itl->iitl_kstat_taskq = itl->itl_kstat_taskq;
3811         ks_itl->iitl_kstat_lu_xfer = itl->itl_kstat_lu_xfer;
3812         ks_itl->iitl_kstat_lport_xfer = itl->itl_kstat_lport_xfer;
3813         avl_add(&stmf_state.stmf_itl_kstat_list, ks_itl);
3814 
3815         return (ks_itl);
3816 }
3817 
3818 stmf_status_t
3819 stmf_setup_itl_kstats(stmf_itl_data_t *itl)
3820 {
3821         char                            ks_itl_id[32];
3822         char                            ks_nm[KSTAT_STRLEN];
3823         char                            ks_itl_nm[KSTAT_STRLEN];
3824         stmf_kstat_itl_info_t           *ks_itl;
3825         stmf_scsi_session_t             *ss;
3826         stmf_i_scsi_session_t           *iss;
3827         stmf_i_local_port_t             *ilport;
3828         char                            *strbuf;
3829         int                             id, len, i;
3830         char                            *rport_alias;
3831         char                            *lport_alias;
3832         char                            *lu_alias;
3833         stmf_i_itl_kstat_t              *tmp_kstat;
3834 
3835         /*
3836          * Allocate enough memory in the ITL to hold the relevant
3837          * identifiers.
3838          * rport and lport identifiers come from the stmf_scsi_session_t.
3839          * ident might not be null terminated.
3840          */
3841         ss = itl->itl_session->iss_ss;
3842         iss = ss->ss_stmf_private;
3843         ilport = ss->ss_lport->lport_stmf_private;
3844         (void) snprintf(ks_itl_id, 32, "%d.%d.%d",
3845             iss->iss_irport->irport_instance, ilport->ilport_instance,
3846             itl->itl_lun);
3847 
3848         (void) snprintf(ks_itl_nm, KSTAT_STRLEN, "itl_%s", ks_itl_id);
3849         /*
3850          * let's verify this itl_kstat already exist
3851          */
3852         if ((tmp_kstat = stmf_itl_kstat_lookup(ks_itl_nm)) != NULL) {
3853                 itl->itl_kstat_strbuf = tmp_kstat->iitl_kstat_strbuf;
3854                 itl->itl_kstat_strbuflen = tmp_kstat->iitl_kstat_strbuflen;
3855                 itl->itl_kstat_info = tmp_kstat->iitl_kstat_info;
3856                 itl->itl_kstat_taskq = tmp_kstat->iitl_kstat_taskq;
3857                 itl->itl_kstat_lu_xfer = tmp_kstat->iitl_kstat_lu_xfer;
3858                 itl->itl_kstat_lport_xfer = tmp_kstat->iitl_kstat_lport_xfer;
3859                 return (STMF_SUCCESS);
3860         }
3861 
3862         /* New itl_kstat */
3863         rport_alias = (ss->ss_rport_alias == NULL) ?
3864             "" : ss->ss_rport_alias;
3865         lport_alias = (ss->ss_lport->lport_alias == NULL) ?
3866             "" : ss->ss_lport->lport_alias;
3867         lu_alias = (itl->itl_ilu->ilu_lu->lu_alias == NULL) ?
3868             "" : itl->itl_ilu->ilu_lu->lu_alias;
3869 
3870         itl->itl_kstat_strbuflen = (ss->ss_rport_id->ident_length + 1) +
3871             (strnlen(rport_alias, MAX_ALIAS) + 1) +
3872             (ss->ss_lport->lport_id->ident_length + 1) +
3873             (strnlen(lport_alias, MAX_ALIAS) + 1) +
3874             (STMF_GUID_INPUT + 1) +
3875             (strnlen(lu_alias, MAX_ALIAS) + 1) +
3876             MAX_PROTO_STR_LEN;
3877         itl->itl_kstat_strbuf = kmem_zalloc(itl->itl_kstat_strbuflen,
3878             KM_NOSLEEP);
3879         if (itl->itl_kstat_strbuf == NULL) {
3880                 return (STMF_ALLOC_FAILURE);
3881         }
3882 
3883         ks_itl = (stmf_kstat_itl_info_t *)kmem_zalloc(sizeof (*ks_itl),
3884             KM_NOSLEEP);
3885         if (ks_itl == NULL) {
3886                 kmem_free(itl->itl_kstat_strbuf, itl->itl_kstat_strbuflen);
3887                 return (STMF_ALLOC_FAILURE);
3888         }
3889 
3890         if ((itl->itl_kstat_info = kstat_create(STMF_MODULE_NAME,
3891             0, ks_itl_nm, "misc", KSTAT_TYPE_NAMED,
3892             sizeof (stmf_kstat_itl_info_t) / sizeof (kstat_named_t),
3893             KSTAT_FLAG_VIRTUAL)) == NULL) {
3894                 goto itl_kstat_cleanup;
3895         }
3896 
3897         itl->itl_kstat_info->ks_data_size += itl->itl_kstat_strbuflen;
3898         itl->itl_kstat_info->ks_data = ks_itl;
3899 
3900         kstat_named_init(&ks_itl->i_rport_name, "rport-name",
3901             KSTAT_DATA_STRING);
3902         kstat_named_init(&ks_itl->i_rport_alias, "rport-alias",
3903             KSTAT_DATA_STRING);
3904         kstat_named_init(&ks_itl->i_lport_name, "lport-name",
3905             KSTAT_DATA_STRING);
3906         kstat_named_init(&ks_itl->i_lport_alias, "lport-alias",
3907             KSTAT_DATA_STRING);
3908         kstat_named_init(&ks_itl->i_protocol, "protocol",
3909             KSTAT_DATA_STRING);
3910         kstat_named_init(&ks_itl->i_lu_guid, "lu-guid",
3911             KSTAT_DATA_STRING);
3912         kstat_named_init(&ks_itl->i_lu_alias, "lu-alias",
3913             KSTAT_DATA_STRING);
3914         kstat_named_init(&ks_itl->i_lu_number, "lu-number",
3915             KSTAT_DATA_UINT64);
3916         kstat_named_init(&ks_itl->i_task_waitq_elapsed, "task-waitq-elapsed",
3917             KSTAT_DATA_UINT64);
3918         kstat_named_init(&ks_itl->i_task_read_elapsed, "task-read-elapsed",
3919             KSTAT_DATA_UINT64);
3920         kstat_named_init(&ks_itl->i_task_write_elapsed, "task-write-elapsed",
3921             KSTAT_DATA_UINT64);
3922         kstat_named_init(&ks_itl->i_lu_read_elapsed, "lu-read-elapsed",
3923             KSTAT_DATA_UINT64);
3924         kstat_named_init(&ks_itl->i_lu_write_elapsed, "lu-write-elapsed",
3925             KSTAT_DATA_UINT64);
3926         kstat_named_init(&ks_itl->i_lport_read_elapsed, "lport-read-elapsed",
3927             KSTAT_DATA_UINT64);
3928         kstat_named_init(&ks_itl->i_lport_write_elapsed, "lport-write-elapsed",
3929             KSTAT_DATA_UINT64);
3930 
3931         strbuf = itl->itl_kstat_strbuf;
3932 
3933         /* Rport */
3934         len = ss->ss_rport_id->ident_length;
3935         bcopy(ss->ss_rport_id->ident, strbuf, len);
3936         strbuf += len;
3937         *strbuf = '\0';
3938         kstat_named_setstr(&ks_itl->i_rport_name, strbuf - len);
3939         strbuf++;
3940 
3941         len = strnlen(rport_alias, MAX_ALIAS);
3942         (void) strncpy(strbuf, rport_alias, len + 1);
3943         kstat_named_setstr(&ks_itl->i_rport_alias, strbuf);
3944         strbuf += len + 1;
3945 
3946         /* Lport */
3947         len = ss->ss_lport->lport_id->ident_length;
3948         bcopy(ss->ss_lport->lport_id->ident, strbuf, len);
3949         strbuf += len;
3950         *strbuf = '\0';
3951         kstat_named_setstr(&ks_itl->i_lport_name, strbuf - len);
3952         strbuf++;
3953 
3954         len = strnlen(lport_alias, MAX_ALIAS);
3955         (void) strncpy(strbuf, lport_alias, len + 1);
3956         kstat_named_setstr(&ks_itl->i_lport_alias, strbuf);
3957         strbuf += len + 1;
3958 
3959         id = (ss->ss_lport->lport_id->protocol_id > PROTOCOL_ANY) ?
3960             PROTOCOL_ANY : ss->ss_lport->lport_id->protocol_id;
3961         kstat_named_setstr(&ks_itl->i_protocol, protocol_ident[id]);
3962 
3963         /* LU */
3964         for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
3965                 (void) sprintf(&strbuf[i * 2], "%02x",
3966                     itl->itl_ilu->ilu_lu->lu_id->ident[i]);
3967         }
3968         kstat_named_setstr(&ks_itl->i_lu_guid, strbuf);
3969         strbuf += STMF_GUID_INPUT + 1;
3970 
3971         len = strnlen(lu_alias, MAX_ALIAS);
3972         (void) strncpy(strbuf, lu_alias, len + 1);
3973         kstat_named_setstr(&ks_itl->i_lu_alias, strbuf);
3974         strbuf += len + 1;
3975 
3976         ks_itl->i_lu_number.value.ui64 = itl->itl_lun;
3977 
3978         /* Now create the I/O kstats */
3979         (void) snprintf(ks_nm, KSTAT_STRLEN, "itl_tasks_%s",  ks_itl_id);
3980         if ((itl->itl_kstat_taskq = kstat_create(STMF_MODULE_NAME, 0,
3981             ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
3982                 goto itl_kstat_cleanup;
3983         }
3984 
3985         (void) snprintf(ks_nm, KSTAT_STRLEN, "itl_lu_%s",  ks_itl_id);
3986         if ((itl->itl_kstat_lu_xfer = kstat_create(STMF_MODULE_NAME, 0,
3987             ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
3988                 goto itl_kstat_cleanup;
3989         }
3990 
3991         (void) snprintf(ks_nm, KSTAT_STRLEN, "itl_lport_%s",  ks_itl_id);
3992         if ((itl->itl_kstat_lport_xfer = kstat_create(STMF_MODULE_NAME, 0,
3993             ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
3994                 goto itl_kstat_cleanup;
3995         }
3996 
3997         /* Install all the kstats */
3998         kstat_install(itl->itl_kstat_info);
3999         kstat_install(itl->itl_kstat_taskq);
4000         kstat_install(itl->itl_kstat_lu_xfer);
4001         kstat_install(itl->itl_kstat_lport_xfer);
4002 
4003         /* Add new itl_kstat to stmf_itl_kstat_list */
4004         if (stmf_itl_kstat_create(itl, ks_itl_nm, ss->ss_lport->lport_id,
4005             itl->itl_ilu->ilu_lu->lu_id) != NULL)
4006                 return (STMF_SUCCESS);
4007 
4008 itl_kstat_cleanup:
4009         if (itl->itl_kstat_taskq)
4010                 kstat_delete(itl->itl_kstat_taskq);
4011         if (itl->itl_kstat_lu_xfer)
4012                 kstat_delete(itl->itl_kstat_lu_xfer);
4013         if (itl->itl_kstat_lport_xfer)
4014                 kstat_delete(itl->itl_kstat_lport_xfer);
4015         if (itl->itl_kstat_info)
4016                 kstat_delete(itl->itl_kstat_info);
4017         kmem_free(ks_itl, sizeof (*ks_itl));
4018         kmem_free(itl->itl_kstat_strbuf, itl->itl_kstat_strbuflen);
4019         cmn_err(CE_WARN, "STMF: kstat_create itl failed");
4020         return (STMF_ALLOC_FAILURE);
4021 }
4022 
4023 static void
4024 stmf_teardown_itl_kstats(stmf_i_itl_kstat_t *ks)
4025 {
4026         kstat_delete(ks->iitl_kstat_lport_xfer);
4027         kstat_delete(ks->iitl_kstat_lu_xfer);
4028         kstat_delete(ks->iitl_kstat_taskq);
4029         kmem_free(ks->iitl_kstat_info->ks_data, sizeof (stmf_kstat_itl_info_t));
4030         kstat_delete(ks->iitl_kstat_info);
4031         kmem_free(ks->iitl_kstat_strbuf, ks->iitl_kstat_strbuflen);
4032 }
4033 
4034 void
4035 stmf_release_itl_handle(stmf_lu_t *lu, stmf_itl_data_t *itl)
4036 {
4037         stmf_itl_data_t **itlpp;
4038         stmf_i_lu_t *ilu;
4039 
4040         ASSERT(itl->itl_flags & STMF_ITL_BEING_TERMINATED);
4041 
4042         ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
4043         mutex_enter(&ilu->ilu_task_lock);
4044         for (itlpp = &ilu->ilu_itl_list; (*itlpp) != NULL;
4045             itlpp = &(*itlpp)->itl_next) {
4046                 if ((*itlpp) == itl)
4047                         break;
4048         }
4049         ASSERT((*itlpp) != NULL);
4050         *itlpp = itl->itl_next;
4051         mutex_exit(&ilu->ilu_task_lock);
4052         lu->lu_abort(lu, STMF_LU_ITL_HANDLE_REMOVED, itl->itl_handle,
4053             (uint32_t)itl->itl_hdlrm_reason);
4054 
4055         kmem_free(itl, sizeof (*itl));
4056 }
4057 
4058 stmf_status_t
4059 stmf_register_itl_handle(stmf_lu_t *lu, uint8_t *lun,
4060     stmf_scsi_session_t *ss, uint64_t session_id, void *itl_handle)
4061 {
4062         stmf_itl_data_t *itl;
4063         stmf_i_scsi_session_t *iss;
4064         stmf_lun_map_ent_t *lun_map_ent;
4065         stmf_i_lu_t *ilu;
4066         uint16_t n;
4067 
4068         ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
4069         if (ss == NULL) {
4070                 iss = stmf_session_id_to_issptr(session_id, 1);
4071                 if (iss == NULL)
4072                         return (STMF_NOT_FOUND);
4073         } else {
4074                 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
4075         }
4076 
4077         /*
4078          * Acquire stmf_lock for stmf_itl_kstat_lookup.
4079          */
4080         mutex_enter(&stmf_state.stmf_lock);
4081         rw_enter(iss->iss_lockp, RW_WRITER);
4082         n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
4083         lun_map_ent = (stmf_lun_map_ent_t *)
4084             stmf_get_ent_from_map(iss->iss_sm, n);
4085         if ((lun_map_ent == NULL) || (lun_map_ent->ent_lu != lu)) {
4086                 rw_exit(iss->iss_lockp);
4087                 mutex_exit(&stmf_state.stmf_lock);
4088                 return (STMF_NOT_FOUND);
4089         }
4090         if (lun_map_ent->ent_itl_datap != NULL) {
4091                 rw_exit(iss->iss_lockp);
4092                 mutex_exit(&stmf_state.stmf_lock);
4093                 return (STMF_ALREADY);
4094         }
4095 
4096         itl = (stmf_itl_data_t *)kmem_zalloc(sizeof (*itl), KM_NOSLEEP);
4097         if (itl == NULL) {
4098                 rw_exit(iss->iss_lockp);
4099                 mutex_exit(&stmf_state.stmf_lock);
4100                 return (STMF_ALLOC_FAILURE);
4101         }
4102 
4103         itl->itl_ilu = ilu;
4104         itl->itl_session = iss;
4105         itl->itl_counter = 1;
4106         itl->itl_lun = n;
4107         itl->itl_handle = itl_handle;
4108 
4109         if (stmf_setup_itl_kstats(itl) != STMF_SUCCESS) {
4110                 kmem_free(itl, sizeof (*itl));
4111                 rw_exit(iss->iss_lockp);
4112                 mutex_exit(&stmf_state.stmf_lock);
4113                 return (STMF_ALLOC_FAILURE);
4114         }
4115 
4116         mutex_enter(&ilu->ilu_task_lock);
4117         itl->itl_next = ilu->ilu_itl_list;
4118         ilu->ilu_itl_list = itl;
4119         mutex_exit(&ilu->ilu_task_lock);
4120         lun_map_ent->ent_itl_datap = itl;
4121         rw_exit(iss->iss_lockp);
4122         mutex_exit(&stmf_state.stmf_lock);
4123 
4124         return (STMF_SUCCESS);
4125 }
4126 
4127 void
4128 stmf_do_itl_dereg(stmf_lu_t *lu, stmf_itl_data_t *itl, uint8_t hdlrm_reason)
4129 {
4130         uint8_t old, new;
4131 
4132         do {
4133                 old = new = itl->itl_flags;
4134                 if (old & STMF_ITL_BEING_TERMINATED)
4135                         return;
4136                 new |= STMF_ITL_BEING_TERMINATED;
4137         } while (atomic_cas_8(&itl->itl_flags, old, new) != old);
4138         itl->itl_hdlrm_reason = hdlrm_reason;
4139 
4140         ASSERT(itl->itl_counter);
4141 
4142         if (atomic_add_32_nv(&itl->itl_counter, -1))
4143                 return;
4144 
4145         drv_usecwait(10);
4146         if (itl->itl_counter)
4147                 return;
4148 
4149         stmf_release_itl_handle(lu, itl);
4150 }
4151 
4152 stmf_status_t
4153 stmf_deregister_all_lu_itl_handles(stmf_lu_t *lu)
4154 {
4155         stmf_i_lu_t *ilu;
4156         stmf_i_local_port_t *ilport;
4157         stmf_i_scsi_session_t *iss;
4158         stmf_lun_map_t *lm;
4159         stmf_lun_map_ent_t *ent;
4160         uint32_t nmaps, nu;
4161         stmf_itl_data_t **itl_list;
4162         int i;
4163 
4164         ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
4165 
4166 dereg_itl_start:;
4167         nmaps = ilu->ilu_ref_cnt;
4168         if (nmaps == 0)
4169                 return (STMF_NOT_FOUND);
4170         itl_list = (stmf_itl_data_t **)kmem_zalloc(
4171             nmaps * sizeof (stmf_itl_data_t *), KM_SLEEP);
4172         mutex_enter(&stmf_state.stmf_lock);
4173         if (nmaps != ilu->ilu_ref_cnt) {
4174                 /* Something changed, start all over */
4175                 mutex_exit(&stmf_state.stmf_lock);
4176                 kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *));
4177                 goto dereg_itl_start;
4178         }
4179         nu = 0;
4180         for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
4181             ilport = ilport->ilport_next) {
4182                 rw_enter(&ilport->ilport_lock, RW_WRITER);
4183                 for (iss = ilport->ilport_ss_list; iss != NULL;
4184                     iss = iss->iss_next) {
4185                         lm = iss->iss_sm;
4186                         if (!lm)
4187                                 continue;
4188                         for (i = 0; i < lm->lm_nentries; i++) {
4189                                 if (lm->lm_plus[i] == NULL)
4190                                         continue;
4191                                 ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
4192                                 if ((ent->ent_lu == lu) &&
4193                                     (ent->ent_itl_datap)) {
4194                                         itl_list[nu++] = ent->ent_itl_datap;
4195                                         ent->ent_itl_datap = NULL;
4196                                         if (nu == nmaps) {
4197                                                 rw_exit(&ilport->ilport_lock);
4198                                                 goto dai_scan_done;
4199                                         }
4200                                 }
4201                         } /* lun table for a session */
4202                 } /* sessions */
4203                 rw_exit(&ilport->ilport_lock);
4204         } /* ports */
4205 
4206 dai_scan_done:
4207         mutex_exit(&stmf_state.stmf_lock);
4208 
4209         for (i = 0; i < nu; i++) {
4210                 stmf_do_itl_dereg(lu, itl_list[i],
4211                     STMF_ITL_REASON_DEREG_REQUEST);
4212         }
4213         kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *));
4214 
4215         return (STMF_SUCCESS);
4216 }
4217 
4218 stmf_status_t
4219 stmf_deregister_itl_handle(stmf_lu_t *lu, uint8_t *lun,
4220     stmf_scsi_session_t *ss, uint64_t session_id, void *itl_handle)
4221 {
4222         stmf_i_scsi_session_t *iss;
4223         stmf_itl_data_t *itl;
4224         stmf_lun_map_ent_t *ent;
4225         stmf_lun_map_t *lm;
4226         int i;
4227         uint16_t n;
4228 
4229         if (ss == NULL) {
4230                 if (session_id == STMF_SESSION_ID_NONE)
4231                         return (STMF_INVALID_ARG);
4232                 iss = stmf_session_id_to_issptr(session_id, 1);
4233                 if (iss == NULL)
4234                         return (STMF_NOT_FOUND);
4235         } else {
4236                 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
4237                 rw_enter(iss->iss_lockp, RW_WRITER);
4238         }
4239         lm = iss->iss_sm;
4240         if (lm == NULL) {
4241                 rw_exit(iss->iss_lockp);
4242                 return (STMF_NOT_FOUND);
4243         }
4244 
4245         if (lun) {
4246                 n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
4247                 ent = (stmf_lun_map_ent_t *)
4248                     stmf_get_ent_from_map(iss->iss_sm, n);
4249         } else {
4250                 if (itl_handle == NULL) {
4251                         rw_exit(iss->iss_lockp);
4252                         return (STMF_INVALID_ARG);
4253                 }
4254                 ent = NULL;
4255                 for (i = 0; i < lm->lm_nentries; i++) {
4256                         if (lm->lm_plus[i] == NULL)
4257                                 continue;
4258                         ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
4259                         if (ent->ent_itl_datap &&
4260                             (ent->ent_itl_datap->itl_handle == itl_handle)) {
4261                                 break;
4262                         }
4263                 }
4264         }
4265         if ((ent == NULL) || (ent->ent_lu != lu) ||
4266             (ent->ent_itl_datap == NULL)) {
4267                 rw_exit(iss->iss_lockp);
4268                 return (STMF_NOT_FOUND);
4269         }
4270         itl = ent->ent_itl_datap;
4271         ent->ent_itl_datap = NULL;
4272         rw_exit(iss->iss_lockp);
4273         stmf_do_itl_dereg(lu, itl, STMF_ITL_REASON_DEREG_REQUEST);
4274 
4275         return (STMF_SUCCESS);
4276 }
4277 
4278 stmf_status_t
4279 stmf_get_itl_handle(stmf_lu_t *lu, uint8_t *lun, stmf_scsi_session_t *ss,
4280     uint64_t session_id, void **itl_handle_retp)
4281 {
4282         stmf_i_scsi_session_t *iss;
4283         stmf_lun_map_ent_t *ent;
4284         stmf_lun_map_t *lm;
4285         stmf_status_t ret;
4286         int i;
4287         uint16_t n;
4288 
4289         if (ss == NULL) {
4290                 iss = stmf_session_id_to_issptr(session_id, 1);
4291                 if (iss == NULL)
4292                         return (STMF_NOT_FOUND);
4293         } else {
4294                 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
4295                 rw_enter(iss->iss_lockp, RW_WRITER);
4296         }
4297 
4298         ent = NULL;
4299         if (lun == NULL) {
4300                 lm = iss->iss_sm;
4301                 for (i = 0; i < lm->lm_nentries; i++) {
4302                         if (lm->lm_plus[i] == NULL)
4303                                 continue;
4304                         ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
4305                         if (ent->ent_lu == lu)
4306                                 break;
4307                 }
4308         } else {
4309                 n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
4310                 ent = (stmf_lun_map_ent_t *)
4311                     stmf_get_ent_from_map(iss->iss_sm, n);
4312                 if (lu && (ent->ent_lu != lu))
4313                         ent = NULL;
4314         }
4315         if (ent && ent->ent_itl_datap) {
4316                 *itl_handle_retp = ent->ent_itl_datap->itl_handle;
4317                 ret = STMF_SUCCESS;
4318         } else {
4319                 ret = STMF_NOT_FOUND;
4320         }
4321 
4322         rw_exit(iss->iss_lockp);
4323         return (ret);
4324 }
4325 
4326 stmf_data_buf_t *
4327 stmf_alloc_dbuf(scsi_task_t *task, uint32_t size, uint32_t *pminsize,
4328     uint32_t flags)
4329 {
4330         stmf_i_scsi_task_t *itask =
4331             (stmf_i_scsi_task_t *)task->task_stmf_private;
4332         stmf_local_port_t *lport = task->task_lport;
4333         stmf_data_buf_t *dbuf;
4334         uint8_t ndx;
4335 
4336         ndx = stmf_first_zero[itask->itask_allocated_buf_map];
4337         if (ndx == 0xff)
4338                 return (NULL);
4339         dbuf = itask->itask_dbufs[ndx] = lport->lport_ds->ds_alloc_data_buf(
4340             task, size, pminsize, flags);
4341         if (dbuf) {
4342                 task->task_cur_nbufs++;
4343                 itask->itask_allocated_buf_map |= (1 << ndx);
4344                 dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
4345                 dbuf->db_handle = ndx;
4346                 return (dbuf);
4347         }
4348 
4349         return (NULL);
4350 }
4351 
4352 stmf_status_t
4353 stmf_setup_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t flags)
4354 {
4355         stmf_i_scsi_task_t *itask =
4356             (stmf_i_scsi_task_t *)task->task_stmf_private;
4357         stmf_local_port_t *lport = task->task_lport;
4358         uint8_t ndx;
4359         stmf_status_t ret;
4360 
4361         ASSERT(task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF);
4362         ASSERT(lport->lport_ds->ds_setup_dbuf != NULL);
4363         ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);
4364 
4365         if ((task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF) == 0)
4366                 return (STMF_FAILURE);
4367         if (lport->lport_ds->ds_setup_dbuf == NULL)
4368                 return (STMF_FAILURE);
4369 
4370         ndx = stmf_first_zero[itask->itask_allocated_buf_map];
4371         if (ndx == 0xff)
4372                 return (STMF_FAILURE);
4373         ret = lport->lport_ds->ds_setup_dbuf(task, dbuf, flags);
4374         if (ret == STMF_FAILURE)
4375                 return (STMF_FAILURE);
4376         itask->itask_dbufs[ndx] = dbuf;
4377         task->task_cur_nbufs++;
4378         itask->itask_allocated_buf_map |= (1 << ndx);
4379         dbuf->db_handle = ndx;
4380 
4381         return (STMF_SUCCESS);
4382 }
4383 
4384 void
4385 stmf_teardown_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf)
4386 {
4387         stmf_i_scsi_task_t *itask =
4388             (stmf_i_scsi_task_t *)task->task_stmf_private;
4389         stmf_local_port_t *lport = task->task_lport;
4390 
4391         ASSERT(task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF);
4392         ASSERT(lport->lport_ds->ds_teardown_dbuf != NULL);
4393         ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);
4394 
4395         itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle);
4396         task->task_cur_nbufs--;
4397         lport->lport_ds->ds_teardown_dbuf(lport->lport_ds, dbuf);
4398 }
4399 
4400 void
4401 stmf_free_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf)
4402 {
4403         stmf_i_scsi_task_t *itask =
4404             (stmf_i_scsi_task_t *)task->task_stmf_private;
4405         stmf_local_port_t *lport = task->task_lport;
4406 
4407         itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle);
4408         task->task_cur_nbufs--;
4409         lport->lport_ds->ds_free_data_buf(lport->lport_ds, dbuf);
4410 }
4411 
4412 stmf_data_buf_t *
4413 stmf_handle_to_buf(scsi_task_t *task, uint8_t h)
4414 {
4415         stmf_i_scsi_task_t *itask;
4416 
4417         itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
4418         if (h > 3)
4419                 return (NULL);
4420         return (itask->itask_dbufs[h]);
4421 }
4422 
4423 /* ARGSUSED */
4424 struct scsi_task *
4425 stmf_task_alloc(struct stmf_local_port *lport, stmf_scsi_session_t *ss,
4426                         uint8_t *lun, uint16_t cdb_length_in, uint16_t ext_id)
4427 {
4428         stmf_lu_t *lu;
4429         stmf_i_scsi_session_t *iss;
4430         stmf_i_lu_t *ilu;
4431         stmf_i_scsi_task_t *itask;
4432         stmf_i_scsi_task_t **ppitask;
4433         scsi_task_t *task;
4434         uint8_t *l;
4435         stmf_lun_map_ent_t *lun_map_ent;
4436         uint16_t cdb_length;
4437         uint16_t luNbr;
4438         uint8_t new_task = 0;
4439 
4440         /*
4441          * We allocate 7 extra bytes for CDB to provide a cdb pointer which
4442          * is guaranteed to be 8 byte aligned. Some LU providers like OSD
4443          * depend upon this alignment.
4444          */
4445         if (cdb_length_in >= 16)
4446                 cdb_length = cdb_length_in + 7;
4447         else
4448                 cdb_length = 16 + 7;
4449         iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
4450         luNbr = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
4451         rw_enter(iss->iss_lockp, RW_READER);
4452         lun_map_ent =
4453             (stmf_lun_map_ent_t *)stmf_get_ent_from_map(iss->iss_sm, luNbr);
4454         if (!lun_map_ent) {
4455                 lu = dlun0;
4456         } else {
4457                 lu = lun_map_ent->ent_lu;
4458         }
4459         ilu = lu->lu_stmf_private;
4460         if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
4461                 rw_exit(iss->iss_lockp);
4462                 return (NULL);
4463         }
4464         ASSERT(lu == dlun0 || (ilu->ilu_state != STMF_STATE_OFFLINING &&
4465             ilu->ilu_state != STMF_STATE_OFFLINE));
4466         do {
4467                 if (ilu->ilu_free_tasks == NULL) {
4468                         new_task = 1;
4469                         break;
4470                 }
4471                 mutex_enter(&ilu->ilu_task_lock);
4472                 for (ppitask = &ilu->ilu_free_tasks; (*ppitask != NULL) &&
4473                     ((*ppitask)->itask_cdb_buf_size < cdb_length);
4474                     ppitask = &((*ppitask)->itask_lu_free_next))
4475                         ;
4476                 if (*ppitask) {
4477                         itask = *ppitask;
4478                         *ppitask = (*ppitask)->itask_lu_free_next;
4479                         ilu->ilu_ntasks_free--;
4480                         if (ilu->ilu_ntasks_free < ilu->ilu_ntasks_min_free)
4481                                 ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
4482                 } else {
4483                         new_task = 1;
4484                 }
4485                 mutex_exit(&ilu->ilu_task_lock);
4486         /* CONSTCOND */
4487         } while (0);
4488 
4489         if (!new_task) {
4490                 /*
4491                  * Save the task_cdb pointer and zero per cmd fields.
4492                  * We know the task_cdb_length is large enough by task
4493                  * selection process above.
4494                  */
4495                 uint8_t *save_cdb;
4496                 uintptr_t t_start, t_end;
4497 
4498                 task = itask->itask_task;
4499                 save_cdb = task->task_cdb;   /* save */
4500                 t_start = (uintptr_t)&task->task_flags;
4501                 t_end = (uintptr_t)&task->task_extended_cmd;
4502                 bzero((void *)t_start, (size_t)(t_end - t_start));
4503                 task->task_cdb = save_cdb;   /* restore */
4504                 itask->itask_ncmds = 0;
4505         } else {
4506                 task = (scsi_task_t *)stmf_alloc(STMF_STRUCT_SCSI_TASK,
4507                     cdb_length, AF_FORCE_NOSLEEP);
4508                 if (task == NULL) {
4509                         rw_exit(iss->iss_lockp);
4510                         return (NULL);
4511                 }
4512                 task->task_lu = lu;
4513                 l = task->task_lun_no;
4514                 l[0] = lun[0];
4515                 l[1] = lun[1];
4516                 l[2] = lun[2];
4517                 l[3] = lun[3];
4518                 l[4] = lun[4];
4519                 l[5] = lun[5];
4520                 l[6] = lun[6];
4521                 l[7] = lun[7];
4522                 task->task_cdb = (uint8_t *)task->task_port_private;
4523                 if ((ulong_t)(task->task_cdb) & 7ul) {
4524                         task->task_cdb = (uint8_t *)(((ulong_t)
4525                             (task->task_cdb) + 7ul) & ~(7ul));
4526                 }
4527                 itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
4528                 itask->itask_cdb_buf_size = cdb_length;
4529                 mutex_init(&itask->itask_audit_mutex, NULL, MUTEX_DRIVER, NULL);
4530         }
4531         task->task_session = ss;
4532         task->task_lport = lport;
4533         task->task_cdb_length = cdb_length_in;
4534         itask->itask_flags = ITASK_IN_TRANSITION;
4535         itask->itask_waitq_time = 0;
4536         itask->itask_lu_read_time = itask->itask_lu_write_time = 0;
4537         itask->itask_lport_read_time = itask->itask_lport_write_time = 0;
4538         itask->itask_read_xfer = itask->itask_write_xfer = 0;
4539         itask->itask_audit_index = 0;
4540 
4541         if (new_task) {
4542                 if (lu->lu_task_alloc(task) != STMF_SUCCESS) {
4543                         rw_exit(iss->iss_lockp);
4544                         stmf_free(task);
4545                         return (NULL);
4546                 }
4547                 mutex_enter(&ilu->ilu_task_lock);
4548                 if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
4549                         mutex_exit(&ilu->ilu_task_lock);
4550                         rw_exit(iss->iss_lockp);
4551                         stmf_free(task);
4552                         return (NULL);
4553                 }
4554                 itask->itask_lu_next = ilu->ilu_tasks;
4555                 if (ilu->ilu_tasks)
4556                         ilu->ilu_tasks->itask_lu_prev = itask;
4557                 ilu->ilu_tasks = itask;
4558                 /* kmem_zalloc automatically makes itask->itask_lu_prev NULL */
4559                 ilu->ilu_ntasks++;
4560                 mutex_exit(&ilu->ilu_task_lock);
4561         }
4562 
4563         itask->itask_ilu_task_cntr = ilu->ilu_cur_task_cntr;
4564         atomic_add_32(itask->itask_ilu_task_cntr, 1);
4565         itask->itask_start_time = ddi_get_lbolt();
4566 
4567         if ((lun_map_ent != NULL) && ((itask->itask_itl_datap =
4568             lun_map_ent->ent_itl_datap) != NULL)) {
4569                 atomic_add_32(&itask->itask_itl_datap->itl_counter, 1);
4570                 task->task_lu_itl_handle = itask->itask_itl_datap->itl_handle;
4571         } else {
4572                 itask->itask_itl_datap = NULL;
4573                 task->task_lu_itl_handle = NULL;
4574         }
4575 
4576         rw_exit(iss->iss_lockp);
4577         return (task);
4578 }
4579 
4580 static void
4581 stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss)
4582 {
4583         stmf_i_scsi_task_t *itask =
4584             (stmf_i_scsi_task_t *)task->task_stmf_private;
4585         stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
4586 
4587         ASSERT(rw_lock_held(iss->iss_lockp));
4588         itask->itask_flags = ITASK_IN_FREE_LIST;
4589         itask->itask_proxy_msg_id = 0;
4590         mutex_enter(&ilu->ilu_task_lock);
4591         itask->itask_lu_free_next = ilu->ilu_free_tasks;
4592         ilu->ilu_free_tasks = itask;
4593         ilu->ilu_ntasks_free++;
4594         if (ilu->ilu_ntasks == ilu->ilu_ntasks_free)
4595                 cv_signal(&ilu->ilu_offline_pending_cv);
4596         mutex_exit(&ilu->ilu_task_lock);
4597         atomic_add_32(itask->itask_ilu_task_cntr, -1);
4598 }
4599 
4600 void
4601 stmf_task_lu_check_freelist(stmf_i_lu_t *ilu)
4602 {
4603         uint32_t        num_to_release, ndx;
4604         stmf_i_scsi_task_t *itask;
4605         stmf_lu_t       *lu = ilu->ilu_lu;
4606 
4607         ASSERT(ilu->ilu_ntasks_min_free <= ilu->ilu_ntasks_free);
4608 
4609         /* free half of the minimal free of the free tasks */
4610         num_to_release = (ilu->ilu_ntasks_min_free + 1) / 2;
4611         if (!num_to_release) {
4612                 return;
4613         }
4614         for (ndx = 0; ndx < num_to_release; ndx++) {
4615                 mutex_enter(&ilu->ilu_task_lock);
4616                 itask = ilu->ilu_free_tasks;
4617                 if (itask == NULL) {
4618                         mutex_exit(&ilu->ilu_task_lock);
4619                         break;
4620                 }
4621                 ilu->ilu_free_tasks = itask->itask_lu_free_next;
4622                 ilu->ilu_ntasks_free--;
4623                 mutex_exit(&ilu->ilu_task_lock);
4624 
4625                 lu->lu_task_free(itask->itask_task);
4626                 mutex_enter(&ilu->ilu_task_lock);
4627                 if (itask->itask_lu_next)
4628                         itask->itask_lu_next->itask_lu_prev =
4629                             itask->itask_lu_prev;
4630                 if (itask->itask_lu_prev)
4631                         itask->itask_lu_prev->itask_lu_next =
4632                             itask->itask_lu_next;
4633                 else
4634                         ilu->ilu_tasks = itask->itask_lu_next;
4635 
4636                 ilu->ilu_ntasks--;
4637                 mutex_exit(&ilu->ilu_task_lock);
4638                 stmf_free(itask->itask_task);
4639         }
4640 }
4641 
4642 /*
4643  * Called with stmf_lock held
4644  */
4645 void
4646 stmf_check_freetask()
4647 {
4648         stmf_i_lu_t *ilu;
4649         clock_t endtime = ddi_get_lbolt() + drv_usectohz(10000);
4650 
4651         /* stmf_svc_ilu_draining may get changed after stmf_lock is released */
4652         while ((ilu = stmf_state.stmf_svc_ilu_draining) != NULL) {
4653                 stmf_state.stmf_svc_ilu_draining = ilu->ilu_next;
4654                 if (!ilu->ilu_ntasks_min_free) {
4655                         ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
4656                         continue;
4657                 }
4658                 ilu->ilu_flags |= ILU_STALL_DEREGISTER;
4659                 mutex_exit(&stmf_state.stmf_lock);
4660                 stmf_task_lu_check_freelist(ilu);
4661                 /*
4662                  * we do not care about the accuracy of
4663                  * ilu_ntasks_min_free, so we don't lock here
4664                  */
4665                 ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
4666                 mutex_enter(&stmf_state.stmf_lock);
4667                 ilu->ilu_flags &= ~ILU_STALL_DEREGISTER;
4668                 cv_broadcast(&stmf_state.stmf_cv);
4669                 if (ddi_get_lbolt() >= endtime)
4670                         break;
4671         }
4672 }
4673 
4674 void
4675 stmf_do_ilu_timeouts(stmf_i_lu_t *ilu)
4676 {
4677         clock_t l = ddi_get_lbolt();
4678         clock_t ps = drv_usectohz(1000000);
4679         stmf_i_scsi_task_t *itask;
4680         scsi_task_t *task;
4681         uint32_t to;
4682 
4683         mutex_enter(&ilu->ilu_task_lock);
4684         for (itask = ilu->ilu_tasks; itask != NULL;
4685             itask = itask->itask_lu_next) {
4686                 if (itask->itask_flags & (ITASK_IN_FREE_LIST |
4687                     ITASK_BEING_ABORTED)) {
4688                         continue;
4689                 }
4690                 task = itask->itask_task;
4691                 if (task->task_timeout == 0)
4692                         to = stmf_default_task_timeout;
4693                 else
4694                         to = task->task_timeout;
4695                 if ((itask->itask_start_time + (to * ps)) > l)
4696                         continue;
4697                 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
4698                     STMF_TIMEOUT, NULL);
4699         }
4700         mutex_exit(&ilu->ilu_task_lock);
4701 }
4702 
4703 /*
4704  * Called with stmf_lock held
4705  */
4706 void
4707 stmf_check_ilu_timing()
4708 {
4709         stmf_i_lu_t *ilu;
4710         clock_t endtime = ddi_get_lbolt() + drv_usectohz(10000);
4711 
4712         /* stmf_svc_ilu_timing may get changed after stmf_lock is released */
4713         while ((ilu = stmf_state.stmf_svc_ilu_timing) != NULL) {
4714                 stmf_state.stmf_svc_ilu_timing = ilu->ilu_next;
4715                 if (ilu->ilu_cur_task_cntr == (&ilu->ilu_task_cntr1)) {
4716                         if (ilu->ilu_task_cntr2 == 0) {
4717                                 ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr2;
4718                                 continue;
4719                         }
4720                 } else {
4721                         if (ilu->ilu_task_cntr1 == 0) {
4722                                 ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
4723                                 continue;
4724                         }
4725                 }
4726                 /*
4727                  * If we are here then it means that there is some slowdown
4728                  * in tasks on this lu. We need to check.
4729                  */
4730                 ilu->ilu_flags |= ILU_STALL_DEREGISTER;
4731                 mutex_exit(&stmf_state.stmf_lock);
4732                 stmf_do_ilu_timeouts(ilu);
4733                 mutex_enter(&stmf_state.stmf_lock);
4734                 ilu->ilu_flags &= ~ILU_STALL_DEREGISTER;
4735                 cv_broadcast(&stmf_state.stmf_cv);
4736                 if (ddi_get_lbolt() >= endtime)
4737                         break;
4738         }
4739 }
4740 
4741 /*
4742  * Kills all tasks on a lu except tm_task
4743  */
4744 void
4745 stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s)
4746 {
4747         stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
4748         stmf_i_scsi_task_t *itask;
4749 
4750         mutex_enter(&ilu->ilu_task_lock);
4751 
4752         for (itask = ilu->ilu_tasks; itask != NULL;
4753             itask = itask->itask_lu_next) {
4754                 if (itask->itask_flags & ITASK_IN_FREE_LIST)
4755                         continue;
4756                 if (itask->itask_task == tm_task)
4757                         continue;
4758                 stmf_abort(STMF_QUEUE_TASK_ABORT, itask->itask_task, s, NULL);
4759         }
4760         mutex_exit(&ilu->ilu_task_lock);
4761 }
4762 
4763 void
4764 stmf_free_task_bufs(stmf_i_scsi_task_t *itask, stmf_local_port_t *lport)
4765 {
4766         int i;
4767         uint8_t map;
4768 
4769         if ((map = itask->itask_allocated_buf_map) == 0)
4770                 return;
4771         for (i = 0; i < 4; i++) {
4772                 if (map & 1) {
4773                         stmf_data_buf_t *dbuf;
4774 
4775                         dbuf = itask->itask_dbufs[i];
4776                         if (dbuf->db_xfer_start_timestamp) {
4777                                 stmf_lport_xfer_done(itask, dbuf);
4778                         }
4779                         if (dbuf->db_flags & DB_LU_DATA_BUF) {
4780                                 /*
4781                                  * LU needs to clean up buffer.
4782                                  * LU is required to free the buffer
4783                                  * in the xfer_done handler.
4784                                  */
4785                                 scsi_task_t *task = itask->itask_task;
4786                                 stmf_lu_t *lu = task->task_lu;
4787 
4788                                 lu->lu_dbuf_free(task, dbuf);
4789                                 ASSERT(((itask->itask_allocated_buf_map>>i)
4790                                     & 1) == 0); /* must be gone */
4791                         } else {
4792                                 ASSERT(dbuf->db_lu_private == NULL);
4793                                 dbuf->db_lu_private = NULL;
4794                                 lport->lport_ds->ds_free_data_buf(
4795                                     lport->lport_ds, dbuf);
4796                         }
4797                 }
4798                 map >>= 1;
4799         }
4800         itask->itask_allocated_buf_map = 0;
4801 }
4802 
4803 void
4804 stmf_task_free(scsi_task_t *task)
4805 {
4806         stmf_local_port_t *lport = task->task_lport;
4807         stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
4808             task->task_stmf_private;
4809         stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
4810             task->task_session->ss_stmf_private;
4811 
4812         stmf_task_audit(itask, TE_TASK_FREE, CMD_OR_IOF_NA, NULL);
4813 
4814         stmf_free_task_bufs(itask, lport);
4815         stmf_itl_task_done(itask);
4816         DTRACE_PROBE2(stmf__task__end, scsi_task_t *, task,
4817             hrtime_t,
4818             itask->itask_done_timestamp - itask->itask_start_timestamp);
4819         if (itask->itask_itl_datap) {
4820                 if (atomic_add_32_nv(&itask->itask_itl_datap->itl_counter,
4821                     -1) == 0) {
4822                         stmf_release_itl_handle(task->task_lu,
4823                             itask->itask_itl_datap);
4824                 }
4825         }
4826 
4827         rw_enter(iss->iss_lockp, RW_READER);
4828         lport->lport_task_free(task);
4829         if (itask->itask_worker) {
4830                 atomic_add_32(&stmf_cur_ntasks, -1);
4831                 atomic_add_32(&itask->itask_worker->worker_ref_count, -1);
4832         }
4833         /*
4834          * After calling stmf_task_lu_free, the task pointer can no longer
4835          * be trusted.
4836          */
4837         stmf_task_lu_free(task, iss);
4838         rw_exit(iss->iss_lockp);
4839 }
4840 
4841 void
4842 stmf_post_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
4843 {
4844         stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
4845             task->task_stmf_private;
4846         stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
4847         int nv;
4848         uint32_t old, new;
4849         uint32_t ct;
4850         stmf_worker_t *w, *w1;
4851         uint8_t tm;
4852 
4853         if (task->task_max_nbufs > 4)
4854                 task->task_max_nbufs = 4;
4855         task->task_cur_nbufs = 0;
4856         /* Latest value of currently running tasks */
4857         ct = atomic_add_32_nv(&stmf_cur_ntasks, 1);
4858 
4859         /* Select the next worker using round robin */
4860         nv = (int)atomic_add_32_nv((uint32_t *)&stmf_worker_sel_counter, 1);
4861         if (nv >= stmf_nworkers_accepting_cmds) {
4862                 int s = nv;
4863                 do {
4864                         nv -= stmf_nworkers_accepting_cmds;
4865                 } while (nv >= stmf_nworkers_accepting_cmds);
4866                 if (nv < 0)
4867                         nv = 0;
4868                 /* Its ok if this cas fails */
4869                 (void) atomic_cas_32((uint32_t *)&stmf_worker_sel_counter,
4870                     s, nv);
4871         }
4872         w = &stmf_workers[nv];
4873 
4874         /*
4875          * A worker can be pinned by interrupt. So select the next one
4876          * if it has lower load.
4877          */
4878         if ((nv + 1) >= stmf_nworkers_accepting_cmds) {
4879                 w1 = stmf_workers;
4880         } else {
4881                 w1 = &stmf_workers[nv + 1];
4882         }
4883         if (w1->worker_queue_depth < w->worker_queue_depth)
4884                 w = w1;
4885 
4886         mutex_enter(&w->worker_lock);
4887         if (((w->worker_flags & STMF_WORKER_STARTED) == 0) ||
4888             (w->worker_flags & STMF_WORKER_TERMINATE)) {
4889                 /*
4890                  * Maybe we are in the middle of a change. Just go to
4891                  * the 1st worker.
4892                  */
4893                 mutex_exit(&w->worker_lock);
4894                 w = stmf_workers;
4895                 mutex_enter(&w->worker_lock);
4896         }
4897         itask->itask_worker = w;
4898         /*
4899          * Track max system load inside the worker as we already have the
4900          * worker lock (no point implementing another lock). The service
4901          * thread will do the comparisons and figure out the max overall
4902          * system load.
4903          */
4904         if (w->worker_max_sys_qdepth_pu < ct)
4905                 w->worker_max_sys_qdepth_pu = ct;
4906 
4907         do {
4908                 old = new = itask->itask_flags;
4909                 new |= ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE;
4910                 if (task->task_mgmt_function) {
4911                         tm = task->task_mgmt_function;
4912                         if ((tm == TM_TARGET_RESET) ||
4913                             (tm == TM_TARGET_COLD_RESET) ||
4914                             (tm == TM_TARGET_WARM_RESET)) {
4915                                 new |= ITASK_DEFAULT_HANDLING;
4916                         }
4917                 } else if (task->task_cdb[0] == SCMD_REPORT_LUNS) {
4918                         new |= ITASK_DEFAULT_HANDLING;
4919                 }
4920                 new &= ~ITASK_IN_TRANSITION;
4921         } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
4922 
4923         stmf_itl_task_start(itask);
4924 
4925         itask->itask_worker_next = NULL;
4926         if (w->worker_task_tail) {
4927                 w->worker_task_tail->itask_worker_next = itask;
4928         } else {
4929                 w->worker_task_head = itask;
4930         }
4931         w->worker_task_tail = itask;
4932         if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
4933                 w->worker_max_qdepth_pu = w->worker_queue_depth;
4934         }
4935         /* Measure task waitq time */
4936         itask->itask_waitq_enter_timestamp = gethrtime();
4937         atomic_add_32(&w->worker_ref_count, 1);
4938         itask->itask_cmd_stack[0] = ITASK_CMD_NEW_TASK;
4939         itask->itask_ncmds = 1;
4940         stmf_task_audit(itask, TE_TASK_START, CMD_OR_IOF_NA, dbuf);
4941         if (dbuf) {
4942                 itask->itask_allocated_buf_map = 1;
4943                 itask->itask_dbufs[0] = dbuf;
4944                 dbuf->db_handle = 0;
4945         } else {
4946                 itask->itask_allocated_buf_map = 0;
4947                 itask->itask_dbufs[0] = NULL;
4948         }
4949 
4950         if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) {
4951                 w->worker_signal_timestamp = gethrtime();
4952                 DTRACE_PROBE2(worker__signal, stmf_worker_t *, w,
4953                     scsi_task_t *, task);
4954                 cv_signal(&w->worker_cv);
4955         }
4956         mutex_exit(&w->worker_lock);
4957 
4958         /*
4959          * This can only happen if during stmf_task_alloc(), ILU_RESET_ACTIVE
4960          * was set between checking of ILU_RESET_ACTIVE and clearing of the
4961          * ITASK_IN_FREE_LIST flag. Take care of these "sneaked-in" tasks here.
4962          */
4963         if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
4964                 stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ABORTED, NULL);
4965         }
4966 }
4967 
4968 static void
4969 stmf_task_audit(stmf_i_scsi_task_t *itask,
4970     task_audit_event_t te, uint32_t cmd_or_iof, stmf_data_buf_t *dbuf)
4971 {
4972         stmf_task_audit_rec_t *ar;
4973 
4974         mutex_enter(&itask->itask_audit_mutex);
4975         ar = &itask->itask_audit_records[itask->itask_audit_index++];
4976         itask->itask_audit_index &= (ITASK_TASK_AUDIT_DEPTH - 1);
4977         ar->ta_event = te;
4978         ar->ta_cmd_or_iof = cmd_or_iof;
4979         ar->ta_itask_flags = itask->itask_flags;
4980         ar->ta_dbuf = dbuf;
4981         gethrestime(&ar->ta_timestamp);
4982         mutex_exit(&itask->itask_audit_mutex);
4983 }
4984 
4985 
4986 /*
4987  * ++++++++++++++ ABORT LOGIC ++++++++++++++++++++
4988  * Once ITASK_BEING_ABORTED is set, ITASK_KNOWN_TO_LU can be reset already
4989  * i.e. before ITASK_BEING_ABORTED being set. But if it was not, it cannot
4990  * be reset until the LU explicitly calls stmf_task_lu_aborted(). Of course
4991  * the LU will make this call only if we call the LU's abort entry point.
4992  * we will only call that entry point if ITASK_KNOWN_TO_LU was set.
4993  *
4994  * Same logic applies for the port.
4995  *
4996  * Also ITASK_BEING_ABORTED will not be allowed to set if both KNOWN_TO_LU
4997  * and KNOWN_TO_TGT_PORT are reset.
4998  *
4999  * +++++++++++++++++++++++++++++++++++++++++++++++
5000  */
5001 
5002 stmf_status_t
5003 stmf_xfer_data(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t ioflags)
5004 {
5005         stmf_status_t ret = STMF_SUCCESS;
5006 
5007         stmf_i_scsi_task_t *itask =
5008             (stmf_i_scsi_task_t *)task->task_stmf_private;
5009 
5010         stmf_task_audit(itask, TE_XFER_START, ioflags, dbuf);
5011 
5012         if (ioflags & STMF_IOF_LU_DONE) {
5013                 uint32_t new, old;
5014                 do {
5015                         new = old = itask->itask_flags;
5016                         if (new & ITASK_BEING_ABORTED)
5017                                 return (STMF_ABORTED);
5018                         new &= ~ITASK_KNOWN_TO_LU;
5019                 } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
5020         }
5021         if (itask->itask_flags & ITASK_BEING_ABORTED)
5022                 return (STMF_ABORTED);
5023 #ifdef  DEBUG
5024         if (!(ioflags & STMF_IOF_STATS_ONLY) && stmf_drop_buf_counter > 0) {
5025                 if (atomic_add_32_nv((uint32_t *)&stmf_drop_buf_counter, -1) ==
5026                     1)
5027                         return (STMF_SUCCESS);
5028         }
5029 #endif
5030 
5031         stmf_update_kstat_lu_io(task, dbuf);
5032         stmf_update_kstat_lport_io(task, dbuf);
5033         stmf_lport_xfer_start(itask, dbuf);
5034         if (ioflags & STMF_IOF_STATS_ONLY) {
5035                 stmf_lport_xfer_done(itask, dbuf);
5036                 return (STMF_SUCCESS);
5037         }
5038 
5039         dbuf->db_flags |= DB_LPORT_XFER_ACTIVE;
5040         ret = task->task_lport->lport_xfer_data(task, dbuf, ioflags);
5041 
5042         /*
5043          * Port provider may have already called the buffer callback in
5044          * which case dbuf->db_xfer_start_timestamp will be 0.
5045          */
5046         if (ret != STMF_SUCCESS) {
5047                 dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
5048                 if (dbuf->db_xfer_start_timestamp != 0)
5049                         stmf_lport_xfer_done(itask, dbuf);
5050         }
5051 
5052         return (ret);
5053 }
5054 
5055 void
5056 stmf_data_xfer_done(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t iof)
5057 {
5058         stmf_i_scsi_task_t *itask =
5059             (stmf_i_scsi_task_t *)task->task_stmf_private;
5060         stmf_i_local_port_t *ilport;
5061         stmf_worker_t *w = itask->itask_worker;
5062         uint32_t new, old;
5063         uint8_t update_queue_flags, free_it, queue_it;
5064 
5065         stmf_lport_xfer_done(itask, dbuf);
5066 
5067         stmf_task_audit(itask, TE_XFER_DONE, iof, dbuf);
5068 
5069         /* Guard against unexpected completions from the lport */
5070         if (dbuf->db_flags & DB_LPORT_XFER_ACTIVE) {
5071                 dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
5072         } else {
5073                 /*
5074                  * This should never happen.
5075                  */
5076                 ilport = task->task_lport->lport_stmf_private;
5077                 ilport->ilport_unexpected_comp++;
5078                 cmn_err(CE_PANIC, "Unexpected xfer completion task %p dbuf %p",
5079                     (void *)task, (void *)dbuf);
5080                 return;
5081         }
5082 
5083         mutex_enter(&w->worker_lock);
5084         do {
5085                 new = old = itask->itask_flags;
5086                 if (old & ITASK_BEING_ABORTED) {
5087                         mutex_exit(&w->worker_lock);
5088                         return;
5089                 }
5090                 free_it = 0;
5091                 if (iof & STMF_IOF_LPORT_DONE) {
5092                         new &= ~ITASK_KNOWN_TO_TGT_PORT;
5093                         task->task_completion_status = dbuf->db_xfer_status;
5094                         free_it = 1;
5095                 }
5096                 /*
5097                  * If the task is known to LU then queue it. But if
5098                  * it is already queued (multiple completions) then
5099                  * just update the buffer information by grabbing the
5100                  * worker lock. If the task is not known to LU,
5101                  * completed/aborted, then see if we need to
5102                  * free this task.
5103                  */
5104                 if (old & ITASK_KNOWN_TO_LU) {
5105                         free_it = 0;
5106                         update_queue_flags = 1;
5107                         if (old & ITASK_IN_WORKER_QUEUE) {
5108                                 queue_it = 0;
5109                         } else {
5110                                 queue_it = 1;
5111                                 new |= ITASK_IN_WORKER_QUEUE;
5112                         }
5113                 } else {
5114                         update_queue_flags = 0;
5115                         queue_it = 0;
5116                 }
5117         } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
5118 
5119         if (update_queue_flags) {
5120                 uint8_t cmd = (dbuf->db_handle << 5) | ITASK_CMD_DATA_XFER_DONE;
5121 
5122                 ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS);
5123                 itask->itask_cmd_stack[itask->itask_ncmds++] = cmd;
5124                 if (queue_it) {
5125                         itask->itask_worker_next = NULL;
5126                         if (w->worker_task_tail) {
5127                                 w->worker_task_tail->itask_worker_next = itask;
5128                         } else {
5129                                 w->worker_task_head = itask;
5130                         }
5131                         w->worker_task_tail = itask;
5132                         /* Measure task waitq time */
5133                         itask->itask_waitq_enter_timestamp = gethrtime();
5134                         if (++(w->worker_queue_depth) >
5135                             w->worker_max_qdepth_pu) {
5136                                 w->worker_max_qdepth_pu = w->worker_queue_depth;
5137                         }
5138                         if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
5139                                 cv_signal(&w->worker_cv);
5140                 }
5141         }
5142         mutex_exit(&w->worker_lock);
5143 
5144         if (free_it) {
5145                 if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
5146                     ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
5147                     ITASK_BEING_ABORTED)) == 0) {
5148                         stmf_task_free(task);
5149                 }
5150         }
5151 }
5152 
5153 stmf_status_t
5154 stmf_send_scsi_status(scsi_task_t *task, uint32_t ioflags)
5155 {
5156         DTRACE_PROBE1(scsi__send__status, scsi_task_t *, task);
5157 
5158         stmf_i_scsi_task_t *itask =
5159             (stmf_i_scsi_task_t *)task->task_stmf_private;
5160 
5161         stmf_task_audit(itask, TE_SEND_STATUS, ioflags, NULL);
5162 
5163         if (ioflags & STMF_IOF_LU_DONE) {
5164                 uint32_t new, old;
5165                 do {
5166                         new = old = itask->itask_flags;
5167                         if (new & ITASK_BEING_ABORTED)
5168                                 return (STMF_ABORTED);
5169                         new &= ~ITASK_KNOWN_TO_LU;
5170                 } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
5171         }
5172 
5173         if (!(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT)) {
5174                 return (STMF_SUCCESS);
5175         }
5176 
5177         if (itask->itask_flags & ITASK_BEING_ABORTED)
5178                 return (STMF_ABORTED);
5179 
5180         if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
5181                 task->task_status_ctrl = 0;
5182                 task->task_resid = 0;
5183         } else if (task->task_cmd_xfer_length >
5184             task->task_expected_xfer_length) {
5185                 task->task_status_ctrl = TASK_SCTRL_OVER;
5186                 task->task_resid = task->task_cmd_xfer_length -
5187                     task->task_expected_xfer_length;
5188         } else if (task->task_nbytes_transferred <
5189             task->task_expected_xfer_length) {
5190                 task->task_status_ctrl = TASK_SCTRL_UNDER;
5191                 task->task_resid = task->task_expected_xfer_length -
5192                     task->task_nbytes_transferred;
5193         } else {
5194                 task->task_status_ctrl = 0;
5195                 task->task_resid = 0;
5196         }
5197         return (task->task_lport->lport_send_status(task, ioflags));
5198 }
5199 
5200 void
5201 stmf_send_status_done(scsi_task_t *task, stmf_status_t s, uint32_t iof)
5202 {
5203         stmf_i_scsi_task_t *itask =
5204             (stmf_i_scsi_task_t *)task->task_stmf_private;
5205         stmf_worker_t *w = itask->itask_worker;
5206         uint32_t new, old;
5207         uint8_t free_it, queue_it;
5208 
5209         stmf_task_audit(itask, TE_SEND_STATUS_DONE, iof, NULL);
5210 
5211         mutex_enter(&w->worker_lock);
5212         do {
5213                 new = old = itask->itask_flags;
5214                 if (old & ITASK_BEING_ABORTED) {
5215                         mutex_exit(&w->worker_lock);
5216                         return;
5217                 }
5218                 free_it = 0;
5219                 if (iof & STMF_IOF_LPORT_DONE) {
5220                         new &= ~ITASK_KNOWN_TO_TGT_PORT;
5221                         free_it = 1;
5222                 }
5223                 /*
5224                  * If the task is known to LU then queue it. But if
5225                  * it is already queued (multiple completions) then
5226                  * just update the buffer information by grabbing the
5227                  * worker lock. If the task is not known to LU,
5228                  * completed/aborted, then see if we need to
5229                  * free this task.
5230                  */
5231                 if (old & ITASK_KNOWN_TO_LU) {
5232                         free_it = 0;
5233                         queue_it = 1;
5234                         if (old & ITASK_IN_WORKER_QUEUE) {
5235                                 cmn_err(CE_PANIC, "status completion received"
5236                                     " when task is already in worker queue "
5237                                     " task = %p", (void *)task);
5238                         }
5239                         new |= ITASK_IN_WORKER_QUEUE;
5240                 } else {
5241                         queue_it = 0;
5242                 }
5243         } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
5244         task->task_completion_status = s;
5245 
5246 
5247         if (queue_it) {
5248                 ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS);
5249                 itask->itask_cmd_stack[itask->itask_ncmds++] =
5250                     ITASK_CMD_STATUS_DONE;
5251                 itask->itask_worker_next = NULL;
5252                 if (w->worker_task_tail) {
5253                         w->worker_task_tail->itask_worker_next = itask;
5254                 } else {
5255                         w->worker_task_head = itask;
5256                 }
5257                 w->worker_task_tail = itask;
5258                 /* Measure task waitq time */
5259                 itask->itask_waitq_enter_timestamp = gethrtime();
5260                 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
5261                         w->worker_max_qdepth_pu = w->worker_queue_depth;
5262                 }
5263                 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
5264                         cv_signal(&w->worker_cv);
5265         }
5266         mutex_exit(&w->worker_lock);
5267 
5268         if (free_it) {
5269                 if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
5270                     ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
5271                     ITASK_BEING_ABORTED)) == 0) {
5272                         stmf_task_free(task);
5273                 } else {
5274                         cmn_err(CE_PANIC, "LU is done with the task but LPORT "
5275                             " is not done, itask %p itask_flags %x",
5276                             (void *)itask, itask->itask_flags);
5277                 }
5278         }
5279 }
5280 
5281 void
5282 stmf_task_lu_done(scsi_task_t *task)
5283 {
5284         stmf_i_scsi_task_t *itask =
5285             (stmf_i_scsi_task_t *)task->task_stmf_private;
5286         stmf_worker_t *w = itask->itask_worker;
5287         uint32_t new, old;
5288 
5289         mutex_enter(&w->worker_lock);
5290         do {
5291                 new = old = itask->itask_flags;
5292                 if (old & ITASK_BEING_ABORTED) {
5293                         mutex_exit(&w->worker_lock);
5294                         return;
5295                 }
5296                 if (old & ITASK_IN_WORKER_QUEUE) {
5297                         cmn_err(CE_PANIC, "task_lu_done received"
5298                             " when task is in worker queue "
5299                             " task = %p", (void *)task);
5300                 }
5301                 new &= ~ITASK_KNOWN_TO_LU;
5302         } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
5303 
5304         mutex_exit(&w->worker_lock);
5305 
5306         if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
5307             ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
5308             ITASK_BEING_ABORTED)) == 0) {
5309                 stmf_task_free(task);
5310         } else {
5311                 cmn_err(CE_PANIC, "stmf_lu_done should be the last stage but "
5312                     " the task is still not done, task = %p", (void *)task);
5313         }
5314 }
5315 
5316 void
5317 stmf_queue_task_for_abort(scsi_task_t *task, stmf_status_t s)
5318 {
5319         stmf_i_scsi_task_t *itask =
5320             (stmf_i_scsi_task_t *)task->task_stmf_private;
5321         stmf_worker_t *w;
5322         uint32_t old, new;
5323 
5324         stmf_task_audit(itask, TE_TASK_ABORT, CMD_OR_IOF_NA, NULL);
5325 
5326         do {
5327                 old = new = itask->itask_flags;
5328                 if ((old & ITASK_BEING_ABORTED) ||
5329                     ((old & (ITASK_KNOWN_TO_TGT_PORT |
5330                     ITASK_KNOWN_TO_LU)) == 0)) {
5331                         return;
5332                 }
5333                 new |= ITASK_BEING_ABORTED;
5334         } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
5335         task->task_completion_status = s;
5336         itask->itask_start_time = ddi_get_lbolt();
5337 
5338         if (((w = itask->itask_worker) == NULL) ||
5339             (itask->itask_flags & ITASK_IN_TRANSITION)) {
5340                 return;
5341         }
5342 
5343         /* Queue it and get out */
5344         mutex_enter(&w->worker_lock);
5345         if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) {
5346                 mutex_exit(&w->worker_lock);
5347                 return;
5348         }
5349         atomic_or_32(&itask->itask_flags, ITASK_IN_WORKER_QUEUE);
5350         itask->itask_worker_next = NULL;
5351         if (w->worker_task_tail) {
5352                 w->worker_task_tail->itask_worker_next = itask;
5353         } else {
5354                 w->worker_task_head = itask;
5355         }
5356         w->worker_task_tail = itask;
5357         if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
5358                 w->worker_max_qdepth_pu = w->worker_queue_depth;
5359         }
5360         if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
5361                 cv_signal(&w->worker_cv);
5362         mutex_exit(&w->worker_lock);
5363 }
5364 
5365 void
5366 stmf_abort(int abort_cmd, scsi_task_t *task, stmf_status_t s, void *arg)
5367 {
5368         stmf_i_scsi_task_t *itask = NULL;
5369         uint32_t old, new, f, rf;
5370 
5371         DTRACE_PROBE2(scsi__task__abort, scsi_task_t *, task,
5372             stmf_status_t, s);
5373 
5374         switch (abort_cmd) {
5375         case STMF_QUEUE_ABORT_LU:
5376                 stmf_task_lu_killall((stmf_lu_t *)arg, task, s);
5377                 return;
5378         case STMF_QUEUE_TASK_ABORT:
5379                 stmf_queue_task_for_abort(task, s);
5380                 return;
5381         case STMF_REQUEUE_TASK_ABORT_LPORT:
5382                 rf = ITASK_TGT_PORT_ABORT_CALLED;
5383                 f = ITASK_KNOWN_TO_TGT_PORT;
5384                 break;
5385         case STMF_REQUEUE_TASK_ABORT_LU:
5386                 rf = ITASK_LU_ABORT_CALLED;
5387                 f = ITASK_KNOWN_TO_LU;
5388                 break;
5389         default:
5390                 return;
5391         }
5392         itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
5393         f |= ITASK_BEING_ABORTED | rf;
5394         do {
5395                 old = new = itask->itask_flags;
5396                 if ((old & f) != f) {
5397                         return;
5398                 }
5399                 new &= ~rf;
5400         } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
5401 }
5402 
5403 void
5404 stmf_task_lu_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof)
5405 {
5406         char                     info[STMF_CHANGE_INFO_LEN];
5407         stmf_i_scsi_task_t      *itask = TASK_TO_ITASK(task);
5408         unsigned long long      st;
5409 
5410         stmf_task_audit(itask, TE_TASK_LU_ABORTED, iof, NULL);
5411 
5412         st = s; /* gcc fix */
5413         if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
5414                 (void) snprintf(info, sizeof (info),
5415                     "task %p, lu failed to abort ret=%llx", (void *)task, st);
5416         } else if ((iof & STMF_IOF_LU_DONE) == 0) {
5417                 (void) snprintf(info, sizeof (info),
5418                     "Task aborted but LU is not finished, task ="
5419                     "%p, s=%llx, iof=%x", (void *)task, st, iof);
5420         } else {
5421                 /*
5422                  * LU abort successfully
5423                  */
5424                 atomic_and_32(&itask->itask_flags, ~ITASK_KNOWN_TO_LU);
5425                 return;
5426         }
5427 
5428         stmf_abort_task_offline(task, 1, info);
5429 }
5430 
5431 void
5432 stmf_task_lport_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof)
5433 {
5434         char                    info[STMF_CHANGE_INFO_LEN];
5435         stmf_i_scsi_task_t      *itask = TASK_TO_ITASK(task);
5436         unsigned long long      st;
5437         uint32_t                old, new;
5438 
5439         stmf_task_audit(itask, TE_TASK_LPORT_ABORTED, iof, NULL);
5440 
5441         st = s;
5442         if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
5443                 (void) snprintf(info, sizeof (info),
5444                     "task %p, tgt port failed to abort ret=%llx", (void *)task,
5445                     st);
5446         } else if ((iof & STMF_IOF_LPORT_DONE) == 0) {
5447                 (void) snprintf(info, sizeof (info),
5448                     "Task aborted but tgt port is not finished, "
5449                     "task=%p, s=%llx, iof=%x", (void *)task, st, iof);
5450         } else {
5451                 /*
5452                  * LPORT abort successfully
5453                  */
5454                 do {
5455                         old = new = itask->itask_flags;
5456                         if (!(old & ITASK_KNOWN_TO_TGT_PORT))
5457                                 return;
5458                         new &= ~ITASK_KNOWN_TO_TGT_PORT;
5459                 } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
5460                 return;
5461         }
5462 
5463         stmf_abort_task_offline(task, 0, info);
5464 }
5465 
5466 stmf_status_t
5467 stmf_task_poll_lu(scsi_task_t *task, uint32_t timeout)
5468 {
5469         stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
5470             task->task_stmf_private;
5471         stmf_worker_t *w = itask->itask_worker;
5472         int i;
5473 
5474         ASSERT(itask->itask_flags & ITASK_KNOWN_TO_LU);
5475         mutex_enter(&w->worker_lock);
5476         if (itask->itask_ncmds >= ITASK_MAX_NCMDS) {
5477                 mutex_exit(&w->worker_lock);
5478                 return (STMF_BUSY);
5479         }
5480         for (i = 0; i < itask->itask_ncmds; i++) {
5481                 if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LU) {
5482                         mutex_exit(&w->worker_lock);
5483                         return (STMF_SUCCESS);
5484                 }
5485         }
5486         itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LU;
5487         if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) {
5488                 itask->itask_poll_timeout = ddi_get_lbolt() + 1;
5489         } else {
5490                 clock_t t = drv_usectohz(timeout * 1000);
5491                 if (t == 0)
5492                         t = 1;
5493                 itask->itask_poll_timeout = ddi_get_lbolt() + t;
5494         }
5495         if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) {
5496                 itask->itask_worker_next = NULL;
5497                 if (w->worker_task_tail) {
5498                         w->worker_task_tail->itask_worker_next = itask;
5499                 } else {
5500                         w->worker_task_head = itask;
5501                 }
5502                 w->worker_task_tail = itask;
5503                 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
5504                         w->worker_max_qdepth_pu = w->worker_queue_depth;
5505                 }
5506                 atomic_or_32(&itask->itask_flags, ITASK_IN_WORKER_QUEUE);
5507                 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
5508                         cv_signal(&w->worker_cv);
5509         }
5510         mutex_exit(&w->worker_lock);
5511         return (STMF_SUCCESS);
5512 }
5513 
5514 stmf_status_t
5515 stmf_task_poll_lport(scsi_task_t *task, uint32_t timeout)
5516 {
5517         stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
5518             task->task_stmf_private;
5519         stmf_worker_t *w = itask->itask_worker;
5520         int i;
5521 
5522         ASSERT(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT);
5523         mutex_enter(&w->worker_lock);
5524         if (itask->itask_ncmds >= ITASK_MAX_NCMDS) {
5525                 mutex_exit(&w->worker_lock);
5526                 return (STMF_BUSY);
5527         }
5528         for (i = 0; i < itask->itask_ncmds; i++) {
5529                 if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LPORT) {
5530                         mutex_exit(&w->worker_lock);
5531                         return (STMF_SUCCESS);
5532                 }
5533         }
5534         itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LPORT;
5535         if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) {
5536                 itask->itask_poll_timeout = ddi_get_lbolt() + 1;
5537         } else {
5538                 clock_t t = drv_usectohz(timeout * 1000);
5539                 if (t == 0)
5540                         t = 1;
5541                 itask->itask_poll_timeout = ddi_get_lbolt() + t;
5542         }
5543         if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) {
5544                 itask->itask_worker_next = NULL;
5545                 if (w->worker_task_tail) {
5546                         w->worker_task_tail->itask_worker_next = itask;
5547                 } else {
5548                         w->worker_task_head = itask;
5549                 }
5550                 w->worker_task_tail = itask;
5551                 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
5552                         w->worker_max_qdepth_pu = w->worker_queue_depth;
5553                 }
5554                 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
5555                         cv_signal(&w->worker_cv);
5556         }
5557         mutex_exit(&w->worker_lock);
5558         return (STMF_SUCCESS);
5559 }
5560 
5561 void
5562 stmf_do_task_abort(scsi_task_t *task)
5563 {
5564         stmf_i_scsi_task_t      *itask = TASK_TO_ITASK(task);
5565         stmf_lu_t               *lu;
5566         stmf_local_port_t       *lport;
5567         unsigned long long       ret;
5568         uint32_t                 old, new;
5569         uint8_t                  call_lu_abort, call_port_abort;
5570         char                     info[STMF_CHANGE_INFO_LEN];
5571 
5572         lu = task->task_lu;
5573         lport = task->task_lport;
5574         do {
5575                 old = new = itask->itask_flags;
5576                 if ((old & (ITASK_KNOWN_TO_LU | ITASK_LU_ABORT_CALLED)) ==
5577                     ITASK_KNOWN_TO_LU) {
5578                         new |= ITASK_LU_ABORT_CALLED;
5579                         call_lu_abort = 1;
5580                 } else {
5581                         call_lu_abort = 0;
5582                 }
5583         } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
5584 
5585         if (call_lu_abort) {
5586                 if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0) {
5587                         ret = lu->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0);
5588                 } else {
5589                         ret = dlun0->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0);
5590                 }
5591                 if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) {
5592                         stmf_task_lu_aborted(task, ret, STMF_IOF_LU_DONE);
5593                 } else if (ret == STMF_BUSY) {
5594                         atomic_and_32(&itask->itask_flags,
5595                             ~ITASK_LU_ABORT_CALLED);
5596                 } else if (ret != STMF_SUCCESS) {
5597                         (void) snprintf(info, sizeof (info),
5598                             "Abort failed by LU %p, ret %llx", (void *)lu, ret);
5599                         stmf_abort_task_offline(task, 1, info);
5600                 }
5601         } else if (itask->itask_flags & ITASK_KNOWN_TO_LU) {
5602                 if (ddi_get_lbolt() > (itask->itask_start_time +
5603                     STMF_SEC2TICK(lu->lu_abort_timeout?
5604                     lu->lu_abort_timeout : ITASK_DEFAULT_ABORT_TIMEOUT))) {
5605                         (void) snprintf(info, sizeof (info),
5606                             "lu abort timed out");
5607                         stmf_abort_task_offline(itask->itask_task, 1, info);
5608                 }
5609         }
5610 
5611         do {
5612                 old = new = itask->itask_flags;
5613                 if ((old & (ITASK_KNOWN_TO_TGT_PORT |
5614                     ITASK_TGT_PORT_ABORT_CALLED)) == ITASK_KNOWN_TO_TGT_PORT) {
5615                         new |= ITASK_TGT_PORT_ABORT_CALLED;
5616                         call_port_abort = 1;
5617                 } else {
5618                         call_port_abort = 0;
5619                 }
5620         } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
5621         if (call_port_abort) {
5622                 ret = lport->lport_abort(lport, STMF_LPORT_ABORT_TASK, task, 0);
5623                 if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) {
5624                         stmf_task_lport_aborted(task, ret, STMF_IOF_LPORT_DONE);
5625                 } else if (ret == STMF_BUSY) {
5626                         atomic_and_32(&itask->itask_flags,
5627                             ~ITASK_TGT_PORT_ABORT_CALLED);
5628                 } else if (ret != STMF_SUCCESS) {
5629                         (void) snprintf(info, sizeof (info),
5630                             "Abort failed by tgt port %p ret %llx",
5631                             (void *)lport, ret);
5632                         stmf_abort_task_offline(task, 0, info);
5633                 }
5634         } else if (itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT) {
5635                 if (ddi_get_lbolt() > (itask->itask_start_time +
5636                     STMF_SEC2TICK(lport->lport_abort_timeout?
5637                     lport->lport_abort_timeout :
5638                     ITASK_DEFAULT_ABORT_TIMEOUT))) {
5639                         (void) snprintf(info, sizeof (info),
5640                             "lport abort timed out");
5641                         stmf_abort_task_offline(itask->itask_task, 0, info);
5642                 }
5643         }
5644 }
5645 
5646 stmf_status_t
5647 stmf_ctl(int cmd, void *obj, void *arg)
5648 {
5649         stmf_status_t                   ret;
5650         stmf_i_lu_t                     *ilu;
5651         stmf_i_local_port_t             *ilport;
5652         stmf_state_change_info_t        *ssci = (stmf_state_change_info_t *)arg;
5653 
5654         mutex_enter(&stmf_state.stmf_lock);
5655         ret = STMF_INVALID_ARG;
5656         if (cmd & STMF_CMD_LU_OP) {
5657                 ilu = stmf_lookup_lu((stmf_lu_t *)obj);
5658                 if (ilu == NULL) {
5659                         goto stmf_ctl_lock_exit;
5660                 }
5661                 DTRACE_PROBE3(lu__state__change,
5662                     stmf_lu_t *, ilu->ilu_lu,
5663                     int, cmd, stmf_state_change_info_t *, ssci);
5664         } else if (cmd & STMF_CMD_LPORT_OP) {
5665                 ilport = stmf_lookup_lport((stmf_local_port_t *)obj);
5666                 if (ilport == NULL) {
5667                         goto stmf_ctl_lock_exit;
5668                 }
5669                 DTRACE_PROBE3(lport__state__change,
5670                     stmf_local_port_t *, ilport->ilport_lport,
5671                     int, cmd, stmf_state_change_info_t *, ssci);
5672         } else {
5673                 goto stmf_ctl_lock_exit;
5674         }
5675 
5676         switch (cmd) {
5677         case STMF_CMD_LU_ONLINE:
5678                 switch (ilu->ilu_state) {
5679                         case STMF_STATE_OFFLINE:
5680                                 ret = STMF_SUCCESS;
5681                                 break;
5682                         case STMF_STATE_ONLINE:
5683                         case STMF_STATE_ONLINING:
5684                                 ret = STMF_ALREADY;
5685                                 break;
5686                         case STMF_STATE_OFFLINING:
5687                                 ret = STMF_BUSY;
5688                                 break;
5689                         default:
5690                                 ret = STMF_BADSTATE;
5691                                 break;
5692                 }
5693                 if (ret != STMF_SUCCESS)
5694                         goto stmf_ctl_lock_exit;
5695 
5696                 ilu->ilu_state = STMF_STATE_ONLINING;
5697                 mutex_exit(&stmf_state.stmf_lock);
5698                 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
5699                 break;
5700 
5701         case STMF_CMD_LU_ONLINE_COMPLETE:
5702                 if (ilu->ilu_state != STMF_STATE_ONLINING) {
5703                         ret = STMF_BADSTATE;
5704                         goto stmf_ctl_lock_exit;
5705                 }
5706                 if (((stmf_change_status_t *)arg)->st_completion_status ==
5707                     STMF_SUCCESS) {
5708                         ilu->ilu_state = STMF_STATE_ONLINE;
5709                         mutex_exit(&stmf_state.stmf_lock);
5710                         ((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj,
5711                             STMF_ACK_LU_ONLINE_COMPLETE, arg);
5712                         mutex_enter(&stmf_state.stmf_lock);
5713                         stmf_add_lu_to_active_sessions((stmf_lu_t *)obj);
5714                 } else {
5715                         /* XXX: should throw a meesage an record more data */
5716                         ilu->ilu_state = STMF_STATE_OFFLINE;
5717                 }
5718                 ret = STMF_SUCCESS;
5719                 goto stmf_ctl_lock_exit;
5720 
5721         case STMF_CMD_LU_OFFLINE:
5722                 switch (ilu->ilu_state) {
5723                         case STMF_STATE_ONLINE:
5724                                 ret = STMF_SUCCESS;
5725                                 break;
5726                         case STMF_STATE_OFFLINE:
5727                         case STMF_STATE_OFFLINING:
5728                                 ret = STMF_ALREADY;
5729                                 break;
5730                         case STMF_STATE_ONLINING:
5731                                 ret = STMF_BUSY;
5732                                 break;
5733                         default:
5734                                 ret = STMF_BADSTATE;
5735                                 break;
5736                 }
5737                 if (ret != STMF_SUCCESS)
5738                         goto stmf_ctl_lock_exit;
5739                 ilu->ilu_state = STMF_STATE_OFFLINING;
5740                 mutex_exit(&stmf_state.stmf_lock);
5741                 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
5742                 break;
5743 
5744         case STMF_CMD_LU_OFFLINE_COMPLETE:
5745                 if (ilu->ilu_state != STMF_STATE_OFFLINING) {
5746                         ret = STMF_BADSTATE;
5747                         goto stmf_ctl_lock_exit;
5748                 }
5749                 if (((stmf_change_status_t *)arg)->st_completion_status ==
5750                     STMF_SUCCESS) {
5751                         ilu->ilu_state = STMF_STATE_OFFLINE;
5752                         mutex_exit(&stmf_state.stmf_lock);
5753                         ((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj,
5754                             STMF_ACK_LU_OFFLINE_COMPLETE, arg);
5755                         mutex_enter(&stmf_state.stmf_lock);
5756                 } else {
5757                         ilu->ilu_state = STMF_STATE_ONLINE;
5758                         stmf_add_lu_to_active_sessions((stmf_lu_t *)obj);
5759                 }
5760                 mutex_exit(&stmf_state.stmf_lock);
5761                 break;
5762 
5763         /*
5764          * LPORT_ONLINE/OFFLINE has nothing to do with link offline/online.
5765          * It's related with hardware disable/enable.
5766          */
5767         case STMF_CMD_LPORT_ONLINE:
5768                 switch (ilport->ilport_state) {
5769                         case STMF_STATE_OFFLINE:
5770                                 ret = STMF_SUCCESS;
5771                                 break;
5772                         case STMF_STATE_ONLINE:
5773                         case STMF_STATE_ONLINING:
5774                                 ret = STMF_ALREADY;
5775                                 break;
5776                         case STMF_STATE_OFFLINING:
5777                                 ret = STMF_BUSY;
5778                                 break;
5779                         default:
5780                                 ret = STMF_BADSTATE;
5781                                 break;
5782                 }
5783                 if (ret != STMF_SUCCESS)
5784                         goto stmf_ctl_lock_exit;
5785 
5786                 /*
5787                  * Only user request can recover the port from the
5788                  * FORCED_OFFLINE state
5789                  */
5790                 if (ilport->ilport_flags & ILPORT_FORCED_OFFLINE) {
5791                         if (!(ssci->st_rflags & STMF_RFLAG_USER_REQUEST)) {
5792                                 ret = STMF_FAILURE;
5793                                 goto stmf_ctl_lock_exit;
5794                         }
5795                 }
5796 
5797                 /*
5798                  * Avoid too frequent request to online
5799                  */
5800                 if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) {
5801                         ilport->ilport_online_times = 0;
5802                         ilport->ilport_avg_interval = 0;
5803                 }
5804                 if ((ilport->ilport_avg_interval < STMF_AVG_ONLINE_INTERVAL) &&
5805                     (ilport->ilport_online_times >= 4)) {
5806                         ret = STMF_FAILURE;
5807                         ilport->ilport_flags |= ILPORT_FORCED_OFFLINE;
5808                         stmf_trace(NULL, "stmf_ctl: too frequent request to "
5809                             "online the port");
5810                         cmn_err(CE_WARN, "stmf_ctl: too frequent request to "
5811                             "online the port, set FORCED_OFFLINE now");
5812                         goto stmf_ctl_lock_exit;
5813                 }
5814                 if (ilport->ilport_online_times > 0) {
5815                         if (ilport->ilport_online_times == 1) {
5816                                 ilport->ilport_avg_interval = ddi_get_lbolt() -
5817                                     ilport->ilport_last_online_clock;
5818                         } else {
5819                                 ilport->ilport_avg_interval =
5820                                     (ilport->ilport_avg_interval +
5821                                     ddi_get_lbolt() -
5822                                     ilport->ilport_last_online_clock) >> 1;
5823                         }
5824                 }
5825                 ilport->ilport_last_online_clock = ddi_get_lbolt();
5826                 ilport->ilport_online_times++;
5827 
5828                 /*
5829                  * Submit online service request
5830                  */
5831                 ilport->ilport_flags &= ~ILPORT_FORCED_OFFLINE;
5832                 ilport->ilport_state = STMF_STATE_ONLINING;
5833                 mutex_exit(&stmf_state.stmf_lock);
5834                 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
5835                 break;
5836 
5837         case STMF_CMD_LPORT_ONLINE_COMPLETE:
5838                 if (ilport->ilport_state != STMF_STATE_ONLINING) {
5839                         ret = STMF_BADSTATE;
5840                         goto stmf_ctl_lock_exit;
5841                 }
5842                 if (((stmf_change_status_t *)arg)->st_completion_status ==
5843                     STMF_SUCCESS) {
5844                         ilport->ilport_state = STMF_STATE_ONLINE;
5845                         mutex_exit(&stmf_state.stmf_lock);
5846                         ((stmf_local_port_t *)obj)->lport_ctl(
5847                             (stmf_local_port_t *)obj,
5848                             STMF_ACK_LPORT_ONLINE_COMPLETE, arg);
5849                         mutex_enter(&stmf_state.stmf_lock);
5850                 } else {
5851                         ilport->ilport_state = STMF_STATE_OFFLINE;
5852                 }
5853                 ret = STMF_SUCCESS;
5854                 goto stmf_ctl_lock_exit;
5855 
5856         case STMF_CMD_LPORT_OFFLINE:
5857                 switch (ilport->ilport_state) {
5858                         case STMF_STATE_ONLINE:
5859                                 ret = STMF_SUCCESS;
5860                                 break;
5861                         case STMF_STATE_OFFLINE:
5862                         case STMF_STATE_OFFLINING:
5863                                 ret = STMF_ALREADY;
5864                                 break;
5865                         case STMF_STATE_ONLINING:
5866                                 ret = STMF_BUSY;
5867                                 break;
5868                         default:
5869                                 ret = STMF_BADSTATE;
5870                                 break;
5871                 }
5872                 if (ret != STMF_SUCCESS)
5873                         goto stmf_ctl_lock_exit;
5874 
5875                 ilport->ilport_state = STMF_STATE_OFFLINING;
5876                 mutex_exit(&stmf_state.stmf_lock);
5877                 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
5878                 break;
5879 
5880         case STMF_CMD_LPORT_OFFLINE_COMPLETE:
5881                 if (ilport->ilport_state != STMF_STATE_OFFLINING) {
5882                         ret = STMF_BADSTATE;
5883                         goto stmf_ctl_lock_exit;
5884                 }
5885                 if (((stmf_change_status_t *)arg)->st_completion_status ==
5886                     STMF_SUCCESS) {
5887                         ilport->ilport_state = STMF_STATE_OFFLINE;
5888                         mutex_exit(&stmf_state.stmf_lock);
5889                         ((stmf_local_port_t *)obj)->lport_ctl(
5890                             (stmf_local_port_t *)obj,
5891                             STMF_ACK_LPORT_OFFLINE_COMPLETE, arg);
5892                         mutex_enter(&stmf_state.stmf_lock);
5893                 } else {
5894                         ilport->ilport_state = STMF_STATE_ONLINE;
5895                 }
5896                 mutex_exit(&stmf_state.stmf_lock);
5897                 break;
5898 
5899         default:
5900                 cmn_err(CE_WARN, "Invalid ctl cmd received %x", cmd);
5901                 ret = STMF_INVALID_ARG;
5902                 goto stmf_ctl_lock_exit;
5903         }
5904 
5905         return (STMF_SUCCESS);
5906 
5907 stmf_ctl_lock_exit:;
5908         mutex_exit(&stmf_state.stmf_lock);
5909         return (ret);
5910 }
5911 
5912 /* ARGSUSED */
5913 stmf_status_t
5914 stmf_info_impl(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf,
5915                                                 uint32_t *bufsizep)
5916 {
5917         return (STMF_NOT_SUPPORTED);
5918 }
5919 
5920 /* ARGSUSED */
5921 stmf_status_t
5922 stmf_info(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf,
5923                                                 uint32_t *bufsizep)
5924 {
5925         uint32_t cl = SI_GET_CLASS(cmd);
5926 
5927         if (cl == SI_STMF) {
5928                 return (stmf_info_impl(cmd, arg1, arg2, buf, bufsizep));
5929         }
5930         if (cl == SI_LPORT) {
5931                 return (((stmf_local_port_t *)arg1)->lport_info(cmd, arg1,
5932                     arg2, buf, bufsizep));
5933         } else if (cl == SI_LU) {
5934                 return (((stmf_lu_t *)arg1)->lu_info(cmd, arg1, arg2, buf,
5935                     bufsizep));
5936         }
5937 
5938         return (STMF_NOT_SUPPORTED);
5939 }
5940 
5941 /*
5942  * Used by port providers. pwwn is 8 byte wwn, sdid is the devid used by
5943  * stmf to register local ports. The ident should have 20 bytes in buffer
5944  * space to convert the wwn to "wwn.xxxxxxxxxxxxxxxx" string.
5945  */
5946 void
5947 stmf_wwn_to_devid_desc(scsi_devid_desc_t *sdid, uint8_t *wwn,
5948     uint8_t protocol_id)
5949 {
5950         char wwn_str[20+1];
5951 
5952         sdid->protocol_id = protocol_id;
5953         sdid->piv = 1;
5954         sdid->code_set = CODE_SET_ASCII;
5955         sdid->association = ID_IS_TARGET_PORT;
5956         sdid->ident_length = 20;
5957         /* Convert wwn value to "wwn.XXXXXXXXXXXXXXXX" format */
5958         (void) snprintf(wwn_str, sizeof (wwn_str),
5959             "wwn.%02X%02X%02X%02X%02X%02X%02X%02X",
5960             wwn[0], wwn[1], wwn[2], wwn[3], wwn[4], wwn[5], wwn[6], wwn[7]);
5961         bcopy(wwn_str, (char *)sdid->ident, 20);
5962 }
5963 
5964 
5965 stmf_xfer_data_t *
5966 stmf_prepare_tpgs_data(uint8_t ilu_alua)
5967 {
5968         stmf_xfer_data_t *xd;
5969         stmf_i_local_port_t *ilport;
5970         uint8_t *p;
5971         uint32_t sz, asz, nports = 0, nports_standby = 0;
5972 
5973         mutex_enter(&stmf_state.stmf_lock);
5974         /* check if any ports are standby and create second group */
5975         for (ilport = stmf_state.stmf_ilportlist; ilport;
5976             ilport = ilport->ilport_next) {
5977                 if (ilport->ilport_standby == 1) {
5978                         nports_standby++;
5979                 } else {
5980                         nports++;
5981                 }
5982         }
5983 
5984         /* The spec only allows for 255 ports to be reported per group */
5985         nports = min(nports, 255);
5986         nports_standby = min(nports_standby, 255);
5987         sz = (nports * 4) + 12;
5988         if (nports_standby && ilu_alua) {
5989                 sz += (nports_standby * 4) + 8;
5990         }
5991         asz = sz + sizeof (*xd) - 4;
5992         xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP);
5993         if (xd == NULL) {
5994                 mutex_exit(&stmf_state.stmf_lock);
5995                 return (NULL);
5996         }
5997         xd->alloc_size = asz;
5998         xd->size_left = sz;
5999 
6000         p = xd->buf;
6001 
6002         *((uint32_t *)p) = BE_32(sz - 4);
6003         p += 4;
6004         p[0] = 0x80;    /* PREF */
6005         p[1] = 5;       /* AO_SUP, S_SUP */
6006         if (stmf_state.stmf_alua_node == 1) {
6007                 p[3] = 1;       /* Group 1 */
6008         } else {
6009                 p[3] = 0;       /* Group 0 */
6010         }
6011         p[7] = nports & 0xff;
6012         p += 8;
6013         for (ilport = stmf_state.stmf_ilportlist; ilport;
6014             ilport = ilport->ilport_next) {
6015                 if (ilport->ilport_standby == 1) {
6016                         continue;
6017                 }
6018                 ((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid);
6019                 p += 4;
6020         }
6021         if (nports_standby && ilu_alua) {
6022                 p[0] = 0x02;    /* Non PREF, Standby */
6023                 p[1] = 5;       /* AO_SUP, S_SUP */
6024                 if (stmf_state.stmf_alua_node == 1) {
6025                         p[3] = 0;       /* Group 0 */
6026                 } else {
6027                         p[3] = 1;       /* Group 1 */
6028                 }
6029                 p[7] = nports_standby & 0xff;
6030                 p += 8;
6031                 for (ilport = stmf_state.stmf_ilportlist; ilport;
6032                     ilport = ilport->ilport_next) {
6033                         if (ilport->ilport_standby == 0) {
6034                                 continue;
6035                         }
6036                         ((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid);
6037                         p += 4;
6038                 }
6039         }
6040 
6041         mutex_exit(&stmf_state.stmf_lock);
6042 
6043         return (xd);
6044 }
6045 
6046 struct scsi_devid_desc *
6047 stmf_scsilib_get_devid_desc(uint16_t rtpid)
6048 {
6049         scsi_devid_desc_t *devid = NULL;
6050         stmf_i_local_port_t *ilport;
6051 
6052         mutex_enter(&stmf_state.stmf_lock);
6053 
6054         for (ilport = stmf_state.stmf_ilportlist; ilport;
6055             ilport = ilport->ilport_next) {
6056                 if (ilport->ilport_rtpid == rtpid) {
6057                         scsi_devid_desc_t *id = ilport->ilport_lport->lport_id;
6058                         uint32_t id_sz = sizeof (scsi_devid_desc_t) +
6059                             id->ident_length;
6060                         devid = (scsi_devid_desc_t *)kmem_zalloc(id_sz,
6061                             KM_NOSLEEP);
6062                         if (devid != NULL) {
6063                                 bcopy(id, devid, id_sz);
6064                         }
6065                         break;
6066                 }
6067         }
6068 
6069         mutex_exit(&stmf_state.stmf_lock);
6070         return (devid);
6071 }
6072 
6073 uint16_t
6074 stmf_scsilib_get_lport_rtid(struct scsi_devid_desc *devid)
6075 {
6076         stmf_i_local_port_t     *ilport;
6077         scsi_devid_desc_t       *id;
6078         uint16_t                rtpid = 0;
6079 
6080         mutex_enter(&stmf_state.stmf_lock);
6081         for (ilport = stmf_state.stmf_ilportlist; ilport;
6082             ilport = ilport->ilport_next) {
6083                 id = ilport->ilport_lport->lport_id;
6084                 if ((devid->ident_length == id->ident_length) &&
6085                     (memcmp(devid->ident, id->ident, id->ident_length) == 0)) {
6086                         rtpid = ilport->ilport_rtpid;
6087                         break;
6088                 }
6089         }
6090         mutex_exit(&stmf_state.stmf_lock);
6091         return (rtpid);
6092 }
6093 
6094 static uint16_t stmf_lu_id_gen_number = 0;
6095 
6096 stmf_status_t
6097 stmf_scsilib_uniq_lu_id(uint32_t company_id, scsi_devid_desc_t *lu_id)
6098 {
6099         return (stmf_scsilib_uniq_lu_id2(company_id, 0, lu_id));
6100 }
6101 
6102 stmf_status_t
6103 stmf_scsilib_uniq_lu_id2(uint32_t company_id, uint32_t host_id,
6104     scsi_devid_desc_t *lu_id)
6105 {
6106         uint8_t *p;
6107         struct timeval32 timestamp32;
6108         uint32_t *t = (uint32_t *)&timestamp32;
6109         struct ether_addr mac;
6110         uint8_t *e = (uint8_t *)&mac;
6111         int hid = (int)host_id;
6112         uint16_t gen_number;
6113 
6114         if (company_id == COMPANY_ID_NONE)
6115                 company_id = COMPANY_ID_SUN;
6116 
6117         if (lu_id->ident_length != 0x10)
6118                 return (STMF_INVALID_ARG);
6119 
6120         p = (uint8_t *)lu_id;
6121 
6122         gen_number = atomic_add_16_nv(&stmf_lu_id_gen_number, 1);
6123 
6124         p[0] = 0xf1; p[1] = 3; p[2] = 0; p[3] = 0x10;
6125         p[4] = ((company_id >> 20) & 0xf) | 0x60;
6126         p[5] = (company_id >> 12) & 0xff;
6127         p[6] = (company_id >> 4) & 0xff;
6128         p[7] = (company_id << 4) & 0xf0;
6129         if (hid == 0 && !localetheraddr((struct ether_addr *)NULL, &mac)) {
6130                 hid = BE_32((int)zone_get_hostid(NULL));
6131         }
6132         if (hid != 0) {
6133                 e[0] = (hid >> 24) & 0xff;
6134                 e[1] = (hid >> 16) & 0xff;
6135                 e[2] = (hid >> 8) & 0xff;
6136                 e[3] = hid & 0xff;
6137                 e[4] = e[5] = 0;
6138         }
6139         bcopy(e, p+8, 6);
6140         uniqtime32(&timestamp32);
6141         *t = BE_32(*t);
6142         bcopy(t, p+14, 4);
6143         p[18] = (gen_number >> 8) & 0xff;
6144         p[19] = gen_number & 0xff;
6145 
6146         return (STMF_SUCCESS);
6147 }
6148 
6149 /*
6150  * saa is sense key, ASC, ASCQ
6151  */
6152 void
6153 stmf_scsilib_send_status(scsi_task_t *task, uint8_t st, uint32_t saa)
6154 {
6155         uint8_t sd[18];
6156         task->task_scsi_status = st;
6157         if (st == 2) {
6158                 bzero(sd, 18);
6159                 sd[0] = 0x70;
6160                 sd[2] = (saa >> 16) & 0xf;
6161                 sd[7] = 10;
6162                 sd[12] = (saa >> 8) & 0xff;
6163                 sd[13] = saa & 0xff;
6164                 task->task_sense_data = sd;
6165                 task->task_sense_length = 18;
6166         } else {
6167                 task->task_sense_data = NULL;
6168                 task->task_sense_length = 0;
6169         }
6170         (void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE);
6171 }
6172 
6173 uint32_t
6174 stmf_scsilib_prepare_vpd_page83(scsi_task_t *task, uint8_t *page,
6175     uint32_t page_len, uint8_t byte0, uint32_t vpd_mask)
6176 {
6177         uint8_t         *p = NULL;
6178         uint8_t         small_buf[32];
6179         uint32_t        sz = 0;
6180         uint32_t        n = 4;
6181         uint32_t        m = 0;
6182         uint32_t        last_bit = 0;
6183 
6184         if (page_len < 4)
6185                 return (0);
6186         if (page_len > 65535)
6187                 page_len = 65535;
6188 
6189         page[0] = byte0;
6190         page[1] = 0x83;
6191 
6192         /* CONSTCOND */
6193         while (1) {
6194                 m += sz;
6195                 if (sz && (page_len > n)) {
6196                         uint32_t copysz;
6197                         copysz = page_len > (n + sz) ? sz : page_len - n;
6198                         bcopy(p, page + n, copysz);
6199                         n += copysz;
6200                 }
6201                 vpd_mask &= ~last_bit;
6202                 if (vpd_mask == 0)
6203                         break;
6204 
6205                 if (vpd_mask & STMF_VPD_LU_ID) {
6206                         last_bit = STMF_VPD_LU_ID;
6207                         sz = task->task_lu->lu_id->ident_length + 4;
6208                         p = (uint8_t *)task->task_lu->lu_id;
6209                         continue;
6210                 } else if (vpd_mask & STMF_VPD_TARGET_ID) {
6211                         last_bit = STMF_VPD_TARGET_ID;
6212                         sz = task->task_lport->lport_id->ident_length + 4;
6213                         p = (uint8_t *)task->task_lport->lport_id;
6214                         continue;
6215                 } else if (vpd_mask & STMF_VPD_TP_GROUP) {
6216                         stmf_i_local_port_t *ilport;
6217                         last_bit = STMF_VPD_TP_GROUP;
6218                         p = small_buf;
6219                         bzero(p, 8);
6220                         p[0] = 1;
6221                         p[1] = 0x15;
6222                         p[3] = 4;
6223                         ilport = (stmf_i_local_port_t *)
6224                             task->task_lport->lport_stmf_private;
6225                         /*
6226                          * If we're in alua mode, group 1 contains all alua
6227                          * participating ports and all standby ports
6228                          * > 255. Otherwise, if we're in alua mode, any local
6229                          * ports (non standby/pppt) are also in group 1 if the
6230                          * alua node is 1. Otherwise the group is 0.
6231                          */
6232                         if ((stmf_state.stmf_alua_state &&
6233                             (ilport->ilport_alua || ilport->ilport_standby) &&
6234                             ilport->ilport_rtpid > 255) ||
6235                             (stmf_state.stmf_alua_node == 1 &&
6236                             ilport->ilport_standby != 1)) {
6237                                 p[7] = 1;       /* Group 1 */
6238                         }
6239                         sz = 8;
6240                         continue;
6241                 } else if (vpd_mask & STMF_VPD_RELATIVE_TP_ID) {
6242                         stmf_i_local_port_t *ilport;
6243 
6244                         last_bit = STMF_VPD_RELATIVE_TP_ID;
6245                         p = small_buf;
6246                         bzero(p, 8);
6247                         p[0] = 1;
6248                         p[1] = 0x14;
6249                         p[3] = 4;
6250                         ilport = (stmf_i_local_port_t *)
6251                             task->task_lport->lport_stmf_private;
6252                         p[6] = (ilport->ilport_rtpid >> 8) & 0xff;
6253                         p[7] = ilport->ilport_rtpid & 0xff;
6254                         sz = 8;
6255                         continue;
6256                 } else {
6257                         cmn_err(CE_WARN, "Invalid vpd_mask");
6258                         break;
6259                 }
6260         }
6261 
6262         page[2] = (m >> 8) & 0xff;
6263         page[3] = m & 0xff;
6264 
6265         return (n);
6266 }
6267 
6268 void
6269 stmf_scsilib_handle_report_tpgs(scsi_task_t *task, stmf_data_buf_t *dbuf)
6270 {
6271         stmf_i_scsi_task_t *itask =
6272             (stmf_i_scsi_task_t *)task->task_stmf_private;
6273         stmf_i_lu_t *ilu =
6274             (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
6275         stmf_xfer_data_t *xd;
6276         uint32_t sz, minsz;
6277 
6278         itask->itask_flags |= ITASK_DEFAULT_HANDLING;
6279         task->task_cmd_xfer_length =
6280             ((((uint32_t)task->task_cdb[6]) << 24) |
6281             (((uint32_t)task->task_cdb[7]) << 16) |
6282             (((uint32_t)task->task_cdb[8]) << 8) |
6283             ((uint32_t)task->task_cdb[9]));
6284 
6285         if (task->task_additional_flags &
6286             TASK_AF_NO_EXPECTED_XFER_LENGTH) {
6287                 task->task_expected_xfer_length =
6288                     task->task_cmd_xfer_length;
6289         }
6290 
6291         if (task->task_cmd_xfer_length == 0) {
6292                 stmf_scsilib_send_status(task, STATUS_GOOD, 0);
6293                 return;
6294         }
6295         if (task->task_cmd_xfer_length < 4) {
6296                 stmf_scsilib_send_status(task, STATUS_CHECK,
6297                     STMF_SAA_INVALID_FIELD_IN_CDB);
6298                 return;
6299         }
6300 
6301         sz = min(task->task_expected_xfer_length,
6302             task->task_cmd_xfer_length);
6303 
6304         xd = stmf_prepare_tpgs_data(ilu->ilu_alua);
6305 
6306         if (xd == NULL) {
6307                 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
6308                     STMF_ALLOC_FAILURE, NULL);
6309                 return;
6310         }
6311 
6312         sz = min(sz, xd->size_left);
6313         xd->size_left = sz;
6314         minsz = min(512, sz);
6315 
6316         if (dbuf == NULL)
6317                 dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
6318         if (dbuf == NULL) {
6319                 kmem_free(xd, xd->alloc_size);
6320                 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
6321                     STMF_ALLOC_FAILURE, NULL);
6322                 return;
6323         }
6324         dbuf->db_lu_private = xd;
6325         stmf_xd_to_dbuf(dbuf, 1);
6326 
6327         dbuf->db_flags = DB_DIRECTION_TO_RPORT;
6328         (void) stmf_xfer_data(task, dbuf, 0);
6329 
6330 }
6331 
6332 void
6333 stmf_scsilib_handle_task_mgmt(scsi_task_t *task)
6334 {
6335 
6336         switch (task->task_mgmt_function) {
6337         /*
6338          * For now we will abort all I/Os on the LU in case of ABORT_TASK_SET
6339          * and ABORT_TASK. But unlike LUN_RESET we will not reset LU state
6340          * in these cases. This needs to be changed to abort only the required
6341          * set.
6342          */
6343         case TM_ABORT_TASK:
6344         case TM_ABORT_TASK_SET:
6345         case TM_CLEAR_TASK_SET:
6346         case TM_LUN_RESET:
6347                 stmf_handle_lun_reset(task);
6348                 /* issue the reset to the proxy node as well */
6349                 if (stmf_state.stmf_alua_state == 1) {
6350                         (void) stmf_proxy_scsi_cmd(task, NULL);
6351                 }
6352                 return;
6353         case TM_TARGET_RESET:
6354         case TM_TARGET_COLD_RESET:
6355         case TM_TARGET_WARM_RESET:
6356                 stmf_handle_target_reset(task);
6357                 return;
6358         default:
6359                 /* We dont support this task mgmt function */
6360                 stmf_scsilib_send_status(task, STATUS_CHECK,
6361                     STMF_SAA_INVALID_FIELD_IN_CMD_IU);
6362                 return;
6363         }
6364 }
6365 
6366 void
6367 stmf_handle_lun_reset(scsi_task_t *task)
6368 {
6369         stmf_i_scsi_task_t *itask;
6370         stmf_i_lu_t *ilu;
6371 
6372         itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
6373         ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
6374 
6375         /*
6376          * To sync with target reset, grab this lock. The LU is not going
6377          * anywhere as there is atleast one task pending (this task).
6378          */
6379         mutex_enter(&stmf_state.stmf_lock);
6380 
6381         if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
6382                 mutex_exit(&stmf_state.stmf_lock);
6383                 stmf_scsilib_send_status(task, STATUS_CHECK,
6384                     STMF_SAA_OPERATION_IN_PROGRESS);
6385                 return;
6386         }
6387         atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE);
6388         mutex_exit(&stmf_state.stmf_lock);
6389 
6390         /*
6391          * Mark this task as the one causing LU reset so that we know who
6392          * was responsible for setting the ILU_RESET_ACTIVE. In case this
6393          * task itself gets aborted, we will clear ILU_RESET_ACTIVE.
6394          */
6395         itask->itask_flags |= ITASK_DEFAULT_HANDLING | ITASK_CAUSING_LU_RESET;
6396 
6397         /* Initiatiate abort on all commands on this LU except this one */
6398         stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED, task->task_lu);
6399 
6400         /* Start polling on this task */
6401         if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
6402             != STMF_SUCCESS) {
6403                 stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE,
6404                     NULL);
6405                 return;
6406         }
6407 }
6408 
6409 void
6410 stmf_handle_target_reset(scsi_task_t *task)
6411 {
6412         stmf_i_scsi_task_t *itask;
6413         stmf_i_lu_t *ilu;
6414         stmf_i_scsi_session_t *iss;
6415         stmf_lun_map_t *lm;
6416         stmf_lun_map_ent_t *lm_ent;
6417         int i, lf;
6418 
6419         itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
6420         iss = (stmf_i_scsi_session_t *)task->task_session->ss_stmf_private;
6421         ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
6422 
6423         /*
6424          * To sync with LUN reset, grab this lock. The session is not going
6425          * anywhere as there is atleast one task pending (this task).
6426          */
6427         mutex_enter(&stmf_state.stmf_lock);
6428 
6429         /* Grab the session lock as a writer to prevent any changes in it */
6430         rw_enter(iss->iss_lockp, RW_WRITER);
6431 
6432         if (iss->iss_flags & ISS_RESET_ACTIVE) {
6433                 rw_exit(iss->iss_lockp);
6434                 mutex_exit(&stmf_state.stmf_lock);
6435                 stmf_scsilib_send_status(task, STATUS_CHECK,
6436                     STMF_SAA_OPERATION_IN_PROGRESS);
6437                 return;
6438         }
6439         atomic_or_32(&iss->iss_flags, ISS_RESET_ACTIVE);
6440 
6441         /*
6442          * Now go through each LUN in this session and make sure all of them
6443          * can be reset.
6444          */
6445         lm = iss->iss_sm;
6446         for (i = 0, lf = 0; i < lm->lm_nentries; i++) {
6447                 if (lm->lm_plus[i] == NULL)
6448                         continue;
6449                 lf++;
6450                 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
6451                 ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private);
6452                 if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
6453                         atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
6454                         rw_exit(iss->iss_lockp);
6455                         mutex_exit(&stmf_state.stmf_lock);
6456                         stmf_scsilib_send_status(task, STATUS_CHECK,
6457                             STMF_SAA_OPERATION_IN_PROGRESS);
6458                         return;
6459                 }
6460         }
6461         if (lf == 0) {
6462                 /* No luns in this session */
6463                 atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
6464                 rw_exit(iss->iss_lockp);
6465                 mutex_exit(&stmf_state.stmf_lock);
6466                 stmf_scsilib_send_status(task, STATUS_GOOD, 0);
6467                 return;
6468         }
6469 
6470         /* ok, start the damage */
6471         itask->itask_flags |= ITASK_DEFAULT_HANDLING |
6472             ITASK_CAUSING_TARGET_RESET;
6473         for (i = 0; i < lm->lm_nentries; i++) {
6474                 if (lm->lm_plus[i] == NULL)
6475                         continue;
6476                 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
6477                 ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private);
6478                 atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE);
6479         }
6480 
6481         for (i = 0; i < lm->lm_nentries; i++) {
6482                 if (lm->lm_plus[i] == NULL)
6483                         continue;
6484                 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
6485                 stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED,
6486                     lm_ent->ent_lu);
6487         }
6488 
6489         rw_exit(iss->iss_lockp);
6490         mutex_exit(&stmf_state.stmf_lock);
6491 
6492         /* Start polling on this task */
6493         if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
6494             != STMF_SUCCESS) {
6495                 stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE,
6496                     NULL);
6497                 return;
6498         }
6499 }
6500 
6501 int
6502 stmf_handle_cmd_during_ic(stmf_i_scsi_task_t *itask)
6503 {
6504         scsi_task_t *task = itask->itask_task;
6505         stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
6506             task->task_session->ss_stmf_private;
6507 
6508         rw_enter(iss->iss_lockp, RW_WRITER);
6509         if (((iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) == 0) ||
6510             (task->task_cdb[0] == SCMD_INQUIRY)) {
6511                 rw_exit(iss->iss_lockp);
6512                 return (0);
6513         }
6514         atomic_and_32(&iss->iss_flags,
6515             ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
6516         rw_exit(iss->iss_lockp);
6517 
6518         if (task->task_cdb[0] == SCMD_REPORT_LUNS) {
6519                 return (0);
6520         }
6521         stmf_scsilib_send_status(task, STATUS_CHECK,
6522             STMF_SAA_REPORT_LUN_DATA_HAS_CHANGED);
6523         return (1);
6524 }
6525 
6526 void
6527 stmf_worker_init()
6528 {
6529         uint32_t i;
6530 
6531         /* Make local copy of global tunables */
6532         stmf_i_max_nworkers = stmf_max_nworkers;
6533         stmf_i_min_nworkers = stmf_min_nworkers;
6534 
6535         ASSERT(stmf_workers == NULL);
6536         if (stmf_i_min_nworkers < 4) {
6537                 stmf_i_min_nworkers = 4;
6538         }
6539         if (stmf_i_max_nworkers < stmf_i_min_nworkers) {
6540                 stmf_i_max_nworkers = stmf_i_min_nworkers;
6541         }
6542         stmf_workers = (stmf_worker_t *)kmem_zalloc(
6543             sizeof (stmf_worker_t) * stmf_i_max_nworkers, KM_SLEEP);
6544         for (i = 0; i < stmf_i_max_nworkers; i++) {
6545                 stmf_worker_t *w = &stmf_workers[i];
6546                 mutex_init(&w->worker_lock, NULL, MUTEX_DRIVER, NULL);
6547                 cv_init(&w->worker_cv, NULL, CV_DRIVER, NULL);
6548         }
6549         stmf_worker_mgmt_delay = drv_usectohz(20 * 1000);
6550         stmf_workers_state = STMF_WORKERS_ENABLED;
6551 
6552         /* Workers will be started by stmf_worker_mgmt() */
6553 
6554         /* Lets wait for atleast one worker to start */
6555         while (stmf_nworkers_cur == 0)
6556                 delay(drv_usectohz(20 * 1000));
6557         stmf_worker_mgmt_delay = drv_usectohz(3 * 1000 * 1000);
6558 }
6559 
6560 stmf_status_t
6561 stmf_worker_fini()
6562 {
6563         int i;
6564         clock_t sb;
6565 
6566         if (stmf_workers_state == STMF_WORKERS_DISABLED)
6567                 return (STMF_SUCCESS);
6568         ASSERT(stmf_workers);
6569         stmf_workers_state = STMF_WORKERS_DISABLED;
6570         stmf_worker_mgmt_delay = drv_usectohz(20 * 1000);
6571         cv_signal(&stmf_state.stmf_cv);
6572 
6573         sb = ddi_get_lbolt() + drv_usectohz(10 * 1000 * 1000);
6574         /* Wait for all the threads to die */
6575         while (stmf_nworkers_cur != 0) {
6576                 if (ddi_get_lbolt() > sb) {
6577                         stmf_workers_state = STMF_WORKERS_ENABLED;
6578                         return (STMF_BUSY);
6579                 }
6580                 delay(drv_usectohz(100 * 1000));
6581         }
6582         for (i = 0; i < stmf_i_max_nworkers; i++) {
6583                 stmf_worker_t *w = &stmf_workers[i];
6584                 mutex_destroy(&w->worker_lock);
6585                 cv_destroy(&w->worker_cv);
6586         }
6587         kmem_free(stmf_workers, sizeof (stmf_worker_t) * stmf_i_max_nworkers);
6588         stmf_workers = NULL;
6589 
6590         return (STMF_SUCCESS);
6591 }
6592 
6593 void
6594 stmf_worker_task(void *arg)
6595 {
6596         stmf_worker_t *w;
6597         stmf_i_scsi_session_t *iss;
6598         scsi_task_t *task;
6599         stmf_i_scsi_task_t *itask;
6600         stmf_data_buf_t *dbuf;
6601         stmf_lu_t *lu;
6602         clock_t wait_timer = 0;
6603         clock_t wait_ticks, wait_delta = 0;
6604         uint32_t old, new;
6605         uint8_t curcmd;
6606         uint8_t abort_free;
6607         uint8_t wait_queue;
6608         uint8_t dec_qdepth;
6609 
6610         w = (stmf_worker_t *)arg;
6611         wait_ticks = drv_usectohz(10000);
6612 
6613         DTRACE_PROBE1(worker__create, stmf_worker_t, w);
6614         mutex_enter(&w->worker_lock);
6615         w->worker_flags |= STMF_WORKER_STARTED | STMF_WORKER_ACTIVE;
6616 stmf_worker_loop:;
6617         if ((w->worker_ref_count == 0) &&
6618             (w->worker_flags & STMF_WORKER_TERMINATE)) {
6619                 w->worker_flags &= ~(STMF_WORKER_STARTED |
6620                     STMF_WORKER_ACTIVE | STMF_WORKER_TERMINATE);
6621                 w->worker_tid = NULL;
6622                 mutex_exit(&w->worker_lock);
6623                 DTRACE_PROBE1(worker__destroy, stmf_worker_t, w);
6624                 thread_exit();
6625         }
6626         /* CONSTCOND */
6627         while (1) {
6628                 dec_qdepth = 0;
6629                 if (wait_timer && (ddi_get_lbolt() >= wait_timer)) {
6630                         wait_timer = 0;
6631                         wait_delta = 0;
6632                         if (w->worker_wait_head) {
6633                                 ASSERT(w->worker_wait_tail);
6634                                 if (w->worker_task_head == NULL)
6635                                         w->worker_task_head =
6636                                             w->worker_wait_head;
6637                                 else
6638                                         w->worker_task_tail->itask_worker_next =
6639                                             w->worker_wait_head;
6640                                 w->worker_task_tail = w->worker_wait_tail;
6641                                 w->worker_wait_head = w->worker_wait_tail =
6642                                     NULL;
6643                         }
6644                 }
6645                 if ((itask = w->worker_task_head) == NULL) {
6646                         break;
6647                 }
6648                 task = itask->itask_task;
6649                 DTRACE_PROBE2(worker__active, stmf_worker_t, w,
6650                     scsi_task_t *, task);
6651                 w->worker_task_head = itask->itask_worker_next;
6652                 if (w->worker_task_head == NULL)
6653                         w->worker_task_tail = NULL;
6654 
6655                 wait_queue = 0;
6656                 abort_free = 0;
6657                 if (itask->itask_ncmds > 0) {
6658                         curcmd = itask->itask_cmd_stack[itask->itask_ncmds - 1];
6659                 } else {
6660                         ASSERT(itask->itask_flags & ITASK_BEING_ABORTED);
6661                 }
6662                 do {
6663                         old = itask->itask_flags;
6664                         if (old & ITASK_BEING_ABORTED) {
6665                                 itask->itask_ncmds = 1;
6666                                 curcmd = itask->itask_cmd_stack[0] =
6667                                     ITASK_CMD_ABORT;
6668                                 goto out_itask_flag_loop;
6669                         } else if ((curcmd & ITASK_CMD_MASK) ==
6670                             ITASK_CMD_NEW_TASK) {
6671                                 /*
6672                                  * set ITASK_KSTAT_IN_RUNQ, this flag
6673                                  * will not reset until task completed
6674                                  */
6675                                 new = old | ITASK_KNOWN_TO_LU |
6676                                     ITASK_KSTAT_IN_RUNQ;
6677                         } else {
6678                                 goto out_itask_flag_loop;
6679                         }
6680                 } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
6681 
6682 out_itask_flag_loop:
6683 
6684                 /*
6685                  * Decide if this task needs to go to a queue and/or if
6686                  * we can decrement the itask_cmd_stack.
6687                  */
6688                 if (curcmd == ITASK_CMD_ABORT) {
6689                         if (itask->itask_flags & (ITASK_KNOWN_TO_LU |
6690                             ITASK_KNOWN_TO_TGT_PORT)) {
6691                                 wait_queue = 1;
6692                         } else {
6693                                 abort_free = 1;
6694                         }
6695                 } else if ((curcmd & ITASK_CMD_POLL) &&
6696                     (itask->itask_poll_timeout > ddi_get_lbolt())) {
6697                         wait_queue = 1;
6698                 }
6699 
6700                 if (wait_queue) {
6701                         itask->itask_worker_next = NULL;
6702                         if (w->worker_wait_tail) {
6703                                 w->worker_wait_tail->itask_worker_next = itask;
6704                         } else {
6705                                 w->worker_wait_head = itask;
6706                         }
6707                         w->worker_wait_tail = itask;
6708                         if (wait_timer == 0) {
6709                                 wait_timer = ddi_get_lbolt() + wait_ticks;
6710                                 wait_delta = wait_ticks;
6711                         }
6712                 } else if ((--(itask->itask_ncmds)) != 0) {
6713                         itask->itask_worker_next = NULL;
6714                         if (w->worker_task_tail) {
6715                                 w->worker_task_tail->itask_worker_next = itask;
6716                         } else {
6717                                 w->worker_task_head = itask;
6718                         }
6719                         w->worker_task_tail = itask;
6720                 } else {
6721                         atomic_and_32(&itask->itask_flags,
6722                             ~ITASK_IN_WORKER_QUEUE);
6723                         /*
6724                          * This is where the queue depth should go down by
6725                          * one but we delay that on purpose to account for
6726                          * the call into the provider. The actual decrement
6727                          * happens after the worker has done its job.
6728                          */
6729                         dec_qdepth = 1;
6730                         itask->itask_waitq_time +=
6731                             gethrtime() - itask->itask_waitq_enter_timestamp;
6732                 }
6733 
6734                 /* We made it here means we are going to call LU */
6735                 if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0)
6736                         lu = task->task_lu;
6737                 else
6738                         lu = dlun0;
6739                 dbuf = itask->itask_dbufs[ITASK_CMD_BUF_NDX(curcmd)];
6740                 mutex_exit(&w->worker_lock);
6741                 curcmd &= ITASK_CMD_MASK;
6742                 stmf_task_audit(itask, TE_PROCESS_CMD, curcmd, dbuf);
6743                 switch (curcmd) {
6744                 case ITASK_CMD_NEW_TASK:
6745                         iss = (stmf_i_scsi_session_t *)
6746                             task->task_session->ss_stmf_private;
6747                         stmf_itl_lu_new_task(itask);
6748                         if (iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) {
6749                                 if (stmf_handle_cmd_during_ic(itask))
6750                                         break;
6751                         }
6752 #ifdef  DEBUG
6753                         if (stmf_drop_task_counter > 0) {
6754                                 if (atomic_add_32_nv(
6755                                     (uint32_t *)&stmf_drop_task_counter,
6756                                     -1) == 1) {
6757                                         break;
6758                                 }
6759                         }
6760 #endif
6761                         DTRACE_PROBE1(scsi__task__start, scsi_task_t *, task);
6762                         lu->lu_new_task(task, dbuf);
6763                         break;
6764                 case ITASK_CMD_DATA_XFER_DONE:
6765                         lu->lu_dbuf_xfer_done(task, dbuf);
6766                         break;
6767                 case ITASK_CMD_STATUS_DONE:
6768                         lu->lu_send_status_done(task);
6769                         break;
6770                 case ITASK_CMD_ABORT:
6771                         if (abort_free) {
6772                                 stmf_task_free(task);
6773                         } else {
6774                                 stmf_do_task_abort(task);
6775                         }
6776                         break;
6777                 case ITASK_CMD_POLL_LU:
6778                         if (!wait_queue) {
6779                                 lu->lu_task_poll(task);
6780                         }
6781                         break;
6782                 case ITASK_CMD_POLL_LPORT:
6783                         if (!wait_queue)
6784                                 task->task_lport->lport_task_poll(task);
6785                         break;
6786                 case ITASK_CMD_SEND_STATUS:
6787                 /* case ITASK_CMD_XFER_DATA: */
6788                         break;
6789                 }
6790                 mutex_enter(&w->worker_lock);
6791                 if (dec_qdepth) {
6792                         w->worker_queue_depth--;
6793                 }
6794         }
6795         if ((w->worker_flags & STMF_WORKER_TERMINATE) && (wait_timer == 0)) {
6796                 if (w->worker_ref_count == 0)
6797                         goto stmf_worker_loop;
6798                 else {
6799                         wait_timer = ddi_get_lbolt() + 1;
6800                         wait_delta = 1;
6801                 }
6802         }
6803         w->worker_flags &= ~STMF_WORKER_ACTIVE;
6804         if (wait_timer) {
6805                 DTRACE_PROBE1(worker__timed__sleep, stmf_worker_t, w);
6806                 (void) cv_reltimedwait(&w->worker_cv, &w->worker_lock,
6807                     wait_delta, TR_CLOCK_TICK);
6808         } else {
6809                 DTRACE_PROBE1(worker__sleep, stmf_worker_t, w);
6810                 cv_wait(&w->worker_cv, &w->worker_lock);
6811         }
6812         DTRACE_PROBE1(worker__wakeup, stmf_worker_t, w);
6813         w->worker_flags |= STMF_WORKER_ACTIVE;
6814         goto stmf_worker_loop;
6815 }
6816 
6817 void
6818 stmf_worker_mgmt()
6819 {
6820         int i;
6821         int workers_needed;
6822         uint32_t qd;
6823         clock_t tps, d = 0;
6824         uint32_t cur_max_ntasks = 0;
6825         stmf_worker_t *w;
6826 
6827         /* Check if we are trying to increase the # of threads */
6828         for (i = stmf_nworkers_cur; i < stmf_nworkers_needed; i++) {
6829                 if (stmf_workers[i].worker_flags & STMF_WORKER_STARTED) {
6830                         stmf_nworkers_cur++;
6831                         stmf_nworkers_accepting_cmds++;
6832                 } else {
6833                         /* Wait for transition to complete */
6834                         return;
6835                 }
6836         }
6837         /* Check if we are trying to decrease the # of workers */
6838         for (i = (stmf_nworkers_cur - 1); i >= stmf_nworkers_needed; i--) {
6839                 if ((stmf_workers[i].worker_flags & STMF_WORKER_STARTED) == 0) {
6840                         stmf_nworkers_cur--;
6841                         /*
6842                          * stmf_nworkers_accepting_cmds has already been
6843                          * updated by the request to reduce the # of workers.
6844                          */
6845                 } else {
6846                         /* Wait for transition to complete */
6847                         return;
6848                 }
6849         }
6850         /* Check if we are being asked to quit */
6851         if (stmf_workers_state != STMF_WORKERS_ENABLED) {
6852                 if (stmf_nworkers_cur) {
6853                         workers_needed = 0;
6854                         goto worker_mgmt_trigger_change;
6855                 }
6856                 return;
6857         }
6858         /* Check if we are starting */
6859         if (stmf_nworkers_cur < stmf_i_min_nworkers) {
6860                 workers_needed = stmf_i_min_nworkers;
6861                 goto worker_mgmt_trigger_change;
6862         }
6863 
6864         tps = drv_usectohz(1 * 1000 * 1000);
6865         if ((stmf_wm_last != 0) &&
6866             ((d = ddi_get_lbolt() - stmf_wm_last) > tps)) {
6867                 qd = 0;
6868                 for (i = 0; i < stmf_nworkers_accepting_cmds; i++) {
6869                         qd += stmf_workers[i].worker_max_qdepth_pu;
6870                         stmf_workers[i].worker_max_qdepth_pu = 0;
6871                         if (stmf_workers[i].worker_max_sys_qdepth_pu >
6872                             cur_max_ntasks) {
6873                                 cur_max_ntasks =
6874                                     stmf_workers[i].worker_max_sys_qdepth_pu;
6875                         }
6876                         stmf_workers[i].worker_max_sys_qdepth_pu = 0;
6877                 }
6878         }
6879         stmf_wm_last = ddi_get_lbolt();
6880         if (d <= tps) {
6881                 /* still ramping up */
6882                 return;
6883         }
6884         /* max qdepth cannot be more than max tasks */
6885         if (qd > cur_max_ntasks)
6886                 qd = cur_max_ntasks;
6887 
6888         /* See if we have more workers */
6889         if (qd < stmf_nworkers_accepting_cmds) {
6890                 /*
6891                  * Since we dont reduce the worker count right away, monitor
6892                  * the highest load during the scale_down_delay.
6893                  */
6894                 if (qd > stmf_worker_scale_down_qd)
6895                         stmf_worker_scale_down_qd = qd;
6896                 if (stmf_worker_scale_down_timer == 0) {
6897                         stmf_worker_scale_down_timer = ddi_get_lbolt() +
6898                             drv_usectohz(stmf_worker_scale_down_delay *
6899                             1000 * 1000);
6900                         return;
6901                 }
6902                 if (ddi_get_lbolt() < stmf_worker_scale_down_timer) {
6903                         return;
6904                 }
6905                 /* Its time to reduce the workers */
6906                 if (stmf_worker_scale_down_qd < stmf_i_min_nworkers)
6907                         stmf_worker_scale_down_qd = stmf_i_min_nworkers;
6908                 if (stmf_worker_scale_down_qd > stmf_i_max_nworkers)
6909                         stmf_worker_scale_down_qd = stmf_i_max_nworkers;
6910                 if (stmf_worker_scale_down_qd == stmf_nworkers_cur)
6911                         return;
6912                 workers_needed = stmf_worker_scale_down_qd;
6913                 stmf_worker_scale_down_qd = 0;
6914                 goto worker_mgmt_trigger_change;
6915         }
6916         stmf_worker_scale_down_qd = 0;
6917         stmf_worker_scale_down_timer = 0;
6918         if (qd > stmf_i_max_nworkers)
6919                 qd = stmf_i_max_nworkers;
6920         if (qd < stmf_i_min_nworkers)
6921                 qd = stmf_i_min_nworkers;
6922         if (qd == stmf_nworkers_cur)
6923                 return;
6924         workers_needed = qd;
6925         goto worker_mgmt_trigger_change;
6926 
6927         /* NOTREACHED */
6928         return;
6929 
6930 worker_mgmt_trigger_change:
6931         ASSERT(workers_needed != stmf_nworkers_cur);
6932         if (workers_needed > stmf_nworkers_cur) {
6933                 stmf_nworkers_needed = workers_needed;
6934                 for (i = stmf_nworkers_cur; i < workers_needed; i++) {
6935                         w = &stmf_workers[i];
6936                         w->worker_tid = thread_create(NULL, 0, stmf_worker_task,
6937                             (void *)&stmf_workers[i], 0, &p0, TS_RUN,
6938                             minclsyspri);
6939                 }
6940                 return;
6941         }
6942         /* At this point we know that we are decreasing the # of workers */
6943         stmf_nworkers_accepting_cmds = workers_needed;
6944         stmf_nworkers_needed = workers_needed;
6945         /* Signal the workers that its time to quit */
6946         for (i = (stmf_nworkers_cur - 1); i >= stmf_nworkers_needed; i--) {
6947                 w = &stmf_workers[i];
6948                 ASSERT(w && (w->worker_flags & STMF_WORKER_STARTED));
6949                 mutex_enter(&w->worker_lock);
6950                 w->worker_flags |= STMF_WORKER_TERMINATE;
6951                 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
6952                         cv_signal(&w->worker_cv);
6953                 mutex_exit(&w->worker_lock);
6954         }
6955 }
6956 
6957 /*
6958  * Fills out a dbuf from stmf_xfer_data_t (contained in the db_lu_private).
6959  * If all the data has been filled out, frees the xd and makes
6960  * db_lu_private NULL.
6961  */
6962 void
6963 stmf_xd_to_dbuf(stmf_data_buf_t *dbuf, int set_rel_off)
6964 {
6965         stmf_xfer_data_t *xd;
6966         uint8_t *p;
6967         int i;
6968         uint32_t s;
6969 
6970         xd = (stmf_xfer_data_t *)dbuf->db_lu_private;
6971         dbuf->db_data_size = 0;
6972         if (set_rel_off)
6973                 dbuf->db_relative_offset = xd->size_done;
6974         for (i = 0; i < dbuf->db_sglist_length; i++) {
6975                 s = min(xd->size_left, dbuf->db_sglist[i].seg_length);
6976                 p = &xd->buf[xd->size_done];
6977                 bcopy(p, dbuf->db_sglist[i].seg_addr, s);
6978                 xd->size_left -= s;
6979                 xd->size_done += s;
6980                 dbuf->db_data_size += s;
6981                 if (xd->size_left == 0) {
6982                         kmem_free(xd, xd->alloc_size);
6983                         dbuf->db_lu_private = NULL;
6984                         return;
6985                 }
6986         }
6987 }
6988 
6989 /* ARGSUSED */
6990 stmf_status_t
6991 stmf_dlun0_task_alloc(scsi_task_t *task)
6992 {
6993         return (STMF_SUCCESS);
6994 }
6995 
6996 void
6997 stmf_dlun0_new_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
6998 {
6999         uint8_t *cdbp = (uint8_t *)&task->task_cdb[0];
7000         stmf_i_scsi_session_t *iss;
7001         uint32_t sz, minsz;
7002         uint8_t *p;
7003         stmf_xfer_data_t *xd;
7004         uint8_t inq_page_length = 31;
7005 
7006         if (task->task_mgmt_function) {
7007                 stmf_scsilib_handle_task_mgmt(task);
7008                 return;
7009         }
7010 
7011         switch (cdbp[0]) {
7012         case SCMD_INQUIRY:
7013                 /*
7014                  * Basic protocol checks.  In addition, only reply to
7015                  * standard inquiry.  Otherwise, the LU provider needs
7016                  * to respond.
7017                  */
7018 
7019                 if (cdbp[2] || (cdbp[1] & 1) || cdbp[5]) {
7020                         stmf_scsilib_send_status(task, STATUS_CHECK,
7021                             STMF_SAA_INVALID_FIELD_IN_CDB);
7022                         return;
7023                 }
7024 
7025                 task->task_cmd_xfer_length =
7026                     (((uint32_t)cdbp[3]) << 8) | cdbp[4];
7027 
7028                 if (task->task_additional_flags &
7029                     TASK_AF_NO_EXPECTED_XFER_LENGTH) {
7030                         task->task_expected_xfer_length =
7031                             task->task_cmd_xfer_length;
7032                 }
7033 
7034                 sz = min(task->task_expected_xfer_length,
7035                     min(36, task->task_cmd_xfer_length));
7036                 minsz = 36;
7037 
7038                 if (sz == 0) {
7039                         stmf_scsilib_send_status(task, STATUS_GOOD, 0);
7040                         return;
7041                 }
7042 
7043                 if (dbuf && (dbuf->db_sglist[0].seg_length < 36)) {
7044                         /*
7045                          * Ignore any preallocated dbuf if the size is less
7046                          * than 36. It will be freed during the task_free.
7047                          */
7048                         dbuf = NULL;
7049                 }
7050                 if (dbuf == NULL)
7051                         dbuf = stmf_alloc_dbuf(task, minsz, &minsz, 0);
7052                 if ((dbuf == NULL) || (dbuf->db_sglist[0].seg_length < sz)) {
7053                         stmf_abort(STMF_QUEUE_TASK_ABORT, task,
7054                             STMF_ALLOC_FAILURE, NULL);
7055                         return;
7056                 }
7057                 dbuf->db_lu_private = NULL;
7058 
7059                 p = dbuf->db_sglist[0].seg_addr;
7060 
7061                 /*
7062                  * Standard inquiry handling only.
7063                  */
7064 
7065                 bzero(p, inq_page_length + 5);
7066 
7067                 p[0] = DPQ_SUPPORTED | DTYPE_UNKNOWN;
7068                 p[2] = 5;
7069                 p[3] = 0x12;
7070                 p[4] = inq_page_length;
7071                 p[6] = 0x80;
7072 
7073                 (void) strncpy((char *)p+8, "SUN     ", 8);
7074                 (void) strncpy((char *)p+16, "COMSTAR          ", 16);
7075                 (void) strncpy((char *)p+32, "1.0 ", 4);
7076 
7077                 dbuf->db_data_size = sz;
7078                 dbuf->db_relative_offset = 0;
7079                 dbuf->db_flags = DB_DIRECTION_TO_RPORT;
7080                 (void) stmf_xfer_data(task, dbuf, 0);
7081 
7082                 return;
7083 
7084         case SCMD_REPORT_LUNS:
7085                 task->task_cmd_xfer_length =
7086                     ((((uint32_t)task->task_cdb[6]) << 24) |
7087                     (((uint32_t)task->task_cdb[7]) << 16) |
7088                     (((uint32_t)task->task_cdb[8]) << 8) |
7089                     ((uint32_t)task->task_cdb[9]));
7090 
7091                 if (task->task_additional_flags &
7092                     TASK_AF_NO_EXPECTED_XFER_LENGTH) {
7093                         task->task_expected_xfer_length =
7094                             task->task_cmd_xfer_length;
7095                 }
7096 
7097                 sz = min(task->task_expected_xfer_length,
7098                     task->task_cmd_xfer_length);
7099 
7100                 if (sz < 16) {
7101                         stmf_scsilib_send_status(task, STATUS_CHECK,
7102                             STMF_SAA_INVALID_FIELD_IN_CDB);
7103                         return;
7104                 }
7105 
7106                 iss = (stmf_i_scsi_session_t *)
7107                     task->task_session->ss_stmf_private;
7108                 rw_enter(iss->iss_lockp, RW_WRITER);
7109                 xd = stmf_session_prepare_report_lun_data(iss->iss_sm);
7110                 rw_exit(iss->iss_lockp);
7111 
7112                 if (xd == NULL) {
7113                         stmf_abort(STMF_QUEUE_TASK_ABORT, task,
7114                             STMF_ALLOC_FAILURE, NULL);
7115                         return;
7116                 }
7117 
7118                 sz = min(sz, xd->size_left);
7119                 xd->size_left = sz;
7120                 minsz = min(512, sz);
7121 
7122                 if (dbuf == NULL)
7123                         dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
7124                 if (dbuf == NULL) {
7125                         kmem_free(xd, xd->alloc_size);
7126                         stmf_abort(STMF_QUEUE_TASK_ABORT, task,
7127                             STMF_ALLOC_FAILURE, NULL);
7128                         return;
7129                 }
7130                 dbuf->db_lu_private = xd;
7131                 stmf_xd_to_dbuf(dbuf, 1);
7132 
7133                 atomic_and_32(&iss->iss_flags,
7134                     ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
7135                 dbuf->db_flags = DB_DIRECTION_TO_RPORT;
7136                 (void) stmf_xfer_data(task, dbuf, 0);
7137                 return;
7138         }
7139 
7140         stmf_scsilib_send_status(task, STATUS_CHECK, STMF_SAA_INVALID_OPCODE);
7141 }
7142 
7143 void
7144 stmf_dlun0_dbuf_done(scsi_task_t *task, stmf_data_buf_t *dbuf)
7145 {
7146         stmf_i_scsi_task_t *itask =
7147             (stmf_i_scsi_task_t *)task->task_stmf_private;
7148 
7149         if (dbuf->db_xfer_status != STMF_SUCCESS) {
7150                 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
7151                     dbuf->db_xfer_status, NULL);
7152                 return;
7153         }
7154         task->task_nbytes_transferred += dbuf->db_data_size;
7155         if (dbuf->db_lu_private) {
7156                 /* There is more */
7157                 stmf_xd_to_dbuf(dbuf, 1);
7158                 (void) stmf_xfer_data(task, dbuf, 0);
7159                 return;
7160         }
7161 
7162         stmf_free_dbuf(task, dbuf);
7163         /*
7164          * If this is a proxy task, it will need to be completed from the
7165          * proxy port provider. This message lets pppt know that the xfer
7166          * is complete. When we receive the status from pppt, we will
7167          * then relay that status back to the lport.
7168          */
7169         if (itask->itask_flags & ITASK_PROXY_TASK) {
7170                 stmf_ic_msg_t *ic_xfer_done_msg = NULL;
7171                 stmf_status_t ic_ret = STMF_FAILURE;
7172                 uint64_t session_msg_id;
7173                 mutex_enter(&stmf_state.stmf_lock);
7174                 session_msg_id = stmf_proxy_msg_id++;
7175                 mutex_exit(&stmf_state.stmf_lock);
7176                 /* send xfer done status to pppt */
7177                 ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc(
7178                     itask->itask_proxy_msg_id,
7179                     task->task_session->ss_session_id,
7180                     STMF_SUCCESS, session_msg_id);
7181                 if (ic_xfer_done_msg) {
7182                         ic_ret = ic_tx_msg(ic_xfer_done_msg);
7183                         if (ic_ret != STMF_IC_MSG_SUCCESS) {
7184                                 cmn_err(CE_WARN, "unable to xmit session msg");
7185                         }
7186                 }
7187                 /* task will be completed from pppt */
7188                 return;
7189         }
7190         stmf_scsilib_send_status(task, STATUS_GOOD, 0);
7191 }
7192 
7193 /* ARGSUSED */
7194 void
7195 stmf_dlun0_status_done(scsi_task_t *task)
7196 {
7197 }
7198 
7199 /* ARGSUSED */
7200 void
7201 stmf_dlun0_task_free(scsi_task_t *task)
7202 {
7203 }
7204 
7205 /* ARGSUSED */
7206 stmf_status_t
7207 stmf_dlun0_abort(struct stmf_lu *lu, int abort_cmd, void *arg, uint32_t flags)
7208 {
7209         scsi_task_t *task = (scsi_task_t *)arg;
7210         stmf_i_scsi_task_t *itask =
7211             (stmf_i_scsi_task_t *)task->task_stmf_private;
7212         stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7213         int i;
7214         uint8_t map;
7215 
7216         if ((task->task_mgmt_function) && (itask->itask_flags &
7217             (ITASK_CAUSING_LU_RESET | ITASK_CAUSING_TARGET_RESET))) {
7218                 switch (task->task_mgmt_function) {
7219                 case TM_ABORT_TASK:
7220                 case TM_ABORT_TASK_SET:
7221                 case TM_CLEAR_TASK_SET:
7222                 case TM_LUN_RESET:
7223                         atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
7224                         break;
7225                 case TM_TARGET_RESET:
7226                 case TM_TARGET_COLD_RESET:
7227                 case TM_TARGET_WARM_RESET:
7228                         stmf_abort_target_reset(task);
7229                         break;
7230                 }
7231                 return (STMF_ABORT_SUCCESS);
7232         }
7233 
7234         /*
7235          * OK so its not a task mgmt. Make sure we free any xd sitting
7236          * inside any dbuf.
7237          */
7238         if ((map = itask->itask_allocated_buf_map) != 0) {
7239                 for (i = 0; i < 4; i++) {
7240                         if ((map & 1) &&
7241                             ((itask->itask_dbufs[i])->db_lu_private)) {
7242                                 stmf_xfer_data_t *xd;
7243                                 stmf_data_buf_t *dbuf;
7244 
7245                                 dbuf = itask->itask_dbufs[i];
7246                                 xd = (stmf_xfer_data_t *)dbuf->db_lu_private;
7247                                 dbuf->db_lu_private = NULL;
7248                                 kmem_free(xd, xd->alloc_size);
7249                         }
7250                         map >>= 1;
7251                 }
7252         }
7253         return (STMF_ABORT_SUCCESS);
7254 }
7255 
7256 void
7257 stmf_dlun0_task_poll(struct scsi_task *task)
7258 {
7259         /* Right now we only do this for handling task management functions */
7260         ASSERT(task->task_mgmt_function);
7261 
7262         switch (task->task_mgmt_function) {
7263         case TM_ABORT_TASK:
7264         case TM_ABORT_TASK_SET:
7265         case TM_CLEAR_TASK_SET:
7266         case TM_LUN_RESET:
7267                 (void) stmf_lun_reset_poll(task->task_lu, task, 0);
7268                 return;
7269         case TM_TARGET_RESET:
7270         case TM_TARGET_COLD_RESET:
7271         case TM_TARGET_WARM_RESET:
7272                 stmf_target_reset_poll(task);
7273                 return;
7274         }
7275 }
7276 
7277 /* ARGSUSED */
7278 void
7279 stmf_dlun0_ctl(struct stmf_lu *lu, int cmd, void *arg)
7280 {
7281         /* This function will never be called */
7282         cmn_err(CE_WARN, "stmf_dlun0_ctl called with cmd %x", cmd);
7283 }
7284 
7285 void
7286 stmf_dlun_init()
7287 {
7288         stmf_i_lu_t *ilu;
7289 
7290         dlun0 = stmf_alloc(STMF_STRUCT_STMF_LU, 0, 0);
7291         dlun0->lu_task_alloc = stmf_dlun0_task_alloc;
7292         dlun0->lu_new_task = stmf_dlun0_new_task;
7293         dlun0->lu_dbuf_xfer_done = stmf_dlun0_dbuf_done;
7294         dlun0->lu_send_status_done = stmf_dlun0_status_done;
7295         dlun0->lu_task_free = stmf_dlun0_task_free;
7296         dlun0->lu_abort = stmf_dlun0_abort;
7297         dlun0->lu_task_poll = stmf_dlun0_task_poll;
7298         dlun0->lu_ctl = stmf_dlun0_ctl;
7299 
7300         ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private;
7301         ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
7302 }
7303 
7304 stmf_status_t
7305 stmf_dlun_fini()
7306 {
7307         stmf_i_lu_t *ilu;
7308 
7309         ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private;
7310 
7311         ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free);
7312         if (ilu->ilu_ntasks) {
7313                 stmf_i_scsi_task_t *itask, *nitask;
7314 
7315                 nitask = ilu->ilu_tasks;
7316                 do {
7317                         itask = nitask;
7318                         nitask = itask->itask_lu_next;
7319                         dlun0->lu_task_free(itask->itask_task);
7320                         stmf_free(itask->itask_task);
7321                 } while (nitask != NULL);
7322 
7323         }
7324         stmf_free(dlun0);
7325         return (STMF_SUCCESS);
7326 }
7327 
7328 void
7329 stmf_abort_target_reset(scsi_task_t *task)
7330 {
7331         stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
7332             task->task_session->ss_stmf_private;
7333         stmf_lun_map_t *lm;
7334         stmf_lun_map_ent_t *lm_ent;
7335         stmf_i_lu_t *ilu;
7336         int i;
7337 
7338         rw_enter(iss->iss_lockp, RW_READER);
7339         lm = iss->iss_sm;
7340         for (i = 0; i < lm->lm_nentries; i++) {
7341                 if (lm->lm_plus[i] == NULL)
7342                         continue;
7343                 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
7344                 ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private;
7345                 if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
7346                         atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
7347                 }
7348         }
7349         atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
7350         rw_exit(iss->iss_lockp);
7351 }
7352 
7353 /*
7354  * The return value is only used by function managing target reset.
7355  */
7356 stmf_status_t
7357 stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task, int target_reset)
7358 {
7359         stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
7360         int ntasks_pending;
7361 
7362         ntasks_pending = ilu->ilu_ntasks - ilu->ilu_ntasks_free;
7363         /*
7364          * This function is also used during Target reset. The idea is that
7365          * once all the commands are aborted, call the LU's reset entry
7366          * point (abort entry point with a reset flag). But if this Task
7367          * mgmt is running on this LU then all the tasks cannot be aborted.
7368          * one task (this task) will still be running which is OK.
7369          */
7370         if ((ntasks_pending == 0) || ((task->task_lu == lu) &&
7371             (ntasks_pending == 1))) {
7372                 stmf_status_t ret;
7373 
7374                 if ((task->task_mgmt_function == TM_LUN_RESET) ||
7375                     (task->task_mgmt_function == TM_TARGET_RESET) ||
7376                     (task->task_mgmt_function == TM_TARGET_WARM_RESET) ||
7377                     (task->task_mgmt_function == TM_TARGET_COLD_RESET)) {
7378                         ret = lu->lu_abort(lu, STMF_LU_RESET_STATE, task, 0);
7379                 } else {
7380                         ret = STMF_SUCCESS;
7381                 }
7382                 if (ret == STMF_SUCCESS) {
7383                         atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
7384                 }
7385                 if (target_reset) {
7386                         return (ret);
7387                 }
7388                 if (ret == STMF_SUCCESS) {
7389                         stmf_scsilib_send_status(task, STATUS_GOOD, 0);
7390                         return (ret);
7391                 }
7392                 if (ret != STMF_BUSY) {
7393                         stmf_abort(STMF_QUEUE_TASK_ABORT, task, ret, NULL);
7394                         return (ret);
7395                 }
7396         }
7397 
7398         if (target_reset) {
7399                 /* Tell target reset polling code that we are not done */
7400                 return (STMF_BUSY);
7401         }
7402 
7403         if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
7404             != STMF_SUCCESS) {
7405                 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
7406                     STMF_ALLOC_FAILURE, NULL);
7407                 return (STMF_SUCCESS);
7408         }
7409 
7410         return (STMF_SUCCESS);
7411 }
7412 
7413 void
7414 stmf_target_reset_poll(struct scsi_task *task)
7415 {
7416         stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
7417             task->task_session->ss_stmf_private;
7418         stmf_lun_map_t *lm;
7419         stmf_lun_map_ent_t *lm_ent;
7420         stmf_i_lu_t *ilu;
7421         stmf_status_t ret;
7422         int i;
7423         int not_done = 0;
7424 
7425         ASSERT(iss->iss_flags & ISS_RESET_ACTIVE);
7426 
7427         rw_enter(iss->iss_lockp, RW_READER);
7428         lm = iss->iss_sm;
7429         for (i = 0; i < lm->lm_nentries; i++) {
7430                 if (lm->lm_plus[i] == NULL)
7431                         continue;
7432                 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
7433                 ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private;
7434                 if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
7435                         rw_exit(iss->iss_lockp);
7436                         ret = stmf_lun_reset_poll(lm_ent->ent_lu, task, 1);
7437                         rw_enter(iss->iss_lockp, RW_READER);
7438                         if (ret == STMF_SUCCESS)
7439                                 continue;
7440                         not_done = 1;
7441                         if (ret != STMF_BUSY) {
7442                                 rw_exit(iss->iss_lockp);
7443                                 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
7444                                     STMF_ABORTED, NULL);
7445                                 return;
7446                         }
7447                 }
7448         }
7449         rw_exit(iss->iss_lockp);
7450 
7451         if (not_done) {
7452                 if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
7453                     != STMF_SUCCESS) {
7454                         stmf_abort(STMF_QUEUE_TASK_ABORT, task,
7455                             STMF_ALLOC_FAILURE, NULL);
7456                         return;
7457                 }
7458                 return;
7459         }
7460 
7461         atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
7462 
7463         stmf_scsilib_send_status(task, STATUS_GOOD, 0);
7464 }
7465 
7466 stmf_status_t
7467 stmf_lu_add_event(stmf_lu_t *lu, int eventid)
7468 {
7469         stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
7470 
7471         if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7472                 return (STMF_INVALID_ARG);
7473         }
7474 
7475         STMF_EVENT_ADD(ilu->ilu_event_hdl, eventid);
7476         return (STMF_SUCCESS);
7477 }
7478 
7479 stmf_status_t
7480 stmf_lu_remove_event(stmf_lu_t *lu, int eventid)
7481 {
7482         stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
7483 
7484         if (eventid == STMF_EVENT_ALL) {
7485                 STMF_EVENT_CLEAR_ALL(ilu->ilu_event_hdl);
7486                 return (STMF_SUCCESS);
7487         }
7488 
7489         if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7490                 return (STMF_INVALID_ARG);
7491         }
7492 
7493         STMF_EVENT_REMOVE(ilu->ilu_event_hdl, eventid);
7494         return (STMF_SUCCESS);
7495 }
7496 
7497 stmf_status_t
7498 stmf_lport_add_event(stmf_local_port_t *lport, int eventid)
7499 {
7500         stmf_i_local_port_t *ilport =
7501             (stmf_i_local_port_t *)lport->lport_stmf_private;
7502 
7503         if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7504                 return (STMF_INVALID_ARG);
7505         }
7506 
7507         STMF_EVENT_ADD(ilport->ilport_event_hdl, eventid);
7508         return (STMF_SUCCESS);
7509 }
7510 
7511 stmf_status_t
7512 stmf_lport_remove_event(stmf_local_port_t *lport, int eventid)
7513 {
7514         stmf_i_local_port_t *ilport =
7515             (stmf_i_local_port_t *)lport->lport_stmf_private;
7516 
7517         if (eventid == STMF_EVENT_ALL) {
7518                 STMF_EVENT_CLEAR_ALL(ilport->ilport_event_hdl);
7519                 return (STMF_SUCCESS);
7520         }
7521 
7522         if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7523                 return (STMF_INVALID_ARG);
7524         }
7525 
7526         STMF_EVENT_REMOVE(ilport->ilport_event_hdl, eventid);
7527         return (STMF_SUCCESS);
7528 }
7529 
7530 void
7531 stmf_generate_lu_event(stmf_i_lu_t *ilu, int eventid, void *arg, uint32_t flags)
7532 {
7533         if (STMF_EVENT_ENABLED(ilu->ilu_event_hdl, eventid) &&
7534             (ilu->ilu_lu->lu_event_handler != NULL)) {
7535                 ilu->ilu_lu->lu_event_handler(ilu->ilu_lu, eventid, arg, flags);
7536         }
7537 }
7538 
7539 void
7540 stmf_generate_lport_event(stmf_i_local_port_t *ilport, int eventid, void *arg,
7541                                 uint32_t flags)
7542 {
7543         if (STMF_EVENT_ENABLED(ilport->ilport_event_hdl, eventid) &&
7544             (ilport->ilport_lport->lport_event_handler != NULL)) {
7545                 ilport->ilport_lport->lport_event_handler(
7546                     ilport->ilport_lport, eventid, arg, flags);
7547         }
7548 }
7549 
7550 /*
7551  * With the possibility of having multiple itl sessions pointing to the
7552  * same itl_kstat_info, the ilu_kstat_lock mutex is used to synchronize
7553  * the kstat update of the ilu_kstat_io, itl_kstat_taskq and itl_kstat_lu_xfer
7554  * statistics.
7555  */
7556 void
7557 stmf_itl_task_start(stmf_i_scsi_task_t *itask)
7558 {
7559         stmf_itl_data_t *itl = itask->itask_itl_datap;
7560         scsi_task_t     *task = itask->itask_task;
7561         stmf_i_lu_t     *ilu;
7562 
7563         if (itl == NULL || task->task_lu == dlun0)
7564                 return;
7565         ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7566         mutex_enter(ilu->ilu_kstat_io->ks_lock);
7567         itask->itask_start_timestamp = gethrtime();
7568         kstat_waitq_enter(KSTAT_IO_PTR(itl->itl_kstat_taskq));
7569         stmf_update_kstat_lu_q(itask->itask_task, kstat_waitq_enter);
7570         mutex_exit(ilu->ilu_kstat_io->ks_lock);
7571 
7572         stmf_update_kstat_lport_q(itask->itask_task, kstat_waitq_enter);
7573 }
7574 
7575 void
7576 stmf_itl_lu_new_task(stmf_i_scsi_task_t *itask)
7577 {
7578         stmf_itl_data_t *itl = itask->itask_itl_datap;
7579         scsi_task_t     *task = itask->itask_task;
7580         stmf_i_lu_t     *ilu;
7581 
7582         if (itl == NULL || task->task_lu == dlun0)
7583                 return;
7584         ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7585         mutex_enter(ilu->ilu_kstat_io->ks_lock);
7586         kstat_waitq_to_runq(KSTAT_IO_PTR(itl->itl_kstat_taskq));
7587         stmf_update_kstat_lu_q(itask->itask_task, kstat_waitq_to_runq);
7588         mutex_exit(ilu->ilu_kstat_io->ks_lock);
7589 
7590         stmf_update_kstat_lport_q(itask->itask_task, kstat_waitq_to_runq);
7591 }
7592 
7593 void
7594 stmf_itl_task_done(stmf_i_scsi_task_t *itask)
7595 {
7596         stmf_itl_data_t         *itl = itask->itask_itl_datap;
7597         scsi_task_t             *task = itask->itask_task;
7598         kstat_io_t              *kip;
7599         hrtime_t                elapsed_time;
7600         stmf_kstat_itl_info_t   *itli;
7601         stmf_i_lu_t     *ilu;
7602 
7603         if (itl == NULL || task->task_lu == dlun0)
7604                 return;
7605         ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7606 
7607         mutex_enter(ilu->ilu_kstat_io->ks_lock);
7608         itli = (stmf_kstat_itl_info_t *)KSTAT_NAMED_PTR(itl->itl_kstat_info);
7609         kip = KSTAT_IO_PTR(itl->itl_kstat_taskq);
7610 
7611         itli->i_task_waitq_elapsed.value.ui64 += itask->itask_waitq_time;
7612 
7613         itask->itask_done_timestamp = gethrtime();
7614         elapsed_time =
7615             itask->itask_done_timestamp - itask->itask_start_timestamp;
7616 
7617         if (task->task_flags & TF_READ_DATA) {
7618                 kip->reads++;
7619                 kip->nread += itask->itask_read_xfer;
7620                 itli->i_task_read_elapsed.value.ui64 += elapsed_time;
7621                 itli->i_lu_read_elapsed.value.ui64 +=
7622                     itask->itask_lu_read_time;
7623                 itli->i_lport_read_elapsed.value.ui64 +=
7624                     itask->itask_lport_read_time;
7625         }
7626 
7627         if (task->task_flags & TF_WRITE_DATA) {
7628                 kip->writes++;
7629                 kip->nwritten += itask->itask_write_xfer;
7630                 itli->i_task_write_elapsed.value.ui64 += elapsed_time;
7631                 itli->i_lu_write_elapsed.value.ui64 +=
7632                     itask->itask_lu_write_time;
7633                 itli->i_lport_write_elapsed.value.ui64 +=
7634                     itask->itask_lport_write_time;
7635         }
7636 
7637         if (itask->itask_flags & ITASK_KSTAT_IN_RUNQ) {
7638                 kstat_runq_exit(kip);
7639                 stmf_update_kstat_lu_q(task, kstat_runq_exit);
7640                 mutex_exit(ilu->ilu_kstat_io->ks_lock);
7641                 stmf_update_kstat_lport_q(task, kstat_runq_exit);
7642         } else {
7643                 kstat_waitq_exit(kip);
7644                 stmf_update_kstat_lu_q(task, kstat_waitq_exit);
7645                 mutex_exit(ilu->ilu_kstat_io->ks_lock);
7646                 stmf_update_kstat_lport_q(task, kstat_waitq_exit);
7647         }
7648 }
7649 
7650 void
7651 stmf_lu_xfer_start(scsi_task_t *task)
7652 {
7653         stmf_i_scsi_task_t *itask = task->task_stmf_private;
7654         stmf_itl_data_t *itl = itask->itask_itl_datap;
7655         stmf_i_lu_t     *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7656         kstat_io_t              *kip;
7657 
7658         if (itl == NULL || task->task_lu == dlun0)
7659                 return;
7660 
7661         kip = KSTAT_IO_PTR(itl->itl_kstat_lu_xfer);
7662         mutex_enter(ilu->ilu_kstat_io->ks_lock);
7663         kstat_runq_enter(kip);
7664         mutex_exit(ilu->ilu_kstat_io->ks_lock);
7665 }
7666 
7667 void
7668 stmf_lu_xfer_done(scsi_task_t *task, boolean_t read, uint64_t xfer_bytes,
7669     hrtime_t elapsed_time)
7670 {
7671         stmf_i_scsi_task_t      *itask = task->task_stmf_private;
7672         stmf_itl_data_t         *itl = itask->itask_itl_datap;
7673         stmf_i_lu_t     *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7674         kstat_io_t              *kip;
7675 
7676         if (itl == NULL || task->task_lu == dlun0)
7677                 return;
7678 
7679         if (read) {
7680                 atomic_add_64((uint64_t *)&itask->itask_lu_read_time,
7681                     elapsed_time);
7682         } else {
7683                 atomic_add_64((uint64_t *)&itask->itask_lu_write_time,
7684                     elapsed_time);
7685         }
7686 
7687         kip = KSTAT_IO_PTR(itl->itl_kstat_lu_xfer);
7688         mutex_enter(ilu->ilu_kstat_io->ks_lock);
7689         kstat_runq_exit(kip);
7690         if (read) {
7691                 kip->reads++;
7692                 kip->nread += xfer_bytes;
7693         } else {
7694                 kip->writes++;
7695                 kip->nwritten += xfer_bytes;
7696         }
7697         mutex_exit(ilu->ilu_kstat_io->ks_lock);
7698 }
7699 
7700 static void
7701 stmf_lport_xfer_start(stmf_i_scsi_task_t *itask, stmf_data_buf_t *dbuf)
7702 {
7703         stmf_itl_data_t         *itl = itask->itask_itl_datap;
7704 
7705         if (itl == NULL)
7706                 return;
7707 
7708         DTRACE_PROBE2(scsi__xfer__start, scsi_task_t *, itask->itask_task,
7709             stmf_data_buf_t *, dbuf);
7710 
7711         dbuf->db_xfer_start_timestamp = gethrtime();
7712 }
7713 
7714 static void
7715 stmf_lport_xfer_done(stmf_i_scsi_task_t *itask, stmf_data_buf_t *dbuf)
7716 {
7717         stmf_itl_data_t         *itl = itask->itask_itl_datap;
7718         scsi_task_t             *task;
7719         stmf_i_local_port_t     *ilp;
7720         kstat_io_t              *kip;
7721         hrtime_t                elapsed_time;
7722         uint64_t                xfer_size;
7723 
7724         if (itl == NULL)
7725                 return;
7726 
7727         task = (scsi_task_t *)itask->itask_task;
7728         ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
7729         xfer_size = (dbuf->db_xfer_status == STMF_SUCCESS) ?
7730             dbuf->db_data_size : 0;
7731 
7732         elapsed_time = gethrtime() - dbuf->db_xfer_start_timestamp;
7733         if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
7734                 atomic_add_64((uint64_t *)&itask->itask_lport_read_time,
7735                     elapsed_time);
7736                 atomic_add_64((uint64_t *)&itask->itask_read_xfer,
7737                     xfer_size);
7738         } else {
7739                 atomic_add_64((uint64_t *)&itask->itask_lport_write_time,
7740                     elapsed_time);
7741                 atomic_add_64((uint64_t *)&itask->itask_write_xfer,
7742                     xfer_size);
7743         }
7744 
7745         DTRACE_PROBE3(scsi__xfer__end, scsi_task_t *, itask->itask_task,
7746             stmf_data_buf_t *, dbuf, hrtime_t, elapsed_time);
7747 
7748         kip = KSTAT_IO_PTR(itl->itl_kstat_lport_xfer);
7749         mutex_enter(ilp->ilport_kstat_io->ks_lock);
7750         if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
7751                 kip->reads++;
7752                 kip->nread += xfer_size;
7753         } else {
7754                 kip->writes++;
7755                 kip->nwritten += xfer_size;
7756         }
7757         mutex_exit(ilp->ilport_kstat_io->ks_lock);
7758 
7759         dbuf->db_xfer_start_timestamp = 0;
7760 }
7761 
7762 void
7763 stmf_svc_init()
7764 {
7765         if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED)
7766                 return;
7767         stmf_state.stmf_svc_tailp = &stmf_state.stmf_svc_active;
7768         stmf_state.stmf_svc_taskq = ddi_taskq_create(0, "STMF_SVC_TASKQ", 1,
7769             TASKQ_DEFAULTPRI, 0);
7770         (void) ddi_taskq_dispatch(stmf_state.stmf_svc_taskq,
7771             stmf_svc, 0, DDI_SLEEP);
7772 }
7773 
7774 stmf_status_t
7775 stmf_svc_fini()
7776 {
7777         uint32_t i;
7778 
7779         mutex_enter(&stmf_state.stmf_lock);
7780         if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED) {
7781                 stmf_state.stmf_svc_flags |= STMF_SVC_TERMINATE;
7782                 cv_signal(&stmf_state.stmf_cv);
7783         }
7784         mutex_exit(&stmf_state.stmf_lock);
7785 
7786         /* Wait for 5 seconds */
7787         for (i = 0; i < 500; i++) {
7788                 if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED)
7789                         delay(drv_usectohz(10000));
7790                 else
7791                         break;
7792         }
7793         if (i == 500)
7794                 return (STMF_BUSY);
7795 
7796         ddi_taskq_destroy(stmf_state.stmf_svc_taskq);
7797 
7798         return (STMF_SUCCESS);
7799 }
7800 
7801 struct stmf_svc_clocks {
7802         clock_t drain_start, drain_next;
7803         clock_t timing_start, timing_next;
7804         clock_t worker_delay;
7805 };
7806 
7807 /* ARGSUSED */
7808 void
7809 stmf_svc(void *arg)
7810 {
7811         stmf_svc_req_t *req;
7812         stmf_lu_t *lu;
7813         stmf_i_lu_t *ilu;
7814         stmf_local_port_t *lport;
7815         struct stmf_svc_clocks clks = { 0 };
7816 
7817         mutex_enter(&stmf_state.stmf_lock);
7818         stmf_state.stmf_svc_flags |= STMF_SVC_STARTED | STMF_SVC_ACTIVE;
7819 
7820         while (!(stmf_state.stmf_svc_flags & STMF_SVC_TERMINATE)) {
7821                 if (stmf_state.stmf_svc_active == NULL) {
7822                         stmf_svc_timeout(&clks);
7823                         continue;
7824                 }
7825 
7826                 /*
7827                  * Pop the front request from the active list.  After this,
7828                  * the request will no longer be referenced by global state,
7829                  * so it should be safe to access it without holding the
7830                  * stmf state lock.
7831                  */
7832                 req = stmf_state.stmf_svc_active;
7833                 stmf_state.stmf_svc_active = req->svc_next;
7834 
7835                 if (stmf_state.stmf_svc_active == NULL)
7836                         stmf_state.stmf_svc_tailp = &stmf_state.stmf_svc_active;
7837 
7838                 switch (req->svc_cmd) {
7839                 case STMF_CMD_LPORT_ONLINE:
7840                         /* Fallthrough */
7841                 case STMF_CMD_LPORT_OFFLINE:
7842                         mutex_exit(&stmf_state.stmf_lock);
7843                         lport = (stmf_local_port_t *)req->svc_obj;
7844                         lport->lport_ctl(lport, req->svc_cmd, &req->svc_info);
7845                         break;
7846                 case STMF_CMD_LU_ONLINE:
7847                         mutex_exit(&stmf_state.stmf_lock);
7848                         lu = (stmf_lu_t *)req->svc_obj;
7849                         lu->lu_ctl(lu, req->svc_cmd, &req->svc_info);
7850                         break;
7851                 case STMF_CMD_LU_OFFLINE:
7852                         /* Remove all mappings of this LU */
7853                         stmf_session_lu_unmapall((stmf_lu_t *)req->svc_obj);
7854                         /* Kill all the pending I/Os for this LU */
7855                         mutex_exit(&stmf_state.stmf_lock);
7856                         stmf_task_lu_killall((stmf_lu_t *)req->svc_obj, NULL,
7857                             STMF_ABORTED);
7858                         lu = (stmf_lu_t *)req->svc_obj;
7859                         ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
7860                         stmf_wait_ilu_tasks_finish(ilu);
7861                         lu->lu_ctl(lu, req->svc_cmd, &req->svc_info);
7862                         break;
7863                 default:
7864                         cmn_err(CE_PANIC, "stmf_svc: unknown cmd %d",
7865                             req->svc_cmd);
7866                 }
7867 
7868                 mutex_enter(&stmf_state.stmf_lock);
7869         }
7870 
7871         stmf_state.stmf_svc_flags &= ~(STMF_SVC_STARTED | STMF_SVC_ACTIVE);
7872         mutex_exit(&stmf_state.stmf_lock);
7873 }
7874 
7875 static void
7876 stmf_svc_timeout(struct stmf_svc_clocks *clks)
7877 {
7878         clock_t td;
7879         stmf_i_local_port_t *ilport, *next_ilport;
7880         stmf_i_scsi_session_t *iss;
7881 
7882         ASSERT(mutex_owned(&stmf_state.stmf_lock));
7883 
7884         td = drv_usectohz(20000);
7885 
7886         /* Do timeouts */
7887         if (stmf_state.stmf_nlus &&
7888             ((!clks->timing_next) || (ddi_get_lbolt() >= clks->timing_next))) {
7889                 if (!stmf_state.stmf_svc_ilu_timing) {
7890                         /* we are starting a new round */
7891                         stmf_state.stmf_svc_ilu_timing =
7892                             stmf_state.stmf_ilulist;
7893                         clks->timing_start = ddi_get_lbolt();
7894                 }
7895 
7896                 stmf_check_ilu_timing();
7897                 if (!stmf_state.stmf_svc_ilu_timing) {
7898                         /* we finished a complete round */
7899                         clks->timing_next =
7900                             clks->timing_start + drv_usectohz(5*1000*1000);
7901                 } else {
7902                         /* we still have some ilu items to check */
7903                         clks->timing_next =
7904                             ddi_get_lbolt() + drv_usectohz(1*1000*1000);
7905                 }
7906 
7907                 if (stmf_state.stmf_svc_active)
7908                         return;
7909         }
7910 
7911         /* Check if there are free tasks to clear */
7912         if (stmf_state.stmf_nlus &&
7913             ((!clks->drain_next) || (ddi_get_lbolt() >= clks->drain_next))) {
7914                 if (!stmf_state.stmf_svc_ilu_draining) {
7915                         /* we are starting a new round */
7916                         stmf_state.stmf_svc_ilu_draining =
7917                             stmf_state.stmf_ilulist;
7918                         clks->drain_start = ddi_get_lbolt();
7919                 }
7920 
7921                 stmf_check_freetask();
7922                 if (!stmf_state.stmf_svc_ilu_draining) {
7923                         /* we finished a complete round */
7924                         clks->drain_next =
7925                             clks->drain_start + drv_usectohz(10*1000*1000);
7926                 } else {
7927                         /* we still have some ilu items to check */
7928                         clks->drain_next =
7929                             ddi_get_lbolt() + drv_usectohz(1*1000*1000);
7930                 }
7931 
7932                 if (stmf_state.stmf_svc_active)
7933                         return;
7934         }
7935 
7936         /* Check if we need to run worker_mgmt */
7937         if (ddi_get_lbolt() > clks->worker_delay) {
7938                 stmf_worker_mgmt();
7939                 clks->worker_delay = ddi_get_lbolt() +
7940                     stmf_worker_mgmt_delay;
7941         }
7942 
7943         /* Check if any active session got its 1st LUN */
7944         if (stmf_state.stmf_process_initial_luns) {
7945                 int stmf_level = 0;
7946                 int port_level;
7947 
7948                 for (ilport = stmf_state.stmf_ilportlist; ilport;
7949                     ilport = next_ilport) {
7950                         int ilport_lock_held;
7951                         next_ilport = ilport->ilport_next;
7952 
7953                         if ((ilport->ilport_flags &
7954                             ILPORT_SS_GOT_INITIAL_LUNS) == 0)
7955                                 continue;
7956 
7957                         port_level = 0;
7958                         rw_enter(&ilport->ilport_lock, RW_READER);
7959                         ilport_lock_held = 1;
7960 
7961                         for (iss = ilport->ilport_ss_list; iss;
7962                             iss = iss->iss_next) {
7963                                 if ((iss->iss_flags &
7964                                     ISS_GOT_INITIAL_LUNS) == 0)
7965                                         continue;
7966 
7967                                 port_level++;
7968                                 stmf_level++;
7969                                 atomic_and_32(&iss->iss_flags,
7970                                     ~ISS_GOT_INITIAL_LUNS);
7971                                 atomic_or_32(&iss->iss_flags,
7972                                     ISS_EVENT_ACTIVE);
7973                                 rw_exit(&ilport->ilport_lock);
7974                                 ilport_lock_held = 0;
7975                                 mutex_exit(&stmf_state.stmf_lock);
7976                                 stmf_generate_lport_event(ilport,
7977                                     LPORT_EVENT_INITIAL_LUN_MAPPED,
7978                                     iss->iss_ss, 0);
7979                                 atomic_and_32(&iss->iss_flags,
7980                                     ~ISS_EVENT_ACTIVE);
7981                                 mutex_enter(&stmf_state.stmf_lock);
7982                                 /*
7983                                  * scan all the ilports again as the
7984                                  * ilport list might have changed.
7985                                  */
7986                                 next_ilport = stmf_state.stmf_ilportlist;
7987                                 break;
7988                         }
7989 
7990                         if (port_level == 0)
7991                                 atomic_and_32(&ilport->ilport_flags,
7992                                     ~ILPORT_SS_GOT_INITIAL_LUNS);
7993                         /* drop the lock if we are holding it. */
7994                         if (ilport_lock_held == 1)
7995                                 rw_exit(&ilport->ilport_lock);
7996 
7997                         /* Max 4 session at a time */
7998                         if (stmf_level >= 4)
7999                                 break;
8000                 }
8001 
8002                 if (stmf_level == 0)
8003                         stmf_state.stmf_process_initial_luns = 0;
8004         }
8005 
8006         stmf_state.stmf_svc_flags &= ~STMF_SVC_ACTIVE;
8007         (void) cv_reltimedwait(&stmf_state.stmf_cv,
8008             &stmf_state.stmf_lock, td, TR_CLOCK_TICK);
8009         stmf_state.stmf_svc_flags |= STMF_SVC_ACTIVE;
8010 }
8011 
8012 /*
8013  * Waits for ongoing I/O tasks to finish on an LU in preparation for
8014  * the LU's offlining. The LU should already be in an Offlining state
8015  * (otherwise I/O to the LU might never end). There is an additional
8016  * enforcement of this via a deadman timer check.
8017  */
8018 static void
8019 stmf_wait_ilu_tasks_finish(stmf_i_lu_t *ilu)
8020 {
8021         clock_t start, now, deadline;
8022 
8023         start = now = ddi_get_lbolt();
8024         deadline = start + drv_usectohz(stmf_io_deadman * 1000000llu);
8025         mutex_enter(&ilu->ilu_task_lock);
8026         while (ilu->ilu_ntasks != ilu->ilu_ntasks_free) {
8027                 (void) cv_timedwait(&ilu->ilu_offline_pending_cv,
8028                     &ilu->ilu_task_lock, deadline);
8029                 now = ddi_get_lbolt();
8030                 if (now > deadline) {
8031                         if (stmf_io_deadman_enabled) {
8032                                 cmn_err(CE_PANIC, "stmf_svc: I/O deadman hit "
8033                                     "on STMF_CMD_LU_OFFLINE after %d seconds",
8034                                     stmf_io_deadman);
8035                         } else {
8036                                 /* keep on spinning */
8037                                 deadline = now + drv_usectohz(stmf_io_deadman *
8038                                     1000000llu);
8039                         }
8040                 }
8041         }
8042         mutex_exit(&ilu->ilu_task_lock);
8043         DTRACE_PROBE1(deadman__timeout__wait, clock_t, now - start);
8044 }
8045 
8046 void
8047 stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info)
8048 {
8049         stmf_svc_req_t *req;
8050         int s;
8051 
8052         ASSERT(!mutex_owned(&stmf_state.stmf_lock));
8053         s = sizeof (stmf_svc_req_t);
8054         if (info->st_additional_info) {
8055                 s += strlen(info->st_additional_info) + 1;
8056         }
8057         req = kmem_zalloc(s, KM_SLEEP);
8058 
8059         req->svc_cmd = cmd;
8060         req->svc_obj = obj;
8061         req->svc_info.st_rflags = info->st_rflags;
8062         if (info->st_additional_info) {
8063                 req->svc_info.st_additional_info = (char *)(GET_BYTE_OFFSET(req,
8064                     sizeof (stmf_svc_req_t)));
8065                 (void) strcpy(req->svc_info.st_additional_info,
8066                     info->st_additional_info);
8067         }
8068         req->svc_req_alloc_size = s;
8069         req->svc_next = NULL;
8070 
8071         mutex_enter(&stmf_state.stmf_lock);
8072         *stmf_state.stmf_svc_tailp = req;
8073         stmf_state.stmf_svc_tailp = &req->svc_next;
8074         if ((stmf_state.stmf_svc_flags & STMF_SVC_ACTIVE) == 0) {
8075                 cv_signal(&stmf_state.stmf_cv);
8076         }
8077         mutex_exit(&stmf_state.stmf_lock);
8078 }
8079 
8080 static void
8081 stmf_svc_kill_obj_requests(void *obj)
8082 {
8083         stmf_svc_req_t *prev_req = NULL;
8084         stmf_svc_req_t *next_req;
8085         stmf_svc_req_t *req;
8086 
8087         ASSERT(mutex_owned(&stmf_state.stmf_lock));
8088 
8089         for (req = stmf_state.stmf_svc_active; req != NULL; req = next_req) {
8090                 next_req = req->svc_next;
8091 
8092                 if (req->svc_obj == obj) {
8093                         if (prev_req != NULL)
8094                                 prev_req->svc_next = next_req;
8095                         else
8096                                 stmf_state.stmf_svc_active = next_req;
8097 
8098                         if (next_req == NULL)
8099                                 stmf_state.stmf_svc_tailp = (prev_req != NULL) ?
8100                                     &prev_req->svc_next :
8101                                     &stmf_state.stmf_svc_active;
8102 
8103                         kmem_free(req, req->svc_req_alloc_size);
8104                 } else {
8105                         prev_req = req;
8106                 }
8107         }
8108 }
8109 
8110 void
8111 stmf_trace(caddr_t ident, const char *fmt, ...)
8112 {
8113         va_list args;
8114         char tbuf[160];
8115         int len;
8116 
8117         if (!stmf_trace_on)
8118                 return;
8119         len = snprintf(tbuf, 158, "%s:%07lu: ", ident ? ident : "",
8120             ddi_get_lbolt());
8121         va_start(args, fmt);
8122         len += vsnprintf(tbuf + len, 158 - len, fmt, args);
8123         va_end(args);
8124 
8125         if (len > 158) {
8126                 len = 158;
8127         }
8128         tbuf[len++] = '\n';
8129         tbuf[len] = 0;
8130 
8131         mutex_enter(&trace_buf_lock);
8132         bcopy(tbuf, &stmf_trace_buf[trace_buf_curndx], len+1);
8133         trace_buf_curndx += len;
8134         if (trace_buf_curndx > (trace_buf_size - 320))
8135                 trace_buf_curndx = 0;
8136         mutex_exit(&trace_buf_lock);
8137 }
8138 
8139 void
8140 stmf_trace_clear()
8141 {
8142         if (!stmf_trace_on)
8143                 return;
8144         mutex_enter(&trace_buf_lock);
8145         trace_buf_curndx = 0;
8146         if (trace_buf_size > 0)
8147                 stmf_trace_buf[0] = 0;
8148         mutex_exit(&trace_buf_lock);
8149 }
8150 
8151 static void
8152 stmf_abort_task_offline(scsi_task_t *task, int offline_lu, char *info)
8153 {
8154         stmf_state_change_info_t        change_info;
8155         void                            *ctl_private;
8156         uint32_t                        ctl_cmd;
8157         int                             msg = 0;
8158 
8159         stmf_trace("FROM STMF", "abort_task_offline called for %s: %s",
8160             offline_lu ? "LU" : "LPORT", info ? info : "no additional info");
8161         change_info.st_additional_info = info;
8162         if (offline_lu) {
8163                 change_info.st_rflags = STMF_RFLAG_RESET |
8164                     STMF_RFLAG_LU_ABORT;
8165                 ctl_private = task->task_lu;
8166                 if (((stmf_i_lu_t *)
8167                     task->task_lu->lu_stmf_private)->ilu_state ==
8168                     STMF_STATE_ONLINE) {
8169                         msg = 1;
8170                 }
8171                 ctl_cmd = STMF_CMD_LU_OFFLINE;
8172         } else {
8173                 change_info.st_rflags = STMF_RFLAG_RESET |
8174                     STMF_RFLAG_LPORT_ABORT;
8175                 ctl_private = task->task_lport;
8176                 if (((stmf_i_local_port_t *)
8177                     task->task_lport->lport_stmf_private)->ilport_state ==
8178                     STMF_STATE_ONLINE) {
8179                         msg = 1;
8180                 }
8181                 ctl_cmd = STMF_CMD_LPORT_OFFLINE;
8182         }
8183 
8184         if (msg) {
8185                 stmf_trace(0, "Calling stmf_ctl to offline %s : %s",
8186                     offline_lu ? "LU" : "LPORT", info ? info :
8187                     "<no additional info>");
8188         }
8189         (void) stmf_ctl(ctl_cmd, ctl_private, &change_info);
8190 }
8191 
8192 static char
8193 stmf_ctoi(char c)
8194 {
8195         if ((c >= '0') && (c <= '9'))
8196                 c -= '0';
8197         else if ((c >= 'A') && (c <= 'F'))
8198                 c = c - 'A' + 10;
8199         else if ((c >= 'a') && (c <= 'f'))
8200                 c = c - 'a' + 10;
8201         else
8202                 c = -1;
8203         return (c);
8204 }
8205 
8206 /* Convert from Hex value in ASCII format to the equivalent bytes */
8207 static boolean_t
8208 stmf_base16_str_to_binary(char *c, int dplen, uint8_t *dp)
8209 {
8210         int             ii;
8211 
8212         for (ii = 0; ii < dplen; ii++) {
8213                 char nibble1, nibble2;
8214                 char enc_char = *c++;
8215                 nibble1 = stmf_ctoi(enc_char);
8216 
8217                 enc_char = *c++;
8218                 nibble2 = stmf_ctoi(enc_char);
8219                 if (nibble1 == -1 || nibble2 == -1)
8220                         return (B_FALSE);
8221 
8222                 dp[ii] = (nibble1 << 4) | nibble2;
8223         }
8224         return (B_TRUE);
8225 }
8226 
8227 boolean_t
8228 stmf_scsilib_tptid_validate(scsi_transport_id_t *tptid, uint32_t total_sz,
8229                                 uint16_t *tptid_sz)
8230 {
8231         uint16_t tpd_len = SCSI_TPTID_SIZE;
8232 
8233         if (tptid_sz)
8234                 *tptid_sz = 0;
8235         if (total_sz < sizeof (scsi_transport_id_t))
8236                 return (B_FALSE);
8237 
8238         switch (tptid->protocol_id) {
8239 
8240         case PROTOCOL_FIBRE_CHANNEL:
8241                 /* FC Transport ID validation checks. SPC3 rev23, Table 284 */
8242                 if (total_sz < tpd_len || tptid->format_code != 0)
8243                         return (B_FALSE);
8244                 break;
8245 
8246         case PROTOCOL_iSCSI:
8247                 {
8248                 iscsi_transport_id_t    *iscsiid;
8249                 uint16_t                adn_len, name_len;
8250 
8251                 /* Check for valid format code, SPC3 rev 23 Table 288 */
8252                 if ((total_sz < tpd_len) ||
8253                     (tptid->format_code != 0 && tptid->format_code != 1))
8254                         return (B_FALSE);
8255 
8256                 iscsiid = (iscsi_transport_id_t *)tptid;
8257                 adn_len = READ_SCSI16(iscsiid->add_len, uint16_t);
8258                 tpd_len = sizeof (iscsi_transport_id_t) + adn_len - 1;
8259 
8260                 /*
8261                  * iSCSI Transport ID validation checks.
8262                  * As per SPC3 rev 23 Section 7.5.4.6 and Table 289 & Table 290
8263                  */
8264                 if (adn_len < 20 || (adn_len % 4 != 0))
8265                         return (B_FALSE);
8266 
8267                 name_len = strnlen(iscsiid->iscsi_name, adn_len);
8268                 if (name_len == 0 || name_len >= adn_len)
8269                         return (B_FALSE);
8270 
8271                 /* If the format_code is 1 check for ISID seperator */
8272                 if ((tptid->format_code == 1) && (strstr(iscsiid->iscsi_name,
8273                     SCSI_TPTID_ISCSI_ISID_SEPERATOR) == NULL))
8274                         return (B_FALSE);
8275 
8276                 }
8277                 break;
8278 
8279         case PROTOCOL_SRP:
8280                 /* SRP Transport ID validation checks. SPC3 rev23, Table 287 */
8281                 if (total_sz < tpd_len || tptid->format_code != 0)
8282                         return (B_FALSE);
8283                 break;
8284 
8285         case PROTOCOL_PARALLEL_SCSI:
8286         case PROTOCOL_SSA:
8287         case PROTOCOL_IEEE_1394:
8288         case PROTOCOL_SAS:
8289         case PROTOCOL_ADT:
8290         case PROTOCOL_ATAPI:
8291         default:
8292                 {
8293                 stmf_dflt_scsi_tptid_t *dflttpd;
8294 
8295                 tpd_len = sizeof (stmf_dflt_scsi_tptid_t);
8296                 if (total_sz < tpd_len)
8297                         return (B_FALSE);
8298                 dflttpd = (stmf_dflt_scsi_tptid_t *)tptid;
8299                 tpd_len = tpd_len + SCSI_READ16(&dflttpd->ident_len) - 1;
8300                 if (total_sz < tpd_len)
8301                         return (B_FALSE);
8302                 }
8303                 break;
8304         }
8305         if (tptid_sz)
8306                 *tptid_sz = tpd_len;
8307         return (B_TRUE);
8308 }
8309 
8310 boolean_t
8311 stmf_scsilib_tptid_compare(scsi_transport_id_t *tpd1,
8312                                 scsi_transport_id_t *tpd2)
8313 {
8314         if ((tpd1->protocol_id != tpd2->protocol_id) ||
8315             (tpd1->format_code != tpd2->format_code))
8316                 return (B_FALSE);
8317 
8318         switch (tpd1->protocol_id) {
8319 
8320         case PROTOCOL_iSCSI:
8321                 {
8322                 iscsi_transport_id_t *iscsitpd1, *iscsitpd2;
8323                 uint16_t len;
8324 
8325                 iscsitpd1 = (iscsi_transport_id_t *)tpd1;
8326                 iscsitpd2 = (iscsi_transport_id_t *)tpd2;
8327                 len = SCSI_READ16(&iscsitpd1->add_len);
8328                 if ((memcmp(iscsitpd1->add_len, iscsitpd2->add_len, 2) != 0) ||
8329                     (memcmp(iscsitpd1->iscsi_name, iscsitpd2->iscsi_name, len)
8330                     != 0))
8331                         return (B_FALSE);
8332                 }
8333                 break;
8334 
8335         case PROTOCOL_SRP:
8336                 {
8337                 scsi_srp_transport_id_t *srptpd1, *srptpd2;
8338 
8339                 srptpd1 = (scsi_srp_transport_id_t *)tpd1;
8340                 srptpd2 = (scsi_srp_transport_id_t *)tpd2;
8341                 if (memcmp(srptpd1->srp_name, srptpd2->srp_name,
8342                     sizeof (srptpd1->srp_name)) != 0)
8343                         return (B_FALSE);
8344                 }
8345                 break;
8346 
8347         case PROTOCOL_FIBRE_CHANNEL:
8348                 {
8349                 scsi_fc_transport_id_t *fctpd1, *fctpd2;
8350 
8351                 fctpd1 = (scsi_fc_transport_id_t *)tpd1;
8352                 fctpd2 = (scsi_fc_transport_id_t *)tpd2;
8353                 if (memcmp(fctpd1->port_name, fctpd2->port_name,
8354                     sizeof (fctpd1->port_name)) != 0)
8355                         return (B_FALSE);
8356                 }
8357                 break;
8358 
8359         case PROTOCOL_PARALLEL_SCSI:
8360         case PROTOCOL_SSA:
8361         case PROTOCOL_IEEE_1394:
8362         case PROTOCOL_SAS:
8363         case PROTOCOL_ADT:
8364         case PROTOCOL_ATAPI:
8365         default:
8366                 {
8367                 stmf_dflt_scsi_tptid_t *dflt1, *dflt2;
8368                 uint16_t len;
8369 
8370                 dflt1 = (stmf_dflt_scsi_tptid_t *)tpd1;
8371                 dflt2 = (stmf_dflt_scsi_tptid_t *)tpd2;
8372                 len = SCSI_READ16(&dflt1->ident_len);
8373                 if ((memcmp(dflt1->ident_len, dflt2->ident_len, 2) != 0) ||
8374                     (memcmp(dflt1->ident, dflt2->ident, len) != 0))
8375                         return (B_FALSE);
8376                 }
8377                 break;
8378         }
8379         return (B_TRUE);
8380 }
8381 
8382 /*
8383  * Changes devid_desc to corresponding TransportID format
8384  * Returns :- pointer to stmf_remote_port_t
8385  * Note    :- Allocates continous memory for stmf_remote_port_t and TransportID,
8386  *            This memory need to be freed when this remote_port is no longer
8387  *            used.
8388  */
8389 stmf_remote_port_t *
8390 stmf_scsilib_devid_to_remote_port(scsi_devid_desc_t *devid)
8391 {
8392         struct scsi_fc_transport_id     *fc_tpd;
8393         struct iscsi_transport_id       *iscsi_tpd;
8394         struct scsi_srp_transport_id    *srp_tpd;
8395         struct stmf_dflt_scsi_tptid     *dflt_tpd;
8396         uint16_t ident_len,  sz = 0;
8397         stmf_remote_port_t *rpt = NULL;
8398 
8399         ident_len = devid->ident_length;
8400         ASSERT(ident_len);
8401         switch (devid->protocol_id) {
8402         case PROTOCOL_FIBRE_CHANNEL:
8403                 sz = sizeof (scsi_fc_transport_id_t);
8404                 rpt = stmf_remote_port_alloc(sz);
8405                 rpt->rport_tptid->format_code = 0;
8406                 rpt->rport_tptid->protocol_id = devid->protocol_id;
8407                 fc_tpd = (scsi_fc_transport_id_t *)rpt->rport_tptid;
8408                 /*
8409                  * convert from "wwn.xxxxxxxxxxxxxxxx" to 8-byte binary
8410                  * skip first 4 byte for "wwn."
8411                  */
8412                 ASSERT(strncmp("wwn.", (char *)devid->ident, 4) == 0);
8413                 if ((ident_len < SCSI_TPTID_FC_PORT_NAME_SIZE * 2 + 4) ||
8414                     !stmf_base16_str_to_binary((char *)devid->ident + 4,
8415                     SCSI_TPTID_FC_PORT_NAME_SIZE, fc_tpd->port_name))
8416                         goto devid_to_remote_port_fail;
8417                 break;
8418 
8419         case PROTOCOL_iSCSI:
8420                 sz = ALIGNED_TO_8BYTE_BOUNDARY(sizeof (iscsi_transport_id_t) +
8421                     ident_len - 1);
8422                 rpt = stmf_remote_port_alloc(sz);
8423                 rpt->rport_tptid->format_code = 0;
8424                 rpt->rport_tptid->protocol_id = devid->protocol_id;
8425                 iscsi_tpd = (iscsi_transport_id_t *)rpt->rport_tptid;
8426                 SCSI_WRITE16(iscsi_tpd->add_len, ident_len);
8427                 (void) memcpy(iscsi_tpd->iscsi_name, devid->ident, ident_len);
8428                 break;
8429 
8430         case PROTOCOL_SRP:
8431                 sz = sizeof (scsi_srp_transport_id_t);
8432                 rpt = stmf_remote_port_alloc(sz);
8433                 rpt->rport_tptid->format_code = 0;
8434                 rpt->rport_tptid->protocol_id = devid->protocol_id;
8435                 srp_tpd = (scsi_srp_transport_id_t *)rpt->rport_tptid;
8436                 /*
8437                  * convert from "eui.xxxxxxxxxxxxxxx" to 8-byte binary
8438                  * skip first 4 byte for "eui."
8439                  * Assume 8-byte initiator-extension part of srp_name is NOT
8440                  * stored in devid and hence will be set as zero
8441                  */
8442                 ASSERT(strncmp("eui.", (char *)devid->ident, 4) == 0);
8443                 if ((ident_len < (SCSI_TPTID_SRP_PORT_NAME_LEN - 8) * 2 + 4) ||
8444                     !stmf_base16_str_to_binary((char *)devid->ident+4,
8445                     SCSI_TPTID_SRP_PORT_NAME_LEN, srp_tpd->srp_name))
8446                         goto devid_to_remote_port_fail;
8447                 break;
8448 
8449         case PROTOCOL_PARALLEL_SCSI:
8450         case PROTOCOL_SSA:
8451         case PROTOCOL_IEEE_1394:
8452         case PROTOCOL_SAS:
8453         case PROTOCOL_ADT:
8454         case PROTOCOL_ATAPI:
8455         default :
8456                 ident_len = devid->ident_length;
8457                 sz = ALIGNED_TO_8BYTE_BOUNDARY(sizeof (stmf_dflt_scsi_tptid_t) +
8458                     ident_len - 1);
8459                 rpt = stmf_remote_port_alloc(sz);
8460                 rpt->rport_tptid->format_code = 0;
8461                 rpt->rport_tptid->protocol_id = devid->protocol_id;
8462                 dflt_tpd = (stmf_dflt_scsi_tptid_t *)rpt->rport_tptid;
8463                 SCSI_WRITE16(dflt_tpd->ident_len, ident_len);
8464                 (void) memcpy(dflt_tpd->ident, devid->ident, ident_len);
8465                 break;
8466         }
8467         return (rpt);
8468 
8469 devid_to_remote_port_fail:
8470         stmf_remote_port_free(rpt);
8471         return (NULL);
8472 
8473 }
8474 
8475 stmf_remote_port_t *
8476 stmf_remote_port_alloc(uint16_t tptid_sz) {
8477         stmf_remote_port_t *rpt;
8478         rpt = (stmf_remote_port_t *)kmem_zalloc(
8479             sizeof (stmf_remote_port_t) + tptid_sz, KM_SLEEP);
8480         rpt->rport_tptid_sz = tptid_sz;
8481         rpt->rport_tptid = (scsi_transport_id_t *)(rpt + 1);
8482         return (rpt);
8483 }
8484 
8485 void
8486 stmf_remote_port_free(stmf_remote_port_t *rpt)
8487 {
8488         /*
8489          * Note: stmf_scsilib_devid_to_remote_port() function allocates
8490          *      remote port structures for all transports in the same way, So
8491          *      it is safe to deallocate it in a protocol independent manner.
8492          *      If any of the allocation method changes, corresponding changes
8493          *      need to be made here too.
8494          */
8495         kmem_free(rpt, sizeof (stmf_remote_port_t) + rpt->rport_tptid_sz);
8496 }