/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
 */

/*
 * sf - Solaris Fibre Channel driver
 *
 * This module implements some of the Fibre Channel FC-4 layer, converting
 * from FC frames to SCSI and back.  (Note: no sequence management is done
 * here, though.)
 */

#if defined(lint) && !defined(DEBUG)
#define DEBUG   1
#endif

/*
 * XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
 * Need to use the ugly RAID LUN mappings in FCP Annex D
 * to prevent SCSA from barfing.  This *REALLY* needs to
 * be addressed by the standards committee.
 */
#define RAID_LUNS       1

#ifdef DEBUG
static int sfdebug = 0;
#include <sys/debug.h>

#define SF_DEBUG(level, args) \
        if (sfdebug >= (level)) sf_log args
#else
#define SF_DEBUG(level, args)
#endif
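
/*
 * Example usage (DEBUG builds only), as in sf_attach() below:
 *      SF_DEBUG(2, (sf, CE_CONT,
 *          "sf_attach: DDI_RESUME for sf%d\n", instance));
 * logs via sf_log() when sfdebug >= 2; in non-DEBUG builds the macro
 * expands to nothing.
 */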

static int sf_bus_config_debug = 0;

#include <sys/scsi/scsi.h>
#include <sys/fc4/fcal.h>
#include <sys/fc4/fcp.h>
#include <sys/fc4/fcal_linkapp.h>
#include <sys/socal_cq_defs.h>
#include <sys/fc4/fcal_transport.h>
#include <sys/fc4/fcio.h>
#include <sys/scsi/adapters/sfvar.h>
#include <sys/scsi/impl/scsi_reset_notify.h>
#include <sys/stat.h>
#include <sys/varargs.h>
#include <sys/var.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/kstat.h>
#include <sys/devctl.h>
#include <sys/scsi/targets/ses.h>
#include <sys/callb.h>
#include <sys/sysmacros.h>

static int sf_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int sf_attach(dev_info_t *, ddi_attach_cmd_t);
static int sf_detach(dev_info_t *, ddi_detach_cmd_t);
static void sf_softstate_unlink(struct sf *);
static int sf_scsi_bus_config(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
static int sf_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg);
static int sf_scsi_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static void sf_scsi_tgt_free(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static int sf_pkt_alloc_extern(struct sf *, struct sf_pkt *,
    int, int, int);
static void sf_pkt_destroy_extern(struct sf *, struct sf_pkt *);
static struct scsi_pkt *sf_scsi_init_pkt(struct scsi_address *,
    struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
static void sf_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
static void sf_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
static void sf_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
static int sf_scsi_reset_notify(struct scsi_address *, int,
    void (*)(caddr_t), caddr_t);
static int sf_scsi_get_name(struct scsi_device *, char *, int);
static int sf_scsi_get_bus_addr(struct scsi_device *, char *, int);
static int sf_add_cr_pool(struct sf *);
static int sf_cr_alloc(struct sf *, struct sf_pkt *, int (*)());
static void sf_cr_free(struct sf_cr_pool *, struct sf_pkt *);
static void sf_crpool_free(struct sf *);
static int sf_kmem_cache_constructor(void *, void *, int);
static void sf_kmem_cache_destructor(void *, void *);
static void sf_statec_callback(void *, int);
static int sf_login(struct sf *, uchar_t, uchar_t, uint_t, int);
static int sf_els_transport(struct sf *, struct sf_els_hdr *);
static void sf_els_callback(struct fcal_packet *);
static int sf_do_prli(struct sf *, struct sf_els_hdr *, struct la_els_logi *);
static int sf_do_adisc(struct sf *, struct sf_els_hdr *);
static int sf_do_reportlun(struct sf *, struct sf_els_hdr *,
    struct sf_target *);
static void sf_reportlun_callback(struct fcal_packet *);
static int sf_do_inquiry(struct sf *, struct sf_els_hdr *,
    struct sf_target *);
static void sf_inq_callback(struct fcal_packet *);
static struct fcal_packet *sf_els_alloc(struct sf *, uchar_t, int, int,
    int, caddr_t *, caddr_t *);
static void sf_els_free(struct fcal_packet *);
static struct sf_target *sf_create_target(struct sf *,
    struct sf_els_hdr *, int, int64_t);
#ifdef RAID_LUNS
static struct sf_target *sf_lookup_target(struct sf *, uchar_t *, int);
#else
static struct sf_target *sf_lookup_target(struct sf *, uchar_t *, int64_t);
#endif
static void sf_finish_init(struct sf *, int);
static void sf_offline_target(struct sf *, struct sf_target *);
static void sf_create_devinfo(struct sf *, struct sf_target *, int);
static int sf_create_props(dev_info_t *, struct sf_target *, int);
static int sf_commoncap(struct scsi_address *, char *, int, int, int);
static int sf_getcap(struct scsi_address *, char *, int);
static int sf_setcap(struct scsi_address *, char *, int, int);
static int sf_abort(struct scsi_address *, struct scsi_pkt *);
static int sf_reset(struct scsi_address *, int);
static void sf_abort_all(struct sf *, struct sf_target *, int, int, int);
static int sf_start(struct scsi_address *, struct scsi_pkt *);
static int sf_start_internal(struct sf *, struct sf_pkt *);
static void sf_fill_ids(struct sf *, struct sf_pkt *, struct sf_target *);
static int sf_prepare_pkt(struct sf *, struct sf_pkt *, struct sf_target *);
static int sf_dopoll(struct sf *, struct sf_pkt *);
static void sf_cmd_callback(struct fcal_packet *);
static void sf_throttle(struct sf *);
static void sf_watch(void *);
static void sf_throttle_start(struct sf *);
static void sf_check_targets(struct sf *);
static void sf_check_reset_delay(void *);
static int sf_target_timeout(struct sf *, struct sf_pkt *);
static void sf_force_lip(struct sf *);
static void sf_unsol_els_callback(void *, soc_response_t *, caddr_t);
static struct sf_els_hdr *sf_els_timeout(struct sf *, struct sf_els_hdr *);
/*PRINTFLIKE3*/
static void sf_log(struct sf *, int, const char *, ...);
static int sf_kstat_update(kstat_t *, int);
static int sf_open(dev_t *, int, int, cred_t *);
static int sf_close(dev_t, int, int, cred_t *);
static int sf_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static struct sf_target *sf_get_target_from_dip(struct sf *, dev_info_t *);
static int sf_bus_get_eventcookie(dev_info_t *, dev_info_t *, char *,
    ddi_eventcookie_t *);
static int sf_bus_add_eventcall(dev_info_t *, dev_info_t *,
    ddi_eventcookie_t, void (*)(), void *, ddi_callback_id_t *cb_id);
static int sf_bus_remove_eventcall(dev_info_t *devi, ddi_callback_id_t cb_id);
static int sf_bus_post_event(dev_info_t *, dev_info_t *,
    ddi_eventcookie_t, void *);

static void sf_hp_daemon(void *);

/*
 * this is required to be able to supply a control node
 * where ioctls can be executed
 */
struct cb_ops sf_cb_ops = {
        sf_open,                        /* open */
        sf_close,                       /* close */
        nodev,                          /* strategy */
        nodev,                          /* print */
        nodev,                          /* dump */
        nodev,                          /* read */
        nodev,                          /* write */
        sf_ioctl,                       /* ioctl */
        nodev,                          /* devmap */
        nodev,                          /* mmap */
        nodev,                          /* segmap */
        nochpoll,                       /* poll */
        ddi_prop_op,                    /* cb_prop_op */
        0,                              /* streamtab  */
        D_MP | D_NEW | D_HOTPLUG        /* driver flags */

};

/*
 * autoconfiguration routines.
 */
static struct dev_ops sf_ops = {
        DEVO_REV,               /* devo_rev, */
        0,                      /* refcnt  */
        sf_info,                /* info */
        nulldev,                /* identify */
        nulldev,                /* probe */
        sf_attach,              /* attach */
        sf_detach,              /* detach */
        nodev,                  /* reset */
        &sf_cb_ops,         /* driver operations */
        NULL,                   /* bus operations */
        NULL,                   /* power management */
        ddi_quiesce_not_supported,      /* devo_quiesce */
};

#define SF_NAME "FC-AL FCP Nexus Driver"        /* Name of the module. */
static  char    sf_version[] = "1.72 08/19/2008"; /* version of the module */

static struct modldrv modldrv = {
        &mod_driverops, /* Type of module. This one is a driver */
        SF_NAME,
        &sf_ops,    /* driver ops */
};

static struct modlinkage modlinkage = {
        MODREV_1, (void *)&modldrv, NULL
};

/* XXXXXX The following is here to handle broken targets -- remove it later */
static int sf_reportlun_forever = 0;
/* XXXXXX */
static int sf_lip_on_plogo = 0;
static int sf_els_retries = SF_ELS_RETRIES;
static struct sf *sf_head = NULL;
static int sf_target_scan_cnt = 4;
static int sf_pkt_scan_cnt = 5;
static int sf_pool_scan_cnt = 1800;
static void *sf_state = NULL;
static int sf_watchdog_init = 0;
static int sf_watchdog_time = 0;
static int sf_watchdog_timeout = 1;
static int sf_watchdog_tick;
static int sf_watch_running = 0;
static timeout_id_t sf_watchdog_id;
static timeout_id_t sf_reset_timeout_id;
static int sf_max_targets = SF_MAX_TARGETS;
static kmutex_t sf_global_mutex;
static int sf_core = 0;
int *sf_token = NULL; /* Must not be static or lint complains. */
static kcondvar_t sf_watch_cv;
extern pri_t minclsyspri;
static ddi_eventcookie_t        sf_insert_eid;
static ddi_eventcookie_t        sf_remove_eid;

static ndi_event_definition_t   sf_event_defs[] = {
{ SF_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL, 0 },
{ SF_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT, 0 }
};

#define SF_N_NDI_EVENTS \
        (sizeof (sf_event_defs) / sizeof (ndi_event_definition_t))

#ifdef DEBUG
static int sf_lip_flag = 1;             /* bool: to allow LIPs */
static int sf_reset_flag = 1;           /* bool: to allow reset after LIP */
static int sf_abort_flag = 0;           /* bool: to do just one abort */
#endif

extern int64_t ddi_get_lbolt64(void);

/*
 * for converting between target number (switch) and hard address/AL_PA
 */
static uchar_t sf_switch_to_alpa[] = {
        0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
        0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
        0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
        0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
        0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
        0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
        0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
        0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
        0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
        0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
        0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
        0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
        0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
};

static uchar_t sf_alpa_to_switch[] = {
        0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
        0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
        0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
        0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
        0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
        0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
        0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
        0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
        0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
        0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
        0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
        0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
        0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
        0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
        0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
        0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
        0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
        0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
        0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
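
/*
 * Note: for valid entries these two tables are inverses of each other,
 * i.e. sf_alpa_to_switch[sf_switch_to_alpa[i]] == i; AL_PA values with
 * no corresponding target number map to 0 in sf_alpa_to_switch[].
 */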

/*
 * these macros call the proper transport-layer function given
 * a particular transport
 */
#define soc_transport(a, b, c, d) (*a->fcal_ops->fcal_transport)(b, c, d)
#define soc_transport_poll(a, b, c, d)\
        (*a->fcal_ops->fcal_transport_poll)(b, c, d)
#define soc_get_lilp_map(a, b, c, d, e)\
        (*a->fcal_ops->fcal_lilp_map)(b, c, d, e)
#define soc_force_lip(a, b, c, d, e)\
        (*a->fcal_ops->fcal_force_lip)(b, c, d, e)
#define soc_abort(a, b, c, d, e)\
        (*a->fcal_ops->fcal_abort_cmd)(b, c, d, e)
#define soc_force_reset(a, b, c, d)\
        (*a->fcal_ops->fcal_force_reset)(b, c, d)
#define soc_add_ulp(a, b, c, d, e, f, g, h)\
        (*a->fcal_ops->fcal_add_ulp)(b, c, d, e, f, g, h)
#define soc_remove_ulp(a, b, c, d, e)\
        (*a->fcal_ops->fcal_remove_ulp)(b, c, d, e)
#define soc_take_core(a, b) (*a->fcal_ops->fcal_take_core)(b)
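
/*
 * The first macro argument only supplies the fcal_ops vector and is not
 * passed on to the transport routine.  For example, the attach path below
 * calls
 *      (void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
 *          sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);
 * which dispatches through sf->sf_sochandle->fcal_ops->fcal_force_lip().
 */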


/* power management property defines (should be in a common include file?) */
#define PM_HARDWARE_STATE_PROP          "pm-hardware-state"
#define PM_NEEDS_SUSPEND_RESUME         "needs-suspend-resume"


/* node properties */
#define NODE_WWN_PROP                   "node-wwn"
#define PORT_WWN_PROP                   "port-wwn"
#define LIP_CNT_PROP                    "lip-count"
#define TARGET_PROP                     "target"
#define LUN_PROP                        "lun"


/*
 * initialize this driver and install this module
 */
int
_init(void)
{
        int     i;

        i = ddi_soft_state_init(&sf_state, sizeof (struct sf),
            SF_INIT_ITEMS);
        if (i != 0)
                return (i);

        if ((i = scsi_hba_init(&modlinkage)) != 0) {
                ddi_soft_state_fini(&sf_state);
                return (i);
        }

        mutex_init(&sf_global_mutex, NULL, MUTEX_DRIVER, NULL);
        sf_watch_running = 0;
        cv_init(&sf_watch_cv, NULL, CV_DRIVER, NULL);

        if ((i = mod_install(&modlinkage)) != 0) {
                mutex_destroy(&sf_global_mutex);
                cv_destroy(&sf_watch_cv);
                scsi_hba_fini(&modlinkage);
                ddi_soft_state_fini(&sf_state);
                return (i);
        }

        return (i);
}


/*
 * remove this driver module from the system
 */
int
_fini(void)
{
        int     i;

        if ((i = mod_remove(&modlinkage)) == 0) {
                scsi_hba_fini(&modlinkage);
                mutex_destroy(&sf_global_mutex);
                cv_destroy(&sf_watch_cv);
                ddi_soft_state_fini(&sf_state);
        }
        return (i);
}


int
_info(struct modinfo *modinfop)
{
        return (mod_info(&modlinkage, modinfop));
}

/*
 * Given the device number return the devinfo pointer or instance
 */
/*ARGSUSED*/
static int
sf_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
        int             instance = SF_MINOR2INST(getminor((dev_t)arg));
        struct sf       *sf;

        switch (infocmd) {
        case DDI_INFO_DEVT2DEVINFO:
                sf = ddi_get_soft_state(sf_state, instance);
                if (sf != NULL)
                        *result = sf->sf_dip;
                else {
                        *result = NULL;
                        return (DDI_FAILURE);
                }
                break;

        case DDI_INFO_DEVT2INSTANCE:
                *result = (void *)(uintptr_t)instance;
                break;
        default:
                return (DDI_FAILURE);
        }
        return (DDI_SUCCESS);
}

/*
 * either attach or resume this driver
 */
static int
sf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
        int instance;
        int mutex_initted = FALSE;
        uint_t ccount;
        size_t i, real_size;
        struct fcal_transport *handle;
        char buf[64];
        struct sf *sf, *tsf;
        scsi_hba_tran_t *tran = NULL;
        int     handle_bound = FALSE;
        kthread_t *tp;


        switch ((int)cmd) {

        case DDI_RESUME:

                /*
                 * we've previously been SF_STATE_OFFLINEd by a DDI_SUSPEND,
                 * so time to undo that and get going again by forcing a
                 * lip
                 */

                instance = ddi_get_instance(dip);

                sf = ddi_get_soft_state(sf_state, instance);
                SF_DEBUG(2, (sf, CE_CONT,
                    "sf_attach: DDI_RESUME for sf%d\n", instance));
                if (sf == NULL) {
                        cmn_err(CE_WARN, "sf%d: bad soft state", instance);
                        return (DDI_FAILURE);
                }

                /*
                 * clear suspended flag so that normal operations can resume
                 */
                mutex_enter(&sf->sf_mutex);
                sf->sf_state &= ~SF_STATE_SUSPENDED;
                mutex_exit(&sf->sf_mutex);

                /*
                 * force a login by setting our state to offline
                 */
                sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
                sf->sf_state = SF_STATE_OFFLINE;

                /*
                 * call transport routine to register state change and
                 * ELS callback routines (to register us as a ULP)
                 */
                soc_add_ulp(sf->sf_sochandle, sf->sf_socp,
                    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
                    sf_statec_callback, sf_unsol_els_callback, NULL, sf);

                /*
                 * call transport routine to force loop initialization
                 */
                (void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
                    sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);

                /*
                 * increment watchdog init flag, setting watchdog timeout
                 * if we are the first (since somebody has to do it)
                 */
                mutex_enter(&sf_global_mutex);
                if (!sf_watchdog_init++) {
                        mutex_exit(&sf_global_mutex);
                        sf_watchdog_id = timeout(sf_watch,
                            (caddr_t)0, sf_watchdog_tick);
                } else {
                        mutex_exit(&sf_global_mutex);
                }

                return (DDI_SUCCESS);

        case DDI_ATTACH:

                /*
                 * this instance attaching for the first time
                 */

                instance = ddi_get_instance(dip);

                if (ddi_soft_state_zalloc(sf_state, instance) !=
                    DDI_SUCCESS) {
                        cmn_err(CE_WARN, "sf%d: failed to allocate soft state",
                            instance);
                        return (DDI_FAILURE);
                }

                sf = ddi_get_soft_state(sf_state, instance);
                SF_DEBUG(4, (sf, CE_CONT,
                    "sf_attach: DDI_ATTACH for sf%d\n", instance));
                if (sf == NULL) {
                        /* this shouldn't happen since we just allocated it */
                        cmn_err(CE_WARN, "sf%d: bad soft state", instance);
                        return (DDI_FAILURE);
                }

                /*
                 * from this point on, if there's an error, we must de-allocate
                 * soft state before returning DDI_FAILURE
                 */

                if ((handle = ddi_get_parent_data(dip)) == NULL) {
                        cmn_err(CE_WARN,
                            "sf%d: failed to obtain transport handle",
                            instance);
                        goto fail;
                }

                /* fill in our soft state structure */
                sf->sf_dip = dip;
                sf->sf_state = SF_STATE_INIT;
                sf->sf_throttle = handle->fcal_cmdmax;
                sf->sf_sochandle = handle;
                sf->sf_socp = handle->fcal_handle;
                sf->sf_check_n_close = 0;

                /* create a command/response buffer pool for this instance */
                if (sf_add_cr_pool(sf) != DDI_SUCCESS) {
                        cmn_err(CE_WARN,
                            "sf%d: failed to allocate command/response pool",
                            instance);
                        goto fail;
                }

                /* create a cache for this instance */
                (void) sprintf(buf, "sf%d_cache", instance);
                sf->sf_pkt_cache = kmem_cache_create(buf,
                    sizeof (fcal_packet_t) + sizeof (struct sf_pkt) +
                    scsi_pkt_size(), 8,
                    sf_kmem_cache_constructor, sf_kmem_cache_destructor,
                    NULL, NULL, NULL, 0);
                if (sf->sf_pkt_cache == NULL) {
                        cmn_err(CE_WARN, "sf%d: failed to allocate kmem cache",
                            instance);
                        goto fail;
                }

                /* set up a handle and allocate memory for DMA */
                if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->
                    fcal_dmaattr, DDI_DMA_DONTWAIT, NULL, &sf->
                    sf_lilp_dmahandle) != DDI_SUCCESS) {
                        cmn_err(CE_WARN,
                            "sf%d: failed to allocate dma handle for lilp map",
                            instance);
                        goto fail;
                }
                i = sizeof (struct fcal_lilp_map) + 1;
                if (ddi_dma_mem_alloc(sf->sf_lilp_dmahandle,
                    i, sf->sf_sochandle->
                    fcal_accattr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
                    (caddr_t *)&sf->sf_lilp_map, &real_size,
                    &sf->sf_lilp_acchandle) != DDI_SUCCESS) {
                        cmn_err(CE_WARN, "sf%d: failed to allocate lilp map",
                            instance);
                        goto fail;
                }
                if (real_size < i) {
                        /* no error message ??? */
                        goto fail;              /* trouble allocating memory */
                }

                /*
                 * set up the address for the DMA transfers (getting a cookie)
                 */
                if (ddi_dma_addr_bind_handle(sf->sf_lilp_dmahandle, NULL,
                    (caddr_t)sf->sf_lilp_map, real_size,
                    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
                    &sf->sf_lilp_dmacookie, &ccount) != DDI_DMA_MAPPED) {
                        cmn_err(CE_WARN,
                            "sf%d: failed to bind dma handle for lilp map",
                            instance);
                        goto fail;
                }
                handle_bound = TRUE;
                /* ensure only one cookie was allocated */
                if (ccount != 1) {
                        goto fail;
                }

                /* ensure LILP map and DMA cookie addresses are even?? */
                sf->sf_lilp_map = (struct fcal_lilp_map *)(((uintptr_t)sf->
                    sf_lilp_map + 1) & ~1);
                sf->sf_lilp_dmacookie.dmac_address = (sf->
                    sf_lilp_dmacookie.dmac_address + 1) & ~1;
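                /*
                 * (the map was allocated one byte larger than needed --
                 * see "i" above -- so rounding the virtual address and
                 * DMA cookie address up to an even boundary stays within
                 * the allocation)
                 */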

                /* set up all of our mutexes and condition variables */
                mutex_init(&sf->sf_mutex, NULL, MUTEX_DRIVER, NULL);
                mutex_init(&sf->sf_cmd_mutex, NULL, MUTEX_DRIVER, NULL);
                mutex_init(&sf->sf_cr_mutex, NULL, MUTEX_DRIVER, NULL);
                mutex_init(&sf->sf_hp_daemon_mutex, NULL, MUTEX_DRIVER, NULL);
                cv_init(&sf->sf_cr_cv, NULL, CV_DRIVER, NULL);
                cv_init(&sf->sf_hp_daemon_cv, NULL, CV_DRIVER, NULL);

                mutex_initted = TRUE;

                /* create our devctl minor node */
                if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
                    SF_INST2DEVCTL_MINOR(instance),
                    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
                        cmn_err(CE_WARN, "sf%d: ddi_create_minor_node failed"
                            " for devctl", instance);
                        goto fail;
                }

                /* create fc minor node */
                if (ddi_create_minor_node(dip, "fc", S_IFCHR,
                    SF_INST2FC_MINOR(instance), DDI_NT_FC_ATTACHMENT_POINT,
                    0) != DDI_SUCCESS) {
                        cmn_err(CE_WARN, "sf%d: ddi_create_minor_node failed"
                            " for fc", instance);
                        goto fail;
                }
                /* allocate a SCSI transport structure */
                tran = scsi_hba_tran_alloc(dip, 0);
                if (tran == NULL) {
                        /* remove all minor nodes created */
                        ddi_remove_minor_node(dip, NULL);
                        cmn_err(CE_WARN, "sf%d: scsi_hba_tran_alloc failed",
                            instance);
                        goto fail;
                }

                /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
                scsi_size_clean(dip);           /* SCSI_SIZE_CLEAN_VERIFY ok */

                /* save ptr to new transport structure and fill it in */
                sf->sf_tran = tran;

                tran->tran_hba_private               = sf;
                tran->tran_tgt_private               = NULL;
                tran->tran_tgt_init          = sf_scsi_tgt_init;
                tran->tran_tgt_probe         = NULL;
                tran->tran_tgt_free          = sf_scsi_tgt_free;

                tran->tran_start             = sf_start;
                tran->tran_abort             = sf_abort;
                tran->tran_reset             = sf_reset;
                tran->tran_getcap            = sf_getcap;
                tran->tran_setcap            = sf_setcap;
                tran->tran_init_pkt          = sf_scsi_init_pkt;
                tran->tran_destroy_pkt               = sf_scsi_destroy_pkt;
                tran->tran_dmafree           = sf_scsi_dmafree;
                tran->tran_sync_pkt          = sf_scsi_sync_pkt;
                tran->tran_reset_notify              = sf_scsi_reset_notify;

                /*
                 * register event notification routines with scsa
                 */
                tran->tran_get_eventcookie   = sf_bus_get_eventcookie;
                tran->tran_add_eventcall     = sf_bus_add_eventcall;
                tran->tran_remove_eventcall  = sf_bus_remove_eventcall;
                tran->tran_post_event                = sf_bus_post_event;

                /*
                 * register bus configure/unconfigure
                 */
                tran->tran_bus_config                = sf_scsi_bus_config;
                tran->tran_bus_unconfig              = sf_scsi_bus_unconfig;

                /*
                 * allocate an ndi event handle
                 */
                sf->sf_event_defs = (ndi_event_definition_t *)
                    kmem_zalloc(sizeof (sf_event_defs), KM_SLEEP);

                bcopy(sf_event_defs, sf->sf_event_defs,
                    sizeof (sf_event_defs));

                (void) ndi_event_alloc_hdl(dip, NULL,
                    &sf->sf_event_hdl, NDI_SLEEP);

                sf->sf_events.ndi_events_version = NDI_EVENTS_REV1;
                sf->sf_events.ndi_n_events = SF_N_NDI_EVENTS;
                sf->sf_events.ndi_event_defs = sf->sf_event_defs;

                if (ndi_event_bind_set(sf->sf_event_hdl,
                    &sf->sf_events, NDI_SLEEP) != NDI_SUCCESS) {
                        goto fail;
                }

                tran->tran_get_name          = sf_scsi_get_name;
                tran->tran_get_bus_addr              = sf_scsi_get_bus_addr;

                /* setup and attach SCSI hba transport */
                if (scsi_hba_attach_setup(dip, sf->sf_sochandle->
                    fcal_dmaattr, tran, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
                        cmn_err(CE_WARN, "sf%d: scsi_hba_attach_setup failed",
                            instance);
                        goto fail;
                }

                /* set up kstats */
                if ((sf->sf_ksp = kstat_create("sf", instance, "statistics",
                    "controller", KSTAT_TYPE_RAW, sizeof (struct sf_stats),
                    KSTAT_FLAG_VIRTUAL)) == NULL) {
                        cmn_err(CE_WARN, "sf%d: failed to create kstat",
                            instance);
                } else {
                        sf->sf_stats.version = 2;
                        (void) sprintf(sf->sf_stats.drvr_name,
                        "%s: %s", SF_NAME, sf_version);
                        sf->sf_ksp->ks_data = (void *)&sf->sf_stats;
                        sf->sf_ksp->ks_private = sf;
                        sf->sf_ksp->ks_update = sf_kstat_update;
                        kstat_install(sf->sf_ksp);
                }

                /* create the hotplug thread */
                mutex_enter(&sf->sf_hp_daemon_mutex);
                tp = thread_create(NULL, 0,
                    (void (*)())sf_hp_daemon, sf, 0, &p0, TS_RUN, minclsyspri);
                sf->sf_hp_tid = tp->t_did;
                mutex_exit(&sf->sf_hp_daemon_mutex);

                /* add this soft state instance to the head of the list */
                mutex_enter(&sf_global_mutex);
                sf->sf_next = sf_head;
                tsf = sf_head;
                sf_head = sf;

                /*
                 * find entry in list that has the same FC-AL handle (if any)
                 */
                while (tsf != NULL) {
                        if (tsf->sf_socp == sf->sf_socp) {
                                break;          /* found matching entry */
                        }
                        tsf = tsf->sf_next;
                }

                if (tsf != NULL) {
                        /* if we found a matching entry keep track of it */
                        sf->sf_sibling = tsf;
                }

                /*
                 * increment watchdog init flag, setting watchdog timeout
                 * if we are the first (since somebody has to do it)
                 */
                if (!sf_watchdog_init++) {
                        mutex_exit(&sf_global_mutex);
                        sf_watchdog_tick = sf_watchdog_timeout *
                            drv_usectohz(1000000);
                        sf_watchdog_id = timeout(sf_watch,
                            NULL, sf_watchdog_tick);
                } else {
                        mutex_exit(&sf_global_mutex);
                }

                if (tsf != NULL) {
                        /*
                         * set up matching entry to be our sibling
                         */
                        mutex_enter(&tsf->sf_mutex);
                        tsf->sf_sibling = sf;
                        mutex_exit(&tsf->sf_mutex);
                }

                /*
                 * create this property so that PM code knows we want
                 * to be suspended at PM time
                 */
                (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
                    PM_HARDWARE_STATE_PROP, PM_NEEDS_SUSPEND_RESUME);

                /* log the fact that we have a new device */
                ddi_report_dev(dip);

                /*
                 * force a login by setting our state to offline
                 */
                sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
                sf->sf_state = SF_STATE_OFFLINE;

                /*
                 * call transport routine to register state change and
                 * ELS callback routines (to register us as a ULP)
                 */
                soc_add_ulp(sf->sf_sochandle, sf->sf_socp,
                    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
                    sf_statec_callback, sf_unsol_els_callback, NULL, sf);

                /*
                 * call transport routine to force loop initialization
                 */
                (void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
                    sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);
                sf->sf_reset_time = ddi_get_lbolt64();
                return (DDI_SUCCESS);

        default:
                return (DDI_FAILURE);
        }

fail:
        cmn_err(CE_WARN, "sf%d: failed to attach", instance);

        /*
         * Unbind and free event set
         */
        if (sf->sf_event_hdl) {
                (void) ndi_event_unbind_set(sf->sf_event_hdl,
                    &sf->sf_events, NDI_SLEEP);
                (void) ndi_event_free_hdl(sf->sf_event_hdl);
        }

        if (sf->sf_event_defs) {
                kmem_free(sf->sf_event_defs, sizeof (sf_event_defs));
        }

        if (sf->sf_tran != NULL) {
                scsi_hba_tran_free(sf->sf_tran);
        }
        while (sf->sf_cr_pool != NULL) {
                sf_crpool_free(sf);
        }
        if (sf->sf_lilp_dmahandle != NULL) {
                if (handle_bound) {
                        (void) ddi_dma_unbind_handle(sf->sf_lilp_dmahandle);
                }
                ddi_dma_free_handle(&sf->sf_lilp_dmahandle);
        }
        if (sf->sf_pkt_cache != NULL) {
                kmem_cache_destroy(sf->sf_pkt_cache);
        }
        if (sf->sf_lilp_map != NULL) {
                ddi_dma_mem_free(&sf->sf_lilp_acchandle);
        }
        if (sf->sf_ksp != NULL) {
                kstat_delete(sf->sf_ksp);
        }
        if (mutex_initted) {
                mutex_destroy(&sf->sf_mutex);
                mutex_destroy(&sf->sf_cmd_mutex);
                mutex_destroy(&sf->sf_cr_mutex);
                mutex_destroy(&sf->sf_hp_daemon_mutex);
                cv_destroy(&sf->sf_cr_cv);
                cv_destroy(&sf->sf_hp_daemon_cv);
        }
        mutex_enter(&sf_global_mutex);

        /*
         * kill off the watchdog if we are the last instance
         */
        if (!--sf_watchdog_init) {
                timeout_id_t tid = sf_watchdog_id;
                mutex_exit(&sf_global_mutex);
                (void) untimeout(tid);
        } else {
                mutex_exit(&sf_global_mutex);
        }

        ddi_soft_state_free(sf_state, instance);

        if (tran != NULL) {
                /* remove all minor nodes */
                ddi_remove_minor_node(dip, NULL);
        }

        return (DDI_FAILURE);
}


/* ARGSUSED */
static int
sf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
        struct sf               *sf;
        int                     instance;
        int                     i;
        struct sf_target        *target;
        timeout_id_t            tid;



        /* NO OTHER THREADS ARE RUNNING */

        instance = ddi_get_instance(dip);

        if ((sf = ddi_get_soft_state(sf_state, instance)) == NULL) {
                cmn_err(CE_WARN, "sf_detach, sf%d: bad soft state", instance);
                return (DDI_FAILURE);
        }

        switch (cmd) {

        case DDI_SUSPEND:
                /*
                 * suspend our instance
                 */

                SF_DEBUG(2, (sf, CE_CONT,
                    "sf_detach: DDI_SUSPEND for sf%d\n", instance));
                /*
                 * There is a race condition in socal: if a ULP removes
                 * itself from the callback list while socal is walking
                 * that list in its callback loop, cblist becomes junk
                 * and the system panics when cblist->next is fetched.
                 */

                /* call transport to remove and unregister our callbacks */
                soc_remove_ulp(sf->sf_sochandle, sf->sf_socp,
                    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP, sf);

                /*
                 * begin process of clearing outstanding commands
                 * by issuing a lip
                 */
                sf_force_lip(sf);

                /*
                 * toggle the device OFFLINE in order to cause
                 * outstanding commands to drain
                 */
                mutex_enter(&sf->sf_mutex);
                sf->sf_lip_cnt++;
                sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
                sf->sf_state = (SF_STATE_OFFLINE | SF_STATE_SUSPENDED);
                for (i = 0; i < sf_max_targets; i++) {
                        target = sf->sf_targets[i];
                        if (target != NULL) {
                                struct sf_target *ntarget;

                                mutex_enter(&target->sft_mutex);
                                if (!(target->sft_state & SF_TARGET_OFFLINE)) {
                                        target->sft_state |=
                                            (SF_TARGET_BUSY | SF_TARGET_MARK);
                                }
                                /* do this for all LUNs as well */
                                for (ntarget = target->sft_next_lun;
                                    ntarget;
                                    ntarget = ntarget->sft_next_lun) {
                                        mutex_enter(&ntarget->sft_mutex);
                                        if (!(ntarget->sft_state &
                                            SF_TARGET_OFFLINE)) {
                                                ntarget->sft_state |=
                                                    (SF_TARGET_BUSY |
                                                    SF_TARGET_MARK);
                                        }
                                        mutex_exit(&ntarget->sft_mutex);
                                }
                                mutex_exit(&target->sft_mutex);
                        }
                }
                mutex_exit(&sf->sf_mutex);
                mutex_enter(&sf_global_mutex);

                /*
                 * kill off the watchdog if we are the last instance
                 */
                if (!--sf_watchdog_init) {
                        tid = sf_watchdog_id;
                        mutex_exit(&sf_global_mutex);
                        (void) untimeout(tid);
                } else {
                        mutex_exit(&sf_global_mutex);
                }

                return (DDI_SUCCESS);

        case DDI_DETACH:
                /*
                 * detach this instance
                 */

                SF_DEBUG(2, (sf, CE_CONT,
                    "sf_detach: DDI_DETACH for sf%d\n", instance));

                /* remove this "sf" from the list of sf softstates */
                sf_softstate_unlink(sf);

                /*
                 * prior to taking any DDI_DETACH actions, toggle the
                 * device OFFLINE in order to cause outstanding
                 * commands to drain
                 */
                mutex_enter(&sf->sf_mutex);
                sf->sf_lip_cnt++;
                sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
                sf->sf_state = SF_STATE_OFFLINE;
                for (i = 0; i < sf_max_targets; i++) {
                        target = sf->sf_targets[i];
                        if (target != NULL) {
                                struct sf_target *ntarget;

                                mutex_enter(&target->sft_mutex);
                                if (!(target->sft_state & SF_TARGET_OFFLINE)) {
                                        target->sft_state |=
                                            (SF_TARGET_BUSY | SF_TARGET_MARK);
                                }
                                for (ntarget = target->sft_next_lun;
                                    ntarget;
                                    ntarget = ntarget->sft_next_lun) {
                                        mutex_enter(&ntarget->sft_mutex);
                                        if (!(ntarget->sft_state &
                                            SF_TARGET_OFFLINE)) {
                                                ntarget->sft_state |=
                                                    (SF_TARGET_BUSY |
                                                    SF_TARGET_MARK);
                                        }
                                        mutex_exit(&ntarget->sft_mutex);
                                }
                                mutex_exit(&target->sft_mutex);
                        }
                }
                mutex_exit(&sf->sf_mutex);

                /* call transport to remove and unregister our callbacks */
                soc_remove_ulp(sf->sf_sochandle, sf->sf_socp,
                    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP, sf);

                /*
                 * kill off the watchdog if we are the last instance
                 */
                mutex_enter(&sf_global_mutex);
                if (!--sf_watchdog_init) {
                        tid = sf_watchdog_id;
                        mutex_exit(&sf_global_mutex);
                        (void) untimeout(tid);
                } else {
                        mutex_exit(&sf_global_mutex);
                }

                /* signal sf_hp_daemon() to exit and wait for exit */
                mutex_enter(&sf->sf_hp_daemon_mutex);
                ASSERT(sf->sf_hp_tid);
                sf->sf_hp_exit = 1;          /* flag exit */
                cv_signal(&sf->sf_hp_daemon_cv);
                mutex_exit(&sf->sf_hp_daemon_mutex);
                thread_join(sf->sf_hp_tid);  /* wait for hotplug to exit */

                /*
                 * Unbind and free event set
                 */
                if (sf->sf_event_hdl) {
                        (void) ndi_event_unbind_set(sf->sf_event_hdl,
                            &sf->sf_events, NDI_SLEEP);
                        (void) ndi_event_free_hdl(sf->sf_event_hdl);
                }

                if (sf->sf_event_defs) {
                        kmem_free(sf->sf_event_defs, sizeof (sf_event_defs));
                }

                /* detach this instance of the HBA driver */
                (void) scsi_hba_detach(dip);
                scsi_hba_tran_free(sf->sf_tran);

                /* deallocate/unbind DMA handle for lilp map */
                if (sf->sf_lilp_map != NULL) {
                        (void) ddi_dma_unbind_handle(sf->sf_lilp_dmahandle);
                        if (sf->sf_lilp_dmahandle != NULL) {
                                ddi_dma_free_handle(&sf->sf_lilp_dmahandle);
                        }
                        ddi_dma_mem_free(&sf->sf_lilp_acchandle);
                }

                /*
                 * the kmem cache must be destroyed before freeing
                 * up the crpools
                 *
                 * our finagle of "ntot" and "nfree"
                 * causes an ASSERT failure in "sf_cr_free()"
                 * if the kmem cache is freed after invoking
                 * "sf_crpool_free()".
                 */
                kmem_cache_destroy(sf->sf_pkt_cache);

                SF_DEBUG(2, (sf, CE_CONT,
                    "sf_detach: sf_crpool_free() for instance 0x%x\n",
                    instance));
                while (sf->sf_cr_pool != NULL) {
                        /*
                         * set ntot to nfree for this particular entry
                         *
                         * this causes sf_crpool_free() to update
                         * the cr_pool list when deallocating this entry
                         */
                        sf->sf_cr_pool->ntot = sf->sf_cr_pool->nfree;
                        sf_crpool_free(sf);
                }

                /*
                 * now that the cr_pools are gone it's safe
                 * to destroy all softstate mutexes and CVs
                 */
                mutex_destroy(&sf->sf_mutex);
                mutex_destroy(&sf->sf_cmd_mutex);
                mutex_destroy(&sf->sf_cr_mutex);
                mutex_destroy(&sf->sf_hp_daemon_mutex);
                cv_destroy(&sf->sf_cr_cv);
                cv_destroy(&sf->sf_hp_daemon_cv);

                /* remove all minor nodes from the device tree */
                ddi_remove_minor_node(dip, NULL);

                /* remove properties created during attach() */
                ddi_prop_remove_all(dip);

                /* remove kstat's if present */
                if (sf->sf_ksp != NULL) {
                        kstat_delete(sf->sf_ksp);
                }

                SF_DEBUG(2, (sf, CE_CONT,
                    "sf_detach: ddi_soft_state_free() for instance 0x%x\n",
                    instance));
                ddi_soft_state_free(sf_state, instance);
                return (DDI_SUCCESS);

        default:
                SF_DEBUG(2, (sf, CE_CONT, "sf_detach: sf%d unknown cmd %x\n",
                    instance, (int)cmd));
                return (DDI_FAILURE);
        }
}


/*
 * sf_softstate_unlink() - remove an sf instance from the list of softstates
 */
static void
sf_softstate_unlink(struct sf *sf)
{
        struct sf       *sf_ptr;
        struct sf       *sf_found_sibling;
        struct sf       *sf_reposition = NULL;


        mutex_enter(&sf_global_mutex);
        while (sf_watch_running) {
                /* Busy working the list -- wait */
                cv_wait(&sf_watch_cv, &sf_global_mutex);
        }
        if ((sf_found_sibling = sf->sf_sibling) != NULL) {
                /*
                 * we have a sibling so NULL out its reference to us
                 */
                mutex_enter(&sf_found_sibling->sf_mutex);
                sf_found_sibling->sf_sibling = NULL;
                mutex_exit(&sf_found_sibling->sf_mutex);
        }

        /* remove our instance from the global list */
        if (sf == sf_head) {
                /* we were at the head of the list */
1192                 sf_head = sf->sf_next;
1193         } else {
1194                 /* find us in the list */
1195                 for (sf_ptr = sf_head;
1196                     sf_ptr != NULL;
1197                     sf_ptr = sf_ptr->sf_next) {
1198                         if (sf_ptr == sf) {
1199                                 break;
1200                         }
1201                         /* remember this place */
1202                         sf_reposition = sf_ptr;
1203                 }
1204                 ASSERT(sf_ptr == sf);
1205                 ASSERT(sf_reposition != NULL);
1206 
1207                 sf_reposition->sf_next = sf_ptr->sf_next;
1208         }
1209         mutex_exit(&sf_global_mutex);
1210 }
1211 
1212 
1213 static int
1214 sf_scsi_bus_config(dev_info_t *parent, uint_t flag,
1215     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
1216 {
1217         int64_t         reset_delay;
1218         struct sf       *sf;
1219 
1220         sf = ddi_get_soft_state(sf_state, ddi_get_instance(parent));
1221         ASSERT(sf);
1222 
1223         reset_delay = (int64_t)(USEC_TO_TICK(SF_INIT_WAIT_TIMEOUT)) -
1224             (ddi_get_lbolt64() - sf->sf_reset_time);
1225         if (reset_delay < 0)
1226                 reset_delay = 0;
1227 
1228         if (sf_bus_config_debug)
1229                 flag |= NDI_DEVI_DEBUG;
1230 
1231         return (ndi_busop_bus_config(parent, flag, op,
1232             arg, childp, (clock_t)reset_delay));
1233 }
1234 
1235 static int
1236 sf_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
1237     ddi_bus_config_op_t op, void *arg)
1238 {
1239         if (sf_bus_config_debug)
1240                 flag |= NDI_DEVI_DEBUG;
1241 
1242         return (ndi_busop_bus_unconfig(parent, flag, op, arg));
1243 }
1244 
1245 
1246 /*
1247  * called by transport to initialize a SCSI target
1248  */
1249 /* ARGSUSED */
1250 static int
1251 sf_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1252     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1253 {
1254 #ifdef RAID_LUNS
1255         int lun;
1256 #else
1257         int64_t lun;
1258 #endif
1259         struct sf_target *target;
1260         struct sf *sf = (struct sf *)hba_tran->tran_hba_private;
1261         int i, t_len;
1262         unsigned int lip_cnt;
1263         unsigned char wwn[FC_WWN_SIZE];
1264 
1265 
1266         /* get and validate our SCSI target ID */
1267         i = sd->sd_address.a_target;
1268         if (i >= sf_max_targets) {
1269                 return (DDI_NOT_WELL_FORMED);
1270         }
1271 
1272         /* get our port WWN property */
1273         t_len = sizeof (wwn);
1274         if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1275             DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, PORT_WWN_PROP,
1276             (caddr_t)&wwn, &t_len) != DDI_SUCCESS) {
1277                 /* no port WWN property - ignore the OBP stub node */
1278                 return (DDI_NOT_WELL_FORMED);
1279         }
1280 
1281         /* get our LIP count property */
1282         t_len = sizeof (lip_cnt);
1283         if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1284             DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, LIP_CNT_PROP,
1285             (caddr_t)&lip_cnt, &t_len) != DDI_SUCCESS) {
1286                 return (DDI_FAILURE);
1287         }
1288         /* and our LUN property */
1289         t_len = sizeof (lun);
1290         if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1291             DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun",
1292             (caddr_t)&lun, &t_len) != DDI_SUCCESS) {
1293                 return (DDI_FAILURE);
1294         }
1295 
1296         /* find the target structure for this instance */
1297         mutex_enter(&sf->sf_mutex);
1298         if ((target = sf_lookup_target(sf, wwn, lun)) == NULL) {
1299                 mutex_exit(&sf->sf_mutex);
1300                 return (DDI_FAILURE);
1301         }
1302 
1303         mutex_enter(&target->sft_mutex);
1304         if ((sf->sf_lip_cnt == lip_cnt) && !(target->sft_state
1305             & SF_TARGET_INIT_DONE)) {
1306                 /*
1307                  * set links between HBA transport and target structures
1308                  * and set done flag
1309                  */
1310                 hba_tran->tran_tgt_private = target;
1311                 target->sft_tran = hba_tran;
1312                 target->sft_state |= SF_TARGET_INIT_DONE;
1313         } else {
1314                 /* already initialized, or LIP count has changed */
1315                 mutex_exit(&target->sft_mutex);
1316                 mutex_exit(&sf->sf_mutex);
1317                 return (DDI_FAILURE);
1318         }
1319         mutex_exit(&target->sft_mutex);
1320         mutex_exit(&sf->sf_mutex);
1321 
1322         return (DDI_SUCCESS);
1323 }
1324 
1325 
1326 /*
1327  * called by transport to free a target
1328  */
1329 /* ARGSUSED */
1330 static void
1331 sf_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1332     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1333 {
1334         struct sf_target *target = hba_tran->tran_tgt_private;
1335 
1336         if (target != NULL) {
1337                 mutex_enter(&target->sft_mutex);
1338                 target->sft_tran = NULL;
1339                 target->sft_state &= ~SF_TARGET_INIT_DONE;
1340                 mutex_exit(&target->sft_mutex);
1341         }
1342 }
1343 
1344 
1345 /*
1346  * allocator for non-std size cdb/pkt_private/status -- returns FALSE (0)
1347  * upon success, else returns TRUE
1348  */
1349 /*ARGSUSED*/
1350 static int
1351 sf_pkt_alloc_extern(struct sf *sf, struct sf_pkt *cmd,
1352     int tgtlen, int statuslen, int kf)
1353 {
1354         caddr_t scbp, tgt;
1355         int failure = FALSE;
1356         struct scsi_pkt *pkt = CMD2PKT(cmd);
1357 
1358 
1359         tgt = scbp = NULL;
1360 
1361         if (tgtlen > PKT_PRIV_LEN) {
1362                 if ((tgt = kmem_zalloc(tgtlen, kf)) == NULL) {
1363                         failure = TRUE;
1364                 } else {
1365                         cmd->cmd_flags |= CFLAG_PRIVEXTERN;
1366                         pkt->pkt_private = tgt;
1367                 }
1368         }
1369         if (statuslen > EXTCMDS_STATUS_SIZE) {
1370                 if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
1371                         failure = TRUE;
1372                 } else {
1373                         cmd->cmd_flags |= CFLAG_SCBEXTERN;
1374                         pkt->pkt_scbp = (opaque_t)scbp;
1375                 }
1376         }
1377         if (failure) {
1378                 sf_pkt_destroy_extern(sf, cmd);
1379         }
1380         return (failure);
1381 }
1382 
1383 
1384 /*
1385  * deallocator for non-std size cdb/pkt_private/status
1386  */
1387 static void
1388 sf_pkt_destroy_extern(struct sf *sf, struct sf_pkt *cmd)
1389 {
1390         struct scsi_pkt *pkt = CMD2PKT(cmd);
1391 
1392         if (cmd->cmd_flags & CFLAG_FREE) {
1393                 cmn_err(CE_PANIC,
1394                     "sf_pkt_destroy_extern: freeing free packet");
1395                 _NOTE(NOT_REACHED)
1396                 /* NOTREACHED */
1397         }
1398         if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
1399                 kmem_free((caddr_t)pkt->pkt_scbp,
1400                     (size_t)cmd->cmd_scblen);
1401         }
1402         if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
1403                 kmem_free((caddr_t)pkt->pkt_private,
1404                     (size_t)cmd->cmd_privlen);
1405         }
1406 
1407         cmd->cmd_flags = CFLAG_FREE;
1408         kmem_cache_free(sf->sf_pkt_cache, (void *)cmd);
1409 }
1410 
1411 
1412 /*
1413  * create or initialize a SCSI packet -- called internally and
1414  * by the transport
1415  */
1416 static struct scsi_pkt *
1417 sf_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
1418     struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1419     int flags, int (*callback)(), caddr_t arg)
1420 {
1421         int kf;
1422         int failure = FALSE;
1423         struct sf_pkt *cmd;
1424         struct sf *sf = ADDR2SF(ap);
1425         struct sf_target *target = ADDR2TARGET(ap);
1426         struct sf_pkt   *new_cmd = NULL;
1427         struct fcal_packet      *fpkt;
1428         fc_frame_header_t       *hp;
1429         struct fcp_cmd *fcmd;
1430 
1431 
1432         /*
1433          * If a pkt was already allocated (pkt != NULL) this request is
1434          * for DMA allocation only; otherwise allocate a new pkt first.
1435          */
1436         if (pkt == NULL) {
1437 
1438                 /*
1439                  * First step of sf_scsi_init_pkt:  pkt allocation
1440                  */
1441                 if (cmdlen > FCP_CDB_SIZE) {
1442                         return (NULL);
1443                 }
1444 
1445                 kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
1446 
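                     /*
                      * each pkt cache entry is assumed to be laid out as an
                      * sf_pkt followed by a fcal_packet and then a scsi_pkt,
                      * which is why the embedded packet pointers below are
                      * computed by offset from the start of the sf_pkt
                      */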
1447                 if ((cmd = kmem_cache_alloc(sf->sf_pkt_cache, kf)) != NULL) {
1448                         /*
1449                          * Selective zeroing of the pkt.
1450                          */
1451 
1452                         cmd->cmd_flags = 0;
1453                         cmd->cmd_forw = 0;
1454                         cmd->cmd_back = 0;
1455                         cmd->cmd_next = 0;
1456                         cmd->cmd_pkt = (struct scsi_pkt *)((char *)cmd +
1457                             sizeof (struct sf_pkt) + sizeof (struct
1458                             fcal_packet));
1459                         cmd->cmd_fp_pkt = (struct fcal_packet *)((char *)cmd +
1460                             sizeof (struct sf_pkt));
1461                         cmd->cmd_fp_pkt->fcal_pkt_private = (opaque_t)cmd;
1462                         cmd->cmd_state = SF_STATE_IDLE;
1463                         cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
1464                         cmd->cmd_pkt->pkt_scbp = (opaque_t)cmd->cmd_scsi_scb;
1465                         cmd->cmd_pkt->pkt_comp    = NULL;
1466                         cmd->cmd_pkt->pkt_flags   = 0;
1467                         cmd->cmd_pkt->pkt_time    = 0;
1468                         cmd->cmd_pkt->pkt_resid   = 0;
1469                         cmd->cmd_pkt->pkt_reason = 0;
1470                         cmd->cmd_cdblen = (uchar_t)cmdlen;
1471                         cmd->cmd_scblen              = statuslen;
1472                         cmd->cmd_privlen     = tgtlen;
1473                         cmd->cmd_pkt->pkt_address = *ap;
1474 
1475                         /* point pkt_private at the embedded area, zero it */
1476                         cmd->cmd_pkt->pkt_private =
1477                             cmd->cmd_pkt_private;
1478                         bzero((caddr_t)cmd->cmd_pkt->pkt_private,
1479                             PKT_PRIV_LEN);
1480                 } else {
1481                         failure = TRUE;
1482                 }
1483 
1484                 if (failure ||
1485                     (tgtlen > PKT_PRIV_LEN) ||
1486                     (statuslen > EXTCMDS_STATUS_SIZE)) {
1487                         if (!failure) {
1488                                 /* need to allocate more space */
1489                                 failure = sf_pkt_alloc_extern(sf, cmd,
1490                                     tgtlen, statuslen, kf);
1491                         }
1492                         if (failure) {
1493                                 return (NULL);
1494                         }
1495                 }
1496 
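                     /*
                      * cmd_block is NULL for a freshly constructed cache
                      * entry; in that case grab a cmd/response buffer pair
                      * from the pool and fill in the parts of the FC-AL
                      * packet that do not change for the life of this entry
                      */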
1497                 fpkt = cmd->cmd_fp_pkt;
1498                 if (cmd->cmd_block == NULL) {
1499 
1500                         /* allocate cmd/response pool buffers */
1501                         if (sf_cr_alloc(sf, cmd, callback) == DDI_FAILURE) {
1502                                 sf_pkt_destroy_extern(sf, cmd);
1503                                 return (NULL);
1504                         }
1505 
1506                         /* fill in the FC-AL packet */
1507                         fpkt->fcal_pkt_cookie = sf->sf_socp;
1508                         fpkt->fcal_pkt_comp = sf_cmd_callback;
1509                         fpkt->fcal_pkt_flags = 0;
1510                         fpkt->fcal_magic = FCALP_MAGIC;
1511                         fpkt->fcal_socal_request.sr_soc_hdr.sh_flags =
1512                             (ushort_t)(SOC_FC_HEADER |
1513                             sf->sf_sochandle->fcal_portno);
1514                         fpkt->fcal_socal_request.sr_soc_hdr.sh_class = 3;
1515                         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_count = 1;
1516                         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_flags = 0;
1517                         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_seqno = 0;
1518                         fpkt->fcal_socal_request.sr_dataseg[0].fc_base =
1519                             (uint32_t)cmd->cmd_dmac;
1520                         fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
1521                             sizeof (struct fcp_cmd);
1522                         fpkt->fcal_socal_request.sr_dataseg[1].fc_base =
1523                             (uint32_t)cmd->cmd_rsp_dmac;
1524                         fpkt->fcal_socal_request.sr_dataseg[1].fc_count =
1525                             FCP_MAX_RSP_IU_SIZE;
1526 
1527                         /* Fill in the Fibre Channel frame header */
1528                         hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
1529                         hp->r_ctl = R_CTL_COMMAND;
1530                         hp->type = TYPE_SCSI_FCP;
1531                         hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
1532                         hp->reserved1 = 0;
1533                         hp->seq_id = 0;
1534                         hp->df_ctl  = 0;
1535                         hp->seq_cnt = 0;
1536                         hp->ox_id = 0xffff;
1537                         hp->rx_id = 0xffff;
1538                         hp->ro = 0;
1539 
1540                         /* Establish the LUN */
1541                         bcopy((caddr_t)&target->sft_lun.b,
1542                             (caddr_t)&cmd->cmd_block->fcp_ent_addr,
1543                             FCP_LUN_SIZE);
1544                         *((int32_t *)&cmd->cmd_block->fcp_cntl) = 0;
1545                 }
1546                 cmd->cmd_pkt->pkt_cdbp = cmd->cmd_block->fcp_cdb;
1547 
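                     /*
                      * append this pkt to the tail of the target's circular,
                      * doubly-linked packet list
                      */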
1548                 mutex_enter(&target->sft_pkt_mutex);
1549 
1550                 target->sft_pkt_tail->cmd_forw = cmd;
1551                 cmd->cmd_back = target->sft_pkt_tail;
1552                 cmd->cmd_forw = (struct sf_pkt *)&target->sft_pkt_head;
1553                 target->sft_pkt_tail = cmd;
1554 
1555                 mutex_exit(&target->sft_pkt_mutex);
1556                 new_cmd = cmd;          /* for later cleanup if needed */
1557         } else {
1558                 /* pkt already exists -- just a request for DMA allocation */
1559                 cmd = PKT2CMD(pkt);
1560                 fpkt = cmd->cmd_fp_pkt;
1561         }
1562 
1563         /* zero the cdb */
1564         bzero((caddr_t)cmd->cmd_pkt->pkt_cdbp, cmdlen);
1565 
1566         /*
1567          * Second step of sf_scsi_init_pkt:  dma allocation
1568          * Set up dma info
1569          */
1570         if ((bp != NULL) && (bp->b_bcount != 0)) {
1571                 int cmd_flags, dma_flags;
1572                 int rval = 0;
1573                 uint_t dmacookie_count;
1574 
1575                 /* there is a buffer and some data to transfer */
1576 
1577                 /* set up command and DMA flags */
1578                 cmd_flags = cmd->cmd_flags;
1579                 if (bp->b_flags & B_READ) {
1580                         /* a read */
1581                         cmd_flags &= ~CFLAG_DMASEND;
1582                         dma_flags = DDI_DMA_READ;
1583                 } else {
1584                         /* a write */
1585                         cmd_flags |= CFLAG_DMASEND;
1586                         dma_flags = DDI_DMA_WRITE;
1587                 }
1588                 if (flags & PKT_CONSISTENT) {
1589                         cmd_flags |= CFLAG_CMDIOPB;
1590                         dma_flags |= DDI_DMA_CONSISTENT;
1591                 }
1592 
1593                 /* ensure we have a DMA handle */
1594                 if (cmd->cmd_dmahandle == NULL) {
1595                         rval = ddi_dma_alloc_handle(sf->sf_dip,
1596                             sf->sf_sochandle->fcal_dmaattr, callback, arg,
1597                             &cmd->cmd_dmahandle);
1598                 }
1599 
1600                 if (rval == 0) {
1601                         /* bind our DMA handle to our buffer */
1602                         rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
1603                             dma_flags, callback, arg, &cmd->cmd_dmacookie,
1604                             &dmacookie_count);
1605                 }
1606 
1607                 if (rval != 0) {
1608                         /* DMA failure */
1609                         SF_DEBUG(2, (sf, CE_CONT, "ddi_dma_buf.. failed\n"));
1610                         switch (rval) {
1611                         case DDI_DMA_NORESOURCES:
1612                                 bioerror(bp, 0);
1613                                 break;
1614                         case DDI_DMA_BADATTR:
1615                         case DDI_DMA_NOMAPPING:
1616                                 bioerror(bp, EFAULT);
1617                                 break;
1618                         case DDI_DMA_TOOBIG:
1619                         default:
1620                                 bioerror(bp, EINVAL);
1621                                 break;
1622                         }
1623                         /* clear valid flag */
1624                         cmd->cmd_flags = cmd_flags & ~CFLAG_DMAVALID;
1625                         if (new_cmd != NULL) {
1626                                 /* destroy packet if we just created it */
1627                                 sf_scsi_destroy_pkt(ap, new_cmd->cmd_pkt);
1628                         }
1629                         return (NULL);
1630                 }
1631 
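                     /*
                      * the DMA attributes used for the bind are expected to
                      * limit it to a single cookie
                      */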
1632                 ASSERT(dmacookie_count == 1);
1633                 /* set up amt to transfer and set valid flag */
1634                 cmd->cmd_dmacount = bp->b_bcount;
1635                 cmd->cmd_flags = cmd_flags | CFLAG_DMAVALID;
1636 
1637                 ASSERT(cmd->cmd_dmahandle != NULL);
1638         }
1639 
1640         /* set up FC-AL packet */
1641         fcmd = cmd->cmd_block;
1642 
1643         if (cmd->cmd_flags & CFLAG_DMAVALID) {
1644                 if (cmd->cmd_flags & CFLAG_DMASEND) {
1645                         /* DMA write */
1646                         fcmd->fcp_cntl.cntl_read_data = 0;
1647                         fcmd->fcp_cntl.cntl_write_data = 1;
1648                         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
1649                             CQ_TYPE_IO_WRITE;
1650                 } else {
1651                         /* DMA read */
1652                         fcmd->fcp_cntl.cntl_read_data = 1;
1653                         fcmd->fcp_cntl.cntl_write_data = 0;
1654                         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
1655                             CQ_TYPE_IO_READ;
1656                 }
1657                 fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
1658                     (uint32_t)cmd->cmd_dmacookie.dmac_address;
1659                 fpkt->fcal_socal_request.sr_dataseg[2].fc_count =
1660                     cmd->cmd_dmacookie.dmac_size;
1661                 fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
1662                 fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
1663                     cmd->cmd_dmacookie.dmac_size;
1664                 fcmd->fcp_data_len = cmd->cmd_dmacookie.dmac_size;
1665         } else {
1666                 /* not a read or write */
1667                 fcmd->fcp_cntl.cntl_read_data = 0;
1668                 fcmd->fcp_cntl.cntl_write_data = 0;
1669                 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_SIMPLE;
1670                 fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 2;
1671                 fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
1672                     sizeof (struct fcp_cmd);
1673                 fcmd->fcp_data_len = 0;
1674         }
1675         fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
1676 
1677         return (cmd->cmd_pkt);
1678 }
1679 
1680 
1681 /*
1682  * destroy a SCSI packet -- called internally and by the transport
1683  */
1684 static void
1685 sf_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1686 {
1687         struct sf_pkt *cmd = PKT2CMD(pkt);
1688         struct sf *sf = ADDR2SF(ap);
1689         struct sf_target *target = ADDR2TARGET(ap);
1690         struct fcal_packet      *fpkt = cmd->cmd_fp_pkt;
1691 
1692 
1693         if (cmd->cmd_flags & CFLAG_DMAVALID) {
1694                 /* DMA was set up -- clean up */
1695                 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
1696                 cmd->cmd_flags ^= CFLAG_DMAVALID;
1697         }
1698 
1699         /* take this packet off the doubly-linked list */
1700         mutex_enter(&target->sft_pkt_mutex);
1701         cmd->cmd_back->cmd_forw = cmd->cmd_forw;
1702         cmd->cmd_forw->cmd_back = cmd->cmd_back;
1703         mutex_exit(&target->sft_pkt_mutex);
1704 
1705         fpkt->fcal_pkt_flags = 0;
1706         /* free the packet */
1707         if ((cmd->cmd_flags &
1708             (CFLAG_FREE | CFLAG_PRIVEXTERN | CFLAG_SCBEXTERN)) == 0) {
1709                 /* just a regular packet */
1710                 ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
1711                 cmd->cmd_flags = CFLAG_FREE;
1712                 kmem_cache_free(sf->sf_pkt_cache, (void *)cmd);
1713         } else {
1714                 /* a packet with extra memory */
1715                 sf_pkt_destroy_extern(sf, cmd);
1716         }
1717 }
1718 
1719 
1720 /*
1721  * called by transport to unbind DMA handle
1722  */
1723 /* ARGSUSED */
1724 static void
1725 sf_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1726 {
1727         struct sf_pkt *cmd = PKT2CMD(pkt);
1728 
1729 
1730         if (cmd->cmd_flags & CFLAG_DMAVALID) {
1731                 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
1732                 cmd->cmd_flags ^= CFLAG_DMAVALID;
1733         }
1734 
1735 }
1736 
1737 
1738 /*
1739  * called by transport to synchronize CPU and I/O views of memory
1740  */
1741 /* ARGSUSED */
1742 static void
1743 sf_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1744 {
1745         struct sf_pkt *cmd = PKT2CMD(pkt);
1746 
1747 
1748         if (cmd->cmd_flags & CFLAG_DMAVALID) {
1749                 if (ddi_dma_sync(cmd->cmd_dmahandle, (off_t)0, (size_t)0,
1750                     (cmd->cmd_flags & CFLAG_DMASEND) ?
1751                     DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
1752                     DDI_SUCCESS) {
1753                         cmn_err(CE_WARN, "sf: sync pkt failed");
1754                 }
1755         }
1756 }
1757 
1758 
1759 /*
1760  * routine for reset notification setup (register or cancel) -- called
1761  * by the transport
1762  */
1763 static int
1764 sf_scsi_reset_notify(struct scsi_address *ap, int flag,
1765     void (*callback)(caddr_t), caddr_t arg)
1766 {
1767         struct sf       *sf = ADDR2SF(ap);
1768 
1769         return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
1770             &sf->sf_mutex, &sf->sf_reset_notify_listf));
1771 }
1772 
1773 
1774 /*
1775  * called by transport to get port WWN property (except sun4u)
1776  */
1777 /* ARGSUSED */
1778 static int
1779 sf_scsi_get_name(struct scsi_device *sd, char *name, int len)
1780 {
1781         char tbuf[(FC_WWN_SIZE*2)+1];
1782         unsigned char wwn[FC_WWN_SIZE];
1783         int i, lun;
1784         dev_info_t *tgt_dip;
1785 
1786         tgt_dip = sd->sd_dev;
1787         i = sizeof (wwn);
1788         if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1789             DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, PORT_WWN_PROP,
1790             (caddr_t)&wwn, &i) != DDI_SUCCESS) {
1791                 name[0] = '\0';
1792                 return (0);
1793         }
1794         i = sizeof (lun);
1795         if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1796             DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun",
1797             (caddr_t)&lun, &i) != DDI_SUCCESS) {
1798                 name[0] = '\0';
1799                 return (0);
1800         }
1801         for (i = 0; i < FC_WWN_SIZE; i++)
1802                 (void) sprintf(&tbuf[i << 1], "%02x", wwn[i]);
1803         (void) sprintf(name, "w%s,%x", tbuf, lun);
1804         return (1);
1805 }
1806 
1807 
1808 /*
1809  * called by transport to get target soft AL-PA (except sun4u)
1810  */
1811 /* ARGSUSED */
1812 static int
1813 sf_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
1814 {
1815         struct sf_target *target = ADDR2TARGET(&sd->sd_address);
1816 
1817         if (target == NULL)
1818                 return (0);
1819 
1820         (void) sprintf(name, "%x", target->sft_al_pa);
1821         return (1);
1822 }
1823 
1824 
1825 /*
1826  * add to the command/response buffer pool for this sf instance
1827  */
1828 static int
1829 sf_add_cr_pool(struct sf *sf)
1830 {
1831         int             cmd_buf_size;
1832         size_t          real_cmd_buf_size;
1833         int             rsp_buf_size;
1834         size_t          real_rsp_buf_size;
1835         uint_t          i, ccount;
1836         struct sf_cr_pool       *ptr;
1837         struct sf_cr_free_elem *cptr;
1838         caddr_t dptr, eptr;
1839         ddi_dma_cookie_t        cmd_cookie;
1840         ddi_dma_cookie_t        rsp_cookie;
1841         int             cmd_bound = FALSE, rsp_bound = FALSE;
1842 
1843 
1844         /* allocate room for the pool */
1845         if ((ptr = kmem_zalloc(sizeof (struct sf_cr_pool), KM_NOSLEEP)) ==
1846             NULL) {
1847                 return (DDI_FAILURE);
1848         }
1849 
1850         /* allocate a DMA handle for the command pool */
1851         if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
1852             DDI_DMA_DONTWAIT, NULL, &ptr->cmd_dma_handle) != DDI_SUCCESS) {
1853                 goto fail;
1854         }
1855 
1856         /*
1857          * Get a piece of memory in which to put commands
1858          */
1859         cmd_buf_size = (sizeof (struct fcp_cmd) * SF_ELEMS_IN_POOL + 7) & ~7;
1860         if (ddi_dma_mem_alloc(ptr->cmd_dma_handle, cmd_buf_size,
1861             sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
1862             DDI_DMA_DONTWAIT, NULL, (caddr_t *)&ptr->cmd_base,
1863             &real_cmd_buf_size, &ptr->cmd_acc_handle) != DDI_SUCCESS) {
1864                 goto fail;
1865         }
1866 
1867         /* bind the DMA handle to an address */
1868         if (ddi_dma_addr_bind_handle(ptr->cmd_dma_handle, NULL,
1869             ptr->cmd_base, real_cmd_buf_size,
1870             DDI_DMA_WRITE | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
1871             NULL, &cmd_cookie, &ccount) != DDI_DMA_MAPPED) {
1872                 goto fail;
1873         }
1874         cmd_bound = TRUE;
1875         /* ensure only one cookie was allocated */
1876         if (ccount != 1) {
1877                 goto fail;
1878         }
1879 
1880         /* allocate a DMA handle for the response pool */
1881         if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
1882             DDI_DMA_DONTWAIT, NULL, &ptr->rsp_dma_handle) != DDI_SUCCESS) {
1883                 goto fail;
1884         }
1885 
1886         /*
1887          * Get a piece of memory in which to put responses
1888          */
1889         rsp_buf_size = FCP_MAX_RSP_IU_SIZE * SF_ELEMS_IN_POOL;
1890         if (ddi_dma_mem_alloc(ptr->rsp_dma_handle, rsp_buf_size,
1891             sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
1892             DDI_DMA_DONTWAIT, NULL, (caddr_t *)&ptr->rsp_base,
1893             &real_rsp_buf_size, &ptr->rsp_acc_handle) != DDI_SUCCESS) {
1894                 goto fail;
1895         }
1896 
1897         /* bind the DMA handle to an address */
1898         if (ddi_dma_addr_bind_handle(ptr->rsp_dma_handle, NULL,
1899             ptr->rsp_base, real_rsp_buf_size,
1900             DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
1901             NULL, &rsp_cookie, &ccount) != DDI_DMA_MAPPED) {
1902                 goto fail;
1903         }
1904         rsp_bound = TRUE;
1905         /* ensure only one cookie was allocated */
1906         if (ccount != 1) {
1907                 goto fail;
1908         }
1909 
1910         /*
1911          * Generate a (cmd/rsp structure) free list
1912          */
1913         /* round the command buffer pointer up to an 8-byte boundary */
1914         dptr = (caddr_t)(((uintptr_t)ptr->cmd_base + 7) & ~7);
1915         /* keep track of actual size after moving pointer */
1916         real_cmd_buf_size -= (dptr - ptr->cmd_base);
1917         eptr = ptr->rsp_base;
1918 
1919         /* set actual total number of entries */
1920         ptr->ntot = min((real_cmd_buf_size / sizeof (struct fcp_cmd)),
1921             (real_rsp_buf_size / FCP_MAX_RSP_IU_SIZE));
1922         ptr->nfree = ptr->ntot;
1923         ptr->free = (struct sf_cr_free_elem *)ptr->cmd_base;
1924         ptr->sf = sf;
1925 
1926         /* set up DMA for each pair of entries */
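             /*
              * each free-list element overlays the start of its fcp_cmd
              * buffer and records the matching response buffer address plus
              * the DMA addresses of both, so they can later be handed out
              * as a pair
              */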
1927         i = 0;
1928         while (i < ptr->ntot) {
1929                 cptr = (struct sf_cr_free_elem *)dptr;
1930                 dptr += sizeof (struct fcp_cmd);
1931 
1932                 cptr->next = (struct sf_cr_free_elem *)dptr;
1933                 cptr->rsp = eptr;
1934 
1935                 cptr->cmd_dmac = cmd_cookie.dmac_address +
1936                     (uint32_t)((caddr_t)cptr - ptr->cmd_base);
1937 
1938                 cptr->rsp_dmac = rsp_cookie.dmac_address +
1939                     (uint32_t)((caddr_t)eptr - ptr->rsp_base);
1940 
1941                 eptr += FCP_MAX_RSP_IU_SIZE;
1942                 i++;
1943         }
1944 
1945         /* terminate the list */
1946         cptr->next = NULL;
1947 
1948         /* add this list at front of current one */
1949         mutex_enter(&sf->sf_cr_mutex);
1950         ptr->next = sf->sf_cr_pool;
1951         sf->sf_cr_pool = ptr;
1952         sf->sf_cr_pool_cnt++;
1953         mutex_exit(&sf->sf_cr_mutex);
1954 
1955         return (DDI_SUCCESS);
1956 
1957 fail:
1958         /* we failed so clean up */
1959         if (ptr->cmd_dma_handle != NULL) {
1960                 if (cmd_bound) {
1961                         (void) ddi_dma_unbind_handle(ptr->cmd_dma_handle);
1962                 }
1963                 ddi_dma_free_handle(&ptr->cmd_dma_handle);
1964         }
1965 
1966         if (ptr->rsp_dma_handle != NULL) {
1967                 if (rsp_bound) {
1968                         (void) ddi_dma_unbind_handle(ptr->rsp_dma_handle);
1969                 }
1970                 ddi_dma_free_handle(&ptr->rsp_dma_handle);
1971         }
1972 
1973         if (ptr->cmd_base != NULL) {
1974                 ddi_dma_mem_free(&ptr->cmd_acc_handle);
1975         }
1976 
1977         if (ptr->rsp_base != NULL) {
1978                 ddi_dma_mem_free(&ptr->rsp_acc_handle);
1979         }
1980 
1981         kmem_free((caddr_t)ptr, sizeof (struct sf_cr_pool));
1982         return (DDI_FAILURE);
1983 }
1984 
1985 
1986 /*
1987  * allocate a command/response buffer from the pool, allocating more
1988  * in the pool as needed
1989  */
1990 static int
1991 sf_cr_alloc(struct sf *sf, struct sf_pkt *cmd, int (*func)())
1992 {
1993         struct sf_cr_pool *ptr;
1994         struct sf_cr_free_elem *cptr;
1995 
1996 
1997         mutex_enter(&sf->sf_cr_mutex);
1998 
1999 try_again:
2000 
2001         /* find a free buffer in the existing pool */
2002         ptr = sf->sf_cr_pool;
2003         while (ptr != NULL) {
2004                 if (ptr->nfree != 0) {
2005                         ptr->nfree--;
2006                         break;
2007                 } else {
2008                         ptr = ptr->next;
2009                 }
2010         }
2011 
2012         /* did we find a free buffer ? */
2013         if (ptr != NULL) {
2014                 /* we found a free buffer -- take it off the free list */
2015                 cptr = ptr->free;
2016                 ptr->free = cptr->next;
2017                 mutex_exit(&sf->sf_cr_mutex);
2018                 /* set up the command to use the buffer pair */
2019                 cmd->cmd_block = (struct fcp_cmd *)cptr;
2020                 cmd->cmd_dmac = cptr->cmd_dmac;
2021                 cmd->cmd_rsp_dmac = cptr->rsp_dmac;
2022                 cmd->cmd_rsp_block = (struct fcp_rsp *)cptr->rsp;
2023                 cmd->cmd_cr_pool = ptr;
2024                 return (DDI_SUCCESS);           /* success */
2025         }
2026 
2027         /* no free buffer available -- can we allocate more ? */
2028         if (sf->sf_cr_pool_cnt < SF_CR_POOL_MAX) {
2029                 /* we need to allocate more buffer pairs */
2030                 if (sf->sf_cr_flag) {
2031                         /* somebody already allocating for this instance */
2032                         if (func == SLEEP_FUNC) {
2033                                 /* user wants to wait */
2034                                 cv_wait(&sf->sf_cr_cv, &sf->sf_cr_mutex);
2035                                 /* we've been woken so go try again */
2036                                 goto try_again;
2037                         }
2038                         /* user does not want to wait */
2039                         mutex_exit(&sf->sf_cr_mutex);
2040                         sf->sf_stats.cralloc_failures++;
2041                         return (DDI_FAILURE);   /* give up */
2042                 }
2043                 /* set flag saying we're allocating */
2044                 sf->sf_cr_flag = 1;
2045                 mutex_exit(&sf->sf_cr_mutex);
2046                 /* add to our pool */
2047                 if (sf_add_cr_pool(sf) != DDI_SUCCESS) {
2048                         /* couldn't add to our pool for some reason */
2049                         mutex_enter(&sf->sf_cr_mutex);
2050                         sf->sf_cr_flag = 0;
2051                         cv_broadcast(&sf->sf_cr_cv);
2052                         mutex_exit(&sf->sf_cr_mutex);
2053                         sf->sf_stats.cralloc_failures++;
2054                         return (DDI_FAILURE);   /* give up */
2055                 }
2056                 /*
2057                  * clear the flag saying we're allocating and wake up
2058                  * anyone else that cares
2059                  */
2060                 mutex_enter(&sf->sf_cr_mutex);
2061                 sf->sf_cr_flag = 0;
2062                 cv_broadcast(&sf->sf_cr_cv);
2063                 /* now that we have more buffers try again */
2064                 goto try_again;
2065         }
2066 
2067         /* we don't have room to allocate any more buffers */
2068         mutex_exit(&sf->sf_cr_mutex);
2069         sf->sf_stats.cralloc_failures++;
2070         return (DDI_FAILURE);                   /* give up */
2071 }
2072 
2073 
2074 /*
2075  * free a cmd/response buffer pair in our pool
2076  */
2077 static void
2078 sf_cr_free(struct sf_cr_pool *cp, struct sf_pkt *cmd)
2079 {
2080         struct sf *sf = cp->sf;
2081         struct sf_cr_free_elem *elem;
2082 
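             /*
              * reuse the (now idle) command buffer as the free-list element,
              * saving the response buffer address and both DMA addresses so
              * the pair can be handed out again later
              */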
2083         elem = (struct sf_cr_free_elem *)cmd->cmd_block;
2084         elem->rsp = (caddr_t)cmd->cmd_rsp_block;
2085         elem->cmd_dmac = cmd->cmd_dmac;
2086         elem->rsp_dmac = cmd->cmd_rsp_dmac;
2087 
2088         mutex_enter(&sf->sf_cr_mutex);
2089         cp->nfree++;
2090         ASSERT(cp->nfree <= cp->ntot);
2091 
2092         elem->next = cp->free;
2093         cp->free = elem;
2094         mutex_exit(&sf->sf_cr_mutex);
2095 }
2096 
2097 
2098 /*
2099  * free our pool of cmd/response buffers
2100  */
2101 static void
2102 sf_crpool_free(struct sf *sf)
2103 {
2104         struct sf_cr_pool *cp, *prev;
2105 
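             /*
              * look for a pool with no buffers currently in use; note that
              * at most one such pool is freed per call
              */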
2106         prev = NULL;
2107         mutex_enter(&sf->sf_cr_mutex);
2108         cp = sf->sf_cr_pool;
2109         while (cp != NULL) {
2110                 if (cp->nfree == cp->ntot) {
2111                         if (prev != NULL) {
2112                                 prev->next = cp->next;
2113                         } else {
2114                                 sf->sf_cr_pool = cp->next;
2115                         }
2116                         sf->sf_cr_pool_cnt--;
2117                         mutex_exit(&sf->sf_cr_mutex);
2118 
2119                         (void) ddi_dma_unbind_handle(cp->cmd_dma_handle);
2120                         ddi_dma_free_handle(&cp->cmd_dma_handle);
2121                         (void) ddi_dma_unbind_handle(cp->rsp_dma_handle);
2122                         ddi_dma_free_handle(&cp->rsp_dma_handle);
2123                         ddi_dma_mem_free(&cp->cmd_acc_handle);
2124                         ddi_dma_mem_free(&cp->rsp_acc_handle);
2125                         kmem_free((caddr_t)cp, sizeof (struct sf_cr_pool));
2126                         return;
2127                 }
2128                 prev = cp;
2129                 cp = cp->next;
2130         }
2131         mutex_exit(&sf->sf_cr_mutex);
2132 }
2133 
2134 
2135 /* ARGSUSED */
2136 static int
2137 sf_kmem_cache_constructor(void *buf, void *arg, int size)
2138 {
2139         struct sf_pkt *cmd = buf;
2140 
2141         mutex_init(&cmd->cmd_abort_mutex, NULL, MUTEX_DRIVER, NULL);
2142         cmd->cmd_block = NULL;
2143         cmd->cmd_dmahandle = NULL;
2144         return (0);
2145 }
2146 
2147 
2148 /* ARGSUSED */
2149 static void
2150 sf_kmem_cache_destructor(void *buf, void *size)
2151 {
2152         struct sf_pkt *cmd = buf;
2153 
2154         if (cmd->cmd_dmahandle != NULL) {
2155                 ddi_dma_free_handle(&cmd->cmd_dmahandle);
2156         }
2157 
2158         if (cmd->cmd_block != NULL) {
2159                 sf_cr_free(cmd->cmd_cr_pool, cmd);
2160         }
2161         mutex_destroy(&cmd->cmd_abort_mutex);
2162 }
2163 
2164 
2165 /*
2166  * called by transport when a state change occurs
2167  */
2168 static void
2169 sf_statec_callback(void *arg, int msg)
2170 {
2171         struct sf *sf = (struct sf *)arg;
2172         struct sf_target        *target;
2173         int i;
2174         struct sf_pkt *cmd;
2175         struct scsi_pkt *pkt;
2176 
2177 
2178 
2179         switch (msg) {
2180 
2181         case FCAL_STATUS_LOOP_ONLINE: {
2182                 uchar_t         al_pa;          /* to save AL-PA */
2183                 int             ret;            /* ret value from getmap */
2184                 int             lip_cnt;        /* to save current count */
2185                 int             cnt;            /* map length */
2186 
2187                 /*
2188                  * the loop has gone online
2189                  */
2190                 SF_DEBUG(1, (sf, CE_CONT, "sf%d: loop online\n",
2191                     ddi_get_instance(sf->sf_dip)));
2192                 mutex_enter(&sf->sf_mutex);
2193                 sf->sf_lip_cnt++;
2194                 sf->sf_state = SF_STATE_ONLINING;
2195                 mutex_exit(&sf->sf_mutex);
2196 
2197                 /* scan each target hash queue */
2198                 for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
2199                         target = sf->sf_wwn_lists[i];
2200                         while (target != NULL) {
2201                                 /*
2202                                  * foreach target, if it's not offline then
2203                                  * mark it as busy
2204                                  */
2205                                 mutex_enter(&target->sft_mutex);
2206                                 if (!(target->sft_state & SF_TARGET_OFFLINE))
2207                                         target->sft_state |= (SF_TARGET_BUSY
2208                                             | SF_TARGET_MARK);
2209 #ifdef DEBUG
2210                                 /*
2211                                  * for debugging, print out info on any
2212                                  * pending commands (left hanging)
2213                                  */
2214                                 cmd = target->sft_pkt_head;
2215                                 while (cmd != (struct sf_pkt *)&target->
2216                                     sft_pkt_head) {
2217                                         if (cmd->cmd_state ==
2218                                             SF_STATE_ISSUED) {
2219                                                 SF_DEBUG(1, (sf, CE_CONT,
2220                                                     "cmd 0x%p pending "
2221                                                     "after lip\n",
2222                                                     (void *)cmd->cmd_fp_pkt));
2223                                         }
2224                                         cmd = cmd->cmd_forw;
2225                                 }
2226 #endif
2227                                 mutex_exit(&target->sft_mutex);
2228                                 target = target->sft_next;
2229                         }
2230                 }
2231 
2232                 /*
2233                  * since the loop has just gone online get a new map from
2234                  * the transport
2235                  */
2236                 if ((ret = soc_get_lilp_map(sf->sf_sochandle, sf->sf_socp,
2237                     sf->sf_sochandle->fcal_portno, (uint32_t)sf->
2238                     sf_lilp_dmacookie.dmac_address, 1)) != FCAL_SUCCESS) {
2239                         if (sf_core && (sf_core & SF_CORE_LILP_FAILED)) {
2240                                 (void) soc_take_core(sf->sf_sochandle,
2241                                     sf->sf_socp);
2242                                 sf_core = 0;
2243                         }
2244                         sf_log(sf, CE_WARN,
2245                             "!soc lilp map failed status=0x%x\n", ret);
2246                         mutex_enter(&sf->sf_mutex);
2247                         sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
2248                         sf->sf_lip_cnt++;
2249                         sf->sf_state = SF_STATE_OFFLINE;
2250                         mutex_exit(&sf->sf_mutex);
2251                         return;
2252                 }
2253 
2254                 /* ensure consistent view of DMA memory */
2255                 (void) ddi_dma_sync(sf->sf_lilp_dmahandle, (off_t)0, (size_t)0,
2256                     DDI_DMA_SYNC_FORKERNEL);
2257 
2258                 /* how many entries in map ? */
2259                 cnt = sf->sf_lilp_map->lilp_length;
2260                 if (cnt >= SF_MAX_LILP_ENTRIES) {
2261                         sf_log(sf, CE_WARN, "invalid lilp map\n");
2262                         return;
2263                 }
2264 
2265                 mutex_enter(&sf->sf_mutex);
2266                 sf->sf_device_count = cnt - 1;
2267                 sf->sf_al_pa = sf->sf_lilp_map->lilp_myalpa;
2268                 lip_cnt = sf->sf_lip_cnt;
2269                 al_pa = sf->sf_al_pa;
2270 
2271                 SF_DEBUG(1, (sf, CE_CONT,
2272                     "!lilp map has %d entries, al_pa is %x\n", cnt, al_pa));
2273 
2274                 /*
2275                  * the last entry of the map is commonly our own AL-PA;
2276                  * check for that, and if so we have one less entry to look at
2277                  */
2278                 if (sf->sf_lilp_map->lilp_alpalist[cnt-1] == al_pa) {
2279                         cnt--;
2280                 }
2281                 /* If we didn't get a valid loop map enable all targets */
2282                 if (sf->sf_lilp_map->lilp_magic == FCAL_BADLILP_MAGIC) {
2283                         for (i = 0; i < sizeof (sf_switch_to_alpa); i++)
2284                                 sf->sf_lilp_map->lilp_alpalist[i] =
2285                                     sf_switch_to_alpa[i];
2286                         cnt = i;
2287                         sf->sf_device_count = cnt - 1;
2288                 }
2289                 if (sf->sf_device_count == 0) {
2290                         sf_finish_init(sf, lip_cnt);
2291                         mutex_exit(&sf->sf_mutex);
2292                         break;
2293                 }
2294                 mutex_exit(&sf->sf_mutex);
2295 
2296                 SF_DEBUG(2, (sf, CE_WARN,
2297                     "!statec_callback: starting with %d targets\n",
2298                     sf->sf_device_count));
2299 
2300                 /* scan loop map, logging into all ports (except mine) */
2301                 for (i = 0; i < cnt; i++) {
2302                         SF_DEBUG(1, (sf, CE_CONT,
2303                             "!lilp map entry %d = %x,%x\n", i,
2304                             sf->sf_lilp_map->lilp_alpalist[i],
2305                             sf_alpa_to_switch[
2306                             sf->sf_lilp_map->lilp_alpalist[i]]));
2307                         /* is this entry for somebody else ? */
2308                         if (sf->sf_lilp_map->lilp_alpalist[i] != al_pa) {
2309                                 /* do a PLOGI to this port */
2310                                 if (!sf_login(sf, LA_ELS_PLOGI,
2311                                     sf->sf_lilp_map->lilp_alpalist[i],
2312                                     sf->sf_lilp_map->lilp_alpalist[cnt-1],
2313                                     lip_cnt)) {
2314                                         /* a problem logging in */
2315                                         mutex_enter(&sf->sf_mutex);
2316                                         if (lip_cnt == sf->sf_lip_cnt) {
2317                                                 /*
2318                                                  * problem not from a new LIP
2319                                                  */
2320                                                 sf->sf_device_count--;
2321                                                 ASSERT(sf->sf_device_count
2322                                                     >= 0);
2323                                                 if (sf->sf_device_count == 0) {
2324                                                         sf_finish_init(sf,
2325                                                             lip_cnt);
2326                                                 }
2327                                         }
2328                                         mutex_exit(&sf->sf_mutex);
2329                                 }
2330                         }
2331                 }
2332                 break;
2333         }
2334 
2335         case FCAL_STATUS_ERR_OFFLINE:
2336                 /*
2337                  * loop has gone offline due to an error
2338                  */
2339                 SF_DEBUG(1, (sf, CE_CONT, "sf%d: loop offline\n",
2340                     ddi_get_instance(sf->sf_dip)));
2341                 mutex_enter(&sf->sf_mutex);
2342                 sf->sf_lip_cnt++;
2343                 sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
2344                 if (!sf->sf_online_timer) {
2345                         sf->sf_online_timer = sf_watchdog_time +
2346                             SF_ONLINE_TIMEOUT;
2347                 }
2348                 /*
2349                  * if we are suspended, preserve the SF_STATE_SUSPENDED flag,
2350                  * since throttling logic in sf_watch() depends on
2351                  * preservation of this flag while device is suspended
2352                  */
2353                 if (sf->sf_state & SF_STATE_SUSPENDED) {
2354                         sf->sf_state |= SF_STATE_OFFLINE;
2355                         SF_DEBUG(1, (sf, CE_CONT,
2356                             "sf_statec_callback, sf%d: "
2357                             "got FCAL_STATE_OFFLINE during DDI_SUSPEND\n",
2358                             ddi_get_instance(sf->sf_dip)));
2359                 } else {
2360                         sf->sf_state = SF_STATE_OFFLINE;
2361                 }
2362 
2363                 /* scan each possible target on the loop */
2364                 for (i = 0; i < sf_max_targets; i++) {
2365                         target = sf->sf_targets[i];
2366                         while (target != NULL) {
2367                                 mutex_enter(&target->sft_mutex);
2368                                 if (!(target->sft_state & SF_TARGET_OFFLINE))
2369                                         target->sft_state |= (SF_TARGET_BUSY
2370                                             | SF_TARGET_MARK);
2371                                 mutex_exit(&target->sft_mutex);
2372                                 target = target->sft_next_lun;
2373                         }
2374                 }
2375                 mutex_exit(&sf->sf_mutex);
2376                 break;
2377 
2378         case FCAL_STATE_RESET: {
2379                 struct sf_els_hdr       *privp; /* ptr to private list */
2380                 struct sf_els_hdr       *tmpp1; /* tmp prev hdr ptr */
2381                 struct sf_els_hdr       *tmpp2; /* tmp next hdr ptr */
2382                 struct sf_els_hdr       *head;  /* to save our private list */
2383                 struct fcal_packet      *fpkt;  /* ptr to pkt in hdr */
2384 
2385                 /*
2386                  * a transport reset
2387                  */
2388                 SF_DEBUG(1, (sf, CE_CONT, "!sf%d: soc reset\n",
2389                     ddi_get_instance(sf->sf_dip)));
2390                 tmpp1 = head = NULL;
2391                 mutex_enter(&sf->sf_mutex);
2392                 sf->sf_lip_cnt++;
2393                 sf->sf_timer = sf_watchdog_time + SF_RESET_TIMEOUT;
2394                 /*
2395                  * if we are suspended, preserve the SF_STATE_SUSPENDED flag,
2396                  * since throttling logic in sf_watch() depends on
2397                  * preservation of this flag while device is suspended
2398                  */
2399                 if (sf->sf_state & SF_STATE_SUSPENDED) {
2400                         sf->sf_state |= SF_STATE_OFFLINE;
2401                         SF_DEBUG(1, (sf, CE_CONT,
2402                             "sf_statec_callback, sf%d: "
2403                             "got FCAL_STATE_RESET during DDI_SUSPEND\n",
2404                             ddi_get_instance(sf->sf_dip)));
2405                 } else {
2406                         sf->sf_state = SF_STATE_OFFLINE;
2407                 }
2408 
2409                 /*
2410                  * scan each possible target on the loop, looking for
2411                  * targets that need remove-event callbacks run
2412                  */
2413                 for (i = 0; i < sf_max_targets; i++) {
2414                         target = sf->sf_targets[i];
2415                         while (target != NULL) {
2416                                 if (!(target->sft_state & SF_TARGET_OFFLINE)) {
2417                                         target->sft_state |= (SF_TARGET_BUSY
2418                                             | SF_TARGET_MARK);
2419                                         mutex_exit(&sf->sf_mutex);
2420                                         /*
2421                                          * run remove event callbacks for
2422                                          * this LUN
2423                                          *
2424                                          * Note the race here: we drop the
2425                                          * mutex to run the callback and
2426                                          * assume the list is unchanged.
2427                                          */
2428                                         (void) ndi_event_retrieve_cookie(
2429                                             sf->sf_event_hdl, target->sft_dip,
2430                                             FCAL_REMOVE_EVENT, &sf_remove_eid,
2431                                             NDI_EVENT_NOPASS);
2432                                         (void) ndi_event_run_callbacks(
2433                                             sf->sf_event_hdl,
2434                                             target->sft_dip,
2435                                             sf_remove_eid, NULL);
2436                                         mutex_enter(&sf->sf_mutex);
2437                                 }
2438                                 target = target->sft_next_lun;
2439                         }
2440                 }
2441 
2442                 /*
2443                  * scan for ELS commands that are in transport, not complete,
2444                  * and have a valid timeout, building a private list
2445                  */
2446                 privp = sf->sf_els_list;
2447                 while (privp != NULL) {
2448                         fpkt = privp->fpkt;
2449                         if ((fpkt->fcal_cmd_state & FCAL_CMD_IN_TRANSPORT) &&
2450                             (!(fpkt->fcal_cmd_state & FCAL_CMD_COMPLETE)) &&
2451                             (privp->timeout != SF_INVALID_TIMEOUT)) {
2452                                 /*
2453                                  * cmd in transport && not complete &&
2454                                  * timeout valid
2455                                  *
2456                                  * move this entry from ELS input list to our
2457                                  * private list
2458                                  */
2459 
2460                                 tmpp2 = privp->next; /* save ptr to next */
2461 
2462                                 /* push this on private list head */
2463                                 privp->next = head;
2464                                 head = privp;
2465 
2466                                 /* remove this entry from input list */
2467                                 if (tmpp1 != NULL) {
2468                                         /*
2469                                          * remove this entry from somewhere in
2470                                          * the middle of the list
2471                                          */
2472                                         tmpp1->next = tmpp2;
2473                                         if (tmpp2 != NULL) {
2474                                                 tmpp2->prev = tmpp1;
2475                                         }
2476                                 } else {
2477                                         /*
2478                                          * remove this entry from the head
2479                                          * of the list
2480                                          */
2481                                         sf->sf_els_list = tmpp2;
2482                                         if (tmpp2 != NULL) {
2483                                                 tmpp2->prev = NULL;
2484                                         }
2485                                 }
2486                                 privp = tmpp2;  /* skip to next entry */
2487                         } else {
2488                                 tmpp1 = privp;  /* save ptr to prev entry */
2489                                 privp = privp->next; /* skip to next entry */
2490                         }
2491                 }
2492 
2493                 mutex_exit(&sf->sf_mutex);
2494 
2495                 /*
2496                  * foreach cmd in our list free the ELS packet associated
2497                  * with it
2498                  */
2499                 privp = head;
2500                 while (privp != NULL) {
2501                         fpkt = privp->fpkt;
2502                         privp = privp->next;
2503                         sf_els_free(fpkt);
2504                 }
2505 
2506                 /*
2507                  * scan for commands from each possible target
2508                  */
2509                 for (i = 0; i < sf_max_targets; i++) {
2510                         target = sf->sf_targets[i];
2511                         while (target != NULL) {
2512                                 /*
2513                                  * scan all active commands for this target,
2514                                  * looking for commands that have been issued,
2515                                  * are in transport, and are not yet complete
2516                                  * (so we can terminate them because of the
2517                                  * reset)
2518                                  */
2519                                 mutex_enter(&target->sft_pkt_mutex);
2520                                 cmd = target->sft_pkt_head;
2521                                 while (cmd != (struct sf_pkt *)&target->
2522                                     sft_pkt_head) {
2523                                         fpkt = cmd->cmd_fp_pkt;
2524                                         mutex_enter(&cmd->cmd_abort_mutex);
2525                                         if ((cmd->cmd_state ==
2526                                             SF_STATE_ISSUED) &&
2527                                             (fpkt->fcal_cmd_state &
2528                                             FCAL_CMD_IN_TRANSPORT) &&
2529                                             (!(fpkt->fcal_cmd_state &
2530                                             FCAL_CMD_COMPLETE))) {
2531                                                 /* a command to be reset */
2532                                                 pkt = cmd->cmd_pkt;
2533                                                 pkt->pkt_reason = CMD_RESET;
2534                                                 pkt->pkt_statistics |=
2535                                                     STAT_BUS_RESET;
2536                                                 cmd->cmd_state = SF_STATE_IDLE;
2537                                                 mutex_exit(&cmd->
2538                                                     cmd_abort_mutex);
2539                                                 mutex_exit(&target->
2540                                                     sft_pkt_mutex);
2541                                                 if (pkt->pkt_comp != NULL) {
2542                                                         (*pkt->pkt_comp)(pkt);
2543                                                 }
2544                                                 mutex_enter(&target->
2545                                                     sft_pkt_mutex);
2546                                                 cmd = target->sft_pkt_head;
2547                                         } else {
2548                                                 mutex_exit(&cmd->
2549                                                     cmd_abort_mutex);
2550                                                 /* get next command */
2551                                                 cmd = cmd->cmd_forw;
2552                                         }
2553                                 }
2554                                 mutex_exit(&target->sft_pkt_mutex);
2555                                 target = target->sft_next_lun;
2556                         }
2557                 }
2558 
2559                 /*
2560                  * get packet queue for this target, resetting all remaining
2561                  * commands
2562                  */
2563                 mutex_enter(&sf->sf_mutex);
2564                 cmd = sf->sf_pkt_head;
2565                 sf->sf_pkt_head = NULL;
2566                 mutex_exit(&sf->sf_mutex);
2567 
2568                 while (cmd != NULL) {
2569                         pkt = cmd->cmd_pkt;
2570                         cmd = cmd->cmd_next;
2571                         pkt->pkt_reason = CMD_RESET;
2572                         pkt->pkt_statistics |= STAT_BUS_RESET;
2573                         if (pkt->pkt_comp != NULL) {
2574                                 (*pkt->pkt_comp)(pkt);
2575                         }
2576                 }
2577                 break;
2578         }
2579 
2580         default:
2581                 break;
2582         }
2583 }
2584 
2585 
2586 /*
2587  * called to send a PLOGI (N_port login) or LOGO ELS request to a
2588  * destination ID, returning TRUE upon success, else returning FALSE
2589  */
2590 static int
2591 sf_login(struct sf *sf, uchar_t els_code, uchar_t dest_id, uint_t arg1,
2592     int lip_cnt)
2593 {
2594         struct la_els_logi      *logi;
2595         struct  sf_els_hdr      *privp;
2596 
2597 
2598         if (sf_els_alloc(sf, dest_id, sizeof (struct sf_els_hdr),
2599             sizeof (union sf_els_cmd), sizeof (union sf_els_rsp),
2600             (caddr_t *)&privp, (caddr_t *)&logi) == NULL) {
2601                 sf_log(sf, CE_WARN, "Cannot allocate PLOGI for target %x "
2602                     "due to DVMA shortage.\n", sf_alpa_to_switch[dest_id]);
2603                 return (FALSE);
2604         }
2605 
2606         privp->lip_cnt = lip_cnt;
2607         if (els_code == LA_ELS_PLOGI) {
2608                 bcopy((caddr_t)sf->sf_sochandle->fcal_loginparms,
2609                     (caddr_t)&logi->common_service, sizeof (struct la_els_logi)
2610                     - 4);
2611                 bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2612                     (caddr_t)&logi->nport_ww_name, sizeof (la_wwn_t));
2613                 bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
2614                     (caddr_t)&logi->node_ww_name, sizeof (la_wwn_t));
2615                 bzero((caddr_t)&logi->reserved, 16);
2616         } else if (els_code == LA_ELS_LOGO) {
2617                 bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2618                     (caddr_t)&(((struct la_els_logo *)logi)->nport_ww_name), 8);
2619                 ((struct la_els_logo    *)logi)->reserved = 0;
2620                 ((struct la_els_logo    *)logi)->nport_id[0] = 0;
2621                 ((struct la_els_logo    *)logi)->nport_id[1] = 0;
2622                 ((struct la_els_logo    *)logi)->nport_id[2] = arg1;
2623         }
2624 
2625         privp->els_code = els_code;
2626         logi->ls_code = els_code;
2627         logi->mbz[0] = 0;
2628         logi->mbz[1] = 0;
2629         logi->mbz[2] = 0;
2630 
2631         privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2632         return (sf_els_transport(sf, privp));
2633 }
2634 
2635 
2636 /*
2637  * send an ELS IU via the transport,
2638  * returning TRUE upon success, else returning FALSE
2639  */
2640 static int
2641 sf_els_transport(struct sf *sf, struct sf_els_hdr *privp)
2642 {
2643         struct fcal_packet *fpkt = privp->fpkt;
2644 
2645 
2646         (void) ddi_dma_sync(privp->cmd_dma_handle, (off_t)0, (size_t)0,
2647             DDI_DMA_SYNC_FORDEV);
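             /* link this request onto the head of the outstanding ELS list */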
2648         privp->prev = NULL;
2649         mutex_enter(&sf->sf_mutex);
2650         privp->next = sf->sf_els_list;
2651         if (sf->sf_els_list != NULL) {
2652                 sf->sf_els_list->prev = privp;
2653         }
2654         sf->sf_els_list = privp;
2655         mutex_exit(&sf->sf_mutex);
2656 
2657         /* call the transport to send a packet */
2658         if (soc_transport(sf->sf_sochandle, fpkt, FCAL_NOSLEEP,
2659             CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
2660                 mutex_enter(&sf->sf_mutex);
2661                 if (privp->prev != NULL) {
2662                         privp->prev->next = privp->next;
2663                 }
2664                 if (privp->next != NULL) {
2665                         privp->next->prev = privp->prev;
2666                 }
2667                 if (sf->sf_els_list == privp) {
2668                         sf->sf_els_list = privp->next;
2669                 }
2670                 mutex_exit(&sf->sf_mutex);
2671                 sf_els_free(fpkt);
2672                 return (FALSE);                 /* failure */
2673         }
2674         return (TRUE);                          /* success */
2675 }
2676 
2677 
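     /*
      * ELS completions drive target discovery: a successful PLOGI triggers
      * a PRLI, a successful PRLI triggers an ADISC, and a successful ADISC
      * creates the target and starts REPORT_LUNS/INQUIRY processing.
      */
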
2678 /*
2679  * called as the pkt_comp routine for ELS FC packets
2680  */
2681 static void
2682 sf_els_callback(struct fcal_packet *fpkt)
2683 {
2684         struct sf_els_hdr *privp = fpkt->fcal_pkt_private;
2685         struct sf *sf = privp->sf;
2686         struct sf *tsf;
2687         int tgt_id;
2688         struct la_els_logi *ptr = (struct la_els_logi *)privp->rsp;
2689         struct la_els_adisc *adisc = (struct la_els_adisc *)ptr;
2690         struct  sf_target *target;
2691         short   ncmds;
2692         short   free_pkt = TRUE;
2693 
2694 
2695         /*
2696          * we've received an ELS callback, i.e. an ELS packet has arrived
2697          */
2698 
2699         /* take the current packet off of the queue */
2700         mutex_enter(&sf->sf_mutex);
2701         if (privp->timeout == SF_INVALID_TIMEOUT) {
2702                 mutex_exit(&sf->sf_mutex);
2703                 return;
2704         }
2705         if (privp->prev != NULL) {
2706                 privp->prev->next = privp->next;
2707         }
2708         if (privp->next != NULL) {
2709                 privp->next->prev = privp->prev;
2710         }
2711         if (sf->sf_els_list == privp) {
2712                 sf->sf_els_list = privp->next;
2713         }
2714         privp->prev = privp->next = NULL;
2715         mutex_exit(&sf->sf_mutex);
2716 
2717         /* resync our outstanding-command count with the transport's */
2718         ncmds = fpkt->fcal_ncmds;
2719         ASSERT(ncmds >= 0);
2720         mutex_enter(&sf->sf_cmd_mutex);
2721         sf->sf_ncmds = ncmds;
2722         mutex_exit(&sf->sf_cmd_mutex);
2723 
2724         /* sync the response buffer so the kernel sees the device's data */
2725         (void) ddi_dma_sync(privp->rsp_dma_handle, (off_t)0, (size_t)0,
2726             DDI_DMA_SYNC_FORKERNEL);
2727 
2728         /* was this an OK ACC msg ?? */
2729         if ((fpkt->fcal_pkt_status == FCAL_STATUS_OK) &&
2730             (ptr->ls_code == LA_ELS_ACC)) {
2731 
2732                 /*
2733                  * this was an OK ACC pkt
2734                  */
2735 
2736                 switch (privp->els_code) {
2737                 case LA_ELS_PLOGI:
2738                         /*
2739                          * was able to do an N_port login
2740                          */
2741                         SF_DEBUG(2, (sf, CE_CONT,
2742                             "!PLOGI to al_pa %x succeeded, wwn %x%x\n",
2743                             privp->dest_nport_id,
2744                             *((int *)&ptr->nport_ww_name.raw_wwn[0]),
2745                             *((int *)&ptr->nport_ww_name.raw_wwn[4])));
2746                         /* try to do a process login */
2747                         if (!sf_do_prli(sf, privp, ptr)) {
2748                                 free_pkt = FALSE;
2749                                 goto fail;      /* PRLI failed */
2750                         }
2751                         break;
2752                 case LA_ELS_PRLI:
2753                         /*
2754                          * was able to do a process login
2755                          */
2756                         SF_DEBUG(2, (sf, CE_CONT,
2757                             "!PRLI to al_pa %x succeeded\n",
2758                             privp->dest_nport_id));
2759                         /* try to do address discovery */
2760                         if (sf_do_adisc(sf, privp) != 1) {
2761                                 free_pkt = FALSE;
2762                                 goto fail;      /* ADISC failed */
2763                         }
2764                         break;
2765                 case LA_ELS_ADISC:
2766                         /*
2767                          * found a target via ADISC
2768                          */
2769 
2770                         SF_DEBUG(2, (sf, CE_CONT,
2771                             "!ADISC to al_pa %x succeeded\n",
2772                             privp->dest_nport_id));
2773 
2774                         /* create the target info */
2775                         if ((target = sf_create_target(sf, privp,
2776                             sf_alpa_to_switch[(uchar_t)adisc->hard_address],
2777                             (int64_t)0))
2778                             == NULL) {
2779                                 goto fail;      /* can't create target */
2780                         }
2781 
2782                         /*
2783                          * ensure address discovered matches what we thought
2784                          * it would be
2785                          */
2786                         if ((uchar_t)adisc->hard_address !=
2787                             privp->dest_nport_id) {
2788                                 sf_log(sf, CE_WARN,
2789                                     "target 0x%x, AL-PA 0x%x and "
2790                                     "hard address 0x%x don't match\n",
2791                                     sf_alpa_to_switch[
2792                                     (uchar_t)privp->dest_nport_id],
2793                                     privp->dest_nport_id,
2794                                     (uchar_t)adisc->hard_address);
2795                                 mutex_enter(&sf->sf_mutex);
2796                                 sf_offline_target(sf, target);
2797                                 mutex_exit(&sf->sf_mutex);
2798                                 goto fail;      /* addr doesn't match */
2799                         }
2800                         /*
2801                          * get the LUN list from the target via REPORT_LUNS
2802                          */
2803                         if (!sf_do_reportlun(sf, privp, target)) {
2804                                 mutex_enter(&sf->sf_mutex);
2805                                 sf_offline_target(sf, target);
2806                                 mutex_exit(&sf->sf_mutex);
2807                                 free_pkt = FALSE;
2808                                 goto fail;      /* REPORT_LUNS failed */
2809                         }
2810                         break;
2811                 default:
2812                         SF_DEBUG(2, (sf, CE_CONT,
2813                             "!ELS %x to al_pa %x succeeded\n",
2814                             privp->els_code, privp->dest_nport_id));
2815                         sf_els_free(fpkt);
2816                         break;
2817                 }
2818 
2819         } else {
2820 
2821                 /*
2822                  * oh oh -- this was not an OK ACC packet
2823                  */
2824 
2825                 /* get target ID from dest loop address */
2826                 tgt_id = sf_alpa_to_switch[(uchar_t)privp->dest_nport_id];
2827 
2828                 /* keep track of failures */
2829                 sf->sf_stats.tstats[tgt_id].els_failures++;
2830                 if (++(privp->retries) < sf_els_retries &&
2831                     fpkt->fcal_pkt_status != FCAL_STATUS_OPEN_FAIL) {
2832                         if (fpkt->fcal_pkt_status ==
2833                             FCAL_STATUS_MAX_XCHG_EXCEEDED)  {
2834                                 tsf = sf->sf_sibling;
2835                                 if (tsf != NULL) {
2836                                         mutex_enter(&tsf->sf_cmd_mutex);
2837                                         tsf->sf_flag = 1;
2838                                         tsf->sf_throttle = SF_DECR_DELTA;
2839                                         mutex_exit(&tsf->sf_cmd_mutex);
2840                                 }
2841                         }
2842                         privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2843                         privp->prev = NULL;
2844 
2845                         mutex_enter(&sf->sf_mutex);
2846 
2847                         if (privp->lip_cnt == sf->sf_lip_cnt) {
2848                                 SF_DEBUG(1, (sf, CE_WARN,
2849                                     "!ELS %x to al_pa %x failed, retrying",
2850                                     privp->els_code, privp->dest_nport_id));
2851                                 privp->next = sf->sf_els_list;
2852                                 if (sf->sf_els_list != NULL) {
2853                                         sf->sf_els_list->prev = privp;
2854                                 }
2855 
2856                                 sf->sf_els_list = privp;
2857 
2858                                 mutex_exit(&sf->sf_mutex);
2859                                 /* device busy?  wait a bit ... */
2860                                 if (fpkt->fcal_pkt_status ==
2861                                     FCAL_STATUS_MAX_XCHG_EXCEEDED)  {
2862                                         privp->delayed_retry = 1;
2863                                         return;
2864                                 }
2865                                 /* call the transport to send a pkt */
2866                                 if (soc_transport(sf->sf_sochandle, fpkt,
2867                                     FCAL_NOSLEEP, CQ_REQUEST_1) !=
2868                                     FCAL_TRANSPORT_SUCCESS) {
2869                                         mutex_enter(&sf->sf_mutex);
2870                                         if (privp->prev != NULL) {
2871                                                 privp->prev->next =
2872                                                     privp->next;
2873                                         }
2874                                         if (privp->next != NULL) {
2875                                                 privp->next->prev =
2876                                                     privp->prev;
2877                                         }
2878                                         if (sf->sf_els_list == privp) {
2879                                                 sf->sf_els_list = privp->next;
2880                                         }
2881                                         mutex_exit(&sf->sf_mutex);
2882                                         goto fail;
2883                                 } else
2884                                         return;
2885                         } else {
2886                                 mutex_exit(&sf->sf_mutex);
2887                                 goto fail;
2888                         }
2889                 } else {
2890 #ifdef  DEBUG
2891                         if (fpkt->fcal_pkt_status != 0x36 || sfdebug > 4) {
2892                         SF_DEBUG(2, (sf, CE_NOTE, "ELS %x to al_pa %x failed",
2893                             privp->els_code, privp->dest_nport_id));
2894                         if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
2895                                 SF_DEBUG(2, (sf, CE_NOTE,
2896                                     "els reply code = %x", ptr->ls_code));
2897                                 if (ptr->ls_code == LA_ELS_RJT)
2898                                         SF_DEBUG(1, (sf, CE_CONT,
2899                                             "LS_RJT reason = %x\n",
2900                                             *(((uint_t *)ptr) + 1)));
2901                         } else
2902                                 SF_DEBUG(2, (sf, CE_NOTE,
2903                                     "fc packet status = %x",
2904                                     fpkt->fcal_pkt_status));
2905                         }
2906 #endif
2907                         goto fail;
2908                 }
2909         }
2910         return;                                 /* success */
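     /*
      * this request has failed for good: drop the device count for this
      * LIP and, if it was the last device outstanding, finish link
      * initialization
      */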
2911 fail:
2912         mutex_enter(&sf->sf_mutex);
2913         if (sf->sf_lip_cnt == privp->lip_cnt) {
2914                 sf->sf_device_count--;
2915                 ASSERT(sf->sf_device_count >= 0);
2916                 if (sf->sf_device_count == 0) {
2917                         sf_finish_init(sf, privp->lip_cnt);
2918                 }
2919         }
2920         mutex_exit(&sf->sf_mutex);
2921         if (free_pkt) {
2922                 sf_els_free(fpkt);
2923         }
2924 }
2925 
2926 
2927 /*
2928  * send a PRLI (process login) ELS IU via the transport,
2929  * returning TRUE upon success, else returning FALSE
2930  */
2931 static int
2932 sf_do_prli(struct sf *sf, struct sf_els_hdr *privp, struct la_els_logi *ptr)
2933 {
2934         struct la_els_prli      *prli = (struct la_els_prli *)privp->cmd;
2935         struct fcp_prli         *fprli;
2936         struct  fcal_packet     *fpkt = privp->fpkt;
2937 
2938 
2939         fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
2940             sizeof (struct la_els_prli);
2941         privp->els_code = LA_ELS_PRLI;
2942         fprli = (struct fcp_prli *)prli->service_params;
2943         prli->ls_code = LA_ELS_PRLI;
2944         prli->page_length = 0x10;
2945         prli->payload_length = sizeof (struct la_els_prli);
2946         fprli->type = 0x08;                  /* FC-4 type: SCSI-FCP */
2947         fprli->resvd1 = 0;
2948         fprli->orig_process_assoc_valid = 0;
2949         fprli->resp_process_assoc_valid = 0;
2950         fprli->establish_image_pair = 1;
2951         fprli->resvd2 = 0;
2952         fprli->resvd3 = 0;
2953         fprli->data_overlay_allowed = 0;
2954         fprli->initiator_fn = 1;
2955         fprli->target_fn = 0;
2956         fprli->cmd_data_mixed = 0;
2957         fprli->data_resp_mixed = 0;
2958         fprli->read_xfer_rdy_disabled = 1;
2959         fprli->write_xfer_rdy_disabled = 0;
2960 
2961         bcopy((caddr_t)&ptr->nport_ww_name, (caddr_t)&privp->port_wwn,
2962             sizeof (privp->port_wwn));
2963         bcopy((caddr_t)&ptr->node_ww_name, (caddr_t)&privp->node_wwn,
2964             sizeof (privp->node_wwn));
2965 
2966         privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2967         return (sf_els_transport(sf, privp));
2968 }
2969 
2970 
2971 /*
2972  * send an ADISC (address discovery) ELS IU via the transport,
2973  * returning TRUE upon success, else returning FALSE
2974  */
2975 static int
2976 sf_do_adisc(struct sf *sf, struct sf_els_hdr *privp)
2977 {
2978         struct la_els_adisc     *adisc = (struct la_els_adisc *)privp->cmd;
2979         struct  fcal_packet     *fpkt = privp->fpkt;
2980 
2981         privp->els_code = LA_ELS_ADISC;
2982         adisc->ls_code = LA_ELS_ADISC;
2983         adisc->mbz[0] = 0;
2984         adisc->mbz[1] = 0;
2985         adisc->mbz[2] = 0;
2986         adisc->hard_address = 0;        /* no hard address to report */
2987         fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
2988             sizeof (struct la_els_adisc);
2989         bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2990             (caddr_t)&adisc->port_wwn, sizeof (adisc->port_wwn));
2991         bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
2992             (caddr_t)&adisc->node_wwn, sizeof (adisc->node_wwn));
2993         adisc->nport_id = sf->sf_al_pa;
2994 
2995         privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2996         return (sf_els_transport(sf, privp));
2997 }
2998 
2999 
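     /*
      * allocate and set up a fcal_packet for an ELS (or FCP) request to the
      * given destination: allocate and bind DMA-able command and response
      * buffers, fill in the SOC+ request and FC frame headers, and return
      * the private header via rprivp and the command buffer via cmd_buf;
      * returns the packet pointer upon success, else NULL
      */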
3000 static struct fcal_packet *
3001 sf_els_alloc(struct sf *sf, uchar_t dest_id, int priv_size, int cmd_size,
3002     int rsp_size, caddr_t *rprivp, caddr_t *cmd_buf)
3003 {
3004         struct  fcal_packet     *fpkt;
3005         ddi_dma_cookie_t        pcookie;
3006         ddi_dma_cookie_t        rcookie;
3007         struct  sf_els_hdr      *privp;
3008         ddi_dma_handle_t        cmd_dma_handle = NULL;
3009         ddi_dma_handle_t        rsp_dma_handle = NULL;
3010         ddi_acc_handle_t        cmd_acc_handle = NULL;
3011         ddi_acc_handle_t        rsp_acc_handle = NULL;
3012         size_t                  real_size;
3013         uint_t                  ccount;
3014         fc_frame_header_t       *hp;
3015         int                     cmd_bound = FALSE, rsp_bound = FALSE;
3016         caddr_t                 cmd = NULL;
3017         caddr_t                 rsp = NULL;
3018 
3019         if ((fpkt = (struct fcal_packet *)kmem_zalloc(
3020             sizeof (struct fcal_packet), KM_NOSLEEP)) == NULL) {
3021                 SF_DEBUG(1, (sf, CE_WARN,
3022                         "Could not allocate fcal_packet for ELS\n"));
3023                 return (NULL);
3024         }
3025 
3026         if ((privp = (struct sf_els_hdr *)kmem_zalloc(priv_size,
3027             KM_NOSLEEP)) == NULL) {
3028                 SF_DEBUG(1, (sf, CE_WARN,
3029                     "Could not allocate sf_els_hdr for ELS\n"));
3030                 goto fail;
3031         }
3032 
3033         privp->size = priv_size;
3034         fpkt->fcal_pkt_private = (caddr_t)privp;
3035 
3036         if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3037             DDI_DMA_DONTWAIT, NULL, &cmd_dma_handle) != DDI_SUCCESS) {
3038                 SF_DEBUG(1, (sf, CE_WARN,
3039                     "Could not allocate DMA handle for ELS\n"));
3040                 goto fail;
3041         }
3042 
3043         if (ddi_dma_mem_alloc(cmd_dma_handle, cmd_size,
3044             sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3045             DDI_DMA_DONTWAIT, NULL, &cmd,
3046             &real_size, &cmd_acc_handle) != DDI_SUCCESS) {
3047                 SF_DEBUG(1, (sf, CE_WARN,
3048                     "Could not allocate DMA memory for ELS\n"));
3049                 goto fail;
3050         }
3051 
3052         if (real_size < cmd_size) {
3053                 SF_DEBUG(1, (sf, CE_WARN,
3054                     "DMA memory too small for ELS\n"));
3055                 goto fail;
3056         }
3057 
3058         if (ddi_dma_addr_bind_handle(cmd_dma_handle, NULL,
3059             cmd, real_size, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
3060             DDI_DMA_DONTWAIT, NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3061                 SF_DEBUG(1, (sf, CE_WARN,
3062                     "Could not bind DMA memory for ELS\n"));
3063                 goto fail;
3064         }
3065         cmd_bound = TRUE;
3066 
3067         if (ccount != 1) {
3068                 SF_DEBUG(1, (sf, CE_WARN,
3069                     "Wrong cookie count for ELS\n"));
3070                 goto fail;
3071         }
3072 
3073         if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3074             DDI_DMA_DONTWAIT, NULL, &rsp_dma_handle) != DDI_SUCCESS) {
3075                 SF_DEBUG(1, (sf, CE_WARN,
3076                     "Could not allocate DMA handle for ELS rsp\n"));
3077                 goto fail;
3078         }
3079         if (ddi_dma_mem_alloc(rsp_dma_handle, rsp_size,
3080             sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3081             DDI_DMA_DONTWAIT, NULL, &rsp,
3082             &real_size, &rsp_acc_handle) != DDI_SUCCESS) {
3083                 SF_DEBUG(1, (sf, CE_WARN,
3084                     "Could not allocate DMA memory for ELS rsp\n"));
3085                 goto fail;
3086         }
3087 
3088         if (real_size < rsp_size) {
3089                 SF_DEBUG(1, (sf, CE_WARN,
3090                     "DMA memory too small for ELS rsp\n"));
3091                 goto fail;
3092         }
3093 
3094         if (ddi_dma_addr_bind_handle(rsp_dma_handle, NULL,
3095             rsp, real_size, DDI_DMA_READ | DDI_DMA_CONSISTENT,
3096             DDI_DMA_DONTWAIT, NULL, &rcookie, &ccount) != DDI_DMA_MAPPED) {
3097                 SF_DEBUG(1, (sf, CE_WARN,
3098                     "Could not bind DMA memory for ELS rsp\n"));
3099                 goto fail;
3100         }
3101         rsp_bound = TRUE;
3102 
3103         if (ccount != 1) {
3104                 SF_DEBUG(1, (sf, CE_WARN,
3105                     "Wrong cookie count for ELS rsp\n"));
3106                 goto fail;
3107         }
3108 
3109         privp->cmd = cmd;
3110         privp->sf = sf;
3111         privp->cmd_dma_handle = cmd_dma_handle;
3112         privp->cmd_acc_handle = cmd_acc_handle;
3113         privp->rsp = rsp;
3114         privp->rsp_dma_handle = rsp_dma_handle;
3115         privp->rsp_acc_handle = rsp_acc_handle;
3116         privp->dest_nport_id = dest_id;
3117         privp->fpkt = fpkt;
3118 
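             /* fill in the fcal_packet and its SOC+ request header */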
3119         fpkt->fcal_pkt_cookie = sf->sf_socp;
3120         fpkt->fcal_pkt_comp = sf_els_callback;
3121         fpkt->fcal_magic = FCALP_MAGIC;
3122         fpkt->fcal_pkt_flags = 0;
3123         fpkt->fcal_socal_request.sr_soc_hdr.sh_flags =
3124             (ushort_t)(SOC_FC_HEADER | sf->sf_sochandle->fcal_portno);
3125         fpkt->fcal_socal_request.sr_soc_hdr.sh_class = 3;
3126         fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 2;
3127         fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = cmd_size;
3128         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_count = 1;
3129         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_flags = 0;
3130         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_seqno = 0;
3131         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_SIMPLE;
3132         fpkt->fcal_socal_request.sr_dataseg[0].fc_base = (uint32_t)
3133             pcookie.dmac_address;
3134         fpkt->fcal_socal_request.sr_dataseg[0].fc_count = cmd_size;
3135         fpkt->fcal_socal_request.sr_dataseg[1].fc_base = (uint32_t)
3136             rcookie.dmac_address;
3137         fpkt->fcal_socal_request.sr_dataseg[1].fc_count = rsp_size;
3138 
3139         /* Fill in the Fibre Channel frame header */
3140         hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3141         hp->r_ctl = R_CTL_ELS_REQ;
3142         hp->d_id = dest_id;
3143         hp->s_id = sf->sf_al_pa;
3144         hp->type = TYPE_EXTENDED_LS;
3145         hp->reserved1 = 0;
3146         hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3147         hp->seq_id = 0;
3148         hp->df_ctl  = 0;
3149         hp->seq_cnt = 0;
3150         hp->ox_id = 0xffff;
3151         hp->rx_id = 0xffff;
3152         hp->ro = 0;
3153 
3154         *rprivp = (caddr_t)privp;
3155         *cmd_buf = cmd;
3156         return (fpkt);
3157 
3158 fail:
3159         if (cmd_dma_handle != NULL) {
3160                 if (cmd_bound) {
3161                         (void) ddi_dma_unbind_handle(cmd_dma_handle);
3162                 }
3163                 ddi_dma_free_handle(&cmd_dma_handle);
3164                 privp->cmd_dma_handle = NULL;
3165         }
3166         if (rsp_dma_handle != NULL) {
3167                 if (rsp_bound) {
3168                         (void) ddi_dma_unbind_handle(rsp_dma_handle);
3169                 }
3170                 ddi_dma_free_handle(&rsp_dma_handle);
3171                 privp->rsp_dma_handle = NULL;
3172         }
3173         sf_els_free(fpkt);
3174         return (NULL);
3175 }
3176 
3177 
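     /*
      * free an ELS/FCP packet along with all of its DMA resources
      * (command, response, and optional data buffers)
      */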
3178 static void
3179 sf_els_free(struct fcal_packet *fpkt)
3180 {
3181         struct  sf_els_hdr      *privp = fpkt->fcal_pkt_private;
3182 
3183         if (privp != NULL) {
3184                 if (privp->cmd_dma_handle != NULL) {
3185                         (void) ddi_dma_unbind_handle(privp->cmd_dma_handle);
3186                         ddi_dma_free_handle(&privp->cmd_dma_handle);
3187                 }
3188                 if (privp->cmd != NULL) {
3189                         ddi_dma_mem_free(&privp->cmd_acc_handle);
3190                 }
3191 
3192                 if (privp->rsp_dma_handle != NULL) {
3193                         (void) ddi_dma_unbind_handle(privp->rsp_dma_handle);
3194                         ddi_dma_free_handle(&privp->rsp_dma_handle);
3195                 }
3196 
3197                 if (privp->rsp != NULL) {
3198                         ddi_dma_mem_free(&privp->rsp_acc_handle);
3199                 }
3200                 if (privp->data_dma_handle) {
3201                         (void) ddi_dma_unbind_handle(privp->data_dma_handle);
3202                         ddi_dma_free_handle(&privp->data_dma_handle);
3203                 }
3204                 if (privp->data_buf) {
3205                         ddi_dma_mem_free(&privp->data_acc_handle);
3206                 }
3207                 kmem_free(privp, privp->size);
3208         }
3209         kmem_free(fpkt, sizeof (struct fcal_packet));
3210 }
3211 
3212 
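     /*
      * create (or revive) a target structure for the given target number
      * and LUN, hashing it by port WWN and chaining LUNs != 0 behind LUN 0,
      * returning the target pointer upon success, else NULL
      */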
3213 static struct sf_target *
3214 sf_create_target(struct sf *sf, struct sf_els_hdr *privp, int tnum, int64_t lun)
3215 {
3216         struct sf_target *target, *ntarget, *otarget, *ptarget;
3217         int hash;
3218 #ifdef RAID_LUNS
3219         int64_t orig_lun = lun;
3220 
3221         /* XXXX Work around SCSA limitations. */
3222         lun = *((short *)&lun);
3223 #endif
3224         ntarget = kmem_zalloc(sizeof (struct sf_target), KM_NOSLEEP);
3225         mutex_enter(&sf->sf_mutex);
3226         if (sf->sf_lip_cnt != privp->lip_cnt) {
3227                 mutex_exit(&sf->sf_mutex);
3228                 if (ntarget != NULL)
3229                         kmem_free(ntarget, sizeof (struct sf_target));
3230                 return (NULL);
3231         }
3232 
3233         target = sf_lookup_target(sf, privp->port_wwn, lun);
3234         if (lun != 0) {
3235                 /*
3236                  * Since LUNs != 0 are queued up after LUN == 0, find LUN == 0
3237                  * and enqueue the new LUN.
3238                  */
3239                 if ((ptarget = sf_lookup_target(sf, privp->port_wwn,
3240                     (int64_t)0)) ==     NULL) {
3241                         /*
3242                          * Yeep -- no LUN 0?
3243                          */
3244                         mutex_exit(&sf->sf_mutex);
3245                         sf_log(sf, CE_WARN, "target 0x%x "
3246                             "lun %" PRIx64 ": No LUN 0\n", tnum, lun);
3247                         if (ntarget != NULL)
3248                                 kmem_free(ntarget, sizeof (struct sf_target));
3249                         return (NULL);
3250                 }
3251                 mutex_enter(&ptarget->sft_mutex);
3252                 if (target != NULL && ptarget->sft_lip_cnt == sf->sf_lip_cnt &&
3253                     ptarget->sft_state&SF_TARGET_OFFLINE) {
3254                         /* LUN 0 already finished, duplicate its state */
3255                         mutex_exit(&ptarget->sft_mutex);
3256                         sf_offline_target(sf, target);
3257                         mutex_exit(&sf->sf_mutex);
3258                         if (ntarget != NULL)
3259                                 kmem_free(ntarget, sizeof (struct sf_target));
3260                         return (target);
3261                 } else if (target != NULL) {
3262                         /*
3263                          * LUN 0 online or not examined yet.
3264                          * Try to bring the LUN back online
3265                          */
3266                         mutex_exit(&ptarget->sft_mutex);
3267                         mutex_enter(&target->sft_mutex);
3268                         target->sft_lip_cnt = privp->lip_cnt;
3269                         target->sft_state |= SF_TARGET_BUSY;
3270                         target->sft_state &= ~(SF_TARGET_OFFLINE|
3271                             SF_TARGET_MARK);
3272                         target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3273                         target->sft_hard_address = sf_switch_to_alpa[tnum];
3274                         mutex_exit(&target->sft_mutex);
3275                         mutex_exit(&sf->sf_mutex);
3276                         if (ntarget != NULL)
3277                                 kmem_free(ntarget, sizeof (struct sf_target));
3278                         return (target);
3279                 }
3280                 mutex_exit(&ptarget->sft_mutex);
3281                 if (ntarget == NULL) {
3282                         mutex_exit(&sf->sf_mutex);
3283                         return (NULL);
3284                 }
3285                 /* Initialize new target structure */
3286                 bcopy((caddr_t)&privp->node_wwn,
3287                     (caddr_t)&ntarget->sft_node_wwn, sizeof (privp->node_wwn));
3288                 bcopy((caddr_t)&privp->port_wwn,
3289                     (caddr_t)&ntarget->sft_port_wwn, sizeof (privp->port_wwn));
3290                 ntarget->sft_lun.l = lun;
3291 #ifdef RAID_LUNS
3292                 ntarget->sft_lun.l = orig_lun;
3293                 ntarget->sft_raid_lun = (uint_t)lun;
3294 #endif
3295                 mutex_init(&ntarget->sft_mutex, NULL, MUTEX_DRIVER, NULL);
3296                 mutex_init(&ntarget->sft_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
3297                 /* Don't let anyone use this till we finish up init. */
3298                 mutex_enter(&ntarget->sft_mutex);
3299                 mutex_enter(&ntarget->sft_pkt_mutex);
3300 
3301                 hash = SF_HASH(privp->port_wwn, lun);
3302                 ntarget->sft_next = sf->sf_wwn_lists[hash];
3303                 sf->sf_wwn_lists[hash] = ntarget;
3304 
3305                 ntarget->sft_lip_cnt = privp->lip_cnt;
3306                 ntarget->sft_al_pa = (uchar_t)privp->dest_nport_id;
3307                 ntarget->sft_hard_address = sf_switch_to_alpa[tnum];
3308                 ntarget->sft_device_type = DTYPE_UNKNOWN;
3309                 ntarget->sft_state = SF_TARGET_BUSY;
3310                 ntarget->sft_pkt_head = (struct sf_pkt *)&ntarget->
3311                     sft_pkt_head;
3312                 ntarget->sft_pkt_tail = (struct sf_pkt *)&ntarget->
3313                     sft_pkt_head;
3314 
3315                 mutex_enter(&ptarget->sft_mutex);
3316                 /* Walk to the end of LUN 0's chain of LUNs */
3317                 for (target = ptarget; target->sft_next_lun;
3318                     target = target->sft_next_lun) {
3319                         otarget = target->sft_next_lun;
3320                 }
3321                 ntarget->sft_next_lun = target->sft_next_lun;
3322                 target->sft_next_lun = ntarget;
3323                 mutex_exit(&ptarget->sft_mutex);
3324                 mutex_exit(&ntarget->sft_pkt_mutex);
3325                 mutex_exit(&ntarget->sft_mutex);
3326                 mutex_exit(&sf->sf_mutex);
3327                 return (ntarget);
3328 
3329         }
3330         if (target != NULL && target->sft_lip_cnt == sf->sf_lip_cnt) {
3331                 /* It's been touched this LIP -- duplicate WWNs */
3332                 sf_offline_target(sf, target); /* And all the baby targets */
3333                 mutex_exit(&sf->sf_mutex);
3334                 sf_log(sf, CE_WARN, "target 0x%x, duplicate port wwns\n",
3335                     tnum);
3336                 if (ntarget != NULL) {
3337                         kmem_free(ntarget, sizeof (struct sf_target));
3338                 }
3339                 return (NULL);
3340         }
3341 
3342         if ((otarget = sf->sf_targets[tnum]) != NULL) {
3343                 /* Someone else is in our slot */
3344                 mutex_enter(&otarget->sft_mutex);
3345                 if (otarget->sft_lip_cnt == sf->sf_lip_cnt) {
3346                         mutex_exit(&otarget->sft_mutex);
3347                         sf_offline_target(sf, otarget);
3348                         if (target != NULL)
3349                                 sf_offline_target(sf, target);
3350                         mutex_exit(&sf->sf_mutex);
3351                         sf_log(sf, CE_WARN,
3352                             "target 0x%x, duplicate switch settings\n", tnum);
3353                         if (ntarget != NULL)
3354                                 kmem_free(ntarget, sizeof (struct sf_target));
3355                         return (NULL);
3356                 }
3357                 mutex_exit(&otarget->sft_mutex);
3358                 if (bcmp((caddr_t)&privp->port_wwn, (caddr_t)&otarget->
3359                     sft_port_wwn, sizeof (privp->port_wwn))) {
3360                         sf_offline_target(sf, otarget);
3361                         mutex_exit(&sf->sf_mutex);
3362                         sf_log(sf, CE_WARN, "wwn changed on target 0x%x\n",
3363                             tnum);
3364                         bzero((caddr_t)&sf->sf_stats.tstats[tnum],
3365                             sizeof (struct sf_target_stats));
3366                         mutex_enter(&sf->sf_mutex);
3367                 }
3368         }
3369 
3370         sf->sf_targets[tnum] = target;
3371         if ((target = sf->sf_targets[tnum]) == NULL) {
3372                 if (ntarget == NULL) {
3373                         mutex_exit(&sf->sf_mutex);
3374                         return (NULL);
3375                 }
3376                 bcopy((caddr_t)&privp->node_wwn,
3377                     (caddr_t)&ntarget->sft_node_wwn, sizeof (privp->node_wwn));
3378                 bcopy((caddr_t)&privp->port_wwn,
3379                     (caddr_t)&ntarget->sft_port_wwn, sizeof (privp->port_wwn));
3380                 ntarget->sft_lun.l = lun;
3381 #ifdef RAID_LUNS
3382                 ntarget->sft_lun.l = orig_lun;
3383                 ntarget->sft_raid_lun = (uint_t)lun;
3384 #endif
3385                 mutex_init(&ntarget->sft_mutex, NULL, MUTEX_DRIVER, NULL);
3386                 mutex_init(&ntarget->sft_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
3387                 mutex_enter(&ntarget->sft_mutex);
3388                 mutex_enter(&ntarget->sft_pkt_mutex);
3389                 hash = SF_HASH(privp->port_wwn, lun); /* lun 0 */
3390                 ntarget->sft_next = sf->sf_wwn_lists[hash];
3391                 sf->sf_wwn_lists[hash] = ntarget;
3392 
3393                 target = ntarget;
3394                 target->sft_lip_cnt = privp->lip_cnt;
3395                 target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3396                 target->sft_hard_address = sf_switch_to_alpa[tnum];
3397                 target->sft_device_type = DTYPE_UNKNOWN;
3398                 target->sft_state = SF_TARGET_BUSY;
3399                 target->sft_pkt_head = (struct sf_pkt *)&target->
3400                     sft_pkt_head;
3401                 target->sft_pkt_tail = (struct sf_pkt *)&target->
3402                     sft_pkt_head;
3403                 sf->sf_targets[tnum] = target;
3404                 mutex_exit(&ntarget->sft_mutex);
3405                 mutex_exit(&ntarget->sft_pkt_mutex);
3406                 mutex_exit(&sf->sf_mutex);
3407         } else {
3408                 mutex_enter(&target->sft_mutex);
3409                 target->sft_lip_cnt = privp->lip_cnt;
3410                 target->sft_state |= SF_TARGET_BUSY;
3411                 target->sft_state &= ~(SF_TARGET_OFFLINE|SF_TARGET_MARK);
3412                 target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3413                 target->sft_hard_address = sf_switch_to_alpa[tnum];
3414                 mutex_exit(&target->sft_mutex);
3415                 mutex_exit(&sf->sf_mutex);
3416                 if (ntarget != NULL)
3417                         kmem_free(ntarget, sizeof (struct sf_target));
3418         }
3419         return (target);
3420 }
3421 
3422 
3423 /*
3424  * look up a target by port WWN and LUN for a given sf instance
3425  */
3426 /* ARGSUSED */
3427 static struct sf_target *
3428 #ifdef RAID_LUNS
3429 sf_lookup_target(struct sf *sf, uchar_t *wwn, int lun)
3430 #else
3431 sf_lookup_target(struct sf *sf, uchar_t *wwn, int64_t lun)
3432 #endif
3433 {
3434         int hash;
3435         struct sf_target *target;
3436 
3437         ASSERT(mutex_owned(&sf->sf_mutex));
3438         hash = SF_HASH(wwn, lun);
3439 
3440         target = sf->sf_wwn_lists[hash];
3441         while (target != NULL) {
3442 
3443 #ifndef RAID_LUNS
3444                 if (bcmp((caddr_t)wwn, (caddr_t)&target->sft_port_wwn,
3445                     sizeof (target->sft_port_wwn)) == 0 &&
3446                         target->sft_lun.l == lun)
3447                         break;
3448 #else
3449                 if (bcmp((caddr_t)wwn, (caddr_t)&target->sft_port_wwn,
3450                     sizeof (target->sft_port_wwn)) == 0 &&
3451                         target->sft_raid_lun == lun)
3452                         break;
3453 #endif
3454                 target = target->sft_next;
3455         }
3456 
3457         return (target);
3458 }
3459 
3460 
3461 /*
3462  * Send out a REPORT_LUNS command.
3463  */
3464 static int
3465 sf_do_reportlun(struct sf *sf, struct sf_els_hdr *privp,
3466     struct sf_target *target)
3467 {
3468         struct  fcal_packet     *fpkt = privp->fpkt;
3469         ddi_dma_cookie_t        pcookie;
3470         ddi_dma_handle_t        lun_dma_handle = NULL;
3471         ddi_acc_handle_t        lun_acc_handle;
3472         uint_t                  ccount;
3473         size_t                  real_size;
3474         caddr_t                 lun_buf = NULL;
3475         int                     handle_bound = 0;
3476         fc_frame_header_t       *hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3477         struct fcp_cmd          *reportlun = (struct fcp_cmd *)privp->cmd;
3478         char                    *msg = "Transport";
3479 
3480         if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3481             DDI_DMA_DONTWAIT, NULL, &lun_dma_handle) != DDI_SUCCESS) {
3482                 msg = "ddi_dma_alloc_handle()";
3483                 goto fail;
3484         }
3485 
3486         if (ddi_dma_mem_alloc(lun_dma_handle, REPORT_LUNS_SIZE,
3487             sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3488             DDI_DMA_DONTWAIT, NULL, &lun_buf,
3489             &real_size, &lun_acc_handle) != DDI_SUCCESS) {
3490                 msg = "ddi_dma_mem_alloc()";
3491                 goto fail;
3492         }
3493 
3494         if (real_size < REPORT_LUNS_SIZE) {
3495                 msg = "DMA mem < REPORT_LUNS_SIZE";
3496                 goto fail;
3497         }
3498 
3499         if (ddi_dma_addr_bind_handle(lun_dma_handle, NULL,
3500             lun_buf, real_size, DDI_DMA_READ |
3501             DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
3502             NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3503                 msg = "ddi_dma_addr_bind_handle()";
3504                 goto fail;
3505         }
3506         handle_bound = 1;
3507 
3508         if (ccount != 1) {
3509                 msg = "ccount != 1";
3510                 goto fail;
3511         }
3512         privp->els_code = 0;
3513         privp->target = target;
3514         privp->data_dma_handle = lun_dma_handle;
3515         privp->data_acc_handle = lun_acc_handle;
3516         privp->data_buf = lun_buf;
3517 
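             /*
              * set up the FCP packet: a REPORT LUNS CDB plus a single read
              * data segment for the returned LUN list
              */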
3518         fpkt->fcal_pkt_comp = sf_reportlun_callback;
3519         fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
3520         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_IO_READ;
3521         fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
3522             sizeof (struct fcp_cmd);
3523         fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
3524             (uint32_t)pcookie.dmac_address;
3525         fpkt->fcal_socal_request.sr_dataseg[2].fc_count = pcookie.dmac_size;
3526         fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = pcookie.dmac_size;
3527         hp->r_ctl = R_CTL_COMMAND;
3528         hp->type = TYPE_SCSI_FCP;
3529         bzero((caddr_t)reportlun, sizeof (struct fcp_cmd));
3530         ((union scsi_cdb *)reportlun->fcp_cdb)->scc_cmd = SCMD_REPORT_LUNS;
3531         /* Now set the buffer size.  If DDI gave us extra, that's O.K. */
3532         ((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count0 =
3533             (real_size&0x0ff);
3534         ((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count1 =
3535             (real_size>>8)&0x0ff;
3536         ((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count2 =
3537             (real_size>>16)&0x0ff;
3538         ((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count3 =
3539             (real_size>>24)&0x0ff;
3540         reportlun->fcp_cntl.cntl_read_data = 1;
3541         reportlun->fcp_cntl.cntl_write_data = 0;
3542         reportlun->fcp_data_len = pcookie.dmac_size;
3543         reportlun->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
3544 
3545         (void) ddi_dma_sync(lun_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);
3546         /* We know the target is there, so this should be fast */
3547         privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3548         if (sf_els_transport(sf, privp) == 1)
3549                 return (1);
3550 
3551 fail:
3552         sf_log(sf, CE_WARN,
3553             "%s failure for REPORTLUN to target 0x%x\n",
3554             msg, sf_alpa_to_switch[privp->dest_nport_id]);
3555         sf_els_free(fpkt);
3556         if (lun_dma_handle != NULL) {
3557                 if (handle_bound)
3558                         (void) ddi_dma_unbind_handle(lun_dma_handle);
3559                 ddi_dma_free_handle(&lun_dma_handle);
3560         }
3561         if (lun_buf != NULL) {
3562                 ddi_dma_mem_free(&lun_acc_handle);
3563         }
3564         return (0);
3565 }
3566 
3567 /*
3568  * Handle the results of a REPORT_LUNS command:
3569  *      Create additional targets if necessary
3570  *      Initiate INQUIRYs on all LUNs.
3571  */
3572 static void
3573 sf_reportlun_callback(struct fcal_packet *fpkt)
3574 {
3575         struct sf_els_hdr *privp = (struct sf_els_hdr *)fpkt->
3576             fcal_pkt_private;
3577         struct scsi_report_luns *ptr =
3578             (struct scsi_report_luns *)privp->data_buf;
3579         struct sf *sf = privp->sf;
3580         struct sf_target *target = privp->target;
3581         struct fcp_rsp *rsp = NULL;
3582         int delayed_retry = 0;
3583         int tid = sf_alpa_to_switch[target->sft_hard_address];
3584         int i, free_pkt = 1;
3585         short   ncmds;
3586 
3587         mutex_enter(&sf->sf_mutex);
3588         /* timeout doubles as a state flag; SF_INVALID_TIMEOUT means drop it */
3589         if (privp->timeout == SF_INVALID_TIMEOUT) {
3590                 mutex_exit(&sf->sf_mutex);
3591                 return;
3592         }
3593         if (privp->prev)
3594                 privp->prev->next = privp->next;
3595         if (privp->next)
3596                 privp->next->prev = privp->prev;
3597         if (sf->sf_els_list == privp)
3598                 sf->sf_els_list = privp->next;
3599         privp->prev = privp->next = NULL;
3600         mutex_exit(&sf->sf_mutex);
3601         ncmds = fpkt->fcal_ncmds;
3602         ASSERT(ncmds >= 0);
3603         mutex_enter(&sf->sf_cmd_mutex);
3604         sf->sf_ncmds = ncmds;
3605         mutex_exit(&sf->sf_cmd_mutex);
3606 
3607         if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
3608                 (void) ddi_dma_sync(privp->rsp_dma_handle, 0,
3609                     0, DDI_DMA_SYNC_FORKERNEL);
3610 
3611                 rsp = (struct fcp_rsp *)privp->rsp;
3612         }
3613         SF_DEBUG(1, (sf, CE_CONT,
3614             "!REPORTLUN to al_pa %x pkt status %x scsi status %x\n",
3615             privp->dest_nport_id,
3616             fpkt->fcal_pkt_status,
3617             rsp?rsp->fcp_u.fcp_status.scsi_status:0));
3618 
3619         /* See if target simply does not support REPORT_LUNS. */
3620         if (rsp && rsp->fcp_u.fcp_status.scsi_status == STATUS_CHECK &&
3621             rsp->fcp_u.fcp_status.sense_len_set &&
3622             rsp->fcp_sense_len >=
3623             offsetof(struct scsi_extended_sense, es_qual_code)) {
3624                 struct scsi_extended_sense *sense;
3625                 sense = (struct scsi_extended_sense *)
3626                     ((caddr_t)rsp + sizeof (struct fcp_rsp)
3627                     + rsp->fcp_response_len);
3628                 if (sense->es_key == KEY_ILLEGAL_REQUEST) {
3629                         if (sense->es_add_code == 0x20) {
3630                                 /* Fake LUN 0 */
3631                                 SF_DEBUG(1, (sf, CE_CONT,
3632                                     "!REPORTLUN Faking good "
3633                                     "completion for alpa %x\n",
3634                                     privp->dest_nport_id));
3635                                 ptr->lun_list_len = FCP_LUN_SIZE;
3636                                 ptr->lun[0] = 0;
3637                                 rsp->fcp_u.fcp_status.scsi_status =
3638                                     STATUS_GOOD;
3639                         } else if (sense->es_add_code == 0x25) {
3640                                 SF_DEBUG(1, (sf, CE_CONT,
3641                                     "!REPORTLUN device alpa %x "
3642                                     "key %x code %x\n",
3643                                     privp->dest_nport_id,
3644                                     sense->es_key, sense->es_add_code));
3645                                 goto fail;
3646                         }
3647                 } else if (sense->es_key ==
3648                     KEY_UNIT_ATTENTION &&
3649                     sense->es_add_code == 0x29) {
3650                         SF_DEBUG(1, (sf, CE_CONT,
3651                             "!REPORTLUN device alpa %x was reset\n",
3652                             privp->dest_nport_id));
3653                 } else {
3654                         SF_DEBUG(1, (sf, CE_CONT,
3655                             "!REPORTLUN device alpa %x "
3656                             "key %x code %x\n",
3657                             privp->dest_nport_id,
3658                             sense->es_key, sense->es_add_code));
3659 /* XXXXXX The following is here to handle broken targets -- remove it later */
3660                         if (sf_reportlun_forever &&
3661                             sense->es_key == KEY_UNIT_ATTENTION)
3662                                 goto retry;
3663 /* XXXXXX */
3664                         if (sense->es_key == KEY_NOT_READY)
3665                                 delayed_retry = 1;
3666                 }
3667         }
3668 
3669         if (rsp && rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD) {
3670                 struct fcp_rsp_info *bep;
3671 
3672                 bep = (struct fcp_rsp_info *)(&rsp->
3673                     fcp_response_len + 1);
3674                 if (!rsp->fcp_u.fcp_status.rsp_len_set ||
3675                     bep->rsp_code == FCP_NO_FAILURE) {
3676                         (void) ddi_dma_sync(privp->data_dma_handle,
3677                             0, 0, DDI_DMA_SYNC_FORKERNEL);
3678 
3679                         /* Convert from # of bytes to # of 8-byte LUN entries */
3680                         ptr->lun_list_len = ptr->lun_list_len >> 3;
3681                         SF_DEBUG(2, (sf, CE_CONT,
3682                             "!REPORTLUN to al_pa %x succeeded: %d LUNs\n",
3683                             privp->dest_nport_id, ptr->lun_list_len));
3684                         if (!ptr->lun_list_len) {
3685                                 /* No LUNs? Ya gotta be kidding... */
3686                                 sf_log(sf, CE_WARN,
3687                                     "SCSI violation -- "
3688                                     "target 0x%x reports no LUNs\n",
3689                                     sf_alpa_to_switch[
3690                                     privp->dest_nport_id]);
3691                                 ptr->lun_list_len = 1;
3692                                 ptr->lun[0] = 0;
3693                         }
3694 
3695                         mutex_enter(&sf->sf_mutex);
3696                         if (sf->sf_lip_cnt == privp->lip_cnt) {
3697                                 sf->sf_device_count += ptr->lun_list_len - 1;
3698                         }
3699 
3700                         mutex_exit(&sf->sf_mutex);
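                             /*
                              * create a target for each additional LUN
                              * reported and send an INQUIRY to each one
                              */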
3701                         for (i = 0; i < ptr->lun_list_len && privp->lip_cnt ==
3702                             sf->sf_lip_cnt; i++) {
3703                                 struct sf_els_hdr *nprivp;
3704                                 struct fcal_packet *nfpkt;
3705 
3706                                 /* LUN 0 is already in `target' */
3707                                 if (ptr->lun[i] != 0) {
3708                                         target = sf_create_target(sf,
3709                                             privp, tid, ptr->lun[i]);
3710                                 }
3711                                 nprivp = NULL;
3712                                 nfpkt = NULL;
3713                                 if (target) {
3714                                         nfpkt = sf_els_alloc(sf,
3715                                             target->sft_al_pa,
3716                                             sizeof (struct sf_els_hdr),
3717                                             sizeof (union sf_els_cmd),
3718                                             sizeof (union sf_els_rsp),
3719                                             (caddr_t *)&nprivp,
3720                                             (caddr_t *)&rsp);
3721                                         if (nprivp)
3722                                                 nprivp->lip_cnt =
3723                                                     privp->lip_cnt;
3724                                 }
3725                                 if (nfpkt && nprivp &&
3726                                     (sf_do_inquiry(sf, nprivp, target) ==
3727                                     0)) {
3728                                         mutex_enter(&sf->sf_mutex);
3729                                         if (sf->sf_lip_cnt == privp->
3730                                             lip_cnt) {
3731                                                 sf->sf_device_count --;
3732                                         }
3733                                         sf_offline_target(sf, target);
3734                                         mutex_exit(&sf->sf_mutex);
3735                                 }
3736                         }
3737                         sf_els_free(fpkt);
3738                         return;
3739                 } else {
3740                         SF_DEBUG(1, (sf, CE_CONT,
3741                             "!REPORTLUN al_pa %x fcp failure, "
3742                             "fcp_rsp_code %x scsi status %x\n",
3743                             privp->dest_nport_id, bep->rsp_code,
3744                             rsp ? rsp->fcp_u.fcp_status.scsi_status:0));
3745                         goto fail;
3746                 }
3747         }
3748         if (rsp && ((rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) ||
3749             (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL))) {
3750                 delayed_retry = 1;
3751         }
3752 
3753         if (++(privp->retries) < sf_els_retries ||
3754             (delayed_retry && privp->retries < SF_BSY_RETRIES)) {
3755 /* XXXXXX The following is here to handle broken targets -- remove it later */
3756 retry:
3757 /* XXXXXX */
3758                 if (delayed_retry) {
3759                         privp->retries--;
3760                         privp->timeout = sf_watchdog_time + SF_BSY_TIMEOUT;
3761                         privp->delayed_retry = 1;
3762                 } else {
3763                         privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3764                 }
3765 
3766                 privp->prev = NULL;
3767                 mutex_enter(&sf->sf_mutex);
3768                 if (privp->lip_cnt == sf->sf_lip_cnt) {
3769                         if (!delayed_retry)
3770                                 SF_DEBUG(1, (sf, CE_WARN,
3771                                     "!REPORTLUN to al_pa %x failed, retrying\n",
3772                                     privp->dest_nport_id));
3773                         privp->next = sf->sf_els_list;
3774                         if (sf->sf_els_list != NULL)
3775                                 sf->sf_els_list->prev = privp;
3776                         sf->sf_els_list = privp;
3777                         mutex_exit(&sf->sf_mutex);
3778                         if (!delayed_retry && soc_transport(sf->sf_sochandle,
3779                             fpkt, FCAL_NOSLEEP, CQ_REQUEST_1) !=
3780                             FCAL_TRANSPORT_SUCCESS) {
3781                                 mutex_enter(&sf->sf_mutex);
3782                                 if (privp->prev)
3783                                         privp->prev->next = privp->next;
3784                                 if (privp->next)
3785                                         privp->next->prev = privp->prev;
3786                                 if (sf->sf_els_list == privp)
3787                                         sf->sf_els_list = privp->next;
3788                                 mutex_exit(&sf->sf_mutex);
3789                                 goto fail;
3790                         } else
3791                                 return;
3792                 } else {
3793                         mutex_exit(&sf->sf_mutex);
3794                 }
3795         } else {
3796 fail:
3797 
3798                 /* REPORT_LUN failed -- try inquiry */
3799                 if (sf_do_inquiry(sf, privp, target) != 0) {
3800                         return;
3801                 } else {
3802                         free_pkt = 0;
3803                 }
3804                 mutex_enter(&sf->sf_mutex);
3805                 if (sf->sf_lip_cnt == privp->lip_cnt) {
3806                         sf_log(sf, CE_WARN,
3807                             "!REPORTLUN to target 0x%x failed\n",
3808                             sf_alpa_to_switch[privp->dest_nport_id]);
3809                         sf_offline_target(sf, target);
3810                         sf->sf_device_count--;
3811                         ASSERT(sf->sf_device_count >= 0);
3812                         if (sf->sf_device_count == 0)
3813                                 sf_finish_init(sf, privp->lip_cnt);
3814                 }
3815                 mutex_exit(&sf->sf_mutex);
3816         }
3817         if (free_pkt) {
3818                 sf_els_free(fpkt);
3819         }
3820 }
3821 
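     /*
      * set up and send a SCSI INQUIRY to the given target: allocate and
      * bind a DMA buffer for the returned inquiry data, build an FCP
      * command carrying a SCMD_INQUIRY CDB, and hand the packet to
      * sf_els_transport(), with sf_inq_callback() as the completion routine
      */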
3822 static int
3823 sf_do_inquiry(struct sf *sf, struct sf_els_hdr *privp,
3824     struct sf_target *target)
3825 {
3826         struct  fcal_packet     *fpkt = privp->fpkt;
3827         ddi_dma_cookie_t        pcookie;
3828         ddi_dma_handle_t        inq_dma_handle = NULL;
3829         ddi_acc_handle_t        inq_acc_handle;
3830         uint_t                  ccount;
3831         size_t                  real_size;
3832         caddr_t                 inq_buf = NULL;
3833         int                     handle_bound = FALSE;
3834         fc_frame_header_t *hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3835         struct fcp_cmd          *inq = (struct fcp_cmd *)privp->cmd;
3836         char                    *msg = "Transport";
3837 
3838 
3839         if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3840             DDI_DMA_DONTWAIT, NULL, &inq_dma_handle) != DDI_SUCCESS) {
3841                 msg = "ddi_dma_alloc_handle()";
3842                 goto fail;
3843         }
3844 
3845         if (ddi_dma_mem_alloc(inq_dma_handle, SUN_INQSIZE,
3846             sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3847             DDI_DMA_DONTWAIT, NULL, &inq_buf,
3848             &real_size, &inq_acc_handle) != DDI_SUCCESS) {
3849                 msg = "ddi_dma_mem_alloc()";
3850                 goto fail;
3851         }
3852 
3853         if (real_size < SUN_INQSIZE) {
3854                 msg = "DMA mem < inquiry size";
3855                 goto fail;
3856         }
3857 
3858         if (ddi_dma_addr_bind_handle(inq_dma_handle, NULL,
3859             inq_buf, real_size, DDI_DMA_READ | DDI_DMA_CONSISTENT,
3860             DDI_DMA_DONTWAIT, NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3861                 msg = "ddi_dma_addr_bind_handle()";
3862                 goto fail;
3863         }
3864         handle_bound = TRUE;
3865 
3866         if (ccount != 1) {
3867                 msg = "ccount != 1";
3868                 goto fail;
3869         }
3870         privp->els_code = 0;                 /* not an ELS command */
3871         privp->target = target;
3872         privp->data_dma_handle = inq_dma_handle;
3873         privp->data_acc_handle = inq_acc_handle;
3874         privp->data_buf = inq_buf;
3875         fpkt->fcal_pkt_comp = sf_inq_callback;
3876         fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
3877         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_IO_READ;
3878         fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
3879             sizeof (struct fcp_cmd);
3880         fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
3881             (uint32_t)pcookie.dmac_address;
3882         fpkt->fcal_socal_request.sr_dataseg[2].fc_count = pcookie.dmac_size;
3883         fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = pcookie.dmac_size;
3884         hp->r_ctl = R_CTL_COMMAND;
3885         hp->type = TYPE_SCSI_FCP;
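             /*
              * build the FCP command payload: a group-0 INQUIRY CDB asking
              * for SUN_INQSIZE bytes of read data from this target's LUN
              */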
3886         bzero((caddr_t)inq, sizeof (struct fcp_cmd));
3887         ((union scsi_cdb *)inq->fcp_cdb)->scc_cmd = SCMD_INQUIRY;
3888         ((union scsi_cdb *)inq->fcp_cdb)->g0_count0 = SUN_INQSIZE;
3889         bcopy((caddr_t)&target->sft_lun.b, (caddr_t)&inq->fcp_ent_addr,
3890             FCP_LUN_SIZE);
3891         inq->fcp_cntl.cntl_read_data = 1;
3892         inq->fcp_cntl.cntl_write_data = 0;
3893         inq->fcp_data_len = pcookie.dmac_size;
3894         inq->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
3895 
3896         (void) ddi_dma_sync(inq_dma_handle, (off_t)0, (size_t)0,
3897             DDI_DMA_SYNC_FORDEV);
3898         privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3899         SF_DEBUG(5, (sf, CE_WARN,
3900             "!Sending INQUIRY to al_pa %x lun %" PRIx64 "\n",
3901             privp->dest_nport_id,
3902             SCSA_LUN(target)));
3903         return (sf_els_transport(sf, privp));
3904 
3905 fail:
3906         sf_log(sf, CE_WARN,
3907             "%s failure for INQUIRY to target 0x%x\n",
3908             msg, sf_alpa_to_switch[privp->dest_nport_id]);
3909         sf_els_free(fpkt);
3910         if (inq_dma_handle != NULL) {
3911                 if (handle_bound) {
3912                         (void) ddi_dma_unbind_handle(inq_dma_handle);
3913                 }
3914                 ddi_dma_free_handle(&inq_dma_handle);
3915         }
3916         if (inq_buf != NULL) {
3917                 ddi_dma_mem_free(&inq_acc_handle);
3918         }
3919         return (FALSE);
3920 }
3921 
3922 
3923 /*
3924  * called as the pkt_comp routine for INQ packets
3925  */
3926 static void
3927 sf_inq_callback(struct fcal_packet *fpkt)
3928 {
3929         struct sf_els_hdr *privp = (struct sf_els_hdr *)fpkt->
3930             fcal_pkt_private;
3931         struct scsi_inquiry *prt = (struct scsi_inquiry *)privp->data_buf;
3932         struct sf *sf = privp->sf;
3933         struct sf *tsf;
3934         struct sf_target *target = privp->target;
3935         struct fcp_rsp *rsp;
3936         int delayed_retry = FALSE;
3937         short   ncmds;
3938 
3939 
3940         mutex_enter(&sf->sf_mutex);
3941         /* use as temporary state variable */
3942         if (privp->timeout == SF_INVALID_TIMEOUT) {
3943                 mutex_exit(&sf->sf_mutex);
3944                 return;
3945         }
3946         if (privp->prev != NULL) {
3947                 privp->prev->next = privp->next;
3948         }
3949         if (privp->next != NULL) {
3950                 privp->next->prev = privp->prev;
3951         }
3952         if (sf->sf_els_list == privp) {
3953                 sf->sf_els_list = privp->next;
3954         }
3955         privp->prev = privp->next = NULL;
3956         mutex_exit(&sf->sf_mutex);
3957         ncmds = fpkt->fcal_ncmds;
3958         ASSERT(ncmds >= 0);
3959         mutex_enter(&sf->sf_cmd_mutex);
3960         sf->sf_ncmds = ncmds;
3961         mutex_exit(&sf->sf_cmd_mutex);
3962 
3963         if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
3964 
3965                 (void) ddi_dma_sync(privp->rsp_dma_handle, (off_t)0,
3966                     (size_t)0, DDI_DMA_SYNC_FORKERNEL);
3967 
3968                 rsp = (struct fcp_rsp *)privp->rsp;
3969                 SF_DEBUG(2, (sf, CE_CONT,
3970                     "!INQUIRY to al_pa %x scsi status %x",
3971                     privp->dest_nport_id, rsp->fcp_u.fcp_status.scsi_status));
3972 
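                     /*
                      * accept the inquiry data only if the command returned
                      * GOOD status with no residual overrun and, if there was
                      * an underrun, at least SUN_MIN_INQLEN bytes of data
                      */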
3973                 if ((rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD) &&
3974                     !rsp->fcp_u.fcp_status.resid_over &&
3975                     (!rsp->fcp_u.fcp_status.resid_under ||
3976                     ((SUN_INQSIZE - rsp->fcp_resid) >= SUN_MIN_INQLEN))) {
3977                         struct fcp_rsp_info *bep;
3978 
3979                         bep = (struct fcp_rsp_info *)(&rsp->
3980                             fcp_response_len + 1);
3981 
3982                         if (!rsp->fcp_u.fcp_status.rsp_len_set ||
3983                             (bep->rsp_code == FCP_NO_FAILURE)) {
3984 
3985                                 SF_DEBUG(2, (sf, CE_CONT,
3986                                     "!INQUIRY to al_pa %x lun %" PRIx64
3987                                     " succeeded\n",
3988                                     privp->dest_nport_id, SCSA_LUN(target)));
3989 
3990                                 (void) ddi_dma_sync(privp->data_dma_handle,
3991                                     (off_t)0, (size_t)0,
3992                                     DDI_DMA_SYNC_FORKERNEL);
3993 
3994                                 mutex_enter(&sf->sf_mutex);
3995 
3996                                 if (sf->sf_lip_cnt == privp->lip_cnt) {
3997                                         mutex_enter(&target->sft_mutex);
3998                                         target->sft_device_type =
3999                                             prt->inq_dtype;
4000                                         bcopy(prt, &target->sft_inq,
4001                                             sizeof (*prt));
4002                                         mutex_exit(&target->sft_mutex);
4003                                         sf->sf_device_count--;
4004                                         ASSERT(sf->sf_device_count >= 0);
4005                                         if (sf->sf_device_count == 0) {
4006                                                 sf_finish_init(sf,
4007                                                     privp->lip_cnt);
4008                                         }
4009                                 }
4010                                 mutex_exit(&sf->sf_mutex);
4011                                 sf_els_free(fpkt);
4012                                 return;
4013                         }
4014                 } else if ((rsp->fcp_u.fcp_status.scsi_status ==
4015                     STATUS_BUSY) ||
4016                     (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL) ||
4017                     (rsp->fcp_u.fcp_status.scsi_status == STATUS_CHECK)) {
4018                         delayed_retry = TRUE;
4019                 }
4020         } else {
4021                 SF_DEBUG(2, (sf, CE_CONT, "!INQUIRY to al_pa %x fc status %x",
4022                     privp->dest_nport_id, fpkt->fcal_pkt_status));
4023         }
4024 
4025         if (++(privp->retries) < sf_els_retries ||
4026             (delayed_retry && privp->retries < SF_BSY_RETRIES)) {
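                     /*
                      * if the adapter ran out of exchanges, throttle back the
                      * sibling port (if any) and turn this into a delayed
                      * retry
                      */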
4027                 if (fpkt->fcal_pkt_status == FCAL_STATUS_MAX_XCHG_EXCEEDED)  {
4028                         tsf = sf->sf_sibling;
4029                         if (tsf != NULL) {
4030                                 mutex_enter(&tsf->sf_cmd_mutex);
4031                                 tsf->sf_flag = 1;
4032                                 tsf->sf_throttle = SF_DECR_DELTA;
4033                                 mutex_exit(&tsf->sf_cmd_mutex);
4034                         }
4035                         delayed_retry = 1;
4036                 }
4037                 if (delayed_retry) {
4038                         privp->retries--;
4039                         privp->timeout = sf_watchdog_time + SF_BSY_TIMEOUT;
4040                         privp->delayed_retry = TRUE;
4041                 } else {
4042                         privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
4043                 }
4044 
4045                 privp->prev = NULL;
4046                 mutex_enter(&sf->sf_mutex);
4047                 if (privp->lip_cnt == sf->sf_lip_cnt) {
4048                         if (!delayed_retry) {
4049                                 SF_DEBUG(1, (sf, CE_WARN,
4050                                     "INQUIRY to al_pa %x failed, retrying",
4051                                     privp->dest_nport_id));
4052                         }
4053                         privp->next = sf->sf_els_list;
4054                         if (sf->sf_els_list != NULL) {
4055                                 sf->sf_els_list->prev = privp;
4056                         }
4057                         sf->sf_els_list = privp;
4058                         mutex_exit(&sf->sf_mutex);
4059                         /* if not delayed call transport to send a pkt */
4060                         if (!delayed_retry &&
4061                             (soc_transport(sf->sf_sochandle, fpkt,
4062                             FCAL_NOSLEEP, CQ_REQUEST_1) !=
4063                             FCAL_TRANSPORT_SUCCESS)) {
4064                                 mutex_enter(&sf->sf_mutex);
4065                                 if (privp->prev != NULL) {
4066                                         privp->prev->next = privp->next;
4067                                 }
4068                                 if (privp->next != NULL) {
4069                                         privp->next->prev = privp->prev;
4070                                 }
4071                                 if (sf->sf_els_list == privp) {
4072                                         sf->sf_els_list = privp->next;
4073                                 }
4074                                 mutex_exit(&sf->sf_mutex);
4075                                 goto fail;
4076                         }
4077                         return;
4078                 }
4079                 mutex_exit(&sf->sf_mutex);
4080         } else {
4081 fail:
4082                 mutex_enter(&sf->sf_mutex);
4083                 if (sf->sf_lip_cnt == privp->lip_cnt) {
4084                         sf_offline_target(sf, target);
4085                         sf_log(sf, CE_NOTE,
4086                             "INQUIRY to target 0x%x lun %" PRIx64 " failed. "
4087                             "Retry Count: %d\n",
4088                             sf_alpa_to_switch[privp->dest_nport_id],
4089                             SCSA_LUN(target),
4090                             privp->retries);
4091                         sf->sf_device_count--;
4092                         ASSERT(sf->sf_device_count >= 0);
4093                         if (sf->sf_device_count == 0) {
4094                                 sf_finish_init(sf, privp->lip_cnt);
4095                         }
4096                 }
4097                 mutex_exit(&sf->sf_mutex);
4098         }
4099         sf_els_free(fpkt);
4100 }
4101 
4102 
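     /*
      * called with sf_mutex held to finish initialization after a LIP:
      * walk all the WWN hash queues, offline any targets still marked,
      * create devinfo nodes and properties for the rest, queue them for
      * the hotplug daemon (or online them directly if no memory), and
      * finally mark the instance SF_STATE_ONLINE
      */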
4103 static void
4104 sf_finish_init(struct sf *sf, int lip_cnt)
4105 {
4106         int                     i;              /* loop index */
4107         int                     cflag;
4108         struct sf_target        *target;        /* current target */
4109         dev_info_t              *dip;
4110         struct sf_hp_elem       *elem;          /* hotplug element created */
4111 
4112         SF_DEBUG(1, (sf, CE_WARN, "!sf_finish_init\n"));
4113         ASSERT(mutex_owned(&sf->sf_mutex));
4114 
4115         /* scan all hash queues */
4116         for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
4117                 target = sf->sf_wwn_lists[i];
4118                 while (target != NULL) {
4119                         mutex_enter(&target->sft_mutex);
4120 
4121                         /* see if target is not offline */
4122                         if ((target->sft_state & SF_TARGET_OFFLINE)) {
4123                                 /*
4124                                  * target already offline
4125                                  */
4126                                 mutex_exit(&target->sft_mutex);
4127                                 goto next_entry;
4128                         }
4129 
4130                         /*
4131                          * target is not already offline -- see if it has
4132                          * already been marked as ready to go offline
4133                          */
4134                         if (target->sft_state & SF_TARGET_MARK) {
4135                                 /*
4136                                  * target already marked, so take it offline
4137                                  */
4138                                 mutex_exit(&target->sft_mutex);
4139                                 sf_offline_target(sf, target);
4140                                 goto next_entry;
4141                         }
4142 
4143                         /* clear target busy flag */
4144                         target->sft_state &= ~SF_TARGET_BUSY;
4145 
4146                         /* is target init not yet done ?? */
4147                         cflag = !(target->sft_state & SF_TARGET_INIT_DONE);
4148 
4149                         /* get pointer to target dip */
4150                         dip = target->sft_dip;
4151 
4152                         mutex_exit(&target->sft_mutex);
4153                         mutex_exit(&sf->sf_mutex);
4154 
4155                         if (cflag && (dip == NULL)) {
4156                                 /*
4157                                  * target init not yet done &&
4158                                  * devinfo not yet created
4159                                  */
4160                                 sf_create_devinfo(sf, target, lip_cnt);
4161                                 mutex_enter(&sf->sf_mutex);
4162                                 goto next_entry;
4163                         }
4164 
4165                         /*
4166                          * target init already done || devinfo already created
4167                          */
4168                         ASSERT(dip != NULL);
4169                         if (!sf_create_props(dip, target, lip_cnt)) {
4170                                 /* a problem creating properties */
4171                                 mutex_enter(&sf->sf_mutex);
4172                                 goto next_entry;
4173                         }
4174 
4175                         /* create a new element for the hotplug list */
4176                         if ((elem = kmem_zalloc(sizeof (struct sf_hp_elem),
4177                             KM_NOSLEEP)) != NULL) {
4178 
4179                                 /* fill in the new element */
4180                                 elem->dip = dip;
4181                                 elem->target = target;
4182                                 elem->what = SF_ONLINE;
4183 
4184                                 /* add the new element into the hotplug list */
4185                                 mutex_enter(&sf->sf_hp_daemon_mutex);
4186                                 if (sf->sf_hp_elem_tail != NULL) {
4187                                         sf->sf_hp_elem_tail->next = elem;
4188                                         sf->sf_hp_elem_tail = elem;
4189                                 } else {
4190                                         /* this is the first element in list */
4191                                         sf->sf_hp_elem_head =
4192                                             sf->sf_hp_elem_tail =
4193                                             elem;
4194                                 }
4195                                 cv_signal(&sf->sf_hp_daemon_cv);
4196                                 mutex_exit(&sf->sf_hp_daemon_mutex);
4197                         } else {
4198                                 /* could not allocate memory for element ?? */
4199                                 (void) ndi_devi_online_async(dip, 0);
4200                         }
4201 
4202                         mutex_enter(&sf->sf_mutex);
4203 
4204 next_entry:
4205                         /* ensure no new LIPs have occurred */
4206                         if (sf->sf_lip_cnt != lip_cnt) {
4207                                 return;
4208                         }
4209                         target = target->sft_next;
4210                 }
4211 
4212                 /* done scanning all targets in this queue */
4213         }
4214 
4215         /* done with all hash queues */
4216 
4217         sf->sf_state = SF_STATE_ONLINE;
4218         sf->sf_online_timer = 0;
4219 }
4220 
4221 
4222 /*
4223  * create devinfo node
4224  */
4225 static void
4226 sf_create_devinfo(struct sf *sf, struct sf_target *target, int lip_cnt)
4227 {
4228         dev_info_t              *cdip = NULL;
4229         char                    *nname = NULL;
4230         char                    **compatible = NULL;
4231         int                     ncompatible;
4232         struct scsi_inquiry     *inq = &target->sft_inq;
4233         char                    *scsi_binding_set;
4234 
4235         /* get the 'scsi-binding-set' property */
4236         if (ddi_prop_lookup_string(DDI_DEV_T_ANY, sf->sf_dip,
4237             DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
4238             &scsi_binding_set) != DDI_PROP_SUCCESS)
4239                 scsi_binding_set = NULL;
4240 
4241         /* determine the node name and compatible */
4242         scsi_hba_nodename_compatible_get(inq, scsi_binding_set,
4243             inq->inq_dtype, NULL, &nname, &compatible, &ncompatible);
4244         if (scsi_binding_set)
4245                 ddi_prop_free(scsi_binding_set);
4246 
4247         /* if nodename can't be determined then print a message and skip it */
4248         if (nname == NULL) {
4249 #ifndef RAID_LUNS
4250                 sf_log(sf, CE_WARN, "%s%d: no driver for device "
4251                     "@w%02x%02x%02x%02x%02x%02x%02x%02x,%x\n"
4252                     "    compatible: %s",
4253                     ddi_driver_name(sf->sf_dip), ddi_get_instance(sf->sf_dip),
4254                     target->sft_port_wwn[0], target->sft_port_wwn[1],
4255                     target->sft_port_wwn[2], target->sft_port_wwn[3],
4256                     target->sft_port_wwn[4], target->sft_port_wwn[5],
4257                     target->sft_port_wwn[6], target->sft_port_wwn[7],
4258                     target->sft_lun.l, *compatible);
4259 #else
4260                 sf_log(sf, CE_WARN, "%s%d: no driver for device "
4261                     "@w%02x%02x%02x%02x%02x%02x%02x%02x,%x\n"
4262                     "    compatible: %s",
4263                     ddi_driver_name(sf->sf_dip), ddi_get_instance(sf->sf_dip),
4264                     target->sft_port_wwn[0], target->sft_port_wwn[1],
4265                     target->sft_port_wwn[2], target->sft_port_wwn[3],
4266                     target->sft_port_wwn[4], target->sft_port_wwn[5],
4267                     target->sft_port_wwn[6], target->sft_port_wwn[7],
4268                     target->sft_raid_lun, *compatible);
4269 #endif
4270                 goto fail;
4271         }
4272 
4273         /* allocate the node */
4274         if (ndi_devi_alloc(sf->sf_dip, nname,
4275             DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
4276                 goto fail;
4277         }
4278 
4279         /* decorate the node with compatible */
4280         if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
4281             "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
4282                 goto fail;
4283         }
4284 
4285         /* add addressing properties to the node */
4286         if (sf_create_props(cdip, target, lip_cnt) != 1) {
4287                 goto fail;
4288         }
4289 
4290         mutex_enter(&target->sft_mutex);
4291         if (target->sft_dip != NULL) {
4292                 mutex_exit(&target->sft_mutex);
4293                 goto fail;
4294         }
4295         target->sft_dip = cdip;
4296         mutex_exit(&target->sft_mutex);
4297 
4298         if (ndi_devi_online_async(cdip, 0) != DDI_SUCCESS) {
4299                 goto fail;
4300         }
4301 
4302         scsi_hba_nodename_compatible_free(nname, compatible);
4303         return;
4304 
4305 fail:
4306         scsi_hba_nodename_compatible_free(nname, compatible);
4307         if (cdip != NULL) {
4308                 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP);
4309                 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP);
4310                 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, LIP_CNT_PROP);
4311                 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, TARGET_PROP);
4312                 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, LUN_PROP);
4313                 if (ndi_devi_free(cdip) != NDI_SUCCESS) {
4314                         sf_log(sf, CE_WARN, "ndi_devi_free failed\n");
4315                 } else {
4316                         mutex_enter(&target->sft_mutex);
4317                         if (cdip == target->sft_dip) {
4318                                 target->sft_dip = NULL;
4319                         }
4320                         mutex_exit(&target->sft_mutex);
4321                 }
4322         }
4323 }
4324 
4325 /*
4326  * create required properties, returning TRUE iff we succeed, else
4327  * returning FALSE
4328  */
4329 static int
4330 sf_create_props(dev_info_t *cdip, struct sf_target *target, int lip_cnt)
4331 {
4332         int tgt_id = sf_alpa_to_switch[target->sft_al_pa];
4333 
4334 
4335         if (ndi_prop_update_byte_array(DDI_DEV_T_NONE,
4336             cdip, NODE_WWN_PROP, target->sft_node_wwn, FC_WWN_SIZE) !=
4337             DDI_PROP_SUCCESS) {
4338                 return (FALSE);
4339         }
4340 
4341         if (ndi_prop_update_byte_array(DDI_DEV_T_NONE,
4342             cdip, PORT_WWN_PROP, target->sft_port_wwn, FC_WWN_SIZE) !=
4343             DDI_PROP_SUCCESS) {
4344                 return (FALSE);
4345         }
4346 
4347         if (ndi_prop_update_int(DDI_DEV_T_NONE,
4348             cdip, LIP_CNT_PROP, lip_cnt) != DDI_PROP_SUCCESS) {
4349                 return (FALSE);
4350         }
4351 
4352         if (ndi_prop_update_int(DDI_DEV_T_NONE,
4353             cdip, TARGET_PROP, tgt_id) != DDI_PROP_SUCCESS) {
4354                 return (FALSE);
4355         }
4356 
4357 #ifndef RAID_LUNS
4358         if (ndi_prop_update_int(DDI_DEV_T_NONE,
4359             cdip, LUN_PROP, target->sft_lun.l) != DDI_PROP_SUCCESS) {
4360                 return (FALSE);
4361         }
4362 #else
4363         if (ndi_prop_update_int(DDI_DEV_T_NONE,
4364             cdip, LUN_PROP, target->sft_raid_lun) != DDI_PROP_SUCCESS) {
4365                 return (FALSE);
4366         }
4367 #endif
4368 
4369         return (TRUE);
4370 }
4371 
4372 
4373 /*
4374  * called by the transport to offline a target
4375  */
4376 /* ARGSUSED */
4377 static void
4378 sf_offline_target(struct sf *sf, struct sf_target *target)
4379 {
4380         dev_info_t *dip;
4381         struct sf_target *next_target = NULL;
4382         struct sf_hp_elem       *elem;
4383 
4384         ASSERT(mutex_owned(&sf->sf_mutex));
4385 
4386         if (sf_core && (sf_core & SF_CORE_OFFLINE_TARGET)) {
4387                 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
4388                 sf_core = 0;
4389         }
4390 
4391         while (target != NULL) {
4392                 sf_log(sf, CE_NOTE,
4393                     "!target 0x%x al_pa 0x%x lun %" PRIx64 " offlined\n",
4394                     sf_alpa_to_switch[target->sft_al_pa],
4395                     target->sft_al_pa, SCSA_LUN(target));
4396                 mutex_enter(&target->sft_mutex);
4397                 target->sft_state &= ~(SF_TARGET_BUSY|SF_TARGET_MARK);
4398                 target->sft_state |= SF_TARGET_OFFLINE;
4399                 mutex_exit(&target->sft_mutex);
4400                 mutex_exit(&sf->sf_mutex);
4401 
4402                 /* XXXX if this is LUN 0, offline all other LUNs */
4403                 if (next_target || target->sft_lun.l == 0)
4404                         next_target = target->sft_next_lun;
4405 
4406                 /* abort all cmds for this target */
4407                 sf_abort_all(sf, target, FALSE, sf->sf_lip_cnt, FALSE);
4408 
4409                 mutex_enter(&sf->sf_mutex);
4410                 mutex_enter(&target->sft_mutex);
4411                 if (target->sft_state & SF_TARGET_INIT_DONE) {
4412                         dip = target->sft_dip;
4413                         mutex_exit(&target->sft_mutex);
4414                         mutex_exit(&sf->sf_mutex);
4415                         (void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
4416                             TARGET_PROP);
4417                         (void) ndi_event_retrieve_cookie(sf->sf_event_hdl,
4418                             dip, FCAL_REMOVE_EVENT, &sf_remove_eid,
4419                             NDI_EVENT_NOPASS);
4420                         (void) ndi_event_run_callbacks(sf->sf_event_hdl,
4421                             target->sft_dip, sf_remove_eid, NULL);
4422 
4423                         elem = kmem_zalloc(sizeof (struct sf_hp_elem),
4424                             KM_NOSLEEP);
4425                         if (elem != NULL) {
4426                                 elem->dip = dip;
4427                                 elem->target = target;
4428                                 elem->what = SF_OFFLINE;
4429                                 mutex_enter(&sf->sf_hp_daemon_mutex);
4430                                 if (sf->sf_hp_elem_tail != NULL) {
4431                                         sf->sf_hp_elem_tail->next = elem;
4432                                         sf->sf_hp_elem_tail = elem;
4433                                 } else {
4434                                         sf->sf_hp_elem_head =
4435                                             sf->sf_hp_elem_tail =
4436                                             elem;
4437                                 }
4438                                 cv_signal(&sf->sf_hp_daemon_cv);
4439                                 mutex_exit(&sf->sf_hp_daemon_mutex);
4440                         } else {
4441                                 /* don't do NDI_DEVI_REMOVE for now */
4442                                 if (ndi_devi_offline(dip, 0) != NDI_SUCCESS) {
4443                                         SF_DEBUG(1, (sf, CE_WARN,
4444                                             "target %x lun %" PRIx64 ", "
4445                                             "device offline failed",
4446                                             sf_alpa_to_switch[target->
4447                                             sft_al_pa],
4448                                             SCSA_LUN(target)));
4449                                 } else {
4450                                         SF_DEBUG(1, (sf, CE_NOTE,
4451                                             "target %x, lun %" PRIx64 ", "
4452                                             "device offline succeeded\n",
4453                                             sf_alpa_to_switch[target->
4454                                             sft_al_pa],
4455                                             SCSA_LUN(target)));
4456                                 }
4457                         }
4458                         mutex_enter(&sf->sf_mutex);
4459                 } else {
4460                         mutex_exit(&target->sft_mutex);
4461                 }
4462                 target = next_target;
4463         }
4464 }
4465 
4466 
4467 /*
4468  * routine to get/set a capability
4469  *
4470  * returning:
4471  *      1 (TRUE)        boolean capability is true (on get)
4472  *      0 (FALSE)       invalid capability, can't set capability (on set),
4473  *                      or boolean capability is false (on get)
4474  *      -1 (UNDEFINED)  can't find capability (SCSA) or unsupported capability
4475  *      3               when getting SCSI version number
4476  *      AL_PA           when getting port initiator ID
4477  */
4478 static int
4479 sf_commoncap(struct scsi_address *ap, char *cap,
4480     int val, int tgtonly, int doset)
4481 {
4482         struct sf *sf = ADDR2SF(ap);
4483         int cidx;
4484         int rval = FALSE;
4485 
4486 
4487         if (cap == NULL) {
4488                 SF_DEBUG(3, (sf, CE_WARN, "sf_commoncap: invalid arg"));
4489                 return (rval);
4490         }
4491 
4492         /* get index of capability string */
4493         if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
4494                 /* can't find capability */
4495                 return (UNDEFINED);
4496         }
4497 
4498         if (doset) {
4499                 /*
4500                  * Process setcap request.
4501                  */
4502 
4503                 /*
4504                  * At present, we can only set binary (0/1) values
4505                  */
4506                 switch (cidx) {
4507                 case SCSI_CAP_ARQ:      /* can't set this capability */
4508                         break;
4509                 default:
4510                         SF_DEBUG(3, (sf, CE_WARN,
4511                             "sf_setcap: unsupported %d", cidx));
4512                         rval = UNDEFINED;
4513                         break;
4514                 }
4515 
4516                 SF_DEBUG(4, (sf, CE_NOTE,
4517                     "set cap: cap=%s,val=0x%x,tgtonly=0x%x"
4518                     ",doset=0x%x,rval=%d\n",
4519                     cap, val, tgtonly, doset, rval));
4520 
4521         } else {
4522                 /*
4523                  * Process getcap request.
4524                  */
4525                 switch (cidx) {
4526                 case SCSI_CAP_DMA_MAX:
4527                         break;          /* don't have this capability */
4528                 case SCSI_CAP_INITIATOR_ID:
4529                         rval = sf->sf_al_pa;
4530                         break;
4531                 case SCSI_CAP_ARQ:
4532                         rval = TRUE;    /* do have this capability */
4533                         break;
4534                 case SCSI_CAP_RESET_NOTIFICATION:
4535                 case SCSI_CAP_TAGGED_QING:
4536                         rval = TRUE;    /* do have this capability */
4537                         break;
4538                 case SCSI_CAP_SCSI_VERSION:
4539                         rval = 3;
4540                         break;
4541                 case SCSI_CAP_INTERCONNECT_TYPE:
4542                         rval = INTERCONNECT_FIBRE;
4543                         break;
4544                 default:
4545                         SF_DEBUG(4, (sf, CE_WARN,
4546                             "sf_scsi_getcap: unsupported"));
4547                         rval = UNDEFINED;
4548                         break;
4549                 }
4550                 SF_DEBUG(4, (sf, CE_NOTE,
4551                     "get cap: cap=%s,val=0x%x,tgtonly=0x%x,"
4552                     "doset=0x%x,rval=%d\n",
4553                     cap, val, tgtonly, doset, rval));
4554         }
4555 
4556         return (rval);
4557 }
4558 
4559 
4560 /*
4561  * called by the transport to get a capability
4562  */
4563 static int
4564 sf_getcap(struct scsi_address *ap, char *cap, int whom)
4565 {
4566         return (sf_commoncap(ap, cap, 0, whom, FALSE));
4567 }
4568 
4569 
4570 /*
4571  * called by the transport to set a capability
4572  */
4573 static int
4574 sf_setcap(struct scsi_address *ap, char *cap, int value, int whom)
4575 {
4576         return (sf_commoncap(ap, cap, value, whom, TRUE));
4577 }
4578 
4579 
4580 /*
4581  * called by the transport to abort a packet (or a target, if pkt is NULL)
4582  */
4583 static int
4584 sf_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
4585 {
4586         struct sf *sf = ADDR2SF(ap);
4587         struct sf_target *target = ADDR2TARGET(ap);
4588         struct sf_pkt *cmd, *ncmd, *pcmd;
4589         struct fcal_packet *fpkt;
4590         int     rval = 0, t, my_rval = FALSE;
4591         int     old_target_state;
4592         int     lip_cnt;
4593         int     tgt_id;
4594         fc_frame_header_t       *hp;
4595         int     deferred_destroy;
4596 
4597         deferred_destroy = 0;
4598 
4599         if (pkt != NULL) {
4600                 cmd = PKT2CMD(pkt);
4601                 fpkt = cmd->cmd_fp_pkt;
4602                 SF_DEBUG(2, (sf, CE_NOTE, "sf_abort packet %p\n",
4603                     (void *)fpkt));
4604                 pcmd = NULL;
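                     /*
                      * first look for the command on the driver's internal
                      * overflow queue; if it is found there it can simply be
                      * unlinked and completed as aborted
                      */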
4605                 mutex_enter(&sf->sf_cmd_mutex);
4606                 ncmd = sf->sf_pkt_head;
4607                 while (ncmd != NULL) {
4608                         if (ncmd == cmd) {
4609                                 if (pcmd != NULL) {
4610                                         pcmd->cmd_next = cmd->cmd_next;
4611                                 } else {
4612                                         sf->sf_pkt_head = cmd->cmd_next;
4613                                 }
4614                                 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
4615                                 cmd->cmd_state = SF_STATE_IDLE;
4616                                 pkt->pkt_reason = CMD_ABORTED;
4617                                 pkt->pkt_statistics |= STAT_ABORTED;
4618                                 my_rval = TRUE;
4619                                 break;
4620                         } else {
4621                                 pcmd = ncmd;
4622                                 ncmd = ncmd->cmd_next;
4623                         }
4624                 }
4625                 mutex_exit(&sf->sf_cmd_mutex);
4626                 if (ncmd == NULL) {
4627                         mutex_enter(&cmd->cmd_abort_mutex);
4628                         if (cmd->cmd_state == SF_STATE_ISSUED) {
4629                                 cmd->cmd_state = SF_STATE_ABORTING;
4630                                 cmd->cmd_timeout = sf_watchdog_time + 20;
4631                                 mutex_exit(&cmd->cmd_abort_mutex);
4632                                 /* call transport to abort command */
4633                                 if (((rval = soc_abort(sf->sf_sochandle,
4634                                     sf->sf_socp, sf->sf_sochandle->fcal_portno,
4635                                     fpkt, 1)) == FCAL_ABORTED) ||
4636                                     (rval == FCAL_ABORT_FAILED)) {
4637                                         my_rval = TRUE;
4638                                         pkt->pkt_reason = CMD_ABORTED;
4639                                         pkt->pkt_statistics |= STAT_ABORTED;
4640                                         cmd->cmd_state = SF_STATE_IDLE;
4641                                 } else if (rval == FCAL_BAD_ABORT) {
4642                                         cmd->cmd_timeout = sf_watchdog_time
4643                                             + 20;
4644                                         my_rval = FALSE;
4645                                 } else {
4646                                         SF_DEBUG(1, (sf, CE_NOTE,
4647                                             "Command Abort failed\n"));
4648                                 }
4649                         } else {
4650                                 mutex_exit(&cmd->cmd_abort_mutex);
4651                         }
4652                 }
4653         } else {
4654                 SF_DEBUG(2, (sf, CE_NOTE, "sf_abort target\n"));
4655                 mutex_enter(&sf->sf_mutex);
4656                 lip_cnt = sf->sf_lip_cnt;
4657                 mutex_enter(&target->sft_mutex);
4658                 if (target->sft_state & (SF_TARGET_BUSY |
4659                     SF_TARGET_OFFLINE)) {
4660                         mutex_exit(&target->sft_mutex);
4661                         return (rval);
4662                 }
4663                 old_target_state = target->sft_state;
4664                 target->sft_state |= SF_TARGET_BUSY;
4665                 mutex_exit(&target->sft_mutex);
4666                 mutex_exit(&sf->sf_mutex);
4667 
4668                 if ((pkt = sf_scsi_init_pkt(ap, NULL, NULL, 0,
4669                     0, 0, 0, NULL, 0)) != NULL) {
4670 
4671                         cmd = PKT2CMD(pkt);
4672                         cmd->cmd_block->fcp_cntl.cntl_abort_tsk = 1;
4673                         cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
4674                         cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
4675 
4676                         /* prepare the packet for transport */
4677                         if (sf_prepare_pkt(sf, cmd, target) == TRAN_ACCEPT) {
4678 
4679                                 cmd->cmd_state = SF_STATE_ISSUED;
4680                                 /*
4681                                  * call transport to send a pkt polled
4682                                  *
4683                                  * if that fails call the transport to abort it
4684                                  */
4685                                 if (soc_transport_poll(sf->sf_sochandle,
4686                                     cmd->cmd_fp_pkt, SF_ABORT_TIMEOUT,
4687                                     CQ_REQUEST_1) == FCAL_TRANSPORT_SUCCESS) {
4688                                         (void) ddi_dma_sync(
4689                                             cmd->cmd_cr_pool->rsp_dma_handle,
4690                                             (off_t)
4691                                             ((caddr_t)cmd->cmd_rsp_block -
4692                                             cmd->cmd_cr_pool->rsp_base),
4693                                             FCP_MAX_RSP_IU_SIZE,
4694                                             DDI_DMA_SYNC_FORKERNEL);
4695                                         if (((struct fcp_rsp_info *)
4696                                             (&cmd->cmd_rsp_block->
4697                                             fcp_response_len + 1))->
4698                                             rsp_code == FCP_NO_FAILURE) {
4699                                                 /* abort cmds for this targ */
4700                                                 sf_abort_all(sf, target, TRUE,
4701                                                     lip_cnt, TRUE);
4702                                         } else {
4703                                                 hp = &cmd->cmd_fp_pkt->
4704                                                     fcal_socal_request.
4705                                                     sr_fc_frame_hdr;
4706                                                 tgt_id = sf_alpa_to_switch[
4707                                                     (uchar_t)hp->d_id];
4708                                                 sf->sf_stats.tstats[tgt_id].
4709                                                     task_mgmt_failures++;
4710                                                 SF_DEBUG(1, (sf, CE_NOTE,
4711                                                     "Target %d Abort Task "
4712                                                     "Set failed\n", hp->d_id));
4713                                         }
4714                                 } else {
4715                                         mutex_enter(&cmd->cmd_abort_mutex);
4716                                         if (cmd->cmd_state == SF_STATE_ISSUED) {
4717                                         cmd->cmd_state = SF_STATE_ABORTING;
4718                                         cmd->cmd_timeout = sf_watchdog_time
4719                                             + 20;
4720                                         mutex_exit(&cmd->cmd_abort_mutex);
4721                                         if ((t = soc_abort(sf->sf_sochandle,
4722                                             sf->sf_socp, sf->sf_sochandle->
4723                                             fcal_portno, cmd->cmd_fp_pkt, 1)) !=
4724                                             FCAL_ABORTED &&
4725                                             (t != FCAL_ABORT_FAILED)) {
4726                                                 sf_log(sf, CE_NOTE,
4727                                                     "sf_abort failed, "
4728                                                     "initiating LIP\n");
4729                                                 sf_force_lip(sf);
4730                                                 deferred_destroy = 1;
4731                                         }
4732                                         } else {
4733                                         mutex_exit(&cmd->cmd_abort_mutex);
4734                                         }
4735                                 }
4736                         }
4737                         if (!deferred_destroy) {
4738                                 cmd->cmd_fp_pkt->fcal_pkt_comp =
4739                                     sf_cmd_callback;
4740                                 cmd->cmd_block->fcp_cntl.cntl_abort_tsk = 0;
4741                                 sf_scsi_destroy_pkt(ap, pkt);
4742                                 my_rval = TRUE;
4743                         }
4744                 }
4745                 mutex_enter(&sf->sf_mutex);
4746                 if (lip_cnt == sf->sf_lip_cnt) {
4747                         mutex_enter(&target->sft_mutex);
4748                         target->sft_state = old_target_state;
4749                         mutex_exit(&target->sft_mutex);
4750                 }
4751                 mutex_exit(&sf->sf_mutex);
4752         }
4753         return (my_rval);
4754 }
4755 
4756 
4757 /*
4758  * called by the transport and internally to reset a target
4759  */
4760 static int
4761 sf_reset(struct scsi_address *ap, int level)
4762 {
4763         struct scsi_pkt *pkt;
4764         struct fcal_packet *fpkt;
4765         struct sf *sf = ADDR2SF(ap);
4766         struct sf_target *target = ADDR2TARGET(ap), *ntarget;
4767         struct sf_pkt *cmd;
4768         int     rval = FALSE, t;
4769         int     lip_cnt;
4770         int     tgt_id, ret;
4771         fc_frame_header_t       *hp;
4772         int     deferred_destroy;
4773 
4774         /* We don't support RESET_LUN yet. */
4775         if (level == RESET_TARGET) {
4776                 struct sf_reset_list *p;
4777 
4778                 if ((p = kmem_alloc(sizeof (struct sf_reset_list), KM_NOSLEEP))
4779                     == NULL)
4780                         return (rval);
4781 
4782                 SF_DEBUG(2, (sf, CE_NOTE, "sf_reset target\n"));
4783                 mutex_enter(&sf->sf_mutex);
4784                 /* All target resets go to LUN 0 */
4785                 if (target->sft_lun.l) {
4786                         target = sf_lookup_target(sf, target->sft_port_wwn, 0);
4787                 }
4788                 mutex_enter(&target->sft_mutex);
4789                 if (target->sft_state & (SF_TARGET_BUSY |
4790                     SF_TARGET_OFFLINE)) {
4791                         mutex_exit(&target->sft_mutex);
4792                         mutex_exit(&sf->sf_mutex);
4793                         kmem_free(p, sizeof (struct sf_reset_list));
4794                         return (rval);
4795                 }
4796                 lip_cnt = sf->sf_lip_cnt;
4797                 target->sft_state |= SF_TARGET_BUSY;
4798                 for (ntarget = target->sft_next_lun;
4799                     ntarget;
4800                     ntarget = ntarget->sft_next_lun) {
4801                         mutex_enter(&ntarget->sft_mutex);
4802                         /*
4803                          * XXXX If we supported RESET_LUN we should check here
4804                          * to see if any LUN were being reset and somehow fail
4805                          * that operation.
4806                          */
4807                         ntarget->sft_state |= SF_TARGET_BUSY;
4808                         mutex_exit(&ntarget->sft_mutex);
4809                 }
4810                 mutex_exit(&target->sft_mutex);
4811                 mutex_exit(&sf->sf_mutex);
4812 
4813                 deferred_destroy = 0;
4814                 if ((pkt = sf_scsi_init_pkt(ap, NULL, NULL, 0,
4815                     0, 0, 0, NULL, 0)) != NULL) {
4816                         cmd = PKT2CMD(pkt);
4817                         cmd->cmd_block->fcp_cntl.cntl_reset = 1;
4818                         cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
4819                         cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
4820 
4821                         /* prepare the packet for transport */
4822                         if (sf_prepare_pkt(sf, cmd, target) == TRAN_ACCEPT) {
4823                                 /* call transport to send a pkt polled */
4824                                 cmd->cmd_state = SF_STATE_ISSUED;
4825                                 if ((ret = soc_transport_poll(sf->sf_sochandle,
4826                                     cmd->cmd_fp_pkt, SF_ABORT_TIMEOUT,
4827                                     CQ_REQUEST_1)) == FCAL_TRANSPORT_SUCCESS) {
4828                                         (void) ddi_dma_sync(cmd->cmd_cr_pool->
4829                                             rsp_dma_handle, (caddr_t)cmd->
4830                                             cmd_rsp_block - cmd->cmd_cr_pool->
4831                                             rsp_base, FCP_MAX_RSP_IU_SIZE,
4832                                             DDI_DMA_SYNC_FORKERNEL);
4833                                         fpkt = cmd->cmd_fp_pkt;
4834                                         if ((fpkt->fcal_pkt_status ==
4835                                             FCAL_STATUS_OK) &&
4836                                             (((struct fcp_rsp_info *)
4837                                             (&cmd->cmd_rsp_block->
4838                                             fcp_response_len + 1))->
4839                                             rsp_code == FCP_NO_FAILURE)) {
4840                                                 sf_log(sf, CE_NOTE,
4841                                                     "!sf%d: Target 0x%x Reset "
4842                                                     "successful\n",
4843                                                     ddi_get_instance(\
4844                                                     sf->sf_dip),
4845                                                     sf_alpa_to_switch[
4846                                                     target->sft_al_pa]);
4847                                                 rval = TRUE;
4848                                         } else {
4849                                                 hp = &cmd->cmd_fp_pkt->
4850                                                     fcal_socal_request.
4851                                                     sr_fc_frame_hdr;
4852                                                 tgt_id = sf_alpa_to_switch[
4853                                                     (uchar_t)hp->d_id];
4854                                                 sf->sf_stats.tstats[tgt_id].
4855                                                     task_mgmt_failures++;
4856                                                 sf_log(sf, CE_NOTE,
4857                                                     "!sf%d: Target 0x%x "
4858                                                     "Reset failed. "
4859                                                     "Status code 0x%x "
4860                                                     "Resp code 0x%x\n",
4861                                                     ddi_get_instance(\
4862                                                     sf->sf_dip),
4863                                                     tgt_id,
4864                                                     fpkt->fcal_pkt_status,
4865                                                     ((struct fcp_rsp_info *)
4866                                                     (&cmd->cmd_rsp_block->
4867                                                     fcp_response_len + 1))->
4868                                                     rsp_code);
4869                                         }
4870                                 } else {
4871                                         sf_log(sf, CE_NOTE, "!sf%d: Target "
4872                                             "0x%x Reset Failed. Ret=%x\n",
4873                                             ddi_get_instance(sf->sf_dip),
4874                                             sf_alpa_to_switch[
4875                                             target->sft_al_pa], ret);
4876                                         mutex_enter(&cmd->cmd_abort_mutex);
4877                                         if (cmd->cmd_state == SF_STATE_ISSUED) {
4878                                         /* call the transport to abort a cmd */
4879                                         cmd->cmd_timeout = sf_watchdog_time
4880                                             + 20;
4881                                         cmd->cmd_state = SF_STATE_ABORTING;
4882                                         mutex_exit(&cmd->cmd_abort_mutex);
4883                                         if (((t = soc_abort(sf->sf_sochandle,
4884                                             sf->sf_socp,
4885                                             sf->sf_sochandle->fcal_portno,
4886                                             cmd->cmd_fp_pkt, 1)) !=
4887                                             FCAL_ABORTED) &&
4888                                             (t != FCAL_ABORT_FAILED)) {
4889                                                 sf_log(sf, CE_NOTE,
4890                                                     "!sf%d: Target 0x%x Reset "
4891                                                     "failed. Abort Failed, "
4892                                                     "forcing LIP\n",
4893                                                     ddi_get_instance(
4894                                                     sf->sf_dip),
4895                                                     sf_alpa_to_switch[
4896                                                     target->sft_al_pa]);
4897                                                 sf_force_lip(sf);
4898                                                 rval = TRUE;
4899                                                 deferred_destroy = 1;
4900                                         }
4901                                         } else {
4902                                                 mutex_exit
4903                                                     (&cmd->cmd_abort_mutex);
4904                                         }
4905                                 }
4906                         }
4907                         /*
4908                          * Defer releasing the packet if the abort returned with
4909                          * a BAD_ABORT or timed out, because there is a
4910                          * possibility that the ucode might return it.
4911                          * We wait for at least 20s and let it be released
4912                          * by the sf_watch thread.
4913                          */
4914                         if (!deferred_destroy) {
4915                                 cmd->cmd_block->fcp_cntl.cntl_reset = 0;
4916                                 cmd->cmd_fp_pkt->fcal_pkt_comp =
4917                                     sf_cmd_callback;
4918                                 cmd->cmd_state = SF_STATE_IDLE;
4919                                 /* for cache */
4920                                 sf_scsi_destroy_pkt(ap, pkt);
4921                         }
4922                 } else {
4923                         cmn_err(CE_WARN, "!sf%d: Target 0x%x Reset Failed. "
4924                             "Resource allocation error.\n",
4925                             ddi_get_instance(sf->sf_dip),
4926                             sf_alpa_to_switch[target->sft_al_pa]);
4927                 }
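                     /*
                      * if the reset succeeded and no new LIP has occurred,
                      * put the target on sf_reset_list and schedule
                      * sf_check_reset_delay() to run after
                      * SF_TARGET_RESET_DELAY; otherwise clear the busy flags
                      * set earlier right away
                      */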
4928                 mutex_enter(&sf->sf_mutex);
4929                 if ((rval == TRUE) && (lip_cnt == sf->sf_lip_cnt)) {
4930                         p->target = target;
4931                         p->lip_cnt = lip_cnt;
4932                         p->timeout = ddi_get_lbolt() +
4933                             drv_usectohz(SF_TARGET_RESET_DELAY);
4934                         p->next = sf->sf_reset_list;
4935                         sf->sf_reset_list = p;
4936                         mutex_exit(&sf->sf_mutex);
4937                         mutex_enter(&sf_global_mutex);
4938                         if (sf_reset_timeout_id == 0) {
4939                                 sf_reset_timeout_id = timeout(
4940                                     sf_check_reset_delay, NULL,
4941                                     drv_usectohz(SF_TARGET_RESET_DELAY));
4942                         }
4943                         mutex_exit(&sf_global_mutex);
4944                 } else {
4945                         if (lip_cnt == sf->sf_lip_cnt) {
4946                                 mutex_enter(&target->sft_mutex);
4947                                 target->sft_state &= ~SF_TARGET_BUSY;
4948                                 for (ntarget = target->sft_next_lun;
4949                                     ntarget;
4950                                     ntarget = ntarget->sft_next_lun) {
4951                                         mutex_enter(&ntarget->sft_mutex);
4952                                         ntarget->sft_state &= ~SF_TARGET_BUSY;
4953                                         mutex_exit(&ntarget->sft_mutex);
4954                                 }
4955                                 mutex_exit(&target->sft_mutex);
4956                         }
4957                         mutex_exit(&sf->sf_mutex);
4958                         kmem_free(p, sizeof (struct sf_reset_list));
4959                 }
4960         } else {
4961                 mutex_enter(&sf->sf_mutex);
4962                 if ((sf->sf_state == SF_STATE_OFFLINE) &&
4963                     (sf_watchdog_time < sf->sf_timer)) {
4964                         /*
4965                          * We are currently in a lip, so let this one
4966                          * finish before forcing another one.
4967                          */
4968                         mutex_exit(&sf->sf_mutex);
4969                         return (TRUE);
4970                 }
4971                 mutex_exit(&sf->sf_mutex);
4972                 sf_log(sf, CE_NOTE, "!sf:Target driver initiated lip\n");
4973                 sf_force_lip(sf);
4974                 rval = TRUE;
4975         }
4976         return (rval);
4977 }
4978 
4979 
4980 /*
4981  * abort all commands for a target
4982  *
4983  * if try_abort is set then send an abort
4984  * if abort is set then this is an abort, else this is a reset
4985  */
4986 static void
4987 sf_abort_all(struct sf *sf, struct sf_target *target, int abort,
4988     int lip_cnt, int try_abort)
4989 {
4990         struct sf_target *ntarget;
4991         struct sf_pkt *cmd, *head = NULL, *tail = NULL, *pcmd = NULL, *tcmd;
4992         struct fcal_packet *fpkt;
4993         struct scsi_pkt *pkt;
4994         int rval = FCAL_ABORTED;
4995 
4996         /*
4997          * First pull all commands for all LUNs on this target out of the
4998          * overflow list.  We can tell it's the same target by comparing
4999          * the node WWN.
5000          */
5001         mutex_enter(&sf->sf_mutex);
5002         if (lip_cnt == sf->sf_lip_cnt) {
5003                 mutex_enter(&sf->sf_cmd_mutex);
5004                 cmd = sf->sf_pkt_head;
5005                 while (cmd != NULL) {
5006                         ntarget = ADDR2TARGET(&cmd->cmd_pkt->
5007                             pkt_address);
5008                         if (ntarget == target) {
5009                                 if (pcmd != NULL)
5010                                         pcmd->cmd_next = cmd->cmd_next;
5011                                 else
5012                                         sf->sf_pkt_head = cmd->cmd_next;
5013                                 if (sf->sf_pkt_tail == cmd) {
5014                                         sf->sf_pkt_tail = pcmd;
5015                                         if (pcmd != NULL)
5016                                                 pcmd->cmd_next = NULL;
5017                                 }
5018                                 tcmd = cmd->cmd_next;
5019                                 if (head == NULL) {
5020                                         head = cmd;
5021                                         tail = cmd;
5022                                 } else {
5023                                         tail->cmd_next = cmd;
5024                                         tail = cmd;
5025                                 }
5026                                 cmd->cmd_next = NULL;
5027                                 cmd = tcmd;
5028                         } else {
5029                                 pcmd = cmd;
5030                                 cmd = cmd->cmd_next;
5031                         }
5032                 }
5033                 mutex_exit(&sf->sf_cmd_mutex);
5034         }
5035         mutex_exit(&sf->sf_mutex);
5036 
5037         /*
5038          * Now complete all the commands on our list.  In the process,
5039          * the completion routine may take the commands off the target
5040          * lists.
5041          */
5042         cmd = head;
5043         while (cmd != NULL) {
5044                 pkt = cmd->cmd_pkt;
5045                 if (abort) {
5046                         pkt->pkt_reason = CMD_ABORTED;
5047                         pkt->pkt_statistics |= STAT_ABORTED;
5048                 } else {
5049                         pkt->pkt_reason = CMD_RESET;
5050                         pkt->pkt_statistics |= STAT_DEV_RESET;
5051                 }
5052                 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
5053                 cmd->cmd_state = SF_STATE_IDLE;
5054                 cmd = cmd->cmd_next;
5055                 /*
5056                  * call the packet completion routine only for
5057                  * non-polled commands. Ignore the polled commands as
5058                  * they time out and will be handled differently
5059                  */
5060                 if ((pkt->pkt_comp) && !(pkt->pkt_flags & FLAG_NOINTR))
5061                         (*pkt->pkt_comp)(pkt);
5062 
5063         }
5064 
5065         /*
5066          * Finally get all outstanding commands for each LUN, and abort them if
5067          * they've been issued, and call the completion routine.
5068          * For the case where sf_offline_target is called from sf_watch
5069          * due to an Offline Timeout, it is quite possible that the soc+
5070          * ucode is hosed and therefore cannot return the commands.
5071          * Clean up all the issued commands as well.
5072          * Try_abort will be false only if sf_abort_all is coming from
5073          * sf_offline_target.
5074          */
5075 
5076         if (try_abort || sf->sf_state == SF_STATE_OFFLINE) {
5077                 mutex_enter(&target->sft_pkt_mutex);
5078                 cmd = tcmd = target->sft_pkt_head;
5079                 while (cmd != (struct sf_pkt *)&target->sft_pkt_head) {
5080                         fpkt = cmd->cmd_fp_pkt;
5081                         pkt = cmd->cmd_pkt;
5082                         mutex_enter(&cmd->cmd_abort_mutex);
5083                         if ((cmd->cmd_state == SF_STATE_ISSUED) &&
5084                             (fpkt->fcal_cmd_state &
5085                             FCAL_CMD_IN_TRANSPORT) &&
5086                             ((fpkt->fcal_cmd_state & FCAL_CMD_COMPLETE) ==
5087                             0) && !(pkt->pkt_flags & FLAG_NOINTR)) {
5088                                 cmd->cmd_state = SF_STATE_ABORTING;
5089                                 cmd->cmd_timeout = sf_watchdog_time +
5090                                     cmd->cmd_pkt->pkt_time + 20;
5091                                 mutex_exit(&cmd->cmd_abort_mutex);
5092                                 mutex_exit(&target->sft_pkt_mutex);
5093                                 if (try_abort) {
5094                                         /* call the transport to abort a pkt */
5095                                         rval = soc_abort(sf->sf_sochandle,
5096                                             sf->sf_socp,
5097                                             sf->sf_sochandle->fcal_portno,
5098                                             fpkt, 1);
5099                                 }
5100                                 if ((rval == FCAL_ABORTED) ||
5101                                     (rval == FCAL_ABORT_FAILED)) {
5102                                         if (abort) {
5103                                                 pkt->pkt_reason = CMD_ABORTED;
5104                                                 pkt->pkt_statistics |=
5105                                                     STAT_ABORTED;
5106                                         } else {
5107                                                 pkt->pkt_reason = CMD_RESET;
5108                                                 pkt->pkt_statistics |=
5109                                                     STAT_DEV_RESET;
5110                                         }
5111                                         cmd->cmd_state = SF_STATE_IDLE;
5112                                         if (pkt->pkt_comp)
5113                                                 (*pkt->pkt_comp)(pkt);
5114                                 }
5115                                 mutex_enter(&sf->sf_mutex);
5116                                 if (lip_cnt != sf->sf_lip_cnt) {
5117                                         mutex_exit(&sf->sf_mutex);
5118                                         return;
5119                                 }
5120                                 mutex_exit(&sf->sf_mutex);
5121                                 mutex_enter(&target->sft_pkt_mutex);
5122                                 cmd = target->sft_pkt_head;
5123                         } else {
5124                                 mutex_exit(&cmd->cmd_abort_mutex);
5125                                 cmd = cmd->cmd_forw;
5126                         }
5127                 }
5128                 mutex_exit(&target->sft_pkt_mutex);
5129         }
5130 }
5131 
5132 
5133 /*
5134  * called by the transport to start a packet
5135  */
5136 static int
5137 sf_start(struct scsi_address *ap, struct scsi_pkt *pkt)
5138 {
5139         struct sf *sf = ADDR2SF(ap);
5140         struct sf_target *target = ADDR2TARGET(ap);
5141         struct sf_pkt *cmd = PKT2CMD(pkt);
5142         int rval;
5143 
5144 
5145         SF_DEBUG(6, (sf, CE_NOTE, "sf_start\n"));
5146 
5147         if (cmd->cmd_state == SF_STATE_ISSUED) {
5148                 cmn_err(CE_PANIC, "sf: issuing packet twice 0x%p\n",
5149                     (void *)cmd);
5150         }
5151 
5152         /* prepare the packet for transport */
5153         if ((rval = sf_prepare_pkt(sf, cmd, target)) != TRAN_ACCEPT) {
5154                 return (rval);
5155         }
5156 
5157         if (target->sft_state & (SF_TARGET_BUSY|SF_TARGET_OFFLINE)) {
5158                 if (target->sft_state & SF_TARGET_OFFLINE) {
5159                         return (TRAN_FATAL_ERROR);
5160                 }
5161                 if (pkt->pkt_flags & FLAG_NOINTR) {
5162                         return (TRAN_BUSY);
5163                 }
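                     /*
                      * The target is busy: queue this command under
                      * sf_cmd_mutex so sf_throttle_start() can issue
                      * it once the target is free again.
                      */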
5164                 mutex_enter(&sf->sf_cmd_mutex);
5165                 sf->sf_use_lock = TRUE;
5166                 goto enque;
5167         }
5168 
5169 
5170         /* if no interrupts then do polled I/O */
5171         if (pkt->pkt_flags & FLAG_NOINTR) {
5172                 return (sf_dopoll(sf, cmd));
5173         }
5174 
5175         /* regular interrupt-driven I/O */
5176 
5177         if (!sf->sf_use_lock) {
5178 
5179                 /* locking not needed */
5180 
5181                 cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
5182                     sf_watchdog_time + cmd->cmd_pkt->pkt_time : 0;
5183                 cmd->cmd_state = SF_STATE_ISSUED;
5184 
5185                 /* call the transport to send a pkt */
5186                 if (soc_transport(sf->sf_sochandle, cmd->cmd_fp_pkt,
5187                     FCAL_NOSLEEP, CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
5188                         cmd->cmd_state = SF_STATE_IDLE;
5189                         return (TRAN_BADPKT);
5190                 }
5191                 return (TRAN_ACCEPT);
5192         }
5193 
5194         /* regular I/O using locking */
5195 
5196         mutex_enter(&sf->sf_cmd_mutex);
5197         if ((sf->sf_ncmds >= sf->sf_throttle) ||
5198             (sf->sf_pkt_head != NULL)) {
5199 enque:
5200                 /*
5201                  * either we're throttling back or there are already commands
5202                  * on the queue, so enqueue this one for later
5203                  */
5204                 cmd->cmd_flags |= CFLAG_IN_QUEUE;
5205                 if (sf->sf_pkt_head != NULL) {
5206                         /* add to the queue */
5207                         sf->sf_pkt_tail->cmd_next = cmd;
5208                         cmd->cmd_next = NULL;
5209                         sf->sf_pkt_tail = cmd;
5210                 } else {
5211                         /* this is the first entry in the queue */
5212                         sf->sf_pkt_head = sf->sf_pkt_tail = cmd;
5213                         cmd->cmd_next = NULL;
5214                 }
5215                 mutex_exit(&sf->sf_cmd_mutex);
5216                 return (TRAN_ACCEPT);
5217         }
5218 
5219         /*
5220          * start this packet now
5221          */
5222 
5223         /* still have cmd mutex */
5224         return (sf_start_internal(sf, cmd));
5225 }
5226 
5227 
5228 /*
5229  * internal routine to start a packet from the queue now
5230  *
5231  * enter with cmd mutex held and leave with it released
5232  */
5233 static int
5234 sf_start_internal(struct sf *sf, struct sf_pkt *cmd)
5235 {
5236         /* we have the cmd mutex */
5237         sf->sf_ncmds++;
5238         mutex_exit(&sf->sf_cmd_mutex);
5239 
5240         ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5241         SF_DEBUG(6, (sf, CE_NOTE, "sf_start_internal\n"));
5242 
5243         cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ? sf_watchdog_time +
5244             cmd->cmd_pkt->pkt_time : 0;
5245         cmd->cmd_state = SF_STATE_ISSUED;
5246 
5247         /* call transport to send the pkt */
5248         if (soc_transport(sf->sf_sochandle, cmd->cmd_fp_pkt, FCAL_NOSLEEP,
5249             CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
5250                 cmd->cmd_state = SF_STATE_IDLE;
5251                 mutex_enter(&sf->sf_cmd_mutex);
5252                 sf->sf_ncmds--;
5253                 mutex_exit(&sf->sf_cmd_mutex);
5254                 return (TRAN_BADPKT);
5255         }
5256         return (TRAN_ACCEPT);
5257 }
5258 
5259 
5260 /*
5261  * prepare a packet for transport
5262  */
5263 static int
5264 sf_prepare_pkt(struct sf *sf, struct sf_pkt *cmd, struct sf_target *target)
5265 {
5266         struct fcp_cmd *fcmd = cmd->cmd_block;
5267 
5268 /* XXXX Need to set the LUN ? */
5269         bcopy((caddr_t)&target->sft_lun.b,
5270             (caddr_t)&fcmd->fcp_ent_addr,
5271             FCP_LUN_SIZE);
5272         cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
5273         cmd->cmd_pkt->pkt_state = 0;
5274         cmd->cmd_pkt->pkt_statistics = 0;
5275 
5276 
5277         if ((cmd->cmd_pkt->pkt_comp == NULL) &&
5278             ((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0)) {
5279                 return (TRAN_BADPKT);
5280         }
5281 
5282         /* invalidate imp field(s) of rsp block */
5283         cmd->cmd_rsp_block->fcp_u.i_fcp_status = SF_BAD_DMA_MAGIC;
5284 
5285         /* set up amt of I/O to do */
5286         if (cmd->cmd_flags & CFLAG_DMAVALID) {
5287                 cmd->cmd_pkt->pkt_resid = cmd->cmd_dmacount;
5288                 if (cmd->cmd_flags & CFLAG_CMDIOPB) {
5289                         (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
5290                             DDI_DMA_SYNC_FORDEV);
5291                 }
5292         } else {
5293                 cmd->cmd_pkt->pkt_resid = 0;
5294         }
5295 
5296         /* set up the Tagged Queuing type */
5297         if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
5298                 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
5299         } else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
5300                 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
5301         }
5302 
5303         /*
5304          * Sync the cmd segment
5305          */
5306         (void) ddi_dma_sync(cmd->cmd_cr_pool->cmd_dma_handle,
5307             (caddr_t)fcmd - cmd->cmd_cr_pool->cmd_base,
5308             sizeof (struct fcp_cmd), DDI_DMA_SYNC_FORDEV);
5309 
5310         sf_fill_ids(sf, cmd, target);
5311         return (TRAN_ACCEPT);
5312 }
5313 
5314 
5315 /*
5316  * fill in packet hdr source and destination IDs and hdr byte count
5317  */
5318 static void
5319 sf_fill_ids(struct sf *sf, struct sf_pkt *cmd, struct sf_target *target)
5320 {
5321         struct fcal_packet *fpkt = cmd->cmd_fp_pkt;
5322         fc_frame_header_t       *hp;
5323 
5324 
5325         hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
5326         hp->d_id = target->sft_al_pa;
5327         hp->s_id = sf->sf_al_pa;
5328         fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
5329             cmd->cmd_dmacookie.dmac_size;
5330 }
5331 
5332 
5333 /*
5334  * do polled I/O using transport
5335  */
5336 static int
5337 sf_dopoll(struct sf *sf, struct sf_pkt *cmd)
5338 {
5339         int timeout;
5340         int rval;
5341 
5342 
5343         mutex_enter(&sf->sf_cmd_mutex);
5344         sf->sf_ncmds++;
5345         mutex_exit(&sf->sf_cmd_mutex);
5346 
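             /*
              * Poll for up to pkt_time seconds (or SF_POLL_TIMEOUT if none
              * was given); soc_transport_poll() takes microseconds.
              */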
5347         timeout = cmd->cmd_pkt->pkt_time ? cmd->cmd_pkt->pkt_time
5348             : SF_POLL_TIMEOUT;
5349         cmd->cmd_timeout = 0;
5350         cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
5351         cmd->cmd_state = SF_STATE_ISSUED;
5352 
5353         /* call transport to send a pkt polled */
5354         rval = soc_transport_poll(sf->sf_sochandle, cmd->cmd_fp_pkt,
5355             timeout*1000000, CQ_REQUEST_1);
5356         mutex_enter(&cmd->cmd_abort_mutex);
5357         cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
5358         if (rval != FCAL_TRANSPORT_SUCCESS) {
5359                 if (rval == FCAL_TRANSPORT_TIMEOUT) {
5360                         cmd->cmd_state = SF_STATE_ABORTING;
5361                         mutex_exit(&cmd->cmd_abort_mutex);
5362                         (void) sf_target_timeout(sf, cmd);
5363                 } else {
5364                         mutex_exit(&cmd->cmd_abort_mutex);
5365                 }
5366                 cmd->cmd_state = SF_STATE_IDLE;
5367                 cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
5368                 mutex_enter(&sf->sf_cmd_mutex);
5369                 sf->sf_ncmds--;
5370                 mutex_exit(&sf->sf_cmd_mutex);
5371                 return (TRAN_BADPKT);
5372         }
5373         mutex_exit(&cmd->cmd_abort_mutex);
5374         cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
5375         sf_cmd_callback(cmd->cmd_fp_pkt);
5376         return (TRAN_ACCEPT);
5377 }
5378 
5379 
5380 /* a shortcut for defining debug messages below */
5381 #ifdef  DEBUG
5382 #define SF_DMSG1(s)             msg1 = s
5383 #else
5384 #define SF_DMSG1(s)             /* do nothing */
5385 #endif
5386 
5387 
5388 /*
5389  * the pkt_comp callback for command packets
5390  */
5391 static void
5392 sf_cmd_callback(struct fcal_packet *fpkt)
5393 {
5394         struct sf_pkt *cmd = (struct sf_pkt *)fpkt->fcal_pkt_private;
5395         struct scsi_pkt *pkt = cmd->cmd_pkt;
5396         struct sf *sf = ADDR2SF(&pkt->pkt_address);
5397         struct sf_target *target = ADDR2TARGET(&pkt->pkt_address);
5398         struct fcp_rsp *rsp;
5399         char *msg1 = NULL;
5400         char *msg2 = NULL;
5401         short ncmds;
5402         int tgt_id;
5403         int good_scsi_status = TRUE;
5404 
5405 
5406 
5407         if (cmd->cmd_state == SF_STATE_IDLE) {
5408                 cmn_err(CE_PANIC, "sf: completing idle packet 0x%p\n",
5409                     (void *)cmd);
5410         }
5411 
5412         mutex_enter(&cmd->cmd_abort_mutex);
5413         if (cmd->cmd_state == SF_STATE_ABORTING) {
5414                 /* cmd already being aborted -- nothing to do */
5415                 mutex_exit(&cmd->cmd_abort_mutex);
5416                 return;
5417         }
5418 
5419         cmd->cmd_state = SF_STATE_IDLE;
5420         mutex_exit(&cmd->cmd_abort_mutex);
5421 
5422         if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
5423 
5424                 (void) ddi_dma_sync(cmd->cmd_cr_pool->rsp_dma_handle,
5425                     (caddr_t)cmd->cmd_rsp_block - cmd->cmd_cr_pool->rsp_base,
5426                     FCP_MAX_RSP_IU_SIZE, DDI_DMA_SYNC_FORKERNEL);
5427 
5428                 rsp = (struct fcp_rsp *)cmd->cmd_rsp_block;
5429 
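                     /*
                      * If the status field still holds the magic value
                      * seeded in sf_prepare_pkt(), the response was never
                      * DMA'd back, so treat the command as incomplete.
                      */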
5430                 if (rsp->fcp_u.i_fcp_status == SF_BAD_DMA_MAGIC) {
5431 
5432                         if (sf_core && (sf_core & SF_CORE_BAD_DMA)) {
5433                                 sf_token = (int *)(uintptr_t)
5434                                     fpkt->fcal_socal_request.\
5435                                     sr_soc_hdr.sh_request_token;
5436                                 (void) soc_take_core(sf->sf_sochandle,
5437                                     sf->sf_socp);
5438                         }
5439 
5440                         pkt->pkt_reason = CMD_INCOMPLETE;
5441                         pkt->pkt_state = STATE_GOT_BUS;
5442                         pkt->pkt_statistics |= STAT_ABORTED;
5443 
5444                 } else {
5445 
5446                         pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
5447                             STATE_SENT_CMD | STATE_GOT_STATUS;
5448                         pkt->pkt_resid = 0;
5449                         if (cmd->cmd_flags & CFLAG_DMAVALID) {
5450                                 pkt->pkt_state |= STATE_XFERRED_DATA;
5451                         }
5452 
5453                         if ((pkt->pkt_scbp != NULL) &&
5454                             ((*(pkt->pkt_scbp) =
5455                             rsp->fcp_u.fcp_status.scsi_status)
5456                             != STATUS_GOOD)) {
5457                                 good_scsi_status = FALSE;
5458                         /*
5459                          * The next two checks make sure that if there
5460                          * is neither sense data nor a valid response and
5461                          * the command came back with a check condition,
5462                          * the command should be retried
5463                          */
5464                                 if (!rsp->fcp_u.fcp_status.rsp_len_set &&
5465                                     !rsp->fcp_u.fcp_status.sense_len_set) {
5466                                         pkt->pkt_state &= ~STATE_XFERRED_DATA;
5467                                         pkt->pkt_resid = cmd->cmd_dmacount;
5468                                 }
5469                         }
5470 
5471                         if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
5472                             (pkt->pkt_state & STATE_XFERRED_DATA)) {
5473                                 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0,
5474                                     (uint_t)0, DDI_DMA_SYNC_FORCPU);
5475                         }
5476                         /*
5477                          * Update the transfer resid, if appropriate
5478                          */
5479                         if (rsp->fcp_u.fcp_status.resid_over ||
5480                             rsp->fcp_u.fcp_status.resid_under)
5481                                 pkt->pkt_resid = rsp->fcp_resid;
5482 
5483                         /*
5484                          * Check to see if the SCSI command failed.
5485                          *
5486                          */
5487 
5488                         /*
5489                          * First see if we got a FCP protocol error.
5490                          */
5491                         if (rsp->fcp_u.fcp_status.rsp_len_set) {
5492                                 struct fcp_rsp_info *bep;
5493 
5494                                 bep = (struct fcp_rsp_info *)
5495                                     (&rsp->fcp_response_len + 1);
5496                                 if (bep->rsp_code != FCP_NO_FAILURE) {
5497                                         pkt->pkt_reason = CMD_TRAN_ERR;
5498                                         tgt_id = pkt->pkt_address.a_target;
5499                                         switch (bep->rsp_code) {
5500                                         case FCP_CMND_INVALID:
5501                                                 SF_DMSG1("FCP_RSP FCP_CMND "
5502                                                     "fields invalid");
5503                                                 break;
5504                                         case FCP_TASK_MGMT_NOT_SUPPTD:
5505                                                 SF_DMSG1("FCP_RSP Task "
5506                                                     "Management Function "
5507                                                     "Not Supported");
5508                                                 break;
5509                                         case FCP_TASK_MGMT_FAILED:
5510                                                 SF_DMSG1("FCP_RSP Task "
5511                                                     "Management Function "
5512                                                     "Failed");
5513                                                 sf->sf_stats.tstats[tgt_id].
5514                                                     task_mgmt_failures++;
5515                                                 break;
5516                                         case FCP_DATA_RO_MISMATCH:
5517                                                 SF_DMSG1("FCP_RSP FCP_DATA RO "
5518                                                     "mismatch with "
5519                                                     "FCP_XFER_RDY DATA_RO");
5520                                                 sf->sf_stats.tstats[tgt_id].
5521                                                     data_ro_mismatches++;
5522                                                 break;
5523                                         case FCP_DL_LEN_MISMATCH:
5524                                                 SF_DMSG1("FCP_RSP FCP_DATA "
5525                                                     "length "
5526                                                     "different than BURST_LEN");
5527                                                 sf->sf_stats.tstats[tgt_id].
5528                                                     dl_len_mismatches++;
5529                                                 break;
5530                                         default:
5531                                                 SF_DMSG1("FCP_RSP invalid "
5532                                                     "RSP_CODE");
5533                                                 break;
5534                                         }
5535                                 }
5536                         }
5537 
5538                         /*
5539                          * See if we got a SCSI error with sense data
5540                          */
5541                         if (rsp->fcp_u.fcp_status.sense_len_set) {
5542                                 uchar_t rqlen = min(rsp->fcp_sense_len,
5543                                     sizeof (struct scsi_extended_sense));
5544                                 caddr_t sense = (caddr_t)rsp +
5545                                     sizeof (struct fcp_rsp) +
5546                                     rsp->fcp_response_len;
5547                                 struct scsi_arq_status *arq;
5548                                 struct scsi_extended_sense *sensep =
5549                                     (struct scsi_extended_sense *)sense;
5550 
5551                                 if (rsp->fcp_u.fcp_status.scsi_status !=
5552                                     STATUS_GOOD) {
5553                                 if (rsp->fcp_u.fcp_status.scsi_status
5554                                     == STATUS_CHECK) {
5555                                         if (sensep->es_key ==
5556                                             KEY_RECOVERABLE_ERROR)
5557                                                 good_scsi_status = TRUE;
5558                                         if (sensep->es_key ==
5559                                             KEY_UNIT_ATTENTION &&
5560                                             sensep->es_add_code == 0x3f &&
5561                                             sensep->es_qual_code == 0x0e) {
5562                                                 /* REPORT_LUNS_HAS_CHANGED */
5563                                                 sf_log(sf, CE_NOTE,
5564                                                 "!REPORT_LUNS_HAS_CHANGED\n");
5565                                                 sf_force_lip(sf);
5566                                         }
5567                                 }
5568                                 }
5569 
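                                     /*
                                      * If the caller's scb can hold a
                                      * full ARQ status, build one and
                                      * copy in the returned sense data.
                                      */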
5570                                 if ((pkt->pkt_scbp != NULL) &&
5571                                     (cmd->cmd_scblen >=
5572                                         sizeof (struct scsi_arq_status))) {
5573 
5574                                 pkt->pkt_state |= STATE_ARQ_DONE;
5575 
5576                                 arq = (struct scsi_arq_status *)pkt->pkt_scbp;
5577                                 /*
5578                                  * copy out sense information
5579                                  */
5580                                 bcopy(sense, (caddr_t)&arq->sts_sensedata,
5581                                     rqlen);
5582                                 arq->sts_rqpkt_resid =
5583                                     sizeof (struct scsi_extended_sense) -
5584                                         rqlen;
5585                                 *((uchar_t *)&arq->sts_rqpkt_status) =
5586                                     STATUS_GOOD;
5587                                 arq->sts_rqpkt_reason = 0;
5588                                 arq->sts_rqpkt_statistics = 0;
5589                                 arq->sts_rqpkt_state = STATE_GOT_BUS |
5590                                     STATE_GOT_TARGET | STATE_SENT_CMD |
5591                                     STATE_GOT_STATUS | STATE_ARQ_DONE |
5592                                     STATE_XFERRED_DATA;
5593                                 }
5594                                 target->sft_alive = TRUE;
5595                         }
5596 
5597                         /*
5598                          * The firmware returns the number of bytes actually
5599                          * transferred into/out of the host. If this differs
5600                          * from what we asked for, we may have lost frames.
5601                          */
5602                         if ((pkt->pkt_reason == 0) && (pkt->pkt_resid == 0) &&
5603                             (good_scsi_status) &&
5604                             (pkt->pkt_state & STATE_XFERRED_DATA) &&
5605                             (!(cmd->cmd_flags & CFLAG_CMDIOPB)) &&
5606                             (target->sft_device_type != DTYPE_ESI)) {
5607                                 int byte_cnt =
5608                                     fpkt->fcal_socal_request.
5609                                     sr_soc_hdr.sh_byte_cnt;
5610                                 if (cmd->cmd_flags & CFLAG_DMASEND) {
5611                                         if (byte_cnt != 0) {
5612                                         sf_log(sf, CE_NOTE,
5613                                             "!sf_cmd_callback: Lost Frame: "
5614                                             "(write) received 0x%x expected"
5615                                             " 0x%x target 0x%x\n",
5616                                             byte_cnt, cmd->cmd_dmacount,
5617                                             sf_alpa_to_switch[
5618                                             target->sft_al_pa]);
5619                                         pkt->pkt_reason = CMD_INCOMPLETE;
5620                                         pkt->pkt_statistics |= STAT_ABORTED;
5621                                         }
5622                                 } else if (byte_cnt < cmd->cmd_dmacount) {
5623                                         sf_log(sf, CE_NOTE,
5624                                             "!sf_cmd_callback: "
5625                                             "Lost Frame: (read) "
5626                                             "received 0x%x expected 0x%x "
5627                                             "target 0x%x\n", byte_cnt,
5628                                             cmd->cmd_dmacount,
5629                                             sf_alpa_to_switch[
5630                                             target->sft_al_pa]);
5631                                         pkt->pkt_reason = CMD_INCOMPLETE;
5632                                         pkt->pkt_statistics |= STAT_ABORTED;
5633                                 }
5634                         }
5635                 }
5636 
5637         } else {
5638 
5639                 /* pkt status was not ok */
5640 
5641                 switch (fpkt->fcal_pkt_status) {
5642 
5643                 case FCAL_STATUS_ERR_OFFLINE:
5644                         SF_DMSG1("Fibre Channel Offline");
5645                         mutex_enter(&target->sft_mutex);
5646                         if (!(target->sft_state & SF_TARGET_OFFLINE)) {
5647                                 target->sft_state |= (SF_TARGET_BUSY
5648                                     | SF_TARGET_MARK);
5649                         }
5650                         mutex_exit(&target->sft_mutex);
5651                         (void) ndi_event_retrieve_cookie(sf->sf_event_hdl,
5652                             target->sft_dip, FCAL_REMOVE_EVENT,
5653                             &sf_remove_eid, NDI_EVENT_NOPASS);
5654                         (void) ndi_event_run_callbacks(sf->sf_event_hdl,
5655                             target->sft_dip, sf_remove_eid, NULL);
5656                         pkt->pkt_reason = CMD_TRAN_ERR;
5657                         pkt->pkt_statistics |= STAT_BUS_RESET;
5658                         break;
5659 
5660                 case FCAL_STATUS_MAX_XCHG_EXCEEDED:
5661                         sf_throttle(sf);
5662                         sf->sf_use_lock = TRUE;
5663                         pkt->pkt_reason = CMD_TRAN_ERR;
5664                         pkt->pkt_state = STATE_GOT_BUS;
5665                         pkt->pkt_statistics |= STAT_ABORTED;
5666                         break;
5667 
5668                 case FCAL_STATUS_TIMEOUT:
5669                         SF_DMSG1("Fibre Channel Timeout");
5670                         pkt->pkt_reason = CMD_TIMEOUT;
5671                         break;
5672 
5673                 case FCAL_STATUS_ERR_OVERRUN:
5674                         SF_DMSG1("CMD_DATA_OVR");
5675                         pkt->pkt_reason = CMD_DATA_OVR;
5676                         break;
5677 
5678                 case FCAL_STATUS_UNKNOWN_CQ_TYPE:
5679                         SF_DMSG1("Unknown CQ type");
5680                         pkt->pkt_reason = CMD_TRAN_ERR;
5681                         break;
5682 
5683                 case FCAL_STATUS_BAD_SEG_CNT:
5684                         SF_DMSG1("Bad SEG CNT");
5685                         pkt->pkt_reason = CMD_TRAN_ERR;
5686                         break;
5687 
5688                 case FCAL_STATUS_BAD_XID:
5689                         SF_DMSG1("Fibre Channel Invalid X_ID");
5690                         pkt->pkt_reason = CMD_TRAN_ERR;
5691                         break;
5692 
5693                 case FCAL_STATUS_XCHG_BUSY:
5694                         SF_DMSG1("Fibre Channel Exchange Busy");
5695                         pkt->pkt_reason = CMD_TRAN_ERR;
5696                         break;
5697 
5698                 case FCAL_STATUS_INSUFFICIENT_CQES:
5699                         SF_DMSG1("Insufficient CQEs");
5700                         pkt->pkt_reason = CMD_TRAN_ERR;
5701                         break;
5702 
5703                 case FCAL_STATUS_ALLOC_FAIL:
5704                         SF_DMSG1("ALLOC FAIL");
5705                         pkt->pkt_reason = CMD_TRAN_ERR;
5706                         break;
5707 
5708                 case FCAL_STATUS_BAD_SID:
5709                         SF_DMSG1("Fibre Channel Invalid S_ID");
5710                         pkt->pkt_reason = CMD_TRAN_ERR;
5711                         break;
5712 
5713                 case FCAL_STATUS_INCOMPLETE_DMA_ERR:
5714                         if (sf_core && (sf_core & SF_CORE_INCOMPLETE_DMA)) {
5715                                 sf_token = (int *)(uintptr_t)
5716                                     fpkt->fcal_socal_request.\
5717                                     sr_soc_hdr.sh_request_token;
5718                                 (void) soc_take_core(sf->sf_sochandle,
5719                                     sf->sf_socp);
5720                                 sf_core = 0;
5721                         }
5722                         msg2 =
5723                         "INCOMPLETE DMA XFER due to bad SOC+ card, replace HBA";
5724                         pkt->pkt_reason = CMD_INCOMPLETE;
5725                         pkt->pkt_state = STATE_GOT_BUS;
5726                         pkt->pkt_statistics |= STAT_ABORTED;
5727                         break;
5728 
5729                 case FCAL_STATUS_CRC_ERR:
5730                         msg2 = "Fibre Channel CRC Error on frames";
5731                         pkt->pkt_reason = CMD_INCOMPLETE;
5732                         pkt->pkt_state = STATE_GOT_BUS;
5733                         pkt->pkt_statistics |= STAT_ABORTED;
5734                         break;
5735 
5736                 case FCAL_STATUS_NO_SEQ_INIT:
5737                         SF_DMSG1("Fibre Channel Seq Init Error");
5738                         pkt->pkt_reason = CMD_TRAN_ERR;
5739                         break;
5740 
5741                 case  FCAL_STATUS_OPEN_FAIL:
5742                         pkt->pkt_reason = CMD_TRAN_ERR;
5743                         SF_DMSG1("Fibre Channel Open Failure");
5744                         if ((target->sft_state & (SF_TARGET_BUSY |
5745                             SF_TARGET_MARK | SF_TARGET_OFFLINE)) == 0) {
5746                                 sf_log(sf, CE_NOTE,
5747                                     "!Open failure to target 0x%x "
5748                                     "forcing LIP\n",
5749                                     sf_alpa_to_switch[target->sft_al_pa]);
5750                                 sf_force_lip(sf);
5751                         }
5752                         break;
5753 
5754 
5755                 case FCAL_STATUS_ONLINE_TIMEOUT:
5756                         SF_DMSG1("Fibre Channel Online Timeout");
5757                         pkt->pkt_reason = CMD_TRAN_ERR;
5758                         break;
5759 
5760                 default:
5761                         SF_DMSG1("Unknown FC Status");
5762                         pkt->pkt_reason = CMD_TRAN_ERR;
5763                         break;
5764                 }
5765         }
5766 
5767 #ifdef  DEBUG
5768         /*
5769          * msg1 will be non-NULL if we've detected some sort of error
5770          */
5771         if (msg1 != NULL && sfdebug >= 4) {
5772                 sf_log(sf, CE_WARN,
5773                     "!Transport error on cmd=0x%p target=0x%x:  %s\n",
5774                     (void *)fpkt, pkt->pkt_address.a_target, msg1);
5775         }
5776 #endif
5777 
5778         if (msg2 != NULL) {
5779                 sf_log(sf, CE_WARN, "!Transport error on target=0x%x:  %s\n",
5780                     pkt->pkt_address.a_target, msg2);
5781         }
5782 
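             /*
              * The transport reports how many commands it still has
              * outstanding; if we are close to the throttle limit, fall
              * back to the locked (queued) submission path in sf_start().
              */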
5783         ncmds = fpkt->fcal_ncmds;
5784         ASSERT(ncmds >= 0);
5785         if (ncmds >= (sf->sf_throttle - SF_HI_CMD_DELTA)) {
5786 #ifdef DEBUG
5787                 if (!sf->sf_use_lock) {
5788                         SF_DEBUG(4, (sf, CE_NOTE, "use lock flag on\n"));
5789                 }
5790 #endif
5791                 sf->sf_use_lock = TRUE;
5792         }
5793 
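             /* resync our command count and restart any queued commands */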
5794         mutex_enter(&sf->sf_cmd_mutex);
5795         sf->sf_ncmds = ncmds;
5796         sf_throttle_start(sf);
5797         mutex_exit(&sf->sf_cmd_mutex);
5798 
5799         if (!msg1 && !msg2)
5800                 SF_DEBUG(6, (sf, CE_NOTE, "Completing pkt 0x%p\n",
5801                     (void *)pkt));
5802         if (pkt->pkt_comp != NULL) {
5803                 (*pkt->pkt_comp)(pkt);
5804         }
5805 }
5806 
5807 #undef  SF_DMSG1
5808 
5809 
5810 
5811 /*
5812  * start throttling for this instance
5813  */
5814 static void
5815 sf_throttle_start(struct sf *sf)
5816 {
5817         struct sf_pkt *cmd, *prev_cmd = NULL;
5818         struct scsi_pkt *pkt;
5819         struct sf_target *target;
5820 
5821 
5822         ASSERT(mutex_owned(&sf->sf_cmd_mutex));
5823 
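             /*
              * Issue deferred commands from the queue until it drains,
              * we leave the ONLINE state, or we hit the throttle limit;
              * commands for busy targets stay queued.
              */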
5824         cmd = sf->sf_pkt_head;
5825         while ((cmd != NULL) &&
5826             (sf->sf_state == SF_STATE_ONLINE) &&
5827             (sf->sf_ncmds < sf->sf_throttle)) {
5828 
5829                 pkt = CMD2PKT(cmd);
5830 
5831                 target = ADDR2TARGET(&pkt->pkt_address);
5832                 if (target->sft_state & SF_TARGET_BUSY) {
5833                         /* target busy -- skip this command for now */
5834                         ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5835                         prev_cmd = cmd;
5836                         cmd = cmd->cmd_next;
5837                         continue;
5838                 }
5839 
5840                 ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5841 
5842                 /* this cmd not busy and not issued */
5843 
5844                 /* remove this packet from the queue */
5845                 if (sf->sf_pkt_head == cmd) {
5846                         /* this was the first packet */
5847                         sf->sf_pkt_head = cmd->cmd_next;
5848                 } else if (sf->sf_pkt_tail == cmd) {
5849                         /* this was the last packet */
5850                         sf->sf_pkt_tail = prev_cmd;
5851                         if (prev_cmd != NULL) {
5852                                 prev_cmd->cmd_next = NULL;
5853                         }
5854                 } else {
5855                         /* some packet in the middle of the queue */
5856                         ASSERT(prev_cmd != NULL);
5857                         prev_cmd->cmd_next = cmd->cmd_next;
5858                 }
5859                 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
5860 
5861                 if (target->sft_state & SF_TARGET_OFFLINE) {
5862                         mutex_exit(&sf->sf_cmd_mutex);
5863                         pkt->pkt_reason = CMD_TRAN_ERR;
5864                         if (pkt->pkt_comp != NULL) {
5865                                 (*pkt->pkt_comp)(cmd->cmd_pkt);
5866                         }
5867                 } else {
5868                         sf_fill_ids(sf, cmd, target);
5869                         if (sf_start_internal(sf, cmd) != TRAN_ACCEPT) {
5870                                 pkt->pkt_reason = CMD_TRAN_ERR;
5871                                 if (pkt->pkt_comp != NULL) {
5872                                         (*pkt->pkt_comp)(cmd->cmd_pkt);
5873                                 }
5874                         }
5875                 }
5876                 mutex_enter(&sf->sf_cmd_mutex);
5877                 cmd = sf->sf_pkt_head;
5878                 prev_cmd = NULL;
5879         }
5880 }
5881 
5882 
5883 /*
5884  * called when the max exchange value is exceeded to throttle back commands
5885  */
5886 static void
5887 sf_throttle(struct sf *sf)
5888 {
5889         int cmdmax = sf->sf_sochandle->fcal_cmdmax;
5890 
5891 
5892         mutex_enter(&sf->sf_cmd_mutex);
5893 
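             /*
              * Flag the overflow so sf_watch() will not raise the
              * throttle this interval, then cut the throttle back.
              */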
5894         sf->sf_flag = TRUE;
5895 
5896         if (sf->sf_ncmds > (cmdmax / 2)) {
5897                 sf->sf_throttle = cmdmax / 2;
5898         } else {
5899                 if (sf->sf_ncmds > SF_DECR_DELTA) {
5900                         sf->sf_throttle = sf->sf_ncmds - SF_DECR_DELTA;
5901                 } else {
5902                         /*
5903                          * This case is just a safeguard; it should not really
5904                          * happen (ncmds < SF_DECR_DELTA, MAX_EXCHG exceeded).
5905                          */
5906                         sf->sf_throttle = SF_DECR_DELTA;
5907                 }
5908         }
5909         mutex_exit(&sf->sf_cmd_mutex);
5910 
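             /* if there is a sibling instance on this soc+, throttle it too */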
5911         sf = sf->sf_sibling;
5912         if (sf != NULL) {
5913                 mutex_enter(&sf->sf_cmd_mutex);
5914                 sf->sf_flag = TRUE;
5915                 if (sf->sf_ncmds >= (cmdmax / 2)) {
5916                         sf->sf_throttle = cmdmax / 2;
5917                 } else {
5918                         if (sf->sf_ncmds > SF_DECR_DELTA) {
5919                                 sf->sf_throttle = sf->sf_ncmds - SF_DECR_DELTA;
5920                         } else {
5921                                 sf->sf_throttle = SF_DECR_DELTA;
5922                         }
5923                 }
5924 
5925                 mutex_exit(&sf->sf_cmd_mutex);
5926         }
5927 }
5928 
5929 
5930 /*
5931  * sf watchdog routine, called for a timeout
5932  */
5933 /*ARGSUSED*/
5934 static void
5935 sf_watch(void *arg)
5936 {
5937         struct sf *sf;
5938         struct sf_els_hdr       *privp;
5939         static int count = 0, pscan_count = 0;
5940         int cmdmax, i, mescount = 0;
5941         struct sf_target *target;
5942 
5943 
5944         sf_watchdog_time += sf_watchdog_timeout;
5945         count++;
5946         pscan_count++;
5947 
5948         mutex_enter(&sf_global_mutex);
5949         sf_watch_running = 1;
5950         for (sf = sf_head; sf != NULL; sf = sf->sf_next) {
5951 
5952                 mutex_exit(&sf_global_mutex);
5953 
5954                 /* disable throttling while we're suspended */
5955                 mutex_enter(&sf->sf_mutex);
5956                 if (sf->sf_state & SF_STATE_SUSPENDED) {
5957                         mutex_exit(&sf->sf_mutex);
5958                         SF_DEBUG(1, (sf, CE_CONT,
5959                             "sf_watch, sf%d:throttle disabled "
5960                             "due to DDI_SUSPEND\n",
5961                             ddi_get_instance(sf->sf_dip)));
5962                         mutex_enter(&sf_global_mutex);
5963                         continue;
5964                 }
5965                 mutex_exit(&sf->sf_mutex);
5966 
5967                 cmdmax = sf->sf_sochandle->fcal_cmdmax;
5968 
5969                 if (sf->sf_take_core) {
5970                         (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
5971                 }
5972 
5973                 mutex_enter(&sf->sf_cmd_mutex);
5974 
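                     /*
                      * If no max-exchange overflow was flagged since the
                      * last tick, ease the throttle back up toward the
                      * transport maximum; otherwise just clear the flag.
                      */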
5975                 if (!sf->sf_flag) {
5976                         if (sf->sf_throttle < (cmdmax / 2)) {
5977                                 sf->sf_throttle = cmdmax / 2;
5978                         } else if ((sf->sf_throttle += SF_INCR_DELTA) >
5979                             cmdmax) {
5980                                 sf->sf_throttle = cmdmax;
5981                         }
5982                 } else {
5983                         sf->sf_flag = FALSE;
5984                 }
5985 
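                     /*
                      * Keep a smoothed average of outstanding commands;
                      * it is used below to decide when to free unused
                      * command/response pool entries.
                      */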
5986                 sf->sf_ncmds_exp_avg = (sf->sf_ncmds + sf->sf_ncmds_exp_avg)
5987                     >> 2;
5988                 if ((sf->sf_ncmds <= (sf->sf_throttle - SF_LO_CMD_DELTA)) &&
5989                     (sf->sf_pkt_head == NULL)) {
5990 #ifdef DEBUG
5991                         if (sf->sf_use_lock) {
5992                                 SF_DEBUG(4, (sf, CE_NOTE,
5993                                     "use lock flag off\n"));
5994                         }
5995 #endif
5996                         sf->sf_use_lock = FALSE;
5997                 }
5998 
5999                 if (sf->sf_state == SF_STATE_ONLINE && sf->sf_pkt_head &&
6000                     sf->sf_ncmds < sf->sf_throttle) {
6001                         sf_throttle_start(sf);
6002                 }
6003 
6004                 mutex_exit(&sf->sf_cmd_mutex);
6005 
6006                 if (pscan_count >= sf_pool_scan_cnt) {
6007                         if (sf->sf_ncmds_exp_avg < (sf->sf_cr_pool_cnt <<
6008                             SF_LOG2_ELEMS_IN_POOL) - SF_FREE_CR_EPSILON) {
6009                                 sf_crpool_free(sf);
6010                         }
6011                 }
6012                 mutex_enter(&sf->sf_mutex);
6013 
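                     /*
                      * Walk the ELS list, timing out expired entries and
                      * freeing any left over from an earlier LIP.
                      */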
6014                 privp = sf->sf_els_list;
6015                 while (privp != NULL) {
6016                         if (privp->timeout < sf_watchdog_time) {
6017                                 /* timeout this command */
6018                                 privp = sf_els_timeout(sf, privp);
6019                         } else if ((privp->timeout == SF_INVALID_TIMEOUT) &&
6020                             (privp->lip_cnt != sf->sf_lip_cnt)) {
6021                                 if (privp->prev != NULL) {
6022                                         privp->prev->next = privp->next;
6023                                 }
6024                                 if (sf->sf_els_list == privp) {
6025                                         sf->sf_els_list = privp->next;
6026                                 }
6027                                 if (privp->next != NULL) {
6028                                         privp->next->prev = privp->prev;
6029                                 }
6030                                 mutex_exit(&sf->sf_mutex);
6031                                 sf_els_free(privp->fpkt);
6032                                 mutex_enter(&sf->sf_mutex);
6033                                 privp = sf->sf_els_list;
6034                         } else {
6035                                 privp = privp->next;
6036                         }
6037                 }
6038 
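                     /*
                      * The online timer expired before the loop came fully
                      * online: warn if any target is still busy, mark all
                      * targets, and finish initialization anyway.
                      */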
6039                 if (sf->sf_online_timer && sf->sf_online_timer <
6040                     sf_watchdog_time) {
6041                         for (i = 0; i < sf_max_targets; i++) {
6042                                 target = sf->sf_targets[i];
6043                                 if (target != NULL) {
6044                                         if (!mescount && target->sft_state &
6045                                             SF_TARGET_BUSY) {
6046                                                 sf_log(sf, CE_WARN, "!Loop "
6047                                                     "Unstable: Failed to bring "
6048                                                     "Loop Online\n");
6049                                                 mescount = 1;
6050                                         }
6051                                         target->sft_state |= SF_TARGET_MARK;
6052                                 }
6053                         }
6054                         sf_finish_init(sf, sf->sf_lip_cnt);
6055                         sf->sf_state = SF_STATE_INIT;
6056                         sf->sf_online_timer = 0;
6057                 }
6058 
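                     /*
                      * While online, periodically scan targets for timed-out
                      * commands; if offline for too long, note it and let
                      * initialization finish with what we have.
                      */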
6059                 if (sf->sf_state == SF_STATE_ONLINE) {
6060                         mutex_exit(&sf->sf_mutex);
6061                         if (count >= sf_pkt_scan_cnt) {
6062                                 sf_check_targets(sf);
6063                         }
6064                 } else if ((sf->sf_state == SF_STATE_OFFLINE) &&
6065                     (sf->sf_timer < sf_watchdog_time)) {
6066                         for (i = 0; i < sf_max_targets; i++) {
6067                                 target = sf->sf_targets[i];
6068                                 if ((target != NULL) &&
6069                                     (target->sft_state &
6070                                     SF_TARGET_BUSY)) {
6071                                         sf_log(sf, CE_WARN,
6072                                             "!Offline Timeout\n");
6073                                         if (sf_core && (sf_core &
6074                                             SF_CORE_OFFLINE_TIMEOUT)) {
6075                                                 (void) soc_take_core(
6076                                                     sf->sf_sochandle,
6077                                                     sf->sf_socp);
6078                                                 sf_core = 0;
6079                                         }
6080                                         break;
6081                                 }
6082                         }
6083                         sf_finish_init(sf, sf->sf_lip_cnt);
6084                         sf->sf_state = SF_STATE_INIT;
6085                         mutex_exit(&sf->sf_mutex);
6086                 } else {
6087                         mutex_exit(&sf->sf_mutex);
6088                 }
6089                 mutex_enter(&sf_global_mutex);
6090         }
6091         mutex_exit(&sf_global_mutex);
6092         if (count >= sf_pkt_scan_cnt) {
6093                 count = 0;
6094         }
6095         if (pscan_count >= sf_pool_scan_cnt) {
6096                 pscan_count = 0;
6097         }
6098 
6099         /* reset timeout */
6100         sf_watchdog_id = timeout(sf_watch, (caddr_t)0, sf_watchdog_tick);
6101 
6102         /* signal waiting thread */
6103         mutex_enter(&sf_global_mutex);
6104         sf_watch_running = 0;
6105         cv_broadcast(&sf_watch_cv);
6106         mutex_exit(&sf_global_mutex);
6107 }
6108 
6109 
6110 /*
6111  * called during a timeout to check targets
6112  */
6113 static void
6114 sf_check_targets(struct sf *sf)
6115 {
6116         struct sf_target *target;
6117         int i;
6118         struct sf_pkt *cmd;
6119         struct scsi_pkt *pkt;
6120         int lip_cnt;
6121 
6122         mutex_enter(&sf->sf_mutex);
6123         lip_cnt = sf->sf_lip_cnt;
6124         mutex_exit(&sf->sf_mutex);
6125 
6126         /* scan all possible targets */
6127         for (i = 0; i < sf_max_targets; i++) {
6128                 target = sf->sf_targets[i];
6129                 while (target != NULL) {
6130                         mutex_enter(&target->sft_pkt_mutex);
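                             /*
                              * A target that completed I/O recently is
                              * still alive; defer the per-packet timeout
                              * scan for a few watchdog intervals.
                              */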
6131                         if (target->sft_alive && target->sft_scan_count !=
6132                             sf_target_scan_cnt) {
6133                                 target->sft_alive = 0;
6134                                 target->sft_scan_count++;
6135                                 mutex_exit(&target->sft_pkt_mutex);
6136                                 return;
6137                         }
6138                         target->sft_alive = 0;
6139                         target->sft_scan_count = 0;
6140                         cmd = target->sft_pkt_head;
6141                         while (cmd != (struct sf_pkt *)&target->sft_pkt_head) {
6142                                 mutex_enter(&cmd->cmd_abort_mutex);
6143                                 if (cmd->cmd_state == SF_STATE_ISSUED &&
6144                                     ((cmd->cmd_timeout && sf_watchdog_time >
6145 #ifdef  DEBUG
6146                                     cmd->cmd_timeout) || sf_abort_flag)) {
6147                                         sf_abort_flag = 0;
6148 #else
6149                                         cmd->cmd_timeout))) {
6150 #endif
6151                                         cmd->cmd_timeout = 0;
6152         /* prevent reset from getting at this packet */
6153                                         cmd->cmd_state = SF_STATE_ABORTING;
6154                                         mutex_exit(&cmd->cmd_abort_mutex);
6155                                         mutex_exit(&target->sft_pkt_mutex);
6156                                         sf->sf_stats.tstats[i].timeouts++;
6157                                         if (sf_target_timeout(sf, cmd))
6158                                                 return;
6159                                         else {
6160                                                 if (lip_cnt != sf->sf_lip_cnt) {
6161                                                         return;
6162                                                 } else {
6163                                                         mutex_enter(&target->
6164                                                             sft_pkt_mutex);
6165                                                         cmd = target->
6166                                                             sft_pkt_head;
6167                                                 }
6168                                         }
6169         /*
6170          * if the abort and lip fail, a reset will be carried out.
6171          * But the reset will ignore this packet. We have waited at least
6172          * 20 seconds after the initial timeout. Now, complete it here.
6173          * This also takes care of spurious bad aborts.
6174          */
6175                                 } else if ((cmd->cmd_state ==
6176                                     SF_STATE_ABORTING) && (cmd->cmd_timeout
6177                                     <= sf_watchdog_time)) {
6178                                         cmd->cmd_state = SF_STATE_IDLE;
6179                                         mutex_exit(&cmd->cmd_abort_mutex);
6180                                         mutex_exit(&target->sft_pkt_mutex);
6181                                         SF_DEBUG(1, (sf, CE_NOTE,
6182                                             "Command 0x%p to sft 0x%p"
6183                                             " delayed release\n",
6184                                             (void *)cmd, (void *)target));
6185                                         pkt = cmd->cmd_pkt;
6186                                         pkt->pkt_statistics |=
6187                                             (STAT_TIMEOUT|STAT_ABORTED);
6188                                         pkt->pkt_reason = CMD_TIMEOUT;
6189                                         if (pkt->pkt_comp) {
6190                                                 scsi_hba_pkt_comp(pkt);
6191                                         /* handle deferred_destroy case */
6192                                         } else {
6193                                                 if ((cmd->cmd_block->fcp_cntl.
6194                                                     cntl_reset == 1) ||
6195                                                     (cmd->cmd_block->
6196                                                     fcp_cntl.cntl_abort_tsk ==
6197                                                     1)) {
6198                                                         cmd->cmd_block->
6199                                                             fcp_cntl.
6200                                                             cntl_reset = 0;
6201                                                         cmd->cmd_block->
6202                                                             fcp_cntl.
6203                                                             cntl_abort_tsk = 0;
6204                                                         cmd->cmd_fp_pkt->
6205                                                             fcal_pkt_comp =
6206                                                             sf_cmd_callback;
6207                                                         /* for cache */
6208                                                         sf_scsi_destroy_pkt
6209                                                             (&pkt->pkt_address,
6210                                                             pkt);
6211                                                 }
6212                                         }
6213                                         mutex_enter(&target->sft_pkt_mutex);
6214                                         cmd = target->sft_pkt_head;
6215                                 } else {
6216                                         mutex_exit(&cmd->cmd_abort_mutex);
6217                                         cmd = cmd->cmd_forw;
6218                                 }
6219                         }
6220                         mutex_exit(&target->sft_pkt_mutex);
6221                         target = target->sft_next_lun;
6222                 }
6223         }
6224 }
6225 
6226 
6227 /*
6228  * a command to a target has timed out
6229  * return TRUE if the command abort failed or timed out, else FALSE
6230  */
6231 static int
6232 sf_target_timeout(struct sf *sf, struct sf_pkt *cmd)
6233 {
6234         int rval;
6235         struct scsi_pkt *pkt;
6236         struct fcal_packet *fpkt;
6237         int tgt_id;
6238         int retval = FALSE;
6239 
6240 
6241         SF_DEBUG(1, (sf, CE_NOTE, "Command 0x%p to target %x timed out\n",
6242             (void *)cmd->cmd_fp_pkt, cmd->cmd_pkt->pkt_address.a_target));
6243 
6244         fpkt = cmd->cmd_fp_pkt;
6245 
6246         if (sf_core && (sf_core & SF_CORE_CMD_TIMEOUT)) {
6247                 sf_token = (int *)(uintptr_t)
6248                     fpkt->fcal_socal_request.sr_soc_hdr.\
6249                     sh_request_token;
6250                 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6251                 sf_core = 0;
6252         }
6253 
6254         /* call the transport to abort a command */
6255         rval = soc_abort(sf->sf_sochandle, sf->sf_socp,
6256             sf->sf_sochandle->fcal_portno, fpkt, 1);
6257 
6258         switch (rval) {
6259         case FCAL_ABORTED:
6260                 SF_DEBUG(1, (sf, CE_NOTE, "Command Abort succeeded\n"));
6261                 pkt = cmd->cmd_pkt;
6262                 cmd->cmd_state = SF_STATE_IDLE;
6263                 pkt->pkt_statistics |= (STAT_TIMEOUT|STAT_ABORTED);
6264                 pkt->pkt_reason = CMD_TIMEOUT;
6265                 if (pkt->pkt_comp != NULL) {
6266                         (*pkt->pkt_comp)(pkt);
6267                 }
6268                 break;                          /* success */
6269 
6270         case FCAL_ABORT_FAILED:
6271                 SF_DEBUG(1, (sf, CE_NOTE, "Command Abort failed at target\n"));
6272                 pkt = cmd->cmd_pkt;
6273                 cmd->cmd_state = SF_STATE_IDLE;
6274                 pkt->pkt_reason = CMD_TIMEOUT;
6275                 pkt->pkt_statistics |= STAT_TIMEOUT;
6276                 tgt_id = pkt->pkt_address.a_target;
6277                 sf->sf_stats.tstats[tgt_id].abts_failures++;
6278                 if (pkt->pkt_comp != NULL) {
6279                         (*pkt->pkt_comp)(pkt);
6280                 }
6281                 break;
6282 
6283         case FCAL_BAD_ABORT:
6284                 if (sf_core && (sf_core & SF_CORE_BAD_ABORT)) {
6285                         sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6286                             sr_soc_hdr.sh_request_token;
6287                         (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6288                         sf_core = 0;
6289                 }
6290                 SF_DEBUG(1, (sf, CE_NOTE, "Command Abort bad abort\n"));
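                     /* defer completion to the watchdog for 20+ more seconds */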
6291                 cmd->cmd_timeout = sf_watchdog_time + cmd->cmd_pkt->pkt_time
6292                     + 20;
6293                 break;
6294 
6295         case FCAL_TIMEOUT:
6296                 retval = TRUE;
6297                 break;
6298 
6299         default:
6300                 pkt = cmd->cmd_pkt;
6301                 tgt_id = pkt->pkt_address.a_target;
6302                 sf_log(sf, CE_WARN, "Command Abort failed target 0x%x, "
6303                     "forcing a LIP\n", tgt_id);
6304                 if (sf_core && (sf_core & SF_CORE_ABORT_TIMEOUT)) {
6305                         sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6306                             sr_soc_hdr.sh_request_token;
6307                         (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6308                         sf_core = 0;
6309                 }
6310                 sf_force_lip(sf);
6311                 retval = TRUE;
6312                 break;
6313         }
6314 
6315         return (retval);
6316 }
6317 
6318 
6319 /*
6320  * an ELS command has timed out
6321  * return the next ELS header for the caller to process, or NULL
6322  */
6323 static struct sf_els_hdr *
6324 sf_els_timeout(struct sf *sf, struct sf_els_hdr *privp)
6325 {
6326         struct fcal_packet *fpkt;
6327         int rval, dflag, timeout = SF_ELS_TIMEOUT;
6328         uint_t lip_cnt = privp->lip_cnt;
6329         uchar_t els_code = privp->els_code;
6330         struct sf_target *target = privp->target;
6331         char what[64];
6332 
6333         fpkt = privp->fpkt;
6334         dflag = privp->delayed_retry;
6335         /* use as temporary state variable */
6336         privp->timeout = SF_INVALID_TIMEOUT;
6337         mutex_exit(&sf->sf_mutex);
6338 
6339         if (privp->fpkt->fcal_pkt_comp == sf_els_callback) {
6340                 /*
6341                  * take socal core if required. Timeouts for IB and hosts
6342                  * are not very interesting, so we take socal core only
6343                  * if the timeout is *not* for an IB or host.
6344                  */
6345                 if (sf_core && (sf_core & SF_CORE_ELS_TIMEOUT) &&
6346                     ((sf_alpa_to_switch[privp->dest_nport_id] &
6347                     0x0d) != 0x0d) && ((privp->dest_nport_id != 1) &&
6348                     (privp->dest_nport_id != 2) &&
6349                     (privp->dest_nport_id != 4) &&
6350                     (privp->dest_nport_id != 8) &&
6351                     (privp->dest_nport_id != 0xf))) {
6352                         sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6353                             sr_soc_hdr.sh_request_token;
6354                         (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6355                         sf_core = 0;
6356                 }
6357                 (void) sprintf(what, "ELS 0x%x", privp->els_code);
6358         } else if (privp->fpkt->fcal_pkt_comp == sf_reportlun_callback) {
6359                 if (sf_core && (sf_core & SF_CORE_REPORTLUN_TIMEOUT)) {
6360                         sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6361                             sr_soc_hdr.sh_request_token;
6362                         (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6363                         sf_core = 0;
6364                 }
6365                 timeout = SF_FCP_TIMEOUT;
6366                 (void) sprintf(what, "REPORT_LUNS");
6367         } else if (privp->fpkt->fcal_pkt_comp == sf_inq_callback) {
6368                 if (sf_core && (sf_core & SF_CORE_INQUIRY_TIMEOUT)) {
6369                         sf_token = (int *)(uintptr_t)
6370                             fpkt->fcal_socal_request.\
6371                             sr_soc_hdr.sh_request_token;
6372                         (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6373                         sf_core = 0;
6374                 }
6375                 timeout = SF_FCP_TIMEOUT;
6376                 (void) sprintf(what, "INQUIRY to LUN 0x%lx",
6377                     (long)SCSA_LUN(target));
6378         } else {
6379                 (void) sprintf(what, "UNKNOWN OPERATION");
6380         }
6381 
6382         if (dflag) {
6383                 /* delayed retry */
6384                 SF_DEBUG(2, (sf, CE_CONT,
6385                     "!sf%d: %s to target %x delayed retry\n",
6386                     ddi_get_instance(sf->sf_dip), what,
6387                     sf_alpa_to_switch[privp->dest_nport_id]));
6388                 privp->delayed_retry = FALSE;
6389                 goto try_again;
6390         }
6391 
6392         sf_log(sf, CE_NOTE, "!%s to target 0x%x alpa 0x%x timed out\n",
6393             what, sf_alpa_to_switch[privp->dest_nport_id],
6394             privp->dest_nport_id);
6395 
6396         rval = soc_abort(sf->sf_sochandle, sf->sf_socp, sf->sf_sochandle
6397             ->fcal_portno, fpkt, 1);
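             /* FCAL_ABORTED/FCAL_ABORT_FAILED: retry the ELS or clean up */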
6398         if (rval == FCAL_ABORTED || rval == FCAL_ABORT_FAILED) {
6399                 SF_DEBUG(1, (sf, CE_NOTE, "!%s abort to al_pa %x succeeded\n",
6400                     what, privp->dest_nport_id));
6401 try_again:
6402 
6403                 mutex_enter(&sf->sf_mutex);
6404                 if (privp->prev != NULL) {
6405                         privp->prev->next = privp->next;
6406                 }
6407                 if (sf->sf_els_list == privp) {
6408                         sf->sf_els_list = privp->next;
6409                 }
6410                 if (privp->next != NULL) {
6411                         privp->next->prev = privp->prev;
6412                 }
6413                 privp->prev = privp->next = NULL;
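                     /* if no new LIP has occurred, retry or give up on the target */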
6414                 if (lip_cnt == sf->sf_lip_cnt) {
6415                         privp->timeout = sf_watchdog_time + timeout;
6416                         if ((++(privp->retries) < sf_els_retries) ||
6417                             (dflag && (privp->retries < SF_BSY_RETRIES))) {
6418                                 mutex_exit(&sf->sf_mutex);
6419                                 sf_log(sf, CE_NOTE,
6420                                     "!%s to target 0x%x retrying\n",
6421                                     what,
6422                                     sf_alpa_to_switch[privp->dest_nport_id]);
6423                                 if (sf_els_transport(sf, privp) == 1) {
6424                                         mutex_enter(&sf->sf_mutex);
6425                                         return (sf->sf_els_list); /* success */
6426                                 }
6427                                 mutex_enter(&sf->sf_mutex);
6428                                 fpkt = NULL;
6429                         }
6430                         if ((lip_cnt == sf->sf_lip_cnt) &&
6431                             (els_code != LA_ELS_LOGO)) {
6432                                 if (target != NULL) {
6433                                         sf_offline_target(sf, target);
6434                                 }
6435                                 if (sf->sf_lip_cnt == lip_cnt) {
6436                                         sf->sf_device_count--;
6437                                         ASSERT(sf->sf_device_count >= 0);
6438                                         if (sf->sf_device_count == 0) {
6439                                                 sf_finish_init(sf,
6440                                                     sf->sf_lip_cnt);
6441                                         }
6442                                 }
6443                         }
6444                         privp = sf->sf_els_list;
6445                         mutex_exit(&sf->sf_mutex);
6446                         if (fpkt != NULL) {
6447                                 sf_els_free(fpkt);
6448                         }
6449                 } else {
6450                         mutex_exit(&sf->sf_mutex);
6451                         sf_els_free(privp->fpkt);
6452                         privp = NULL;
6453                 }
6454         } else {
6455                 if (sf_core && (sf_core & SF_CORE_ELS_FAILED)) {
6456                         sf_token = (int *)(uintptr_t)
6457                             fpkt->fcal_socal_request.\
6458                             sr_soc_hdr.sh_request_token;
6459                         (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6460                         sf_core = 0;
6461                 }
6462                 sf_log(sf, CE_NOTE, "%s abort to target 0x%x failed. "
6463                     "status=0x%x, forcing LIP\n", what,
6464                     sf_alpa_to_switch[privp->dest_nport_id], rval);
6465                 privp = NULL;
6466                 if (sf->sf_lip_cnt == lip_cnt) {
6467                         sf_force_lip(sf);
6468                 }
6469         }
6470 
6471         mutex_enter(&sf->sf_mutex);
6472         return (privp);
6473 }
6474 
6475 
6476 /*
6477  * timeout callback: check whether any target reset delay has expired
6478  */
6479 /*ARGSUSED*/
6480 static void
6481 sf_check_reset_delay(void *arg)
6482 {
6483         struct sf *sf;
6484         struct sf_target *target;
6485         struct sf_reset_list *rp, *tp;
6486         uint_t lip_cnt, reset_timeout_flag = FALSE;
6487         clock_t lb;
6488 
6489         lb = ddi_get_lbolt();
6490 
6491         mutex_enter(&sf_global_mutex);
6492 
6493         sf_reset_timeout_id = 0;
6494 
6495         for (sf = sf_head; sf != NULL; sf = sf->sf_next) {
6496 
6497                 mutex_exit(&sf_global_mutex);
6498                 mutex_enter(&sf->sf_mutex);
6499 
6500                 /* the cast lets the list head act as a dummy element */
6501                 tp = (struct sf_reset_list *)&sf->sf_reset_list;
6502 
6503                 rp = sf->sf_reset_list;
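             /* check each entry on this instance's reset delay list */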
6504                 while (rp != NULL) {
6505                         if (((rp->timeout - lb) < 0) &&
6506                             (rp->lip_cnt == sf->sf_lip_cnt)) {
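                                     /* delay expired; un-busy this target */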
6507                                 tp->next = rp->next;
6508                                 mutex_exit(&sf->sf_mutex);
6509                                 target = rp->target;
6510                                 lip_cnt = rp->lip_cnt;
6511                                 kmem_free(rp, sizeof (struct sf_reset_list));
6512                                 /* abort all cmds for this target */
6513                                 while (target) {
6514                                         sf_abort_all(sf, target, FALSE,
6515                                             lip_cnt, TRUE);
6516                                         mutex_enter(&target->sft_mutex);
6517                                         if (lip_cnt == sf->sf_lip_cnt) {
6518                                                 target->sft_state &=
6519                                                     ~SF_TARGET_BUSY;
6520                                         }
6521                                         mutex_exit(&target->sft_mutex);
6522                                         target = target->sft_next_lun;
6523                                 }
6524                                 mutex_enter(&sf->sf_mutex);
6525                                 tp = (struct sf_reset_list *)
6526                                     &sf->sf_reset_list;
6527                                 rp = sf->sf_reset_list;
6528                                 lb = ddi_get_lbolt();
6529                         } else if (rp->lip_cnt != sf->sf_lip_cnt) {
6530                                 tp->next = rp->next;
6531                                 kmem_free(rp, sizeof (struct sf_reset_list));
6532                                 rp = tp->next;
6533                         } else {
6534                                 reset_timeout_flag = TRUE;
6535                                 tp = rp;
6536                                 rp = rp->next;
6537                         }
6538                 }
6539                 mutex_exit(&sf->sf_mutex);
6540                 mutex_enter(&sf_global_mutex);
6541         }
6542 
6543         if (reset_timeout_flag && (sf_reset_timeout_id == 0)) {
6544                 sf_reset_timeout_id = timeout(sf_check_reset_delay,
6545                     NULL, drv_usectohz(SF_TARGET_RESET_DELAY));
6546         }
6547 
6548         mutex_exit(&sf_global_mutex);
6549 }
6550 
6551 
6552 /*
6553  * called to "reset the bus", i.e. force loop initialization (and address
6554  * re-negotiation)
6555  */
6556 static void
6557 sf_force_lip(struct sf *sf)
6558 {
6559         int i;
6560         struct sf_target *target;
6561 
6562 
6563         /* disable restart of lip if we're suspended */
6564         mutex_enter(&sf->sf_mutex);
6565         if (sf->sf_state & SF_STATE_SUSPENDED) {
6566                 mutex_exit(&sf->sf_mutex);
6567                 SF_DEBUG(1, (sf, CE_CONT,
6568                     "sf_force_lip, sf%d: lip restart disabled "
6569                     "due to DDI_SUSPEND\n",
6570                     ddi_get_instance(sf->sf_dip)));
6571                 return;
6572         }
6573 
6574         sf_log(sf, CE_NOTE, "Forcing lip\n");
6575 
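             /* mark all online targets busy until the LIP completes */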
6576         for (i = 0; i < sf_max_targets; i++) {
6577                 target = sf->sf_targets[i];
6578                 while (target != NULL) {
6579                         mutex_enter(&target->sft_mutex);
6580                         if (!(target->sft_state & SF_TARGET_OFFLINE))
6581                                 target->sft_state |= SF_TARGET_BUSY;
6582                         mutex_exit(&target->sft_mutex);
6583                         target = target->sft_next_lun;
6584                 }
6585         }
6586 
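             /* bump the LIP count and take this instance offline */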
6587         sf->sf_lip_cnt++;
6588         sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
6589         sf->sf_state = SF_STATE_OFFLINE;
6590         mutex_exit(&sf->sf_mutex);
6591         sf->sf_stats.lip_count++;       /* stats not mutex protected */
6592 
6593 #ifdef DEBUG
6594         /* are we allowing LIPs? */
6595         if (sf_lip_flag != 0) {
6596 #endif
6597                 /* call the transport to force loop initialization */
6598                 if (((i = soc_force_lip(sf->sf_sochandle, sf->sf_socp,
6599                     sf->sf_sochandle->fcal_portno, 1,
6600                     FCAL_FORCE_LIP)) != FCAL_SUCCESS) &&
6601                     (i != FCAL_TIMEOUT)) {
6602                         /* force LIP failed */
6603                         if (sf_core && (sf_core & SF_CORE_LIP_FAILED)) {
6604                                 (void) soc_take_core(sf->sf_sochandle,
6605                                     sf->sf_socp);
6606                                 sf_core = 0;
6607                         }
6608 #ifdef DEBUG
6609                         /* are we allowing reset after LIP failed? */
6610                         if (sf_reset_flag != 0) {
6611 #endif
6612                                 /* restart socal after resetting it */
6613                                 sf_log(sf, CE_NOTE,
6614                                     "!Force lip failed. Status code 0x%x."
6615                                     " Resetting\n", i);
6616                                 /* call transport to force a reset */
6617                                 soc_force_reset(sf->sf_sochandle, sf->sf_socp,
6618                                     sf->sf_sochandle->fcal_portno, 1);
6619 #ifdef  DEBUG
6620                         }
6621 #endif
6622                 }
6623 #ifdef  DEBUG
6624         }
6625 #endif
6626 }
6627 
6628 
6629 /*
6630  * called by the transport when an unsolicited ELS is received
6631  */
6632 static void
6633 sf_unsol_els_callback(void *arg, soc_response_t *srp, caddr_t payload)
6634 {
6635         struct sf *sf = (struct sf *)arg;
6636         els_payload_t   *els = (els_payload_t *)payload;
6637         struct la_els_rjt *rsp;
6638         int     i, tgt_id;
6639         uchar_t dest_id;
6640         struct fcal_packet *fpkt;
6641         fc_frame_header_t *hp;
6642         struct sf_els_hdr *privp;
6643 
6644 
6645         if ((els == NULL) || ((i = srp->sr_soc_hdr.sh_byte_cnt) == 0)) {
6646                 return;
6647         }
6648 
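             /* clamp the payload length to the CQ entry payload size */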
6649         if (i > SOC_CQE_PAYLOAD) {
6650                 i = SOC_CQE_PAYLOAD;
6651         }
6652 
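             /* the frame's S_ID is the AL_PA of the sender */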
6653         dest_id = (uchar_t)srp->sr_fc_frame_hdr.s_id;
6654         tgt_id = sf_alpa_to_switch[dest_id];
6655 
6656         switch (els->els_cmd.c.ls_command) {
6657 
6658         case LA_ELS_LOGO:
6659                 /*
6660                  * logout received -- log the fact
6661                  */
6662                 sf->sf_stats.tstats[tgt_id].logouts_recvd++;
6663                 sf_log(sf, CE_NOTE, "!LOGO recvd from target %x, %s\n",
6664                     tgt_id,
6665                     sf_lip_on_plogo ? "Forcing LIP...." : "");
6666                 if (sf_lip_on_plogo) {
6667                         sf_force_lip(sf);
6668                 }
6669                 break;
6670 
6671         default:  /* includes LA_ELS_PLOGI */
6672                 /*
6673                  * something besides a logout received -- we don't handle
6674                  * this, so send back a reject saying it's unsupported
6675                  */
6676 
6677                 sf_log(sf, CE_NOTE, "!ELS 0x%x recvd from target 0x%x\n",
6678                     els->els_cmd.c.ls_command, tgt_id);
6679 
6680 
6681                 /* allocate room for a response */
6682                 if (sf_els_alloc(sf, dest_id, sizeof (struct sf_els_hdr),
6683                     sizeof (struct la_els_rjt), sizeof (union sf_els_rsp),
6684                     (caddr_t *)&privp, (caddr_t *)&rsp) == NULL) {
6685                         break;
6686                 }
6687 
6688                 fpkt = privp->fpkt;
6689 
6690                 /* fill in pkt header */
6691                 hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
6692                 hp->r_ctl = R_CTL_ELS_RSP;
6693                 hp->f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
6694                 hp->ox_id = srp->sr_fc_frame_hdr.ox_id;
6695                 hp->rx_id = srp->sr_fc_frame_hdr.rx_id;
6696                 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
6697                     CQ_TYPE_OUTBOUND;
6698 
6699                 fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 1;
6700 
6701                 /* fill in response */
6702                 rsp->ls_code = LA_ELS_RJT;   /* reject this ELS */
6703                 rsp->mbz[0] = 0;
6704                 rsp->mbz[1] = 0;
6705                 rsp->mbz[2] = 0;
6706                 ((struct la_els_logi *)privp->rsp)->ls_code = LA_ELS_ACC;
6707                 *((int *)&rsp->reserved) = 0;
6708                 rsp->reason_code = RJT_UNSUPPORTED;
6709                 privp->retries = sf_els_retries;
6710                 privp->els_code = LA_ELS_RJT;
6711                 privp->timeout = (unsigned)0xffffffff;
6712                 (void) sf_els_transport(sf, privp);
6713                 break;
6714         }
6715 }
6716 
6717 
6718 /*
6719  * Error logging, printing, and debug print routines
6720  */
6721 
6722 /*PRINTFLIKE3*/
6723 static void
6724 sf_log(struct sf *sf, int level, const char *fmt, ...)
6725 {
6726         char buf[256];
6727         dev_info_t *dip;
6728         va_list ap;
6729 
6730         if (sf != NULL) {
6731                 dip = sf->sf_dip;
6732         } else {
6733                 dip = NULL;
6734         }
6735 
6736         va_start(ap, fmt);
6737         (void) vsnprintf(buf, sizeof (buf), fmt, ap);
6738         va_end(ap);
6739         scsi_log(dip, "sf", level, buf);
6740 }
6741 
6742 
6743 /*
6744  * called to update the sf kstats -- return 0 on success else return errno
6745  */
6746 static int
6747 sf_kstat_update(kstat_t *ksp, int rw)
6748 {
6749         struct sf *sf;
6750 
6751         if (rw == KSTAT_WRITE) {
6752                 /* can't write */
6753                 return (EACCES);
6754         }
6755 
6756         sf = ksp->ks_private;
6757         sf->sf_stats.ncmds = sf->sf_ncmds;
6758         sf->sf_stats.throttle_limit = sf->sf_throttle;
6759         sf->sf_stats.cr_pool_size = sf->sf_cr_pool_cnt;
6760 
6761         return (0);                             /* success */
6762 }
6763 
6764 
6765 /*
6766  * Unix Entry Points
6767  */
6768 
6769 /*
6770  * driver entry point for opens on control device
6771  */
6772 /* ARGSUSED */
6773 static int
6774 sf_open(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
6775 {
6776         dev_t dev = *dev_p;
6777         struct sf *sf;
6778 
6779 
6780         /* just ensure soft state exists for this device */
6781         sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6782         if (sf == NULL) {
6783                 return (ENXIO);
6784         }
6785 
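             /* count this open so close can detect an unmatched close */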
6786         ++(sf->sf_check_n_close);
6787 
6788         return (0);
6789 }
6790 
6791 
6792 /*
6793  * driver entry point for last close on control device
6794  */
6795 /* ARGSUSED */
6796 static int
6797 sf_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
6798 {
6799         struct sf *sf;
6800 
6801         sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6802         if (sf == NULL) {
6803                 return (ENXIO);
6804         }
6805 
6806         if (!sf->sf_check_n_close) { /* if this flag is zero */
6807                 cmn_err(CE_WARN, "sf%d: trying to close unopened instance",
6808                     SF_MINOR2INST(getminor(dev)));
6809                 return (ENODEV);
6810         } else {
6811                 --(sf->sf_check_n_close);
6812         }
6813         return (0);
6814 }
6815 
6816 
6817 /*
6818  * driver entry point for sf ioctl commands
6819  */
6820 /* ARGSUSED */
6821 static int
6822 sf_ioctl(dev_t dev,
6823     int cmd, intptr_t arg, int mode, cred_t *cred_p, int *rval_p)
6824 {
6825         struct sf *sf;
6826         struct sf_target *target;
6827         uchar_t al_pa;
6828         struct sf_al_map map;
6829         int cnt, i;
6830         int     retval;                         /* return value */
6831         struct devctl_iocdata *dcp;
6832         dev_info_t *cdip;
6833         struct scsi_address ap;
6834         scsi_hba_tran_t *tran;
6835 
6836 
6837         sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6838         if (sf == NULL) {
6839                 return (ENXIO);
6840         }
6841 
6842         /* handle all ioctls */
6843         switch (cmd) {
6844 
6845         /*
6846          * We can use the generic implementation for these ioctls
6847          */
6848         case DEVCTL_DEVICE_GETSTATE:
6849         case DEVCTL_DEVICE_ONLINE:
6850         case DEVCTL_DEVICE_OFFLINE:
6851         case DEVCTL_BUS_GETSTATE:
6852                 return (ndi_devctl_ioctl(sf->sf_dip, cmd, arg, mode, 0));
6853 
6854         /*
6855          * return FC map
6856          */
6857         case SFIOCGMAP:
6858                 if ((sf->sf_lilp_map->lilp_magic != FCAL_LILP_MAGIC &&
6859                     sf->sf_lilp_map->lilp_magic != FCAL_BADLILP_MAGIC) ||
6860                     sf->sf_state != SF_STATE_ONLINE) {
6861                         retval = ENOENT;
6862                         goto dun;
6863                 }
6864                 mutex_enter(&sf->sf_mutex);
6865                 if (sf->sf_lilp_map->lilp_magic == FCAL_BADLILP_MAGIC) {
6866                         int i, j = 0;
6867 
6868                         /* Need to generate a fake lilp map */
6869                         for (i = 0; i < sf_max_targets; i++) {
6870                                 if (sf->sf_targets[i])
6871                                         sf->sf_lilp_map->lilp_alpalist[j++] =
6872                                             sf->sf_targets[i]->
6873                                             sft_hard_address;
6874                         }
6875                         sf->sf_lilp_map->lilp_length = (uchar_t)j;
6876                 }
6877                 cnt = sf->sf_lilp_map->lilp_length;
6878                 map.sf_count = (short)cnt;
6879                 bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
6880                     (caddr_t)&map.sf_hba_addr.sf_node_wwn,
6881                     sizeof (la_wwn_t));
6882                 bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
6883                     (caddr_t)&map.sf_hba_addr.sf_port_wwn,
6884                     sizeof (la_wwn_t));
6885                 map.sf_hba_addr.sf_al_pa = sf->sf_al_pa;
6886                 map.sf_hba_addr.sf_hard_address = 0;
6887                 map.sf_hba_addr.sf_inq_dtype = DTYPE_UNKNOWN;
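                     /* fill in one map entry per AL_PA in the LILP map */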
6888                 for (i = 0; i < cnt; i++) {
6889                         al_pa = sf->sf_lilp_map->lilp_alpalist[i];
6890                         map.sf_addr_pair[i].sf_al_pa = al_pa;
6891                         if (al_pa == sf->sf_al_pa) {
6892                                 (void) bcopy((caddr_t)&sf->sf_sochandle
6893                                     ->fcal_n_wwn, (caddr_t)&map.
6894                                     sf_addr_pair[i].sf_node_wwn,
6895                                     sizeof (la_wwn_t));
6896                                 (void) bcopy((caddr_t)&sf->sf_sochandle
6897                                     ->fcal_p_wwn, (caddr_t)&map.
6898                                     sf_addr_pair[i].sf_port_wwn,
6899                                     sizeof (la_wwn_t));
6900                                 map.sf_addr_pair[i].sf_hard_address =
6901                                     al_pa;
6902                                 map.sf_addr_pair[i].sf_inq_dtype =
6903                                     DTYPE_PROCESSOR;
6904                                 continue;
6905                         }
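                             /* copy WWNs if the target is known and online */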
6906                         target = sf->sf_targets[sf_alpa_to_switch[
6907                             al_pa]];
6908                         if (target != NULL) {
6909                                 mutex_enter(&target->sft_mutex);
6910                                 if (!(target->sft_state &
6911                                     (SF_TARGET_OFFLINE |
6912                                     SF_TARGET_BUSY))) {
6913                                         bcopy((caddr_t)&target->
6914                                             sft_node_wwn,
6915                                             (caddr_t)&map.sf_addr_pair
6916                                             [i].sf_node_wwn,
6917                                             sizeof (la_wwn_t));
6918                                         bcopy((caddr_t)&target->
6919                                             sft_port_wwn,
6920                                             (caddr_t)&map.sf_addr_pair
6921                                             [i].sf_port_wwn,
6922                                             sizeof (la_wwn_t));
6923                                         map.sf_addr_pair[i].
6924                                             sf_hard_address
6925                                             = target->sft_hard_address;
6926                                         map.sf_addr_pair[i].
6927                                             sf_inq_dtype
6928                                             = target->sft_device_type;
6929                                         mutex_exit(&target->sft_mutex);
6930                                         continue;
6931                                 }
6932                                 mutex_exit(&target->sft_mutex);
6933                         }
6934                         bzero((caddr_t)&map.sf_addr_pair[i].
6935                             sf_node_wwn, sizeof (la_wwn_t));
6936                         bzero((caddr_t)&map.sf_addr_pair[i].
6937                             sf_port_wwn, sizeof (la_wwn_t));
6938                         map.sf_addr_pair[i].sf_inq_dtype =
6939                             DTYPE_UNKNOWN;
6940                 }
6941                 mutex_exit(&sf->sf_mutex);
6942                 if (ddi_copyout((caddr_t)&map, (caddr_t)arg,
6943                     sizeof (struct sf_al_map), mode) != 0) {
6944                         retval = EFAULT;
6945                         goto dun;
6946                 }
6947                 break;
6948 
6949         /*
6950          * handle device control ioctls
6951          */
6952         case DEVCTL_DEVICE_RESET:
6953                 if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS) {
6954                         retval = EFAULT;
6955                         goto dun;
6956                 }
6957                 if ((ndi_dc_getname(dcp) == NULL) ||
6958                     (ndi_dc_getaddr(dcp) == NULL)) {
6959                         ndi_dc_freehdl(dcp);
6960                         retval = EINVAL;
6961                         goto dun;
6962                 }
6963                 cdip = ndi_devi_find(sf->sf_dip,
6964                     ndi_dc_getname(dcp), ndi_dc_getaddr(dcp));
6965                 ndi_dc_freehdl(dcp);
6966 
6967                 if (cdip == NULL) {
6968                         retval = ENXIO;
6969                         goto dun;
6970                 }
6971 
6972                 if ((target = sf_get_target_from_dip(sf, cdip)) == NULL) {
6973                         retval = ENXIO;
6974                         goto dun;
6975                 }
6976                 mutex_enter(&target->sft_mutex);
6977                 if (!(target->sft_state & SF_TARGET_INIT_DONE)) {
6978                         mutex_exit(&target->sft_mutex);
6979                         retval = ENXIO;
6980                         goto dun;
6981                 }
6982 
6983                 /* this is ugly: fake up a scsi_address for sf_reset() */
6984                 tran = kmem_zalloc(scsi_hba_tran_size(), KM_SLEEP);
6985                 bcopy(target->sft_tran, tran, scsi_hba_tran_size());
6986                 mutex_exit(&target->sft_mutex);
6987                 ap.a_hba_tran = tran;
6988                 ap.a_target = sf_alpa_to_switch[target->sft_al_pa];
6989                 if (sf_reset(&ap, RESET_TARGET) == FALSE) {
6990                         retval = EIO;
6991                 } else {
6992                         retval = 0;
6993                 }
6994                 kmem_free(tran, scsi_hba_tran_size());
6995                 goto dun;
6996 
6997         case DEVCTL_BUS_QUIESCE:
6998         case DEVCTL_BUS_UNQUIESCE:
6999                 retval = ENOTSUP;
7000                 goto dun;
7001 
7002         case DEVCTL_BUS_RESET:
7003         case DEVCTL_BUS_RESETALL:
7004                 sf_force_lip(sf);
7005                 break;
7006 
7007         default:
7008                 retval = ENOTTY;
7009                 goto dun;
7010         }
7011 
7012         retval = 0;                             /* success */
7013 
7014 dun:
7015         return (retval);
7016 }
7017 
7018 
7019 /*
7020  * get the target given a DIP
7021  */
7022 static struct sf_target *
7023 sf_get_target_from_dip(struct sf *sf, dev_info_t *dip)
7024 {
7025         int i;
7026         struct sf_target *target;
7027 
7028 
7029         /* scan each hash queue for the DIP in question */
7030         for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
7031                 target = sf->sf_wwn_lists[i];
7032                 while (target != NULL) {
7033                         if (target->sft_dip == dip) {
7034                                 return (target); /* success: target found */
7035                         }
7036                         target = target->sft_next;
7037                 }
7038         }
7039         return (NULL);                          /* failure: target not found */
7040 }
7041 
7042 
7043 /*
7044  * called by the transport to get an event cookie
7045  */
7046 static int
7047 sf_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
7048     ddi_eventcookie_t *event_cookiep)
7049 {
7050         struct sf *sf;
7051 
7052         sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7053         if (sf == NULL) {
7054                 /* can't find instance for this device */
7055                 return (DDI_FAILURE);
7056         }
7057 
7058         return (ndi_event_retrieve_cookie(sf->sf_event_hdl, rdip, name,
7059             event_cookiep, NDI_EVENT_NOPASS));
7060 
7061 }
7062 
7063 
7064 /*
7065  * called by the transport to add an event callback
7066  */
7067 static int
7068 sf_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
7069     ddi_eventcookie_t eventid, void (*callback)(dev_info_t *dip,
7070     ddi_eventcookie_t event, void *arg, void *impl_data), void *arg,
7071     ddi_callback_id_t *cb_id)
7072 {
7073         struct sf *sf;
7074 
7075         sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7076         if (sf == NULL) {
7077                 /* can't find instance for this device */
7078                 return (DDI_FAILURE);
7079         }
7080 
7081         return (ndi_event_add_callback(sf->sf_event_hdl, rdip,
7082             eventid, callback, arg, NDI_SLEEP, cb_id));
7083 
7084 }
7085 
7086 
7087 /*
7088  * called by the transport to remove an event callback
7089  */
7090 static int
7091 sf_bus_remove_eventcall(dev_info_t *devi, ddi_callback_id_t cb_id)
7092 {
7093         struct sf *sf;
7094 
7095         sf = ddi_get_soft_state(sf_state, ddi_get_instance(devi));
7096         if (sf == NULL) {
7097                 /* can't find instance for this device */
7098                 return (DDI_FAILURE);
7099         }
7100 
7101         return (ndi_event_remove_callback(sf->sf_event_hdl, cb_id));
7102 }
7103 
7104 
7105 /*
7106  * called by the transport to post an event
7107  */
7108 static int
7109 sf_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
7110     ddi_eventcookie_t eventid, void *impldata)
7111 {
7112         ddi_eventcookie_t remove_cookie, cookie;
7113 
7114         /* is this a remove event? */
7115         struct sf *sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7116         remove_cookie = ndi_event_tag_to_cookie(sf->sf_event_hdl,
7117             SF_EVENT_TAG_REMOVE);
7118 
7119         if (remove_cookie == eventid) {
7120                 struct sf_target *target;
7121 
7122                 /* handle remove event */
7123 
7124                 if (sf == NULL) {
7125                         /* no sf instance for this device */
7126                         return (NDI_FAILURE);
7127                 }
7128 
7129                 /* get the target for this event */
7130                 if ((target = sf_get_target_from_dip(sf, rdip)) != NULL) {
7131                         /*
7132                          * clear device info for this target and mark as
7133                          * not done
7134                          */
7135                         mutex_enter(&target->sft_mutex);
7136                         target->sft_dip = NULL;
7137                         target->sft_state &= ~SF_TARGET_INIT_DONE;
7138                         mutex_exit(&target->sft_mutex);
7139                         return (NDI_SUCCESS); /* event handled */
7140                 }
7141 
7142                 /* no target for this event */
7143                 return (NDI_FAILURE);
7144         }
7145 
7146         /* an insertion event */
7147         if (ndi_busop_get_eventcookie(dip, rdip, FCAL_INSERT_EVENT, &cookie)
7148             != NDI_SUCCESS) {
7149                 return (NDI_FAILURE);
7150         }
7151 
7152         return (ndi_post_event(dip, rdip, cookie, impldata));
7153 }
7154 
7155 
7156 /*
7157  * the sf hotplug daemon, one thread per sf instance
7158  */
7159 static void
7160 sf_hp_daemon(void *arg)
7161 {
7162         struct sf *sf = (struct sf *)arg;
7163         struct sf_hp_elem *elem;
7164         struct sf_target *target;
7165         int tgt_id;
7166         callb_cpr_t cprinfo;
7167 
7168         CALLB_CPR_INIT(&cprinfo, &sf->sf_hp_daemon_mutex,
7169             callb_generic_cpr, "sf_hp_daemon");
7170 
7171         mutex_enter(&sf->sf_hp_daemon_mutex);
7172 
7173         do {
7174                 while (sf->sf_hp_elem_head != NULL) {
7175 
7176                         /* save ptr to head of list */
7177                         elem = sf->sf_hp_elem_head;
7178 
7179                         /* take element off of list */
7180                         if (sf->sf_hp_elem_head == sf->sf_hp_elem_tail) {
7181                                 /* element only one in list -- list now empty */
7182                                 sf->sf_hp_elem_head = NULL;
7183                                 sf->sf_hp_elem_tail = NULL;
7184                         } else {
7185                                 /* remove element from head of list */
7186                                 sf->sf_hp_elem_head = sf->sf_hp_elem_head->next;
7187                         }
7188 
7189                         mutex_exit(&sf->sf_hp_daemon_mutex);
7190 
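                             /* handle the element with the daemon mutex dropped */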
7191                         switch (elem->what) {
7192                         case SF_ONLINE:
7193                                 /* online this target */
7194                                 target = elem->target;
7195                                 (void) ndi_devi_online(elem->dip, 0);
7196                                 (void) ndi_event_retrieve_cookie(
7197                                     sf->sf_event_hdl,
7198                                     target->sft_dip, FCAL_INSERT_EVENT,
7199                                     &sf_insert_eid, NDI_EVENT_NOPASS);
7200                                 (void) ndi_event_run_callbacks(sf->sf_event_hdl,
7201                                     target->sft_dip, sf_insert_eid, NULL);
7202                                 break;
7203                         case SF_OFFLINE:
7204                                 /* offline this target */
7205                                 target = elem->target;
7206                                 tgt_id = sf_alpa_to_switch[target->sft_al_pa];
7207                                 /* don't do NDI_DEVI_REMOVE for now */
7208                                 if (ndi_devi_offline(elem->dip, 0) !=
7209                                     NDI_SUCCESS) {
7210                                         SF_DEBUG(1, (sf, CE_WARN, "target %x, "
7211                                             "device offline failed", tgt_id));
7212                                 } else {
7213                                         SF_DEBUG(1, (sf, CE_NOTE, "target %x, "
7214                                             "device offline succeeded\n",
7215                                             tgt_id));
7216                                 }
7217                                 break;
7218                         }
7219                         kmem_free(elem, sizeof (struct sf_hp_elem));
7220                         mutex_enter(&sf->sf_hp_daemon_mutex);
7221                 }
7222 
7223                 /* if exit is not already signaled */
7224                 if (sf->sf_hp_exit == 0) {
7225                         /* wait to be signaled by work or exit */
7226                         CALLB_CPR_SAFE_BEGIN(&cprinfo);
7227                         cv_wait(&sf->sf_hp_daemon_cv, &sf->sf_hp_daemon_mutex);
7228                         CALLB_CPR_SAFE_END(&cprinfo, &sf->sf_hp_daemon_mutex);
7229                 }
7230         } while (sf->sf_hp_exit == 0);
7231 
7232         /* sf_hp_daemon_mutex is dropped by CALLB_CPR_EXIT */
7233         CALLB_CPR_EXIT(&cprinfo);
7234         thread_exit();                  /* no more hotplug thread */
7235         /* NOTREACHED */
7236 }