2976 remove useless offsetof() macros
--- old/usr/src/uts/sun/io/scsi/adapters/sf.c
+++ new/usr/src/uts/sun/io/scsi/adapters/sf.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
25 25 */
26 26
27 27 /*
28 28 * sf - Solaris Fibre Channel driver
29 29 *
30 30 * This module implements some of the Fibre Channel FC-4 layer, converting
31 31 * from FC frames to SCSI and back. (Note: no sequence management is done
32 32 * here, though.)
33 33 */
34 34
35 35 #if defined(lint) && !defined(DEBUG)
36 36 #define DEBUG 1
37 37 #endif
38 38
39 39 /*
40 40 * XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
41 41 * Need to use the ugly RAID LUN mappings in FCP Annex D
42 42 * to prevent SCSA from barfing. This *REALLY* needs to
43 43 * be addressed by the standards committee.
44 44 */
45 45 #define RAID_LUNS 1
46 46
47 47 #ifdef DEBUG
48 48 static int sfdebug = 0;
49 49 #include <sys/debug.h>
50 50
51 51 #define SF_DEBUG(level, args) \
52 52 if (sfdebug >= (level)) sf_log args
53 53 #else
54 54 #define SF_DEBUG(level, args)
55 55 #endif
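A note on the idiom above: SF_DEBUG predates C99 variadic macros, so it uses the classic double-parenthesis trick -- the caller supplies the entire argument list, parentheses included, as a single macro argument. A minimal sketch of how one call later in this file expands when DEBUG is defined:

    SF_DEBUG(2, (sf, CE_CONT, "sf_attach: DDI_RESUME for sf%d\n", instance));
    /* becomes: */
    if (sfdebug >= (2))
            sf_log (sf, CE_CONT, "sf_attach: DDI_RESUME for sf%d\n", instance);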
56 56
57 57 static int sf_bus_config_debug = 0;
58 58
59 -/* Why do I have to do this? */
60 -#define offsetof(s, m) (size_t)(&(((s *)0)->m))
61 -
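The deletion above is the substance of this change: the hand-rolled offsetof() merely re-derived the standard definition, which now comes from <sys/sysmacros.h>, added to the include list below. A minimal sketch of the equivalence (struct example is hypothetical):

    #include <sys/sysmacros.h>              /* supplies offsetof(s, m) */

    struct example { int a; char b; };      /* hypothetical */
    size_t off = offsetof(struct example, b);
    /* the removed macro computed the same value:
     * (size_t)(&(((struct example *)0)->b)) */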
62 59 #include <sys/scsi/scsi.h>
63 60 #include <sys/fc4/fcal.h>
64 61 #include <sys/fc4/fcp.h>
65 62 #include <sys/fc4/fcal_linkapp.h>
66 63 #include <sys/socal_cq_defs.h>
67 64 #include <sys/fc4/fcal_transport.h>
68 65 #include <sys/fc4/fcio.h>
69 66 #include <sys/scsi/adapters/sfvar.h>
70 67 #include <sys/scsi/impl/scsi_reset_notify.h>
71 68 #include <sys/stat.h>
72 69 #include <sys/varargs.h>
73 70 #include <sys/var.h>
74 71 #include <sys/thread.h>
75 72 #include <sys/proc.h>
76 73 #include <sys/kstat.h>
77 74 #include <sys/devctl.h>
78 75 #include <sys/scsi/targets/ses.h>
79 76 #include <sys/callb.h>
77 +#include <sys/sysmacros.h>
80 78
81 79 static int sf_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
82 80 static int sf_attach(dev_info_t *, ddi_attach_cmd_t);
83 81 static int sf_detach(dev_info_t *, ddi_detach_cmd_t);
84 82 static void sf_softstate_unlink(struct sf *);
85 83 static int sf_scsi_bus_config(dev_info_t *parent, uint_t flag,
86 84 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
87 85 static int sf_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
88 86 ddi_bus_config_op_t op, void *arg);
89 87 static int sf_scsi_tgt_init(dev_info_t *, dev_info_t *,
90 88 scsi_hba_tran_t *, struct scsi_device *);
91 89 static void sf_scsi_tgt_free(dev_info_t *, dev_info_t *,
92 90 scsi_hba_tran_t *, struct scsi_device *);
93 91 static int sf_pkt_alloc_extern(struct sf *, struct sf_pkt *,
94 92 int, int, int);
95 93 static void sf_pkt_destroy_extern(struct sf *, struct sf_pkt *);
96 94 static struct scsi_pkt *sf_scsi_init_pkt(struct scsi_address *,
97 95 struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
98 96 static void sf_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
99 97 static void sf_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
100 98 static void sf_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
101 99 static int sf_scsi_reset_notify(struct scsi_address *, int,
102 100 void (*)(caddr_t), caddr_t);
103 101 static int sf_scsi_get_name(struct scsi_device *, char *, int);
104 102 static int sf_scsi_get_bus_addr(struct scsi_device *, char *, int);
105 103 static int sf_add_cr_pool(struct sf *);
106 104 static int sf_cr_alloc(struct sf *, struct sf_pkt *, int (*)());
107 105 static void sf_cr_free(struct sf_cr_pool *, struct sf_pkt *);
108 106 static void sf_crpool_free(struct sf *);
109 107 static int sf_kmem_cache_constructor(void *, void *, int);
110 108 static void sf_kmem_cache_destructor(void *, void *);
111 109 static void sf_statec_callback(void *, int);
112 110 static int sf_login(struct sf *, uchar_t, uchar_t, uint_t, int);
113 111 static int sf_els_transport(struct sf *, struct sf_els_hdr *);
114 112 static void sf_els_callback(struct fcal_packet *);
115 113 static int sf_do_prli(struct sf *, struct sf_els_hdr *, struct la_els_logi *);
116 114 static int sf_do_adisc(struct sf *, struct sf_els_hdr *);
117 115 static int sf_do_reportlun(struct sf *, struct sf_els_hdr *,
118 116 struct sf_target *);
119 117 static void sf_reportlun_callback(struct fcal_packet *);
120 118 static int sf_do_inquiry(struct sf *, struct sf_els_hdr *,
121 119 struct sf_target *);
122 120 static void sf_inq_callback(struct fcal_packet *);
123 121 static struct fcal_packet *sf_els_alloc(struct sf *, uchar_t, int, int,
124 122 int, caddr_t *, caddr_t *);
125 123 static void sf_els_free(struct fcal_packet *);
126 124 static struct sf_target *sf_create_target(struct sf *,
127 125 struct sf_els_hdr *, int, int64_t);
128 126 #ifdef RAID_LUNS
129 127 static struct sf_target *sf_lookup_target(struct sf *, uchar_t *, int);
130 128 #else
131 129 static struct sf_target *sf_lookup_target(struct sf *, uchar_t *, int64_t);
132 130 #endif
133 131 static void sf_finish_init(struct sf *, int);
134 132 static void sf_offline_target(struct sf *, struct sf_target *);
135 133 static void sf_create_devinfo(struct sf *, struct sf_target *, int);
136 134 static int sf_create_props(dev_info_t *, struct sf_target *, int);
137 135 static int sf_commoncap(struct scsi_address *, char *, int, int, int);
138 136 static int sf_getcap(struct scsi_address *, char *, int);
139 137 static int sf_setcap(struct scsi_address *, char *, int, int);
140 138 static int sf_abort(struct scsi_address *, struct scsi_pkt *);
141 139 static int sf_reset(struct scsi_address *, int);
142 140 static void sf_abort_all(struct sf *, struct sf_target *, int, int, int);
143 141 static int sf_start(struct scsi_address *, struct scsi_pkt *);
144 142 static int sf_start_internal(struct sf *, struct sf_pkt *);
145 143 static void sf_fill_ids(struct sf *, struct sf_pkt *, struct sf_target *);
146 144 static int sf_prepare_pkt(struct sf *, struct sf_pkt *, struct sf_target *);
147 145 static int sf_dopoll(struct sf *, struct sf_pkt *);
148 146 static void sf_cmd_callback(struct fcal_packet *);
149 147 static void sf_throttle(struct sf *);
150 148 static void sf_watch(void *);
151 149 static void sf_throttle_start(struct sf *);
152 150 static void sf_check_targets(struct sf *);
153 151 static void sf_check_reset_delay(void *);
154 152 static int sf_target_timeout(struct sf *, struct sf_pkt *);
155 153 static void sf_force_lip(struct sf *);
156 154 static void sf_unsol_els_callback(void *, soc_response_t *, caddr_t);
157 155 static struct sf_els_hdr *sf_els_timeout(struct sf *, struct sf_els_hdr *);
158 156 /*PRINTFLIKE3*/
159 157 static void sf_log(struct sf *, int, const char *, ...);
160 158 static int sf_kstat_update(kstat_t *, int);
161 159 static int sf_open(dev_t *, int, int, cred_t *);
162 160 static int sf_close(dev_t, int, int, cred_t *);
163 161 static int sf_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
164 162 static struct sf_target *sf_get_target_from_dip(struct sf *, dev_info_t *);
165 163 static int sf_bus_get_eventcookie(dev_info_t *, dev_info_t *, char *,
166 164 ddi_eventcookie_t *);
167 165 static int sf_bus_add_eventcall(dev_info_t *, dev_info_t *,
168 166 ddi_eventcookie_t, void (*)(), void *, ddi_callback_id_t *cb_id);
169 167 static int sf_bus_remove_eventcall(dev_info_t *devi, ddi_callback_id_t cb_id);
170 168 static int sf_bus_post_event(dev_info_t *, dev_info_t *,
171 169 ddi_eventcookie_t, void *);
172 170
173 171 static void sf_hp_daemon(void *);
174 172
175 173 /*
176 174 * this is required to be able to supply a control node
177 175 * where ioctls can be executed
178 176 */
179 177 struct cb_ops sf_cb_ops = {
180 178 sf_open, /* open */
181 179 sf_close, /* close */
182 180 nodev, /* strategy */
183 181 nodev, /* print */
184 182 nodev, /* dump */
185 183 nodev, /* read */
186 184 nodev, /* write */
187 185 sf_ioctl, /* ioctl */
188 186 nodev, /* devmap */
189 187 nodev, /* mmap */
190 188 nodev, /* segmap */
191 189 nochpoll, /* poll */
192 190 ddi_prop_op, /* cb_prop_op */
193 191 0, /* streamtab */
194 192 D_MP | D_NEW | D_HOTPLUG /* driver flags */
195 193
196 194 };
197 195
198 196 /*
199 197 * autoconfiguration routines.
200 198 */
201 199 static struct dev_ops sf_ops = {
202 200 DEVO_REV, /* devo_rev, */
203 201 0, /* refcnt */
204 202 sf_info, /* info */
205 203 nulldev, /* identify */
206 204 nulldev, /* probe */
207 205 sf_attach, /* attach */
208 206 sf_detach, /* detach */
209 207 nodev, /* reset */
210 208 &sf_cb_ops, /* driver operations */
211 209 NULL, /* bus operations */
212 210 NULL, /* power management */
213 211 ddi_quiesce_not_supported, /* devo_quiesce */
214 212 };
215 213
216 214 #define SF_NAME "FC-AL FCP Nexus Driver" /* Name of the module. */
217 215 static char sf_version[] = "1.72 08/19/2008"; /* version of the module */
218 216
219 217 static struct modldrv modldrv = {
220 218 &mod_driverops, /* Type of module. This one is a driver */
221 219 SF_NAME,
222 220 &sf_ops, /* driver ops */
223 221 };
224 222
225 223 static struct modlinkage modlinkage = {
226 224 MODREV_1, (void *)&modldrv, NULL
227 225 };
228 226
229 227 /* XXXXXX The following is here to handle broken targets -- remove it later */
230 228 static int sf_reportlun_forever = 0;
231 229 /* XXXXXX */
232 230 static int sf_lip_on_plogo = 0;
233 231 static int sf_els_retries = SF_ELS_RETRIES;
234 232 static struct sf *sf_head = NULL;
235 233 static int sf_target_scan_cnt = 4;
236 234 static int sf_pkt_scan_cnt = 5;
237 235 static int sf_pool_scan_cnt = 1800;
238 236 static void *sf_state = NULL;
239 237 static int sf_watchdog_init = 0;
240 238 static int sf_watchdog_time = 0;
241 239 static int sf_watchdog_timeout = 1;
242 240 static int sf_watchdog_tick;
243 241 static int sf_watch_running = 0;
244 242 static timeout_id_t sf_watchdog_id;
245 243 static timeout_id_t sf_reset_timeout_id;
246 244 static int sf_max_targets = SF_MAX_TARGETS;
247 245 static kmutex_t sf_global_mutex;
248 246 static int sf_core = 0;
249 247 int *sf_token = NULL; /* Must not be static or lint complains. */
250 248 static kcondvar_t sf_watch_cv;
251 249 extern pri_t minclsyspri;
252 250 static ddi_eventcookie_t sf_insert_eid;
253 251 static ddi_eventcookie_t sf_remove_eid;
254 252
255 253 static ndi_event_definition_t sf_event_defs[] = {
256 254 { SF_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL, 0 },
257 255 { SF_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT, 0 }
258 256 };
259 257
260 258 #define SF_N_NDI_EVENTS \
261 259 (sizeof (sf_event_defs) / sizeof (ndi_event_definition_t))
262 260
263 261 #ifdef DEBUG
264 262 static int sf_lip_flag = 1; /* bool: to allow LIPs */
265 263 static int sf_reset_flag = 1; /* bool: to allow reset after LIP */
266 264 static int sf_abort_flag = 0; /* bool: to do just one abort */
267 265 #endif
268 266
269 267 extern int64_t ddi_get_lbolt64(void);
270 268
271 269 /*
272 270 * for converting between target number (switch) and hard address/AL_PA
273 271 */
274 272 static uchar_t sf_switch_to_alpa[] = {
275 273 0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
276 274 0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
277 275 0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
278 276 0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
279 277 0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
280 278 0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
281 279 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
282 280 0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
283 281 0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
284 282 0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
285 283 0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
286 284 0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
287 285 0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
288 286 };
289 287
290 288 static uchar_t sf_alpa_to_switch[] = {
291 289 0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
292 290 0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
293 291 0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
294 292 0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
295 293 0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
296 294 0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
297 295 0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
298 296 0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
299 297 0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
300 298 0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
301 299 0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
302 300 0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
303 301 0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
304 302 0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
305 303 0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
306 304 0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
307 305 0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
308 306 0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
309 307 0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
310 308 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
311 309 0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
312 310 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
313 311 0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
314 312 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
315 313 };
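The two tables above are inverses of each other across the 126 valid arbitrated-loop physical addresses; the 0x00 entries in sf_alpa_to_switch either pad invalid AL_PA values or map AL_PA 0xef back to target 0. A minimal round-trip sketch (tgt is a hypothetical target number):

    uchar_t alpa = sf_switch_to_alpa[tgt];  /* e.g. tgt 1 -> AL_PA 0xe8 */
    ASSERT(sf_alpa_to_switch[alpa] == tgt); /* e.g. 0xe8 -> tgt 1 */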
316 314
317 315 /*
318 316 * these macros call the proper transport-layer function given
319 317 * a particular transport
320 318 */
321 319 #define soc_transport(a, b, c, d) (*a->fcal_ops->fcal_transport)(b, c, d)
322 320 #define soc_transport_poll(a, b, c, d)\
323 321 (*a->fcal_ops->fcal_transport_poll)(b, c, d)
324 322 #define soc_get_lilp_map(a, b, c, d, e)\
325 323 (*a->fcal_ops->fcal_lilp_map)(b, c, d, e)
326 324 #define soc_force_lip(a, b, c, d, e)\
327 325 (*a->fcal_ops->fcal_force_lip)(b, c, d, e)
328 326 #define soc_abort(a, b, c, d, e)\
329 327 (*a->fcal_ops->fcal_abort_cmd)(b, c, d, e)
330 328 #define soc_force_reset(a, b, c, d)\
331 329 (*a->fcal_ops->fcal_force_reset)(b, c, d)
332 330 #define soc_add_ulp(a, b, c, d, e, f, g, h)\
333 331 (*a->fcal_ops->fcal_add_ulp)(b, c, d, e, f, g, h)
334 332 #define soc_remove_ulp(a, b, c, d, e)\
335 333 (*a->fcal_ops->fcal_remove_ulp)(b, c, d, e)
336 334 #define soc_take_core(a, b) (*a->fcal_ops->fcal_take_core)(b)
337 335
338 336
339 337 /* power management property defines (should be in a common include file?) */
340 338 #define PM_HARDWARE_STATE_PROP "pm-hardware-state"
341 339 #define PM_NEEDS_SUSPEND_RESUME "needs-suspend-resume"
342 340
343 341
344 342 /* node properties */
345 343 #define NODE_WWN_PROP "node-wwn"
346 344 #define PORT_WWN_PROP "port-wwn"
347 345 #define LIP_CNT_PROP "lip-count"
348 346 #define TARGET_PROP "target"
349 347 #define LUN_PROP "lun"
350 348
351 349
352 350 /*
353 351 * initialize this driver and install this module
354 352 */
355 353 int
356 354 _init(void)
357 355 {
358 356 int i;
359 357
360 358 i = ddi_soft_state_init(&sf_state, sizeof (struct sf),
361 359 SF_INIT_ITEMS);
362 360 if (i != 0)
363 361 return (i);
364 362
365 363 if ((i = scsi_hba_init(&modlinkage)) != 0) {
366 364 ddi_soft_state_fini(&sf_state);
367 365 return (i);
368 366 }
369 367
370 368 mutex_init(&sf_global_mutex, NULL, MUTEX_DRIVER, NULL);
371 369 sf_watch_running = 0;
372 370 cv_init(&sf_watch_cv, NULL, CV_DRIVER, NULL);
373 371
374 372 if ((i = mod_install(&modlinkage)) != 0) {
375 373 mutex_destroy(&sf_global_mutex);
376 374 cv_destroy(&sf_watch_cv);
377 375 scsi_hba_fini(&modlinkage);
378 376 ddi_soft_state_fini(&sf_state);
379 377 return (i);
380 378 }
381 379
382 380 return (i);
383 381 }
384 382
385 383
386 384 /*
387 385 * remove this driver module from the system
388 386 */
389 387 int
390 388 _fini(void)
391 389 {
392 390 int i;
393 391
394 392 if ((i = mod_remove(&modlinkage)) == 0) {
395 393 scsi_hba_fini(&modlinkage);
396 394 mutex_destroy(&sf_global_mutex);
397 395 cv_destroy(&sf_watch_cv);
398 396 ddi_soft_state_fini(&sf_state);
399 397 }
400 398 return (i);
401 399 }
402 400
403 401
404 402 int
405 403 _info(struct modinfo *modinfop)
406 404 {
407 405 return (mod_info(&modlinkage, modinfop));
408 406 }
409 407
410 408 /*
411 409 * Given the device number return the devinfo pointer or instance
412 410 */
413 411 /*ARGSUSED*/
414 412 static int
415 413 sf_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
416 414 {
417 415 int instance = SF_MINOR2INST(getminor((dev_t)arg));
418 416 struct sf *sf;
419 417
420 418 switch (infocmd) {
421 419 case DDI_INFO_DEVT2DEVINFO:
422 420 sf = ddi_get_soft_state(sf_state, instance);
423 421 if (sf != NULL)
424 422 *result = sf->sf_dip;
425 423 else {
426 424 *result = NULL;
427 425 return (DDI_FAILURE);
428 426 }
429 427 break;
430 428
431 429 case DDI_INFO_DEVT2INSTANCE:
432 430 *result = (void *)(uintptr_t)instance;
433 431 break;
434 432 default:
435 433 return (DDI_FAILURE);
436 434 }
437 435 return (DDI_SUCCESS);
438 436 }
439 437
440 438 /*
441 439 * either attach or resume this driver
442 440 */
443 441 static int
444 442 sf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
445 443 {
446 444 int instance;
447 445 int mutex_initted = FALSE;
448 446 uint_t ccount;
449 447 size_t i, real_size;
450 448 struct fcal_transport *handle;
451 449 char buf[64];
452 450 struct sf *sf, *tsf;
453 451 scsi_hba_tran_t *tran = NULL;
454 452 int handle_bound = FALSE;
455 453 kthread_t *tp;
456 454
457 455
458 456 switch ((int)cmd) {
459 457
460 458 case DDI_RESUME:
461 459
462 460 /*
463 461 * we've previously been SF_STATE_OFFLINEd by a DDI_SUSPEND,
464 462 * so time to undo that and get going again by forcing a
465 463 * lip
466 464 */
467 465
468 466 instance = ddi_get_instance(dip);
469 467
470 468 sf = ddi_get_soft_state(sf_state, instance);
471 469 SF_DEBUG(2, (sf, CE_CONT,
472 470 "sf_attach: DDI_RESUME for sf%d\n", instance));
473 471 if (sf == NULL) {
474 472 cmn_err(CE_WARN, "sf%d: bad soft state", instance);
475 473 return (DDI_FAILURE);
476 474 }
477 475
478 476 /*
479 477 * clear suspended flag so that normal operations can resume
480 478 */
481 479 mutex_enter(&sf->sf_mutex);
482 480 sf->sf_state &= ~SF_STATE_SUSPENDED;
483 481 mutex_exit(&sf->sf_mutex);
484 482
485 483 /*
486 484 * force a login by setting our state to offline
487 485 */
488 486 sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
489 487 sf->sf_state = SF_STATE_OFFLINE;
490 488
491 489 /*
492 490 * call transport routine to register state change and
493 491 * ELS callback routines (to register us as a ULP)
494 492 */
495 493 soc_add_ulp(sf->sf_sochandle, sf->sf_socp,
496 494 sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
497 495 sf_statec_callback, sf_unsol_els_callback, NULL, sf);
498 496
499 497 /*
500 498 * call transport routine to force loop initialization
501 499 */
502 500 (void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
503 501 sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);
504 502
505 503 /*
506 504 * increment watchdog init flag, setting watchdog timeout
507 505 * if we are the first (since somebody has to do it)
508 506 */
509 507 mutex_enter(&sf_global_mutex);
510 508 if (!sf_watchdog_init++) {
511 509 mutex_exit(&sf_global_mutex);
512 510 sf_watchdog_id = timeout(sf_watch,
513 511 (caddr_t)0, sf_watchdog_tick);
514 512 } else {
515 513 mutex_exit(&sf_global_mutex);
516 514 }
517 515
518 516 return (DDI_SUCCESS);
519 517
520 518 case DDI_ATTACH:
521 519
522 520 /*
523 521 * this instance attaching for the first time
524 522 */
525 523
526 524 instance = ddi_get_instance(dip);
527 525
528 526 if (ddi_soft_state_zalloc(sf_state, instance) !=
529 527 DDI_SUCCESS) {
530 528 cmn_err(CE_WARN, "sf%d: failed to allocate soft state",
531 529 instance);
532 530 return (DDI_FAILURE);
533 531 }
534 532
535 533 sf = ddi_get_soft_state(sf_state, instance);
536 534 SF_DEBUG(4, (sf, CE_CONT,
537 535 "sf_attach: DDI_ATTACH for sf%d\n", instance));
538 536 if (sf == NULL) {
539 537 /* this shouldn't happen since we just allocated it */
540 538 cmn_err(CE_WARN, "sf%d: bad soft state", instance);
541 539 return (DDI_FAILURE);
542 540 }
543 541
544 542 /*
545 543 * from this point on, if there's an error, we must de-allocate
546 544 * soft state before returning DDI_FAILURE
547 545 */
548 546
549 547 if ((handle = ddi_get_parent_data(dip)) == NULL) {
550 548 cmn_err(CE_WARN,
551 549 "sf%d: failed to obtain transport handle",
552 550 instance);
553 551 goto fail;
554 552 }
555 553
556 554 /* fill in our soft state structure */
557 555 sf->sf_dip = dip;
558 556 sf->sf_state = SF_STATE_INIT;
559 557 sf->sf_throttle = handle->fcal_cmdmax;
560 558 sf->sf_sochandle = handle;
561 559 sf->sf_socp = handle->fcal_handle;
562 560 sf->sf_check_n_close = 0;
563 561
564 562 /* create a command/response buffer pool for this instance */
565 563 if (sf_add_cr_pool(sf) != DDI_SUCCESS) {
566 564 cmn_err(CE_WARN,
567 565 "sf%d: failed to allocate command/response pool",
568 566 instance);
569 567 goto fail;
570 568 }
571 569
572 570 /* create a cache for this instance */
573 571 (void) sprintf(buf, "sf%d_cache", instance);
574 572 sf->sf_pkt_cache = kmem_cache_create(buf,
575 573 sizeof (fcal_packet_t) + sizeof (struct sf_pkt) +
576 574 scsi_pkt_size(), 8,
577 575 sf_kmem_cache_constructor, sf_kmem_cache_destructor,
578 576 NULL, NULL, NULL, 0);
579 577 if (sf->sf_pkt_cache == NULL) {
580 578 cmn_err(CE_WARN, "sf%d: failed to allocate kmem cache",
581 579 instance);
582 580 goto fail;
583 581 }
584 582
585 583 /* set up a handle and allocate memory for DMA */
586 584 if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->
587 585 fcal_dmaattr, DDI_DMA_DONTWAIT, NULL, &sf->
588 586 sf_lilp_dmahandle) != DDI_SUCCESS) {
589 587 cmn_err(CE_WARN,
590 588 "sf%d: failed to allocate dma handle for lilp map",
591 589 instance);
592 590 goto fail;
593 591 }
594 592 i = sizeof (struct fcal_lilp_map) + 1;
595 593 if (ddi_dma_mem_alloc(sf->sf_lilp_dmahandle,
596 594 i, sf->sf_sochandle->
597 595 fcal_accattr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
598 596 (caddr_t *)&sf->sf_lilp_map, &real_size,
599 597 &sf->sf_lilp_acchandle) != DDI_SUCCESS) {
600 598 cmn_err(CE_WARN, "sf%d: failed to allocate lilp map",
601 599 instance);
602 600 goto fail;
603 601 }
604 602 if (real_size < i) {
605 603 /* no error message ??? */
606 604 goto fail; /* trouble allocating memory */
607 605 }
608 606
609 607 /*
610 608 * set up the address for the DMA transfers (getting a cookie)
611 609 */
612 610 if (ddi_dma_addr_bind_handle(sf->sf_lilp_dmahandle, NULL,
613 611 (caddr_t)sf->sf_lilp_map, real_size,
614 612 DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
615 613 &sf->sf_lilp_dmacookie, &ccount) != DDI_DMA_MAPPED) {
616 614 cmn_err(CE_WARN,
617 615 "sf%d: failed to bind dma handle for lilp map",
618 616 instance);
619 617 goto fail;
620 618 }
621 619 handle_bound = TRUE;
622 620 /* ensure only one cookie was allocated */
623 621 if (ccount != 1) {
624 622 goto fail;
625 623 }
626 624
627 625 /* round the LILP map and DMA cookie addresses up to even */
628 626 sf->sf_lilp_map = (struct fcal_lilp_map *)(((uintptr_t)sf->
629 627 sf_lilp_map + 1) & ~1);
630 628 sf->sf_lilp_dmacookie.dmac_address = (sf->
631 629 sf_lilp_dmacookie.dmac_address + 1) & ~1;
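The rounding above is why the earlier allocation asked for sizeof (struct fcal_lilp_map) + 1 bytes: the spare byte guarantees that bumping an odd base address up to the next even one cannot run past the buffer. A minimal sketch of the idiom (names hypothetical):

    len = sizeof (struct fcal_lilp_map) + 1;        /* one spare byte */
    raw = alloc(len);                               /* base may be odd */
    map = (void *)(((uintptr_t)raw + 1) & ~1);      /* round up to even */
    /* map + sizeof (struct fcal_lilp_map) never exceeds raw + len */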
632 630
633 631 /* set up all of our mutexes and condition variables */
634 632 mutex_init(&sf->sf_mutex, NULL, MUTEX_DRIVER, NULL);
635 633 mutex_init(&sf->sf_cmd_mutex, NULL, MUTEX_DRIVER, NULL);
636 634 mutex_init(&sf->sf_cr_mutex, NULL, MUTEX_DRIVER, NULL);
637 635 mutex_init(&sf->sf_hp_daemon_mutex, NULL, MUTEX_DRIVER, NULL);
638 636 cv_init(&sf->sf_cr_cv, NULL, CV_DRIVER, NULL);
639 637 cv_init(&sf->sf_hp_daemon_cv, NULL, CV_DRIVER, NULL);
640 638
641 639 mutex_initted = TRUE;
642 640
643 641 /* create our devctl minor node */
644 642 if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
645 643 SF_INST2DEVCTL_MINOR(instance),
646 644 DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
647 645 cmn_err(CE_WARN, "sf%d: ddi_create_minor_node failed"
648 646 " for devctl", instance);
649 647 goto fail;
650 648 }
651 649
652 650 /* create fc minor node */
653 651 if (ddi_create_minor_node(dip, "fc", S_IFCHR,
654 652 SF_INST2FC_MINOR(instance), DDI_NT_FC_ATTACHMENT_POINT,
655 653 0) != DDI_SUCCESS) {
656 654 cmn_err(CE_WARN, "sf%d: ddi_create_minor_node failed"
657 655 " for fc", instance);
658 656 goto fail;
659 657 }
660 658 /* allocate a SCSI transport structure */
661 659 tran = scsi_hba_tran_alloc(dip, 0);
662 660 if (tran == NULL) {
663 661 /* remove all minor nodes created */
664 662 ddi_remove_minor_node(dip, NULL);
665 663 cmn_err(CE_WARN, "sf%d: scsi_hba_tran_alloc failed",
666 664 instance);
667 665 goto fail;
668 666 }
669 667
670 668 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
671 669 scsi_size_clean(dip); /* SCSI_SIZE_CLEAN_VERIFY ok */
672 670
673 671 /* save ptr to new transport structure and fill it in */
674 672 sf->sf_tran = tran;
675 673
676 674 tran->tran_hba_private = sf;
677 675 tran->tran_tgt_private = NULL;
678 676 tran->tran_tgt_init = sf_scsi_tgt_init;
679 677 tran->tran_tgt_probe = NULL;
680 678 tran->tran_tgt_free = sf_scsi_tgt_free;
681 679
682 680 tran->tran_start = sf_start;
683 681 tran->tran_abort = sf_abort;
684 682 tran->tran_reset = sf_reset;
685 683 tran->tran_getcap = sf_getcap;
686 684 tran->tran_setcap = sf_setcap;
687 685 tran->tran_init_pkt = sf_scsi_init_pkt;
688 686 tran->tran_destroy_pkt = sf_scsi_destroy_pkt;
689 687 tran->tran_dmafree = sf_scsi_dmafree;
690 688 tran->tran_sync_pkt = sf_scsi_sync_pkt;
691 689 tran->tran_reset_notify = sf_scsi_reset_notify;
692 690
693 691 /*
694 692 * register event notification routines with scsa
695 693 */
696 694 tran->tran_get_eventcookie = sf_bus_get_eventcookie;
697 695 tran->tran_add_eventcall = sf_bus_add_eventcall;
698 696 tran->tran_remove_eventcall = sf_bus_remove_eventcall;
699 697 tran->tran_post_event = sf_bus_post_event;
700 698
701 699 /*
702 700 * register bus configure/unconfigure
703 701 */
704 702 tran->tran_bus_config = sf_scsi_bus_config;
705 703 tran->tran_bus_unconfig = sf_scsi_bus_unconfig;
706 704
707 705 /*
708 706 * allocate an ndi event handle
709 707 */
710 708 sf->sf_event_defs = (ndi_event_definition_t *)
711 709 kmem_zalloc(sizeof (sf_event_defs), KM_SLEEP);
712 710
713 711 bcopy(sf_event_defs, sf->sf_event_defs,
714 712 sizeof (sf_event_defs));
715 713
716 714 (void) ndi_event_alloc_hdl(dip, NULL,
717 715 &sf->sf_event_hdl, NDI_SLEEP);
718 716
719 717 sf->sf_events.ndi_events_version = NDI_EVENTS_REV1;
720 718 sf->sf_events.ndi_n_events = SF_N_NDI_EVENTS;
721 719 sf->sf_events.ndi_event_defs = sf->sf_event_defs;
722 720
723 721 if (ndi_event_bind_set(sf->sf_event_hdl,
724 722 &sf->sf_events, NDI_SLEEP) != NDI_SUCCESS) {
725 723 goto fail;
726 724 }
727 725
728 726 tran->tran_get_name = sf_scsi_get_name;
729 727 tran->tran_get_bus_addr = sf_scsi_get_bus_addr;
730 728
731 729 /* setup and attach SCSI hba transport */
732 730 if (scsi_hba_attach_setup(dip, sf->sf_sochandle->
733 731 fcal_dmaattr, tran, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
734 732 cmn_err(CE_WARN, "sf%d: scsi_hba_attach_setup failed",
735 733 instance);
736 734 goto fail;
737 735 }
738 736
739 737 /* set up kstats */
740 738 if ((sf->sf_ksp = kstat_create("sf", instance, "statistics",
741 739 "controller", KSTAT_TYPE_RAW, sizeof (struct sf_stats),
742 740 KSTAT_FLAG_VIRTUAL)) == NULL) {
743 741 cmn_err(CE_WARN, "sf%d: failed to create kstat",
744 742 instance);
745 743 } else {
746 744 sf->sf_stats.version = 2;
747 745 (void) sprintf(sf->sf_stats.drvr_name,
748 746 "%s: %s", SF_NAME, sf_version);
749 747 sf->sf_ksp->ks_data = (void *)&sf->sf_stats;
750 748 sf->sf_ksp->ks_private = sf;
751 749 sf->sf_ksp->ks_update = sf_kstat_update;
752 750 kstat_install(sf->sf_ksp);
753 751 }
754 752
755 753 /* create the hotplug thread */
756 754 mutex_enter(&sf->sf_hp_daemon_mutex);
757 755 tp = thread_create(NULL, 0,
758 756 (void (*)())sf_hp_daemon, sf, 0, &p0, TS_RUN, minclsyspri);
759 757 sf->sf_hp_tid = tp->t_did;
760 758 mutex_exit(&sf->sf_hp_daemon_mutex);
761 759
762 760 /* add this soft state instance to the head of the list */
763 761 mutex_enter(&sf_global_mutex);
764 762 sf->sf_next = sf_head;
765 763 tsf = sf_head;
766 764 sf_head = sf;
767 765
768 766 /*
769 767 * find entry in list that has the same FC-AL handle (if any)
770 768 */
771 769 while (tsf != NULL) {
772 770 if (tsf->sf_socp == sf->sf_socp) {
773 771 break; /* found matching entry */
774 772 }
775 773 tsf = tsf->sf_next;
776 774 }
777 775
778 776 if (tsf != NULL) {
779 777 /* if we found a matching entry keep track of it */
780 778 sf->sf_sibling = tsf;
781 779 }
782 780
783 781 /*
784 782 * increment watchdog init flag, setting watchdog timeout
785 783 * if we are the first (since somebody has to do it)
786 784 */
787 785 if (!sf_watchdog_init++) {
788 786 mutex_exit(&sf_global_mutex);
789 787 sf_watchdog_tick = sf_watchdog_timeout *
790 788 drv_usectohz(1000000);
791 789 sf_watchdog_id = timeout(sf_watch,
792 790 NULL, sf_watchdog_tick);
793 791 } else {
794 792 mutex_exit(&sf_global_mutex);
795 793 }
796 794
797 795 if (tsf != NULL) {
798 796 /*
799 797 * set up matching entry to be our sibling
800 798 */
801 799 mutex_enter(&tsf->sf_mutex);
802 800 tsf->sf_sibling = sf;
803 801 mutex_exit(&tsf->sf_mutex);
804 802 }
805 803
806 804 /*
807 805 * create this property so that PM code knows we want
808 806 * to be suspended at PM time
809 807 */
810 808 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
811 809 PM_HARDWARE_STATE_PROP, PM_NEEDS_SUSPEND_RESUME);
812 810
813 811 /* log the fact that we have a new device */
814 812 ddi_report_dev(dip);
815 813
816 814 /*
817 815 * force a login by setting our state to offline
818 816 */
819 817 sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
820 818 sf->sf_state = SF_STATE_OFFLINE;
821 819
822 820 /*
823 821 * call transport routine to register state change and
824 822 * ELS callback routines (to register us as a ULP)
825 823 */
826 824 soc_add_ulp(sf->sf_sochandle, sf->sf_socp,
827 825 sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
828 826 sf_statec_callback, sf_unsol_els_callback, NULL, sf);
829 827
830 828 /*
831 829 * call transport routine to force loop initialization
832 830 */
833 831 (void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
834 832 sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);
835 833 sf->sf_reset_time = ddi_get_lbolt64();
836 834 return (DDI_SUCCESS);
837 835
838 836 default:
839 837 return (DDI_FAILURE);
840 838 }
841 839
842 840 fail:
843 841 cmn_err(CE_WARN, "sf%d: failed to attach", instance);
844 842
845 843 /*
846 844 * Unbind and free event set
847 845 */
848 846 if (sf->sf_event_hdl) {
849 847 (void) ndi_event_unbind_set(sf->sf_event_hdl,
850 848 &sf->sf_events, NDI_SLEEP);
851 849 (void) ndi_event_free_hdl(sf->sf_event_hdl);
852 850 }
853 851
854 852 if (sf->sf_event_defs) {
855 853 kmem_free(sf->sf_event_defs, sizeof (sf_event_defs));
856 854 }
857 855
858 856 if (sf->sf_tran != NULL) {
859 857 scsi_hba_tran_free(sf->sf_tran);
860 858 }
861 859 while (sf->sf_cr_pool != NULL) {
862 860 sf_crpool_free(sf);
863 861 }
864 862 if (sf->sf_lilp_dmahandle != NULL) {
865 863 if (handle_bound) {
866 864 (void) ddi_dma_unbind_handle(sf->sf_lilp_dmahandle);
867 865 }
868 866 ddi_dma_free_handle(&sf->sf_lilp_dmahandle);
869 867 }
870 868 if (sf->sf_pkt_cache != NULL) {
871 869 kmem_cache_destroy(sf->sf_pkt_cache);
872 870 }
873 871 if (sf->sf_lilp_map != NULL) {
874 872 ddi_dma_mem_free(&sf->sf_lilp_acchandle);
875 873 }
876 874 if (sf->sf_ksp != NULL) {
877 875 kstat_delete(sf->sf_ksp);
878 876 }
879 877 if (mutex_initted) {
880 878 mutex_destroy(&sf->sf_mutex);
881 879 mutex_destroy(&sf->sf_cmd_mutex);
882 880 mutex_destroy(&sf->sf_cr_mutex);
883 881 mutex_destroy(&sf->sf_hp_daemon_mutex);
884 882 cv_destroy(&sf->sf_cr_cv);
885 883 cv_destroy(&sf->sf_hp_daemon_cv);
886 884 }
887 885 mutex_enter(&sf_global_mutex);
888 886
889 887 /*
890 888 * kill off the watchdog if we are the last instance
891 889 */
892 890 if (!--sf_watchdog_init) {
893 891 timeout_id_t tid = sf_watchdog_id;
894 892 mutex_exit(&sf_global_mutex);
895 893 (void) untimeout(tid);
896 894 } else {
897 895 mutex_exit(&sf_global_mutex);
898 896 }
899 897
900 898 ddi_soft_state_free(sf_state, instance);
901 899
902 900 if (tran != NULL) {
903 901 /* remove all minor nodes */
904 902 ddi_remove_minor_node(dip, NULL);
905 903 }
906 904
907 905 return (DDI_FAILURE);
908 906 }
909 907
910 908
911 909 /* ARGSUSED */
912 910 static int
913 911 sf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
914 912 {
915 913 struct sf *sf;
916 914 int instance;
917 915 int i;
918 916 struct sf_target *target;
919 917 timeout_id_t tid;
920 918
921 919
922 920
923 921 /* NO OTHER THREADS ARE RUNNING */
924 922
925 923 instance = ddi_get_instance(dip);
926 924
927 925 if ((sf = ddi_get_soft_state(sf_state, instance)) == NULL) {
928 926 cmn_err(CE_WARN, "sf_detach, sf%d: bad soft state", instance);
929 927 return (DDI_FAILURE);
930 928 }
931 929
932 930 switch (cmd) {
933 931
934 932 case DDI_SUSPEND:
935 933 /*
936 934 * suspend our instance
937 935 */
938 936
939 937 SF_DEBUG(2, (sf, CE_CONT,
940 938 "sf_detach: DDI_SUSPEND for sf%d\n", instance));
941 939 /*
942 940 * There is a race condition in socal: if a ULP removes
943 941 * itself from the callback list while callbacks are being
944 942 * made, the for loop in socal may follow a stale cblist
945 943 * and panic while trying to fetch cblist->next.
946 944 */
947 945
948 946 /* call transport to remove and unregister our callbacks */
949 947 soc_remove_ulp(sf->sf_sochandle, sf->sf_socp,
950 948 sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP, sf);
951 949
952 950 /*
953 951 * begin process of clearing outstanding commands
954 952 * by issuing a lip
955 953 */
956 954 sf_force_lip(sf);
957 955
958 956 /*
959 957 * toggle the device OFFLINE in order to cause
960 958 * outstanding commands to drain
961 959 */
962 960 mutex_enter(&sf->sf_mutex);
963 961 sf->sf_lip_cnt++;
964 962 sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
965 963 sf->sf_state = (SF_STATE_OFFLINE | SF_STATE_SUSPENDED);
966 964 for (i = 0; i < sf_max_targets; i++) {
967 965 target = sf->sf_targets[i];
968 966 if (target != NULL) {
969 967 struct sf_target *ntarget;
970 968
971 969 mutex_enter(&target->sft_mutex);
972 970 if (!(target->sft_state & SF_TARGET_OFFLINE)) {
973 971 target->sft_state |=
974 972 (SF_TARGET_BUSY | SF_TARGET_MARK);
975 973 }
976 974 /* do this for all LUNs as well */
977 975 for (ntarget = target->sft_next_lun;
978 976 ntarget;
979 977 ntarget = ntarget->sft_next_lun) {
980 978 mutex_enter(&ntarget->sft_mutex);
981 979 if (!(ntarget->sft_state &
982 980 SF_TARGET_OFFLINE)) {
983 981 ntarget->sft_state |=
984 982 (SF_TARGET_BUSY |
985 983 SF_TARGET_MARK);
986 984 }
987 985 mutex_exit(&ntarget->sft_mutex);
988 986 }
989 987 mutex_exit(&target->sft_mutex);
990 988 }
991 989 }
992 990 mutex_exit(&sf->sf_mutex);
993 991 mutex_enter(&sf_global_mutex);
994 992
995 993 /*
996 994 * kill off the watchdog if we are the last instance
997 995 */
998 996 if (!--sf_watchdog_init) {
999 997 tid = sf_watchdog_id;
1000 998 mutex_exit(&sf_global_mutex);
1001 999 (void) untimeout(tid);
1002 1000 } else {
1003 1001 mutex_exit(&sf_global_mutex);
1004 1002 }
1005 1003
1006 1004 return (DDI_SUCCESS);
1007 1005
1008 1006 case DDI_DETACH:
1009 1007 /*
1010 1008 * detach this instance
1011 1009 */
1012 1010
1013 1011 SF_DEBUG(2, (sf, CE_CONT,
1014 1012 "sf_detach: DDI_DETACH for sf%d\n", instance));
1015 1013
1016 1014 /* remove this "sf" from the list of sf softstates */
1017 1015 sf_softstate_unlink(sf);
1018 1016
1019 1017 /*
1020 1018 * prior to taking any DDI_DETACH actions, toggle the
1021 1019 * device OFFLINE in order to cause outstanding
1022 1020 * commands to drain
1023 1021 */
1024 1022 mutex_enter(&sf->sf_mutex);
1025 1023 sf->sf_lip_cnt++;
1026 1024 sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
1027 1025 sf->sf_state = SF_STATE_OFFLINE;
1028 1026 for (i = 0; i < sf_max_targets; i++) {
1029 1027 target = sf->sf_targets[i];
1030 1028 if (target != NULL) {
1031 1029 struct sf_target *ntarget;
1032 1030
1033 1031 mutex_enter(&target->sft_mutex);
1034 1032 if (!(target->sft_state & SF_TARGET_OFFLINE)) {
1035 1033 target->sft_state |=
1036 1034 (SF_TARGET_BUSY | SF_TARGET_MARK);
1037 1035 }
1038 1036 for (ntarget = target->sft_next_lun;
1039 1037 ntarget;
1040 1038 ntarget = ntarget->sft_next_lun) {
1041 1039 mutex_enter(&ntarget->sft_mutex);
1042 1040 if (!(ntarget->sft_state &
1043 1041 SF_TARGET_OFFLINE)) {
1044 1042 ntarget->sft_state |=
1045 1043 (SF_TARGET_BUSY |
1046 1044 SF_TARGET_MARK);
1047 1045 }
1048 1046 mutex_exit(&ntarget->sft_mutex);
1049 1047 }
1050 1048 mutex_exit(&target->sft_mutex);
1051 1049 }
1052 1050 }
1053 1051 mutex_exit(&sf->sf_mutex);
1054 1052
1055 1053 /* call transport to remove and unregister our callbacks */
1056 1054 soc_remove_ulp(sf->sf_sochandle, sf->sf_socp,
1057 1055 sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP, sf);
1058 1056
1059 1057 /*
1060 1058 * kill off the watchdog if we are the last instance
1061 1059 */
1062 1060 mutex_enter(&sf_global_mutex);
1063 1061 if (!--sf_watchdog_init) {
1064 1062 tid = sf_watchdog_id;
1065 1063 mutex_exit(&sf_global_mutex);
1066 1064 (void) untimeout(tid);
1067 1065 } else {
1068 1066 mutex_exit(&sf_global_mutex);
1069 1067 }
1070 1068
1071 1069 /* signal sf_hp_daemon() to exit and wait for exit */
1072 1070 mutex_enter(&sf->sf_hp_daemon_mutex);
1073 1071 ASSERT(sf->sf_hp_tid);
1074 1072 sf->sf_hp_exit = 1; /* flag exit */
1075 1073 cv_signal(&sf->sf_hp_daemon_cv);
1076 1074 mutex_exit(&sf->sf_hp_daemon_mutex);
1077 1075 thread_join(sf->sf_hp_tid); /* wait for hotplug to exit */
1078 1076
1079 1077 /*
1080 1078 * Unbind and free event set
1081 1079 */
1082 1080 if (sf->sf_event_hdl) {
1083 1081 (void) ndi_event_unbind_set(sf->sf_event_hdl,
1084 1082 &sf->sf_events, NDI_SLEEP);
1085 1083 (void) ndi_event_free_hdl(sf->sf_event_hdl);
1086 1084 }
1087 1085
1088 1086 if (sf->sf_event_defs) {
1089 1087 kmem_free(sf->sf_event_defs, sizeof (sf_event_defs));
1090 1088 }
1091 1089
1092 1090 /* detach this instance of the HBA driver */
1093 1091 (void) scsi_hba_detach(dip);
1094 1092 scsi_hba_tran_free(sf->sf_tran);
1095 1093
1096 1094 /* deallocate/unbind DMA handle for lilp map */
1097 1095 if (sf->sf_lilp_map != NULL) {
1098 1096 (void) ddi_dma_unbind_handle(sf->sf_lilp_dmahandle);
1099 1097 if (sf->sf_lilp_dmahandle != NULL) {
1100 1098 ddi_dma_free_handle(&sf->sf_lilp_dmahandle);
1101 1099 }
1102 1100 ddi_dma_mem_free(&sf->sf_lilp_acchandle);
1103 1101 }
1104 1102
1105 1103 /*
1106 1104 * the kmem cache must be destroyed before free'ing
1107 1105 * up the crpools
1108 1106 *
1109 1107 * our finagle of "ntot" and "nfree"
1110 1108 * causes an ASSERT failure in "sf_cr_free()"
1111 1109 * if the kmem cache is free'd after invoking
1112 1110 * "sf_crpool_free()".
1113 1111 */
1114 1112 kmem_cache_destroy(sf->sf_pkt_cache);
1115 1113
1116 1114 SF_DEBUG(2, (sf, CE_CONT,
1117 1115 "sf_detach: sf_crpool_free() for instance 0x%x\n",
1118 1116 instance));
1119 1117 while (sf->sf_cr_pool != NULL) {
1120 1118 /*
1121 1119 * set ntot to nfree for this particular entry
1122 1120 *
1123 1121 * this causes sf_crpool_free() to update
1124 1122 * the cr_pool list when deallocating this entry
1125 1123 */
1126 1124 sf->sf_cr_pool->ntot = sf->sf_cr_pool->nfree;
1127 1125 sf_crpool_free(sf);
1128 1126 }
1129 1127
1130 1128 /*
1131 1129 * now that the cr_pool's are gone it's safe
1132 1130 * to destroy all softstate mutex's and cv's
1133 1131 */
1134 1132 mutex_destroy(&sf->sf_mutex);
1135 1133 mutex_destroy(&sf->sf_cmd_mutex);
1136 1134 mutex_destroy(&sf->sf_cr_mutex);
1137 1135 mutex_destroy(&sf->sf_hp_daemon_mutex);
1138 1136 cv_destroy(&sf->sf_cr_cv);
1139 1137 cv_destroy(&sf->sf_hp_daemon_cv);
1140 1138
1141 1139 /* remove all minor nodes from the device tree */
1142 1140 ddi_remove_minor_node(dip, NULL);
1143 1141
1144 1142 /* remove properties created during attach() */
1145 1143 ddi_prop_remove_all(dip);
1146 1144
1147 1145 /* remove kstat's if present */
1148 1146 if (sf->sf_ksp != NULL) {
1149 1147 kstat_delete(sf->sf_ksp);
1150 1148 }
1151 1149
1152 1150 SF_DEBUG(2, (sf, CE_CONT,
1153 1151 "sf_detach: ddi_soft_state_free() for instance 0x%x\n",
1154 1152 instance));
1155 1153 ddi_soft_state_free(sf_state, instance);
1156 1154 return (DDI_SUCCESS);
1157 1155
1158 1156 default:
1159 1157 SF_DEBUG(2, (sf, CE_CONT, "sf_detach: sf%d unknown cmd %x\n",
1160 1158 instance, (int)cmd));
1161 1159 return (DDI_FAILURE);
1162 1160 }
1163 1161 }
1164 1162
1165 1163
1166 1164 /*
1167 1165 * sf_softstate_unlink() - remove an sf instance from the list of softstates
1168 1166 */
1169 1167 static void
1170 1168 sf_softstate_unlink(struct sf *sf)
1171 1169 {
1172 1170 struct sf *sf_ptr;
1173 1171 struct sf *sf_found_sibling;
1174 1172 struct sf *sf_reposition = NULL;
1175 1173
1176 1174
1177 1175 mutex_enter(&sf_global_mutex);
1178 1176 while (sf_watch_running) {
1179 1177 /* Busy working the list -- wait */
1180 1178 cv_wait(&sf_watch_cv, &sf_global_mutex);
1181 1179 }
1182 1180 if ((sf_found_sibling = sf->sf_sibling) != NULL) {
1183 1181 /*
1184 1182 * we have a sibling so NULL out its reference to us
1185 1183 */
1186 1184 mutex_enter(&sf_found_sibling->sf_mutex);
1187 1185 sf_found_sibling->sf_sibling = NULL;
1188 1186 mutex_exit(&sf_found_sibling->sf_mutex);
1189 1187 }
1190 1188
1191 1189 /* remove our instance from the global list */
1192 1190 if (sf == sf_head) {
1193 1191 /* we were at the head of the list */
1194 1192 sf_head = sf->sf_next;
1195 1193 } else {
1196 1194 /* find us in the list */
1197 1195 for (sf_ptr = sf_head;
1198 1196 sf_ptr != NULL;
1199 1197 sf_ptr = sf_ptr->sf_next) {
1200 1198 if (sf_ptr == sf) {
1201 1199 break;
1202 1200 }
1203 1201 /* remember this place */
1204 1202 sf_reposition = sf_ptr;
1205 1203 }
1206 1204 ASSERT(sf_ptr == sf);
1207 1205 ASSERT(sf_reposition != NULL);
1208 1206
1209 1207 sf_reposition->sf_next = sf_ptr->sf_next;
1210 1208 }
1211 1209 mutex_exit(&sf_global_mutex);
1212 1210 }
1213 1211
1214 1212
1215 1213 static int
1216 1214 sf_scsi_bus_config(dev_info_t *parent, uint_t flag,
1217 1215 ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
1218 1216 {
1219 1217 int64_t reset_delay;
1220 1218 struct sf *sf;
1221 1219
1222 1220 sf = ddi_get_soft_state(sf_state, ddi_get_instance(parent));
1223 1221 ASSERT(sf);
1224 1222
1225 1223 reset_delay = (int64_t)(USEC_TO_TICK(SF_INIT_WAIT_TIMEOUT)) -
1226 1224 (ddi_get_lbolt64() - sf->sf_reset_time);
1227 1225 if (reset_delay < 0)
1228 1226 reset_delay = 0;
1229 1227
1230 1228 if (sf_bus_config_debug)
1231 1229 flag |= NDI_DEVI_DEBUG;
1232 1230
1233 1231 return (ndi_busop_bus_config(parent, flag, op,
1234 1232 arg, childp, (clock_t)reset_delay));
1235 1233 }
1236 1234
1237 1235 static int
1238 1236 sf_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
1239 1237 ddi_bus_config_op_t op, void *arg)
1240 1238 {
1241 1239 if (sf_bus_config_debug)
1242 1240 flag |= NDI_DEVI_DEBUG;
1243 1241
1244 1242 return (ndi_busop_bus_unconfig(parent, flag, op, arg));
1245 1243 }
1246 1244
1247 1245
1248 1246 /*
1249 1247 * called by transport to initialize a SCSI target
1250 1248 */
1251 1249 /* ARGSUSED */
1252 1250 static int
1253 1251 sf_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1254 1252 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1255 1253 {
1256 1254 #ifdef RAID_LUNS
1257 1255 int lun;
1258 1256 #else
1259 1257 int64_t lun;
1260 1258 #endif
1261 1259 struct sf_target *target;
1262 1260 struct sf *sf = (struct sf *)hba_tran->tran_hba_private;
1263 1261 int i, t_len;
1264 1262 unsigned int lip_cnt;
1265 1263 unsigned char wwn[FC_WWN_SIZE];
1266 1264
1267 1265
1268 1266 /* get and validate our SCSI target ID */
1269 1267 i = sd->sd_address.a_target;
1270 1268 if (i >= sf_max_targets) {
1271 1269 return (DDI_NOT_WELL_FORMED);
1272 1270 }
1273 1271
1274 1272 /* get our port WWN property */
1275 1273 t_len = sizeof (wwn);
1276 1274 if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1277 1275 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, PORT_WWN_PROP,
1278 1276 (caddr_t)&wwn, &t_len) != DDI_SUCCESS) {
1279 1277 /* no port WWN property - ignore the OBP stub node */
1280 1278 return (DDI_NOT_WELL_FORMED);
1281 1279 }
1282 1280
1283 1281 /* get our LIP count property */
1284 1282 t_len = sizeof (lip_cnt);
1285 1283 if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1286 1284 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, LIP_CNT_PROP,
1287 1285 (caddr_t)&lip_cnt, &t_len) != DDI_SUCCESS) {
1288 1286 return (DDI_FAILURE);
1289 1287 }
1290 1288 /* and our LUN property */
1291 1289 t_len = sizeof (lun);
1292 1290 if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1293 1291 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun",
1294 1292 (caddr_t)&lun, &t_len) != DDI_SUCCESS) {
1295 1293 return (DDI_FAILURE);
1296 1294 }
1297 1295
1298 1296 /* find the target structure for this instance */
1299 1297 mutex_enter(&sf->sf_mutex);
1300 1298 if ((target = sf_lookup_target(sf, wwn, lun)) == NULL) {
1301 1299 mutex_exit(&sf->sf_mutex);
1302 1300 return (DDI_FAILURE);
1303 1301 }
1304 1302
1305 1303 mutex_enter(&target->sft_mutex);
1306 1304 if ((sf->sf_lip_cnt == lip_cnt) && !(target->sft_state
1307 1305 & SF_TARGET_INIT_DONE)) {
1308 1306 /*
1309 1307 * set links between HBA transport and target structures
1310 1308 * and set done flag
1311 1309 */
1312 1310 hba_tran->tran_tgt_private = target;
1313 1311 target->sft_tran = hba_tran;
1314 1312 target->sft_state |= SF_TARGET_INIT_DONE;
1315 1313 } else {
1316 1314 /* already initialized ?? */
1317 1315 mutex_exit(&target->sft_mutex);
1318 1316 mutex_exit(&sf->sf_mutex);
1319 1317 return (DDI_FAILURE);
1320 1318 }
1321 1319 mutex_exit(&target->sft_mutex);
1322 1320 mutex_exit(&sf->sf_mutex);
1323 1321
1324 1322 return (DDI_SUCCESS);
1325 1323 }
1326 1324
1327 1325
1328 1326 /*
1329 1327 * called by transport to free a target
1330 1328 */
1331 1329 /* ARGSUSED */
1332 1330 static void
1333 1331 sf_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1334 1332 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1335 1333 {
1336 1334 struct sf_target *target = hba_tran->tran_tgt_private;
1337 1335
1338 1336 if (target != NULL) {
1339 1337 mutex_enter(&target->sft_mutex);
1340 1338 target->sft_tran = NULL;
1341 1339 target->sft_state &= ~SF_TARGET_INIT_DONE;
1342 1340 mutex_exit(&target->sft_mutex);
1343 1341 }
1344 1342 }
1345 1343
1346 1344
1347 1345 /*
1348 1346 * allocator for non-std size cdb/pkt_private/status -- return TRUE iff
1349 1347 * success, else return FALSE
1350 1348 */
1351 1349 /*ARGSUSED*/
1352 1350 static int
1353 1351 sf_pkt_alloc_extern(struct sf *sf, struct sf_pkt *cmd,
1354 1352 int tgtlen, int statuslen, int kf)
1355 1353 {
1356 1354 caddr_t scbp, tgt;
1357 1355 int failure = FALSE;
1358 1356 struct scsi_pkt *pkt = CMD2PKT(cmd);
1359 1357
1360 1358
1361 1359 tgt = scbp = NULL;
1362 1360
1363 1361 if (tgtlen > PKT_PRIV_LEN) {
1364 1362 if ((tgt = kmem_zalloc(tgtlen, kf)) == NULL) {
1365 1363 failure = TRUE;
1366 1364 } else {
1367 1365 cmd->cmd_flags |= CFLAG_PRIVEXTERN;
1368 1366 pkt->pkt_private = tgt;
1369 1367 }
1370 1368 }
1371 1369 if (statuslen > EXTCMDS_STATUS_SIZE) {
1372 1370 if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
1373 1371 failure = TRUE;
1374 1372 } else {
1375 1373 cmd->cmd_flags |= CFLAG_SCBEXTERN;
1376 1374 pkt->pkt_scbp = (opaque_t)scbp;
1377 1375 }
1378 1376 }
1379 1377 if (failure) {
1380 1378 sf_pkt_destroy_extern(sf, cmd);
1381 1379 }
1382 1380 return (failure);
1383 1381 }
1384 1382
1385 1383
1386 1384 /*
1387 1385 * deallocator for non-std size cdb/pkt_private/status
1388 1386 */
1389 1387 static void
1390 1388 sf_pkt_destroy_extern(struct sf *sf, struct sf_pkt *cmd)
1391 1389 {
1392 1390 struct scsi_pkt *pkt = CMD2PKT(cmd);
1393 1391
1394 1392 if (cmd->cmd_flags & CFLAG_FREE) {
1395 1393 cmn_err(CE_PANIC,
1396 1394 "sf_scsi_impl_pktfree: freeing free packet");
1397 1395 _NOTE(NOT_REACHED)
1398 1396 /* NOTREACHED */
1399 1397 }
1400 1398 if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
1401 1399 kmem_free((caddr_t)pkt->pkt_scbp,
1402 1400 (size_t)cmd->cmd_scblen);
1403 1401 }
1404 1402 if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
1405 1403 kmem_free((caddr_t)pkt->pkt_private,
1406 1404 (size_t)cmd->cmd_privlen);
1407 1405 }
1408 1406
1409 1407 cmd->cmd_flags = CFLAG_FREE;
1410 1408 kmem_cache_free(sf->sf_pkt_cache, (void *)cmd);
1411 1409 }
1412 1410
1413 1411
1414 1412 /*
1415 1413 * create or initialize a SCSI packet -- called internally and
1416 1414 * by the transport
1417 1415 */
1418 1416 static struct scsi_pkt *
1419 1417 sf_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
1420 1418 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1421 1419 int flags, int (*callback)(), caddr_t arg)
1422 1420 {
1423 1421 int kf;
1424 1422 int failure = FALSE;
1425 1423 struct sf_pkt *cmd;
1426 1424 struct sf *sf = ADDR2SF(ap);
1427 1425 struct sf_target *target = ADDR2TARGET(ap);
1428 1426 struct sf_pkt *new_cmd = NULL;
1429 1427 struct fcal_packet *fpkt;
1430 1428 fc_frame_header_t *hp;
1431 1429 struct fcp_cmd *fcmd;
1432 1430
1433 1431
1434 1432 /*
1435 1433 * If we've already allocated a pkt once,
1436 1434 * this request is for dma allocation only.
1437 1435 */
1438 1436 if (pkt == NULL) {
1439 1437
1440 1438 /*
1441 1439 * First step of sf_scsi_init_pkt: pkt allocation
1442 1440 */
1443 1441 if (cmdlen > FCP_CDB_SIZE) {
1444 1442 return (NULL);
1445 1443 }
1446 1444
1447 1445 kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
1448 1446
1449 1447 if ((cmd = kmem_cache_alloc(sf->sf_pkt_cache, kf)) != NULL) {
1450 1448 /*
1451 1449 * Selective zeroing of the pkt.
1452 1450 */
1453 1451
1454 1452 cmd->cmd_flags = 0;
1455 1453 cmd->cmd_forw = 0;
1456 1454 cmd->cmd_back = 0;
1457 1455 cmd->cmd_next = 0;
1458 1456 cmd->cmd_pkt = (struct scsi_pkt *)((char *)cmd +
1459 1457 sizeof (struct sf_pkt) + sizeof (struct
1460 1458 fcal_packet));
1461 1459 cmd->cmd_fp_pkt = (struct fcal_packet *)((char *)cmd +
1462 1460 sizeof (struct sf_pkt));
1463 1461 cmd->cmd_fp_pkt->fcal_pkt_private = (opaque_t)cmd;
1464 1462 cmd->cmd_state = SF_STATE_IDLE;
1465 1463 cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
1466 1464 cmd->cmd_pkt->pkt_scbp = (opaque_t)cmd->cmd_scsi_scb;
1467 1465 cmd->cmd_pkt->pkt_comp = NULL;
1468 1466 cmd->cmd_pkt->pkt_flags = 0;
1469 1467 cmd->cmd_pkt->pkt_time = 0;
1470 1468 cmd->cmd_pkt->pkt_resid = 0;
1471 1469 cmd->cmd_pkt->pkt_reason = 0;
1472 1470 cmd->cmd_cdblen = (uchar_t)cmdlen;
1473 1471 cmd->cmd_scblen = statuslen;
1474 1472 cmd->cmd_privlen = tgtlen;
1475 1473 cmd->cmd_pkt->pkt_address = *ap;
1476 1474
1477 1475 /* zero pkt_private */
1478 1476 (int *)(cmd->cmd_pkt->pkt_private =
1479 1477 cmd->cmd_pkt_private);
1480 1478 bzero((caddr_t)cmd->cmd_pkt->pkt_private,
1481 1479 PKT_PRIV_LEN);
1482 1480 } else {
1483 1481 failure = TRUE;
1484 1482 }
1485 1483
1486 1484 if (failure ||
1487 1485 (tgtlen > PKT_PRIV_LEN) ||
1488 1486 (statuslen > EXTCMDS_STATUS_SIZE)) {
1489 1487 if (!failure) {
1490 1488 /* need to allocate more space */
1491 1489 failure = sf_pkt_alloc_extern(sf, cmd,
1492 1490 tgtlen, statuslen, kf);
1493 1491 }
1494 1492 if (failure) {
1495 1493 return (NULL);
1496 1494 }
1497 1495 }
1498 1496
1499 1497 fpkt = cmd->cmd_fp_pkt;
1500 1498 if (cmd->cmd_block == NULL) {
1501 1499
1502 1500 /* allocate cmd/response pool buffers */
1503 1501 if (sf_cr_alloc(sf, cmd, callback) == DDI_FAILURE) {
1504 1502 sf_pkt_destroy_extern(sf, cmd);
1505 1503 return (NULL);
1506 1504 }
1507 1505
1508 1506 /* fill in the FC-AL packet */
1509 1507 fpkt->fcal_pkt_cookie = sf->sf_socp;
1510 1508 fpkt->fcal_pkt_comp = sf_cmd_callback;
1511 1509 fpkt->fcal_pkt_flags = 0;
1512 1510 fpkt->fcal_magic = FCALP_MAGIC;
1513 1511 fpkt->fcal_socal_request.sr_soc_hdr.sh_flags =
1514 1512 (ushort_t)(SOC_FC_HEADER |
1515 1513 sf->sf_sochandle->fcal_portno);
1516 1514 fpkt->fcal_socal_request.sr_soc_hdr.sh_class = 3;
1517 1515 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_count = 1;
1518 1516 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_flags = 0;
1519 1517 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_seqno = 0;
1520 1518 fpkt->fcal_socal_request.sr_dataseg[0].fc_base =
1521 1519 (uint32_t)cmd->cmd_dmac;
1522 1520 fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
1523 1521 sizeof (struct fcp_cmd);
1524 1522 fpkt->fcal_socal_request.sr_dataseg[1].fc_base =
1525 1523 (uint32_t)cmd->cmd_rsp_dmac;
1526 1524 fpkt->fcal_socal_request.sr_dataseg[1].fc_count =
1527 1525 FCP_MAX_RSP_IU_SIZE;
1528 1526
1529 1527 /* Fill in the Fibre Channel frame header */
1530 1528 hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
1531 1529 hp->r_ctl = R_CTL_COMMAND;
1532 1530 hp->type = TYPE_SCSI_FCP;
1533 1531 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
1534 1532 hp->reserved1 = 0;
1535 1533 hp->seq_id = 0;
1536 1534 hp->df_ctl = 0;
1537 1535 hp->seq_cnt = 0;
1538 1536 hp->ox_id = 0xffff;
1539 1537 hp->rx_id = 0xffff;
1540 1538 hp->ro = 0;
1541 1539
1542 1540 /* Establish the LUN */
1543 1541 bcopy((caddr_t)&target->sft_lun.b,
1544 1542 (caddr_t)&cmd->cmd_block->fcp_ent_addr,
1545 1543 FCP_LUN_SIZE);
1546 1544 *((int32_t *)&cmd->cmd_block->fcp_cntl) = 0;
1547 1545 }
1548 1546 cmd->cmd_pkt->pkt_cdbp = cmd->cmd_block->fcp_cdb;
1549 1547
1550 1548 mutex_enter(&target->sft_pkt_mutex);
1551 1549
1552 1550 target->sft_pkt_tail->cmd_forw = cmd;
1553 1551 cmd->cmd_back = target->sft_pkt_tail;
1554 1552 cmd->cmd_forw = (struct sf_pkt *)&target->sft_pkt_head;
1555 1553 target->sft_pkt_tail = cmd;
1556 1554
1557 1555 mutex_exit(&target->sft_pkt_mutex);
1558 1556 new_cmd = cmd; /* for later cleanup if needed */
1559 1557 } else {
1560 1558 /* pkt already exists -- just a request for DMA allocation */
1561 1559 cmd = PKT2CMD(pkt);
1562 1560 fpkt = cmd->cmd_fp_pkt;
1563 1561 }
1564 1562
1565 1563 /* zero cdb (bzero is too slow) */
1566 1564 bzero((caddr_t)cmd->cmd_pkt->pkt_cdbp, cmdlen);
1567 1565
1568 1566 /*
1569 1567 * Second step of sf_scsi_init_pkt: dma allocation
1570 1568 * Set up dma info
1571 1569 */
1572 1570 if ((bp != NULL) && (bp->b_bcount != 0)) {
1573 1571 int cmd_flags, dma_flags;
1574 1572 int rval = 0;
1575 1573 uint_t dmacookie_count;
1576 1574
1577 1575 /* there is a buffer and some data to transfer */
1578 1576
1579 1577 /* set up command and DMA flags */
1580 1578 cmd_flags = cmd->cmd_flags;
1581 1579 if (bp->b_flags & B_READ) {
1582 1580 /* a read */
1583 1581 cmd_flags &= ~CFLAG_DMASEND;
1584 1582 dma_flags = DDI_DMA_READ;
1585 1583 } else {
1586 1584 /* a write */
1587 1585 cmd_flags |= CFLAG_DMASEND;
1588 1586 dma_flags = DDI_DMA_WRITE;
1589 1587 }
1590 1588 if (flags & PKT_CONSISTENT) {
1591 1589 cmd_flags |= CFLAG_CMDIOPB;
1592 1590 dma_flags |= DDI_DMA_CONSISTENT;
1593 1591 }
1594 1592
1595 1593 /* ensure we have a DMA handle */
1596 1594 if (cmd->cmd_dmahandle == NULL) {
1597 1595 rval = ddi_dma_alloc_handle(sf->sf_dip,
1598 1596 sf->sf_sochandle->fcal_dmaattr, callback, arg,
1599 1597 &cmd->cmd_dmahandle);
1600 1598 }
1601 1599
1602 1600 if (rval == 0) {
1603 1601 /* bind our DMA handle to our buffer */
1604 1602 rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
1605 1603 dma_flags, callback, arg, &cmd->cmd_dmacookie,
1606 1604 &dmacookie_count);
1607 1605 }
1608 1606
1609 1607 if (rval != 0) {
1610 1608 /* DMA failure */
1611 1609 SF_DEBUG(2, (sf, CE_CONT, "ddi_dma_buf.. failed\n"));
1612 1610 switch (rval) {
1613 1611 case DDI_DMA_NORESOURCES:
1614 1612 bioerror(bp, 0);
1615 1613 break;
1616 1614 case DDI_DMA_BADATTR:
1617 1615 case DDI_DMA_NOMAPPING:
1618 1616 bioerror(bp, EFAULT);
1619 1617 break;
1620 1618 case DDI_DMA_TOOBIG:
1621 1619 default:
1622 1620 bioerror(bp, EINVAL);
1623 1621 break;
1624 1622 }
1625 1623 /* clear valid flag */
1626 1624 cmd->cmd_flags = cmd_flags & ~CFLAG_DMAVALID;
1627 1625 if (new_cmd != NULL) {
1628 1626 /* destroy packet if we just created it */
1629 1627 sf_scsi_destroy_pkt(ap, new_cmd->cmd_pkt);
1630 1628 }
1631 1629 return (NULL);
1632 1630 }
1633 1631
1634 1632 ASSERT(dmacookie_count == 1);
1635 1633 /* set up amt to transfer and set valid flag */
1636 1634 cmd->cmd_dmacount = bp->b_bcount;
1637 1635 cmd->cmd_flags = cmd_flags | CFLAG_DMAVALID;
1638 1636
1639 1637 ASSERT(cmd->cmd_dmahandle != NULL);
1640 1638 }
1641 1639
1642 1640 /* set up FC-AL packet */
1643 1641 fcmd = cmd->cmd_block;
1644 1642
1645 1643 if (cmd->cmd_flags & CFLAG_DMAVALID) {
1646 1644 if (cmd->cmd_flags & CFLAG_DMASEND) {
1647 1645 /* DMA write */
1648 1646 fcmd->fcp_cntl.cntl_read_data = 0;
1649 1647 fcmd->fcp_cntl.cntl_write_data = 1;
1650 1648 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
1651 1649 CQ_TYPE_IO_WRITE;
1652 1650 } else {
1653 1651 /* DMA read */
1654 1652 fcmd->fcp_cntl.cntl_read_data = 1;
1655 1653 fcmd->fcp_cntl.cntl_write_data = 0;
1656 1654 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
1657 1655 CQ_TYPE_IO_READ;
1658 1656 }
1659 1657 fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
1660 1658 (uint32_t)cmd->cmd_dmacookie.dmac_address;
1661 1659 fpkt->fcal_socal_request.sr_dataseg[2].fc_count =
1662 1660 cmd->cmd_dmacookie.dmac_size;
1663 1661 fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
1664 1662 fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
1665 1663 cmd->cmd_dmacookie.dmac_size;
1666 1664 fcmd->fcp_data_len = cmd->cmd_dmacookie.dmac_size;
1667 1665 } else {
1668 1666 /* not a read or write */
1669 1667 fcmd->fcp_cntl.cntl_read_data = 0;
1670 1668 fcmd->fcp_cntl.cntl_write_data = 0;
1671 1669 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_SIMPLE;
1672 1670 fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 2;
1673 1671 fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
1674 1672 sizeof (struct fcp_cmd);
1675 1673 fcmd->fcp_data_len = 0;
1676 1674 }
1677 1675 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
1678 1676
1679 1677 return (cmd->cmd_pkt);
1680 1678 }
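
/*
 * [A minimal sketch, not part of this file.]  How a target driver would
 * exercise the two-step behavior above through the DDI scsi_init_pkt(9F)
 * entry point: with pkt == NULL the HBA allocates the packet and its
 * cmd/rsp buffers; a later call passing the existing pkt and a buf only
 * performs the DMA binding.  The function name, CDB group, and status
 * length below are illustrative assumptions.
 */
static struct scsi_pkt *
example_target_init_pkt(struct scsi_address *ap, struct buf *bp)
{
	/* allocate + bind in one call; sf sees pkt == NULL first */
	return (scsi_init_pkt(ap, NULL, bp, CDB_GROUP1, 1, 0,
	    PKT_CONSISTENT, SLEEP_FUNC, NULL));
}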
1681 1679
1682 1680
1683 1681 /*
1684 1682 * destroy a SCSI packet -- called internally and by the transport
1685 1683 */
1686 1684 static void
1687 1685 sf_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1688 1686 {
1689 1687 struct sf_pkt *cmd = PKT2CMD(pkt);
1690 1688 struct sf *sf = ADDR2SF(ap);
1691 1689 struct sf_target *target = ADDR2TARGET(ap);
1692 1690 struct fcal_packet *fpkt = cmd->cmd_fp_pkt;
1693 1691
1694 1692
1695 1693 if (cmd->cmd_flags & CFLAG_DMAVALID) {
1696 1694 /* DMA was set up -- clean up */
1697 1695 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
1698 1696 cmd->cmd_flags ^= CFLAG_DMAVALID;
1699 1697 }
1700 1698
1701 1699 /* take this packet off the doubly-linked list */
1702 1700 mutex_enter(&target->sft_pkt_mutex);
1703 1701 cmd->cmd_back->cmd_forw = cmd->cmd_forw;
1704 1702 cmd->cmd_forw->cmd_back = cmd->cmd_back;
1705 1703 mutex_exit(&target->sft_pkt_mutex);
1706 1704
1707 1705 fpkt->fcal_pkt_flags = 0;
1708 1706 /* free the packet */
1709 1707 if ((cmd->cmd_flags &
1710 1708 (CFLAG_FREE | CFLAG_PRIVEXTERN | CFLAG_SCBEXTERN)) == 0) {
1711 1709 /* just a regular packet */
1712 1710 ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
1713 1711 cmd->cmd_flags = CFLAG_FREE;
1714 1712 kmem_cache_free(sf->sf_pkt_cache, (void *)cmd);
1715 1713 } else {
1716 1714 /* a packet with extra memory */
1717 1715 sf_pkt_destroy_extern(sf, cmd);
1718 1716 }
1719 1717 }
1720 1718
1721 1719
1722 1720 /*
1723 1721 * called by transport to unbind DMA handle
1724 1722 */
1725 1723 /* ARGSUSED */
1726 1724 static void
1727 1725 sf_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1728 1726 {
1729 1727 struct sf_pkt *cmd = PKT2CMD(pkt);
1730 1728
1731 1729
1732 1730 if (cmd->cmd_flags & CFLAG_DMAVALID) {
1733 1731 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
1734 1732 cmd->cmd_flags ^= CFLAG_DMAVALID;
1735 1733 }
1736 1734
1737 1735 }
1738 1736
1739 1737
1740 1738 /*
1741 1739 * called by transport to synchronize CPU and I/O views of memory
1742 1740 */
1743 1741 /* ARGSUSED */
1744 1742 static void
1745 1743 sf_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1746 1744 {
1747 1745 struct sf_pkt *cmd = PKT2CMD(pkt);
1748 1746
1749 1747
1750 1748 if (cmd->cmd_flags & CFLAG_DMAVALID) {
1751 1749 if (ddi_dma_sync(cmd->cmd_dmahandle, (off_t)0, (size_t)0,
1752 1750 (cmd->cmd_flags & CFLAG_DMASEND) ?
1753 1751 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
1754 1752 DDI_SUCCESS) {
1755 1753 cmn_err(CE_WARN, "sf: sync pkt failed");
1756 1754 }
1757 1755 }
1758 1756 }
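
/*
 * [A minimal sketch, not part of this file.]  The sync-direction rule
 * applied above: before the device reads a buffer we sync FORDEV, and
 * before the CPU reads data the device wrote we sync FORCPU.  The
 * function name is hypothetical.
 */
static int
example_sync(ddi_dma_handle_t handle, int sending_to_device)
{
	return (ddi_dma_sync(handle, (off_t)0, (size_t)0,
	    sending_to_device ? DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU));
}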
1759 1757
1760 1758
1761 1759 /*
1762 1760 * routine for reset notification setup, to register or cancel. -- called
1763 1761 * by transport
1764 1762 */
1765 1763 static int
1766 1764 sf_scsi_reset_notify(struct scsi_address *ap, int flag,
1767 1765 void (*callback)(caddr_t), caddr_t arg)
1768 1766 {
1769 1767 struct sf *sf = ADDR2SF(ap);
1770 1768
1771 1769 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
1772 1770 &sf->sf_mutex, &sf->sf_reset_notify_listf));
1773 1771 }
1774 1772
1775 1773
1776 1774 /*
1777 1775 * called by transport to get port WWN property (except sun4u)
1778 1776 */
1779 1777 /* ARGSUSED */
1780 1778 static int
1781 1779 sf_scsi_get_name(struct scsi_device *sd, char *name, int len)
1782 1780 {
1783 1781 char tbuf[(FC_WWN_SIZE*2)+1];
1784 1782 unsigned char wwn[FC_WWN_SIZE];
1785 1783 int i, lun;
1786 1784 dev_info_t *tgt_dip;
1787 1785
1788 1786 tgt_dip = sd->sd_dev;
1789 1787 i = sizeof (wwn);
1790 1788 if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1791 1789 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, PORT_WWN_PROP,
1792 1790 (caddr_t)&wwn, &i) != DDI_SUCCESS) {
1793 1791 name[0] = '\0';
1794 1792 return (0);
1795 1793 }
1796 1794 i = sizeof (lun);
1797 1795 if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1798 1796 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun",
1799 1797 (caddr_t)&lun, &i) != DDI_SUCCESS) {
1800 1798 name[0] = '\0';
1801 1799 return (0);
1802 1800 }
1803 1801 for (i = 0; i < FC_WWN_SIZE; i++)
1804 1802 (void) sprintf(&tbuf[i << 1], "%02x", wwn[i]);
1805 1803 (void) sprintf(name, "w%s,%x", tbuf, lun);
1806 1804 return (1);
1807 1805 }
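
/*
 * [A minimal user-level sketch, not part of this file.]  The unit
 * address the routine above builds: 'w', sixteen hex digits of port
 * WWN, ',', then the LUN in hex -- e.g. "w21000020370c27f6,0".  This
 * assumes FC_WWN_SIZE is 8, as the tbuf sizing above implies; the
 * function name is hypothetical.
 */
#include <stdio.h>

static void
example_format_name(const unsigned char wwn[8], int lun,
    char *name, size_t len)
{
	char tbuf[17];	/* 16 hex digits + NUL */
	int i;

	for (i = 0; i < 8; i++)
		(void) sprintf(&tbuf[i << 1], "%02x", wwn[i]);
	(void) snprintf(name, len, "w%s,%x", tbuf, lun);
}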
1808 1806
1809 1807
1810 1808 /*
1811 1809 * called by transport to get target soft AL-PA (except sun4u)
1812 1810 */
1813 1811 /* ARGSUSED */
1814 1812 static int
1815 1813 sf_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
1816 1814 {
1817 1815 struct sf_target *target = ADDR2TARGET(&sd->sd_address);
1818 1816
1819 1817 if (target == NULL)
1820 1818 return (0);
1821 1819
1822 1820 (void) sprintf(name, "%x", target->sft_al_pa);
1823 1821 return (1);
1824 1822 }
1825 1823
1826 1824
1827 1825 /*
1828 1826 * add to the command/response buffer pool for this sf instance
1829 1827 */
1830 1828 static int
1831 1829 sf_add_cr_pool(struct sf *sf)
1832 1830 {
1833 1831 int cmd_buf_size;
1834 1832 size_t real_cmd_buf_size;
1835 1833 int rsp_buf_size;
1836 1834 size_t real_rsp_buf_size;
1837 1835 uint_t i, ccount;
1838 1836 struct sf_cr_pool *ptr;
1839 1837 struct sf_cr_free_elem *cptr;
1840 1838 caddr_t dptr, eptr;
1841 1839 ddi_dma_cookie_t cmd_cookie;
1842 1840 ddi_dma_cookie_t rsp_cookie;
1843 1841 int cmd_bound = FALSE, rsp_bound = FALSE;
1844 1842
1845 1843
1846 1844 /* allocate room for the pool */
1847 1845 if ((ptr = kmem_zalloc(sizeof (struct sf_cr_pool), KM_NOSLEEP)) ==
1848 1846 NULL) {
1849 1847 return (DDI_FAILURE);
1850 1848 }
1851 1849
1852 1850 /* allocate a DMA handle for the command pool */
1853 1851 if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
1854 1852 DDI_DMA_DONTWAIT, NULL, &ptr->cmd_dma_handle) != DDI_SUCCESS) {
1855 1853 goto fail;
1856 1854 }
1857 1855
1858 1856 /*
1859 1857 * Get a piece of memory in which to put commands
1860 1858 */
1861 1859 cmd_buf_size = (sizeof (struct fcp_cmd) * SF_ELEMS_IN_POOL + 7) & ~7;
1862 1860 if (ddi_dma_mem_alloc(ptr->cmd_dma_handle, cmd_buf_size,
1863 1861 sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
1864 1862 DDI_DMA_DONTWAIT, NULL, (caddr_t *)&ptr->cmd_base,
1865 1863 &real_cmd_buf_size, &ptr->cmd_acc_handle) != DDI_SUCCESS) {
1866 1864 goto fail;
1867 1865 }
1868 1866
1869 1867 /* bind the DMA handle to an address */
1870 1868 if (ddi_dma_addr_bind_handle(ptr->cmd_dma_handle, NULL,
1871 1869 ptr->cmd_base, real_cmd_buf_size,
1872 1870 DDI_DMA_WRITE | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
1873 1871 NULL, &cmd_cookie, &ccount) != DDI_DMA_MAPPED) {
1874 1872 goto fail;
1875 1873 }
1876 1874 cmd_bound = TRUE;
1877 1875 /* ensure only one cookie was allocated */
1878 1876 if (ccount != 1) {
1879 1877 goto fail;
1880 1878 }
1881 1879
1882 1880 /* allocate a DMA handle for the response pool */
1883 1881 if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
1884 1882 DDI_DMA_DONTWAIT, NULL, &ptr->rsp_dma_handle) != DDI_SUCCESS) {
1885 1883 goto fail;
1886 1884 }
1887 1885
1888 1886 /*
1889 1887 * Get a piece of memory in which to put responses
1890 1888 */
1891 1889 rsp_buf_size = FCP_MAX_RSP_IU_SIZE * SF_ELEMS_IN_POOL;
1892 1890 if (ddi_dma_mem_alloc(ptr->rsp_dma_handle, rsp_buf_size,
1893 1891 sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
1894 1892 DDI_DMA_DONTWAIT, NULL, (caddr_t *)&ptr->rsp_base,
1895 1893 &real_rsp_buf_size, &ptr->rsp_acc_handle) != DDI_SUCCESS) {
1896 1894 goto fail;
1897 1895 }
1898 1896
1899 1897 /* bind the DMA handle to an address */
1900 1898 if (ddi_dma_addr_bind_handle(ptr->rsp_dma_handle, NULL,
1901 1899 ptr->rsp_base, real_rsp_buf_size,
1902 1900 DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
1903 1901 NULL, &rsp_cookie, &ccount) != DDI_DMA_MAPPED) {
1904 1902 goto fail;
1905 1903 }
1906 1904 rsp_bound = TRUE;
1907 1905 /* ensure only one cookie was allocated */
1908 1906 if (ccount != 1) {
1909 1907 goto fail;
1910 1908 }
1911 1909
1912 1910 /*
1913 1911 * Generate a (cmd/rsp structure) free list
1914 1912 */
1915 1913 	/* round dptr up to the next 8-byte boundary */
1916 1914 dptr = (caddr_t)((uintptr_t)(ptr->cmd_base) + 7 & ~7);
1917 1915 /* keep track of actual size after moving pointer */
1918 1916 real_cmd_buf_size -= (dptr - ptr->cmd_base);
1919 1917 eptr = ptr->rsp_base;
1920 1918
1921 1919 /* set actual total number of entries */
1922 1920 ptr->ntot = min((real_cmd_buf_size / sizeof (struct fcp_cmd)),
1923 1921 (real_rsp_buf_size / FCP_MAX_RSP_IU_SIZE));
1924 1922 ptr->nfree = ptr->ntot;
1925 1923 ptr->free = (struct sf_cr_free_elem *)ptr->cmd_base;
1926 1924 ptr->sf = sf;
1927 1925
1928 1926 /* set up DMA for each pair of entries */
1929 1927 i = 0;
1930 1928 while (i < ptr->ntot) {
1931 1929 cptr = (struct sf_cr_free_elem *)dptr;
1932 1930 dptr += sizeof (struct fcp_cmd);
1933 1931
1934 1932 cptr->next = (struct sf_cr_free_elem *)dptr;
1935 1933 cptr->rsp = eptr;
1936 1934
1937 1935 cptr->cmd_dmac = cmd_cookie.dmac_address +
1938 1936 (uint32_t)((caddr_t)cptr - ptr->cmd_base);
1939 1937
1940 1938 cptr->rsp_dmac = rsp_cookie.dmac_address +
1941 1939 (uint32_t)((caddr_t)eptr - ptr->rsp_base);
1942 1940
1943 1941 eptr += FCP_MAX_RSP_IU_SIZE;
1944 1942 i++;
1945 1943 }
1946 1944
1947 1945 /* terminate the list */
1948 1946 cptr->next = NULL;
1949 1947
1950 1948 /* add this list at front of current one */
1951 1949 mutex_enter(&sf->sf_cr_mutex);
1952 1950 ptr->next = sf->sf_cr_pool;
1953 1951 sf->sf_cr_pool = ptr;
1954 1952 sf->sf_cr_pool_cnt++;
1955 1953 mutex_exit(&sf->sf_cr_mutex);
1956 1954
1957 1955 return (DDI_SUCCESS);
1958 1956
1959 1957 fail:
1960 1958 /* we failed so clean up */
1961 1959 if (ptr->cmd_dma_handle != NULL) {
1962 1960 if (cmd_bound) {
1963 1961 (void) ddi_dma_unbind_handle(ptr->cmd_dma_handle);
1964 1962 }
1965 1963 ddi_dma_free_handle(&ptr->cmd_dma_handle);
1966 1964 }
1967 1965
1968 1966 if (ptr->rsp_dma_handle != NULL) {
1969 1967 if (rsp_bound) {
1970 1968 (void) ddi_dma_unbind_handle(ptr->rsp_dma_handle);
1971 1969 }
1972 1970 ddi_dma_free_handle(&ptr->rsp_dma_handle);
1973 1971 }
1974 1972
1975 1973 if (ptr->cmd_base != NULL) {
1976 1974 ddi_dma_mem_free(&ptr->cmd_acc_handle);
1977 1975 }
1978 1976
1979 1977 if (ptr->rsp_base != NULL) {
1980 1978 ddi_dma_mem_free(&ptr->rsp_acc_handle);
1981 1979 }
1982 1980
1983 1981 kmem_free((caddr_t)ptr, sizeof (struct sf_cr_pool));
1984 1982 return (DDI_FAILURE);
1985 1983 }
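
/*
 * [A minimal sketch, not part of this file.]  The free-list threading
 * above: fixed-size slots carved out of one flat buffer are chained
 * into a singly linked free list by overlaying a "next" pointer on each
 * unused slot (slot_size must hold at least one pointer).  The types
 * and names are illustrative stand-ins for sf_cr_free_elem.
 */
#include <stddef.h>

struct example_free_elem {
	struct example_free_elem *next;
};

static struct example_free_elem *
example_thread_free_list(char *base, size_t slot_size, size_t nslots)
{
	struct example_free_elem *head = NULL;
	size_t i;

	/* push in reverse so the list ends up in buffer order */
	for (i = nslots; i > 0; i--) {
		struct example_free_elem *elem =
		    (struct example_free_elem *)(base + (i - 1) * slot_size);
		elem->next = head;
		head = elem;
	}
	return (head);
}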
1986 1984
1987 1985
1988 1986 /*
1989 1987 * allocate a command/response buffer from the pool, allocating more
1990 1988 * in the pool as needed
1991 1989 */
1992 1990 static int
1993 1991 sf_cr_alloc(struct sf *sf, struct sf_pkt *cmd, int (*func)())
1994 1992 {
1995 1993 struct sf_cr_pool *ptr;
1996 1994 struct sf_cr_free_elem *cptr;
1997 1995
1998 1996
1999 1997 mutex_enter(&sf->sf_cr_mutex);
2000 1998
2001 1999 try_again:
2002 2000
2003 2001 /* find a free buffer in the existing pool */
2004 2002 ptr = sf->sf_cr_pool;
2005 2003 while (ptr != NULL) {
2006 2004 if (ptr->nfree != 0) {
2007 2005 ptr->nfree--;
2008 2006 break;
2009 2007 } else {
2010 2008 ptr = ptr->next;
2011 2009 }
2012 2010 }
2013 2011
2014 2012 /* did we find a free buffer ? */
2015 2013 if (ptr != NULL) {
2016 2014 /* we found a free buffer -- take it off the free list */
2017 2015 cptr = ptr->free;
2018 2016 ptr->free = cptr->next;
2019 2017 mutex_exit(&sf->sf_cr_mutex);
2020 2018 /* set up the command to use the buffer pair */
2021 2019 cmd->cmd_block = (struct fcp_cmd *)cptr;
2022 2020 cmd->cmd_dmac = cptr->cmd_dmac;
2023 2021 cmd->cmd_rsp_dmac = cptr->rsp_dmac;
2024 2022 cmd->cmd_rsp_block = (struct fcp_rsp *)cptr->rsp;
2025 2023 cmd->cmd_cr_pool = ptr;
2026 2024 return (DDI_SUCCESS); /* success */
2027 2025 }
2028 2026
2029 2027 /* no free buffer available -- can we allocate more ? */
2030 2028 if (sf->sf_cr_pool_cnt < SF_CR_POOL_MAX) {
2031 2029 /* we need to allocate more buffer pairs */
2032 2030 if (sf->sf_cr_flag) {
2033 2031 /* somebody already allocating for this instance */
2034 2032 if (func == SLEEP_FUNC) {
2035 2033 /* user wants to wait */
2036 2034 cv_wait(&sf->sf_cr_cv, &sf->sf_cr_mutex);
2037 2035 /* we've been woken so go try again */
2038 2036 goto try_again;
2039 2037 }
2040 2038 /* user does not want to wait */
2041 2039 mutex_exit(&sf->sf_cr_mutex);
2042 2040 sf->sf_stats.cralloc_failures++;
2043 2041 return (DDI_FAILURE); /* give up */
2044 2042 }
2045 2043 /* set flag saying we're allocating */
2046 2044 sf->sf_cr_flag = 1;
2047 2045 mutex_exit(&sf->sf_cr_mutex);
2048 2046 /* add to our pool */
2049 2047 if (sf_add_cr_pool(sf) != DDI_SUCCESS) {
2050 2048 /* couldn't add to our pool for some reason */
2051 2049 mutex_enter(&sf->sf_cr_mutex);
2052 2050 sf->sf_cr_flag = 0;
2053 2051 cv_broadcast(&sf->sf_cr_cv);
2054 2052 mutex_exit(&sf->sf_cr_mutex);
2055 2053 sf->sf_stats.cralloc_failures++;
2056 2054 return (DDI_FAILURE); /* give up */
2057 2055 }
2058 2056 /*
2059 2057 		 * clear the flag saying we're allocating and tell
2060 2058 		 * all others that care
2061 2059 */
2062 2060 mutex_enter(&sf->sf_cr_mutex);
2063 2061 sf->sf_cr_flag = 0;
2064 2062 cv_broadcast(&sf->sf_cr_cv);
2065 2063 /* now that we have more buffers try again */
2066 2064 goto try_again;
2067 2065 }
2068 2066
2069 2067 /* we don't have room to allocate any more buffers */
2070 2068 mutex_exit(&sf->sf_cr_mutex);
2071 2069 sf->sf_stats.cralloc_failures++;
2072 2070 return (DDI_FAILURE); /* give up */
2073 2071 }
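
/*
 * [A minimal sketch, not part of this file.]  The allocate-or-wait
 * discipline above: one thread at a time grows the pool; sleeping
 * callers block on the condition variable and re-check after each
 * wakeup.  All names here are hypothetical, and sf's SF_CR_POOL_MAX
 * cap on pool growth is omitted.
 */
static kmutex_t example_pool_mutex;
static kcondvar_t example_pool_cv;
static int example_pool_nfree;
static int example_growing;

static int
example_cr_alloc(int can_sleep)
{
	mutex_enter(&example_pool_mutex);
	for (;;) {
		if (example_pool_nfree > 0) {
			example_pool_nfree--;	/* claim a buffer pair */
			mutex_exit(&example_pool_mutex);
			return (DDI_SUCCESS);
		}
		if (example_growing) {
			if (!can_sleep) {
				mutex_exit(&example_pool_mutex);
				return (DDI_FAILURE);
			}
			/* drops the mutex while asleep, reacquires it */
			cv_wait(&example_pool_cv, &example_pool_mutex);
			continue;
		}
		example_growing = 1;
		mutex_exit(&example_pool_mutex);
		/* real code calls sf_add_cr_pool() here, no locks held */
		mutex_enter(&example_pool_mutex);
		example_pool_nfree += 8;	/* pretend the pool grew */
		example_growing = 0;
		cv_broadcast(&example_pool_cv);	/* let waiters re-check */
	}
}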
2074 2072
2075 2073
2076 2074 /*
2077 2075 * free a cmd/response buffer pair in our pool
2078 2076 */
2079 2077 static void
2080 2078 sf_cr_free(struct sf_cr_pool *cp, struct sf_pkt *cmd)
2081 2079 {
2082 2080 struct sf *sf = cp->sf;
2083 2081 struct sf_cr_free_elem *elem;
2084 2082
2085 2083 elem = (struct sf_cr_free_elem *)cmd->cmd_block;
2086 2084 elem->rsp = (caddr_t)cmd->cmd_rsp_block;
2087 2085 elem->cmd_dmac = cmd->cmd_dmac;
2088 2086 elem->rsp_dmac = cmd->cmd_rsp_dmac;
2089 2087
2090 2088 mutex_enter(&sf->sf_cr_mutex);
2091 2089 cp->nfree++;
2092 2090 ASSERT(cp->nfree <= cp->ntot);
2093 2091
2094 2092 elem->next = cp->free;
2095 2093 cp->free = elem;
2096 2094 mutex_exit(&sf->sf_cr_mutex);
2097 2095 }
2098 2096
2099 2097
2100 2098 /*
2101 2099  * free one fully-unused cmd/response buffer pool, at most one per call
2102 2100 */
2103 2101 static void
2104 2102 sf_crpool_free(struct sf *sf)
2105 2103 {
2106 2104 struct sf_cr_pool *cp, *prev;
2107 2105
2108 2106 prev = NULL;
2109 2107 mutex_enter(&sf->sf_cr_mutex);
2110 2108 cp = sf->sf_cr_pool;
2111 2109 while (cp != NULL) {
2112 2110 if (cp->nfree == cp->ntot) {
2113 2111 if (prev != NULL) {
2114 2112 prev->next = cp->next;
2115 2113 } else {
2116 2114 sf->sf_cr_pool = cp->next;
2117 2115 }
2118 2116 sf->sf_cr_pool_cnt--;
2119 2117 mutex_exit(&sf->sf_cr_mutex);
2120 2118
2121 2119 (void) ddi_dma_unbind_handle(cp->cmd_dma_handle);
2122 2120 ddi_dma_free_handle(&cp->cmd_dma_handle);
2123 2121 (void) ddi_dma_unbind_handle(cp->rsp_dma_handle);
2124 2122 ddi_dma_free_handle(&cp->rsp_dma_handle);
2125 2123 ddi_dma_mem_free(&cp->cmd_acc_handle);
2126 2124 ddi_dma_mem_free(&cp->rsp_acc_handle);
2127 2125 kmem_free((caddr_t)cp, sizeof (struct sf_cr_pool));
2128 2126 return;
2129 2127 }
2130 2128 prev = cp;
2131 2129 cp = cp->next;
2132 2130 }
2133 2131 mutex_exit(&sf->sf_cr_mutex);
2134 2132 }
2135 2133
2136 2134
2137 2135 /* ARGSUSED */
2138 2136 static int
2139 2137 sf_kmem_cache_constructor(void *buf, void *arg, int size)
2140 2138 {
2141 2139 struct sf_pkt *cmd = buf;
2142 2140
2143 2141 mutex_init(&cmd->cmd_abort_mutex, NULL, MUTEX_DRIVER, NULL);
2144 2142 cmd->cmd_block = NULL;
2145 2143 cmd->cmd_dmahandle = NULL;
2146 2144 return (0);
2147 2145 }
2148 2146
2149 2147
2150 2148 /* ARGSUSED */
2151 2149 static void
2152 2150 sf_kmem_cache_destructor(void *buf, void *size)
2153 2151 {
2154 2152 struct sf_pkt *cmd = buf;
2155 2153
2156 2154 if (cmd->cmd_dmahandle != NULL) {
2157 2155 ddi_dma_free_handle(&cmd->cmd_dmahandle);
2158 2156 }
2159 2157
2160 2158 if (cmd->cmd_block != NULL) {
2161 2159 sf_cr_free(cmd->cmd_cr_pool, cmd);
2162 2160 }
2163 2161 mutex_destroy(&cmd->cmd_abort_mutex);
2164 2162 }
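
/*
 * [A minimal sketch, not part of this file.]  How the constructor and
 * destructor above are typically registered at attach time via
 * kmem_cache_create(9F); the cache name, the 8-byte alignment, and the
 * assumption that no reclaim callback is needed are all illustrative.
 */
static kmem_cache_t *
example_create_pkt_cache(void)
{
	return (kmem_cache_create("example_sf_pkt_cache",
	    sizeof (struct sf_pkt), 8,
	    sf_kmem_cache_constructor, sf_kmem_cache_destructor,
	    NULL, NULL, NULL, 0));
}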
2165 2163
2166 2164
2167 2165 /*
2168 2166 * called by transport when a state change occurs
2169 2167 */
2170 2168 static void
2171 2169 sf_statec_callback(void *arg, int msg)
2172 2170 {
2173 2171 struct sf *sf = (struct sf *)arg;
2174 2172 struct sf_target *target;
2175 2173 int i;
2176 2174 struct sf_pkt *cmd;
2177 2175 struct scsi_pkt *pkt;
2178 2176
2179 2177
2180 2178
2181 2179 switch (msg) {
2182 2180
2183 2181 case FCAL_STATUS_LOOP_ONLINE: {
2184 2182 uchar_t al_pa; /* to save AL-PA */
2185 2183 int ret; /* ret value from getmap */
2186 2184 int lip_cnt; /* to save current count */
2187 2185 int cnt; /* map length */
2188 2186
2189 2187 /*
2190 2188 * the loop has gone online
2191 2189 */
2192 2190 SF_DEBUG(1, (sf, CE_CONT, "sf%d: loop online\n",
2193 2191 ddi_get_instance(sf->sf_dip)));
2194 2192 mutex_enter(&sf->sf_mutex);
2195 2193 sf->sf_lip_cnt++;
2196 2194 sf->sf_state = SF_STATE_ONLINING;
2197 2195 mutex_exit(&sf->sf_mutex);
2198 2196
2199 2197 /* scan each target hash queue */
2200 2198 for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
2201 2199 target = sf->sf_wwn_lists[i];
2202 2200 while (target != NULL) {
2203 2201 /*
2204 2202 * foreach target, if it's not offline then
2205 2203 * mark it as busy
2206 2204 */
2207 2205 mutex_enter(&target->sft_mutex);
2208 2206 if (!(target->sft_state & SF_TARGET_OFFLINE))
2209 2207 target->sft_state |= (SF_TARGET_BUSY
2210 2208 | SF_TARGET_MARK);
2211 2209 #ifdef DEBUG
2212 2210 /*
2213 2211 * for debugging, print out info on any
2214 2212 * pending commands (left hanging)
2215 2213 */
2216 2214 cmd = target->sft_pkt_head;
2217 2215 while (cmd != (struct sf_pkt *)&target->
2218 2216 sft_pkt_head) {
2219 2217 if (cmd->cmd_state ==
2220 2218 SF_STATE_ISSUED) {
2221 2219 SF_DEBUG(1, (sf, CE_CONT,
2222 2220 "cmd 0x%p pending "
2223 2221 "after lip\n",
2224 2222 (void *)cmd->cmd_fp_pkt));
2225 2223 }
2226 2224 cmd = cmd->cmd_forw;
2227 2225 }
2228 2226 #endif
2229 2227 mutex_exit(&target->sft_mutex);
2230 2228 target = target->sft_next;
2231 2229 }
2232 2230 }
2233 2231
2234 2232 /*
2235 2233 * since the loop has just gone online get a new map from
2236 2234 * the transport
2237 2235 */
2238 2236 if ((ret = soc_get_lilp_map(sf->sf_sochandle, sf->sf_socp,
2239 2237 sf->sf_sochandle->fcal_portno, (uint32_t)sf->
2240 2238 sf_lilp_dmacookie.dmac_address, 1)) != FCAL_SUCCESS) {
2241 2239 if (sf_core && (sf_core & SF_CORE_LILP_FAILED)) {
2242 2240 (void) soc_take_core(sf->sf_sochandle,
2243 2241 sf->sf_socp);
2244 2242 sf_core = 0;
2245 2243 }
2246 2244 sf_log(sf, CE_WARN,
2247 2245 "!soc lilp map failed status=0x%x\n", ret);
2248 2246 mutex_enter(&sf->sf_mutex);
2249 2247 sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
2250 2248 sf->sf_lip_cnt++;
2251 2249 sf->sf_state = SF_STATE_OFFLINE;
2252 2250 mutex_exit(&sf->sf_mutex);
2253 2251 return;
2254 2252 }
2255 2253
2256 2254 /* ensure consistent view of DMA memory */
2257 2255 (void) ddi_dma_sync(sf->sf_lilp_dmahandle, (off_t)0, (size_t)0,
2258 2256 DDI_DMA_SYNC_FORKERNEL);
2259 2257
2260 2258 /* how many entries in map ? */
2261 2259 cnt = sf->sf_lilp_map->lilp_length;
2262 2260 if (cnt >= SF_MAX_LILP_ENTRIES) {
2263 2261 sf_log(sf, CE_WARN, "invalid lilp map\n");
2264 2262 return;
2265 2263 }
2266 2264
2267 2265 mutex_enter(&sf->sf_mutex);
2268 2266 sf->sf_device_count = cnt - 1;
2269 2267 sf->sf_al_pa = sf->sf_lilp_map->lilp_myalpa;
2270 2268 lip_cnt = sf->sf_lip_cnt;
2271 2269 al_pa = sf->sf_al_pa;
2272 2270
2273 2271 SF_DEBUG(1, (sf, CE_CONT,
2274 2272 "!lilp map has %d entries, al_pa is %x\n", cnt, al_pa));
2275 2273
2276 2274 /*
2277 2275 * since the last entry of the map may be mine (common) check
2278 2276 * for that, and if it is we have one less entry to look at
2279 2277 */
2280 2278 if (sf->sf_lilp_map->lilp_alpalist[cnt-1] == al_pa) {
2281 2279 cnt--;
2282 2280 }
2283 2281 /* If we didn't get a valid loop map enable all targets */
2284 2282 if (sf->sf_lilp_map->lilp_magic == FCAL_BADLILP_MAGIC) {
2285 2283 for (i = 0; i < sizeof (sf_switch_to_alpa); i++)
2286 2284 sf->sf_lilp_map->lilp_alpalist[i] =
2287 2285 sf_switch_to_alpa[i];
2288 2286 cnt = i;
2289 2287 sf->sf_device_count = cnt - 1;
2290 2288 }
2291 2289 if (sf->sf_device_count == 0) {
2292 2290 sf_finish_init(sf, lip_cnt);
2293 2291 mutex_exit(&sf->sf_mutex);
2294 2292 break;
2295 2293 }
2296 2294 mutex_exit(&sf->sf_mutex);
2297 2295
2298 2296 SF_DEBUG(2, (sf, CE_WARN,
2299 2297 "!statec_callback: starting with %d targets\n",
2300 2298 sf->sf_device_count));
2301 2299
2302 2300 /* scan loop map, logging into all ports (except mine) */
2303 2301 for (i = 0; i < cnt; i++) {
2304 2302 SF_DEBUG(1, (sf, CE_CONT,
2305 2303 "!lilp map entry %d = %x,%x\n", i,
2306 2304 sf->sf_lilp_map->lilp_alpalist[i],
2307 2305 sf_alpa_to_switch[
2308 2306 sf->sf_lilp_map->lilp_alpalist[i]]));
2309 2307 /* is this entry for somebody else ? */
2310 2308 if (sf->sf_lilp_map->lilp_alpalist[i] != al_pa) {
2311 2309 /* do a PLOGI to this port */
2312 2310 if (!sf_login(sf, LA_ELS_PLOGI,
2313 2311 sf->sf_lilp_map->lilp_alpalist[i],
2314 2312 sf->sf_lilp_map->lilp_alpalist[cnt-1],
2315 2313 lip_cnt)) {
2316 2314 /* a problem logging in */
2317 2315 mutex_enter(&sf->sf_mutex);
2318 2316 if (lip_cnt == sf->sf_lip_cnt) {
2319 2317 /*
2320 2318 * problem not from a new LIP
2321 2319 */
2322 2320 sf->sf_device_count--;
2323 2321 ASSERT(sf->sf_device_count
2324 2322 >= 0);
2325 2323 if (sf->sf_device_count == 0) {
2326 2324 sf_finish_init(sf,
2327 2325 lip_cnt);
2328 2326 }
2329 2327 }
2330 2328 mutex_exit(&sf->sf_mutex);
2331 2329 }
2332 2330 }
2333 2331 }
2334 2332 break;
2335 2333 }
2336 2334
2337 2335 case FCAL_STATUS_ERR_OFFLINE:
2338 2336 /*
2339 2337 * loop has gone offline due to an error
2340 2338 */
2341 2339 SF_DEBUG(1, (sf, CE_CONT, "sf%d: loop offline\n",
2342 2340 ddi_get_instance(sf->sf_dip)));
2343 2341 mutex_enter(&sf->sf_mutex);
2344 2342 sf->sf_lip_cnt++;
2345 2343 sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
2346 2344 if (!sf->sf_online_timer) {
2347 2345 sf->sf_online_timer = sf_watchdog_time +
2348 2346 SF_ONLINE_TIMEOUT;
2349 2347 }
2350 2348 /*
2351 2349 * if we are suspended, preserve the SF_STATE_SUSPENDED flag,
2352 2350 * since throttling logic in sf_watch() depends on
2353 2351 * preservation of this flag while device is suspended
2354 2352 */
2355 2353 if (sf->sf_state & SF_STATE_SUSPENDED) {
2356 2354 sf->sf_state |= SF_STATE_OFFLINE;
2357 2355 SF_DEBUG(1, (sf, CE_CONT,
2358 2356 "sf_statec_callback, sf%d: "
2359 2357 "got FCAL_STATE_OFFLINE during DDI_SUSPEND\n",
2360 2358 ddi_get_instance(sf->sf_dip)));
2361 2359 } else {
2362 2360 sf->sf_state = SF_STATE_OFFLINE;
2363 2361 }
2364 2362
2365 2363 /* scan each possible target on the loop */
2366 2364 for (i = 0; i < sf_max_targets; i++) {
2367 2365 target = sf->sf_targets[i];
2368 2366 while (target != NULL) {
2369 2367 mutex_enter(&target->sft_mutex);
2370 2368 if (!(target->sft_state & SF_TARGET_OFFLINE))
2371 2369 target->sft_state |= (SF_TARGET_BUSY
2372 2370 | SF_TARGET_MARK);
2373 2371 mutex_exit(&target->sft_mutex);
2374 2372 target = target->sft_next_lun;
2375 2373 }
2376 2374 }
2377 2375 mutex_exit(&sf->sf_mutex);
2378 2376 break;
2379 2377
2380 2378 case FCAL_STATE_RESET: {
2381 2379 struct sf_els_hdr *privp; /* ptr to private list */
2382 2380 struct sf_els_hdr *tmpp1; /* tmp prev hdr ptr */
2383 2381 struct sf_els_hdr *tmpp2; /* tmp next hdr ptr */
2384 2382 struct sf_els_hdr *head; /* to save our private list */
2385 2383 struct fcal_packet *fpkt; /* ptr to pkt in hdr */
2386 2384
2387 2385 /*
2388 2386 * a transport reset
2389 2387 */
2390 2388 SF_DEBUG(1, (sf, CE_CONT, "!sf%d: soc reset\n",
2391 2389 ddi_get_instance(sf->sf_dip)));
2392 2390 tmpp1 = head = NULL;
2393 2391 mutex_enter(&sf->sf_mutex);
2394 2392 sf->sf_lip_cnt++;
2395 2393 sf->sf_timer = sf_watchdog_time + SF_RESET_TIMEOUT;
2396 2394 /*
2397 2395 * if we are suspended, preserve the SF_STATE_SUSPENDED flag,
2398 2396 * since throttling logic in sf_watch() depends on
2399 2397 * preservation of this flag while device is suspended
2400 2398 */
2401 2399 if (sf->sf_state & SF_STATE_SUSPENDED) {
2402 2400 sf->sf_state |= SF_STATE_OFFLINE;
2403 2401 SF_DEBUG(1, (sf, CE_CONT,
2404 2402 "sf_statec_callback, sf%d: "
2405 2403 "got FCAL_STATE_RESET during DDI_SUSPEND\n",
2406 2404 ddi_get_instance(sf->sf_dip)));
2407 2405 } else {
2408 2406 sf->sf_state = SF_STATE_OFFLINE;
2409 2407 }
2410 2408
2411 2409 /*
2412 2410 * scan each possible target on the loop, looking for targets
2413 2411 	 * that need callbacks run
2414 2412 */
2415 2413 for (i = 0; i < sf_max_targets; i++) {
2416 2414 target = sf->sf_targets[i];
2417 2415 while (target != NULL) {
2418 2416 if (!(target->sft_state & SF_TARGET_OFFLINE)) {
2419 2417 target->sft_state |= (SF_TARGET_BUSY
2420 2418 | SF_TARGET_MARK);
2421 2419 mutex_exit(&sf->sf_mutex);
2422 2420 /*
2423 2421 * run remove event callbacks for lun
2424 2422 *
2425 2423 * We have a nasty race condition here
2426 2424 * 'cause we're dropping this mutex to
2427 2425 * run the callback and expect the
2428 2426 * linked list to be the same.
2429 2427 */
2430 2428 (void) ndi_event_retrieve_cookie(
2431 2429 sf->sf_event_hdl, target->sft_dip,
2432 2430 FCAL_REMOVE_EVENT, &sf_remove_eid,
2433 2431 NDI_EVENT_NOPASS);
2434 2432 (void) ndi_event_run_callbacks(
2435 2433 sf->sf_event_hdl,
2436 2434 target->sft_dip,
2437 2435 sf_remove_eid, NULL);
2438 2436 mutex_enter(&sf->sf_mutex);
2439 2437 }
2440 2438 target = target->sft_next_lun;
2441 2439 }
2442 2440 }
2443 2441
2444 2442 /*
2445 2443 * scan for ELS commands that are in transport, not complete,
2446 2444 * and have a valid timeout, building a private list
2447 2445 */
2448 2446 privp = sf->sf_els_list;
2449 2447 while (privp != NULL) {
2450 2448 fpkt = privp->fpkt;
2451 2449 if ((fpkt->fcal_cmd_state & FCAL_CMD_IN_TRANSPORT) &&
2452 2450 (!(fpkt->fcal_cmd_state & FCAL_CMD_COMPLETE)) &&
2453 2451 (privp->timeout != SF_INVALID_TIMEOUT)) {
2454 2452 /*
2455 2453 * cmd in transport && not complete &&
2456 2454 * timeout valid
2457 2455 *
2458 2456 * move this entry from ELS input list to our
2459 2457 * private list
2460 2458 */
2461 2459
2462 2460 tmpp2 = privp->next; /* save ptr to next */
2463 2461
2464 2462 /* push this on private list head */
2465 2463 privp->next = head;
2466 2464 head = privp;
2467 2465
2468 2466 /* remove this entry from input list */
2469 2467 if (tmpp1 != NULL) {
2470 2468 /*
2471 2469 * remove this entry from somewhere in
2472 2470 * the middle of the list
2473 2471 */
2474 2472 tmpp1->next = tmpp2;
2475 2473 if (tmpp2 != NULL) {
2476 2474 tmpp2->prev = tmpp1;
2477 2475 }
2478 2476 } else {
2479 2477 /*
2480 2478 * remove this entry from the head
2481 2479 * of the list
2482 2480 */
2483 2481 sf->sf_els_list = tmpp2;
2484 2482 if (tmpp2 != NULL) {
2485 2483 tmpp2->prev = NULL;
2486 2484 }
2487 2485 }
2488 2486 privp = tmpp2; /* skip to next entry */
2489 2487 } else {
2490 2488 tmpp1 = privp; /* save ptr to prev entry */
2491 2489 privp = privp->next; /* skip to next entry */
2492 2490 }
2493 2491 }
2494 2492
2495 2493 mutex_exit(&sf->sf_mutex);
2496 2494
2497 2495 /*
2498 2496 * foreach cmd in our list free the ELS packet associated
2499 2497 * with it
2500 2498 */
2501 2499 privp = head;
2502 2500 while (privp != NULL) {
2503 2501 fpkt = privp->fpkt;
2504 2502 privp = privp->next;
2505 2503 sf_els_free(fpkt);
2506 2504 }
2507 2505
2508 2506 /*
2509 2507 * scan for commands from each possible target
2510 2508 */
2511 2509 for (i = 0; i < sf_max_targets; i++) {
2512 2510 target = sf->sf_targets[i];
2513 2511 while (target != NULL) {
2514 2512 /*
2515 2513 * scan all active commands for this target,
2516 2514 * looking for commands that have been issued,
2517 2515 * are in transport, and are not yet complete
2518 2516 * (so we can terminate them because of the
2519 2517 * reset)
2520 2518 */
2521 2519 mutex_enter(&target->sft_pkt_mutex);
2522 2520 cmd = target->sft_pkt_head;
2523 2521 while (cmd != (struct sf_pkt *)&target->
2524 2522 sft_pkt_head) {
2525 2523 fpkt = cmd->cmd_fp_pkt;
2526 2524 mutex_enter(&cmd->cmd_abort_mutex);
2527 2525 if ((cmd->cmd_state ==
2528 2526 SF_STATE_ISSUED) &&
2529 2527 (fpkt->fcal_cmd_state &
2530 2528 FCAL_CMD_IN_TRANSPORT) &&
2531 2529 (!(fpkt->fcal_cmd_state &
2532 2530 FCAL_CMD_COMPLETE))) {
2533 2531 /* a command to be reset */
2534 2532 pkt = cmd->cmd_pkt;
2535 2533 pkt->pkt_reason = CMD_RESET;
2536 2534 pkt->pkt_statistics |=
2537 2535 STAT_BUS_RESET;
2538 2536 cmd->cmd_state = SF_STATE_IDLE;
2539 2537 mutex_exit(&cmd->
2540 2538 cmd_abort_mutex);
2541 2539 mutex_exit(&target->
2542 2540 sft_pkt_mutex);
2543 2541 if (pkt->pkt_comp != NULL) {
2544 2542 (*pkt->pkt_comp)(pkt);
2545 2543 }
2546 2544 mutex_enter(&target->
2547 2545 sft_pkt_mutex);
2548 2546 cmd = target->sft_pkt_head;
2549 2547 } else {
2550 2548 mutex_exit(&cmd->
2551 2549 cmd_abort_mutex);
2552 2550 /* get next command */
2553 2551 cmd = cmd->cmd_forw;
2554 2552 }
2555 2553 }
2556 2554 mutex_exit(&target->sft_pkt_mutex);
2557 2555 target = target->sft_next_lun;
2558 2556 }
2559 2557 }
2560 2558
2561 2559 /*
2562 2560 		 * drain this instance's pending packet queue, resetting all
2563 2561 		 * remaining commands
2564 2562 */
2565 2563 mutex_enter(&sf->sf_mutex);
2566 2564 cmd = sf->sf_pkt_head;
2567 2565 sf->sf_pkt_head = NULL;
2568 2566 mutex_exit(&sf->sf_mutex);
2569 2567
2570 2568 while (cmd != NULL) {
2571 2569 pkt = cmd->cmd_pkt;
2572 2570 cmd = cmd->cmd_next;
2573 2571 pkt->pkt_reason = CMD_RESET;
2574 2572 pkt->pkt_statistics |= STAT_BUS_RESET;
2575 2573 if (pkt->pkt_comp != NULL) {
2576 2574 (*pkt->pkt_comp)(pkt);
2577 2575 }
2578 2576 }
2579 2577 break;
2580 2578 }
2581 2579
2582 2580 default:
2583 2581 break;
2584 2582 }
2585 2583 }
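
/*
 * [A minimal sketch, not part of this file.]  The loop-map walk
 * performed in the LOOP_ONLINE case above: every AL-PA in the map
 * except our own gets a port login attempt.  The flat byte-array map
 * and the login hook are hypothetical stand-ins for the lilp map and
 * sf_login().
 */
static void
example_login(uchar_t al_pa)
{
	/* stand-in for sf_login(sf, LA_ELS_PLOGI, al_pa, ...) */
	(void) al_pa;
}

static void
example_walk_lilp_map(const uchar_t *alpa_list, int cnt, uchar_t my_alpa)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (alpa_list[i] == my_alpa)
			continue;	/* don't log in to ourselves */
		example_login(alpa_list[i]);
	}
}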
2586 2584
2587 2585
2588 2586 /*
2589 2587  * called to send a PLOGI (N_port login) or LOGO ELS request to a
2590 2588  * destination ID, returning TRUE upon success, else returning FALSE
2591 2589 */
2592 2590 static int
2593 2591 sf_login(struct sf *sf, uchar_t els_code, uchar_t dest_id, uint_t arg1,
2594 2592 int lip_cnt)
2595 2593 {
2596 2594 struct la_els_logi *logi;
2597 2595 struct sf_els_hdr *privp;
2598 2596
2599 2597
2600 2598 if (sf_els_alloc(sf, dest_id, sizeof (struct sf_els_hdr),
2601 2599 sizeof (union sf_els_cmd), sizeof (union sf_els_rsp),
2602 2600 (caddr_t *)&privp, (caddr_t *)&logi) == NULL) {
2603 2601 sf_log(sf, CE_WARN, "Cannot allocate PLOGI for target %x "
2604 2602 "due to DVMA shortage.\n", sf_alpa_to_switch[dest_id]);
2605 2603 return (FALSE);
2606 2604 }
2607 2605
2608 2606 privp->lip_cnt = lip_cnt;
2609 2607 if (els_code == LA_ELS_PLOGI) {
2610 2608 bcopy((caddr_t)sf->sf_sochandle->fcal_loginparms,
2611 2609 (caddr_t)&logi->common_service, sizeof (struct la_els_logi)
2612 2610 - 4);
2613 2611 bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2614 2612 (caddr_t)&logi->nport_ww_name, sizeof (la_wwn_t));
2615 2613 bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
2616 2614 (caddr_t)&logi->node_ww_name, sizeof (la_wwn_t));
2617 2615 bzero((caddr_t)&logi->reserved, 16);
2618 2616 } else if (els_code == LA_ELS_LOGO) {
2619 2617 bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2620 2618 (caddr_t)&(((struct la_els_logo *)logi)->nport_ww_name), 8);
2621 2619 ((struct la_els_logo *)logi)->reserved = 0;
2622 2620 ((struct la_els_logo *)logi)->nport_id[0] = 0;
2623 2621 ((struct la_els_logo *)logi)->nport_id[1] = 0;
2624 2622 ((struct la_els_logo *)logi)->nport_id[2] = arg1;
2625 2623 }
2626 2624
2627 2625 privp->els_code = els_code;
2628 2626 logi->ls_code = els_code;
2629 2627 logi->mbz[0] = 0;
2630 2628 logi->mbz[1] = 0;
2631 2629 logi->mbz[2] = 0;
2632 2630
2633 2631 privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2634 2632 return (sf_els_transport(sf, privp));
2635 2633 }
2636 2634
2637 2635
2638 2636 /*
2639 2637 * send an ELS IU via the transport,
2640 2638 * returning TRUE upon success, else returning FALSE
2641 2639 */
2642 2640 static int
2643 2641 sf_els_transport(struct sf *sf, struct sf_els_hdr *privp)
2644 2642 {
2645 2643 struct fcal_packet *fpkt = privp->fpkt;
2646 2644
2647 2645
2648 2646 (void) ddi_dma_sync(privp->cmd_dma_handle, (off_t)0, (size_t)0,
2649 2647 DDI_DMA_SYNC_FORDEV);
2650 2648 privp->prev = NULL;
2651 2649 mutex_enter(&sf->sf_mutex);
2652 2650 privp->next = sf->sf_els_list;
2653 2651 if (sf->sf_els_list != NULL) {
2654 2652 sf->sf_els_list->prev = privp;
2655 2653 }
2656 2654 sf->sf_els_list = privp;
2657 2655 mutex_exit(&sf->sf_mutex);
2658 2656
2659 2657 /* call the transport to send a packet */
2660 2658 if (soc_transport(sf->sf_sochandle, fpkt, FCAL_NOSLEEP,
2661 2659 CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
2662 2660 mutex_enter(&sf->sf_mutex);
2663 2661 if (privp->prev != NULL) {
2664 2662 privp->prev->next = privp->next;
2665 2663 }
2666 2664 if (privp->next != NULL) {
2667 2665 privp->next->prev = privp->prev;
2668 2666 }
2669 2667 if (sf->sf_els_list == privp) {
2670 2668 sf->sf_els_list = privp->next;
2671 2669 }
2672 2670 mutex_exit(&sf->sf_mutex);
2673 2671 sf_els_free(fpkt);
2674 2672 return (FALSE); /* failure */
2675 2673 }
2676 2674 return (TRUE); /* success */
2677 2675 }
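
/*
 * [A minimal sketch, not part of this file.]  The list discipline used
 * above: push the request at the head of a doubly linked list before
 * handing it to the transport, and unlink it again if the transport
 * refuses it.  The node type and names are illustrative.
 */
struct example_node {
	struct example_node *next, *prev;
};

static void
example_push_head(struct example_node **headp, struct example_node *n)
{
	n->prev = NULL;
	n->next = *headp;
	if (*headp != NULL)
		(*headp)->prev = n;
	*headp = n;
}

static void
example_unlink(struct example_node **headp, struct example_node *n)
{
	if (n->prev != NULL)
		n->prev->next = n->next;
	if (n->next != NULL)
		n->next->prev = n->prev;
	if (*headp == n)
		*headp = n->next;
}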
2678 2676
2679 2677
2680 2678 /*
2681 2679 * called as the pkt_comp routine for ELS FC packets
2682 2680 */
2683 2681 static void
2684 2682 sf_els_callback(struct fcal_packet *fpkt)
2685 2683 {
2686 2684 struct sf_els_hdr *privp = fpkt->fcal_pkt_private;
2687 2685 struct sf *sf = privp->sf;
2688 2686 struct sf *tsf;
2689 2687 int tgt_id;
2690 2688 struct la_els_logi *ptr = (struct la_els_logi *)privp->rsp;
2691 2689 struct la_els_adisc *adisc = (struct la_els_adisc *)ptr;
2692 2690 struct sf_target *target;
2693 2691 short ncmds;
2694 2692 short free_pkt = TRUE;
2695 2693
2696 2694
2697 2695 /*
2698 2696 * we've received an ELS callback, i.e. an ELS packet has arrived
2699 2697 */
2700 2698
2701 2699 /* take the current packet off of the queue */
2702 2700 mutex_enter(&sf->sf_mutex);
2703 2701 if (privp->timeout == SF_INVALID_TIMEOUT) {
2704 2702 mutex_exit(&sf->sf_mutex);
2705 2703 return;
2706 2704 }
2707 2705 if (privp->prev != NULL) {
2708 2706 privp->prev->next = privp->next;
2709 2707 }
2710 2708 if (privp->next != NULL) {
2711 2709 privp->next->prev = privp->prev;
2712 2710 }
2713 2711 if (sf->sf_els_list == privp) {
2714 2712 sf->sf_els_list = privp->next;
2715 2713 }
2716 2714 privp->prev = privp->next = NULL;
2717 2715 mutex_exit(&sf->sf_mutex);
2718 2716
2719 2717 /* get # pkts in this callback */
2720 2718 ncmds = fpkt->fcal_ncmds;
2721 2719 ASSERT(ncmds >= 0);
2722 2720 mutex_enter(&sf->sf_cmd_mutex);
2723 2721 sf->sf_ncmds = ncmds;
2724 2722 mutex_exit(&sf->sf_cmd_mutex);
2725 2723
2726 2724 	/* sync the CPU's view of the response buffer */
2727 2725 (void) ddi_dma_sync(privp->rsp_dma_handle, (off_t)0, (size_t)0,
2728 2726 DDI_DMA_SYNC_FORKERNEL);
2729 2727
2730 2728 /* was this an OK ACC msg ?? */
2731 2729 if ((fpkt->fcal_pkt_status == FCAL_STATUS_OK) &&
2732 2730 (ptr->ls_code == LA_ELS_ACC)) {
2733 2731
2734 2732 /*
2735 2733 * this was an OK ACC pkt
2736 2734 */
2737 2735
2738 2736 switch (privp->els_code) {
2739 2737 case LA_ELS_PLOGI:
2740 2738 /*
2741 2739 			 * was able to do an N_port login
2742 2740 */
2743 2741 SF_DEBUG(2, (sf, CE_CONT,
2744 2742 "!PLOGI to al_pa %x succeeded, wwn %x%x\n",
2745 2743 privp->dest_nport_id,
2746 2744 *((int *)&ptr->nport_ww_name.raw_wwn[0]),
2747 2745 *((int *)&ptr->nport_ww_name.raw_wwn[4])));
2748 2746 /* try to do a process login */
2749 2747 if (!sf_do_prli(sf, privp, ptr)) {
2750 2748 free_pkt = FALSE;
2751 2749 goto fail; /* PRLI failed */
2752 2750 }
2753 2751 break;
2754 2752 case LA_ELS_PRLI:
2755 2753 /*
2756 2754 * was able to do a process login
2757 2755 */
2758 2756 SF_DEBUG(2, (sf, CE_CONT,
2759 2757 "!PRLI to al_pa %x succeeded\n",
2760 2758 privp->dest_nport_id));
2761 2759 /* try to do address discovery */
2762 2760 if (sf_do_adisc(sf, privp) != 1) {
2763 2761 free_pkt = FALSE;
2764 2762 goto fail; /* ADISC failed */
2765 2763 }
2766 2764 break;
2767 2765 case LA_ELS_ADISC:
2768 2766 /*
2769 2767 * found a target via ADISC
2770 2768 */
2771 2769
2772 2770 SF_DEBUG(2, (sf, CE_CONT,
2773 2771 "!ADISC to al_pa %x succeeded\n",
2774 2772 privp->dest_nport_id));
2775 2773
2776 2774 /* create the target info */
2777 2775 if ((target = sf_create_target(sf, privp,
2778 2776 sf_alpa_to_switch[(uchar_t)adisc->hard_address],
2779 2777 (int64_t)0))
2780 2778 == NULL) {
2781 2779 goto fail; /* can't create target */
2782 2780 }
2783 2781
2784 2782 /*
2785 2783 * ensure address discovered matches what we thought
2786 2784 * it would be
2787 2785 */
2788 2786 if ((uchar_t)adisc->hard_address !=
2789 2787 privp->dest_nport_id) {
2790 2788 sf_log(sf, CE_WARN,
2791 2789 "target 0x%x, AL-PA 0x%x and "
2792 2790 "hard address 0x%x don't match\n",
2793 2791 sf_alpa_to_switch[
2794 2792 (uchar_t)privp->dest_nport_id],
2795 2793 privp->dest_nport_id,
2796 2794 (uchar_t)adisc->hard_address);
2797 2795 mutex_enter(&sf->sf_mutex);
2798 2796 sf_offline_target(sf, target);
2799 2797 mutex_exit(&sf->sf_mutex);
2800 2798 goto fail; /* addr doesn't match */
2801 2799 }
2802 2800 /*
2803 2801 			 * get the LUN list from the target (REPORT LUNS)
2804 2802 */
2805 2803 if (!sf_do_reportlun(sf, privp, target)) {
2806 2804 mutex_enter(&sf->sf_mutex);
2807 2805 sf_offline_target(sf, target);
2808 2806 mutex_exit(&sf->sf_mutex);
2809 2807 free_pkt = FALSE;
2810 2808 				goto fail; /* REPORT LUNS failed */
2811 2809 }
2812 2810 break;
2813 2811 default:
2814 2812 SF_DEBUG(2, (sf, CE_CONT,
2815 2813 "!ELS %x to al_pa %x succeeded\n",
2816 2814 privp->els_code, privp->dest_nport_id));
2817 2815 sf_els_free(fpkt);
2818 2816 break;
2819 2817 }
2820 2818
2821 2819 } else {
2822 2820
2823 2821 /*
2824 2822 * oh oh -- this was not an OK ACC packet
2825 2823 */
2826 2824
2827 2825 /* get target ID from dest loop address */
2828 2826 tgt_id = sf_alpa_to_switch[(uchar_t)privp->dest_nport_id];
2829 2827
2830 2828 /* keep track of failures */
2831 2829 sf->sf_stats.tstats[tgt_id].els_failures++;
2832 2830 if (++(privp->retries) < sf_els_retries &&
2833 2831 fpkt->fcal_pkt_status != FCAL_STATUS_OPEN_FAIL) {
2834 2832 if (fpkt->fcal_pkt_status ==
2835 2833 FCAL_STATUS_MAX_XCHG_EXCEEDED) {
2836 2834 tsf = sf->sf_sibling;
2837 2835 if (tsf != NULL) {
2838 2836 mutex_enter(&tsf->sf_cmd_mutex);
2839 2837 tsf->sf_flag = 1;
2840 2838 tsf->sf_throttle = SF_DECR_DELTA;
2841 2839 mutex_exit(&tsf->sf_cmd_mutex);
2842 2840 }
2843 2841 }
2844 2842 privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2845 2843 privp->prev = NULL;
2846 2844
2847 2845 mutex_enter(&sf->sf_mutex);
2848 2846
2849 2847 if (privp->lip_cnt == sf->sf_lip_cnt) {
2850 2848 SF_DEBUG(1, (sf, CE_WARN,
2851 2849 "!ELS %x to al_pa %x failed, retrying",
2852 2850 privp->els_code, privp->dest_nport_id));
2853 2851 privp->next = sf->sf_els_list;
2854 2852 if (sf->sf_els_list != NULL) {
2855 2853 sf->sf_els_list->prev = privp;
2856 2854 }
2857 2855
2858 2856 sf->sf_els_list = privp;
2859 2857
2860 2858 mutex_exit(&sf->sf_mutex);
2861 2859 /* device busy? wait a bit ... */
2862 2860 if (fpkt->fcal_pkt_status ==
2863 2861 FCAL_STATUS_MAX_XCHG_EXCEEDED) {
2864 2862 privp->delayed_retry = 1;
2865 2863 return;
2866 2864 }
2867 2865 /* call the transport to send a pkt */
2868 2866 if (soc_transport(sf->sf_sochandle, fpkt,
2869 2867 FCAL_NOSLEEP, CQ_REQUEST_1) !=
2870 2868 FCAL_TRANSPORT_SUCCESS) {
2871 2869 mutex_enter(&sf->sf_mutex);
2872 2870 if (privp->prev != NULL) {
2873 2871 privp->prev->next =
2874 2872 privp->next;
2875 2873 }
2876 2874 if (privp->next != NULL) {
2877 2875 privp->next->prev =
2878 2876 privp->prev;
2879 2877 }
2880 2878 if (sf->sf_els_list == privp) {
2881 2879 sf->sf_els_list = privp->next;
2882 2880 }
2883 2881 mutex_exit(&sf->sf_mutex);
2884 2882 goto fail;
2885 2883 } else
2886 2884 return;
2887 2885 } else {
2888 2886 mutex_exit(&sf->sf_mutex);
2889 2887 goto fail;
2890 2888 }
2891 2889 } else {
2892 2890 #ifdef DEBUG
2893 2891 if (fpkt->fcal_pkt_status != 0x36 || sfdebug > 4) {
2894 2892 SF_DEBUG(2, (sf, CE_NOTE, "ELS %x to al_pa %x failed",
2895 2893 privp->els_code, privp->dest_nport_id));
2896 2894 if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
2897 2895 SF_DEBUG(2, (sf, CE_NOTE,
2898 2896 "els reply code = %x", ptr->ls_code));
2899 2897 if (ptr->ls_code == LA_ELS_RJT)
2900 2898 SF_DEBUG(1, (sf, CE_CONT,
2901 2899 "LS_RJT reason = %x\n",
2902 2900 *(((uint_t *)ptr) + 1)));
2903 2901 } else
2904 2902 SF_DEBUG(2, (sf, CE_NOTE,
2905 2903 "fc packet status = %x",
2906 2904 fpkt->fcal_pkt_status));
2907 2905 }
2908 2906 #endif
2909 2907 goto fail;
2910 2908 }
2911 2909 }
2912 2910 return; /* success */
2913 2911 fail:
2914 2912 mutex_enter(&sf->sf_mutex);
2915 2913 if (sf->sf_lip_cnt == privp->lip_cnt) {
2916 2914 sf->sf_device_count--;
2917 2915 ASSERT(sf->sf_device_count >= 0);
2918 2916 if (sf->sf_device_count == 0) {
2919 2917 sf_finish_init(sf, privp->lip_cnt);
2920 2918 }
2921 2919 }
2922 2920 mutex_exit(&sf->sf_mutex);
2923 2921 if (free_pkt) {
2924 2922 sf_els_free(fpkt);
2925 2923 }
2926 2924 }
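
/*
 * [A minimal sketch, not part of this file.]  The discovery ladder the
 * callback above climbs on each successful ACC: PLOGI, then PRLI, then
 * ADISC, then REPORT LUNS.  The enum and function are hypothetical;
 * the real driver carries this state in privp->els_code.
 */
enum example_els_step {
	EXAMPLE_PLOGI,		/* N_port login */
	EXAMPLE_PRLI,		/* process login */
	EXAMPLE_ADISC,		/* address discovery */
	EXAMPLE_REPORTLUN,	/* fetch the LUN list */
	EXAMPLE_DONE
};

static enum example_els_step
example_next_step(enum example_els_step cur)
{
	switch (cur) {
	case EXAMPLE_PLOGI:
		return (EXAMPLE_PRLI);
	case EXAMPLE_PRLI:
		return (EXAMPLE_ADISC);
	case EXAMPLE_ADISC:
		return (EXAMPLE_REPORTLUN);
	default:
		return (EXAMPLE_DONE);
	}
}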
2927 2925
2928 2926
2929 2927 /*
2930 2928 * send a PRLI (process login) ELS IU via the transport,
2931 2929 * returning TRUE upon success, else returning FALSE
2932 2930 */
2933 2931 static int
2934 2932 sf_do_prli(struct sf *sf, struct sf_els_hdr *privp, struct la_els_logi *ptr)
2935 2933 {
2936 2934 struct la_els_prli *prli = (struct la_els_prli *)privp->cmd;
2937 2935 struct fcp_prli *fprli;
2938 2936 struct fcal_packet *fpkt = privp->fpkt;
2939 2937
2940 2938
2941 2939 fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
2942 2940 sizeof (struct la_els_prli);
2943 2941 privp->els_code = LA_ELS_PRLI;
2944 2942 fprli = (struct fcp_prli *)prli->service_params;
2945 2943 prli->ls_code = LA_ELS_PRLI;
2946 2944 prli->page_length = 0x10;
2947 2945 prli->payload_length = sizeof (struct la_els_prli);
2948 2946 fprli->type = 0x08; /* no define here? */
2949 2947 fprli->resvd1 = 0;
2950 2948 fprli->orig_process_assoc_valid = 0;
2951 2949 fprli->resp_process_assoc_valid = 0;
2952 2950 fprli->establish_image_pair = 1;
2953 2951 fprli->resvd2 = 0;
2954 2952 fprli->resvd3 = 0;
2955 2953 fprli->data_overlay_allowed = 0;
2956 2954 fprli->initiator_fn = 1;
2957 2955 fprli->target_fn = 0;
2958 2956 fprli->cmd_data_mixed = 0;
2959 2957 fprli->data_resp_mixed = 0;
2960 2958 fprli->read_xfer_rdy_disabled = 1;
2961 2959 fprli->write_xfer_rdy_disabled = 0;
2962 2960
2963 2961 bcopy((caddr_t)&ptr->nport_ww_name, (caddr_t)&privp->port_wwn,
2964 2962 sizeof (privp->port_wwn));
2965 2963 bcopy((caddr_t)&ptr->node_ww_name, (caddr_t)&privp->node_wwn,
2966 2964 sizeof (privp->node_wwn));
2967 2965
2968 2966 privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2969 2967 return (sf_els_transport(sf, privp));
2970 2968 }
2971 2969
2972 2970
2973 2971 /*
2974 2972 * send an ADISC (address discovery) ELS IU via the transport,
2975 2973 * returning TRUE upon success, else returning FALSE
2976 2974 */
2977 2975 static int
2978 2976 sf_do_adisc(struct sf *sf, struct sf_els_hdr *privp)
2979 2977 {
2980 2978 struct la_els_adisc *adisc = (struct la_els_adisc *)privp->cmd;
2981 2979 struct fcal_packet *fpkt = privp->fpkt;
2982 2980
2983 2981 privp->els_code = LA_ELS_ADISC;
2984 2982 adisc->ls_code = LA_ELS_ADISC;
2985 2983 adisc->mbz[0] = 0;
2986 2984 adisc->mbz[1] = 0;
2987 2985 adisc->mbz[2] = 0;
2988 2986 adisc->hard_address = 0; /* ??? */
2989 2987 fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
2990 2988 sizeof (struct la_els_adisc);
2991 2989 bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2992 2990 (caddr_t)&adisc->port_wwn, sizeof (adisc->port_wwn));
2993 2991 bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
2994 2992 (caddr_t)&adisc->node_wwn, sizeof (adisc->node_wwn));
2995 2993 adisc->nport_id = sf->sf_al_pa;
2996 2994
2997 2995 privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2998 2996 return (sf_els_transport(sf, privp));
2999 2997 }
3000 2998
3001 2999
3002 3000 static struct fcal_packet *
3003 3001 sf_els_alloc(struct sf *sf, uchar_t dest_id, int priv_size, int cmd_size,
3004 3002 int rsp_size, caddr_t *rprivp, caddr_t *cmd_buf)
3005 3003 {
3006 3004 struct fcal_packet *fpkt;
3007 3005 ddi_dma_cookie_t pcookie;
3008 3006 ddi_dma_cookie_t rcookie;
3009 3007 struct sf_els_hdr *privp;
3010 3008 ddi_dma_handle_t cmd_dma_handle = NULL;
3011 3009 ddi_dma_handle_t rsp_dma_handle = NULL;
3012 3010 ddi_acc_handle_t cmd_acc_handle = NULL;
3013 3011 ddi_acc_handle_t rsp_acc_handle = NULL;
3014 3012 size_t real_size;
3015 3013 uint_t ccount;
3016 3014 fc_frame_header_t *hp;
3017 3015 int cmd_bound = FALSE, rsp_bound = FALSE;
3018 3016 caddr_t cmd = NULL;
3019 3017 caddr_t rsp = NULL;
3020 3018
3021 3019 if ((fpkt = (struct fcal_packet *)kmem_zalloc(
3022 3020 sizeof (struct fcal_packet), KM_NOSLEEP)) == NULL) {
3023 3021 SF_DEBUG(1, (sf, CE_WARN,
3024 3022 "Could not allocate fcal_packet for ELS\n"));
3025 3023 return (NULL);
3026 3024 }
3027 3025
3028 3026 if ((privp = (struct sf_els_hdr *)kmem_zalloc(priv_size,
3029 3027 KM_NOSLEEP)) == NULL) {
3030 3028 SF_DEBUG(1, (sf, CE_WARN,
3031 3029 "Could not allocate sf_els_hdr for ELS\n"));
3032 3030 goto fail;
3033 3031 }
3034 3032
3035 3033 privp->size = priv_size;
3036 3034 fpkt->fcal_pkt_private = (caddr_t)privp;
3037 3035
3038 3036 if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3039 3037 DDI_DMA_DONTWAIT, NULL, &cmd_dma_handle) != DDI_SUCCESS) {
3040 3038 SF_DEBUG(1, (sf, CE_WARN,
3041 3039 "Could not allocate DMA handle for ELS\n"));
3042 3040 goto fail;
3043 3041 }
3044 3042
3045 3043 if (ddi_dma_mem_alloc(cmd_dma_handle, cmd_size,
3046 3044 sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3047 3045 DDI_DMA_DONTWAIT, NULL, &cmd,
3048 3046 &real_size, &cmd_acc_handle) != DDI_SUCCESS) {
3049 3047 SF_DEBUG(1, (sf, CE_WARN,
3050 3048 "Could not allocate DMA memory for ELS\n"));
3051 3049 goto fail;
3052 3050 }
3053 3051
3054 3052 if (real_size < cmd_size) {
3055 3053 SF_DEBUG(1, (sf, CE_WARN,
3056 3054 "DMA memory too small for ELS\n"));
3057 3055 goto fail;
3058 3056 }
3059 3057
3060 3058 if (ddi_dma_addr_bind_handle(cmd_dma_handle, NULL,
3061 3059 cmd, real_size, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
3062 3060 DDI_DMA_DONTWAIT, NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3063 3061 SF_DEBUG(1, (sf, CE_WARN,
3064 3062 "Could not bind DMA memory for ELS\n"));
3065 3063 goto fail;
3066 3064 }
3067 3065 cmd_bound = TRUE;
3068 3066
3069 3067 if (ccount != 1) {
3070 3068 SF_DEBUG(1, (sf, CE_WARN,
3071 3069 "Wrong cookie count for ELS\n"));
3072 3070 goto fail;
3073 3071 }
3074 3072
3075 3073 if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3076 3074 DDI_DMA_DONTWAIT, NULL, &rsp_dma_handle) != DDI_SUCCESS) {
3077 3075 SF_DEBUG(1, (sf, CE_WARN,
3078 3076 "Could not allocate DMA handle for ELS rsp\n"));
3079 3077 goto fail;
3080 3078 }
3081 3079 if (ddi_dma_mem_alloc(rsp_dma_handle, rsp_size,
3082 3080 sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3083 3081 DDI_DMA_DONTWAIT, NULL, &rsp,
3084 3082 &real_size, &rsp_acc_handle) != DDI_SUCCESS) {
3085 3083 SF_DEBUG(1, (sf, CE_WARN,
3086 3084 "Could not allocate DMA memory for ELS rsp\n"));
3087 3085 goto fail;
3088 3086 }
3089 3087
3090 3088 if (real_size < rsp_size) {
3091 3089 SF_DEBUG(1, (sf, CE_WARN,
3092 3090 "DMA memory too small for ELS rsp\n"));
3093 3091 goto fail;
3094 3092 }
3095 3093
3096 3094 if (ddi_dma_addr_bind_handle(rsp_dma_handle, NULL,
3097 3095 rsp, real_size, DDI_DMA_READ | DDI_DMA_CONSISTENT,
3098 3096 DDI_DMA_DONTWAIT, NULL, &rcookie, &ccount) != DDI_DMA_MAPPED) {
3099 3097 SF_DEBUG(1, (sf, CE_WARN,
3100 3098 "Could not bind DMA memory for ELS rsp\n"));
3101 3099 goto fail;
3102 3100 }
3103 3101 rsp_bound = TRUE;
3104 3102
3105 3103 if (ccount != 1) {
3106 3104 SF_DEBUG(1, (sf, CE_WARN,
3107 3105 "Wrong cookie count for ELS rsp\n"));
3108 3106 goto fail;
3109 3107 }
3110 3108
3111 3109 privp->cmd = cmd;
3112 3110 privp->sf = sf;
3113 3111 privp->cmd_dma_handle = cmd_dma_handle;
3114 3112 privp->cmd_acc_handle = cmd_acc_handle;
3115 3113 privp->rsp = rsp;
3116 3114 privp->rsp_dma_handle = rsp_dma_handle;
3117 3115 privp->rsp_acc_handle = rsp_acc_handle;
3118 3116 privp->dest_nport_id = dest_id;
3119 3117 privp->fpkt = fpkt;
3120 3118
3121 3119 fpkt->fcal_pkt_cookie = sf->sf_socp;
3122 3120 fpkt->fcal_pkt_comp = sf_els_callback;
3123 3121 fpkt->fcal_magic = FCALP_MAGIC;
3124 3122 fpkt->fcal_pkt_flags = 0;
3125 3123 fpkt->fcal_socal_request.sr_soc_hdr.sh_flags =
3126 3124 (ushort_t)(SOC_FC_HEADER | sf->sf_sochandle->fcal_portno);
3127 3125 fpkt->fcal_socal_request.sr_soc_hdr.sh_class = 3;
3128 3126 fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 2;
3129 3127 fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = cmd_size;
3130 3128 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_count = 1;
3131 3129 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_flags = 0;
3132 3130 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_seqno = 0;
3133 3131 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_SIMPLE;
3134 3132 fpkt->fcal_socal_request.sr_dataseg[0].fc_base = (uint32_t)
3135 3133 pcookie.dmac_address;
3136 3134 fpkt->fcal_socal_request.sr_dataseg[0].fc_count = cmd_size;
3137 3135 fpkt->fcal_socal_request.sr_dataseg[1].fc_base = (uint32_t)
3138 3136 rcookie.dmac_address;
3139 3137 fpkt->fcal_socal_request.sr_dataseg[1].fc_count = rsp_size;
3140 3138
3141 3139 	/* Fill in the Fibre Channel Header */
3142 3140 hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3143 3141 hp->r_ctl = R_CTL_ELS_REQ;
3144 3142 hp->d_id = dest_id;
3145 3143 hp->s_id = sf->sf_al_pa;
3146 3144 hp->type = TYPE_EXTENDED_LS;
3147 3145 hp->reserved1 = 0;
3148 3146 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3149 3147 hp->seq_id = 0;
3150 3148 hp->df_ctl = 0;
3151 3149 hp->seq_cnt = 0;
3152 3150 hp->ox_id = 0xffff;
3153 3151 hp->rx_id = 0xffff;
3154 3152 hp->ro = 0;
3155 3153
3156 3154 *rprivp = (caddr_t)privp;
3157 3155 *cmd_buf = cmd;
3158 3156 return (fpkt);
3159 3157
3160 3158 fail:
3161 3159 if (cmd_dma_handle != NULL) {
3162 3160 if (cmd_bound) {
3163 3161 (void) ddi_dma_unbind_handle(cmd_dma_handle);
3164 3162 }
3165 3163 ddi_dma_free_handle(&cmd_dma_handle);
3166 3164 privp->cmd_dma_handle = NULL;
3167 3165 }
3168 3166 if (rsp_dma_handle != NULL) {
3169 3167 if (rsp_bound) {
3170 3168 (void) ddi_dma_unbind_handle(rsp_dma_handle);
3171 3169 }
3172 3170 ddi_dma_free_handle(&rsp_dma_handle);
3173 3171 privp->rsp_dma_handle = NULL;
3174 3172 }
3175 3173 sf_els_free(fpkt);
3176 3174 return (NULL);
3177 3175 }
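
/*
 * [A minimal sketch, not part of this file.]  The goto-fail unwind
 * style sf_els_alloc() uses above: acquire resources in order and, on
 * any failure, release exactly what was acquired so far in reverse.
 * The acquire/release helpers are hypothetical stubs.
 */
static void *
example_acquire(const char *name)
{
	static int token;

	(void) name;
	return (&token);		/* stub: always succeeds */
}

static void
example_release(void *res)
{
	(void) res;			/* stub */
}

static int
example_acquire_pair(void)
{
	void *a = NULL, *b = NULL;

	if ((a = example_acquire("cmd")) == NULL)
		goto fail;
	if ((b = example_acquire("rsp")) == NULL)
		goto fail;
	return (1);			/* success: caller now owns both */

fail:
	if (b != NULL)
		example_release(b);
	if (a != NULL)
		example_release(a);
	return (0);
}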
3178 3176
3179 3177
3180 3178 static void
3181 3179 sf_els_free(struct fcal_packet *fpkt)
3182 3180 {
3183 3181 struct sf_els_hdr *privp = fpkt->fcal_pkt_private;
3184 3182
3185 3183 if (privp != NULL) {
3186 3184 if (privp->cmd_dma_handle != NULL) {
3187 3185 (void) ddi_dma_unbind_handle(privp->cmd_dma_handle);
3188 3186 ddi_dma_free_handle(&privp->cmd_dma_handle);
3189 3187 }
3190 3188 if (privp->cmd != NULL) {
3191 3189 ddi_dma_mem_free(&privp->cmd_acc_handle);
3192 3190 }
3193 3191
3194 3192 if (privp->rsp_dma_handle != NULL) {
3195 3193 (void) ddi_dma_unbind_handle(privp->rsp_dma_handle);
3196 3194 ddi_dma_free_handle(&privp->rsp_dma_handle);
3197 3195 }
3198 3196
3199 3197 if (privp->rsp != NULL) {
3200 3198 ddi_dma_mem_free(&privp->rsp_acc_handle);
3201 3199 }
3202 3200 if (privp->data_dma_handle) {
3203 3201 (void) ddi_dma_unbind_handle(privp->data_dma_handle);
3204 3202 ddi_dma_free_handle(&privp->data_dma_handle);
3205 3203 }
3206 3204 if (privp->data_buf) {
3207 3205 ddi_dma_mem_free(&privp->data_acc_handle);
3208 3206 }
3209 3207 kmem_free(privp, privp->size);
3210 3208 }
3211 3209 kmem_free(fpkt, sizeof (struct fcal_packet));
3212 3210 }
3213 3211
3214 3212
3215 3213 static struct sf_target *
3216 3214 sf_create_target(struct sf *sf, struct sf_els_hdr *privp, int tnum, int64_t lun)
3217 3215 {
3218 3216 struct sf_target *target, *ntarget, *otarget, *ptarget;
3219 3217 int hash;
3220 3218 #ifdef RAID_LUNS
3221 3219 int64_t orig_lun = lun;
3222 3220
3223 3221 /* XXXX Work around SCSA limitations. */
3224 3222 lun = *((short *)&lun);
3225 3223 #endif
3226 3224 ntarget = kmem_zalloc(sizeof (struct sf_target), KM_NOSLEEP);
3227 3225 mutex_enter(&sf->sf_mutex);
3228 3226 if (sf->sf_lip_cnt != privp->lip_cnt) {
3229 3227 mutex_exit(&sf->sf_mutex);
3230 3228 if (ntarget != NULL)
3231 3229 kmem_free(ntarget, sizeof (struct sf_target));
3232 3230 return (NULL);
3233 3231 }
3234 3232
3235 3233 target = sf_lookup_target(sf, privp->port_wwn, lun);
3236 3234 if (lun != 0) {
3237 3235 /*
3238 3236 * Since LUNs != 0 are queued up after LUN == 0, find LUN == 0
3239 3237 * and enqueue the new LUN.
3240 3238 */
3241 3239 if ((ptarget = sf_lookup_target(sf, privp->port_wwn,
3242 3240 (int64_t)0)) == NULL) {
3243 3241 /*
3244 3242 * Yeep -- no LUN 0?
3245 3243 */
3246 3244 mutex_exit(&sf->sf_mutex);
3247 3245 sf_log(sf, CE_WARN, "target 0x%x "
3248 3246 "lun %" PRIx64 ": No LUN 0\n", tnum, lun);
3249 3247 if (ntarget != NULL)
3250 3248 kmem_free(ntarget, sizeof (struct sf_target));
3251 3249 return (NULL);
3252 3250 }
3253 3251 mutex_enter(&ptarget->sft_mutex);
3254 3252 if (target != NULL && ptarget->sft_lip_cnt == sf->sf_lip_cnt &&
3255 3253 ptarget->sft_state&SF_TARGET_OFFLINE) {
3256 3254 /* LUN 0 already finished, duplicate its state */
3257 3255 mutex_exit(&ptarget->sft_mutex);
3258 3256 sf_offline_target(sf, target);
3259 3257 mutex_exit(&sf->sf_mutex);
3260 3258 if (ntarget != NULL)
3261 3259 kmem_free(ntarget, sizeof (struct sf_target));
3262 3260 return (target);
3263 3261 } else if (target != NULL) {
3264 3262 /*
3265 3263 * LUN 0 online or not examined yet.
3266 3264 * Try to bring the LUN back online
3267 3265 */
3268 3266 mutex_exit(&ptarget->sft_mutex);
3269 3267 mutex_enter(&target->sft_mutex);
3270 3268 target->sft_lip_cnt = privp->lip_cnt;
3271 3269 target->sft_state |= SF_TARGET_BUSY;
3272 3270 target->sft_state &= ~(SF_TARGET_OFFLINE|
3273 3271 SF_TARGET_MARK);
3274 3272 target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3275 3273 target->sft_hard_address = sf_switch_to_alpa[tnum];
3276 3274 mutex_exit(&target->sft_mutex);
3277 3275 mutex_exit(&sf->sf_mutex);
3278 3276 if (ntarget != NULL)
3279 3277 kmem_free(ntarget, sizeof (struct sf_target));
3280 3278 return (target);
3281 3279 }
3282 3280 mutex_exit(&ptarget->sft_mutex);
3283 3281 if (ntarget == NULL) {
3284 3282 mutex_exit(&sf->sf_mutex);
3285 3283 return (NULL);
3286 3284 }
3287 3285 /* Initialize new target structure */
3288 3286 bcopy((caddr_t)&privp->node_wwn,
3289 3287 (caddr_t)&ntarget->sft_node_wwn, sizeof (privp->node_wwn));
3290 3288 bcopy((caddr_t)&privp->port_wwn,
3291 3289 (caddr_t)&ntarget->sft_port_wwn, sizeof (privp->port_wwn));
3292 3290 ntarget->sft_lun.l = lun;
3293 3291 #ifdef RAID_LUNS
3294 3292 ntarget->sft_lun.l = orig_lun;
3295 3293 ntarget->sft_raid_lun = (uint_t)lun;
3296 3294 #endif
3297 3295 mutex_init(&ntarget->sft_mutex, NULL, MUTEX_DRIVER, NULL);
3298 3296 mutex_init(&ntarget->sft_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
3299 3297	/* Don't let anyone use this till we finish up init. */
3300 3298 mutex_enter(&ntarget->sft_mutex);
3301 3299 mutex_enter(&ntarget->sft_pkt_mutex);
3302 3300
3303 3301 hash = SF_HASH(privp->port_wwn, lun);
3304 3302 ntarget->sft_next = sf->sf_wwn_lists[hash];
3305 3303 sf->sf_wwn_lists[hash] = ntarget;
3306 3304
3307 3305 ntarget->sft_lip_cnt = privp->lip_cnt;
3308 3306 ntarget->sft_al_pa = (uchar_t)privp->dest_nport_id;
3309 3307 ntarget->sft_hard_address = sf_switch_to_alpa[tnum];
3310 3308 ntarget->sft_device_type = DTYPE_UNKNOWN;
3311 3309 ntarget->sft_state = SF_TARGET_BUSY;
3312 3310 ntarget->sft_pkt_head = (struct sf_pkt *)&ntarget->
3313 3311 sft_pkt_head;
3314 3312 ntarget->sft_pkt_tail = (struct sf_pkt *)&ntarget->
3315 3313 sft_pkt_head;
3316 3314
3317 3315 mutex_enter(&ptarget->sft_mutex);
3318 3316 /* Traverse the list looking for this target */
3319 3317 for (target = ptarget; target->sft_next_lun;
3320 3318 target = target->sft_next_lun) {
3321 3319 otarget = target->sft_next_lun;
3322 3320 }
3323 3321 ntarget->sft_next_lun = target->sft_next_lun;
3324 3322 target->sft_next_lun = ntarget;
3325 3323 mutex_exit(&ptarget->sft_mutex);
3326 3324 mutex_exit(&ntarget->sft_pkt_mutex);
3327 3325 mutex_exit(&ntarget->sft_mutex);
3328 3326 mutex_exit(&sf->sf_mutex);
3329 3327 return (ntarget);
3330 3328
3331 3329 }
3332 3330 if (target != NULL && target->sft_lip_cnt == sf->sf_lip_cnt) {
3333 3331 /* It's been touched this LIP -- duplicate WWNs */
3334 3332 sf_offline_target(sf, target); /* And all the baby targets */
3335 3333 mutex_exit(&sf->sf_mutex);
3336 3334 sf_log(sf, CE_WARN, "target 0x%x, duplicate port wwns\n",
3337 3335 tnum);
3338 3336 if (ntarget != NULL) {
3339 3337 kmem_free(ntarget, sizeof (struct sf_target));
3340 3338 }
3341 3339 return (NULL);
3342 3340 }
3343 3341
3344 3342 if ((otarget = sf->sf_targets[tnum]) != NULL) {
3345 3343 /* Someone else is in our slot */
3346 3344 mutex_enter(&otarget->sft_mutex);
3347 3345 if (otarget->sft_lip_cnt == sf->sf_lip_cnt) {
3348 3346 mutex_exit(&otarget->sft_mutex);
3349 3347 sf_offline_target(sf, otarget);
3350 3348 if (target != NULL)
3351 3349 sf_offline_target(sf, target);
3352 3350 mutex_exit(&sf->sf_mutex);
3353 3351 sf_log(sf, CE_WARN,
3354 3352 "target 0x%x, duplicate switch settings\n", tnum);
3355 3353 if (ntarget != NULL)
3356 3354 kmem_free(ntarget, sizeof (struct sf_target));
3357 3355 return (NULL);
3358 3356 }
3359 3357 mutex_exit(&otarget->sft_mutex);
3360 3358 if (bcmp((caddr_t)&privp->port_wwn, (caddr_t)&otarget->
3361 3359 sft_port_wwn, sizeof (privp->port_wwn))) {
3362 3360 sf_offline_target(sf, otarget);
3363 3361 mutex_exit(&sf->sf_mutex);
3364 3362 sf_log(sf, CE_WARN, "wwn changed on target 0x%x\n",
3365 3363 tnum);
3366 3364 bzero((caddr_t)&sf->sf_stats.tstats[tnum],
3367 3365 sizeof (struct sf_target_stats));
3368 3366 mutex_enter(&sf->sf_mutex);
3369 3367 }
3370 3368 }
3371 3369
3372 3370 sf->sf_targets[tnum] = target;
3373 3371 if ((target = sf->sf_targets[tnum]) == NULL) {
3374 3372 if (ntarget == NULL) {
3375 3373 mutex_exit(&sf->sf_mutex);
3376 3374 return (NULL);
3377 3375 }
3378 3376 bcopy((caddr_t)&privp->node_wwn,
3379 3377 (caddr_t)&ntarget->sft_node_wwn, sizeof (privp->node_wwn));
3380 3378 bcopy((caddr_t)&privp->port_wwn,
3381 3379 (caddr_t)&ntarget->sft_port_wwn, sizeof (privp->port_wwn));
3382 3380 ntarget->sft_lun.l = lun;
3383 3381 #ifdef RAID_LUNS
3384 3382 ntarget->sft_lun.l = orig_lun;
3385 3383 ntarget->sft_raid_lun = (uint_t)lun;
3386 3384 #endif
3387 3385 mutex_init(&ntarget->sft_mutex, NULL, MUTEX_DRIVER, NULL);
3388 3386 mutex_init(&ntarget->sft_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
3389 3387 mutex_enter(&ntarget->sft_mutex);
3390 3388 mutex_enter(&ntarget->sft_pkt_mutex);
3391 3389 hash = SF_HASH(privp->port_wwn, lun); /* lun 0 */
3392 3390 ntarget->sft_next = sf->sf_wwn_lists[hash];
3393 3391 sf->sf_wwn_lists[hash] = ntarget;
3394 3392
3395 3393 target = ntarget;
3396 3394 target->sft_lip_cnt = privp->lip_cnt;
3397 3395 target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3398 3396 target->sft_hard_address = sf_switch_to_alpa[tnum];
3399 3397 target->sft_device_type = DTYPE_UNKNOWN;
3400 3398 target->sft_state = SF_TARGET_BUSY;
3401 3399 target->sft_pkt_head = (struct sf_pkt *)&target->
3402 3400 sft_pkt_head;
3403 3401 target->sft_pkt_tail = (struct sf_pkt *)&target->
3404 3402 sft_pkt_head;
3405 3403 sf->sf_targets[tnum] = target;
3406 3404 mutex_exit(&ntarget->sft_mutex);
3407 3405 mutex_exit(&ntarget->sft_pkt_mutex);
3408 3406 mutex_exit(&sf->sf_mutex);
3409 3407 } else {
3410 3408 mutex_enter(&target->sft_mutex);
3411 3409 target->sft_lip_cnt = privp->lip_cnt;
3412 3410 target->sft_state |= SF_TARGET_BUSY;
3413 3411 target->sft_state &= ~(SF_TARGET_OFFLINE|SF_TARGET_MARK);
3414 3412 target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3415 3413 target->sft_hard_address = sf_switch_to_alpa[tnum];
3416 3414 mutex_exit(&target->sft_mutex);
3417 3415 mutex_exit(&sf->sf_mutex);
3418 3416 if (ntarget != NULL)
3419 3417 kmem_free(ntarget, sizeof (struct sf_target));
3420 3418 }
3421 3419 return (target);
3422 3420 }
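sf_create_target() queues every nonzero LUN behind its LUN-0 target through sft_next_lun, walking to the end of the chain before linking the newcomer. A minimal user-space sketch of that tail append, with invented names (lun_node, next_lun) standing in for the driver's sf_target fields; locking is elided:

	#include <stddef.h>

	/* hypothetical stand-in for struct sf_target's per-LUN chain */
	struct lun_node {
		long long lun;
		struct lun_node *next_lun;
	};

	/*
	 * Append a new LUN node behind the LUN-0 anchor: walk to the
	 * last node in the chain, then hang the newcomer off its tail,
	 * as sf_create_target() does while holding the parent's mutex.
	 */
	static void
	lun_chain_append(struct lun_node *lun0, struct lun_node *nnode)
	{
		struct lun_node *p;

		for (p = lun0; p->next_lun != NULL; p = p->next_lun)
			;
		nnode->next_lun = NULL;
		p->next_lun = nnode;
	}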
3423 3421
3424 3422
3425 3423 /*
3426 3424 * find the target for a given sf instance
3427 3425 */
3428 3426 /* ARGSUSED */
3429 3427 static struct sf_target *
3430 3428 #ifdef RAID_LUNS
3431 3429 sf_lookup_target(struct sf *sf, uchar_t *wwn, int lun)
3432 3430 #else
3433 3431 sf_lookup_target(struct sf *sf, uchar_t *wwn, int64_t lun)
3434 3432 #endif
3435 3433 {
3436 3434 int hash;
3437 3435 struct sf_target *target;
3438 3436
3439 3437 ASSERT(mutex_owned(&sf->sf_mutex));
3440 3438 hash = SF_HASH(wwn, lun);
3441 3439
3442 3440 target = sf->sf_wwn_lists[hash];
3443 3441 while (target != NULL) {
3444 3442
3445 3443 #ifndef RAID_LUNS
3446 3444 if (bcmp((caddr_t)wwn, (caddr_t)&target->sft_port_wwn,
3447 3445 sizeof (target->sft_port_wwn)) == 0 &&
3448 3446 target->sft_lun.l == lun)
3449 3447 break;
3450 3448 #else
3451 3449 if (bcmp((caddr_t)wwn, (caddr_t)&target->sft_port_wwn,
3452 3450 sizeof (target->sft_port_wwn)) == 0 &&
3453 3451 target->sft_raid_lun == lun)
3454 3452 break;
3455 3453 #endif
3456 3454 target = target->sft_next;
3457 3455 }
3458 3456
3459 3457 return (target);
3460 3458 }
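sf_lookup_target() hashes the eight port-WWN bytes together with the LUN and then walks the bucket, comparing WWNs with bcmp(). The SF_HASH() macro itself is defined in sfvar.h and is not shown in this hunk, so the mix function and table size below are assumptions; only the bucket-walk shape mirrors the driver:

	#include <string.h>

	#define	NBUCKETS	64	/* assumed size; sf uses its own */

	struct tnode {
		unsigned char port_wwn[8];
		long long lun;
		struct tnode *next;
	};

	static struct tnode *buckets[NBUCKETS];

	/* invented mix of WWN bytes and LUN; not the driver's SF_HASH() */
	static unsigned int
	wwn_hash(const unsigned char *wwn, long long lun)
	{
		unsigned int h = (unsigned int)lun;
		int i;

		for (i = 0; i < 8; i++)
			h = h * 31 + wwn[i];
		return (h % NBUCKETS);
	}

	static struct tnode *
	lookup(const unsigned char *wwn, long long lun)
	{
		struct tnode *t;

		for (t = buckets[wwn_hash(wwn, lun)]; t != NULL; t = t->next) {
			if (memcmp(wwn, t->port_wwn, 8) == 0 && t->lun == lun)
				return (t);
		}
		return (NULL);
	}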
3461 3459
3462 3460
3463 3461 /*
3464 3462 * Send out a REPORT_LUNS command.
3465 3463 */
3466 3464 static int
3467 3465 sf_do_reportlun(struct sf *sf, struct sf_els_hdr *privp,
3468 3466 struct sf_target *target)
3469 3467 {
3470 3468 struct fcal_packet *fpkt = privp->fpkt;
3471 3469 ddi_dma_cookie_t pcookie;
3472 3470 ddi_dma_handle_t lun_dma_handle = NULL;
3473 3471 ddi_acc_handle_t lun_acc_handle;
3474 3472 uint_t ccount;
3475 3473 size_t real_size;
3476 3474 caddr_t lun_buf = NULL;
3477 3475 int handle_bound = 0;
3478 3476 fc_frame_header_t *hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3479 3477 struct fcp_cmd *reportlun = (struct fcp_cmd *)privp->cmd;
3480 3478 char *msg = "Transport";
3481 3479
3482 3480 if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3483 3481 DDI_DMA_DONTWAIT, NULL, &lun_dma_handle) != DDI_SUCCESS) {
3484 3482 msg = "ddi_dma_alloc_handle()";
3485 3483 goto fail;
3486 3484 }
3487 3485
3488 3486 if (ddi_dma_mem_alloc(lun_dma_handle, REPORT_LUNS_SIZE,
3489 3487 sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3490 3488 DDI_DMA_DONTWAIT, NULL, &lun_buf,
3491 3489 &real_size, &lun_acc_handle) != DDI_SUCCESS) {
3492 3490 msg = "ddi_dma_mem_alloc()";
3493 3491 goto fail;
3494 3492 }
3495 3493
3496 3494 if (real_size < REPORT_LUNS_SIZE) {
3497 3495 msg = "DMA mem < REPORT_LUNS_SIZE";
3498 3496 goto fail;
3499 3497 }
3500 3498
3501 3499 if (ddi_dma_addr_bind_handle(lun_dma_handle, NULL,
3502 3500 lun_buf, real_size, DDI_DMA_READ |
3503 3501 DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
3504 3502 NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3505 3503 msg = "ddi_dma_addr_bind_handle()";
3506 3504 goto fail;
3507 3505 }
3508 3506 handle_bound = 1;
3509 3507
3510 3508 if (ccount != 1) {
3511 3509 msg = "ccount != 1";
3512 3510 goto fail;
3513 3511 }
3514 3512 privp->els_code = 0;
3515 3513 privp->target = target;
3516 3514 privp->data_dma_handle = lun_dma_handle;
3517 3515 privp->data_acc_handle = lun_acc_handle;
3518 3516 privp->data_buf = lun_buf;
3519 3517
3520 3518 fpkt->fcal_pkt_comp = sf_reportlun_callback;
3521 3519 fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
3522 3520 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_IO_READ;
3523 3521 fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
3524 3522 sizeof (struct fcp_cmd);
3525 3523 fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
3526 3524 (uint32_t)pcookie.dmac_address;
3527 3525 fpkt->fcal_socal_request.sr_dataseg[2].fc_count = pcookie.dmac_size;
3528 3526 fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = pcookie.dmac_size;
3529 3527 hp->r_ctl = R_CTL_COMMAND;
3530 3528 hp->type = TYPE_SCSI_FCP;
3531 3529 bzero((caddr_t)reportlun, sizeof (struct fcp_cmd));
3532 3530 ((union scsi_cdb *)reportlun->fcp_cdb)->scc_cmd = SCMD_REPORT_LUNS;
3533 3531 /* Now set the buffer size. If DDI gave us extra, that's O.K. */
3534 3532 ((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count0 =
3535 3533 (real_size&0x0ff);
3536 3534 ((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count1 =
3537 3535 (real_size>>8)&0x0ff;
3538 3536 ((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count2 =
3539 3537 (real_size>>16)&0x0ff;
3540 3538 ((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count3 =
3541 3539 (real_size>>24)&0x0ff;
3542 3540 reportlun->fcp_cntl.cntl_read_data = 1;
3543 3541 reportlun->fcp_cntl.cntl_write_data = 0;
3544 3542 reportlun->fcp_data_len = pcookie.dmac_size;
3545 3543 reportlun->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
3546 3544
3547 3545 (void) ddi_dma_sync(lun_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);
3548 3546 /* We know he's there, so this should be fast */
3549 3547 privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3550 3548 if (sf_els_transport(sf, privp) == 1)
3551 3549 return (1);
3552 3550
3553 3551 fail:
3554 3552 sf_log(sf, CE_WARN,
3555 3553 "%s failure for REPORTLUN to target 0x%x\n",
3556 3554 msg, sf_alpa_to_switch[privp->dest_nport_id]);
3557 3555 sf_els_free(fpkt);
3558 3556 if (lun_dma_handle != NULL) {
3559 3557 if (handle_bound)
3560 3558 (void) ddi_dma_unbind_handle(lun_dma_handle);
3561 3559 ddi_dma_free_handle(&lun_dma_handle);
3562 3560 }
3563 3561 if (lun_buf != NULL) {
3564 3562 ddi_dma_mem_free(&lun_acc_handle);
3565 3563 }
3566 3564 return (0);
3567 3565 }
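The scc5_count0..scc5_count3 stores above pack the allocation length into the REPORT LUNS CDB most-significant byte first. The same packing expressed with raw byte offsets (bytes 6-9 of the 12-byte CDB, per SPC) instead of the scsi_cdb union; the macro name is mine, though 0xa0 is the standard REPORT LUNS opcode:

	#include <stdint.h>
	#include <string.h>

	#define	SCMD_REPORT_LUNS_OP	0xa0	/* REPORT LUNS opcode */

	/* Fill a 12-byte REPORT LUNS CDB with a big-endian alloc length. */
	static void
	build_report_luns_cdb(uint8_t cdb[12], uint32_t alloc_len)
	{
		memset(cdb, 0, 12);
		cdb[0] = SCMD_REPORT_LUNS_OP;
		cdb[6] = (alloc_len >> 24) & 0xff;	/* scc5_count3 */
		cdb[7] = (alloc_len >> 16) & 0xff;	/* scc5_count2 */
		cdb[8] = (alloc_len >> 8) & 0xff;	/* scc5_count1 */
		cdb[9] = alloc_len & 0xff;		/* scc5_count0 */
	}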
3568 3566
3569 3567 /*
3570 3568 * Handle the results of a REPORT_LUNS command:
3571 3569 * Create additional targets if necessary
3572 3570 * Initiate INQUIRYs on all LUNs.
3573 3571 */
3574 3572 static void
3575 3573 sf_reportlun_callback(struct fcal_packet *fpkt)
3576 3574 {
3577 3575 struct sf_els_hdr *privp = (struct sf_els_hdr *)fpkt->
3578 3576 fcal_pkt_private;
3579 3577 struct scsi_report_luns *ptr =
3580 3578 (struct scsi_report_luns *)privp->data_buf;
3581 3579 struct sf *sf = privp->sf;
3582 3580 struct sf_target *target = privp->target;
3583 3581 struct fcp_rsp *rsp = NULL;
3584 3582 int delayed_retry = 0;
3585 3583 int tid = sf_alpa_to_switch[target->sft_hard_address];
3586 3584 int i, free_pkt = 1;
3587 3585 short ncmds;
3588 3586
3589 3587 mutex_enter(&sf->sf_mutex);
3590 3588 /* use as temporary state variable */
3591 3589 if (privp->timeout == SF_INVALID_TIMEOUT) {
3592 3590 mutex_exit(&sf->sf_mutex);
3593 3591 return;
3594 3592 }
3595 3593 if (privp->prev)
3596 3594 privp->prev->next = privp->next;
3597 3595 if (privp->next)
3598 3596 privp->next->prev = privp->prev;
3599 3597 if (sf->sf_els_list == privp)
3600 3598 sf->sf_els_list = privp->next;
3601 3599 privp->prev = privp->next = NULL;
3602 3600 mutex_exit(&sf->sf_mutex);
3603 3601 ncmds = fpkt->fcal_ncmds;
3604 3602 ASSERT(ncmds >= 0);
3605 3603 mutex_enter(&sf->sf_cmd_mutex);
3606 3604 sf->sf_ncmds = ncmds;
3607 3605 mutex_exit(&sf->sf_cmd_mutex);
3608 3606
3609 3607 if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
3610 3608 (void) ddi_dma_sync(privp->rsp_dma_handle, 0,
3611 3609 0, DDI_DMA_SYNC_FORKERNEL);
3612 3610
3613 3611 rsp = (struct fcp_rsp *)privp->rsp;
3614 3612 }
3615 3613 SF_DEBUG(1, (sf, CE_CONT,
3616 3614 "!REPORTLUN to al_pa %x pkt status %x scsi status %x\n",
3617 3615 privp->dest_nport_id,
3618 3616 fpkt->fcal_pkt_status,
3619 3617 rsp?rsp->fcp_u.fcp_status.scsi_status:0));
3620 3618
3621 3619 /* See if target simply does not support REPORT_LUNS. */
3622 3620 if (rsp && rsp->fcp_u.fcp_status.scsi_status == STATUS_CHECK &&
3623 3621 rsp->fcp_u.fcp_status.sense_len_set &&
3624 3622 rsp->fcp_sense_len >=
3625 3623 offsetof(struct scsi_extended_sense, es_qual_code)) {
3626 3624 struct scsi_extended_sense *sense;
3627 3625 sense = (struct scsi_extended_sense *)
3628 3626 ((caddr_t)rsp + sizeof (struct fcp_rsp)
3629 3627 + rsp->fcp_response_len);
3630 3628 if (sense->es_key == KEY_ILLEGAL_REQUEST) {
3631 3629 if (sense->es_add_code == 0x20) {
3632 3630 /* Fake LUN 0 */
3633 3631 SF_DEBUG(1, (sf, CE_CONT,
3634 3632 "!REPORTLUN Faking good "
3635 3633 "completion for alpa %x\n",
3636 3634 privp->dest_nport_id));
3637 3635 ptr->lun_list_len = FCP_LUN_SIZE;
3638 3636 ptr->lun[0] = 0;
3639 3637 rsp->fcp_u.fcp_status.scsi_status =
3640 3638 STATUS_GOOD;
3641 3639 } else if (sense->es_add_code == 0x25) {
3642 3640 SF_DEBUG(1, (sf, CE_CONT,
3643 3641 "!REPORTLUN device alpa %x "
3644 3642 "key %x code %x\n",
3645 3643 privp->dest_nport_id,
3646 3644 sense->es_key, sense->es_add_code));
3647 3645 goto fail;
3648 3646 }
3649 3647 } else if (sense->es_key ==
3650 3648 KEY_UNIT_ATTENTION &&
3651 3649 sense->es_add_code == 0x29) {
3652 3650 SF_DEBUG(1, (sf, CE_CONT,
3653 3651 "!REPORTLUN device alpa %x was reset\n",
3654 3652 privp->dest_nport_id));
3655 3653 } else {
3656 3654 SF_DEBUG(1, (sf, CE_CONT,
3657 3655 "!REPORTLUN device alpa %x "
3658 3656 "key %x code %x\n",
3659 3657 privp->dest_nport_id,
3660 3658 sense->es_key, sense->es_add_code));
3661 3659 /* XXXXXX The following is here to handle broken targets -- remove it later */
3662 3660 if (sf_reportlun_forever &&
3663 3661 sense->es_key == KEY_UNIT_ATTENTION)
3664 3662 goto retry;
3665 3663 /* XXXXXX */
3666 3664 if (sense->es_key == KEY_NOT_READY)
3667 3665 delayed_retry = 1;
3668 3666 }
3669 3667 }
3670 3668
3671 3669 if (rsp && rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD) {
3672 3670 struct fcp_rsp_info *bep;
3673 3671
3674 3672 bep = (struct fcp_rsp_info *)(&rsp->
3675 3673 fcp_response_len + 1);
3676 3674 if (!rsp->fcp_u.fcp_status.rsp_len_set ||
3677 3675 bep->rsp_code == FCP_NO_FAILURE) {
3678 3676 (void) ddi_dma_sync(privp->data_dma_handle,
3679 3677 0, 0, DDI_DMA_SYNC_FORKERNEL);
3680 3678
3681 3679 /* Convert from #bytes to #ints */
3682 3680	/* Convert the LUN list length from bytes to 8-byte LUN entries */
3683 3681 SF_DEBUG(2, (sf, CE_CONT,
3684 3682 "!REPORTLUN to al_pa %x succeeded: %d LUNs\n",
3685 3683 privp->dest_nport_id, ptr->lun_list_len));
3686 3684 if (!ptr->lun_list_len) {
3687 3685 /* No LUNs? Ya gotta be kidding... */
3688 3686 sf_log(sf, CE_WARN,
3689 3687 "SCSI violation -- "
3690 3688 "target 0x%x reports no LUNs\n",
3691 3689 sf_alpa_to_switch[
3692 3690 privp->dest_nport_id]);
3693 3691 ptr->lun_list_len = 1;
3694 3692 ptr->lun[0] = 0;
3695 3693 }
3696 3694
3697 3695 mutex_enter(&sf->sf_mutex);
3698 3696 if (sf->sf_lip_cnt == privp->lip_cnt) {
3699 3697 sf->sf_device_count += ptr->lun_list_len - 1;
3700 3698 }
3701 3699
3702 3700 mutex_exit(&sf->sf_mutex);
3703 3701 for (i = 0; i < ptr->lun_list_len && privp->lip_cnt ==
3704 3702 sf->sf_lip_cnt; i++) {
3705 3703 struct sf_els_hdr *nprivp;
3706 3704 struct fcal_packet *nfpkt;
3707 3705
3708 3706 /* LUN 0 is already in `target' */
3709 3707 if (ptr->lun[i] != 0) {
3710 3708 target = sf_create_target(sf,
3711 3709 privp, tid, ptr->lun[i]);
3712 3710 }
3713 3711 nprivp = NULL;
3714 3712 nfpkt = NULL;
3715 3713 if (target) {
3716 3714 nfpkt = sf_els_alloc(sf,
3717 3715 target->sft_al_pa,
3718 3716 sizeof (struct sf_els_hdr),
3719 3717 sizeof (union sf_els_cmd),
3720 3718 sizeof (union sf_els_rsp),
3721 3719 (caddr_t *)&nprivp,
3722 3720 (caddr_t *)&rsp);
3723 3721 if (nprivp)
3724 3722 nprivp->lip_cnt =
3725 3723 privp->lip_cnt;
3726 3724 }
3727 3725 if (nfpkt && nprivp &&
3728 3726 (sf_do_inquiry(sf, nprivp, target) ==
3729 3727 0)) {
3730 3728 mutex_enter(&sf->sf_mutex);
3731 3729 if (sf->sf_lip_cnt == privp->
3732 3730 lip_cnt) {
3733 3731	sf->sf_device_count--;
3734 3732 }
3735 3733 sf_offline_target(sf, target);
3736 3734 mutex_exit(&sf->sf_mutex);
3737 3735 }
3738 3736 }
3739 3737 sf_els_free(fpkt);
3740 3738 return;
3741 3739 } else {
3742 3740 SF_DEBUG(1, (sf, CE_CONT,
3743 3741 "!REPORTLUN al_pa %x fcp failure, "
3744 3742 "fcp_rsp_code %x scsi status %x\n",
3745 3743 privp->dest_nport_id, bep->rsp_code,
3746 3744 rsp ? rsp->fcp_u.fcp_status.scsi_status:0));
3747 3745 goto fail;
3748 3746 }
3749 3747 }
3750 3748 if (rsp && ((rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) ||
3751 3749 (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL))) {
3752 3750 delayed_retry = 1;
3753 3751 }
3754 3752
3755 3753 if (++(privp->retries) < sf_els_retries ||
3756 3754 (delayed_retry && privp->retries < SF_BSY_RETRIES)) {
3757 3755 /* XXXXXX The following is here to handle broken targets -- remove it later */
3758 3756 retry:
3759 3757 /* XXXXXX */
3760 3758 if (delayed_retry) {
3761 3759 privp->retries--;
3762 3760 privp->timeout = sf_watchdog_time + SF_BSY_TIMEOUT;
3763 3761 privp->delayed_retry = 1;
3764 3762 } else {
3765 3763 privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3766 3764 }
3767 3765
3768 3766 privp->prev = NULL;
3769 3767 mutex_enter(&sf->sf_mutex);
3770 3768 if (privp->lip_cnt == sf->sf_lip_cnt) {
3771 3769 if (!delayed_retry)
3772 3770 SF_DEBUG(1, (sf, CE_WARN,
3773 3771 "!REPORTLUN to al_pa %x failed, retrying\n",
3774 3772 privp->dest_nport_id));
3775 3773 privp->next = sf->sf_els_list;
3776 3774 if (sf->sf_els_list != NULL)
3777 3775 sf->sf_els_list->prev = privp;
3778 3776 sf->sf_els_list = privp;
3779 3777 mutex_exit(&sf->sf_mutex);
3780 3778 if (!delayed_retry && soc_transport(sf->sf_sochandle,
3781 3779 fpkt, FCAL_NOSLEEP, CQ_REQUEST_1) !=
3782 3780 FCAL_TRANSPORT_SUCCESS) {
3783 3781 mutex_enter(&sf->sf_mutex);
3784 3782 if (privp->prev)
3785 3783 privp->prev->next = privp->next;
3786 3784 if (privp->next)
3787 3785 privp->next->prev = privp->prev;
3788 3786 if (sf->sf_els_list == privp)
3789 3787 sf->sf_els_list = privp->next;
3790 3788 mutex_exit(&sf->sf_mutex);
3791 3789 goto fail;
3792 3790 } else
3793 3791 return;
3794 3792 } else {
3795 3793 mutex_exit(&sf->sf_mutex);
3796 3794 }
3797 3795 } else {
3798 3796 fail:
3799 3797
3800 3798 /* REPORT_LUN failed -- try inquiry */
3801 3799 if (sf_do_inquiry(sf, privp, target) != 0) {
3802 3800 return;
3803 3801 } else {
3804 3802 free_pkt = 0;
3805 3803 }
3806 3804 mutex_enter(&sf->sf_mutex);
3807 3805 if (sf->sf_lip_cnt == privp->lip_cnt) {
3808 3806 sf_log(sf, CE_WARN,
3809 3807 "!REPORTLUN to target 0x%x failed\n",
3810 3808 sf_alpa_to_switch[privp->dest_nport_id]);
3811 3809 sf_offline_target(sf, target);
3812 3810 sf->sf_device_count--;
3813 3811 ASSERT(sf->sf_device_count >= 0);
3814 3812 if (sf->sf_device_count == 0)
3815 3813 sf_finish_init(sf, privp->lip_cnt);
3816 3814 }
3817 3815 mutex_exit(&sf->sf_mutex);
3818 3816 }
3819 3817 if (free_pkt) {
3820 3818 sf_els_free(fpkt);
3821 3819 }
3822 3820 }
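The `>> 3` in the callback turns the byte count the device returned into a count of 8-byte LUN entries. A user-space sketch of walking raw REPORT LUNS data (bytes 0-3 hold the big-endian list length in bytes, bytes 4-7 are reserved, entries start at byte 8, per SPC), with an invented emit callback standing in for the driver's per-LUN INQUIRY:

	#include <stdint.h>
	#include <stddef.h>

	/*
	 * Walk a raw REPORT LUNS parameter buffer and hand each 8-byte
	 * LUN entry to `emit'.  Returns the number of entries visited.
	 */
	static size_t
	walk_report_luns(const uint8_t *buf, size_t buflen,
	    void (*emit)(const uint8_t lun[8]))
	{
		uint32_t bytes;
		size_t n, i;

		if (buflen < 8)
			return (0);
		bytes = ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16) |
		    ((uint32_t)buf[2] << 8) | buf[3];
		n = bytes >> 3;			/* 8 bytes per LUN entry */
		for (i = 0; i < n && 8 + (i + 1) * 8 <= buflen; i++)
			emit(&buf[8 + i * 8]);
		return (i);
	}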
3823 3821
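/*
 * send an INQUIRY command to the given target and LUN
 */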
3824 3822 static int
3825 3823 sf_do_inquiry(struct sf *sf, struct sf_els_hdr *privp,
3826 3824 struct sf_target *target)
3827 3825 {
3828 3826 struct fcal_packet *fpkt = privp->fpkt;
3829 3827 ddi_dma_cookie_t pcookie;
3830 3828 ddi_dma_handle_t inq_dma_handle = NULL;
3831 3829 ddi_acc_handle_t inq_acc_handle;
3832 3830 uint_t ccount;
3833 3831 size_t real_size;
3834 3832 caddr_t inq_buf = NULL;
3835 3833 int handle_bound = FALSE;
3836 3834 fc_frame_header_t *hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3837 3835 struct fcp_cmd *inq = (struct fcp_cmd *)privp->cmd;
3838 3836 char *msg = "Transport";
3839 3837
3840 3838
3841 3839 if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3842 3840 DDI_DMA_DONTWAIT, NULL, &inq_dma_handle) != DDI_SUCCESS) {
3843 3841 msg = "ddi_dma_alloc_handle()";
3844 3842 goto fail;
3845 3843 }
3846 3844
3847 3845 if (ddi_dma_mem_alloc(inq_dma_handle, SUN_INQSIZE,
3848 3846 sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3849 3847 DDI_DMA_DONTWAIT, NULL, &inq_buf,
3850 3848 &real_size, &inq_acc_handle) != DDI_SUCCESS) {
3851 3849 msg = "ddi_dma_mem_alloc()";
3852 3850 goto fail;
3853 3851 }
3854 3852
3855 3853 if (real_size < SUN_INQSIZE) {
3856 3854 msg = "DMA mem < inquiry size";
3857 3855 goto fail;
3858 3856 }
3859 3857
3860 3858 if (ddi_dma_addr_bind_handle(inq_dma_handle, NULL,
3861 3859 inq_buf, real_size, DDI_DMA_READ | DDI_DMA_CONSISTENT,
3862 3860 DDI_DMA_DONTWAIT, NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3863 3861 msg = "ddi_dma_addr_bind_handle()";
3864 3862 goto fail;
3865 3863 }
3866 3864 handle_bound = TRUE;
3867 3865
3868 3866 if (ccount != 1) {
3869 3867 msg = "ccount != 1";
3870 3868 goto fail;
3871 3869 }
3872 3870 privp->els_code = 0; /* not an ELS command */
3873 3871 privp->target = target;
3874 3872 privp->data_dma_handle = inq_dma_handle;
3875 3873 privp->data_acc_handle = inq_acc_handle;
3876 3874 privp->data_buf = inq_buf;
3877 3875 fpkt->fcal_pkt_comp = sf_inq_callback;
3878 3876 fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
3879 3877 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_IO_READ;
3880 3878 fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
3881 3879 sizeof (struct fcp_cmd);
3882 3880 fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
3883 3881 (uint32_t)pcookie.dmac_address;
3884 3882 fpkt->fcal_socal_request.sr_dataseg[2].fc_count = pcookie.dmac_size;
3885 3883 fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = pcookie.dmac_size;
3886 3884 hp->r_ctl = R_CTL_COMMAND;
3887 3885 hp->type = TYPE_SCSI_FCP;
3888 3886 bzero((caddr_t)inq, sizeof (struct fcp_cmd));
3889 3887 ((union scsi_cdb *)inq->fcp_cdb)->scc_cmd = SCMD_INQUIRY;
3890 3888 ((union scsi_cdb *)inq->fcp_cdb)->g0_count0 = SUN_INQSIZE;
3891 3889 bcopy((caddr_t)&target->sft_lun.b, (caddr_t)&inq->fcp_ent_addr,
3892 3890 FCP_LUN_SIZE);
3893 3891 inq->fcp_cntl.cntl_read_data = 1;
3894 3892 inq->fcp_cntl.cntl_write_data = 0;
3895 3893 inq->fcp_data_len = pcookie.dmac_size;
3896 3894 inq->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
3897 3895
3898 3896 (void) ddi_dma_sync(inq_dma_handle, (off_t)0, (size_t)0,
3899 3897 DDI_DMA_SYNC_FORDEV);
3900 3898 privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3901 3899 SF_DEBUG(5, (sf, CE_WARN,
3902 3900 "!Sending INQUIRY to al_pa %x lun %" PRIx64 "\n",
3903 3901 privp->dest_nport_id,
3904 3902 SCSA_LUN(target)));
3905 3903 return (sf_els_transport(sf, privp));
3906 3904
3907 3905 fail:
3908 3906 sf_log(sf, CE_WARN,
3909 3907 "%s failure for INQUIRY to target 0x%x\n",
3910 3908 msg, sf_alpa_to_switch[privp->dest_nport_id]);
3911 3909 sf_els_free(fpkt);
3912 3910 if (inq_dma_handle != NULL) {
3913 3911 if (handle_bound) {
3914 3912 (void) ddi_dma_unbind_handle(inq_dma_handle);
3915 3913 }
3916 3914 ddi_dma_free_handle(&inq_dma_handle);
3917 3915 }
3918 3916 if (inq_buf != NULL) {
3919 3917 ddi_dma_mem_free(&inq_acc_handle);
3920 3918 }
3921 3919 return (FALSE);
3922 3920 }
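sf_do_inquiry() builds a group-0 INQUIRY with the allocation length in the single g0_count0 byte, so a group-0 CDB can ask for at most 255 bytes; the LUN travels in the FCP header (fcp_ent_addr) rather than in the CDB. The CDB part with raw byte offsets, a hedged sketch rather than the driver's union (the macro name is mine; 0x12 is the standard INQUIRY opcode):

	#include <stdint.h>
	#include <string.h>

	#define	SCMD_INQUIRY_OP	0x12	/* INQUIRY opcode */

	/* Fill a 6-byte group-0 INQUIRY CDB. */
	static void
	build_inquiry_cdb(uint8_t cdb[6], uint8_t alloc_len)
	{
		memset(cdb, 0, 6);
		cdb[0] = SCMD_INQUIRY_OP;
		cdb[4] = alloc_len;	/* g0_count0: one byte, max 255 */
	}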
3923 3921
3924 3922
3925 3923 /*
3926 3924 * called as the pkt_comp routine for INQ packets
3927 3925 */
3928 3926 static void
3929 3927 sf_inq_callback(struct fcal_packet *fpkt)
3930 3928 {
3931 3929 struct sf_els_hdr *privp = (struct sf_els_hdr *)fpkt->
3932 3930 fcal_pkt_private;
3933 3931 struct scsi_inquiry *prt = (struct scsi_inquiry *)privp->data_buf;
3934 3932 struct sf *sf = privp->sf;
3935 3933 struct sf *tsf;
3936 3934 struct sf_target *target = privp->target;
3937 3935 struct fcp_rsp *rsp;
3938 3936 int delayed_retry = FALSE;
3939 3937 short ncmds;
3940 3938
3941 3939
3942 3940 mutex_enter(&sf->sf_mutex);
3943 3941 /* use as temporary state variable */
3944 3942 if (privp->timeout == SF_INVALID_TIMEOUT) {
3945 3943 mutex_exit(&sf->sf_mutex);
3946 3944 return;
3947 3945 }
3948 3946 if (privp->prev != NULL) {
3949 3947 privp->prev->next = privp->next;
3950 3948 }
3951 3949 if (privp->next != NULL) {
3952 3950 privp->next->prev = privp->prev;
3953 3951 }
3954 3952 if (sf->sf_els_list == privp) {
3955 3953 sf->sf_els_list = privp->next;
3956 3954 }
3957 3955 privp->prev = privp->next = NULL;
3958 3956 mutex_exit(&sf->sf_mutex);
3959 3957 ncmds = fpkt->fcal_ncmds;
3960 3958 ASSERT(ncmds >= 0);
3961 3959 mutex_enter(&sf->sf_cmd_mutex);
3962 3960 sf->sf_ncmds = ncmds;
3963 3961 mutex_exit(&sf->sf_cmd_mutex);
3964 3962
3965 3963 if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
3966 3964
3967 3965 (void) ddi_dma_sync(privp->rsp_dma_handle, (off_t)0,
3968 3966 (size_t)0, DDI_DMA_SYNC_FORKERNEL);
3969 3967
3970 3968 rsp = (struct fcp_rsp *)privp->rsp;
3971 3969 SF_DEBUG(2, (sf, CE_CONT,
3972 3970 "!INQUIRY to al_pa %x scsi status %x",
3973 3971 privp->dest_nport_id, rsp->fcp_u.fcp_status.scsi_status));
3974 3972
3975 3973 if ((rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD) &&
3976 3974 !rsp->fcp_u.fcp_status.resid_over &&
3977 3975 (!rsp->fcp_u.fcp_status.resid_under ||
3978 3976 ((SUN_INQSIZE - rsp->fcp_resid) >= SUN_MIN_INQLEN))) {
3979 3977 struct fcp_rsp_info *bep;
3980 3978
3981 3979 bep = (struct fcp_rsp_info *)(&rsp->
3982 3980 fcp_response_len + 1);
3983 3981
3984 3982 if (!rsp->fcp_u.fcp_status.rsp_len_set ||
3985 3983 (bep->rsp_code == FCP_NO_FAILURE)) {
3986 3984
3987 3985 SF_DEBUG(2, (sf, CE_CONT,
3988 3986 "!INQUIRY to al_pa %x lun %" PRIx64
3989 3987 " succeeded\n",
3990 3988 privp->dest_nport_id, SCSA_LUN(target)));
3991 3989
3992 3990 (void) ddi_dma_sync(privp->data_dma_handle,
3993 3991 (off_t)0, (size_t)0,
3994 3992 DDI_DMA_SYNC_FORKERNEL);
3995 3993
3996 3994 mutex_enter(&sf->sf_mutex);
3997 3995
3998 3996 if (sf->sf_lip_cnt == privp->lip_cnt) {
3999 3997 mutex_enter(&target->sft_mutex);
4000 3998 target->sft_device_type =
4001 3999 prt->inq_dtype;
4002 4000 bcopy(prt, &target->sft_inq,
4003 4001 sizeof (*prt));
4004 4002 mutex_exit(&target->sft_mutex);
4005 4003 sf->sf_device_count--;
4006 4004 ASSERT(sf->sf_device_count >= 0);
4007 4005 if (sf->sf_device_count == 0) {
4008 4006 sf_finish_init(sf,
4009 4007 privp->lip_cnt);
4010 4008 }
4011 4009 }
4012 4010 mutex_exit(&sf->sf_mutex);
4013 4011 sf_els_free(fpkt);
4014 4012 return;
4015 4013 }
4016 4014 } else if ((rsp->fcp_u.fcp_status.scsi_status ==
4017 4015 STATUS_BUSY) ||
4018 4016 (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL) ||
4019 4017 (rsp->fcp_u.fcp_status.scsi_status == STATUS_CHECK)) {
4020 4018 delayed_retry = TRUE;
4021 4019 }
4022 4020 } else {
4023 4021 SF_DEBUG(2, (sf, CE_CONT, "!INQUIRY to al_pa %x fc status %x",
4024 4022 privp->dest_nport_id, fpkt->fcal_pkt_status));
4025 4023 }
4026 4024
4027 4025 if (++(privp->retries) < sf_els_retries ||
4028 4026 (delayed_retry && privp->retries < SF_BSY_RETRIES)) {
4029 4027 if (fpkt->fcal_pkt_status == FCAL_STATUS_MAX_XCHG_EXCEEDED) {
4030 4028 tsf = sf->sf_sibling;
4031 4029 if (tsf != NULL) {
4032 4030 mutex_enter(&tsf->sf_cmd_mutex);
4033 4031 tsf->sf_flag = 1;
4034 4032 tsf->sf_throttle = SF_DECR_DELTA;
4035 4033 mutex_exit(&tsf->sf_cmd_mutex);
4036 4034 }
4037 4035 delayed_retry = 1;
4038 4036 }
4039 4037 if (delayed_retry) {
4040 4038 privp->retries--;
4041 4039 privp->timeout = sf_watchdog_time + SF_BSY_TIMEOUT;
4042 4040 privp->delayed_retry = TRUE;
4043 4041 } else {
4044 4042 privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
4045 4043 }
4046 4044
4047 4045 privp->prev = NULL;
4048 4046 mutex_enter(&sf->sf_mutex);
4049 4047 if (privp->lip_cnt == sf->sf_lip_cnt) {
4050 4048 if (!delayed_retry) {
4051 4049 SF_DEBUG(1, (sf, CE_WARN,
4052 4050 "INQUIRY to al_pa %x failed, retrying",
4053 4051 privp->dest_nport_id));
4054 4052 }
4055 4053 privp->next = sf->sf_els_list;
4056 4054 if (sf->sf_els_list != NULL) {
4057 4055 sf->sf_els_list->prev = privp;
4058 4056 }
4059 4057 sf->sf_els_list = privp;
4060 4058 mutex_exit(&sf->sf_mutex);
4061 4059 /* if not delayed call transport to send a pkt */
4062 4060 if (!delayed_retry &&
4063 4061 (soc_transport(sf->sf_sochandle, fpkt,
4064 4062 FCAL_NOSLEEP, CQ_REQUEST_1) !=
4065 4063 FCAL_TRANSPORT_SUCCESS)) {
4066 4064 mutex_enter(&sf->sf_mutex);
4067 4065 if (privp->prev != NULL) {
4068 4066 privp->prev->next = privp->next;
4069 4067 }
4070 4068 if (privp->next != NULL) {
4071 4069 privp->next->prev = privp->prev;
4072 4070 }
4073 4071 if (sf->sf_els_list == privp) {
4074 4072 sf->sf_els_list = privp->next;
4075 4073 }
4076 4074 mutex_exit(&sf->sf_mutex);
4077 4075 goto fail;
4078 4076 }
4079 4077 return;
4080 4078 }
4081 4079 mutex_exit(&sf->sf_mutex);
4082 4080 } else {
4083 4081 fail:
4084 4082 mutex_enter(&sf->sf_mutex);
4085 4083 if (sf->sf_lip_cnt == privp->lip_cnt) {
4086 4084 sf_offline_target(sf, target);
4087 4085 sf_log(sf, CE_NOTE,
4088 4086 "INQUIRY to target 0x%x lun %" PRIx64 " failed. "
4089 4087 "Retry Count: %d\n",
4090 4088 sf_alpa_to_switch[privp->dest_nport_id],
4091 4089 SCSA_LUN(target),
4092 4090 privp->retries);
4093 4091 sf->sf_device_count--;
4094 4092 ASSERT(sf->sf_device_count >= 0);
4095 4093 if (sf->sf_device_count == 0) {
4096 4094 sf_finish_init(sf, privp->lip_cnt);
4097 4095 }
4098 4096 }
4099 4097 mutex_exit(&sf->sf_mutex);
4100 4098 }
4101 4099 sf_els_free(fpkt);
4102 4100 }
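The underrun test in the callback accepts a short INQUIRY as long as enough data arrived: resid_under may be set, but SUN_INQSIZE minus the residual must still cover the minimum INQUIRY length, and any overrun fails outright. The same predicate in isolation, with the driver's constants treated as parameters:

	/*
	 * Decide whether INQUIRY data with a residual is still usable:
	 * overrun always fails; underrun passes only if the bytes that
	 * actually arrived (requested - resid) meet the minimum.
	 */
	static int
	inquiry_data_ok(int resid_over, int resid_under,
	    unsigned requested, unsigned resid, unsigned minimum)
	{
		if (resid_over)
			return (0);
		if (!resid_under)
			return (1);
		return (requested - resid >= minimum);
	}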
4103 4101
4104 4102
4105 4103 static void
4106 4104 sf_finish_init(struct sf *sf, int lip_cnt)
4107 4105 {
4108 4106 int i; /* loop index */
4109 4107 int cflag;
4110 4108 struct sf_target *target; /* current target */
4111 4109 dev_info_t *dip;
4112 4110 struct sf_hp_elem *elem; /* hotplug element created */
4113 4111
4114 4112 SF_DEBUG(1, (sf, CE_WARN, "!sf_finish_init\n"));
4115 4113 ASSERT(mutex_owned(&sf->sf_mutex));
4116 4114
4117 4115 /* scan all hash queues */
4118 4116 for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
4119 4117 target = sf->sf_wwn_lists[i];
4120 4118 while (target != NULL) {
4121 4119 mutex_enter(&target->sft_mutex);
4122 4120
4123 4121	/* skip targets that are already offline */
4124 4122 if ((target->sft_state & SF_TARGET_OFFLINE)) {
4125 4123 /*
4126 4124 * target already offline
4127 4125 */
4128 4126 mutex_exit(&target->sft_mutex);
4129 4127 goto next_entry;
4130 4128 }
4131 4129
4132 4130 /*
4133 4131 * target is not already offline -- see if it has
4134 4132 * already been marked as ready to go offline
4135 4133 */
4136 4134 if (target->sft_state & SF_TARGET_MARK) {
4137 4135 /*
4138 4136 * target already marked, so take it offline
4139 4137 */
4140 4138 mutex_exit(&target->sft_mutex);
4141 4139 sf_offline_target(sf, target);
4142 4140 goto next_entry;
4143 4141 }
4144 4142
4145 4143 /* clear target busy flag */
4146 4144 target->sft_state &= ~SF_TARGET_BUSY;
4147 4145
4148 4146 /* is target init not yet done ?? */
4149 4147 cflag = !(target->sft_state & SF_TARGET_INIT_DONE);
4150 4148
4151 4149 /* get pointer to target dip */
4152 4150 dip = target->sft_dip;
4153 4151
4154 4152 mutex_exit(&target->sft_mutex);
4155 4153 mutex_exit(&sf->sf_mutex);
4156 4154
4157 4155 if (cflag && (dip == NULL)) {
4158 4156 /*
4159 4157 * target init not yet done &&
4160 4158 * devinfo not yet created
4161 4159 */
4162 4160 sf_create_devinfo(sf, target, lip_cnt);
4163 4161 mutex_enter(&sf->sf_mutex);
4164 4162 goto next_entry;
4165 4163 }
4166 4164
4167 4165 /*
4168 4166 * target init already done || devinfo already created
4169 4167 */
4170 4168 ASSERT(dip != NULL);
4171 4169 if (!sf_create_props(dip, target, lip_cnt)) {
4172 4170 /* a problem creating properties */
4173 4171 mutex_enter(&sf->sf_mutex);
4174 4172 goto next_entry;
4175 4173 }
4176 4174
4177 4175 /* create a new element for the hotplug list */
4178 4176 if ((elem = kmem_zalloc(sizeof (struct sf_hp_elem),
4179 4177 KM_NOSLEEP)) != NULL) {
4180 4178
4181 4179 /* fill in the new element */
4182 4180 elem->dip = dip;
4183 4181 elem->target = target;
4184 4182 elem->what = SF_ONLINE;
4185 4183
4186 4184 /* add the new element into the hotplug list */
4187 4185 mutex_enter(&sf->sf_hp_daemon_mutex);
4188 4186 if (sf->sf_hp_elem_tail != NULL) {
4189 4187 sf->sf_hp_elem_tail->next = elem;
4190 4188 sf->sf_hp_elem_tail = elem;
4191 4189 } else {
4192 4190 /* this is the first element in list */
4193 4191 sf->sf_hp_elem_head =
4194 4192 sf->sf_hp_elem_tail =
4195 4193 elem;
4196 4194 }
4197 4195 cv_signal(&sf->sf_hp_daemon_cv);
4198 4196 mutex_exit(&sf->sf_hp_daemon_mutex);
4199 4197 } else {
4200 4198 /* could not allocate memory for element ?? */
4201 4199 (void) ndi_devi_online_async(dip, 0);
4202 4200 }
4203 4201
4204 4202 mutex_enter(&sf->sf_mutex);
4205 4203
4206 4204 next_entry:
4207 4205 /* ensure no new LIPs have occurred */
4208 4206 if (sf->sf_lip_cnt != lip_cnt) {
4209 4207 return;
4210 4208 }
4211 4209 target = target->sft_next;
4212 4210 }
4213 4211
4214 4212 /* done scanning all targets in this queue */
4215 4213 }
4216 4214
4217 4215 /* done with all hash queues */
4218 4216
4219 4217 sf->sf_state = SF_STATE_ONLINE;
4220 4218 sf->sf_online_timer = 0;
4221 4219 }
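The hotplug hand-off in sf_finish_init() is the producer side of a classic condition-variable queue: append to a head/tail singly-linked list under the daemon mutex, then cv_signal() the daemon. A user-space pthreads sketch of the same producer, with invented names in place of the sf_hp_* fields:

	#include <pthread.h>
	#include <stddef.h>

	struct hp_elem {
		struct hp_elem *next;
		void *payload;
	};

	static pthread_mutex_t hp_mutex = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t hp_cv = PTHREAD_COND_INITIALIZER;
	static struct hp_elem *hp_head, *hp_tail;

	/* Tail-enqueue one element and wake the consumer thread. */
	static void
	hp_enqueue(struct hp_elem *elem)
	{
		elem->next = NULL;
		pthread_mutex_lock(&hp_mutex);
		if (hp_tail != NULL) {
			hp_tail->next = elem;
			hp_tail = elem;
		} else {
			/* this is the first element in the list */
			hp_head = hp_tail = elem;
		}
		pthread_cond_signal(&hp_cv);
		pthread_mutex_unlock(&hp_mutex);
	}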
4222 4220
4223 4221
4224 4222 /*
4225 4223 * create devinfo node
4226 4224 */
4227 4225 static void
4228 4226 sf_create_devinfo(struct sf *sf, struct sf_target *target, int lip_cnt)
4229 4227 {
4230 4228 dev_info_t *cdip = NULL;
4231 4229 char *nname = NULL;
4232 4230 char **compatible = NULL;
4233 4231 int ncompatible;
4234 4232 struct scsi_inquiry *inq = &target->sft_inq;
4235 4233 char *scsi_binding_set;
4236 4234
4237 4235 /* get the 'scsi-binding-set' property */
4238 4236 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, sf->sf_dip,
4239 4237 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
4240 4238 &scsi_binding_set) != DDI_PROP_SUCCESS)
4241 4239 scsi_binding_set = NULL;
4242 4240
4243 4241 /* determine the node name and compatible */
4244 4242 scsi_hba_nodename_compatible_get(inq, scsi_binding_set,
4245 4243 inq->inq_dtype, NULL, &nname, &compatible, &ncompatible);
4246 4244 if (scsi_binding_set)
4247 4245 ddi_prop_free(scsi_binding_set);
4248 4246
4249 4247 /* if nodename can't be determined then print a message and skip it */
4250 4248 if (nname == NULL) {
4251 4249 #ifndef RAID_LUNS
4252 4250 sf_log(sf, CE_WARN, "%s%d: no driver for device "
4253 4251 "@w%02x%02x%02x%02x%02x%02x%02x%02x,%x\n"
4254 4252 " compatible: %s",
4255 4253 ddi_driver_name(sf->sf_dip), ddi_get_instance(sf->sf_dip),
4256 4254 target->sft_port_wwn[0], target->sft_port_wwn[1],
4257 4255 target->sft_port_wwn[2], target->sft_port_wwn[3],
4258 4256 target->sft_port_wwn[4], target->sft_port_wwn[5],
4259 4257 target->sft_port_wwn[6], target->sft_port_wwn[7],
4260 4258 target->sft_lun.l, *compatible);
4261 4259 #else
4262 4260 sf_log(sf, CE_WARN, "%s%d: no driver for device "
4263 4261 "@w%02x%02x%02x%02x%02x%02x%02x%02x,%x\n"
4264 4262 " compatible: %s",
4265 4263 ddi_driver_name(sf->sf_dip), ddi_get_instance(sf->sf_dip),
4266 4264 target->sft_port_wwn[0], target->sft_port_wwn[1],
4267 4265 target->sft_port_wwn[2], target->sft_port_wwn[3],
4268 4266 target->sft_port_wwn[4], target->sft_port_wwn[5],
4269 4267 target->sft_port_wwn[6], target->sft_port_wwn[7],
4270 4268 target->sft_raid_lun, *compatible);
4271 4269 #endif
4272 4270 goto fail;
4273 4271 }
4274 4272
4275 4273 /* allocate the node */
4276 4274 if (ndi_devi_alloc(sf->sf_dip, nname,
4277 4275 DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
4278 4276 goto fail;
4279 4277 }
4280 4278
4281 4279 /* decorate the node with compatible */
4282 4280 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
4283 4281 "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
4284 4282 goto fail;
4285 4283 }
4286 4284
4287 4285 /* add addressing properties to the node */
4288 4286 if (sf_create_props(cdip, target, lip_cnt) != 1) {
4289 4287 goto fail;
4290 4288 }
4291 4289
4292 4290 mutex_enter(&target->sft_mutex);
4293 4291 if (target->sft_dip != NULL) {
4294 4292 mutex_exit(&target->sft_mutex);
4295 4293 goto fail;
4296 4294 }
4297 4295 target->sft_dip = cdip;
4298 4296 mutex_exit(&target->sft_mutex);
4299 4297
4300 4298 if (ndi_devi_online_async(cdip, 0) != DDI_SUCCESS) {
4301 4299 goto fail;
4302 4300 }
4303 4301
4304 4302 scsi_hba_nodename_compatible_free(nname, compatible);
4305 4303 return;
4306 4304
4307 4305 fail:
4308 4306 scsi_hba_nodename_compatible_free(nname, compatible);
4309 4307 if (cdip != NULL) {
4310 4308 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP);
4311 4309 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP);
4312 4310 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, LIP_CNT_PROP);
4313 4311 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, TARGET_PROP);
4314 4312 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, LUN_PROP);
4315 4313 if (ndi_devi_free(cdip) != NDI_SUCCESS) {
4316 4314 sf_log(sf, CE_WARN, "ndi_devi_free failed\n");
4317 4315 } else {
4318 4316 mutex_enter(&target->sft_mutex);
4319 4317 if (cdip == target->sft_dip) {
4320 4318 target->sft_dip = NULL;
4321 4319 }
4322 4320 mutex_exit(&target->sft_mutex);
4323 4321 }
4324 4322 }
4325 4323 }
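The "no driver for device" message above prints the unit address the node would have carried: a 'w' prefix, the eight port-WWN bytes in hex, a comma, and the LUN. A small sketch that formats the same string with snprintf (function name and buffer sizing are mine):

	#include <stdio.h>
	#include <stdint.h>

	/* Format "w<16 hex digits>,<lun>" as seen in sf's unit addresses. */
	static void
	format_unit_address(char *buf, size_t len,
	    const uint8_t wwn[8], unsigned lun)
	{
		(void) snprintf(buf, len,
		    "w%02x%02x%02x%02x%02x%02x%02x%02x,%x",
		    wwn[0], wwn[1], wwn[2], wwn[3],
		    wwn[4], wwn[5], wwn[6], wwn[7], lun);
	}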
4326 4324
4327 4325 /*
4328 4326 * create required properties, returning TRUE iff we succeed, else
4329 4327 * returning FALSE
4330 4328 */
4331 4329 static int
4332 4330 sf_create_props(dev_info_t *cdip, struct sf_target *target, int lip_cnt)
4333 4331 {
4334 4332 int tgt_id = sf_alpa_to_switch[target->sft_al_pa];
4335 4333
4336 4334
4337 4335 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE,
4338 4336 cdip, NODE_WWN_PROP, target->sft_node_wwn, FC_WWN_SIZE) !=
4339 4337 DDI_PROP_SUCCESS) {
4340 4338 return (FALSE);
4341 4339 }
4342 4340
4343 4341 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE,
4344 4342 cdip, PORT_WWN_PROP, target->sft_port_wwn, FC_WWN_SIZE) !=
4345 4343 DDI_PROP_SUCCESS) {
4346 4344 return (FALSE);
4347 4345 }
4348 4346
4349 4347 if (ndi_prop_update_int(DDI_DEV_T_NONE,
4350 4348 cdip, LIP_CNT_PROP, lip_cnt) != DDI_PROP_SUCCESS) {
4351 4349 return (FALSE);
4352 4350 }
4353 4351
4354 4352 if (ndi_prop_update_int(DDI_DEV_T_NONE,
4355 4353 cdip, TARGET_PROP, tgt_id) != DDI_PROP_SUCCESS) {
4356 4354 return (FALSE);
4357 4355 }
4358 4356
4359 4357 #ifndef RAID_LUNS
4360 4358 if (ndi_prop_update_int(DDI_DEV_T_NONE,
4361 4359 cdip, LUN_PROP, target->sft_lun.l) != DDI_PROP_SUCCESS) {
4362 4360 return (0);
4363 4361 }
4364 4362 #else
4365 4363 if (ndi_prop_update_int(DDI_DEV_T_NONE,
4366 4364 cdip, LUN_PROP, target->sft_raid_lun) != DDI_PROP_SUCCESS) {
4367 4365 return (0);
4368 4366 }
4369 4367 #endif
4370 4368
4371 4369 return (TRUE);
4372 4370 }
4373 4371
4374 4372
4375 4373 /*
4376 4374 * called by the transport to offline a target
4377 4375 */
4378 4376 /* ARGSUSED */
4379 4377 static void
4380 4378 sf_offline_target(struct sf *sf, struct sf_target *target)
4381 4379 {
4382 4380 dev_info_t *dip;
4383 4381 struct sf_target *next_target = NULL;
4384 4382 struct sf_hp_elem *elem;
4385 4383
4386 4384 ASSERT(mutex_owned(&sf->sf_mutex));
4387 4385
4388 4386 if (sf_core && (sf_core & SF_CORE_OFFLINE_TARGET)) {
4389 4387 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
4390 4388 sf_core = 0;
4391 4389 }
4392 4390
4393 4391 while (target != NULL) {
4394 4392 sf_log(sf, CE_NOTE,
4395 4393 "!target 0x%x al_pa 0x%x lun %" PRIx64 " offlined\n",
4396 4394 sf_alpa_to_switch[target->sft_al_pa],
4397 4395 target->sft_al_pa, SCSA_LUN(target));
4398 4396 mutex_enter(&target->sft_mutex);
4399 4397 target->sft_state &= ~(SF_TARGET_BUSY|SF_TARGET_MARK);
4400 4398 target->sft_state |= SF_TARGET_OFFLINE;
4401 4399 mutex_exit(&target->sft_mutex);
4402 4400 mutex_exit(&sf->sf_mutex);
4403 4401
4404 4402 /* XXXX if this is LUN 0, offline all other LUNs */
4405 4403 if (next_target || target->sft_lun.l == 0)
4406 4404 next_target = target->sft_next_lun;
4407 4405
4408 4406 /* abort all cmds for this target */
4409 4407 sf_abort_all(sf, target, FALSE, sf->sf_lip_cnt, FALSE);
4410 4408
4411 4409 mutex_enter(&sf->sf_mutex);
4412 4410 mutex_enter(&target->sft_mutex);
4413 4411 if (target->sft_state & SF_TARGET_INIT_DONE) {
4414 4412 dip = target->sft_dip;
4415 4413 mutex_exit(&target->sft_mutex);
4416 4414 mutex_exit(&sf->sf_mutex);
4417 4415 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
4418 4416 TARGET_PROP);
4419 4417 (void) ndi_event_retrieve_cookie(sf->sf_event_hdl,
4420 4418 dip, FCAL_REMOVE_EVENT, &sf_remove_eid,
4421 4419 NDI_EVENT_NOPASS);
4422 4420 (void) ndi_event_run_callbacks(sf->sf_event_hdl,
4423 4421 target->sft_dip, sf_remove_eid, NULL);
4424 4422
4425 4423 elem = kmem_zalloc(sizeof (struct sf_hp_elem),
4426 4424 KM_NOSLEEP);
4427 4425 if (elem != NULL) {
4428 4426 elem->dip = dip;
4429 4427 elem->target = target;
4430 4428 elem->what = SF_OFFLINE;
4431 4429 mutex_enter(&sf->sf_hp_daemon_mutex);
4432 4430 if (sf->sf_hp_elem_tail != NULL) {
4433 4431 sf->sf_hp_elem_tail->next = elem;
4434 4432 sf->sf_hp_elem_tail = elem;
4435 4433 } else {
4436 4434 sf->sf_hp_elem_head =
4437 4435 sf->sf_hp_elem_tail =
4438 4436 elem;
4439 4437 }
4440 4438 cv_signal(&sf->sf_hp_daemon_cv);
4441 4439 mutex_exit(&sf->sf_hp_daemon_mutex);
4442 4440 } else {
4443 4441 /* don't do NDI_DEVI_REMOVE for now */
4444 4442 if (ndi_devi_offline(dip, 0) != NDI_SUCCESS) {
4445 4443 SF_DEBUG(1, (sf, CE_WARN,
4446 4444 "target %x lun %" PRIx64 ", "
4447 4445 "device offline failed",
4448 4446 sf_alpa_to_switch[target->
4449 4447 sft_al_pa],
4450 4448 SCSA_LUN(target)));
4451 4449 } else {
4452 4450 SF_DEBUG(1, (sf, CE_NOTE,
4453 4451 "target %x, lun %" PRIx64 ", "
4454 4452 "device offline succeeded\n",
4455 4453 sf_alpa_to_switch[target->
4456 4454 sft_al_pa],
4457 4455 SCSA_LUN(target)));
4458 4456 }
4459 4457 }
4460 4458 mutex_enter(&sf->sf_mutex);
4461 4459 } else {
4462 4460 mutex_exit(&target->sft_mutex);
4463 4461 }
4464 4462 target = next_target;
4465 4463 }
4466 4464 }
4467 4465
4468 4466
4469 4467 /*
4470 4468 * routine to get/set a capability
4471 4469 *
4472 4470 * returning:
4473 4471 * 1 (TRUE) boolean capability is true (on get)
4474 4472 * 0 (FALSE) invalid capability, can't set capability (on set),
4475 4473 * or boolean capability is false (on get)
4476 4474 * -1 (UNDEFINED) can't find capability (SCSA) or unsupported capability
4477 4475 * 3 when getting SCSI version number
4478 4476 * AL_PA when getting port initiator ID
4479 4477 */
4480 4478 static int
4481 4479 sf_commoncap(struct scsi_address *ap, char *cap,
4482 4480 int val, int tgtonly, int doset)
4483 4481 {
4484 4482 struct sf *sf = ADDR2SF(ap);
4485 4483 int cidx;
4486 4484 int rval = FALSE;
4487 4485
4488 4486
4489 4487 if (cap == NULL) {
4490 4488 SF_DEBUG(3, (sf, CE_WARN, "sf_commoncap: invalid arg"));
4491 4489 return (rval);
4492 4490 }
4493 4491
4494 4492 /* get index of capability string */
4495 4493 if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
4496 4494 /* can't find capability */
4497 4495 return (UNDEFINED);
4498 4496 }
4499 4497
4500 4498 if (doset) {
4501 4499 /*
4502 4500 * Process setcap request.
4503 4501 */
4504 4502
4505 4503 /*
4506 4504 * At present, we can only set binary (0/1) values
4507 4505 */
4508 4506 switch (cidx) {
4509 4507 case SCSI_CAP_ARQ: /* can't set this capability */
4510 4508 break;
4511 4509 default:
4512 4510 SF_DEBUG(3, (sf, CE_WARN,
4513 4511 "sf_setcap: unsupported %d", cidx));
4514 4512 rval = UNDEFINED;
4515 4513 break;
4516 4514 }
4517 4515
4518 4516 SF_DEBUG(4, (sf, CE_NOTE,
4519 4517 "set cap: cap=%s,val=0x%x,tgtonly=0x%x"
4520 4518 ",doset=0x%x,rval=%d\n",
4521 4519 cap, val, tgtonly, doset, rval));
4522 4520
4523 4521 } else {
4524 4522 /*
4525 4523 * Process getcap request.
4526 4524 */
4527 4525 switch (cidx) {
4528 4526 case SCSI_CAP_DMA_MAX:
4529 4527	break; /* don't have this capability */
4530 4528 case SCSI_CAP_INITIATOR_ID:
4531 4529 rval = sf->sf_al_pa;
4532 4530 break;
4533 4531 case SCSI_CAP_ARQ:
4534 4532 rval = TRUE; /* do have this capability */
4535 4533 break;
4536 4534 case SCSI_CAP_RESET_NOTIFICATION:
4537 4535 case SCSI_CAP_TAGGED_QING:
4538 4536 rval = TRUE; /* do have this capability */
4539 4537 break;
4540 4538 case SCSI_CAP_SCSI_VERSION:
4541 4539 rval = 3;
4542 4540 break;
4543 4541 case SCSI_CAP_INTERCONNECT_TYPE:
4544 4542 rval = INTERCONNECT_FIBRE;
4545 4543 break;
4546 4544 default:
4547 4545 SF_DEBUG(4, (sf, CE_WARN,
4548 4546 "sf_scsi_getcap: unsupported"));
4549 4547 rval = UNDEFINED;
4550 4548 break;
4551 4549 }
4552 4550 SF_DEBUG(4, (sf, CE_NOTE,
4553 4551 "get cap: cap=%s,val=0x%x,tgtonly=0x%x,"
4554 4552 "doset=0x%x,rval=%d\n",
4555 4553 cap, val, tgtonly, doset, rval));
4556 4554 }
4557 4555
4558 4556 return (rval);
4559 4557 }
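sf_commoncap() first maps the capability string to an index with scsi_hba_lookup_capstr() and then switches on that index; anything it cannot name comes back UNDEFINED. A user-space sketch of the same string-to-index dispatch -- the real index values and full name list belong to SCSA, so the enum and table here are invented for illustration:

	#include <string.h>

	enum { CAP_ARQ, CAP_INITIATOR_ID, CAP_SCSI_VERSION,
	    CAP_UNKNOWN = -1 };

	static const char *capnames[] = {
		"arq", "initiator-id", "scsi-version"
	};

	/* Map a capability string to an index, like scsi_hba_lookup_capstr() */
	static int
	lookup_capstr(const char *cap)
	{
		int i;

		for (i = 0; i < (int)(sizeof (capnames) /
		    sizeof (capnames[0])); i++) {
			if (strcmp(cap, capnames[i]) == 0)
				return (i);
		}
		return (CAP_UNKNOWN);
	}

	/* Get path only; sf answers 3 for scsi-version, AL_PA for init-id */
	static int
	getcap(const char *cap, int al_pa)
	{
		switch (lookup_capstr(cap)) {
		case CAP_ARQ:
			return (1);
		case CAP_INITIATOR_ID:
			return (al_pa);
		case CAP_SCSI_VERSION:
			return (3);
		default:
			return (-1);	/* UNDEFINED */
		}
	}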
4560 4558
4561 4559
4562 4560 /*
4563 4561 * called by the transport to get a capability
4564 4562 */
4565 4563 static int
4566 4564 sf_getcap(struct scsi_address *ap, char *cap, int whom)
4567 4565 {
4568 4566 return (sf_commoncap(ap, cap, 0, whom, FALSE));
4569 4567 }
4570 4568
4571 4569
4572 4570 /*
4573 4571 * called by the transport to set a capability
4574 4572 */
4575 4573 static int
4576 4574 sf_setcap(struct scsi_address *ap, char *cap, int value, int whom)
4577 4575 {
4578 4576 return (sf_commoncap(ap, cap, value, whom, TRUE));
4579 4577 }
4580 4578
4581 4579
4582 4580 /*
4583 4581 * called by the transport to abort a target
4584 4582 */
4585 4583 static int
4586 4584 sf_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
4587 4585 {
4588 4586 struct sf *sf = ADDR2SF(ap);
4589 4587 struct sf_target *target = ADDR2TARGET(ap);
4590 4588 struct sf_pkt *cmd, *ncmd, *pcmd;
4591 4589 struct fcal_packet *fpkt;
4592 4590 int rval = 0, t, my_rval = FALSE;
4593 4591 int old_target_state;
4594 4592 int lip_cnt;
4595 4593 int tgt_id;
4596 4594 fc_frame_header_t *hp;
4597 4595 int deferred_destroy;
4598 4596
4599 4597 deferred_destroy = 0;
4600 4598
4601 4599 if (pkt != NULL) {
4602 4600 cmd = PKT2CMD(pkt);
4603 4601 fpkt = cmd->cmd_fp_pkt;
4604 4602 SF_DEBUG(2, (sf, CE_NOTE, "sf_abort packet %p\n",
4605 4603 (void *)fpkt));
4606 4604 pcmd = NULL;
4607 4605 mutex_enter(&sf->sf_cmd_mutex);
4608 4606 ncmd = sf->sf_pkt_head;
4609 4607 while (ncmd != NULL) {
4610 4608 if (ncmd == cmd) {
4611 4609 if (pcmd != NULL) {
4612 4610 pcmd->cmd_next = cmd->cmd_next;
4613 4611 } else {
4614 4612 sf->sf_pkt_head = cmd->cmd_next;
4615 4613 }
4616 4614 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
4617 4615 cmd->cmd_state = SF_STATE_IDLE;
4618 4616 pkt->pkt_reason = CMD_ABORTED;
4619 4617 pkt->pkt_statistics |= STAT_ABORTED;
4620 4618 my_rval = TRUE;
4621 4619 break;
4622 4620 } else {
4623 4621 pcmd = ncmd;
4624 4622 ncmd = ncmd->cmd_next;
4625 4623 }
4626 4624 }
4627 4625 mutex_exit(&sf->sf_cmd_mutex);
4628 4626 if (ncmd == NULL) {
4629 4627 mutex_enter(&cmd->cmd_abort_mutex);
4630 4628 if (cmd->cmd_state == SF_STATE_ISSUED) {
4631 4629 cmd->cmd_state = SF_STATE_ABORTING;
4632 4630 cmd->cmd_timeout = sf_watchdog_time + 20;
4633 4631 mutex_exit(&cmd->cmd_abort_mutex);
4634 4632 /* call transport to abort command */
4635 4633 if (((rval = soc_abort(sf->sf_sochandle,
4636 4634 sf->sf_socp, sf->sf_sochandle->fcal_portno,
4637 4635 fpkt, 1)) == FCAL_ABORTED) ||
4638 4636 (rval == FCAL_ABORT_FAILED)) {
4639 4637 my_rval = TRUE;
4640 4638 pkt->pkt_reason = CMD_ABORTED;
4641 4639 pkt->pkt_statistics |= STAT_ABORTED;
4642 4640 cmd->cmd_state = SF_STATE_IDLE;
4643 4641 } else if (rval == FCAL_BAD_ABORT) {
4644 4642 cmd->cmd_timeout = sf_watchdog_time
4645 4643 + 20;
4646 4644 my_rval = FALSE;
4647 4645 } else {
4648 4646 SF_DEBUG(1, (sf, CE_NOTE,
4649 4647 "Command Abort failed\n"));
4650 4648 }
4651 4649 } else {
4652 4650 mutex_exit(&cmd->cmd_abort_mutex);
4653 4651 }
4654 4652 }
4655 4653 } else {
4656 4654 SF_DEBUG(2, (sf, CE_NOTE, "sf_abort target\n"));
4657 4655 mutex_enter(&sf->sf_mutex);
4658 4656 lip_cnt = sf->sf_lip_cnt;
4659 4657 mutex_enter(&target->sft_mutex);
4660 4658 if (target->sft_state & (SF_TARGET_BUSY |
4661 4659 SF_TARGET_OFFLINE)) {
4662 4660 mutex_exit(&target->sft_mutex);
4663 4661 return (rval);
4664 4662 }
4665 4663 old_target_state = target->sft_state;
4666 4664 target->sft_state |= SF_TARGET_BUSY;
4667 4665 mutex_exit(&target->sft_mutex);
4668 4666 mutex_exit(&sf->sf_mutex);
4669 4667
4670 4668 if ((pkt = sf_scsi_init_pkt(ap, NULL, NULL, 0,
4671 4669 0, 0, 0, NULL, 0)) != NULL) {
4672 4670
4673 4671 cmd = PKT2CMD(pkt);
4674 4672 cmd->cmd_block->fcp_cntl.cntl_abort_tsk = 1;
4675 4673 cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
4676 4674 cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
4677 4675
4678 4676 /* prepare the packet for transport */
4679 4677 if (sf_prepare_pkt(sf, cmd, target) == TRAN_ACCEPT) {
4680 4678
4681 4679 cmd->cmd_state = SF_STATE_ISSUED;
4682 4680 /*
4683 4681 * call transport to send a pkt polled
4684 4682 *
4685 4683 * if that fails call the transport to abort it
4686 4684 */
4687 4685 if (soc_transport_poll(sf->sf_sochandle,
4688 4686 cmd->cmd_fp_pkt, SF_ABORT_TIMEOUT,
4689 4687 CQ_REQUEST_1) == FCAL_TRANSPORT_SUCCESS) {
4690 4688 (void) ddi_dma_sync(
4691 4689 cmd->cmd_cr_pool->rsp_dma_handle,
4692 4690 (off_t)
4693 4691 ((caddr_t)cmd->cmd_rsp_block -
4694 4692 cmd->cmd_cr_pool->rsp_base),
4695 4693 FCP_MAX_RSP_IU_SIZE,
4696 4694 DDI_DMA_SYNC_FORKERNEL);
4697 4695 if (((struct fcp_rsp_info *)
4698 4696 (&cmd->cmd_rsp_block->
4699 4697 fcp_response_len + 1))->
4700 4698 rsp_code == FCP_NO_FAILURE) {
4701 4699 /* abort cmds for this targ */
4702 4700 sf_abort_all(sf, target, TRUE,
4703 4701 lip_cnt, TRUE);
4704 4702 } else {
4705 4703 hp = &cmd->cmd_fp_pkt->
4706 4704 fcal_socal_request.
4707 4705 sr_fc_frame_hdr;
4708 4706 tgt_id = sf_alpa_to_switch[
4709 4707 (uchar_t)hp->d_id];
4710 4708 sf->sf_stats.tstats[tgt_id].
4711 4709 task_mgmt_failures++;
4712 4710 SF_DEBUG(1, (sf, CE_NOTE,
4713 4711 "Target %d Abort Task "
4714 4712 "Set failed\n", hp->d_id));
4715 4713 }
4716 4714 } else {
4717 4715 mutex_enter(&cmd->cmd_abort_mutex);
4718 4716 if (cmd->cmd_state == SF_STATE_ISSUED) {
4719 4717 cmd->cmd_state = SF_STATE_ABORTING;
4720 4718 cmd->cmd_timeout = sf_watchdog_time
4721 4719 + 20;
4722 4720 mutex_exit(&cmd->cmd_abort_mutex);
4723 4721 if ((t = soc_abort(sf->sf_sochandle,
4724 4722 sf->sf_socp, sf->sf_sochandle->
4725 4723 fcal_portno, cmd->cmd_fp_pkt, 1)) !=
4726 4724 FCAL_ABORTED &&
4727 4725 (t != FCAL_ABORT_FAILED)) {
4728 4726 sf_log(sf, CE_NOTE,
4729 4727 "sf_abort failed, "
4730 4728 "initiating LIP\n");
4731 4729 sf_force_lip(sf);
4732 4730 deferred_destroy = 1;
4733 4731 }
4734 4732 } else {
4735 4733 mutex_exit(&cmd->cmd_abort_mutex);
4736 4734 }
4737 4735 }
4738 4736 }
4739 4737 if (!deferred_destroy) {
4740 4738 cmd->cmd_fp_pkt->fcal_pkt_comp =
4741 4739 sf_cmd_callback;
4742 4740 cmd->cmd_block->fcp_cntl.cntl_abort_tsk = 0;
4743 4741 sf_scsi_destroy_pkt(ap, pkt);
4744 4742 my_rval = TRUE;
4745 4743 }
4746 4744 }
4747 4745 mutex_enter(&sf->sf_mutex);
4748 4746 if (lip_cnt == sf->sf_lip_cnt) {
4749 4747 mutex_enter(&target->sft_mutex);
4750 4748 target->sft_state = old_target_state;
4751 4749 mutex_exit(&target->sft_mutex);
4752 4750 }
4753 4751 mutex_exit(&sf->sf_mutex);
4754 4752 }
4755 4753 return (my_rval);
4756 4754 }
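The first half of sf_abort() removes the packet from the singly-linked sf_pkt_head overflow queue by walking with a trailing pointer: pcmd lags one node behind ncmd so that either the predecessor's link or the list head can be patched. The same removal in isolation:

	#include <stddef.h>

	struct pkt {
		struct pkt *next;
	};

	/*
	 * Unlink `victim' from a singly-linked queue, keeping a trailing
	 * pointer as sf_abort() does.  Returns 1 if found and removed.
	 */
	static int
	queue_remove(struct pkt **headp, struct pkt *victim)
	{
		struct pkt *p = NULL, *n;

		for (n = *headp; n != NULL; p = n, n = n->next) {
			if (n == victim) {
				if (p != NULL)
					p->next = n->next;
				else
					*headp = n->next;
				return (1);
			}
		}
		return (0);
	}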
4757 4755
4758 4756
4759 4757 /*
4760 4758 * called by the transport and internally to reset a target
4761 4759 */
4762 4760 static int
4763 4761 sf_reset(struct scsi_address *ap, int level)
4764 4762 {
4765 4763 struct scsi_pkt *pkt;
4766 4764 struct fcal_packet *fpkt;
4767 4765 struct sf *sf = ADDR2SF(ap);
4768 4766 struct sf_target *target = ADDR2TARGET(ap), *ntarget;
4769 4767 struct sf_pkt *cmd;
4770 4768 int rval = FALSE, t;
4771 4769 int lip_cnt;
4772 4770 int tgt_id, ret;
4773 4771 fc_frame_header_t *hp;
4774 4772 int deferred_destroy;
4775 4773
4776 4774 /* We don't support RESET_LUN yet. */
4777 4775 if (level == RESET_TARGET) {
4778 4776 struct sf_reset_list *p;
4779 4777
4780 4778 if ((p = kmem_alloc(sizeof (struct sf_reset_list), KM_NOSLEEP))
4781 4779 == NULL)
4782 4780 return (rval);
4783 4781
4784 4782 SF_DEBUG(2, (sf, CE_NOTE, "sf_reset target\n"));
4785 4783 mutex_enter(&sf->sf_mutex);
4786 4784 /* All target resets go to LUN 0 */
4787 4785 if (target->sft_lun.l) {
4788 4786 target = sf_lookup_target(sf, target->sft_port_wwn, 0);
4789 4787 }
4790 4788 mutex_enter(&target->sft_mutex);
4791 4789 if (target->sft_state & (SF_TARGET_BUSY |
4792 4790 SF_TARGET_OFFLINE)) {
4793 4791 mutex_exit(&target->sft_mutex);
4794 4792 mutex_exit(&sf->sf_mutex);
4795 4793 kmem_free(p, sizeof (struct sf_reset_list));
4796 4794 return (rval);
4797 4795 }
4798 4796 lip_cnt = sf->sf_lip_cnt;
4799 4797 target->sft_state |= SF_TARGET_BUSY;
4800 4798 for (ntarget = target->sft_next_lun;
4801 4799 ntarget;
4802 4800 ntarget = ntarget->sft_next_lun) {
4803 4801 mutex_enter(&ntarget->sft_mutex);
4804 4802 /*
4805 4803 * XXXX If we supported RESET_LUN we should check here
4806 4804 * to see if any LUN were being reset and somehow fail
4807 4805 * that operation.
4808 4806 */
4809 4807 ntarget->sft_state |= SF_TARGET_BUSY;
4810 4808 mutex_exit(&ntarget->sft_mutex);
4811 4809 }
4812 4810 mutex_exit(&target->sft_mutex);
4813 4811 mutex_exit(&sf->sf_mutex);
4814 4812
4815 4813 deferred_destroy = 0;
4816 4814 if ((pkt = sf_scsi_init_pkt(ap, NULL, NULL, 0,
4817 4815 0, 0, 0, NULL, 0)) != NULL) {
4818 4816 cmd = PKT2CMD(pkt);
4819 4817 cmd->cmd_block->fcp_cntl.cntl_reset = 1;
4820 4818 cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
4821 4819 cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
4822 4820
4823 4821 /* prepare the packet for transport */
4824 4822 if (sf_prepare_pkt(sf, cmd, target) == TRAN_ACCEPT) {
4825 4823 /* call transport to send a pkt polled */
4826 4824 cmd->cmd_state = SF_STATE_ISSUED;
4827 4825 if ((ret = soc_transport_poll(sf->sf_sochandle,
4828 4826 cmd->cmd_fp_pkt, SF_ABORT_TIMEOUT,
4829 4827 CQ_REQUEST_1)) == FCAL_TRANSPORT_SUCCESS) {
4830 4828 (void) ddi_dma_sync(cmd->cmd_cr_pool->
4831 4829 rsp_dma_handle, (caddr_t)cmd->
4832 4830 cmd_rsp_block - cmd->cmd_cr_pool->
4833 4831 rsp_base, FCP_MAX_RSP_IU_SIZE,
4834 4832 DDI_DMA_SYNC_FORKERNEL);
4835 4833 fpkt = cmd->cmd_fp_pkt;
4836 4834 if ((fpkt->fcal_pkt_status ==
4837 4835 FCAL_STATUS_OK) &&
4838 4836 (((struct fcp_rsp_info *)
4839 4837 (&cmd->cmd_rsp_block->
4840 4838 fcp_response_len + 1))->
4841 4839 rsp_code == FCP_NO_FAILURE)) {
4842 4840 sf_log(sf, CE_NOTE,
4843 4841 "!sf%d: Target 0x%x Reset "
4844 4842 "successful\n",
4845 4843	ddi_get_instance(
4846 4844 sf->sf_dip),
4847 4845 sf_alpa_to_switch[
4848 4846 target->sft_al_pa]);
4849 4847 rval = TRUE;
4850 4848 } else {
4851 4849 hp = &cmd->cmd_fp_pkt->
4852 4850 fcal_socal_request.
4853 4851 sr_fc_frame_hdr;
4854 4852 tgt_id = sf_alpa_to_switch[
4855 4853 (uchar_t)hp->d_id];
4856 4854 sf->sf_stats.tstats[tgt_id].
4857 4855 task_mgmt_failures++;
4858 4856 sf_log(sf, CE_NOTE,
4859 4857 "!sf%d: Target 0x%x "
4860 4858 "Reset failed."
4861 4859 "Status code 0x%x "
4862 4860 "Resp code 0x%x\n",
4863 4861	ddi_get_instance(
4864 4862 sf->sf_dip),
4865 4863 tgt_id,
4866 4864 fpkt->fcal_pkt_status,
4867 4865 ((struct fcp_rsp_info *)
4868 4866 (&cmd->cmd_rsp_block->
4869 4867 fcp_response_len + 1))->
4870 4868 rsp_code);
4871 4869 }
4872 4870 } else {
4873 4871 sf_log(sf, CE_NOTE, "!sf%d: Target "
4874 4872 "0x%x Reset Failed. Ret=%x\n",
4875 4873 ddi_get_instance(sf->sf_dip),
4876 4874 sf_alpa_to_switch[
4877 4875 target->sft_al_pa], ret);
4878 4876 mutex_enter(&cmd->cmd_abort_mutex);
4879 4877 if (cmd->cmd_state == SF_STATE_ISSUED) {
4880 4878 /* call the transport to abort a cmd */
4881 4879 cmd->cmd_timeout = sf_watchdog_time
4882 4880 + 20;
4883 4881 cmd->cmd_state = SF_STATE_ABORTING;
4884 4882 mutex_exit(&cmd->cmd_abort_mutex);
4885 4883 if (((t = soc_abort(sf->sf_sochandle,
4886 4884 sf->sf_socp,
4887 4885 sf->sf_sochandle->fcal_portno,
4888 4886 cmd->cmd_fp_pkt, 1)) !=
4889 4887 FCAL_ABORTED) &&
4890 4888 (t != FCAL_ABORT_FAILED)) {
4891 4889 sf_log(sf, CE_NOTE,
4892 4890 "!sf%d: Target 0x%x Reset "
4893 4891 "failed. Abort Failed, "
4894 4892 "forcing LIP\n",
4895 4893 ddi_get_instance(
4896 4894 sf->sf_dip),
4897 4895 sf_alpa_to_switch[
4898 4896 target->sft_al_pa]);
4899 4897 sf_force_lip(sf);
4900 4898 rval = TRUE;
4901 4899 deferred_destroy = 1;
4902 4900 }
4903 4901 } else {
4904 4902 mutex_exit
4905 4903 (&cmd->cmd_abort_mutex);
4906 4904 }
4907 4905 }
4908 4906 }
4909 4907 /*
4910 4908	 * Defer releasing the packet if the abort returned
4911 4909	 * BAD_ABORT or timed out, because there is a
4912 4910	 * possibility that the ucode might return it.
4913 4911	 * We wait for at least 20s and let it be released
4914 4912	 * by the sf_watch thread.
4915 4913 */
4916 4914 if (!deferred_destroy) {
4917 4915 cmd->cmd_block->fcp_cntl.cntl_reset = 0;
4918 4916 cmd->cmd_fp_pkt->fcal_pkt_comp =
4919 4917 sf_cmd_callback;
4920 4918 cmd->cmd_state = SF_STATE_IDLE;
4921 4919 /* for cache */
4922 4920 sf_scsi_destroy_pkt(ap, pkt);
4923 4921 }
4924 4922 } else {
4925 4923 cmn_err(CE_WARN, "!sf%d: Target 0x%x Reset Failed. "
4926 4924 "Resource allocation error.\n",
4927 4925 ddi_get_instance(sf->sf_dip),
4928 4926 sf_alpa_to_switch[target->sft_al_pa]);
4929 4927 }
4930 4928 mutex_enter(&sf->sf_mutex);
4931 4929 if ((rval == TRUE) && (lip_cnt == sf->sf_lip_cnt)) {
4932 4930 p->target = target;
4933 4931 p->lip_cnt = lip_cnt;
4934 4932 p->timeout = ddi_get_lbolt() +
4935 4933 drv_usectohz(SF_TARGET_RESET_DELAY);
4936 4934 p->next = sf->sf_reset_list;
4937 4935 sf->sf_reset_list = p;
4938 4936 mutex_exit(&sf->sf_mutex);
4939 4937 mutex_enter(&sf_global_mutex);
4940 4938 if (sf_reset_timeout_id == 0) {
4941 4939 sf_reset_timeout_id = timeout(
4942 4940 sf_check_reset_delay, NULL,
4943 4941 drv_usectohz(SF_TARGET_RESET_DELAY));
4944 4942 }
4945 4943 mutex_exit(&sf_global_mutex);
4946 4944 } else {
4947 4945 if (lip_cnt == sf->sf_lip_cnt) {
4948 4946 mutex_enter(&target->sft_mutex);
4949 4947 target->sft_state &= ~SF_TARGET_BUSY;
4950 4948 for (ntarget = target->sft_next_lun;
4951 4949 ntarget;
4952 4950 ntarget = ntarget->sft_next_lun) {
4953 4951 mutex_enter(&ntarget->sft_mutex);
4954 4952 ntarget->sft_state &= ~SF_TARGET_BUSY;
4955 4953 mutex_exit(&ntarget->sft_mutex);
4956 4954 }
4957 4955 mutex_exit(&target->sft_mutex);
4958 4956 }
4959 4957 mutex_exit(&sf->sf_mutex);
4960 4958 kmem_free(p, sizeof (struct sf_reset_list));
4961 4959 }
4962 4960 } else {
4963 4961 mutex_enter(&sf->sf_mutex);
4964 4962 if ((sf->sf_state == SF_STATE_OFFLINE) &&
4965 4963 (sf_watchdog_time < sf->sf_timer)) {
4966 4964 /*
4967 4965 * We are currently in a lip, so let this one
4968 4966 * finish before forcing another one.
4969 4967 */
4970 4968 mutex_exit(&sf->sf_mutex);
4971 4969 return (TRUE);
4972 4970 }
4973 4971 mutex_exit(&sf->sf_mutex);
4974 4972 		sf_log(sf, CE_NOTE, "!sf: Target driver initiated lip\n");
4975 4973 sf_force_lip(sf);
4976 4974 rval = TRUE;
4977 4975 }
4978 4976 return (rval);
4979 4977 }
4980 4978
4981 4979
4982 4980 /*
4983 4981 * abort all commands for a target
4984 4982 *
4985 4983 * if try_abort is set then send an abort
4986 4984 * if abort is set then this is abort, else this is a reset
4987 4985 */
4988 4986 static void
4989 4987 sf_abort_all(struct sf *sf, struct sf_target *target, int abort, int
4990 4988 lip_cnt, int try_abort)
4991 4989 {
4992 4990 struct sf_target *ntarget;
4993 4991 struct sf_pkt *cmd, *head = NULL, *tail = NULL, *pcmd = NULL, *tcmd;
4994 4992 struct fcal_packet *fpkt;
4995 4993 struct scsi_pkt *pkt;
4996 4994 int rval = FCAL_ABORTED;
4997 4995
4998 4996 /*
4999 4997 * First pull all commands for all LUNs on this target out of the
5000 4998 * overflow list. We can tell it's the same target by comparing
5001 4999 * the node WWN.
5002 5000 */
5003 5001 mutex_enter(&sf->sf_mutex);
5004 5002 if (lip_cnt == sf->sf_lip_cnt) {
5005 5003 mutex_enter(&sf->sf_cmd_mutex);
5006 5004 cmd = sf->sf_pkt_head;
5007 5005 while (cmd != NULL) {
5008 5006 ntarget = ADDR2TARGET(&cmd->cmd_pkt->
5009 5007 pkt_address);
5010 5008 if (ntarget == target) {
5011 5009 if (pcmd != NULL)
5012 5010 pcmd->cmd_next = cmd->cmd_next;
5013 5011 else
5014 5012 sf->sf_pkt_head = cmd->cmd_next;
5015 5013 if (sf->sf_pkt_tail == cmd) {
5016 5014 sf->sf_pkt_tail = pcmd;
5017 5015 if (pcmd != NULL)
5018 5016 pcmd->cmd_next = NULL;
5019 5017 }
5020 5018 tcmd = cmd->cmd_next;
5021 5019 if (head == NULL) {
5022 5020 head = cmd;
5023 5021 tail = cmd;
5024 5022 } else {
5025 5023 tail->cmd_next = cmd;
5026 5024 tail = cmd;
5027 5025 }
5028 5026 cmd->cmd_next = NULL;
5029 5027 cmd = tcmd;
5030 5028 } else {
5031 5029 pcmd = cmd;
5032 5030 cmd = cmd->cmd_next;
5033 5031 }
5034 5032 }
5035 5033 mutex_exit(&sf->sf_cmd_mutex);
5036 5034 }
5037 5035 mutex_exit(&sf->sf_mutex);
5038 5036
5039 5037 /*
5040 5038 * Now complete all the commands on our list. In the process,
5041 5039 * the completion routine may take the commands off the target
5042 5040 * lists.
5043 5041 */
5044 5042 cmd = head;
5045 5043 while (cmd != NULL) {
5046 5044 pkt = cmd->cmd_pkt;
5047 5045 if (abort) {
5048 5046 pkt->pkt_reason = CMD_ABORTED;
5049 5047 pkt->pkt_statistics |= STAT_ABORTED;
5050 5048 } else {
5051 5049 pkt->pkt_reason = CMD_RESET;
5052 5050 pkt->pkt_statistics |= STAT_DEV_RESET;
5053 5051 }
5054 5052 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
5055 5053 cmd->cmd_state = SF_STATE_IDLE;
5056 5054 cmd = cmd->cmd_next;
5057 5055 /*
5058 5056 * call the packet completion routine only for
5059 5057 * non-polled commands. Ignore the polled commands as
5060 5058 * they timeout and will be handled differently
5061 5059 */
5062 5060 if ((pkt->pkt_comp) && !(pkt->pkt_flags & FLAG_NOINTR))
5063 5061 (*pkt->pkt_comp)(pkt);
5064 5062
5065 5063 }
5066 5064
5067 5065 /*
5068 5066 	 * Finally, walk the outstanding commands for each LUN, abort those
5069 5067 	 * that have been issued, and call the completion routine.
5070 5068 	 * For the case where sf_offline_target is called from sf_watch
5071 5069 	 * due to an Offline Timeout, it is quite possible that the soc+
5072 5070 	 * ucode is hosed and therefore cannot return the commands.
5073 5071 	 * Clear up all the issued commands as well.
5074 5072 	 * Try_abort will be false only if sf_abort_all is called from
5075 5073 	 * sf_offline_target.
5076 5074 */
5077 5075
5078 5076 if (try_abort || sf->sf_state == SF_STATE_OFFLINE) {
5079 5077 mutex_enter(&target->sft_pkt_mutex);
5080 5078 cmd = tcmd = target->sft_pkt_head;
5081 5079 while (cmd != (struct sf_pkt *)&target->sft_pkt_head) {
5082 5080 fpkt = cmd->cmd_fp_pkt;
5083 5081 pkt = cmd->cmd_pkt;
5084 5082 mutex_enter(&cmd->cmd_abort_mutex);
5085 5083 if ((cmd->cmd_state == SF_STATE_ISSUED) &&
5086 5084 (fpkt->fcal_cmd_state &
5087 5085 FCAL_CMD_IN_TRANSPORT) &&
5088 5086 ((fpkt->fcal_cmd_state & FCAL_CMD_COMPLETE) ==
5089 5087 0) && !(pkt->pkt_flags & FLAG_NOINTR)) {
5090 5088 cmd->cmd_state = SF_STATE_ABORTING;
5091 5089 cmd->cmd_timeout = sf_watchdog_time +
5092 5090 cmd->cmd_pkt->pkt_time + 20;
5093 5091 mutex_exit(&cmd->cmd_abort_mutex);
5094 5092 mutex_exit(&target->sft_pkt_mutex);
5095 5093 if (try_abort) {
5096 5094 /* call the transport to abort a pkt */
5097 5095 rval = soc_abort(sf->sf_sochandle,
5098 5096 sf->sf_socp,
5099 5097 sf->sf_sochandle->fcal_portno,
5100 5098 fpkt, 1);
5101 5099 }
5102 5100 if ((rval == FCAL_ABORTED) ||
5103 5101 (rval == FCAL_ABORT_FAILED)) {
5104 5102 if (abort) {
5105 5103 pkt->pkt_reason = CMD_ABORTED;
5106 5104 pkt->pkt_statistics |=
5107 5105 STAT_ABORTED;
5108 5106 } else {
5109 5107 pkt->pkt_reason = CMD_RESET;
5110 5108 pkt->pkt_statistics |=
5111 5109 STAT_DEV_RESET;
5112 5110 }
5113 5111 cmd->cmd_state = SF_STATE_IDLE;
5114 5112 if (pkt->pkt_comp)
5115 5113 (*pkt->pkt_comp)(pkt);
5116 5114 }
5117 5115 mutex_enter(&sf->sf_mutex);
5118 5116 if (lip_cnt != sf->sf_lip_cnt) {
5119 5117 mutex_exit(&sf->sf_mutex);
5120 5118 return;
5121 5119 }
5122 5120 mutex_exit(&sf->sf_mutex);
5123 5121 mutex_enter(&target->sft_pkt_mutex);
5124 5122 cmd = target->sft_pkt_head;
5125 5123 } else {
5126 5124 mutex_exit(&cmd->cmd_abort_mutex);
5127 5125 cmd = cmd->cmd_forw;
5128 5126 }
5129 5127 }
5130 5128 mutex_exit(&target->sft_pkt_mutex);
5131 5129 }
5132 5130 }
5133 5131
5134 5132
5135 5133 /*
5136 5134 * called by the transport to start a packet
5137 5135 */
5138 5136 static int
5139 5137 sf_start(struct scsi_address *ap, struct scsi_pkt *pkt)
5140 5138 {
5141 5139 struct sf *sf = ADDR2SF(ap);
5142 5140 struct sf_target *target = ADDR2TARGET(ap);
5143 5141 struct sf_pkt *cmd = PKT2CMD(pkt);
5144 5142 int rval;
5145 5143
5146 5144
5147 5145 SF_DEBUG(6, (sf, CE_NOTE, "sf_start\n"));
5148 5146
5149 5147 if (cmd->cmd_state == SF_STATE_ISSUED) {
5150 5148 cmn_err(CE_PANIC, "sf: issuing packet twice 0x%p\n",
5151 5149 (void *)cmd);
5152 5150 }
5153 5151
5154 5152 /* prepare the packet for transport */
5155 5153 if ((rval = sf_prepare_pkt(sf, cmd, target)) != TRAN_ACCEPT) {
5156 5154 return (rval);
5157 5155 }
5158 5156
5159 5157 if (target->sft_state & (SF_TARGET_BUSY|SF_TARGET_OFFLINE)) {
5160 5158 if (target->sft_state & SF_TARGET_OFFLINE) {
5161 5159 return (TRAN_FATAL_ERROR);
5162 5160 }
5163 5161 if (pkt->pkt_flags & FLAG_NOINTR) {
5164 5162 return (TRAN_BUSY);
5165 5163 }
5166 5164 mutex_enter(&sf->sf_cmd_mutex);
5167 5165 sf->sf_use_lock = TRUE;
5168 5166 goto enque;
5169 5167 }
5170 5168
5171 5169
5172 5170 /* if no interrupts then do polled I/O */
5173 5171 if (pkt->pkt_flags & FLAG_NOINTR) {
5174 5172 return (sf_dopoll(sf, cmd));
5175 5173 }
5176 5174
5177 5175 /* regular interrupt-driven I/O */
5178 5176
5179 5177 if (!sf->sf_use_lock) {
5180 5178
5181 5179 		/* locking not needed: fast path while sf_use_lock is off */
5182 5180
5183 5181 cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
5184 5182 sf_watchdog_time + cmd->cmd_pkt->pkt_time : 0;
5185 5183 cmd->cmd_state = SF_STATE_ISSUED;
5186 5184
5187 5185 /* call the transport to send a pkt */
5188 5186 if (soc_transport(sf->sf_sochandle, cmd->cmd_fp_pkt,
5189 5187 FCAL_NOSLEEP, CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
5190 5188 cmd->cmd_state = SF_STATE_IDLE;
5191 5189 return (TRAN_BADPKT);
5192 5190 }
5193 5191 return (TRAN_ACCEPT);
5194 5192 }
5195 5193
5196 5194 /* regular I/O using locking */
5197 5195
5198 5196 mutex_enter(&sf->sf_cmd_mutex);
5199 5197 if ((sf->sf_ncmds >= sf->sf_throttle) ||
5200 5198 (sf->sf_pkt_head != NULL)) {
5201 5199 enque:
5202 5200 /*
5203 5201 * either we're throttling back or there are already commands
5204 5202 * on the queue, so enqueue this one for later
5205 5203 */
5206 5204 cmd->cmd_flags |= CFLAG_IN_QUEUE;
5207 5205 if (sf->sf_pkt_head != NULL) {
5208 5206 /* add to the queue */
5209 5207 sf->sf_pkt_tail->cmd_next = cmd;
5210 5208 cmd->cmd_next = NULL;
5211 5209 sf->sf_pkt_tail = cmd;
5212 5210 } else {
5213 5211 /* this is the first entry in the queue */
5214 5212 sf->sf_pkt_head = sf->sf_pkt_tail = cmd;
5215 5213 cmd->cmd_next = NULL;
5216 5214 }
5217 5215 mutex_exit(&sf->sf_cmd_mutex);
5218 5216 return (TRAN_ACCEPT);
5219 5217 }
5220 5218
5221 5219 /*
5222 5220 * start this packet now
5223 5221 */
5224 5222
5225 5223 /* still have cmd mutex */
5226 5224 return (sf_start_internal(sf, cmd));
5227 5225 }
5228 5226
5229 5227
5230 5228 /*
5231 5229 * internal routine to start a packet from the queue now
5232 5230 *
5233 5231 * enter with cmd mutex held and leave with it released
5234 5232 */
5235 5233 static int
5236 5234 sf_start_internal(struct sf *sf, struct sf_pkt *cmd)
5237 5235 {
5238 5236 /* we have the cmd mutex */
5239 5237 sf->sf_ncmds++;
5240 5238 mutex_exit(&sf->sf_cmd_mutex);
5241 5239
5242 5240 ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5243 5241 SF_DEBUG(6, (sf, CE_NOTE, "sf_start_internal\n"));
5244 5242
5245 5243 cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ? sf_watchdog_time +
5246 5244 cmd->cmd_pkt->pkt_time : 0;
5247 5245 cmd->cmd_state = SF_STATE_ISSUED;
5248 5246
5249 5247 /* call transport to send the pkt */
5250 5248 if (soc_transport(sf->sf_sochandle, cmd->cmd_fp_pkt, FCAL_NOSLEEP,
5251 5249 CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
5252 5250 cmd->cmd_state = SF_STATE_IDLE;
5253 5251 mutex_enter(&sf->sf_cmd_mutex);
5254 5252 sf->sf_ncmds--;
5255 5253 mutex_exit(&sf->sf_cmd_mutex);
5256 5254 return (TRAN_BADPKT);
5257 5255 }
5258 5256 return (TRAN_ACCEPT);
5259 5257 }
5260 5258
5261 5259
5262 5260 /*
5263 5261 * prepare a packet for transport
5264 5262 */
5265 5263 static int
5266 5264 sf_prepare_pkt(struct sf *sf, struct sf_pkt *cmd, struct sf_target *target)
5267 5265 {
5268 5266 struct fcp_cmd *fcmd = cmd->cmd_block;
5269 5267
5270 5268 /* XXXX Need to set the LUN ? */
5271 5269 bcopy((caddr_t)&target->sft_lun.b,
5272 5270 (caddr_t)&fcmd->fcp_ent_addr,
5273 5271 FCP_LUN_SIZE);
5274 5272 cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
5275 5273 cmd->cmd_pkt->pkt_state = 0;
5276 5274 cmd->cmd_pkt->pkt_statistics = 0;
5277 5275
5278 5276
5279 5277 if ((cmd->cmd_pkt->pkt_comp == NULL) &&
5280 5278 ((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0)) {
5281 5279 return (TRAN_BADPKT);
5282 5280 }
5283 5281
5284 5282 	/* poison the rsp status so completion can detect a missing DMA */
5285 5283 cmd->cmd_rsp_block->fcp_u.i_fcp_status = SF_BAD_DMA_MAGIC;
5286 5284
5287 5285 /* set up amt of I/O to do */
5288 5286 if (cmd->cmd_flags & CFLAG_DMAVALID) {
5289 5287 cmd->cmd_pkt->pkt_resid = cmd->cmd_dmacount;
5290 5288 if (cmd->cmd_flags & CFLAG_CMDIOPB) {
5291 5289 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
5292 5290 DDI_DMA_SYNC_FORDEV);
5293 5291 }
5294 5292 } else {
5295 5293 cmd->cmd_pkt->pkt_resid = 0;
5296 5294 }
5297 5295
5298 5296 /* set up the Tagged Queuing type */
5299 5297 if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
5300 5298 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
5301 5299 } else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
5302 5300 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
5303 5301 }
5304 5302
5305 5303 /*
5306 5304 * Sync the cmd segment
5307 5305 */
5308 5306 (void) ddi_dma_sync(cmd->cmd_cr_pool->cmd_dma_handle,
5309 5307 (caddr_t)fcmd - cmd->cmd_cr_pool->cmd_base,
5310 5308 sizeof (struct fcp_cmd), DDI_DMA_SYNC_FORDEV);
5311 5309
5312 5310 sf_fill_ids(sf, cmd, target);
5313 5311 return (TRAN_ACCEPT);
5314 5312 }
5315 5313
5316 5314
5317 5315 /*
5318 5316 * fill in packet hdr source and destination IDs and hdr byte count
5319 5317 */
5320 5318 static void
5321 5319 sf_fill_ids(struct sf *sf, struct sf_pkt *cmd, struct sf_target *target)
5322 5320 {
5323 5321 struct fcal_packet *fpkt = cmd->cmd_fp_pkt;
5324 5322 fc_frame_header_t *hp;
5325 5323
5326 5324
5327 5325 hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
5328 5326 hp->d_id = target->sft_al_pa;
5329 5327 hp->s_id = sf->sf_al_pa;
5330 5328 fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
5331 5329 cmd->cmd_dmacookie.dmac_size;
5332 5330 }
5333 5331
5334 5332
5335 5333 /*
5336 5334 * do polled I/O using transport
5337 5335 */
5338 5336 static int
5339 5337 sf_dopoll(struct sf *sf, struct sf_pkt *cmd)
5340 5338 {
5341 5339 int timeout;
5342 5340 int rval;
5343 5341
5344 5342
5345 5343 mutex_enter(&sf->sf_cmd_mutex);
5346 5344 sf->sf_ncmds++;
5347 5345 mutex_exit(&sf->sf_cmd_mutex);
5348 5346
5349 5347 timeout = cmd->cmd_pkt->pkt_time ? cmd->cmd_pkt->pkt_time
5350 5348 : SF_POLL_TIMEOUT;
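	/*
	 * pkt_time is in seconds; soc_transport_poll() below appears to
	 * take its timeout in microseconds, hence the multiplication by
	 * 1000000 (an assumption based on that scaling, not on fcal docs).
	 */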
5351 5349 cmd->cmd_timeout = 0;
5352 5350 cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
5353 5351 cmd->cmd_state = SF_STATE_ISSUED;
5354 5352
5355 5353 /* call transport to send a pkt polled */
5356 5354 rval = soc_transport_poll(sf->sf_sochandle, cmd->cmd_fp_pkt,
5357 5355 timeout*1000000, CQ_REQUEST_1);
5358 5356 mutex_enter(&cmd->cmd_abort_mutex);
5359 5357 cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
5360 5358 if (rval != FCAL_TRANSPORT_SUCCESS) {
5361 5359 if (rval == FCAL_TRANSPORT_TIMEOUT) {
5362 5360 cmd->cmd_state = SF_STATE_ABORTING;
5363 5361 mutex_exit(&cmd->cmd_abort_mutex);
5364 5362 (void) sf_target_timeout(sf, cmd);
5365 5363 } else {
5366 5364 mutex_exit(&cmd->cmd_abort_mutex);
5367 5365 }
5368 5366 cmd->cmd_state = SF_STATE_IDLE;
5369 5367 cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
5370 5368 mutex_enter(&sf->sf_cmd_mutex);
5371 5369 sf->sf_ncmds--;
5372 5370 mutex_exit(&sf->sf_cmd_mutex);
5373 5371 return (TRAN_BADPKT);
5374 5372 }
5375 5373 mutex_exit(&cmd->cmd_abort_mutex);
5376 5374 cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
5377 5375 sf_cmd_callback(cmd->cmd_fp_pkt);
5378 5376 return (TRAN_ACCEPT);
5379 5377 }
5380 5378
5381 5379
5382 5380 /* a shortcut for defining debug messages below */
5383 5381 #ifdef DEBUG
5384 5382 #define SF_DMSG1(s) msg1 = s
5385 5383 #else
5386 5384 #define SF_DMSG1(s) /* do nothing */
5387 5385 #endif
5388 5386
5389 5387
5390 5388 /*
5391 5389 * the pkt_comp callback for command packets
5392 5390 */
5393 5391 static void
5394 5392 sf_cmd_callback(struct fcal_packet *fpkt)
5395 5393 {
5396 5394 struct sf_pkt *cmd = (struct sf_pkt *)fpkt->fcal_pkt_private;
5397 5395 struct scsi_pkt *pkt = cmd->cmd_pkt;
5398 5396 struct sf *sf = ADDR2SF(&pkt->pkt_address);
5399 5397 struct sf_target *target = ADDR2TARGET(&pkt->pkt_address);
5400 5398 struct fcp_rsp *rsp;
5401 5399 char *msg1 = NULL;
5402 5400 char *msg2 = NULL;
5403 5401 short ncmds;
5404 5402 int tgt_id;
5405 5403 int good_scsi_status = TRUE;
5406 5404
5407 5405
5408 5406
5409 5407 if (cmd->cmd_state == SF_STATE_IDLE) {
5410 5408 cmn_err(CE_PANIC, "sf: completing idle packet 0x%p\n",
5411 5409 (void *)cmd);
5412 5410 }
5413 5411
5414 5412 mutex_enter(&cmd->cmd_abort_mutex);
5415 5413 if (cmd->cmd_state == SF_STATE_ABORTING) {
5416 5414 /* cmd already being aborted -- nothing to do */
5417 5415 mutex_exit(&cmd->cmd_abort_mutex);
5418 5416 return;
5419 5417 }
5420 5418
5421 5419 cmd->cmd_state = SF_STATE_IDLE;
5422 5420 mutex_exit(&cmd->cmd_abort_mutex);
5423 5421
5424 5422 if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
5425 5423
5426 5424 (void) ddi_dma_sync(cmd->cmd_cr_pool->rsp_dma_handle,
5427 5425 (caddr_t)cmd->cmd_rsp_block - cmd->cmd_cr_pool->rsp_base,
5428 5426 FCP_MAX_RSP_IU_SIZE, DDI_DMA_SYNC_FORKERNEL);
5429 5427
5430 5428 rsp = (struct fcp_rsp *)cmd->cmd_rsp_block;
5431 5429
5432 5430 if (rsp->fcp_u.i_fcp_status == SF_BAD_DMA_MAGIC) {
5433 5431
5434 5432 if (sf_core && (sf_core & SF_CORE_BAD_DMA)) {
5435 5433 sf_token = (int *)(uintptr_t)
5436 5434 fpkt->fcal_socal_request.\
5437 5435 sr_soc_hdr.sh_request_token;
5438 5436 (void) soc_take_core(sf->sf_sochandle,
5439 5437 sf->sf_socp);
5440 5438 }
5441 5439
5442 5440 pkt->pkt_reason = CMD_INCOMPLETE;
5443 5441 pkt->pkt_state = STATE_GOT_BUS;
5444 5442 pkt->pkt_statistics |= STAT_ABORTED;
5445 5443
5446 5444 } else {
5447 5445
5448 5446 pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
5449 5447 STATE_SENT_CMD | STATE_GOT_STATUS;
5450 5448 pkt->pkt_resid = 0;
5451 5449 if (cmd->cmd_flags & CFLAG_DMAVALID) {
5452 5450 pkt->pkt_state |= STATE_XFERRED_DATA;
5453 5451 }
5454 5452
5455 5453 if ((pkt->pkt_scbp != NULL) &&
5456 5454 ((*(pkt->pkt_scbp) =
5457 5455 rsp->fcp_u.fcp_status.scsi_status)
5458 5456 != STATUS_GOOD)) {
5459 5457 good_scsi_status = FALSE;
5460 5458 /*
5461 5459 * The next two checks make sure that if there
5462 5460 * is no sense data or a valid response and
5463 5461 * the command came back with check condition,
5464 5462 * the command should be retried
5465 5463 */
5466 5464 if (!rsp->fcp_u.fcp_status.rsp_len_set &&
5467 5465 !rsp->fcp_u.fcp_status.sense_len_set) {
5468 5466 pkt->pkt_state &= ~STATE_XFERRED_DATA;
5469 5467 pkt->pkt_resid = cmd->cmd_dmacount;
5470 5468 }
5471 5469 }
5472 5470
5473 5471 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
5474 5472 (pkt->pkt_state & STATE_XFERRED_DATA)) {
5475 5473 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0,
5476 5474 (uint_t)0, DDI_DMA_SYNC_FORCPU);
5477 5475 }
5478 5476 /*
5479 5477 * Update the transfer resid, if appropriate
5480 5478 */
5481 5479 if (rsp->fcp_u.fcp_status.resid_over ||
5482 5480 rsp->fcp_u.fcp_status.resid_under)
5483 5481 pkt->pkt_resid = rsp->fcp_resid;
5484 5482
5485 5483 /*
5486 5484 * Check to see if the SCSI command failed.
5487 5485 *
5488 5486 */
5489 5487
5490 5488 /*
5491 5489 * First see if we got a FCP protocol error.
5492 5490 */
5493 5491 if (rsp->fcp_u.fcp_status.rsp_len_set) {
5494 5492 struct fcp_rsp_info *bep;
5495 5493
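				/*
				 * The FCP response-info bytes begin right
				 * after the fcp_response_len field, which is
				 * what the (&rsp->fcp_response_len + 1)
				 * arithmetic below points at.
				 */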
5496 5494 bep = (struct fcp_rsp_info *)
5497 5495 (&rsp->fcp_response_len + 1);
5498 5496 if (bep->rsp_code != FCP_NO_FAILURE) {
5499 5497 pkt->pkt_reason = CMD_TRAN_ERR;
5500 5498 tgt_id = pkt->pkt_address.a_target;
5501 5499 switch (bep->rsp_code) {
5502 5500 case FCP_CMND_INVALID:
5503 5501 SF_DMSG1("FCP_RSP FCP_CMND "
5504 5502 "fields invalid");
5505 5503 break;
5506 5504 case FCP_TASK_MGMT_NOT_SUPPTD:
5507 5505 					SF_DMSG1("FCP_RSP Task "
5508 5506 					    "Management Function "
5509 5507 					    "Not Supported");
5510 5508 break;
5511 5509 case FCP_TASK_MGMT_FAILED:
5512 5510 SF_DMSG1("FCP_RSP Task "
5513 5511 					    "Management Function "
5514 5512 					    "Failed");
5515 5513 sf->sf_stats.tstats[tgt_id].
5516 5514 task_mgmt_failures++;
5517 5515 break;
5518 5516 case FCP_DATA_RO_MISMATCH:
5519 5517 SF_DMSG1("FCP_RSP FCP_DATA RO "
5520 5518 "mismatch with "
5521 5519 "FCP_XFER_RDY DATA_RO");
5522 5520 sf->sf_stats.tstats[tgt_id].
5523 5521 data_ro_mismatches++;
5524 5522 break;
5525 5523 case FCP_DL_LEN_MISMATCH:
5526 5524 SF_DMSG1("FCP_RSP FCP_DATA "
5527 5525 "length "
5528 5526 "different than BURST_LEN");
5529 5527 sf->sf_stats.tstats[tgt_id].
5530 5528 dl_len_mismatches++;
5531 5529 break;
5532 5530 default:
5533 5531 SF_DMSG1("FCP_RSP invalid "
5534 5532 "RSP_CODE");
5535 5533 break;
5536 5534 }
5537 5535 }
5538 5536 }
5539 5537
5540 5538 /*
5541 5539 * See if we got a SCSI error with sense data
5542 5540 */
5543 5541 if (rsp->fcp_u.fcp_status.sense_len_set) {
5544 5542 uchar_t rqlen = min(rsp->fcp_sense_len,
5545 5543 sizeof (struct scsi_extended_sense));
5546 5544 caddr_t sense = (caddr_t)rsp +
5547 5545 sizeof (struct fcp_rsp) +
5548 5546 rsp->fcp_response_len;
5549 5547 struct scsi_arq_status *arq;
5550 5548 struct scsi_extended_sense *sensep =
5551 5549 (struct scsi_extended_sense *)sense;
5552 5550
5553 5551 if (rsp->fcp_u.fcp_status.scsi_status !=
5554 5552 STATUS_GOOD) {
5555 5553 if (rsp->fcp_u.fcp_status.scsi_status
5556 5554 == STATUS_CHECK) {
5557 5555 if (sensep->es_key ==
5558 5556 KEY_RECOVERABLE_ERROR)
5559 5557 good_scsi_status = 1;
5560 5558 if (sensep->es_key ==
5561 5559 KEY_UNIT_ATTENTION &&
5562 5560 sensep->es_add_code == 0x3f &&
5563 5561 sensep->es_qual_code == 0x0e) {
5564 5562 /* REPORT_LUNS_HAS_CHANGED */
5565 5563 sf_log(sf, CE_NOTE,
5566 5564 "!REPORT_LUNS_HAS_CHANGED\n");
5567 5565 sf_force_lip(sf);
5568 5566 }
5569 5567 }
5570 5568 }
5571 5569
5572 5570 if ((pkt->pkt_scbp != NULL) &&
5573 5571 (cmd->cmd_scblen >=
5574 5572 sizeof (struct scsi_arq_status))) {
5575 5573
5576 5574 pkt->pkt_state |= STATE_ARQ_DONE;
5577 5575
5578 5576 arq = (struct scsi_arq_status *)pkt->pkt_scbp;
5579 5577 /*
5580 5578 * copy out sense information
5581 5579 */
5582 5580 bcopy(sense, (caddr_t)&arq->sts_sensedata,
5583 5581 rqlen);
5584 5582 arq->sts_rqpkt_resid =
5585 5583 sizeof (struct scsi_extended_sense) -
5586 5584 rqlen;
5587 5585 *((uchar_t *)&arq->sts_rqpkt_status) =
5588 5586 STATUS_GOOD;
5589 5587 arq->sts_rqpkt_reason = 0;
5590 5588 arq->sts_rqpkt_statistics = 0;
5591 5589 arq->sts_rqpkt_state = STATE_GOT_BUS |
5592 5590 STATE_GOT_TARGET | STATE_SENT_CMD |
5593 5591 STATE_GOT_STATUS | STATE_ARQ_DONE |
5594 5592 STATE_XFERRED_DATA;
5595 5593 }
5596 5594 target->sft_alive = TRUE;
5597 5595 }
5598 5596
5599 5597 /*
5600 5598 			 * The firmware returns the number of bytes actually
5601 5599 			 * transferred into/out of the host.  Compare this with
5602 5600 			 * what we asked for; a mismatch suggests lost frames.
5603 5601 */
5604 5602 if ((pkt->pkt_reason == 0) && (pkt->pkt_resid == 0) &&
5605 5603 (good_scsi_status) &&
5606 5604 (pkt->pkt_state & STATE_XFERRED_DATA) &&
5607 5605 (!(cmd->cmd_flags & CFLAG_CMDIOPB)) &&
5608 5606 (target->sft_device_type != DTYPE_ESI)) {
5609 5607 int byte_cnt =
5610 5608 fpkt->fcal_socal_request.
5611 5609 sr_soc_hdr.sh_byte_cnt;
5612 5610 if (cmd->cmd_flags & CFLAG_DMASEND) {
5613 5611 if (byte_cnt != 0) {
5614 5612 sf_log(sf, CE_NOTE,
5615 5613 "!sf_cmd_callback: Lost Frame: "
5616 5614 "(write) received 0x%x expected"
5617 5615 " 0x%x target 0x%x\n",
5618 5616 byte_cnt, cmd->cmd_dmacount,
5619 5617 sf_alpa_to_switch[
5620 5618 target->sft_al_pa]);
5621 5619 pkt->pkt_reason = CMD_INCOMPLETE;
5622 5620 pkt->pkt_statistics |= STAT_ABORTED;
5623 5621 }
5624 5622 } else if (byte_cnt < cmd->cmd_dmacount) {
5625 5623 sf_log(sf, CE_NOTE,
5626 5624 "!sf_cmd_callback: "
5627 5625 "Lost Frame: (read) "
5628 5626 "received 0x%x expected 0x%x "
5629 5627 "target 0x%x\n", byte_cnt,
5630 5628 cmd->cmd_dmacount,
5631 5629 sf_alpa_to_switch[
5632 5630 target->sft_al_pa]);
5633 5631 pkt->pkt_reason = CMD_INCOMPLETE;
5634 5632 pkt->pkt_statistics |= STAT_ABORTED;
5635 5633 }
5636 5634 }
5637 5635 }
5638 5636
5639 5637 } else {
5640 5638
5641 5639 /* pkt status was not ok */
5642 5640
5643 5641 switch (fpkt->fcal_pkt_status) {
5644 5642
5645 5643 case FCAL_STATUS_ERR_OFFLINE:
5646 5644 SF_DMSG1("Fibre Channel Offline");
5647 5645 mutex_enter(&target->sft_mutex);
5648 5646 if (!(target->sft_state & SF_TARGET_OFFLINE)) {
5649 5647 target->sft_state |= (SF_TARGET_BUSY
5650 5648 | SF_TARGET_MARK);
5651 5649 }
5652 5650 mutex_exit(&target->sft_mutex);
5653 5651 (void) ndi_event_retrieve_cookie(sf->sf_event_hdl,
5654 5652 target->sft_dip, FCAL_REMOVE_EVENT,
5655 5653 &sf_remove_eid, NDI_EVENT_NOPASS);
5656 5654 (void) ndi_event_run_callbacks(sf->sf_event_hdl,
5657 5655 target->sft_dip, sf_remove_eid, NULL);
5658 5656 pkt->pkt_reason = CMD_TRAN_ERR;
5659 5657 pkt->pkt_statistics |= STAT_BUS_RESET;
5660 5658 break;
5661 5659
5662 5660 case FCAL_STATUS_MAX_XCHG_EXCEEDED:
5663 5661 sf_throttle(sf);
5664 5662 sf->sf_use_lock = TRUE;
5665 5663 pkt->pkt_reason = CMD_TRAN_ERR;
5666 5664 pkt->pkt_state = STATE_GOT_BUS;
5667 5665 pkt->pkt_statistics |= STAT_ABORTED;
5668 5666 break;
5669 5667
5670 5668 case FCAL_STATUS_TIMEOUT:
5671 5669 SF_DMSG1("Fibre Channel Timeout");
5672 5670 pkt->pkt_reason = CMD_TIMEOUT;
5673 5671 break;
5674 5672
5675 5673 case FCAL_STATUS_ERR_OVERRUN:
5676 5674 SF_DMSG1("CMD_DATA_OVR");
5677 5675 pkt->pkt_reason = CMD_DATA_OVR;
5678 5676 break;
5679 5677
5680 5678 case FCAL_STATUS_UNKNOWN_CQ_TYPE:
5681 5679 SF_DMSG1("Unknown CQ type");
5682 5680 pkt->pkt_reason = CMD_TRAN_ERR;
5683 5681 break;
5684 5682
5685 5683 case FCAL_STATUS_BAD_SEG_CNT:
5686 5684 SF_DMSG1("Bad SEG CNT");
5687 5685 pkt->pkt_reason = CMD_TRAN_ERR;
5688 5686 break;
5689 5687
5690 5688 case FCAL_STATUS_BAD_XID:
5691 5689 SF_DMSG1("Fibre Channel Invalid X_ID");
5692 5690 pkt->pkt_reason = CMD_TRAN_ERR;
5693 5691 break;
5694 5692
5695 5693 case FCAL_STATUS_XCHG_BUSY:
5696 5694 SF_DMSG1("Fibre Channel Exchange Busy");
5697 5695 pkt->pkt_reason = CMD_TRAN_ERR;
5698 5696 break;
5699 5697
5700 5698 case FCAL_STATUS_INSUFFICIENT_CQES:
5701 5699 SF_DMSG1("Insufficient CQEs");
5702 5700 pkt->pkt_reason = CMD_TRAN_ERR;
5703 5701 break;
5704 5702
5705 5703 case FCAL_STATUS_ALLOC_FAIL:
5706 5704 SF_DMSG1("ALLOC FAIL");
5707 5705 pkt->pkt_reason = CMD_TRAN_ERR;
5708 5706 break;
5709 5707
5710 5708 case FCAL_STATUS_BAD_SID:
5711 5709 SF_DMSG1("Fibre Channel Invalid S_ID");
5712 5710 pkt->pkt_reason = CMD_TRAN_ERR;
5713 5711 break;
5714 5712
5715 5713 case FCAL_STATUS_INCOMPLETE_DMA_ERR:
5716 5714 if (sf_core && (sf_core & SF_CORE_INCOMPLETE_DMA)) {
5717 5715 sf_token = (int *)(uintptr_t)
5718 5716 fpkt->fcal_socal_request.\
5719 5717 sr_soc_hdr.sh_request_token;
5720 5718 (void) soc_take_core(sf->sf_sochandle,
5721 5719 sf->sf_socp);
5722 5720 sf_core = 0;
5723 5721 }
5724 5722 msg2 =
5725 5723 "INCOMPLETE DMA XFER due to bad SOC+ card, replace HBA";
5726 5724 pkt->pkt_reason = CMD_INCOMPLETE;
5727 5725 pkt->pkt_state = STATE_GOT_BUS;
5728 5726 pkt->pkt_statistics |= STAT_ABORTED;
5729 5727 break;
5730 5728
5731 5729 case FCAL_STATUS_CRC_ERR:
5732 5730 msg2 = "Fibre Channel CRC Error on frames";
5733 5731 pkt->pkt_reason = CMD_INCOMPLETE;
5734 5732 pkt->pkt_state = STATE_GOT_BUS;
5735 5733 pkt->pkt_statistics |= STAT_ABORTED;
5736 5734 break;
5737 5735
5738 5736 case FCAL_STATUS_NO_SEQ_INIT:
5739 5737 SF_DMSG1("Fibre Channel Seq Init Error");
5740 5738 pkt->pkt_reason = CMD_TRAN_ERR;
5741 5739 break;
5742 5740
5743 5741 case FCAL_STATUS_OPEN_FAIL:
5744 5742 pkt->pkt_reason = CMD_TRAN_ERR;
5745 5743 SF_DMSG1("Fibre Channel Open Failure");
5746 5744 if ((target->sft_state & (SF_TARGET_BUSY |
5747 5745 SF_TARGET_MARK | SF_TARGET_OFFLINE)) == 0) {
5748 5746 sf_log(sf, CE_NOTE,
5749 5747 "!Open failure to target 0x%x "
5750 5748 "forcing LIP\n",
5751 5749 sf_alpa_to_switch[target->sft_al_pa]);
5752 5750 sf_force_lip(sf);
5753 5751 }
5754 5752 break;
5755 5753
5756 5754
5757 5755 case FCAL_STATUS_ONLINE_TIMEOUT:
5758 5756 SF_DMSG1("Fibre Channel Online Timeout");
5759 5757 pkt->pkt_reason = CMD_TRAN_ERR;
5760 5758 break;
5761 5759
5762 5760 default:
5763 5761 SF_DMSG1("Unknown FC Status");
5764 5762 pkt->pkt_reason = CMD_TRAN_ERR;
5765 5763 break;
5766 5764 }
5767 5765 }
5768 5766
5769 5767 #ifdef DEBUG
5770 5768 /*
5771 5769 * msg1 will be non-NULL if we've detected some sort of error
5772 5770 */
5773 5771 if (msg1 != NULL && sfdebug >= 4) {
5774 5772 sf_log(sf, CE_WARN,
5775 5773 "!Transport error on cmd=0x%p target=0x%x: %s\n",
5776 5774 (void *)fpkt, pkt->pkt_address.a_target, msg1);
5777 5775 }
5778 5776 #endif
5779 5777
5780 5778 if (msg2 != NULL) {
5781 5779 sf_log(sf, CE_WARN, "!Transport error on target=0x%x: %s\n",
5782 5780 pkt->pkt_address.a_target, msg2);
5783 5781 }
5784 5782
5785 5783 ncmds = fpkt->fcal_ncmds;
5786 5784 ASSERT(ncmds >= 0);
5787 5785 if (ncmds >= (sf->sf_throttle - SF_HI_CMD_DELTA)) {
5788 5786 #ifdef DEBUG
5789 5787 if (!sf->sf_use_lock) {
5790 5788 SF_DEBUG(4, (sf, CE_NOTE, "use lock flag on\n"));
5791 5789 }
5792 5790 #endif
5793 5791 sf->sf_use_lock = TRUE;
5794 5792 }
5795 5793
5796 5794 mutex_enter(&sf->sf_cmd_mutex);
5797 5795 sf->sf_ncmds = ncmds;
5798 5796 sf_throttle_start(sf);
5799 5797 mutex_exit(&sf->sf_cmd_mutex);
5800 5798
5801 5799 if (!msg1 && !msg2)
5802 5800 SF_DEBUG(6, (sf, CE_NOTE, "Completing pkt 0x%p\n",
5803 5801 (void *)pkt));
5804 5802 if (pkt->pkt_comp != NULL) {
5805 5803 (*pkt->pkt_comp)(pkt);
5806 5804 }
5807 5805 }
5808 5806
5809 5807 #undef SF_DMSG1
5810 5808
5811 5809
5812 5810
5813 5811 /*
5814 5812 * start throttling for this instance
5815 5813 */
5816 5814 static void
5817 5815 sf_throttle_start(struct sf *sf)
5818 5816 {
5819 5817 struct sf_pkt *cmd, *prev_cmd = NULL;
5820 5818 struct scsi_pkt *pkt;
5821 5819 struct sf_target *target;
5822 5820
5823 5821
5824 5822 ASSERT(mutex_owned(&sf->sf_cmd_mutex));
5825 5823
5826 5824 cmd = sf->sf_pkt_head;
5827 5825 while ((cmd != NULL) &&
5828 5826 (sf->sf_state == SF_STATE_ONLINE) &&
5829 5827 (sf->sf_ncmds < sf->sf_throttle)) {
5830 5828
5831 5829 pkt = CMD2PKT(cmd);
5832 5830
5833 5831 target = ADDR2TARGET(&pkt->pkt_address);
5834 5832 if (target->sft_state & SF_TARGET_BUSY) {
5835 5833 /* this command is busy -- go to next */
5836 5834 ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5837 5835 prev_cmd = cmd;
5838 5836 cmd = cmd->cmd_next;
5839 5837 continue;
5840 5838 }
5841 5839
5842 5840 ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5843 5841
5844 5842 /* this cmd not busy and not issued */
5845 5843
5846 5844 /* remove this packet from the queue */
5847 5845 if (sf->sf_pkt_head == cmd) {
5848 5846 /* this was the first packet */
5849 5847 sf->sf_pkt_head = cmd->cmd_next;
5850 5848 } else if (sf->sf_pkt_tail == cmd) {
5851 5849 /* this was the last packet */
5852 5850 sf->sf_pkt_tail = prev_cmd;
5853 5851 if (prev_cmd != NULL) {
5854 5852 prev_cmd->cmd_next = NULL;
5855 5853 }
5856 5854 } else {
5857 5855 /* some packet in the middle of the queue */
5858 5856 ASSERT(prev_cmd != NULL);
5859 5857 prev_cmd->cmd_next = cmd->cmd_next;
5860 5858 }
5861 5859 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
5862 5860
5863 5861 if (target->sft_state & SF_TARGET_OFFLINE) {
5864 5862 mutex_exit(&sf->sf_cmd_mutex);
5865 5863 pkt->pkt_reason = CMD_TRAN_ERR;
5866 5864 if (pkt->pkt_comp != NULL) {
5867 5865 (*pkt->pkt_comp)(cmd->cmd_pkt);
5868 5866 }
5869 5867 } else {
5870 5868 sf_fill_ids(sf, cmd, target);
5871 5869 if (sf_start_internal(sf, cmd) != TRAN_ACCEPT) {
5872 5870 pkt->pkt_reason = CMD_TRAN_ERR;
5873 5871 if (pkt->pkt_comp != NULL) {
5874 5872 (*pkt->pkt_comp)(cmd->cmd_pkt);
5875 5873 }
5876 5874 }
5877 5875 }
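		/*
		 * The cmd mutex was dropped while completing or starting
		 * the packet, so the queue may have changed; rescan from
		 * the head.
		 */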
5878 5876 mutex_enter(&sf->sf_cmd_mutex);
5879 5877 cmd = sf->sf_pkt_head;
5880 5878 prev_cmd = NULL;
5881 5879 }
5882 5880 }
5883 5881
5884 5882
5885 5883 /*
5886 5884 * called when the max exchange value is exceeded to throttle back commands
5887 5885 */
5888 5886 static void
5889 5887 sf_throttle(struct sf *sf)
5890 5888 {
5891 5889 int cmdmax = sf->sf_sochandle->fcal_cmdmax;
5892 5890
5893 5891
5894 5892 mutex_enter(&sf->sf_cmd_mutex);
5895 5893
5896 5894 sf->sf_flag = TRUE;
5897 5895
5898 5896 if (sf->sf_ncmds > (cmdmax / 2)) {
5899 5897 sf->sf_throttle = cmdmax / 2;
5900 5898 } else {
5901 5899 if (sf->sf_ncmds > SF_DECR_DELTA) {
5902 5900 sf->sf_throttle = sf->sf_ncmds - SF_DECR_DELTA;
5903 5901 } else {
5904 5902 /*
5905 5903 			 * This case is just a safeguard; it should not really
5906 5904 			 * happen (ncmds < SF_DECR_DELTA with MAX_EXCHG exceeded).
5907 5905 */
5908 5906 sf->sf_throttle = SF_DECR_DELTA;
5909 5907 }
5910 5908 }
5911 5909 mutex_exit(&sf->sf_cmd_mutex);
5912 5910
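	/*
	 * The sibling instance presumably shares the same socal exchange
	 * pool (note the shared fcal_cmdmax above), so throttle it back
	 * with the same policy.
	 */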
5913 5911 sf = sf->sf_sibling;
5914 5912 if (sf != NULL) {
5915 5913 mutex_enter(&sf->sf_cmd_mutex);
5916 5914 sf->sf_flag = TRUE;
5917 5915 if (sf->sf_ncmds >= (cmdmax / 2)) {
5918 5916 sf->sf_throttle = cmdmax / 2;
5919 5917 } else {
5920 5918 if (sf->sf_ncmds > SF_DECR_DELTA) {
5921 5919 sf->sf_throttle = sf->sf_ncmds - SF_DECR_DELTA;
5922 5920 } else {
5923 5921 sf->sf_throttle = SF_DECR_DELTA;
5924 5922 }
5925 5923 }
5926 5924
5927 5925 mutex_exit(&sf->sf_cmd_mutex);
5928 5926 }
5929 5927 }
5930 5928
5931 5929
5932 5930 /*
5933 5931 * sf watchdog routine, called for a timeout
5934 5932 */
5935 5933 /*ARGSUSED*/
5936 5934 static void
5937 5935 sf_watch(void *arg)
5938 5936 {
5939 5937 struct sf *sf;
5940 5938 struct sf_els_hdr *privp;
5941 5939 static int count = 0, pscan_count = 0;
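	/*
	 * count and pscan_count are static so they persist across watchdog
	 * ticks; they pace the packet and cr-pool scans to once every
	 * sf_pkt_scan_cnt and sf_pool_scan_cnt ticks, respectively.
	 */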
5942 5940 int cmdmax, i, mescount = 0;
5943 5941 struct sf_target *target;
5944 5942
5945 5943
5946 5944 sf_watchdog_time += sf_watchdog_timeout;
5947 5945 count++;
5948 5946 pscan_count++;
5949 5947
5950 5948 mutex_enter(&sf_global_mutex);
5951 5949 sf_watch_running = 1;
5952 5950 for (sf = sf_head; sf != NULL; sf = sf->sf_next) {
5953 5951
5954 5952 mutex_exit(&sf_global_mutex);
5955 5953
5956 5954 /* disable throttling while we're suspended */
5957 5955 mutex_enter(&sf->sf_mutex);
5958 5956 if (sf->sf_state & SF_STATE_SUSPENDED) {
5959 5957 mutex_exit(&sf->sf_mutex);
5960 5958 SF_DEBUG(1, (sf, CE_CONT,
5961 5959 			    "sf_watch, sf%d: throttle disabled "
5962 5960 "due to DDI_SUSPEND\n",
5963 5961 ddi_get_instance(sf->sf_dip)));
5964 5962 mutex_enter(&sf_global_mutex);
5965 5963 continue;
5966 5964 }
5967 5965 mutex_exit(&sf->sf_mutex);
5968 5966
5969 5967 cmdmax = sf->sf_sochandle->fcal_cmdmax;
5970 5968
5971 5969 if (sf->sf_take_core) {
5972 5970 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
5973 5971 }
5974 5972
5975 5973 mutex_enter(&sf->sf_cmd_mutex);
5976 5974
5977 5975 if (!sf->sf_flag) {
5978 5976 if (sf->sf_throttle < (cmdmax / 2)) {
5979 5977 sf->sf_throttle = cmdmax / 2;
5980 5978 } else if ((sf->sf_throttle += SF_INCR_DELTA) >
5981 5979 cmdmax) {
5982 5980 sf->sf_throttle = cmdmax;
5983 5981 }
5984 5982 } else {
5985 5983 sf->sf_flag = FALSE;
5986 5984 }
5987 5985
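		/*
		 * Exponentially-smoothed count of outstanding commands:
		 * avg' = (ncmds + avg) / 4, which settles near ncmds / 3.
		 * It is used below to decide when to trim the cr pool.
		 */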
5988 5986 sf->sf_ncmds_exp_avg = (sf->sf_ncmds + sf->sf_ncmds_exp_avg)
5989 5987 >> 2;
5990 5988 if ((sf->sf_ncmds <= (sf->sf_throttle - SF_LO_CMD_DELTA)) &&
5991 5989 (sf->sf_pkt_head == NULL)) {
5992 5990 #ifdef DEBUG
5993 5991 if (sf->sf_use_lock) {
5994 5992 SF_DEBUG(4, (sf, CE_NOTE,
5995 5993 "use lock flag off\n"));
5996 5994 }
5997 5995 #endif
5998 5996 sf->sf_use_lock = FALSE;
5999 5997 }
6000 5998
6001 5999 if (sf->sf_state == SF_STATE_ONLINE && sf->sf_pkt_head &&
6002 6000 sf->sf_ncmds < sf->sf_throttle) {
6003 6001 sf_throttle_start(sf);
6004 6002 }
6005 6003
6006 6004 mutex_exit(&sf->sf_cmd_mutex);
6007 6005
6008 6006 if (pscan_count >= sf_pool_scan_cnt) {
6009 6007 if (sf->sf_ncmds_exp_avg < (sf->sf_cr_pool_cnt <<
6010 6008 SF_LOG2_ELEMS_IN_POOL) - SF_FREE_CR_EPSILON) {
6011 6009 sf_crpool_free(sf);
6012 6010 }
6013 6011 }
6014 6012 mutex_enter(&sf->sf_mutex);
6015 6013
6016 6014 privp = sf->sf_els_list;
6017 6015 while (privp != NULL) {
6018 6016 if (privp->timeout < sf_watchdog_time) {
6019 6017 /* timeout this command */
6020 6018 privp = sf_els_timeout(sf, privp);
6021 6019 } else if ((privp->timeout == SF_INVALID_TIMEOUT) &&
6022 6020 (privp->lip_cnt != sf->sf_lip_cnt)) {
6023 6021 if (privp->prev != NULL) {
6024 6022 privp->prev->next = privp->next;
6025 6023 }
6026 6024 if (sf->sf_els_list == privp) {
6027 6025 sf->sf_els_list = privp->next;
6028 6026 }
6029 6027 if (privp->next != NULL) {
6030 6028 privp->next->prev = privp->prev;
6031 6029 }
6032 6030 mutex_exit(&sf->sf_mutex);
6033 6031 sf_els_free(privp->fpkt);
6034 6032 mutex_enter(&sf->sf_mutex);
6035 6033 privp = sf->sf_els_list;
6036 6034 } else {
6037 6035 privp = privp->next;
6038 6036 }
6039 6037 }
6040 6038
6041 6039 if (sf->sf_online_timer && sf->sf_online_timer <
6042 6040 sf_watchdog_time) {
6043 6041 for (i = 0; i < sf_max_targets; i++) {
6044 6042 target = sf->sf_targets[i];
6045 6043 if (target != NULL) {
6046 6044 if (!mescount && target->sft_state &
6047 6045 SF_TARGET_BUSY) {
6048 6046 sf_log(sf, CE_WARN, "!Loop "
6049 6047 "Unstable: Failed to bring "
6050 6048 "Loop Online\n");
6051 6049 mescount = 1;
6052 6050 }
6053 6051 target->sft_state |= SF_TARGET_MARK;
6054 6052 }
6055 6053 }
6056 6054 sf_finish_init(sf, sf->sf_lip_cnt);
6057 6055 sf->sf_state = SF_STATE_INIT;
6058 6056 sf->sf_online_timer = 0;
6059 6057 }
6060 6058
6061 6059 if (sf->sf_state == SF_STATE_ONLINE) {
6062 6060 mutex_exit(&sf->sf_mutex);
6063 6061 if (count >= sf_pkt_scan_cnt) {
6064 6062 sf_check_targets(sf);
6065 6063 }
6066 6064 } else if ((sf->sf_state == SF_STATE_OFFLINE) &&
6067 6065 (sf->sf_timer < sf_watchdog_time)) {
6068 6066 for (i = 0; i < sf_max_targets; i++) {
6069 6067 target = sf->sf_targets[i];
6070 6068 if ((target != NULL) &&
6071 6069 (target->sft_state &
6072 6070 SF_TARGET_BUSY)) {
6073 6071 sf_log(sf, CE_WARN,
6074 6072 "!Offline Timeout\n");
6075 6073 if (sf_core && (sf_core &
6076 6074 SF_CORE_OFFLINE_TIMEOUT)) {
6077 6075 (void) soc_take_core(
6078 6076 sf->sf_sochandle,
6079 6077 sf->sf_socp);
6080 6078 sf_core = 0;
6081 6079 }
6082 6080 break;
6083 6081 }
6084 6082 }
6085 6083 sf_finish_init(sf, sf->sf_lip_cnt);
6086 6084 sf->sf_state = SF_STATE_INIT;
6087 6085 mutex_exit(&sf->sf_mutex);
6088 6086 } else {
6089 6087 mutex_exit(&sf->sf_mutex);
6090 6088 }
6091 6089 mutex_enter(&sf_global_mutex);
6092 6090 }
6093 6091 mutex_exit(&sf_global_mutex);
6094 6092 if (count >= sf_pkt_scan_cnt) {
6095 6093 count = 0;
6096 6094 }
6097 6095 if (pscan_count >= sf_pool_scan_cnt) {
6098 6096 pscan_count = 0;
6099 6097 }
6100 6098
6101 6099 /* reset timeout */
6102 6100 sf_watchdog_id = timeout(sf_watch, (caddr_t)0, sf_watchdog_tick);
6103 6101
6104 6102 /* signal waiting thread */
6105 6103 mutex_enter(&sf_global_mutex);
6106 6104 sf_watch_running = 0;
6107 6105 cv_broadcast(&sf_watch_cv);
6108 6106 mutex_exit(&sf_global_mutex);
6109 6107 }
6110 6108
6111 6109
6112 6110 /*
6113 6111 * called during a timeout to check targets
6114 6112 */
6115 6113 static void
6116 6114 sf_check_targets(struct sf *sf)
6117 6115 {
6118 6116 struct sf_target *target;
6119 6117 int i;
6120 6118 struct sf_pkt *cmd;
6121 6119 struct scsi_pkt *pkt;
6122 6120 int lip_cnt;
6123 6121
6124 6122 mutex_enter(&sf->sf_mutex);
6125 6123 lip_cnt = sf->sf_lip_cnt;
6126 6124 mutex_exit(&sf->sf_mutex);
6127 6125
6128 6126 	/* scan all possible targets */
6129 6127 for (i = 0; i < sf_max_targets; i++) {
6130 6128 target = sf->sf_targets[i];
6131 6129 while (target != NULL) {
6132 6130 mutex_enter(&target->sft_pkt_mutex);
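			/*
			 * A target that has completed I/O recently is
			 * presumed healthy: put off the per-packet scan
			 * for up to sf_target_scan_cnt watchdog passes.
			 */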
6133 6131 if (target->sft_alive && target->sft_scan_count !=
6134 6132 sf_target_scan_cnt) {
6135 6133 target->sft_alive = 0;
6136 6134 target->sft_scan_count++;
6137 6135 mutex_exit(&target->sft_pkt_mutex);
6138 6136 return;
6139 6137 }
6140 6138 target->sft_alive = 0;
6141 6139 target->sft_scan_count = 0;
6142 6140 cmd = target->sft_pkt_head;
6143 6141 while (cmd != (struct sf_pkt *)&target->sft_pkt_head) {
6144 6142 mutex_enter(&cmd->cmd_abort_mutex);
6145 6143 if (cmd->cmd_state == SF_STATE_ISSUED &&
6146 6144 ((cmd->cmd_timeout && sf_watchdog_time >
6147 6145 #ifdef DEBUG
6148 6146 cmd->cmd_timeout) || sf_abort_flag)) {
6149 6147 sf_abort_flag = 0;
6150 6148 #else
6151 6149 cmd->cmd_timeout))) {
6152 6150 #endif
6153 6151 cmd->cmd_timeout = 0;
6154 6152 /* prevent reset from getting at this packet */
6155 6153 cmd->cmd_state = SF_STATE_ABORTING;
6156 6154 mutex_exit(&cmd->cmd_abort_mutex);
6157 6155 mutex_exit(&target->sft_pkt_mutex);
6158 6156 sf->sf_stats.tstats[i].timeouts++;
6159 6157 if (sf_target_timeout(sf, cmd))
6160 6158 return;
6161 6159 else {
6162 6160 if (lip_cnt != sf->sf_lip_cnt) {
6163 6161 return;
6164 6162 } else {
6165 6163 mutex_enter(&target->
6166 6164 sft_pkt_mutex);
6167 6165 cmd = target->
6168 6166 sft_pkt_head;
6169 6167 }
6170 6168 }
6171 6169 				/*
6172 6170 				 * If the abort and lip fail, a reset is done, but
6173 6171 				 * it ignores this packet.  We have waited at least
6174 6172 				 * 20 seconds since the initial timeout, so complete
6175 6173 				 * it here.  This also handles spurious bad aborts.
6176 6174 				 */
6177 6175 } else if ((cmd->cmd_state ==
6178 6176 SF_STATE_ABORTING) && (cmd->cmd_timeout
6179 6177 <= sf_watchdog_time)) {
6180 6178 cmd->cmd_state = SF_STATE_IDLE;
6181 6179 mutex_exit(&cmd->cmd_abort_mutex);
6182 6180 mutex_exit(&target->sft_pkt_mutex);
6183 6181 SF_DEBUG(1, (sf, CE_NOTE,
6184 6182 "Command 0x%p to sft 0x%p"
6185 6183 " delayed release\n",
6186 6184 (void *)cmd, (void *)target));
6187 6185 pkt = cmd->cmd_pkt;
6188 6186 pkt->pkt_statistics |=
6189 6187 (STAT_TIMEOUT|STAT_ABORTED);
6190 6188 pkt->pkt_reason = CMD_TIMEOUT;
6191 6189 if (pkt->pkt_comp) {
6192 6190 scsi_hba_pkt_comp(pkt);
6193 6191 /* handle deferred_destroy case */
6194 6192 } else {
6195 6193 if ((cmd->cmd_block->fcp_cntl.
6196 6194 cntl_reset == 1) ||
6197 6195 (cmd->cmd_block->
6198 6196 fcp_cntl.cntl_abort_tsk ==
6199 6197 1)) {
6200 6198 cmd->cmd_block->
6201 6199 fcp_cntl.
6202 6200 cntl_reset = 0;
6203 6201 cmd->cmd_block->
6204 6202 fcp_cntl.
6205 6203 cntl_abort_tsk = 0;
6206 6204 cmd->cmd_fp_pkt->
6207 6205 fcal_pkt_comp =
6208 6206 sf_cmd_callback;
6209 6207 /* for cache */
6210 6208 sf_scsi_destroy_pkt
6211 6209 (&pkt->pkt_address,
6212 6210 pkt);
6213 6211 }
6214 6212 }
6215 6213 mutex_enter(&target->sft_pkt_mutex);
6216 6214 cmd = target->sft_pkt_head;
6217 6215 } else {
6218 6216 mutex_exit(&cmd->cmd_abort_mutex);
6219 6217 cmd = cmd->cmd_forw;
6220 6218 }
6221 6219 }
6222 6220 mutex_exit(&target->sft_pkt_mutex);
6223 6221 target = target->sft_next_lun;
6224 6222 }
6225 6223 }
6226 6224 }
6227 6225
6228 6226
6229 6227 /*
6230 6228 * a command to a target has timed out
6231 6229 * return TRUE iff cmd abort failed or timed out, else return FALSE
6232 6230 */
6233 6231 static int
6234 6232 sf_target_timeout(struct sf *sf, struct sf_pkt *cmd)
6235 6233 {
6236 6234 int rval;
6237 6235 struct scsi_pkt *pkt;
6238 6236 struct fcal_packet *fpkt;
6239 6237 int tgt_id;
6240 6238 int retval = FALSE;
6241 6239
6242 6240
6243 6241 SF_DEBUG(1, (sf, CE_NOTE, "Command 0x%p to target %x timed out\n",
6244 6242 (void *)cmd->cmd_fp_pkt, cmd->cmd_pkt->pkt_address.a_target));
6245 6243
6246 6244 fpkt = cmd->cmd_fp_pkt;
6247 6245
6248 6246 if (sf_core && (sf_core & SF_CORE_CMD_TIMEOUT)) {
6249 6247 sf_token = (int *)(uintptr_t)
6250 6248 fpkt->fcal_socal_request.sr_soc_hdr.\
6251 6249 sh_request_token;
6252 6250 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6253 6251 sf_core = 0;
6254 6252 }
6255 6253
6256 6254 /* call the transport to abort a command */
6257 6255 rval = soc_abort(sf->sf_sochandle, sf->sf_socp,
6258 6256 sf->sf_sochandle->fcal_portno, fpkt, 1);
6259 6257
6260 6258 switch (rval) {
6261 6259 case FCAL_ABORTED:
6262 6260 SF_DEBUG(1, (sf, CE_NOTE, "Command Abort succeeded\n"));
6263 6261 pkt = cmd->cmd_pkt;
6264 6262 cmd->cmd_state = SF_STATE_IDLE;
6265 6263 pkt->pkt_statistics |= (STAT_TIMEOUT|STAT_ABORTED);
6266 6264 pkt->pkt_reason = CMD_TIMEOUT;
6267 6265 if (pkt->pkt_comp != NULL) {
6268 6266 (*pkt->pkt_comp)(pkt);
6269 6267 }
6270 6268 break; /* success */
6271 6269
6272 6270 case FCAL_ABORT_FAILED:
6273 6271 SF_DEBUG(1, (sf, CE_NOTE, "Command Abort failed at target\n"));
6274 6272 pkt = cmd->cmd_pkt;
6275 6273 cmd->cmd_state = SF_STATE_IDLE;
6276 6274 pkt->pkt_reason = CMD_TIMEOUT;
6277 6275 pkt->pkt_statistics |= STAT_TIMEOUT;
6278 6276 tgt_id = pkt->pkt_address.a_target;
6279 6277 sf->sf_stats.tstats[tgt_id].abts_failures++;
6280 6278 if (pkt->pkt_comp != NULL) {
6281 6279 (*pkt->pkt_comp)(pkt);
6282 6280 }
6283 6281 break;
6284 6282
6285 6283 case FCAL_BAD_ABORT:
6286 6284 if (sf_core && (sf_core & SF_CORE_BAD_ABORT)) {
6287 6285 sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6288 6286 sr_soc_hdr.sh_request_token;
6289 6287 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6290 6288 sf_core = 0;
6291 6289 }
6292 6290 SF_DEBUG(1, (sf, CE_NOTE, "Command Abort bad abort\n"));
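		/*
		 * Leave the command in the ABORTING state; sf_check_targets()
		 * completes it once this grace period expires.
		 */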
6293 6291 cmd->cmd_timeout = sf_watchdog_time + cmd->cmd_pkt->pkt_time
6294 6292 + 20;
6295 6293 break;
6296 6294
6297 6295 case FCAL_TIMEOUT:
6298 6296 retval = TRUE;
6299 6297 break;
6300 6298
6301 6299 default:
6302 6300 pkt = cmd->cmd_pkt;
6303 6301 tgt_id = pkt->pkt_address.a_target;
6304 6302 sf_log(sf, CE_WARN,
6305 6303 "Command Abort failed target 0x%x, forcing a LIP\n", tgt_id);
6306 6304 if (sf_core && (sf_core & SF_CORE_ABORT_TIMEOUT)) {
6307 6305 sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6308 6306 sr_soc_hdr.sh_request_token;
6309 6307 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6310 6308 sf_core = 0;
6311 6309 }
6312 6310 sf_force_lip(sf);
6313 6311 retval = TRUE;
6314 6312 break;
6315 6313 }
6316 6314
6317 6315 return (retval);
6318 6316 }
6319 6317
6320 6318
6321 6319 /*
6322 6320 * an ELS command has timed out
6323 6321  * returns the next entry on sf_els_list to examine (may be NULL)
6324 6322 */
6325 6323 static struct sf_els_hdr *
6326 6324 sf_els_timeout(struct sf *sf, struct sf_els_hdr *privp)
6327 6325 {
6328 6326 struct fcal_packet *fpkt;
6329 6327 int rval, dflag, timeout = SF_ELS_TIMEOUT;
6330 6328 uint_t lip_cnt = privp->lip_cnt;
6331 6329 uchar_t els_code = privp->els_code;
6332 6330 struct sf_target *target = privp->target;
6333 6331 char what[64];
6334 6332
6335 6333 fpkt = privp->fpkt;
6336 6334 dflag = privp->delayed_retry;
6337 6335 /* use as temporary state variable */
6338 6336 privp->timeout = SF_INVALID_TIMEOUT;
6339 6337 mutex_exit(&sf->sf_mutex);
6340 6338
6341 6339 if (privp->fpkt->fcal_pkt_comp == sf_els_callback) {
6342 6340 /*
6343 6341 * take socal core if required. Timeouts for IB and hosts
6344 6342 * are not very interesting, so we take socal core only
6345 6343 	 * if the timeout is *not* for an IB or host.
6346 6344 */
6347 6345 if (sf_core && (sf_core & SF_CORE_ELS_TIMEOUT) &&
6348 6346 ((sf_alpa_to_switch[privp->dest_nport_id] &
6349 6347 		    0x0d) != 0x0d) && ((privp->dest_nport_id != 1) &&
6350 6348 		    (privp->dest_nport_id != 2) &&
6351 6349 		    (privp->dest_nport_id != 4) &&
6352 6350 		    (privp->dest_nport_id != 8) &&
6353 6351 		    (privp->dest_nport_id != 0xf))) {
6354 6352 sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6355 6353 sr_soc_hdr.sh_request_token;
6356 6354 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6357 6355 sf_core = 0;
6358 6356 }
6359 6357 (void) sprintf(what, "ELS 0x%x", privp->els_code);
6360 6358 } else if (privp->fpkt->fcal_pkt_comp == sf_reportlun_callback) {
6361 6359 if (sf_core && (sf_core & SF_CORE_REPORTLUN_TIMEOUT)) {
6362 6360 sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6363 6361 sr_soc_hdr.sh_request_token;
6364 6362 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6365 6363 sf_core = 0;
6366 6364 }
6367 6365 timeout = SF_FCP_TIMEOUT;
6368 6366 (void) sprintf(what, "REPORT_LUNS");
6369 6367 } else if (privp->fpkt->fcal_pkt_comp == sf_inq_callback) {
6370 6368 if (sf_core && (sf_core & SF_CORE_INQUIRY_TIMEOUT)) {
6371 6369 sf_token = (int *)(uintptr_t)
6372 6370 fpkt->fcal_socal_request.\
6373 6371 sr_soc_hdr.sh_request_token;
6374 6372 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6375 6373 sf_core = 0;
6376 6374 }
6377 6375 timeout = SF_FCP_TIMEOUT;
6378 6376 (void) sprintf(what, "INQUIRY to LUN 0x%lx",
6379 6377 (long)SCSA_LUN(target));
6380 6378 } else {
6381 6379 (void) sprintf(what, "UNKNOWN OPERATION");
6382 6380 }
6383 6381
6384 6382 if (dflag) {
6385 6383 /* delayed retry */
6386 6384 SF_DEBUG(2, (sf, CE_CONT,
6387 6385 "!sf%d: %s to target %x delayed retry\n",
6388 6386 ddi_get_instance(sf->sf_dip), what,
6389 6387 sf_alpa_to_switch[privp->dest_nport_id]));
6390 6388 privp->delayed_retry = FALSE;
6391 6389 goto try_again;
6392 6390 }
6393 6391
6394 6392 sf_log(sf, CE_NOTE, "!%s to target 0x%x alpa 0x%x timed out\n",
6395 6393 what, sf_alpa_to_switch[privp->dest_nport_id],
6396 6394 privp->dest_nport_id);
6397 6395
6398 6396 rval = soc_abort(sf->sf_sochandle, sf->sf_socp, sf->sf_sochandle
6399 6397 ->fcal_portno, fpkt, 1);
6400 6398 if (rval == FCAL_ABORTED || rval == FCAL_ABORT_FAILED) {
6401 6399 SF_DEBUG(1, (sf, CE_NOTE, "!%s abort to al_pa %x succeeded\n",
6402 6400 what, privp->dest_nport_id));
6403 6401 try_again:
6404 6402
6405 6403 mutex_enter(&sf->sf_mutex);
6406 6404 if (privp->prev != NULL) {
6407 6405 privp->prev->next = privp->next;
6408 6406 }
6409 6407 if (sf->sf_els_list == privp) {
6410 6408 sf->sf_els_list = privp->next;
6411 6409 }
6412 6410 if (privp->next != NULL) {
6413 6411 privp->next->prev = privp->prev;
6414 6412 }
6415 6413 privp->prev = privp->next = NULL;
6416 6414 if (lip_cnt == sf->sf_lip_cnt) {
6417 6415 privp->timeout = sf_watchdog_time + timeout;
6418 6416 if ((++(privp->retries) < sf_els_retries) ||
6419 6417 (dflag && (privp->retries < SF_BSY_RETRIES))) {
6420 6418 mutex_exit(&sf->sf_mutex);
6421 6419 sf_log(sf, CE_NOTE,
6422 6420 "!%s to target 0x%x retrying\n",
6423 6421 what,
6424 6422 sf_alpa_to_switch[privp->dest_nport_id]);
6425 6423 if (sf_els_transport(sf, privp) == 1) {
6426 6424 mutex_enter(&sf->sf_mutex);
6427 6425 return (sf->sf_els_list); /* success */
6428 6426 }
6429 6427 mutex_enter(&sf->sf_mutex);
6430 6428 fpkt = NULL;
6431 6429 }
6432 6430 if ((lip_cnt == sf->sf_lip_cnt) &&
6433 6431 (els_code != LA_ELS_LOGO)) {
6434 6432 if (target != NULL) {
6435 6433 sf_offline_target(sf, target);
6436 6434 }
6437 6435 if (sf->sf_lip_cnt == lip_cnt) {
6438 6436 sf->sf_device_count--;
6439 6437 ASSERT(sf->sf_device_count >= 0);
6440 6438 if (sf->sf_device_count == 0) {
6441 6439 sf_finish_init(sf,
6442 6440 sf->sf_lip_cnt);
6443 6441 }
6444 6442 }
6445 6443 }
6446 6444 privp = sf->sf_els_list;
6447 6445 mutex_exit(&sf->sf_mutex);
6448 6446 if (fpkt != NULL) {
6449 6447 sf_els_free(fpkt);
6450 6448 }
6451 6449 } else {
6452 6450 mutex_exit(&sf->sf_mutex);
6453 6451 sf_els_free(privp->fpkt);
6454 6452 privp = NULL;
6455 6453 }
6456 6454 } else {
6457 6455 if (sf_core && (sf_core & SF_CORE_ELS_FAILED)) {
6458 6456 sf_token = (int *)(uintptr_t)
6459 6457 fpkt->fcal_socal_request.\
6460 6458 sr_soc_hdr.sh_request_token;
6461 6459 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6462 6460 sf_core = 0;
6463 6461 }
6464 6462 sf_log(sf, CE_NOTE, "%s abort to target 0x%x failed. "
6465 6463 "status=0x%x, forcing LIP\n", what,
6466 6464 sf_alpa_to_switch[privp->dest_nport_id], rval);
6467 6465 privp = NULL;
6468 6466 if (sf->sf_lip_cnt == lip_cnt) {
6469 6467 sf_force_lip(sf);
6470 6468 }
6471 6469 }
6472 6470
6473 6471 mutex_enter(&sf->sf_mutex);
6474 6472 return (privp);
6475 6473 }
6476 6474
6477 6475
6478 6476 /*
6479 6477 * called by timeout when a reset times out
6480 6478 */
6481 6479 /*ARGSUSED*/
6482 6480 static void
6483 6481 sf_check_reset_delay(void *arg)
6484 6482 {
6485 6483 struct sf *sf;
6486 6484 struct sf_target *target;
6487 6485 struct sf_reset_list *rp, *tp;
6488 6486 uint_t lip_cnt, reset_timeout_flag = FALSE;
6489 6487 clock_t lb;
6490 6488
6491 6489 lb = ddi_get_lbolt();
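	/*
	 * Entries are expired below with a signed (timeout - lbolt)
	 * comparison so that lbolt wraparound is handled correctly.
	 */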
6492 6490
6493 6491 mutex_enter(&sf_global_mutex);
6494 6492
6495 6493 sf_reset_timeout_id = 0;
6496 6494
6497 6495 for (sf = sf_head; sf != NULL; sf = sf->sf_next) {
6498 6496
6499 6497 mutex_exit(&sf_global_mutex);
6500 6498 mutex_enter(&sf->sf_mutex);
6501 6499
6502 6500 		/* treat the list head as a node; relies on 'next' being first */
6503 6501 tp = (struct sf_reset_list *)&sf->sf_reset_list;
6504 6502
6505 6503 rp = sf->sf_reset_list;
6506 6504 while (rp != NULL) {
6507 6505 if (((rp->timeout - lb) < 0) &&
6508 6506 (rp->lip_cnt == sf->sf_lip_cnt)) {
6509 6507 tp->next = rp->next;
6510 6508 mutex_exit(&sf->sf_mutex);
6511 6509 target = rp->target;
6512 6510 lip_cnt = rp->lip_cnt;
6513 6511 kmem_free(rp, sizeof (struct sf_reset_list));
6514 6512 /* abort all cmds for this target */
6515 6513 while (target) {
6516 6514 sf_abort_all(sf, target, FALSE,
6517 6515 lip_cnt, TRUE);
6518 6516 mutex_enter(&target->sft_mutex);
6519 6517 if (lip_cnt == sf->sf_lip_cnt) {
6520 6518 target->sft_state &=
6521 6519 ~SF_TARGET_BUSY;
6522 6520 }
6523 6521 mutex_exit(&target->sft_mutex);
6524 6522 target = target->sft_next_lun;
6525 6523 }
6526 6524 mutex_enter(&sf->sf_mutex);
6527 6525 tp = (struct sf_reset_list *)
6528 6526 &sf->sf_reset_list;
6529 6527 rp = sf->sf_reset_list;
6530 6528 lb = ddi_get_lbolt();
6531 6529 } else if (rp->lip_cnt != sf->sf_lip_cnt) {
6532 6530 tp->next = rp->next;
6533 6531 kmem_free(rp, sizeof (struct sf_reset_list));
6534 6532 rp = tp->next;
6535 6533 } else {
6536 6534 reset_timeout_flag = TRUE;
6537 6535 tp = rp;
6538 6536 rp = rp->next;
6539 6537 }
6540 6538 }
6541 6539 mutex_exit(&sf->sf_mutex);
6542 6540 mutex_enter(&sf_global_mutex);
6543 6541 }
6544 6542
6545 6543 if (reset_timeout_flag && (sf_reset_timeout_id == 0)) {
6546 6544 sf_reset_timeout_id = timeout(sf_check_reset_delay,
6547 6545 NULL, drv_usectohz(SF_TARGET_RESET_DELAY));
6548 6546 }
6549 6547
6550 6548 mutex_exit(&sf_global_mutex);
6551 6549 }
6552 6550
6553 6551
6554 6552 /*
6555 6553 * called to "reset the bus", i.e. force loop initialization (and address
6556 6554 * re-negotiation)
6557 6555 */
6558 6556 static void
6559 6557 sf_force_lip(struct sf *sf)
6560 6558 {
6561 6559 int i;
6562 6560 struct sf_target *target;
6563 6561
6564 6562
6565 6563 /* disable restart of lip if we're suspended */
6566 6564 mutex_enter(&sf->sf_mutex);
6567 6565 if (sf->sf_state & SF_STATE_SUSPENDED) {
6568 6566 mutex_exit(&sf->sf_mutex);
6569 6567 SF_DEBUG(1, (sf, CE_CONT,
6570 6568 "sf_force_lip, sf%d: lip restart disabled "
6571 6569 "due to DDI_SUSPEND\n",
6572 6570 ddi_get_instance(sf->sf_dip)));
6573 6571 return;
6574 6572 }
6575 6573
6576 6574 sf_log(sf, CE_NOTE, "Forcing lip\n");
6577 6575
6578 6576 for (i = 0; i < sf_max_targets; i++) {
6579 6577 target = sf->sf_targets[i];
6580 6578 while (target != NULL) {
6581 6579 mutex_enter(&target->sft_mutex);
6582 6580 if (!(target->sft_state & SF_TARGET_OFFLINE))
6583 6581 target->sft_state |= SF_TARGET_BUSY;
6584 6582 mutex_exit(&target->sft_mutex);
6585 6583 target = target->sft_next_lun;
6586 6584 }
6587 6585 }
6588 6586
6589 6587 sf->sf_lip_cnt++;
6590 6588 sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
6591 6589 sf->sf_state = SF_STATE_OFFLINE;
6592 6590 mutex_exit(&sf->sf_mutex);
6593 6591 	sf->sf_stats.lip_count++;	/* updated without a mutex; a lost count is harmless */
6594 6592
6595 6593 #ifdef DEBUG
6596 6594 /* are we allowing LIPs ?? */
6597 6595 if (sf_lip_flag != 0) {
6598 6596 #endif
6599 6597 /* call the transport to force loop initialization */
6600 6598 if (((i = soc_force_lip(sf->sf_sochandle, sf->sf_socp,
6601 6599 sf->sf_sochandle->fcal_portno, 1,
6602 6600 FCAL_FORCE_LIP)) != FCAL_SUCCESS) &&
6603 6601 (i != FCAL_TIMEOUT)) {
6604 6602 /* force LIP failed */
6605 6603 if (sf_core && (sf_core & SF_CORE_LIP_FAILED)) {
6606 6604 (void) soc_take_core(sf->sf_sochandle,
6607 6605 sf->sf_socp);
6608 6606 sf_core = 0;
6609 6607 }
6610 6608 #ifdef DEBUG
6611 6609 /* are we allowing reset after LIP failed ?? */
6612 6610 if (sf_reset_flag != 0) {
6613 6611 #endif
6614 6612 /* restart socal after resetting it */
6615 6613 sf_log(sf, CE_NOTE,
6616 6614 				    "!Force lip failed.  Status code 0x%x."
6617 6615 				    "  Resetting\n", i);
6618 6616 /* call transport to force a reset */
6619 6617 soc_force_reset(sf->sf_sochandle, sf->sf_socp,
6620 6618 sf->sf_sochandle->fcal_portno, 1);
6621 6619 #ifdef DEBUG
6622 6620 }
6623 6621 #endif
6624 6622 }
6625 6623 #ifdef DEBUG
6626 6624 }
6627 6625 #endif
6628 6626 }
6629 6627
6630 6628
6631 6629 /*
6632 6630 * called by the transport when an unsolicited ELS is received
6633 6631 */
6634 6632 static void
6635 6633 sf_unsol_els_callback(void *arg, soc_response_t *srp, caddr_t payload)
6636 6634 {
6637 6635 struct sf *sf = (struct sf *)arg;
6638 6636 els_payload_t *els = (els_payload_t *)payload;
6639 6637 struct la_els_rjt *rsp;
6640 6638 int i, tgt_id;
6641 6639 uchar_t dest_id;
6642 6640 struct fcal_packet *fpkt;
6643 6641 fc_frame_header_t *hp;
6644 6642 struct sf_els_hdr *privp;
6645 6643
6646 6644
6647 6645 if ((els == NULL) || ((i = srp->sr_soc_hdr.sh_byte_cnt) == 0)) {
6648 6646 return;
6649 6647 }
6650 6648
6651 6649 if (i > SOC_CQE_PAYLOAD) {
6652 6650 i = SOC_CQE_PAYLOAD;
6653 6651 }
6654 6652
6655 6653 dest_id = (uchar_t)srp->sr_fc_frame_hdr.s_id;
6656 6654 tgt_id = sf_alpa_to_switch[dest_id];
6657 6655
6658 6656 switch (els->els_cmd.c.ls_command) {
6659 6657
6660 6658 case LA_ELS_LOGO:
6661 6659 /*
6662 6660 * logout received -- log the fact
6663 6661 */
6664 6662 sf->sf_stats.tstats[tgt_id].logouts_recvd++;
6665 6663 sf_log(sf, CE_NOTE, "!LOGO recvd from target %x, %s\n",
6666 6664 tgt_id,
6667 6665 sf_lip_on_plogo ? "Forcing LIP...." : "");
6668 6666 if (sf_lip_on_plogo) {
6669 6667 sf_force_lip(sf);
6670 6668 }
6671 6669 break;
6672 6670
6673 6671 default: /* includes LA_ELS_PLOGI */
6674 6672 /*
6675 6673 * something besides a logout received -- we don't handle
6676 6674 		 * this, so send back a reject saying it's unsupported
6677 6675 */
6678 6676
6679 6677 sf_log(sf, CE_NOTE, "!ELS 0x%x recvd from target 0x%x\n",
6680 6678 els->els_cmd.c.ls_command, tgt_id);
6681 6679
6682 6680
6683 6681 /* allocate room for a response */
6684 6682 if (sf_els_alloc(sf, dest_id, sizeof (struct sf_els_hdr),
6685 6683 sizeof (struct la_els_rjt), sizeof (union sf_els_rsp),
6686 6684 (caddr_t *)&privp, (caddr_t *)&rsp) == NULL) {
6687 6685 break;
6688 6686 }
6689 6687
6690 6688 fpkt = privp->fpkt;
6691 6689
6692 6690 /* fill in pkt header */
6693 6691 hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
6694 6692 hp->r_ctl = R_CTL_ELS_RSP;
6695 6693 hp->f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
6696 6694 hp->ox_id = srp->sr_fc_frame_hdr.ox_id;
6697 6695 hp->rx_id = srp->sr_fc_frame_hdr.rx_id;
6698 6696 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
6699 6697 CQ_TYPE_OUTBOUND;
6700 6698
6701 6699 fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 1;
6702 6700
6703 6701 /* fill in response */
6704 6702 rsp->ls_code = LA_ELS_RJT; /* reject this ELS */
6705 6703 rsp->mbz[0] = 0;
6706 6704 rsp->mbz[1] = 0;
6707 6705 rsp->mbz[2] = 0;
6708 6706 ((struct la_els_logi *)privp->rsp)->ls_code = LA_ELS_ACC;
6709 6707 *((int *)&rsp->reserved) = 0;
6710 6708 rsp->reason_code = RJT_UNSUPPORTED;
6711 6709 privp->retries = sf_els_retries;
6712 6710 privp->els_code = LA_ELS_RJT;
6713 6711 privp->timeout = (unsigned)0xffffffff;
6714 6712 (void) sf_els_transport(sf, privp);
6715 6713 break;
6716 6714 }
6717 6715 }
6718 6716
6719 6717
6720 6718 /*
6721 6719 * Error logging, printing, and debug print routines
6722 6720 */
6723 6721
6724 6722 /*PRINTFLIKE3*/
6725 6723 static void
6726 6724 sf_log(struct sf *sf, int level, const char *fmt, ...)
6727 6725 {
6728 6726 char buf[256];
6729 6727 dev_info_t *dip;
6730 6728 va_list ap;
6731 6729
6732 6730 if (sf != NULL) {
6733 6731 dip = sf->sf_dip;
6734 6732 } else {
6735 6733 dip = NULL;
6736 6734 }
6737 6735
6738 6736 va_start(ap, fmt);
6739 6737 	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
6740 6738 va_end(ap);
6741 6739 scsi_log(dip, "sf", level, buf);
6742 6740 }
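
A typical call site, for illustration only (the message text is invented): per the
cmn_err(9F) conventions that scsi_log(9F) inherits, a leading '!' sends the message to the
system log without echoing it to the console.

	sf_log(sf, CE_WARN, "!target 0x%x: command timed out\n", tgt_id);
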
6743 6741
6744 6742
6745 6743 /*
6746 6744 * called to get some sf kstats -- return 0 on success else return errno
6747 6745 */
6748 6746 static int
6749 6747 sf_kstat_update(kstat_t *ksp, int rw)
6750 6748 {
6751 6749 struct sf *sf;
6752 6750
6753 6751 if (rw == KSTAT_WRITE) {
6754 6752 /* can't write */
6755 6753 return (EACCES);
6756 6754 }
6757 6755
6758 6756 sf = ksp->ks_private;
6759 6757 sf->sf_stats.ncmds = sf->sf_ncmds;
6760 6758 sf->sf_stats.throttle_limit = sf->sf_throttle;
6761 6759 sf->sf_stats.cr_pool_size = sf->sf_cr_pool_cnt;
6762 6760
6763 6761 return (0); /* success */
6764 6762 }
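
For context, an update routine like this is attached to a raw, virtual kstat when the
instance comes up; a minimal sketch of that wiring (local names illustrative; the actual
attach-time code lives elsewhere in this file):

	int instance = ddi_get_instance(sf->sf_dip);
	kstat_t *ksp;

	ksp = kstat_create("sf", instance, "statistics", "controller",
	    KSTAT_TYPE_RAW, sizeof (struct sf_stats), KSTAT_FLAG_VIRTUAL);
	if (ksp != NULL) {
		ksp->ks_data = (void *)&sf->sf_stats;
		ksp->ks_private = sf;	/* handed back via ks_private above */
		ksp->ks_update = sf_kstat_update;
		kstat_install(ksp);
	}
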
6765 6763
6766 6764
6767 6765 /*
6768 6766 * Unix Entry Points
6769 6767 */
6770 6768
6771 6769 /*
6772 6770 * driver entry point for opens on control device
6773 6771 */
6774 6772 /* ARGSUSED */
6775 6773 static int
6776 6774 sf_open(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
6777 6775 {
6778 6776 dev_t dev = *dev_p;
6779 6777 struct sf *sf;
6780 6778
6781 6779
6782 6780 /* just ensure soft state exists for this device */
6783 6781 sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6784 6782 if (sf == NULL) {
6785 6783 return (ENXIO);
6786 6784 }
6787 6785
6788 6786 ++(sf->sf_check_n_close);
6789 6787
6790 6788 return (0);
6791 6789 }
6792 6790
6793 6791
6794 6792 /*
6795 6793 * driver entry point for last close on control device
6796 6794 */
6797 6795 /* ARGSUSED */
6798 6796 static int
6799 6797 sf_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
6800 6798 {
6801 6799 struct sf *sf;
6802 6800
6803 6801 sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6804 6802 if (sf == NULL) {
6805 6803 return (ENXIO);
6806 6804 }
6807 6805
6808 6806 if (!sf->sf_check_n_close) { /* if this flag is zero */
6809 6807 cmn_err(CE_WARN, "sf%d: trying to close unopened instance",
6810 6808 SF_MINOR2INST(getminor(dev)));
6811 6809 return (ENODEV);
6812 6810 } else {
6813 6811 --(sf->sf_check_n_close);
6814 6812 }
6815 6813 return (0);
6816 6814 }
6817 6815
6818 6816
6819 6817 /*
6820 6818 * driver entry point for sf ioctl commands
6821 6819 */
6822 6820 /* ARGSUSED */
6823 6821 static int
6824 6822 sf_ioctl(dev_t dev,
6825 6823 int cmd, intptr_t arg, int mode, cred_t *cred_p, int *rval_p)
6826 6824 {
6827 6825 struct sf *sf;
6828 6826 struct sf_target *target;
6829 6827 uchar_t al_pa;
6830 6828 struct sf_al_map map;
6831 6829 int cnt, i;
6832 6830 int retval; /* return value */
6833 6831 struct devctl_iocdata *dcp;
6834 6832 dev_info_t *cdip;
6835 6833 struct scsi_address ap;
6836 6834 scsi_hba_tran_t *tran;
6837 6835
6838 6836
6839 6837 sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6840 6838 if (sf == NULL) {
6841 6839 return (ENXIO);
6842 6840 }
6843 6841
6844 6842 /* handle all ioctls */
6845 6843 switch (cmd) {
6846 6844
6847 6845 /*
6848 6846 * We can use the generic implementation for these ioctls
6849 6847 */
6850 6848 case DEVCTL_DEVICE_GETSTATE:
6851 6849 case DEVCTL_DEVICE_ONLINE:
6852 6850 case DEVCTL_DEVICE_OFFLINE:
6853 6851 case DEVCTL_BUS_GETSTATE:
6854 6852 return (ndi_devctl_ioctl(sf->sf_dip, cmd, arg, mode, 0));
6855 6853
6856 6854 /*
6857 6855 * return FC map
6858 6856 */
6859 6857 case SFIOCGMAP:
6860 6858 if ((sf->sf_lilp_map->lilp_magic != FCAL_LILP_MAGIC &&
6861 6859 sf->sf_lilp_map->lilp_magic != FCAL_BADLILP_MAGIC) ||
6862 6860 sf->sf_state != SF_STATE_ONLINE) {
6863 6861 retval = ENOENT;
6864 6862 goto dun;
6865 6863 }
6866 6864 mutex_enter(&sf->sf_mutex);
6867 6865 if (sf->sf_lilp_map->lilp_magic == FCAL_BADLILP_MAGIC) {
6868 6866 int i, j = 0;
6869 6867
6870 6868 /* Need to generate a fake lilp map */
6871 6869 for (i = 0; i < sf_max_targets; i++) {
6872 6870 if (sf->sf_targets[i])
6873 6871 sf->sf_lilp_map->lilp_alpalist[j++] =
6874 6872 sf->sf_targets[i]->
6875 6873 sft_hard_address;
6876 6874 }
6877 6875 sf->sf_lilp_map->lilp_length = (uchar_t)j;
6878 6876 }
6879 6877 cnt = sf->sf_lilp_map->lilp_length;
6880 6878 map.sf_count = (short)cnt;
6881 6879 bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
6882 6880 (caddr_t)&map.sf_hba_addr.sf_node_wwn,
6883 6881 sizeof (la_wwn_t));
6884 6882 bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
6885 6883 (caddr_t)&map.sf_hba_addr.sf_port_wwn,
6886 6884 sizeof (la_wwn_t));
6887 6885 map.sf_hba_addr.sf_al_pa = sf->sf_al_pa;
6888 6886 map.sf_hba_addr.sf_hard_address = 0;
6889 6887 map.sf_hba_addr.sf_inq_dtype = DTYPE_UNKNOWN;
6890 6888 for (i = 0; i < cnt; i++) {
6891 6889 al_pa = sf->sf_lilp_map->lilp_alpalist[i];
6892 6890 map.sf_addr_pair[i].sf_al_pa = al_pa;
6893 6891 if (al_pa == sf->sf_al_pa) {
6894 6892 (void) bcopy((caddr_t)&sf->sf_sochandle
6895 6893 ->fcal_n_wwn, (caddr_t)&map.
6896 6894 sf_addr_pair[i].sf_node_wwn,
6897 6895 sizeof (la_wwn_t));
6898 6896 (void) bcopy((caddr_t)&sf->sf_sochandle
6899 6897 ->fcal_p_wwn, (caddr_t)&map.
6900 6898 sf_addr_pair[i].sf_port_wwn,
6901 6899 sizeof (la_wwn_t));
6902 6900 map.sf_addr_pair[i].sf_hard_address =
6903 6901 al_pa;
6904 6902 map.sf_addr_pair[i].sf_inq_dtype =
6905 6903 DTYPE_PROCESSOR;
6906 6904 continue;
6907 6905 }
6908 6906 target = sf->sf_targets[sf_alpa_to_switch[
6909 6907 al_pa]];
6910 6908 if (target != NULL) {
6911 6909 mutex_enter(&target->sft_mutex);
6912 6910 if (!(target->sft_state &
6913 6911 (SF_TARGET_OFFLINE |
6914 6912 SF_TARGET_BUSY))) {
6915 6913 bcopy((caddr_t)&target->
6916 6914 sft_node_wwn,
6917 6915 (caddr_t)&map.sf_addr_pair
6918 6916 [i].sf_node_wwn,
6919 6917 sizeof (la_wwn_t));
6920 6918 bcopy((caddr_t)&target->
6921 6919 sft_port_wwn,
6922 6920 (caddr_t)&map.sf_addr_pair
6923 6921 [i].sf_port_wwn,
6924 6922 sizeof (la_wwn_t));
6925 6923 map.sf_addr_pair[i].
6926 6924 sf_hard_address
6927 6925 = target->sft_hard_address;
6928 6926 map.sf_addr_pair[i].
6929 6927 sf_inq_dtype
6930 6928 = target->sft_device_type;
6931 6929 mutex_exit(&target->sft_mutex);
6932 6930 continue;
6933 6931 }
6934 6932 mutex_exit(&target->sft_mutex);
6935 6933 }
6936 6934 bzero((caddr_t)&map.sf_addr_pair[i].
6937 6935 sf_node_wwn, sizeof (la_wwn_t));
6938 6936 bzero((caddr_t)&map.sf_addr_pair[i].
6939 6937 sf_port_wwn, sizeof (la_wwn_t));
6940 6938 map.sf_addr_pair[i].sf_inq_dtype =
6941 6939 DTYPE_UNKNOWN;
6942 6940 }
6943 6941 mutex_exit(&sf->sf_mutex);
6944 6942 if (ddi_copyout((caddr_t)&map, (caddr_t)arg,
6945 6943 sizeof (struct sf_al_map), mode) != 0) {
6946 6944 retval = EFAULT;
6947 6945 goto dun;
6948 6946 }
6949 6947 break;
6950 6948
6951 6949 /*
6952 6950 * handle device control ioctls
6953 6951 */
6954 6952 case DEVCTL_DEVICE_RESET:
6955 6953 if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS) {
6956 6954 retval = EFAULT;
6957 6955 goto dun;
6958 6956 }
6959 6957 if ((ndi_dc_getname(dcp) == NULL) ||
6960 6958 (ndi_dc_getaddr(dcp) == NULL)) {
6961 6959 ndi_dc_freehdl(dcp);
6962 6960 retval = EINVAL;
6963 6961 goto dun;
6964 6962 }
6965 6963 cdip = ndi_devi_find(sf->sf_dip,
6966 6964 ndi_dc_getname(dcp), ndi_dc_getaddr(dcp));
6967 6965 ndi_dc_freehdl(dcp);
6968 6966
6969 6967 if (cdip == NULL) {
6970 6968 retval = ENXIO;
6971 6969 goto dun;
6972 6970 }
6973 6971
6974 6972 if ((target = sf_get_target_from_dip(sf, cdip)) == NULL) {
6975 6973 retval = ENXIO;
6976 6974 goto dun;
6977 6975 }
6978 6976 mutex_enter(&target->sft_mutex);
6979 6977 if (!(target->sft_state & SF_TARGET_INIT_DONE)) {
6980 6978 mutex_exit(&target->sft_mutex);
6981 6979 retval = ENXIO;
6982 6980 goto dun;
6983 6981 }
6984 6982
6985 6983 /* This is ugly */
6986 6984 tran = kmem_zalloc(scsi_hba_tran_size(), KM_SLEEP);
6987 6985 bcopy(target->sft_tran, tran, scsi_hba_tran_size());
6988 6986 mutex_exit(&target->sft_mutex);
6989 6987 ap.a_hba_tran = tran;
6990 6988 ap.a_target = sf_alpa_to_switch[target->sft_al_pa];
6991 6989 if (sf_reset(&ap, RESET_TARGET) == FALSE) {
6992 6990 retval = EIO;
6993 6991 } else {
6994 6992 retval = 0;
6995 6993 }
6996 6994 kmem_free(tran, scsi_hba_tran_size());
6997 6995 goto dun;
6998 6996
6999 6997 case DEVCTL_BUS_QUIESCE:
7000 6998 case DEVCTL_BUS_UNQUIESCE:
7001 6999 retval = ENOTSUP;
7002 7000 goto dun;
7003 7001
7004 7002 case DEVCTL_BUS_RESET:
7005 7003 case DEVCTL_BUS_RESETALL:
7006 7004 sf_force_lip(sf);
7007 7005 break;
7008 7006
7009 7007 default:
7010 7008 retval = ENOTTY;
7011 7009 goto dun;
7012 7010 }
7013 7011
7014 7012 retval = 0; /* success */
7015 7013
7016 7014 dun:
7017 7015 return (retval);
7018 7016 }
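
From userland, SFIOCGMAP is how utilities such as luxadm read the loop map through the
devctl minor node. A hedged sketch of a caller (the /devices path is illustrative, and the
ioctl and map definitions are assumed to come from the fc4 headers included above):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/fc4/fcio.h>

	int
	main(void)
	{
		struct sf_al_map map;
		int fd, i;

		/* illustrative path to an sf instance's devctl node */
		fd = open("/devices/sbus@3,0/SUNW,socal@d,10000/sf@0,0:devctl",
		    O_RDONLY);
		if (fd < 0 || ioctl(fd, SFIOCGMAP, &map) != 0)
			return (1);
		for (i = 0; i < map.sf_count; i++)
			(void) printf("AL_PA 0x%x\n",
			    map.sf_addr_pair[i].sf_al_pa);
		(void) close(fd);
		return (0);
	}
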
7019 7017
7020 7018
7021 7019 /*
7022 7020 * get the target given a DIP
7023 7021 */
7024 7022 static struct sf_target *
7025 7023 sf_get_target_from_dip(struct sf *sf, dev_info_t *dip)
7026 7024 {
7027 7025 int i;
7028 7026 struct sf_target *target;
7029 7027
7030 7028
7031 7029 /* scan each hash queue for the DIP in question */
7032 7030 for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
7033 7031 target = sf->sf_wwn_lists[i];
7034 7032 while (target != NULL) {
7035 7033 if (target->sft_dip == dip) {
7036 7034 return (target); /* success: target found */
7037 7035 }
7038 7036 target = target->sft_next;
7039 7037 }
7040 7038 }
7041 7039 return (NULL); /* failure: target not found */
7042 7040 }
7043 7041
7044 7042
7045 7043 /*
7046 7044 * called by the transport to get an event cookie
7047 7045 */
7048 7046 static int
7049 7047 sf_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
7050 7048 ddi_eventcookie_t *event_cookiep)
7051 7049 {
7052 7050 struct sf *sf;
7053 7051
7054 7052 sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7055 7053 if (sf == NULL) {
7056 7054 /* can't find instance for this device */
7057 7055 return (DDI_FAILURE);
7058 7056 }
7059 7057
7060 7058 return (ndi_event_retrieve_cookie(sf->sf_event_hdl, rdip, name,
7061 7059 event_cookiep, NDI_EVENT_NOPASS));
7062 7060
7063 7061 }
7064 7062
7065 7063
7066 7064 /*
7067 7065 * called by the transport to add an event callback
7068 7066 */
7069 7067 static int
7070 7068 sf_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
7071 7069 ddi_eventcookie_t eventid, void (*callback)(dev_info_t *dip,
7072 7070 ddi_eventcookie_t event, void *arg, void *impl_data), void *arg,
7073 7071 ddi_callback_id_t *cb_id)
7074 7072 {
7075 7073 struct sf *sf;
7076 7074
7077 7075 sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7078 7076 if (sf == NULL) {
7079 7077 /* can't find instance for this device */
7080 7078 return (DDI_FAILURE);
7081 7079 }
7082 7080
7083 7081 return (ndi_event_add_callback(sf->sf_event_hdl, rdip,
7084 7082 eventid, callback, arg, NDI_SLEEP, cb_id));
7085 7083
7086 7084 }
7087 7085
7088 7086
7089 7087 /*
7090 7088 * called by the transport to remove an event callback
7091 7089 */
7092 7090 static int
7093 7091 sf_bus_remove_eventcall(dev_info_t *devi, ddi_callback_id_t cb_id)
7094 7092 {
7095 7093 struct sf *sf;
7096 7094
7097 7095 sf = ddi_get_soft_state(sf_state, ddi_get_instance(devi));
7098 7096 if (sf == NULL) {
7099 7097 /* can't find instance for this device */
7100 7098 return (DDI_FAILURE);
7101 7099 }
7102 7100
7103 7101 return (ndi_event_remove_callback(sf->sf_event_hdl, cb_id));
7104 7102 }
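
These bus ops back the generic DDI event interfaces, so a child (target) driver consumes
them indirectly. A minimal sketch of a child registering for the removal event from its
attach(9E) routine (callback and variable names are illustrative; FCAL_REMOVE_EVENT is
assumed to be the event-name string defined alongside the FCAL_INSERT_EVENT used below):

	static void
	child_remove_cb(dev_info_t *dip, ddi_eventcookie_t cookie, void *arg,
	    void *impl_data)
	{
		/* mark the device gone; real cleanup happens elsewhere */
	}

	/* in the child driver's attach(9E): */
	ddi_eventcookie_t rem_cookie;
	ddi_callback_id_t rem_cb_id;

	if (ddi_get_eventcookie(dip, FCAL_REMOVE_EVENT, &rem_cookie) ==
	    DDI_SUCCESS) {
		(void) ddi_add_event_handler(dip, rem_cookie,
		    child_remove_cb, NULL, &rem_cb_id);
	}
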
7105 7103
7106 7104
7107 7105 /*
7108 7106 * called by the transport to post an event
7109 7107 */
7110 7108 static int
7111 7109 sf_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
7112 7110 ddi_eventcookie_t eventid, void *impldata)
7113 7111 {
7114 7112 	ddi_eventcookie_t remove_cookie, cookie;
7115 7113 	struct sf *sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7116 7114 
7117 7115 	if (sf == NULL) {
7118 7116 		/* no sf instance for this device */
7119 7117 		return (NDI_FAILURE);
7120 7118 	}
7121 7119 
7122 7120 	/* is this a remove event? */
7123 7121 	remove_cookie = ndi_event_tag_to_cookie(sf->sf_event_hdl,
7124 7122 	    SF_EVENT_TAG_REMOVE);
7125 7123 
7126 7124 	if (remove_cookie == eventid) {
7127 7125 		struct sf_target *target;
7128 7126 
7129 7127 		/* handle remove event */
7130 7128
7131 7129 /* get the target for this event */
7132 7130 if ((target = sf_get_target_from_dip(sf, rdip)) != NULL) {
7133 7131 /*
7134 7132 * clear device info for this target and mark as
7135 7133 * not done
7136 7134 */
7137 7135 mutex_enter(&target->sft_mutex);
7138 7136 target->sft_dip = NULL;
7139 7137 target->sft_state &= ~SF_TARGET_INIT_DONE;
7140 7138 mutex_exit(&target->sft_mutex);
7141 7139 return (NDI_SUCCESS); /* event handled */
7142 7140 }
7143 7141
7144 7142 /* no target for this event */
7145 7143 return (NDI_FAILURE);
7146 7144 }
7147 7145
7148 7146 /* an insertion event */
7149 7147 if (ndi_busop_get_eventcookie(dip, rdip, FCAL_INSERT_EVENT, &cookie)
7150 7148 != NDI_SUCCESS) {
7151 7149 return (NDI_FAILURE);
7152 7150 }
7153 7151
7154 7152 return (ndi_post_event(dip, rdip, cookie, impldata));
7155 7153 }
7156 7154
7157 7155
7158 7156 /*
7159 7157 * the sf hotplug daemon, one thread per sf instance
7160 7158 */
7161 7159 static void
7162 7160 sf_hp_daemon(void *arg)
7163 7161 {
7164 7162 struct sf *sf = (struct sf *)arg;
7165 7163 struct sf_hp_elem *elem;
7166 7164 struct sf_target *target;
7167 7165 int tgt_id;
7168 7166 callb_cpr_t cprinfo;
7169 7167
7170 7168 CALLB_CPR_INIT(&cprinfo, &sf->sf_hp_daemon_mutex,
7171 7169 callb_generic_cpr, "sf_hp_daemon");
7172 7170
7173 7171 mutex_enter(&sf->sf_hp_daemon_mutex);
7174 7172
7175 7173 do {
7176 7174 while (sf->sf_hp_elem_head != NULL) {
7177 7175
7178 7176 /* save ptr to head of list */
7179 7177 elem = sf->sf_hp_elem_head;
7180 7178
7181 7179 /* take element off of list */
7182 7180 if (sf->sf_hp_elem_head == sf->sf_hp_elem_tail) {
7183 7181 /* element only one in list -- list now empty */
7184 7182 sf->sf_hp_elem_head = NULL;
7185 7183 sf->sf_hp_elem_tail = NULL;
7186 7184 } else {
7187 7185 /* remove element from head of list */
7188 7186 sf->sf_hp_elem_head = sf->sf_hp_elem_head->next;
7189 7187 }
7190 7188
7191 7189 mutex_exit(&sf->sf_hp_daemon_mutex);
7192 7190
7193 7191 switch (elem->what) {
7194 7192 case SF_ONLINE:
7195 7193 /* online this target */
7196 7194 target = elem->target;
7197 7195 (void) ndi_devi_online(elem->dip, 0);
7198 7196 (void) ndi_event_retrieve_cookie(
7199 7197 sf->sf_event_hdl,
7200 7198 target->sft_dip, FCAL_INSERT_EVENT,
7201 7199 &sf_insert_eid, NDI_EVENT_NOPASS);
7202 7200 (void) ndi_event_run_callbacks(sf->sf_event_hdl,
7203 7201 target->sft_dip, sf_insert_eid, NULL);
7204 7202 break;
7205 7203 case SF_OFFLINE:
7206 7204 /* offline this target */
7207 7205 target = elem->target;
7208 7206 tgt_id = sf_alpa_to_switch[target->sft_al_pa];
7209 7207 /* don't do NDI_DEVI_REMOVE for now */
7210 7208 if (ndi_devi_offline(elem->dip, 0) !=
7211 7209 NDI_SUCCESS) {
7212 7210 SF_DEBUG(1, (sf, CE_WARN, "target %x, "
7213 7211 "device offline failed", tgt_id));
7214 7212 } else {
7215 7213 SF_DEBUG(1, (sf, CE_NOTE, "target %x, "
7216 7214 "device offline succeeded\n",
7217 7215 tgt_id));
7218 7216 }
7219 7217 break;
7220 7218 }
7221 7219 kmem_free(elem, sizeof (struct sf_hp_elem));
7222 7220 mutex_enter(&sf->sf_hp_daemon_mutex);
7223 7221 }
7224 7222
7225 7223 /* if exit is not already signaled */
7226 7224 if (sf->sf_hp_exit == 0) {
7227 7225 /* wait to be signaled by work or exit */
7228 7226 CALLB_CPR_SAFE_BEGIN(&cprinfo);
7229 7227 cv_wait(&sf->sf_hp_daemon_cv, &sf->sf_hp_daemon_mutex);
7230 7228 CALLB_CPR_SAFE_END(&cprinfo, &sf->sf_hp_daemon_mutex);
7231 7229 }
7232 7230 } while (sf->sf_hp_exit == 0);
7233 7231
7234 7232 /* sf_hp_daemon_mutex is dropped by CALLB_CPR_EXIT */
7235 7233 CALLB_CPR_EXIT(&cprinfo);
7236 7234 thread_exit(); /* no more hotplug thread */
7237 7235 /* NOTREACHED */
7238 7236 }
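
The CALLB_CPR_* calls above are the standard choreography for making a long-lived kernel
thread safe across checkpoint/resume: declare the thread CPR-safe only while it sleeps,
and let CALLB_CPR_EXIT drop the mutex on the way out. Stripped to its skeleton (names
illustrative), the pattern is:

	static kmutex_t d_mutex;
	static kcondvar_t d_cv;
	static int d_exit;

	static void
	daemon_loop(void *arg)
	{
		callb_cpr_t cpr;

		CALLB_CPR_INIT(&cpr, &d_mutex, callb_generic_cpr,
		    "daemon_loop");
		mutex_enter(&d_mutex);
		while (d_exit == 0) {
			/* ... drain the work queue, holding d_mutex ... */

			/* safe to suspend this thread only while it sleeps */
			CALLB_CPR_SAFE_BEGIN(&cpr);
			cv_wait(&d_cv, &d_mutex);
			CALLB_CPR_SAFE_END(&cpr, &d_mutex);
		}
		CALLB_CPR_EXIT(&cpr);	/* also drops d_mutex */
		thread_exit();
	}
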