Print this page
NEX-17866
Release lun_mutex/port_mutex before logging an online/offline event, and only
do logging if online/offline operation was successful.
While here, follow the comment suggestion and only hold lun_mutex while checking
its state in fcp_scsi_start().
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/fibre-channel/ulp/fcp.c
+++ new/usr/src/uts/common/io/fibre-channel/ulp/fcp.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * Copyright 2018 Nexenta Systems, Inc.
28 28 */
29 29
30 30 /*
31 31 * Fibre Channel SCSI ULP Mapping driver
32 32 */
33 33
34 34 #include <sys/scsi/scsi.h>
35 35 #include <sys/types.h>
36 36 #include <sys/varargs.h>
37 37 #include <sys/devctl.h>
38 38 #include <sys/thread.h>
39 39 #include <sys/thread.h>
40 40 #include <sys/open.h>
41 41 #include <sys/file.h>
42 42 #include <sys/sunndi.h>
43 43 #include <sys/console.h>
44 44 #include <sys/proc.h>
45 45 #include <sys/time.h>
46 46 #include <sys/utsname.h>
47 47 #include <sys/scsi/impl/scsi_reset_notify.h>
48 48 #include <sys/ndi_impldefs.h>
49 49 #include <sys/byteorder.h>
50 50 #include <sys/ctype.h>
51 51 #include <sys/sunmdi.h>
52 52
53 53 #include <sys/fibre-channel/fc.h>
54 54 #include <sys/fibre-channel/impl/fc_ulpif.h>
55 55 #include <sys/fibre-channel/ulp/fcpvar.h>
56 56
57 57 /*
58 58 * Discovery Process
59 59 * =================
60 60 *
61 61 * The discovery process is a major function of FCP. In order to help
62 62 * understand that function a flow diagram is given here. This diagram
63 63 * doesn't claim to cover all the cases and the events that can occur during
64 64 * the discovery process nor the subtleties of the code. The code paths shown
65 65 * are simplified. Its purpose is to help the reader (and potentially bug
66 66 * fixer) have an overall view of the logic of the code. For that reason the
67 67 * diagram covers the simple case of the line coming up cleanly or of a new
 68 68  * port attaching to FCP while the link is up. The reader must keep in mind
69 69 * that:
70 70 *
71 71 * - There are special cases where bringing devices online and offline
72 72 * is driven by Ioctl.
73 73 *
74 74 * - The behavior of the discovery process can be modified through the
75 75 * .conf file.
76 76 *
77 77 * - The line can go down and come back up at any time during the
78 78 * discovery process which explains some of the complexity of the code.
79 79 *
80 80 * ............................................................................
81 81 *
82 82 * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
83 83 *
84 84 *
85 85 * +-------------------------+
86 86 * fp/fctl module --->| fcp_port_attach |
87 87 * +-------------------------+
88 88 * | |
89 89 * | |
90 90 * | v
91 91 * | +-------------------------+
92 92 * | | fcp_handle_port_attach |
93 93 * | +-------------------------+
94 94 * | |
95 95 * | |
96 96 * +--------------------+ |
97 97 * | |
98 98 * v v
99 99 * +-------------------------+
100 100 * | fcp_statec_callback |
101 101 * +-------------------------+
102 102 * |
103 103 * |
104 104 * v
105 105 * +-------------------------+
106 106 * | fcp_handle_devices |
107 107 * +-------------------------+
108 108 * |
109 109 * |
110 110 * v
111 111 * +-------------------------+
112 112 * | fcp_handle_mapflags |
113 113 * +-------------------------+
114 114 * |
115 115 * |
116 116 * v
117 117 * +-------------------------+
118 118 * | fcp_send_els |
119 119 * | |
120 120 * | PLOGI or PRLI To all the|
121 121 * | reachable devices. |
122 122 * +-------------------------+
123 123 *
124 124 *
125 125 * ............................................................................
126 126 *
127 127 * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
128 128 * STEP 1 are called (it is actually the same function).
129 129 *
130 130 *
131 131 * +-------------------------+
132 132 * | fcp_icmd_callback |
133 133 * fp/fctl module --->| |
134 134 * | callback for PLOGI and |
135 135 * | PRLI. |
136 136 * +-------------------------+
137 137 * |
138 138 * |
139 139 * Received PLOGI Accept /-\ Received PRLI Accept
140 140 * _ _ _ _ _ _ / \_ _ _ _ _ _
141 141 * | \ / |
142 142 * | \-/ |
143 143 * | |
144 144 * v v
145 145 * +-------------------------+ +-------------------------+
146 146 * | fcp_send_els | | fcp_send_scsi |
147 147 * | | | |
148 148 * | PRLI | | REPORT_LUN |
149 149 * +-------------------------+ +-------------------------+
150 150 *
151 151 * ............................................................................
152 152 *
153 153 * STEP 3: The callback functions of the SCSI commands issued by FCP are called
154 154 * (It is actually the same function).
155 155 *
156 156 *
157 157 * +-------------------------+
158 158 * fp/fctl module ------->| fcp_scsi_callback |
159 159 * +-------------------------+
160 160 * |
161 161 * |
162 162 * |
163 163 * Receive REPORT_LUN reply /-\ Receive INQUIRY PAGE83 reply
164 164 * _ _ _ _ _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ _ _ _ _
165 165 * | \ / |
166 166 * | \-/ |
167 167 * | | |
168 168 * | Receive INQUIRY reply| |
169 169 * | | |
170 170 * v v v
171 171 * +------------------------+ +----------------------+ +----------------------+
172 172 * | fcp_handle_reportlun | | fcp_handle_inquiry | | fcp_handle_page83 |
173 173 * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
174 174 * +------------------------+ +----------------------+ +----------------------+
175 175 * | | |
176 176 * | | |
177 177 * | | |
178 178 * v v |
179 179 * +-----------------+ +-----------------+ |
180 180 * | fcp_send_scsi | | fcp_send_scsi | |
181 181 * | | | | |
182 182 * | INQUIRY | | INQUIRY PAGE83 | |
183 183 * | (To each LUN) | +-----------------+ |
184 184 * +-----------------+ |
185 185 * |
186 186 * v
187 187 * +------------------------+
188 188 * | fcp_call_finish_init |
189 189 * +------------------------+
190 190 * |
191 191 * v
192 192 * +-----------------------------+
193 193 * | fcp_call_finish_init_held |
194 194 * +-----------------------------+
195 195 * |
196 196 * |
197 197 * All LUNs scanned /-\
198 198 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ / \
199 199 * | \ /
200 200 * | \-/
201 201 * v |
202 202 * +------------------+ |
203 203 * | fcp_finish_tgt | |
204 204 * +------------------+ |
205 205 * | Target Not Offline and |
206 206 * Target Not Offline and | not marked and tgt_node_state |
207 207 * marked /-\ not FCP_TGT_NODE_ON_DEMAND |
208 208 * _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ |
209 209 * | \ / | |
210 210 * | \-/ | |
211 211 * v v |
212 212 * +----------------------------+ +-------------------+ |
213 213 * | fcp_offline_target | | fcp_create_luns | |
214 214 * | | +-------------------+ |
215 215 * | A structure fcp_tgt_elem | | |
216 216 * | is created and queued in | v |
217 217 * | the FCP port list | +-------------------+ |
218 218 * | port_offline_tgts. It | | fcp_pass_to_hp | |
219 219 * | will be unqueued by the | | | |
220 220 * | watchdog timer. | | Called for each | |
221 221 * +----------------------------+ | LUN. Dispatches | |
222 222 * | | fcp_hp_task | |
223 223 * | +-------------------+ |
224 224 * | | |
225 225 * | | |
226 226 * | | |
227 227 * | +---------------->|
228 228 * | |
229 229 * +---------------------------------------------->|
230 230 * |
231 231 * |
232 232 * All the targets (devices) have been scanned /-\
233 233 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ / \
234 234 * | \ /
235 235 * | \-/
236 236 * +-------------------------------------+ |
237 237 * | fcp_finish_init | |
238 238 * | | |
239 239 * | Signal broadcasts the condition | |
240 240 * | variable port_config_cv of the FCP | |
241 241 * | port. One potential code sequence | |
242 242 * | waiting on the condition variable | |
243 243 * | the code sequence handling | |
244 244 * | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER| |
245 245 * | The other is in the function | |
246 246 * | fcp_reconfig_wait which is called | |
247 247 * | in the transmit path preventing IOs | |
248 248 * | from going through till the disco- | |
249 249 * | very process is over. | |
250 250 * +-------------------------------------+ |
251 251 * | |
252 252 * | |
253 253 * +--------------------------------->|
254 254 * |
255 255 * v
256 256 * Return
257 257 *
258 258 * ............................................................................
259 259 *
260 260 * STEP 4: The hot plug task is called (for each fcp_hp_elem).
261 261 *
262 262 *
263 263 * +-------------------------+
264 264 * | fcp_hp_task |
265 265 * +-------------------------+
266 266 * |
267 267 * |
268 268 * v
269 269 * +-------------------------+
270 270 * | fcp_trigger_lun |
271 271 * +-------------------------+
272 272 * |
273 273 * |
274 274 * v
275 275 * Bring offline /-\ Bring online
276 276 * _ _ _ _ _ _ _ _ _/ \_ _ _ _ _ _ _ _ _ _
277 277 * | \ / |
278 278 * | \-/ |
279 279 * v v
280 280 * +---------------------+ +-----------------------+
281 281 * | fcp_offline_child | | fcp_get_cip |
282 282 * +---------------------+ | |
283 283 * | Creates a dev_info_t |
284 284 * | or a mdi_pathinfo_t |
285 285 * | depending on whether |
286 286 * | mpxio is on or off. |
287 287 * +-----------------------+
288 288 * |
289 289 * |
290 290 * v
291 291 * +-----------------------+
292 292 * | fcp_online_child |
293 293 * | |
294 294 * | Set device online |
295 295 * | using NDI or MDI. |
296 296 * +-----------------------+
297 297 *
298 298 * ............................................................................
299 299 *
 300 300  * STEP 5: The watchdog timer expires. The watchdog timer does much more than
301 301 * what is described here. We only show the target offline path.
302 302 *
303 303 *
304 304 * +--------------------------+
305 305 * | fcp_watch |
306 306 * +--------------------------+
307 307 * |
308 308 * |
309 309 * v
310 310 * +--------------------------+
311 311 * | fcp_scan_offline_tgts |
312 312 * +--------------------------+
313 313 * |
314 314 * |
315 315 * v
316 316 * +--------------------------+
317 317 * | fcp_offline_target_now |
318 318 * +--------------------------+
319 319 * |
320 320 * |
321 321 * v
322 322 * +--------------------------+
323 323 * | fcp_offline_tgt_luns |
324 324 * +--------------------------+
325 325 * |
326 326 * |
327 327 * v
328 328 * +--------------------------+
329 329 * | fcp_offline_lun |
330 330 * +--------------------------+
331 331 * |
332 332 * |
333 333 * v
334 334 * +----------------------------------+
335 335 * | fcp_offline_lun_now |
336 336 * | |
337 337 * | A request (or two if mpxio) is |
338 338 * | sent to the hot plug task using |
339 339 * | a fcp_hp_elem structure. |
340 340 * +----------------------------------+
341 341 */
342 342
343 343 /*
344 344 * Functions registered with DDI framework
345 345 */
346 346 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
347 347 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
348 348 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
349 349 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
350 350 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
351 351 cred_t *credp, int *rval);
352 352
353 353 /*
354 354 * Functions registered with FC Transport framework
355 355 */
356 356 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
357 357 fc_attach_cmd_t cmd, uint32_t s_id);
358 358 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
359 359 fc_detach_cmd_t cmd);
360 360 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
361 361 int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
362 362 uint32_t claimed);
363 363 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
364 364 fc_unsol_buf_t *buf, uint32_t claimed);
365 365 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
366 366 fc_unsol_buf_t *buf, uint32_t claimed);
367 367 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
368 368 uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
369 369 uint32_t dev_cnt, uint32_t port_sid);
370 370
371 371 /*
372 372 * Functions registered with SCSA framework
373 373 */
374 374 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
375 375 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
376 376 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
377 377 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
378 378 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
379 379 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
380 380 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
381 381 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
382 382 static int fcp_scsi_reset(struct scsi_address *ap, int level);
383 383 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
384 384 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
385 385 int whom);
386 386 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
387 387 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
388 388 void (*callback)(caddr_t), caddr_t arg);
389 389 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
390 390 char *name, ddi_eventcookie_t *event_cookiep);
391 391 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
392 392 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
393 393 ddi_callback_id_t *cb_id);
394 394 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
395 395 ddi_callback_id_t cb_id);
396 396 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
397 397 ddi_eventcookie_t eventid, void *impldata);
398 398 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
399 399 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
400 400 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
401 401 ddi_bus_config_op_t op, void *arg);
402 402
403 403 /*
404 404 * Internal functions
405 405 */
406 406 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
407 407 int mode, int *rval);
408 408
409 409 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
410 410 int mode, int *rval);
411 411 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
412 412 struct fcp_scsi_cmd *fscsi, int mode);
413 413 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
414 414 caddr_t base_addr, int mode);
415 415 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
416 416
417 417 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
418 418 la_wwn_t *pwwn, int *ret_val, int *fc_status, int *fc_pkt_state,
419 419 int *fc_pkt_reason, int *fc_pkt_action);
420 420 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
421 421 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
422 422 static int fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status,
423 423 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
424 424 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
425 425 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
426 426 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
427 427 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
428 428
429 429 static void fcp_handle_devices(struct fcp_port *pptr,
430 430 fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
431 431 fcp_map_tag_t *map_tag, int cause);
432 432 static int fcp_handle_mapflags(struct fcp_port *pptr,
433 433 struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
434 434 int tgt_cnt, int cause);
435 435 static int fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause);
436 436 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
437 437 struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
438 438 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
439 439 int cause);
440 440 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
441 441 uint32_t state);
442 442 static struct fcp_port *fcp_get_port(opaque_t port_handle);
443 443 static void fcp_unsol_callback(fc_packet_t *fpkt);
444 444 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
445 445 uchar_t r_ctl, uchar_t type);
446 446 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
447 447 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
448 448 struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
449 449 int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
450 450 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
451 451 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
452 452 int nodma, int flags);
453 453 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
454 454 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
455 455 uchar_t *wwn);
456 456 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
457 457 uint32_t d_id);
458 458 static void fcp_icmd_callback(fc_packet_t *fpkt);
459 459 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
460 460 int len, int lcount, int tcount, int cause, uint32_t rscn_count);
461 461 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
462 462 static void fcp_scsi_callback(fc_packet_t *fpkt);
463 463 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
464 464 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
465 465 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
466 466 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
467 467 uint16_t lun_num);
468 468 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
469 469 int link_cnt, int tgt_cnt, int cause);
470 470 static void fcp_finish_init(struct fcp_port *pptr);
471 471 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
472 472 int tgt_cnt, int cause);
473 473 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
474 474 int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags);
475 475 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
476 476 int link_cnt, int tgt_cnt, int nowait, int flags);
477 477 static void fcp_offline_target_now(struct fcp_port *pptr,
478 478 struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
479 479 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
480 480 int tgt_cnt, int flags);
481 481 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
482 482 int nowait, int flags);
483 483 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
484 484 int tgt_cnt);
485 485 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
486 486 int tgt_cnt, int flags);
487 487 static void fcp_scan_offline_luns(struct fcp_port *pptr);
488 488 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
489 489 static void fcp_update_offline_flags(struct fcp_lun *plun);
490 490 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
491 491 static void fcp_abort_commands(struct fcp_pkt *head, struct
492 492 fcp_port *pptr);
493 493 static void fcp_cmd_callback(fc_packet_t *fpkt);
494 494 static void fcp_complete_pkt(fc_packet_t *fpkt);
495 495 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
496 496 struct fcp_port *pptr);
497 497 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
498 498 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
499 499 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
500 500 static void fcp_dealloc_lun(struct fcp_lun *plun);
501 501 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
502 502 fc_portmap_t *map_entry, int link_cnt);
503 503 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
504 504 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
505 505 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
506 506 int internal);
507 507 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
508 508 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
509 509 uint32_t s_id, int instance);
510 510 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
511 511 int instance);
512 512 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
513 513 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
514 514 int);
515 515 static void fcp_kmem_cache_destructor(struct scsi_pkt *, scsi_hba_tran_t *);
516 516 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
517 517 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
518 518 int flags);
519 519 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
520 520 static int fcp_reset_target(struct scsi_address *ap, int level);
521 521 static int fcp_commoncap(struct scsi_address *ap, char *cap,
522 522 int val, int tgtonly, int doset);
523 523 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
524 524 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
525 525 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
526 526 int sleep);
527 527 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
528 528 uint32_t s_id, fc_attach_cmd_t cmd, int instance);
529 529 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
530 530 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
531 531 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
532 532 int lcount, int tcount);
533 533 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
534 534 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
535 535 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
536 536 int tgt_cnt);
537 537 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
538 538 dev_info_t *pdip, caddr_t name);
539 539 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
540 540 int lcount, int tcount, int flags, int *circ);
541 541 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
542 542 int lcount, int tcount, int flags, int *circ);
543 543 static void fcp_remove_child(struct fcp_lun *plun);
544 544 static void fcp_watch(void *arg);
545 545 static void fcp_check_reset_delay(struct fcp_port *pptr);
546 546 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
547 547 struct fcp_lun *rlun, int tgt_cnt);
548 548 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
549 549 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
550 550 uchar_t *wwn, uint16_t lun);
551 551 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
552 552 struct fcp_lun *plun);
553 553 static void fcp_post_callback(struct fcp_pkt *cmd);
554 554 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
555 555 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
556 556 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
557 557 child_info_t *cip);
558 558 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
559 559 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
560 560 int tgt_cnt, int flags);
561 561 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
562 562 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
563 563 int tgt_cnt, int flags, int wait);
564 564 static void fcp_retransport_cmd(struct fcp_port *pptr,
565 565 struct fcp_pkt *cmd);
566 566 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
567 567 uint_t statistics);
568 568 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
569 569 static void fcp_update_targets(struct fcp_port *pptr,
570 570 fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
571 571 static int fcp_call_finish_init(struct fcp_port *pptr,
572 572 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
573 573 static int fcp_call_finish_init_held(struct fcp_port *pptr,
574 574 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
575 575 static void fcp_reconfigure_luns(void * tgt_handle);
576 576 static void fcp_free_targets(struct fcp_port *pptr);
577 577 static void fcp_free_target(struct fcp_tgt *ptgt);
578 578 static int fcp_is_retryable(struct fcp_ipkt *icmd);
579 579 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
580 580 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
581 581 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
582 582 static void fcp_print_error(fc_packet_t *fpkt);
583 583 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
584 584 struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
585 585 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
586 586 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
587 587 uint32_t *dev_cnt);
588 588 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
589 589 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
590 590 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
591 591 struct fcp_ioctl *, struct fcp_port **);
592 592 static char *fcp_get_lun_path(struct fcp_lun *plun);
593 593 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
594 594 int *rval);
595 595 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
596 596 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
597 597 static char *fcp_get_lun_path(struct fcp_lun *plun);
598 598 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
599 599 int *rval);
600 600 static void fcp_reconfig_wait(struct fcp_port *pptr);
601 601
602 602 /*
603 603 * New functions added for mpxio support
604 604 */
605 605 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
606 606 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
607 607 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
608 608 int tcount);
609 609 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
610 610 dev_info_t *pdip);
611 611 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
612 612 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
613 613 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
614 614 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
615 615 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
616 616 int what);
617 617 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
618 618 fc_packet_t *fpkt);
619 619 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
620 620
621 621 /*
622 622 * New functions added for lun masking support
623 623 */
624 624 static void fcp_read_blacklist(dev_info_t *dip,
625 625 struct fcp_black_list_entry **pplun_blacklist);
626 626 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
627 627 struct fcp_black_list_entry **pplun_blacklist);
628 628 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
629 629 struct fcp_black_list_entry **pplun_blacklist);
630 630 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
631 631 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
632 632
633 633 /*
634 634 * New functions to support software FCA (like fcoei)
635 635 */
636 636 static struct scsi_pkt *fcp_pseudo_init_pkt(
637 637 struct scsi_address *ap, struct scsi_pkt *pkt,
638 638 struct buf *bp, int cmdlen, int statuslen,
639 639 int tgtlen, int flags, int (*callback)(), caddr_t arg);
640 640 static void fcp_pseudo_destroy_pkt(
641 641 struct scsi_address *ap, struct scsi_pkt *pkt);
642 642 static void fcp_pseudo_sync_pkt(
643 643 struct scsi_address *ap, struct scsi_pkt *pkt);
644 644 static int fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt);
645 645 static void fcp_pseudo_dmafree(
646 646 struct scsi_address *ap, struct scsi_pkt *pkt);
647 647
648 648 extern struct mod_ops mod_driverops;
649 649 /*
650 650 * This variable is defined in modctl.c and set to '1' after the root driver
651 651 * and fs are loaded. It serves as an indication that the root filesystem can
652 652 * be used.
653 653 */
654 654 extern int modrootloaded;
655 655 /*
656 656 * This table contains strings associated with the SCSI sense key codes. It
657 657 * is used by FCP to print a clear explanation of the code returned in the
658 658 * sense information by a device.
659 659 */
660 660 extern char *sense_keys[];
661 661 /*
662 662 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
663 663 * under this device that the paths to a physical device are created when
664 664 * MPxIO is used.
665 665 */
666 666 extern dev_info_t *scsi_vhci_dip;
667 667
668 668 /*
669 669 * Report lun processing
670 670 */
671 671 #define FCP_LUN_ADDRESSING 0x80
672 672 #define FCP_PD_ADDRESSING 0x00
673 673 #define FCP_VOLUME_ADDRESSING 0x40
674 674
675 675 #define FCP_SVE_THROTTLE 0x28 /* Vicom */
676 676 #define MAX_INT_DMA 0x7fffffff
677 677 /*
678 678 * Property definitions
679 679 */
680 680 #define NODE_WWN_PROP (char *)fcp_node_wwn_prop
681 681 #define PORT_WWN_PROP (char *)fcp_port_wwn_prop
682 682 #define TARGET_PROP (char *)fcp_target_prop
683 683 #define LUN_PROP (char *)fcp_lun_prop
684 684 #define SAM_LUN_PROP (char *)fcp_sam_lun_prop
685 685 #define CONF_WWN_PROP (char *)fcp_conf_wwn_prop
686 686 #define OBP_BOOT_WWN (char *)fcp_obp_boot_wwn
687 687 #define MANUAL_CFG_ONLY (char *)fcp_manual_config_only
688 688 #define INIT_PORT_PROP (char *)fcp_init_port_prop
689 689 #define TGT_PORT_PROP (char *)fcp_tgt_port_prop
690 690 #define LUN_BLACKLIST_PROP (char *)fcp_lun_blacklist_prop
691 691 /*
692 692 * Short hand macros.
693 693 */
694 694 #define LUN_PORT (plun->lun_tgt->tgt_port)
695 695 #define LUN_TGT (plun->lun_tgt)
696 696
697 697 /*
698 698 * Driver private macros
699 699 */
/*
 * Convert an ASCII hex digit ('0'-'9', 'a'-'f', 'A'-'F') to its numeric
 * value.  NOTE(review): evaluates x multiple times — do not pass an
 * expression with side effects; input is assumed to be a valid hex digit.
 */
#define	FCP_ATOB(x)	(((x) >= '0' && (x) <= '9') ? ((x) - '0') :	\
			((x) >= 'a' && (x) <= 'f') ?	\
			((x) - 'a' + 10) : ((x) - 'A' + 10))
703 703
/* Maximum of two values.  NOTE: evaluates both arguments twice. */
#define	FCP_MAX(a, b)	((a) > (b) ? (a) : (b))
705 705
706 706 #define FCP_N_NDI_EVENTS \
707 707 (sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))
708 708
/*
 * True when the link state has changed since the internal packet (icmd)
 * was issued: the port's link count no longer matches the count recorded
 * in the packet at issue time.
 */
#define	FCP_LINK_STATE_CHANGED(p, c)		\
	((p)->port_link_cnt != (c)->ipkt_link_cnt)

/*
 * True when the target has changed since the internal packet was issued,
 * per the target's change generation count.
 */
#define	FCP_TGT_STATE_CHANGED(t, c)		\
	((t)->tgt_change_cnt != (c)->ipkt_change_cnt)

/*
 * Composite staleness check; currently only the target generation is
 * compared — the port argument (p) is intentionally unused.
 */
#define	FCP_STATE_CHANGED(p, t, c)		\
	(FCP_TGT_STATE_CHANGED(t, c))
717 717
/*
 * True when the FC packet completed with a transient busy/offline state
 * or an offline reason code, i.e. a condition under which the command
 * should be retried rather than failed outright.
 */
#define	FCP_MUST_RETRY(fpkt)				\
	((fpkt)->pkt_state == FC_PKT_LOCAL_BSY ||	\
	(fpkt)->pkt_state == FC_PKT_LOCAL_RJT ||	\
	(fpkt)->pkt_state == FC_PKT_TRAN_BSY ||	\
	(fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS ||	\
	(fpkt)->pkt_state == FC_PKT_NPORT_BSY ||	\
	(fpkt)->pkt_state == FC_PKT_FABRIC_BSY ||	\
	(fpkt)->pkt_state == FC_PKT_PORT_OFFLINE ||	\
	(fpkt)->pkt_reason == FC_REASON_OFFLINE)
727 727
728 728 #define FCP_SENSE_REPORTLUN_CHANGED(es) \
729 729 ((es)->es_key == KEY_UNIT_ATTENTION && \
730 730 (es)->es_add_code == 0x3f && \
731 731 (es)->es_qual_code == 0x0e)
732 732
733 733 #define FCP_SENSE_NO_LUN(es) \
734 734 ((es)->es_key == KEY_ILLEGAL_REQUEST && \
735 735 (es)->es_add_code == 0x25 && \
736 736 (es)->es_qual_code == 0x0)
737 737
738 738 #define FCP_VERSION "20091208-1.192"
739 739 #define FCP_NAME_VERSION "SunFC FCP v" FCP_VERSION
740 740
741 741 #define FCP_NUM_ELEMENTS(array) \
742 742 (sizeof (array) / sizeof ((array)[0]))
743 743
744 744 /*
745 745 * Debugging, Error reporting, and tracing
746 746 */
747 747 #define FCP_LOG_SIZE 1024 * 1024
748 748
749 749 #define FCP_LEVEL_1 0x00001 /* attach/detach PM CPR */
750 750 #define FCP_LEVEL_2 0x00002 /* failures/Invalid data */
751 751 #define FCP_LEVEL_3 0x00004 /* state change, discovery */
752 752 #define FCP_LEVEL_4 0x00008 /* ULP messages */
753 753 #define FCP_LEVEL_5 0x00010 /* ELS/SCSI cmds */
754 754 #define FCP_LEVEL_6 0x00020 /* Transport failures */
755 755 #define FCP_LEVEL_7 0x00040
756 756 #define FCP_LEVEL_8 0x00080 /* I/O tracing */
757 757 #define FCP_LEVEL_9 0x00100 /* I/O tracing */
758 758
759 759
760 760
761 761 /*
762 762 * Log contents to system messages file
763 763 */
764 764 #define FCP_MSG_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
765 765 #define FCP_MSG_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
766 766 #define FCP_MSG_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
767 767 #define FCP_MSG_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
768 768 #define FCP_MSG_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
769 769 #define FCP_MSG_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
770 770 #define FCP_MSG_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
771 771 #define FCP_MSG_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
772 772 #define FCP_MSG_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_MSG)
773 773
774 774
775 775 /*
776 776 * Log contents to trace buffer
777 777 */
778 778 #define FCP_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
779 779 #define FCP_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
780 780 #define FCP_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
781 781 #define FCP_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
782 782 #define FCP_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
783 783 #define FCP_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
784 784 #define FCP_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
785 785 #define FCP_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
786 786 #define FCP_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF)
787 787
788 788
789 789 /*
790 790 * Log contents to both system messages file and trace buffer
791 791 */
792 792 #define FCP_MSG_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF | \
793 793 FC_TRACE_LOG_MSG)
794 794 #define FCP_MSG_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF | \
795 795 FC_TRACE_LOG_MSG)
796 796 #define FCP_MSG_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF | \
797 797 FC_TRACE_LOG_MSG)
798 798 #define FCP_MSG_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF | \
799 799 FC_TRACE_LOG_MSG)
800 800 #define FCP_MSG_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF | \
801 801 FC_TRACE_LOG_MSG)
802 802 #define FCP_MSG_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF | \
803 803 FC_TRACE_LOG_MSG)
804 804 #define FCP_MSG_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF | \
805 805 FC_TRACE_LOG_MSG)
806 806 #define FCP_MSG_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF | \
807 807 FC_TRACE_LOG_MSG)
808 808 #define FCP_MSG_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF | \
809 809 FC_TRACE_LOG_MSG)
810 810 #ifdef DEBUG
811 811 #define FCP_DTRACE fc_trace_debug
812 812 #else
813 813 #define FCP_DTRACE
814 814 #endif
815 815
816 816 #define FCP_TRACE fc_trace_debug
817 817
/*
 * cb_ops for the "fcp" pseudo device minor node created in fcp_attach().
 * Only open(9E), close(9E) and ioctl(9E) are implemented; all other
 * entry points are stubbed with nodev/nochpoll.
 */
static struct cb_ops fcp_cb_ops = {
	fcp_open,			/* open */
	fcp_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	fcp_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};
838 838
839 839
/*
 * dev_ops for the fcp driver.  attach/detach manage the single pseudo
 * node; there are no bus_ops or power management for this driver.
 */
static struct dev_ops fcp_ops = {
	DEVO_REV,
	0,
	ddi_getinfo_1to1,
	nulldev,		/* identify */
	nulldev,		/* probe */
	fcp_attach,		/* attach and detach are mandatory */
	fcp_detach,
	nodev,			/* reset */
	&fcp_cb_ops,		/* cb_ops */
	NULL,			/* bus_ops */
	NULL,			/* power */
};
853 853
854 854
/* Human-readable driver identification string ("SunFC FCP v..."). */
char *fcp_version = FCP_NAME_VERSION;

/* Loadable module linkage: this module is a device driver. */
static struct modldrv modldrv = {
	&mod_driverops,
	FCP_NAME_VERSION,
	&fcp_ops
};


static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};


/*
 * ULP registration passed to the FC transport via fc_ulp_add() in
 * _init().  The transport calls back through these vectors for port
 * attach/detach, port ioctls, ELS/data frames and state changes.
 */
static fc_ulp_modinfo_t fcp_modinfo = {
	&fcp_modinfo,			/* ulp_handle */
	FCTL_ULP_MODREV_4,		/* ulp_rev */
	FC4_SCSI_FCP,			/* ulp_type */
	"fcp",				/* ulp_name */
	FCP_STATEC_MASK,		/* ulp_statec_mask */
	fcp_port_attach,		/* ulp_port_attach */
	fcp_port_detach,		/* ulp_port_detach */
	fcp_port_ioctl,			/* ulp_port_ioctl */
	fcp_els_callback,		/* ulp_els_callback */
	fcp_data_callback,		/* ulp_data_callback */
	fcp_statec_callback		/* ulp_statec_callback */
};
884 884
885 885 #ifdef DEBUG
886 886 #define FCP_TRACE_DEFAULT (FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
887 887 FCP_LEVEL_2 | FCP_LEVEL_3 | \
888 888 FCP_LEVEL_4 | FCP_LEVEL_5 | \
889 889 FCP_LEVEL_6 | FCP_LEVEL_7)
890 890 #else
891 891 #define FCP_TRACE_DEFAULT (FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
892 892 FCP_LEVEL_2 | FCP_LEVEL_3 | \
893 893 FCP_LEVEL_4 | FCP_LEVEL_5 | \
894 894 FCP_LEVEL_6 | FCP_LEVEL_7)
895 895 #endif
896 896
897 897 /* FCP global variables */
898 898 int fcp_bus_config_debug = 0;
899 899 static int fcp_log_size = FCP_LOG_SIZE;
900 900 static int fcp_trace = FCP_TRACE_DEFAULT;
901 901 static fc_trace_logq_t *fcp_logq = NULL;
902 902 static struct fcp_black_list_entry *fcp_lun_blacklist = NULL;
903 903 /*
904 904 * The auto-configuration is set by default. The only way of disabling it is
905 905 * through the property MANUAL_CFG_ONLY in the fcp.conf file.
906 906 */
907 907 static int fcp_enable_auto_configuration = 1;
908 908 static int fcp_max_bus_config_retries = 4;
909 909 static int fcp_lun_ready_retry = 300;
910 910 /*
911 911 * The value assigned to the following variable has changed several times due
912 912 * to a problem with the data underruns reporting of some firmware(s). The
913 913 * current value of 50 gives a timeout value of 25 seconds for a max number
914 914 * of 256 LUNs.
915 915 */
916 916 static int fcp_max_target_retries = 50;
917 917 /*
918 918 * Watchdog variables
919 919 * ------------------
920 920 *
921 921 * fcp_watchdog_init
922 922 *
923 923 * Indicates if the watchdog timer is running or not. This is actually
924 924 * a counter of the number of Fibre Channel ports that attached. When
925 925 * the first port attaches the watchdog is started. When the last port
926 926 * detaches the watchdog timer is stopped.
927 927 *
928 928 * fcp_watchdog_time
929 929 *
930 930 * This is the watchdog clock counter. It is incremented by
931 931 * fcp_watchdog_time each time the watchdog timer expires.
932 932 *
933 933 * fcp_watchdog_timeout
934 934 *
935 935 * Increment value of the variable fcp_watchdog_time as well as the
936 936 * the timeout value of the watchdog timer. The unit is 1 second. It
937 937 * is strange that this is not a #define but a variable since the code
938 938 * never changes this value. The reason why it can be said that the
939 939 * unit is 1 second is because the number of ticks for the watchdog
940 940 * timer is determined like this:
941 941 *
942 942 * fcp_watchdog_tick = fcp_watchdog_timeout *
943 943 * drv_usectohz(1000000);
944 944 *
945 945 * The value 1000000 is hard coded in the code.
946 946 *
947 947 * fcp_watchdog_tick
948 948 *
949 949 * Watchdog timer value in ticks.
950 950 */
951 951 static int fcp_watchdog_init = 0;
952 952 static int fcp_watchdog_time = 0;
953 953 static int fcp_watchdog_timeout = 1;
954 954 static int fcp_watchdog_tick;
955 955
956 956 /*
957 957 * fcp_offline_delay is a global variable to enable customisation of
958 958 * the timeout on link offlines or RSCNs. The default value is set
959 959 * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
960 960 * specified in FCP4 Chapter 11 (see www.t10.org).
961 961 *
962 962 * The variable fcp_offline_delay is specified in SECONDS.
963 963 *
964 964 * If we made this a static var then the user would not be able to
965 965 * change it. This variable is set in fcp_attach().
966 966 */
967 967 unsigned int fcp_offline_delay = FCP_OFFLINE_DELAY;
968 968
969 969 static void *fcp_softstate = NULL; /* for soft state */
970 970 static uchar_t fcp_oflag = FCP_IDLE; /* open flag */
971 971 static kmutex_t fcp_global_mutex;
972 972 static kmutex_t fcp_ioctl_mutex;
973 973 static dev_info_t *fcp_global_dip = NULL;
974 974 static timeout_id_t fcp_watchdog_id;
975 975 const char *fcp_lun_prop = "lun";
976 976 const char *fcp_sam_lun_prop = "sam-lun";
977 977 const char *fcp_target_prop = "target";
978 978 /*
979 979 * NOTE: consumers of "node-wwn" property include stmsboot in ON
980 980 * consolidation.
981 981 */
982 982 const char *fcp_node_wwn_prop = "node-wwn";
983 983 const char *fcp_port_wwn_prop = "port-wwn";
984 984 const char *fcp_conf_wwn_prop = "fc-port-wwn";
985 985 const char *fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
986 986 const char *fcp_manual_config_only = "manual_configuration_only";
987 987 const char *fcp_init_port_prop = "initiator-port";
988 988 const char *fcp_tgt_port_prop = "target-port";
989 989 const char *fcp_lun_blacklist_prop = "pwwn-lun-blacklist";
990 990
991 991 static struct fcp_port *fcp_port_head = NULL;
992 992 static ddi_eventcookie_t fcp_insert_eid;
993 993 static ddi_eventcookie_t fcp_remove_eid;
994 994
995 995 static ndi_event_definition_t fcp_ndi_event_defs[] = {
996 996 { FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
997 997 { FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
998 998 };
999 999
1000 1000 /*
1001 1001 * List of valid commands for the scsi_ioctl call
1002 1002 */
1003 1003 static uint8_t scsi_ioctl_list[] = {
1004 1004 SCMD_INQUIRY,
1005 1005 SCMD_REPORT_LUN,
1006 1006 SCMD_READ_CAPACITY
1007 1007 };
1008 1008
1009 1009 /*
1010 1010 * this is used to dummy up a report lun response for cases
1011 1011 * where the target doesn't support it
1012 1012 */
1013 1013 static uchar_t fcp_dummy_lun[] = {
1014 1014 0x00, /* MSB length (length = no of luns * 8) */
1015 1015 0x00,
1016 1016 0x00,
1017 1017 0x08, /* LSB length */
1018 1018 0x00, /* MSB reserved */
1019 1019 0x00,
1020 1020 0x00,
1021 1021 0x00, /* LSB reserved */
1022 1022 FCP_PD_ADDRESSING,
1023 1023 0x00, /* LUN is ZERO at the first level */
1024 1024 0x00,
1025 1025 0x00, /* second level is zero */
1026 1026 0x00,
1027 1027 0x00, /* third level is zero */
1028 1028 0x00,
1029 1029 0x00 /* fourth level is zero */
1030 1030 };
1031 1031
/*
 * Indexed by AL_PA value; each non-zero entry is the corresponding
 * loop ("switch") identifier.  Zero entries mark AL_PA values that
 * are not valid on an arbitrated loop.
 * NOTE(review): mapping semantics inferred from the table name --
 * confirm against the FC-AL AL_PA assignment table.
 */
static uchar_t fcp_alpa_to_switch[] = {
	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
1058 1058
1059 1059 static caddr_t pid = "SESS01 ";
1060 1060
1061 1061 #if !defined(lint)
1062 1062
1063 1063 _NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
1064 1064 fcp_port::fcp_next fcp_watchdog_id))
1065 1065
1066 1066 _NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))
1067 1067
1068 1068 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1069 1069 fcp_insert_eid
1070 1070 fcp_remove_eid
1071 1071 fcp_watchdog_time))
1072 1072
1073 1073 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1074 1074 fcp_cb_ops
1075 1075 fcp_ops
1076 1076 callb_cpr))
1077 1077
1078 1078 #endif /* lint */
1079 1079
1080 1080 /*
1081 1081 * This table is used to determine whether or not it's safe to copy in
1082 1082 * the target node name for a lun. Since all luns behind the same target
1083 1083 * have the same wwnn, only tagets that do not support multiple luns are
1084 1084 * eligible to be enumerated under mpxio if they aren't page83 compliant.
1085 1085 */
1086 1086
1087 1087 char *fcp_symmetric_disk_table[] = {
1088 1088 "SEAGATE ST",
1089 1089 "IBM DDYFT",
1090 1090 "SUNW SUNWGS", /* Daktari enclosure */
1091 1091 "SUN SENA", /* SES device */
1092 1092 "SUN SESS01" /* VICOM SVE box */
1093 1093 };
1094 1094
1095 1095 int fcp_symmetric_disk_table_size =
1096 1096 sizeof (fcp_symmetric_disk_table)/sizeof (char *);
1097 1097
1098 1098 /*
1099 1099 * This structure is bogus. scsi_hba_attach_setup() requires, as in the kernel
1100 1100 * will panic if you don't pass this in to the routine, this information.
1101 1101 * Need to determine what the actual impact to the system is by providing
1102 1102 * this information if any. Since dma allocation is done in pkt_init it may
1103 1103 * not have any impact. These values are straight from the Writing Device
1104 1104 * Driver manual.
1105 1105 */
1106 1106 static ddi_dma_attr_t pseudo_fca_dma_attr = {
1107 1107 DMA_ATTR_V0, /* ddi_dma_attr version */
1108 1108 0, /* low address */
1109 1109 0xffffffff, /* high address */
1110 1110 0x00ffffff, /* counter upper bound */
1111 1111 1, /* alignment requirements */
1112 1112 0x3f, /* burst sizes */
1113 1113 1, /* minimum DMA access */
1114 1114 0xffffffff, /* maximum DMA access */
1115 1115 (1 << 24) - 1, /* segment boundary restrictions */
1116 1116 1, /* scater/gather list length */
1117 1117 512, /* device granularity */
1118 1118 0 /* DMA flags */
1119 1119 };
1120 1120
1121 1121 /*
1122 1122 * The _init(9e) return value should be that of mod_install(9f). Under
1123 1123 * some circumstances, a failure may not be related mod_install(9f) and
1124 1124 * one would then require a return value to indicate the failure. Looking
1125 1125 * at mod_install(9f), it is expected to return 0 for success and non-zero
1126 1126 * for failure. mod_install(9f) for device drivers, further goes down the
1127 1127 * calling chain and ends up in ddi_installdrv(), whose return values are
1128 1128 * DDI_SUCCESS and DDI_FAILURE - There are also other functions in the
1129 1129 * calling chain of mod_install(9f) which return values like EINVAL and
1130 1130 * in some even return -1.
1131 1131 *
1132 1132 * To work around the vagaries of the mod_install() calling chain, return
1133 1133 * either 0 or ENODEV depending on the success or failure of mod_install()
1134 1134 */
1135 1135 int
1136 1136 _init(void)
1137 1137 {
1138 1138 int rval;
1139 1139
1140 1140 /*
1141 1141 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
1142 1142 * before registering with the transport first.
1143 1143 */
1144 1144 if (ddi_soft_state_init(&fcp_softstate,
1145 1145 sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1146 1146 return (EINVAL);
1147 1147 }
1148 1148
1149 1149 mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1150 1150 mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1151 1151
1152 1152 if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1153 1153 cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1154 1154 mutex_destroy(&fcp_global_mutex);
1155 1155 mutex_destroy(&fcp_ioctl_mutex);
1156 1156 ddi_soft_state_fini(&fcp_softstate);
1157 1157 return (ENODEV);
1158 1158 }
1159 1159
1160 1160 fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1161 1161
1162 1162 if ((rval = mod_install(&modlinkage)) != 0) {
1163 1163 fc_trace_free_logq(fcp_logq);
1164 1164 (void) fc_ulp_remove(&fcp_modinfo);
1165 1165 mutex_destroy(&fcp_global_mutex);
1166 1166 mutex_destroy(&fcp_ioctl_mutex);
1167 1167 ddi_soft_state_fini(&fcp_softstate);
1168 1168 rval = ENODEV;
1169 1169 }
1170 1170
1171 1171 return (rval);
1172 1172 }
1173 1173
1174 1174
1175 1175 /*
1176 1176 * the system is done with us as a driver, so clean up
1177 1177 */
1178 1178 int
1179 1179 _fini(void)
1180 1180 {
1181 1181 int rval;
1182 1182
1183 1183 /*
1184 1184 * don't start cleaning up until we know that the module remove
1185 1185 * has worked -- if this works, then we know that each instance
1186 1186 * has successfully been DDI_DETACHed
1187 1187 */
1188 1188 if ((rval = mod_remove(&modlinkage)) != 0) {
1189 1189 return (rval);
1190 1190 }
1191 1191
1192 1192 (void) fc_ulp_remove(&fcp_modinfo);
1193 1193
1194 1194 ddi_soft_state_fini(&fcp_softstate);
1195 1195 mutex_destroy(&fcp_global_mutex);
1196 1196 mutex_destroy(&fcp_ioctl_mutex);
1197 1197 fc_trace_free_logq(fcp_logq);
1198 1198
1199 1199 return (rval);
1200 1200 }
1201 1201
1202 1202
/*
 * _info(9E): report module information via mod_info(9F).
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
1208 1208
1209 1209
1210 1210 /*
1211 1211 * attach the module
1212 1212 */
1213 1213 static int
1214 1214 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1215 1215 {
1216 1216 int rval = DDI_SUCCESS;
1217 1217
1218 1218 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1219 1219 FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1220 1220
1221 1221 if (cmd == DDI_ATTACH) {
1222 1222 /* The FCP pseudo device is created here. */
1223 1223 mutex_enter(&fcp_global_mutex);
1224 1224 fcp_global_dip = devi;
1225 1225 mutex_exit(&fcp_global_mutex);
1226 1226
1227 1227 if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1228 1228 0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1229 1229 ddi_report_dev(fcp_global_dip);
1230 1230 } else {
1231 1231 cmn_err(CE_WARN, "FCP: Cannot create minor node");
1232 1232 mutex_enter(&fcp_global_mutex);
1233 1233 fcp_global_dip = NULL;
1234 1234 mutex_exit(&fcp_global_mutex);
1235 1235
1236 1236 rval = DDI_FAILURE;
1237 1237 }
1238 1238 /*
1239 1239 * We check the fcp_offline_delay property at this
1240 1240 * point. This variable is global for the driver,
1241 1241 * not specific to an instance.
1242 1242 *
1243 1243 * We do not recommend setting the value to less
1244 1244 * than 10 seconds (RA_TOV_els), or greater than
1245 1245 * 60 seconds.
1246 1246 */
1247 1247 fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1248 1248 devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1249 1249 "fcp_offline_delay", FCP_OFFLINE_DELAY);
1250 1250 if ((fcp_offline_delay < 10) ||
1251 1251 (fcp_offline_delay > 60)) {
1252 1252 cmn_err(CE_WARN, "Setting fcp_offline_delay "
1253 1253 "to %d second(s). This is outside the "
1254 1254 "recommended range of 10..60 seconds.",
1255 1255 fcp_offline_delay);
1256 1256 }
1257 1257 }
1258 1258
1259 1259 return (rval);
1260 1260 }
1261 1261
1262 1262
1263 1263 /*ARGSUSED*/
1264 1264 static int
1265 1265 fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1266 1266 {
1267 1267 int res = DDI_SUCCESS;
1268 1268
1269 1269 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1270 1270 FCP_BUF_LEVEL_8, 0, "module detach: cmd=0x%x", cmd);
1271 1271
1272 1272 if (cmd == DDI_DETACH) {
1273 1273 /*
1274 1274 * Check if there are active ports/threads. If there
1275 1275 * are any, we will fail, else we will succeed (there
1276 1276 * should not be much to clean up)
1277 1277 */
1278 1278 mutex_enter(&fcp_global_mutex);
1279 1279 FCP_DTRACE(fcp_logq, "fcp",
1280 1280 fcp_trace, FCP_BUF_LEVEL_8, 0, "port_head=%p",
1281 1281 (void *) fcp_port_head);
1282 1282
1283 1283 if (fcp_port_head == NULL) {
1284 1284 ddi_remove_minor_node(fcp_global_dip, NULL);
1285 1285 fcp_global_dip = NULL;
1286 1286 mutex_exit(&fcp_global_mutex);
1287 1287 } else {
1288 1288 mutex_exit(&fcp_global_mutex);
1289 1289 res = DDI_FAILURE;
1290 1290 }
1291 1291 }
1292 1292 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1293 1293 FCP_BUF_LEVEL_8, 0, "module detach returning %d", res);
1294 1294
1295 1295 return (res);
1296 1296 }
1297 1297
1298 1298
1299 1299 /* ARGSUSED */
1300 1300 static int
1301 1301 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1302 1302 {
1303 1303 if (otype != OTYP_CHR) {
1304 1304 return (EINVAL);
1305 1305 }
1306 1306
1307 1307 /*
1308 1308 * Allow only root to talk;
1309 1309 */
1310 1310 if (drv_priv(credp)) {
1311 1311 return (EPERM);
1312 1312 }
1313 1313
1314 1314 mutex_enter(&fcp_global_mutex);
1315 1315 if (fcp_oflag & FCP_EXCL) {
1316 1316 mutex_exit(&fcp_global_mutex);
1317 1317 return (EBUSY);
1318 1318 }
1319 1319
1320 1320 if (flag & FEXCL) {
1321 1321 if (fcp_oflag & FCP_OPEN) {
1322 1322 mutex_exit(&fcp_global_mutex);
1323 1323 return (EBUSY);
1324 1324 }
1325 1325 fcp_oflag |= FCP_EXCL;
1326 1326 }
1327 1327 fcp_oflag |= FCP_OPEN;
1328 1328 mutex_exit(&fcp_global_mutex);
1329 1329
1330 1330 return (0);
1331 1331 }
1332 1332
1333 1333
1334 1334 /* ARGSUSED */
1335 1335 static int
1336 1336 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1337 1337 {
1338 1338 if (otype != OTYP_CHR) {
1339 1339 return (EINVAL);
1340 1340 }
1341 1341
1342 1342 mutex_enter(&fcp_global_mutex);
1343 1343 if (!(fcp_oflag & FCP_OPEN)) {
1344 1344 mutex_exit(&fcp_global_mutex);
1345 1345 return (ENODEV);
1346 1346 }
1347 1347 fcp_oflag = FCP_IDLE;
1348 1348 mutex_exit(&fcp_global_mutex);
1349 1349
1350 1350 return (0);
1351 1351 }
1352 1352
1353 1353
1354 1354 /*
1355 1355 * fcp_ioctl
1356 1356 * Entry point for the FCP ioctls
1357 1357 *
1358 1358 * Input:
1359 1359 * See ioctl(9E)
1360 1360 *
1361 1361 * Output:
1362 1362 * See ioctl(9E)
1363 1363 *
1364 1364 * Returns:
1365 1365 * See ioctl(9E)
1366 1366 *
1367 1367 * Context:
1368 1368 * Kernel context.
1369 1369 */
1370 1370 /* ARGSUSED */
1371 1371 static int
1372 1372 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1373 1373 int *rval)
1374 1374 {
1375 1375 int ret = 0;
1376 1376
1377 1377 mutex_enter(&fcp_global_mutex);
1378 1378 if (!(fcp_oflag & FCP_OPEN)) {
1379 1379 mutex_exit(&fcp_global_mutex);
1380 1380 return (ENXIO);
1381 1381 }
1382 1382 mutex_exit(&fcp_global_mutex);
1383 1383
1384 1384 switch (cmd) {
1385 1385 case FCP_TGT_INQUIRY:
1386 1386 case FCP_TGT_CREATE:
1387 1387 case FCP_TGT_DELETE:
1388 1388 ret = fcp_setup_device_data_ioctl(cmd,
1389 1389 (struct fcp_ioctl *)data, mode, rval);
1390 1390 break;
1391 1391
1392 1392 case FCP_TGT_SEND_SCSI:
1393 1393 mutex_enter(&fcp_ioctl_mutex);
1394 1394 ret = fcp_setup_scsi_ioctl(
1395 1395 (struct fcp_scsi_cmd *)data, mode, rval);
1396 1396 mutex_exit(&fcp_ioctl_mutex);
1397 1397 break;
1398 1398
1399 1399 case FCP_STATE_COUNT:
1400 1400 ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1401 1401 mode, rval);
1402 1402 break;
1403 1403 case FCP_GET_TARGET_MAPPINGS:
1404 1404 ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1405 1405 mode, rval);
1406 1406 break;
1407 1407 default:
1408 1408 fcp_log(CE_WARN, NULL,
1409 1409 "!Invalid ioctl opcode = 0x%x", cmd);
1410 1410 ret = EINVAL;
1411 1411 }
1412 1412
1413 1413 return (ret);
1414 1414 }
1415 1415
1416 1416
1417 1417 /*
1418 1418 * fcp_setup_device_data_ioctl
1419 1419 * Setup handler for the "device data" style of
1420 1420 * ioctl for FCP. See "fcp_util.h" for data structure
1421 1421 * definition.
1422 1422 *
1423 1423 * Input:
1424 1424 * cmd = FCP ioctl command
1425 1425 * data = ioctl data
1426 1426 * mode = See ioctl(9E)
1427 1427 *
1428 1428 * Output:
1429 1429 * data = ioctl data
1430 1430 * rval = return value - see ioctl(9E)
1431 1431 *
1432 1432 * Returns:
1433 1433 * See ioctl(9E)
1434 1434 *
1435 1435 * Context:
1436 1436 * Kernel context.
1437 1437 */
1438 1438 /* ARGSUSED */
1439 1439 static int
1440 1440 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1441 1441 int *rval)
1442 1442 {
1443 1443 struct fcp_port *pptr;
1444 1444 struct device_data *dev_data;
1445 1445 uint32_t link_cnt;
1446 1446 la_wwn_t *wwn_ptr = NULL;
1447 1447 struct fcp_tgt *ptgt = NULL;
1448 1448 struct fcp_lun *plun = NULL;
1449 1449 int i, error;
1450 1450 struct fcp_ioctl fioctl;
1451 1451
1452 1452 #ifdef _MULTI_DATAMODEL
1453 1453 switch (ddi_model_convert_from(mode & FMODELS)) {
1454 1454 case DDI_MODEL_ILP32: {
1455 1455 struct fcp32_ioctl f32_ioctl;
1456 1456
1457 1457 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1458 1458 sizeof (struct fcp32_ioctl), mode)) {
1459 1459 return (EFAULT);
1460 1460 }
1461 1461 fioctl.fp_minor = f32_ioctl.fp_minor;
1462 1462 fioctl.listlen = f32_ioctl.listlen;
1463 1463 fioctl.list = (caddr_t)(long)f32_ioctl.list;
1464 1464 break;
1465 1465 }
1466 1466 case DDI_MODEL_NONE:
1467 1467 if (ddi_copyin((void *)data, (void *)&fioctl,
1468 1468 sizeof (struct fcp_ioctl), mode)) {
1469 1469 return (EFAULT);
1470 1470 }
1471 1471 break;
1472 1472 }
1473 1473
1474 1474 #else /* _MULTI_DATAMODEL */
1475 1475 if (ddi_copyin((void *)data, (void *)&fioctl,
1476 1476 sizeof (struct fcp_ioctl), mode)) {
1477 1477 return (EFAULT);
1478 1478 }
1479 1479 #endif /* _MULTI_DATAMODEL */
1480 1480
1481 1481 /*
1482 1482 * Right now we can assume that the minor number matches with
1483 1483 * this instance of fp. If this changes we will need to
1484 1484 * revisit this logic.
1485 1485 */
1486 1486 mutex_enter(&fcp_global_mutex);
1487 1487 pptr = fcp_port_head;
1488 1488 while (pptr) {
1489 1489 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1490 1490 break;
1491 1491 } else {
1492 1492 pptr = pptr->port_next;
1493 1493 }
1494 1494 }
1495 1495 mutex_exit(&fcp_global_mutex);
1496 1496 if (pptr == NULL) {
1497 1497 return (ENXIO);
1498 1498 }
1499 1499 mutex_enter(&pptr->port_mutex);
1500 1500
1501 1501
1502 1502 if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1503 1503 fioctl.listlen, KM_NOSLEEP)) == NULL) {
1504 1504 mutex_exit(&pptr->port_mutex);
1505 1505 return (ENOMEM);
1506 1506 }
1507 1507
1508 1508 if (ddi_copyin(fioctl.list, dev_data,
1509 1509 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1510 1510 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1511 1511 mutex_exit(&pptr->port_mutex);
1512 1512 return (EFAULT);
1513 1513 }
1514 1514 link_cnt = pptr->port_link_cnt;
1515 1515
1516 1516 if (cmd == FCP_TGT_INQUIRY) {
1517 1517 wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1518 1518 if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1519 1519 sizeof (wwn_ptr->raw_wwn)) == 0) {
1520 1520 /* This ioctl is requesting INQ info of local HBA */
1521 1521 mutex_exit(&pptr->port_mutex);
1522 1522 dev_data[0].dev0_type = DTYPE_UNKNOWN;
1523 1523 dev_data[0].dev_status = 0;
1524 1524 if (ddi_copyout(dev_data, fioctl.list,
1525 1525 (sizeof (struct device_data)) * fioctl.listlen,
1526 1526 mode)) {
1527 1527 kmem_free(dev_data,
1528 1528 sizeof (*dev_data) * fioctl.listlen);
1529 1529 return (EFAULT);
1530 1530 }
1531 1531 kmem_free(dev_data,
1532 1532 sizeof (*dev_data) * fioctl.listlen);
1533 1533 #ifdef _MULTI_DATAMODEL
1534 1534 switch (ddi_model_convert_from(mode & FMODELS)) {
1535 1535 case DDI_MODEL_ILP32: {
1536 1536 struct fcp32_ioctl f32_ioctl;
1537 1537 f32_ioctl.fp_minor = fioctl.fp_minor;
1538 1538 f32_ioctl.listlen = fioctl.listlen;
1539 1539 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1540 1540 if (ddi_copyout((void *)&f32_ioctl,
1541 1541 (void *)data,
1542 1542 sizeof (struct fcp32_ioctl), mode)) {
1543 1543 return (EFAULT);
1544 1544 }
1545 1545 break;
1546 1546 }
1547 1547 case DDI_MODEL_NONE:
1548 1548 if (ddi_copyout((void *)&fioctl, (void *)data,
1549 1549 sizeof (struct fcp_ioctl), mode)) {
1550 1550 return (EFAULT);
1551 1551 }
1552 1552 break;
1553 1553 }
1554 1554 #else /* _MULTI_DATAMODEL */
1555 1555 if (ddi_copyout((void *)&fioctl, (void *)data,
1556 1556 sizeof (struct fcp_ioctl), mode)) {
1557 1557 return (EFAULT);
1558 1558 }
1559 1559 #endif /* _MULTI_DATAMODEL */
1560 1560 return (0);
1561 1561 }
1562 1562 }
1563 1563
1564 1564 if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1565 1565 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1566 1566 mutex_exit(&pptr->port_mutex);
1567 1567 return (ENXIO);
1568 1568 }
1569 1569
1570 1570 for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1571 1571 i++) {
1572 1572 wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1573 1573
1574 1574 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1575 1575
1576 1576
1577 1577 dev_data[i].dev_status = ENXIO;
1578 1578
1579 1579 if ((ptgt = fcp_lookup_target(pptr,
1580 1580 (uchar_t *)wwn_ptr)) == NULL) {
1581 1581 mutex_exit(&pptr->port_mutex);
1582 1582 if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1583 1583 wwn_ptr, &error, 0) == NULL) {
1584 1584 dev_data[i].dev_status = ENODEV;
1585 1585 mutex_enter(&pptr->port_mutex);
1586 1586 continue;
1587 1587 } else {
1588 1588
1589 1589 dev_data[i].dev_status = EAGAIN;
1590 1590
1591 1591 mutex_enter(&pptr->port_mutex);
1592 1592 continue;
1593 1593 }
1594 1594 } else {
1595 1595 mutex_enter(&ptgt->tgt_mutex);
1596 1596 if (ptgt->tgt_state & (FCP_TGT_MARK |
1597 1597 FCP_TGT_BUSY)) {
1598 1598 dev_data[i].dev_status = EAGAIN;
1599 1599 mutex_exit(&ptgt->tgt_mutex);
1600 1600 continue;
1601 1601 }
1602 1602
1603 1603 if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1604 1604 if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1605 1605 dev_data[i].dev_status = ENOTSUP;
1606 1606 } else {
1607 1607 dev_data[i].dev_status = ENXIO;
1608 1608 }
1609 1609 mutex_exit(&ptgt->tgt_mutex);
1610 1610 continue;
1611 1611 }
1612 1612
1613 1613 switch (cmd) {
1614 1614 case FCP_TGT_INQUIRY:
1615 1615 /*
1616 1616 * The reason we give device type of
1617 1617 * lun 0 only even though in some
1618 1618 * cases(like maxstrat) lun 0 device
1619 1619 * type may be 0x3f(invalid) is that
1620 1620 * for bridge boxes target will appear
1621 1621 * as luns and the first lun could be
1622 1622 * a device that utility may not care
1623 1623 * about (like a tape device).
1624 1624 */
1625 1625 dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1626 1626 dev_data[i].dev_status = 0;
1627 1627 mutex_exit(&ptgt->tgt_mutex);
1628 1628
1629 1629 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1630 1630 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1631 1631 } else {
1632 1632 dev_data[i].dev0_type = plun->lun_type;
1633 1633 }
1634 1634 mutex_enter(&ptgt->tgt_mutex);
1635 1635 break;
1636 1636
1637 1637 case FCP_TGT_CREATE:
1638 1638 mutex_exit(&ptgt->tgt_mutex);
1639 1639 mutex_exit(&pptr->port_mutex);
1640 1640
1641 1641 /*
1642 1642 * serialize state change call backs.
1643 1643 * only one call back will be handled
1644 1644 * at a time.
1645 1645 */
1646 1646 mutex_enter(&fcp_global_mutex);
1647 1647 if (fcp_oflag & FCP_BUSY) {
1648 1648 mutex_exit(&fcp_global_mutex);
1649 1649 if (dev_data) {
1650 1650 kmem_free(dev_data,
1651 1651 sizeof (*dev_data) *
1652 1652 fioctl.listlen);
1653 1653 }
1654 1654 return (EBUSY);
1655 1655 }
1656 1656 fcp_oflag |= FCP_BUSY;
1657 1657 mutex_exit(&fcp_global_mutex);
1658 1658
1659 1659 dev_data[i].dev_status =
1660 1660 fcp_create_on_demand(pptr,
1661 1661 wwn_ptr->raw_wwn);
1662 1662
1663 1663 if (dev_data[i].dev_status != 0) {
1664 1664 char buf[25];
1665 1665
1666 1666 for (i = 0; i < FC_WWN_SIZE; i++) {
1667 1667 (void) sprintf(&buf[i << 1],
1668 1668 "%02x",
1669 1669 wwn_ptr->raw_wwn[i]);
1670 1670 }
1671 1671
1672 1672 fcp_log(CE_WARN, pptr->port_dip,
1673 1673 "!Failed to create nodes for"
1674 1674 " pwwn=%s; error=%x", buf,
1675 1675 dev_data[i].dev_status);
1676 1676 }
1677 1677
1678 1678 /* allow state change call backs again */
1679 1679 mutex_enter(&fcp_global_mutex);
1680 1680 fcp_oflag &= ~FCP_BUSY;
1681 1681 mutex_exit(&fcp_global_mutex);
1682 1682
1683 1683 mutex_enter(&pptr->port_mutex);
1684 1684 mutex_enter(&ptgt->tgt_mutex);
1685 1685
1686 1686 break;
1687 1687
1688 1688 case FCP_TGT_DELETE:
1689 1689 break;
1690 1690
1691 1691 default:
1692 1692 fcp_log(CE_WARN, pptr->port_dip,
1693 1693 "!Invalid device data ioctl "
1694 1694 "opcode = 0x%x", cmd);
1695 1695 }
1696 1696 mutex_exit(&ptgt->tgt_mutex);
1697 1697 }
1698 1698 }
1699 1699 mutex_exit(&pptr->port_mutex);
1700 1700
1701 1701 if (ddi_copyout(dev_data, fioctl.list,
1702 1702 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1703 1703 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1704 1704 return (EFAULT);
1705 1705 }
1706 1706 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1707 1707
1708 1708 #ifdef _MULTI_DATAMODEL
1709 1709 switch (ddi_model_convert_from(mode & FMODELS)) {
1710 1710 case DDI_MODEL_ILP32: {
1711 1711 struct fcp32_ioctl f32_ioctl;
1712 1712
1713 1713 f32_ioctl.fp_minor = fioctl.fp_minor;
1714 1714 f32_ioctl.listlen = fioctl.listlen;
1715 1715 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1716 1716 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1717 1717 sizeof (struct fcp32_ioctl), mode)) {
1718 1718 return (EFAULT);
1719 1719 }
1720 1720 break;
1721 1721 }
1722 1722 case DDI_MODEL_NONE:
1723 1723 if (ddi_copyout((void *)&fioctl, (void *)data,
1724 1724 sizeof (struct fcp_ioctl), mode)) {
1725 1725 return (EFAULT);
1726 1726 }
1727 1727 break;
1728 1728 }
1729 1729 #else /* _MULTI_DATAMODEL */
1730 1730
1731 1731 if (ddi_copyout((void *)&fioctl, (void *)data,
1732 1732 sizeof (struct fcp_ioctl), mode)) {
1733 1733 return (EFAULT);
1734 1734 }
1735 1735 #endif /* _MULTI_DATAMODEL */
1736 1736
1737 1737 return (0);
1738 1738 }
1739 1739
1740 1740 /*
1741 1741 * Fetch the target mappings (path, etc.) for all LUNs
1742 1742 * on this port.
1743 1743 */
1744 1744 /* ARGSUSED */
1745 1745 static int
1746 1746 fcp_get_target_mappings(struct fcp_ioctl *data,
1747 1747 int mode, int *rval)
1748 1748 {
1749 1749 struct fcp_port *pptr;
1750 1750 fc_hba_target_mappings_t *mappings;
1751 1751 fc_hba_mapping_entry_t *map;
1752 1752 struct fcp_tgt *ptgt = NULL;
1753 1753 struct fcp_lun *plun = NULL;
1754 1754 int i, mapIndex, mappingSize;
1755 1755 int listlen;
1756 1756 struct fcp_ioctl fioctl;
1757 1757 char *path;
1758 1758 fcp_ent_addr_t sam_lun_addr;
1759 1759
1760 1760 #ifdef _MULTI_DATAMODEL
1761 1761 switch (ddi_model_convert_from(mode & FMODELS)) {
1762 1762 case DDI_MODEL_ILP32: {
1763 1763 struct fcp32_ioctl f32_ioctl;
1764 1764
1765 1765 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1766 1766 sizeof (struct fcp32_ioctl), mode)) {
1767 1767 return (EFAULT);
1768 1768 }
1769 1769 fioctl.fp_minor = f32_ioctl.fp_minor;
1770 1770 fioctl.listlen = f32_ioctl.listlen;
1771 1771 fioctl.list = (caddr_t)(long)f32_ioctl.list;
1772 1772 break;
1773 1773 }
1774 1774 case DDI_MODEL_NONE:
1775 1775 if (ddi_copyin((void *)data, (void *)&fioctl,
1776 1776 sizeof (struct fcp_ioctl), mode)) {
1777 1777 return (EFAULT);
1778 1778 }
1779 1779 break;
1780 1780 }
1781 1781
1782 1782 #else /* _MULTI_DATAMODEL */
1783 1783 if (ddi_copyin((void *)data, (void *)&fioctl,
1784 1784 sizeof (struct fcp_ioctl), mode)) {
1785 1785 return (EFAULT);
1786 1786 }
1787 1787 #endif /* _MULTI_DATAMODEL */
1788 1788
1789 1789 /*
1790 1790 * Right now we can assume that the minor number matches with
1791 1791 * this instance of fp. If this changes we will need to
1792 1792 * revisit this logic.
1793 1793 */
1794 1794 mutex_enter(&fcp_global_mutex);
1795 1795 pptr = fcp_port_head;
1796 1796 while (pptr) {
1797 1797 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1798 1798 break;
1799 1799 } else {
1800 1800 pptr = pptr->port_next;
1801 1801 }
1802 1802 }
1803 1803 mutex_exit(&fcp_global_mutex);
1804 1804 if (pptr == NULL) {
1805 1805 cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
1806 1806 fioctl.fp_minor);
1807 1807 return (ENXIO);
1808 1808 }
1809 1809
1810 1810
1811 1811 /* We use listlen to show the total buffer size */
1812 1812 mappingSize = fioctl.listlen;
1813 1813
1814 1814 /* Now calculate how many mapping entries will fit */
1815 1815 listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
1816 1816 - sizeof (fc_hba_target_mappings_t);
1817 1817 if (listlen <= 0) {
1818 1818 cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
1819 1819 return (ENXIO);
1820 1820 }
1821 1821 listlen = listlen / sizeof (fc_hba_mapping_entry_t);
1822 1822
1823 1823 if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
1824 1824 return (ENOMEM);
1825 1825 }
1826 1826 mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;
1827 1827
1828 1828 /* Now get to work */
1829 1829 mapIndex = 0;
1830 1830
1831 1831 mutex_enter(&pptr->port_mutex);
1832 1832 /* Loop through all targets on this port */
1833 1833 for (i = 0; i < FCP_NUM_HASH; i++) {
1834 1834 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
1835 1835 ptgt = ptgt->tgt_next) {
1836 1836
1837 1837 mutex_enter(&ptgt->tgt_mutex);
1838 1838
1839 1839 /* Loop through all LUNs on this target */
1840 1840 for (plun = ptgt->tgt_lun; plun != NULL;
1841 1841 plun = plun->lun_next) {
1842 1842 if (plun->lun_state & FCP_LUN_OFFLINE) {
1843 1843 continue;
1844 1844 }
1845 1845
1846 1846 path = fcp_get_lun_path(plun);
1847 1847 if (path == NULL) {
1848 1848 continue;
1849 1849 }
1850 1850
1851 1851 if (mapIndex >= listlen) {
1852 1852 mapIndex ++;
1853 1853 kmem_free(path, MAXPATHLEN);
1854 1854 continue;
1855 1855 }
1856 1856 map = &mappings->entries[mapIndex++];
1857 1857 bcopy(path, map->targetDriver,
1858 1858 sizeof (map->targetDriver));
1859 1859 map->d_id = ptgt->tgt_d_id;
1860 1860 map->busNumber = 0;
1861 1861 map->targetNumber = ptgt->tgt_d_id;
1862 1862 map->osLUN = plun->lun_num;
1863 1863
1864 1864 /*
1865 1865 * We had swapped lun when we stored it in
1866 1866 * lun_addr. We need to swap it back before
1867 1867 * returning it to user land
1868 1868 */
1869 1869
1870 1870 sam_lun_addr.ent_addr_0 =
1871 1871 BE_16(plun->lun_addr.ent_addr_0);
1872 1872 sam_lun_addr.ent_addr_1 =
1873 1873 BE_16(plun->lun_addr.ent_addr_1);
1874 1874 sam_lun_addr.ent_addr_2 =
1875 1875 BE_16(plun->lun_addr.ent_addr_2);
1876 1876 sam_lun_addr.ent_addr_3 =
1877 1877 BE_16(plun->lun_addr.ent_addr_3);
1878 1878
1879 1879 bcopy(&sam_lun_addr, &map->samLUN,
1880 1880 FCP_LUN_SIZE);
1881 1881 bcopy(ptgt->tgt_node_wwn.raw_wwn,
1882 1882 map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
1883 1883 bcopy(ptgt->tgt_port_wwn.raw_wwn,
1884 1884 map->PortWWN.raw_wwn, sizeof (la_wwn_t));
1885 1885
1886 1886 if (plun->lun_guid) {
1887 1887
1888 1888 /* convert ascii wwn to bytes */
1889 1889 fcp_ascii_to_wwn(plun->lun_guid,
1890 1890 map->guid, sizeof (map->guid));
1891 1891
1892 1892 if ((sizeof (map->guid)) <
1893 1893 plun->lun_guid_size / 2) {
1894 1894 cmn_err(CE_WARN,
1895 1895 "fcp_get_target_mappings:"
1896 1896 "guid copy space "
1897 1897 "insufficient."
1898 1898 "Copy Truncation - "
1899 1899 "available %d; need %d",
1900 1900 (int)sizeof (map->guid),
1901 1901 (int)
1902 1902 plun->lun_guid_size / 2);
1903 1903 }
1904 1904 }
1905 1905 kmem_free(path, MAXPATHLEN);
1906 1906 }
1907 1907 mutex_exit(&ptgt->tgt_mutex);
1908 1908 }
1909 1909 }
1910 1910 mutex_exit(&pptr->port_mutex);
1911 1911 mappings->numLuns = mapIndex;
1912 1912
1913 1913 if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
1914 1914 kmem_free(mappings, mappingSize);
1915 1915 return (EFAULT);
1916 1916 }
1917 1917 kmem_free(mappings, mappingSize);
1918 1918
1919 1919 #ifdef _MULTI_DATAMODEL
1920 1920 switch (ddi_model_convert_from(mode & FMODELS)) {
1921 1921 case DDI_MODEL_ILP32: {
1922 1922 struct fcp32_ioctl f32_ioctl;
1923 1923
1924 1924 f32_ioctl.fp_minor = fioctl.fp_minor;
1925 1925 f32_ioctl.listlen = fioctl.listlen;
1926 1926 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1927 1927 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1928 1928 sizeof (struct fcp32_ioctl), mode)) {
1929 1929 return (EFAULT);
1930 1930 }
1931 1931 break;
1932 1932 }
1933 1933 case DDI_MODEL_NONE:
1934 1934 if (ddi_copyout((void *)&fioctl, (void *)data,
1935 1935 sizeof (struct fcp_ioctl), mode)) {
1936 1936 return (EFAULT);
1937 1937 }
1938 1938 break;
1939 1939 }
1940 1940 #else /* _MULTI_DATAMODEL */
1941 1941
1942 1942 if (ddi_copyout((void *)&fioctl, (void *)data,
1943 1943 sizeof (struct fcp_ioctl), mode)) {
1944 1944 return (EFAULT);
1945 1945 }
1946 1946 #endif /* _MULTI_DATAMODEL */
1947 1947
1948 1948 return (0);
1949 1949 }
1950 1950
1951 1951 /*
1952 1952 * fcp_setup_scsi_ioctl
1953 1953 * Setup handler for the "scsi passthru" style of
1954 1954 * ioctl for FCP. See "fcp_util.h" for data structure
1955 1955 * definition.
1956 1956 *
1957 1957 * Input:
1958 1958 * u_fscsi = ioctl data (user address space)
1959 1959 * mode = See ioctl(9E)
1960 1960 *
1961 1961 * Output:
1962 1962 * u_fscsi = ioctl data (user address space)
1963 1963 * rval = return value - see ioctl(9E)
1964 1964 *
1965 1965 * Returns:
1966 1966 * 0 = OK
1967 1967 * EAGAIN = See errno.h
1968 1968 * EBUSY = See errno.h
1969 1969 * EFAULT = See errno.h
1970 1970 * EINTR = See errno.h
1971 1971 * EINVAL = See errno.h
1972 1972 * EIO = See errno.h
1973 1973 * ENOMEM = See errno.h
1974 1974 * ENXIO = See errno.h
1975 1975 *
1976 1976 * Context:
1977 1977 * Kernel context.
1978 1978 */
1979 1979 /* ARGSUSED */
1980 1980 static int
1981 1981 fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
1982 1982 int mode, int *rval)
1983 1983 {
1984 1984 int ret = 0;
1985 1985 int temp_ret;
1986 1986 caddr_t k_cdbbufaddr = NULL;
1987 1987 caddr_t k_bufaddr = NULL;
1988 1988 caddr_t k_rqbufaddr = NULL;
1989 1989 caddr_t u_cdbbufaddr;
1990 1990 caddr_t u_bufaddr;
1991 1991 caddr_t u_rqbufaddr;
1992 1992 struct fcp_scsi_cmd k_fscsi;
1993 1993
1994 1994 /*
1995 1995 * Get fcp_scsi_cmd array element from user address space
1996 1996 */
1997 1997 if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
1998 1998 != 0) {
1999 1999 return (ret);
2000 2000 }
2001 2001
2002 2002
2003 2003 /*
2004 2004 * Even though kmem_alloc() checks the validity of the
2005 2005 * buffer length, this check is needed when the
2006 2006 * kmem_flags set and the zero buffer length is passed.
2007 2007 */
2008 2008 if ((k_fscsi.scsi_cdblen <= 0) ||
2009 2009 (k_fscsi.scsi_buflen <= 0) ||
2010 2010 (k_fscsi.scsi_rqlen <= 0)) {
2011 2011 return (EINVAL);
2012 2012 }
2013 2013
2014 2014 /*
2015 2015 * Allocate data for fcp_scsi_cmd pointer fields
2016 2016 */
2017 2017 if (ret == 0) {
2018 2018 k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
2019 2019 k_bufaddr = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
2020 2020 k_rqbufaddr = kmem_alloc(k_fscsi.scsi_rqlen, KM_NOSLEEP);
2021 2021
2022 2022 if (k_cdbbufaddr == NULL ||
2023 2023 k_bufaddr == NULL ||
2024 2024 k_rqbufaddr == NULL) {
2025 2025 ret = ENOMEM;
2026 2026 }
2027 2027 }
2028 2028
2029 2029 /*
2030 2030 * Get fcp_scsi_cmd pointer fields from user
2031 2031 * address space
2032 2032 */
2033 2033 if (ret == 0) {
2034 2034 u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
2035 2035 u_bufaddr = k_fscsi.scsi_bufaddr;
2036 2036 u_rqbufaddr = k_fscsi.scsi_rqbufaddr;
2037 2037
2038 2038 if (ddi_copyin(u_cdbbufaddr,
2039 2039 k_cdbbufaddr,
2040 2040 k_fscsi.scsi_cdblen,
2041 2041 mode)) {
2042 2042 ret = EFAULT;
2043 2043 } else if (ddi_copyin(u_bufaddr,
2044 2044 k_bufaddr,
2045 2045 k_fscsi.scsi_buflen,
2046 2046 mode)) {
2047 2047 ret = EFAULT;
2048 2048 } else if (ddi_copyin(u_rqbufaddr,
2049 2049 k_rqbufaddr,
2050 2050 k_fscsi.scsi_rqlen,
2051 2051 mode)) {
2052 2052 ret = EFAULT;
2053 2053 }
2054 2054 }
2055 2055
2056 2056 /*
2057 2057 * Send scsi command (blocking)
2058 2058 */
2059 2059 if (ret == 0) {
2060 2060 /*
2061 2061 * Prior to sending the scsi command, the
2062 2062 * fcp_scsi_cmd data structure must contain kernel,
2063 2063 * not user, addresses.
2064 2064 */
2065 2065 k_fscsi.scsi_cdbbufaddr = k_cdbbufaddr;
2066 2066 k_fscsi.scsi_bufaddr = k_bufaddr;
2067 2067 k_fscsi.scsi_rqbufaddr = k_rqbufaddr;
2068 2068
2069 2069 ret = fcp_send_scsi_ioctl(&k_fscsi);
2070 2070
2071 2071 /*
2072 2072 * After sending the scsi command, the
2073 2073 * fcp_scsi_cmd data structure must contain user,
2074 2074 * not kernel, addresses.
2075 2075 */
2076 2076 k_fscsi.scsi_cdbbufaddr = u_cdbbufaddr;
2077 2077 k_fscsi.scsi_bufaddr = u_bufaddr;
2078 2078 k_fscsi.scsi_rqbufaddr = u_rqbufaddr;
2079 2079 }
2080 2080
2081 2081 /*
2082 2082 * Put fcp_scsi_cmd pointer fields to user address space
2083 2083 */
2084 2084 if (ret == 0) {
2085 2085 if (ddi_copyout(k_cdbbufaddr,
2086 2086 u_cdbbufaddr,
2087 2087 k_fscsi.scsi_cdblen,
2088 2088 mode)) {
2089 2089 ret = EFAULT;
2090 2090 } else if (ddi_copyout(k_bufaddr,
2091 2091 u_bufaddr,
2092 2092 k_fscsi.scsi_buflen,
2093 2093 mode)) {
2094 2094 ret = EFAULT;
2095 2095 } else if (ddi_copyout(k_rqbufaddr,
2096 2096 u_rqbufaddr,
2097 2097 k_fscsi.scsi_rqlen,
2098 2098 mode)) {
2099 2099 ret = EFAULT;
2100 2100 }
2101 2101 }
2102 2102
2103 2103 /*
2104 2104 * Free data for fcp_scsi_cmd pointer fields
2105 2105 */
2106 2106 if (k_cdbbufaddr != NULL) {
2107 2107 kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
2108 2108 }
2109 2109 if (k_bufaddr != NULL) {
2110 2110 kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
2111 2111 }
2112 2112 if (k_rqbufaddr != NULL) {
2113 2113 kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
2114 2114 }
2115 2115
2116 2116 /*
2117 2117 * Put fcp_scsi_cmd array element to user address space
2118 2118 */
2119 2119 temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
2120 2120 if (temp_ret != 0) {
2121 2121 ret = temp_ret;
2122 2122 }
2123 2123
2124 2124 /*
2125 2125 * Return status
2126 2126 */
2127 2127 return (ret);
2128 2128 }
2129 2129
2130 2130
2131 2131 /*
2132 2132 * fcp_copyin_scsi_cmd
2133 2133 * Copy in fcp_scsi_cmd data structure from user address space.
2134 2134 * The data may be in 32 bit or 64 bit modes.
2135 2135 *
2136 2136 * Input:
2137 2137 * base_addr = from address (user address space)
2138 2138 * mode = See ioctl(9E) and ddi_copyin(9F)
2139 2139 *
2140 2140 * Output:
2141 2141 * fscsi = to address (kernel address space)
2142 2142 *
2143 2143 * Returns:
2144 2144 * 0 = OK
2145 2145 * EFAULT = Error
2146 2146 *
2147 2147 * Context:
2148 2148 * Kernel context.
2149 2149 */
2150 2150 static int
2151 2151 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2152 2152 {
2153 2153 #ifdef _MULTI_DATAMODEL
2154 2154 struct fcp32_scsi_cmd f32scsi;
2155 2155
2156 2156 switch (ddi_model_convert_from(mode & FMODELS)) {
2157 2157 case DDI_MODEL_ILP32:
2158 2158 /*
2159 2159 * Copy data from user address space
2160 2160 */
2161 2161 if (ddi_copyin((void *)base_addr,
2162 2162 &f32scsi,
2163 2163 sizeof (struct fcp32_scsi_cmd),
2164 2164 mode)) {
2165 2165 return (EFAULT);
2166 2166 }
2167 2167 /*
2168 2168 * Convert from 32 bit to 64 bit
2169 2169 */
2170 2170 FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2171 2171 break;
2172 2172 case DDI_MODEL_NONE:
2173 2173 /*
2174 2174 * Copy data from user address space
2175 2175 */
2176 2176 if (ddi_copyin((void *)base_addr,
2177 2177 fscsi,
2178 2178 sizeof (struct fcp_scsi_cmd),
2179 2179 mode)) {
2180 2180 return (EFAULT);
2181 2181 }
2182 2182 break;
2183 2183 }
2184 2184 #else /* _MULTI_DATAMODEL */
2185 2185 /*
2186 2186 * Copy data from user address space
2187 2187 */
2188 2188 if (ddi_copyin((void *)base_addr,
2189 2189 fscsi,
2190 2190 sizeof (struct fcp_scsi_cmd),
2191 2191 mode)) {
2192 2192 return (EFAULT);
2193 2193 }
2194 2194 #endif /* _MULTI_DATAMODEL */
2195 2195
2196 2196 return (0);
2197 2197 }
2198 2198
2199 2199
2200 2200 /*
2201 2201 * fcp_copyout_scsi_cmd
2202 2202 * Copy out fcp_scsi_cmd data structure to user address space.
2203 2203 * The data may be in 32 bit or 64 bit modes.
2204 2204 *
2205 2205 * Input:
2206 2206 * fscsi = to address (kernel address space)
2207 2207 * mode = See ioctl(9E) and ddi_copyin(9F)
2208 2208 *
2209 2209 * Output:
2210 2210 * base_addr = from address (user address space)
2211 2211 *
2212 2212 * Returns:
2213 2213 * 0 = OK
2214 2214 * EFAULT = Error
2215 2215 *
2216 2216 * Context:
2217 2217 * Kernel context.
2218 2218 */
2219 2219 static int
2220 2220 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2221 2221 {
2222 2222 #ifdef _MULTI_DATAMODEL
2223 2223 struct fcp32_scsi_cmd f32scsi;
2224 2224
2225 2225 switch (ddi_model_convert_from(mode & FMODELS)) {
2226 2226 case DDI_MODEL_ILP32:
2227 2227 /*
2228 2228 * Convert from 64 bit to 32 bit
2229 2229 */
2230 2230 FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2231 2231 /*
2232 2232 * Copy data to user address space
2233 2233 */
2234 2234 if (ddi_copyout(&f32scsi,
2235 2235 (void *)base_addr,
2236 2236 sizeof (struct fcp32_scsi_cmd),
2237 2237 mode)) {
2238 2238 return (EFAULT);
2239 2239 }
2240 2240 break;
2241 2241 case DDI_MODEL_NONE:
2242 2242 /*
2243 2243 * Copy data to user address space
2244 2244 */
2245 2245 if (ddi_copyout(fscsi,
2246 2246 (void *)base_addr,
2247 2247 sizeof (struct fcp_scsi_cmd),
2248 2248 mode)) {
2249 2249 return (EFAULT);
2250 2250 }
2251 2251 break;
2252 2252 }
2253 2253 #else /* _MULTI_DATAMODEL */
2254 2254 /*
2255 2255 * Copy data to user address space
2256 2256 */
2257 2257 if (ddi_copyout(fscsi,
2258 2258 (void *)base_addr,
2259 2259 sizeof (struct fcp_scsi_cmd),
2260 2260 mode)) {
2261 2261 return (EFAULT);
2262 2262 }
2263 2263 #endif /* _MULTI_DATAMODEL */
2264 2264
2265 2265 return (0);
2266 2266 }
2267 2267
2268 2268
2269 2269 /*
2270 2270 * fcp_send_scsi_ioctl
2271 2271 * Sends the SCSI command in blocking mode.
2272 2272 *
2273 2273 * Input:
2274 2274 * fscsi = SCSI command data structure
2275 2275 *
2276 2276 * Output:
2277 2277 * fscsi = SCSI command data structure
2278 2278 *
2279 2279 * Returns:
2280 2280 * 0 = OK
2281 2281 * EAGAIN = See errno.h
2282 2282 * EBUSY = See errno.h
2283 2283 * EINTR = See errno.h
2284 2284 * EINVAL = See errno.h
2285 2285 * EIO = See errno.h
2286 2286 * ENOMEM = See errno.h
2287 2287 * ENXIO = See errno.h
2288 2288 *
2289 2289 * Context:
2290 2290 * Kernel context.
2291 2291 */
2292 2292 static int
2293 2293 fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
2294 2294 {
2295 2295 struct fcp_lun *plun = NULL;
2296 2296 struct fcp_port *pptr = NULL;
2297 2297 struct fcp_tgt *ptgt = NULL;
2298 2298 fc_packet_t *fpkt = NULL;
2299 2299 struct fcp_ipkt *icmd = NULL;
2300 2300 int target_created = FALSE;
2301 2301 fc_frame_hdr_t *hp;
2302 2302 struct fcp_cmd fcp_cmd;
2303 2303 struct fcp_cmd *fcmd;
2304 2304 union scsi_cdb *scsi_cdb;
2305 2305 la_wwn_t *wwn_ptr;
2306 2306 int nodma;
2307 2307 struct fcp_rsp *rsp;
2308 2308 struct fcp_rsp_info *rsp_info;
2309 2309 caddr_t rsp_sense;
2310 2310 int buf_len;
2311 2311 int info_len;
2312 2312 int sense_len;
2313 2313 struct scsi_extended_sense *sense_to = NULL;
2314 2314 timeout_id_t tid;
2315 2315 uint8_t reconfig_lun = FALSE;
2316 2316 uint8_t reconfig_pending = FALSE;
2317 2317 uint8_t scsi_cmd;
2318 2318 int rsp_len;
2319 2319 int cmd_index;
2320 2320 int fc_status;
2321 2321 int pkt_state;
2322 2322 int pkt_action;
2323 2323 int pkt_reason;
2324 2324 int ret, xport_retval = ~FC_SUCCESS;
2325 2325 int lcount;
2326 2326 int tcount;
2327 2327 int reconfig_status;
2328 2328 int port_busy = FALSE;
2329 2329 uchar_t *lun_string;
2330 2330
2331 2331 /*
2332 2332 * Check valid SCSI command
2333 2333 */
2334 2334 scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
2335 2335 ret = EINVAL;
2336 2336 for (cmd_index = 0;
2337 2337 cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
2338 2338 ret != 0;
2339 2339 cmd_index++) {
2340 2340 /*
2341 2341 * First byte of CDB is the SCSI command
2342 2342 */
2343 2343 if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
2344 2344 ret = 0;
2345 2345 }
2346 2346 }
2347 2347
2348 2348 /*
2349 2349 * Check inputs
2350 2350 */
2351 2351 if (fscsi->scsi_flags != FCP_SCSI_READ) {
2352 2352 ret = EINVAL;
2353 2353 } else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
2354 2354 /* no larger than */
2355 2355 ret = EINVAL;
2356 2356 }
2357 2357
2358 2358
2359 2359 /*
2360 2360 * Find FC port
2361 2361 */
2362 2362 if (ret == 0) {
2363 2363 /*
2364 2364 * Acquire global mutex
2365 2365 */
2366 2366 mutex_enter(&fcp_global_mutex);
2367 2367
2368 2368 pptr = fcp_port_head;
2369 2369 while (pptr) {
2370 2370 if (pptr->port_instance ==
2371 2371 (uint32_t)fscsi->scsi_fc_port_num) {
2372 2372 break;
2373 2373 } else {
2374 2374 pptr = pptr->port_next;
2375 2375 }
2376 2376 }
2377 2377
2378 2378 if (pptr == NULL) {
2379 2379 ret = ENXIO;
2380 2380 } else {
2381 2381 /*
2382 2382 * fc_ulp_busy_port can raise power
2383 2383 * so, we must not hold any mutexes involved in PM
2384 2384 */
2385 2385 mutex_exit(&fcp_global_mutex);
2386 2386 ret = fc_ulp_busy_port(pptr->port_fp_handle);
2387 2387 }
2388 2388
2389 2389 if (ret == 0) {
2390 2390
2391 2391 /* remember port is busy, so we will release later */
2392 2392 port_busy = TRUE;
2393 2393
2394 2394 /*
2395 2395 * If there is a reconfiguration in progress, wait
2396 2396 * for it to complete.
2397 2397 */
2398 2398
2399 2399 fcp_reconfig_wait(pptr);
2400 2400
2401 2401 /* reacquire mutexes in order */
2402 2402 mutex_enter(&fcp_global_mutex);
2403 2403 mutex_enter(&pptr->port_mutex);
2404 2404
2405 2405 /*
2406 2406 * Will port accept DMA?
2407 2407 */
2408 2408 nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
2409 2409 ? 1 : 0;
2410 2410
2411 2411 /*
2412 2412 * If init or offline, device not known
2413 2413 *
2414 2414 * If we are discovering (onlining), we can
2415 2415 * NOT obviously provide reliable data about
2416 2416 * devices until it is complete
2417 2417 */
2418 2418 if (pptr->port_state & (FCP_STATE_INIT |
2419 2419 FCP_STATE_OFFLINE)) {
2420 2420 ret = ENXIO;
2421 2421 } else if (pptr->port_state & FCP_STATE_ONLINING) {
2422 2422 ret = EBUSY;
2423 2423 } else {
2424 2424 /*
2425 2425 * Find target from pwwn
2426 2426 *
2427 2427 * The wwn must be put into a local
2428 2428 * variable to ensure alignment.
2429 2429 */
2430 2430 wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2431 2431 ptgt = fcp_lookup_target(pptr,
2432 2432 (uchar_t *)wwn_ptr);
2433 2433
2434 2434 /*
2435 2435 * If target not found,
2436 2436 */
2437 2437 if (ptgt == NULL) {
2438 2438 /*
2439 2439 * Note: Still have global &
2440 2440 * port mutexes
2441 2441 */
2442 2442 mutex_exit(&pptr->port_mutex);
2443 2443 ptgt = fcp_port_create_tgt(pptr,
2444 2444 wwn_ptr, &ret, &fc_status,
2445 2445 &pkt_state, &pkt_action,
2446 2446 &pkt_reason);
2447 2447 mutex_enter(&pptr->port_mutex);
2448 2448
2449 2449 fscsi->scsi_fc_status = fc_status;
2450 2450 fscsi->scsi_pkt_state =
2451 2451 (uchar_t)pkt_state;
2452 2452 fscsi->scsi_pkt_reason = pkt_reason;
2453 2453 fscsi->scsi_pkt_action =
2454 2454 (uchar_t)pkt_action;
2455 2455
2456 2456 if (ptgt != NULL) {
2457 2457 target_created = TRUE;
2458 2458 } else if (ret == 0) {
2459 2459 ret = ENOMEM;
2460 2460 }
2461 2461 }
2462 2462
2463 2463 if (ret == 0) {
2464 2464 /*
2465 2465 * Acquire target
2466 2466 */
2467 2467 mutex_enter(&ptgt->tgt_mutex);
2468 2468
2469 2469 /*
2470 2470 * If target is mark or busy,
2471 2471 * then target can not be used
2472 2472 */
2473 2473 if (ptgt->tgt_state &
2474 2474 (FCP_TGT_MARK |
2475 2475 FCP_TGT_BUSY)) {
2476 2476 ret = EBUSY;
2477 2477 } else {
2478 2478 /*
2479 2479 * Mark target as busy
2480 2480 */
2481 2481 ptgt->tgt_state |=
2482 2482 FCP_TGT_BUSY;
2483 2483 }
2484 2484
2485 2485 /*
2486 2486 * Release target
2487 2487 */
2488 2488 lcount = pptr->port_link_cnt;
2489 2489 tcount = ptgt->tgt_change_cnt;
2490 2490 mutex_exit(&ptgt->tgt_mutex);
2491 2491 }
2492 2492 }
2493 2493
2494 2494 /*
2495 2495 * Release port
2496 2496 */
2497 2497 mutex_exit(&pptr->port_mutex);
2498 2498 }
2499 2499
2500 2500 /*
2501 2501 * Release global mutex
2502 2502 */
2503 2503 mutex_exit(&fcp_global_mutex);
2504 2504 }
2505 2505
2506 2506 if (ret == 0) {
2507 2507 uint64_t belun = BE_64(fscsi->scsi_lun);
2508 2508
2509 2509 /*
2510 2510 * If it's a target device, find lun from pwwn
2511 2511 * The wwn must be put into a local
2512 2512 * variable to ensure alignment.
2513 2513 */
2514 2514 mutex_enter(&pptr->port_mutex);
2515 2515 wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2516 2516 if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
2517 2517 /* this is not a target */
2518 2518 fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
2519 2519 ret = ENXIO;
2520 2520 } else if ((belun << 16) != 0) {
2521 2521 /*
2522 2522 * Since fcp only support PD and LU addressing method
2523 2523 * so far, the last 6 bytes of a valid LUN are expected
2524 2524 * to be filled with 00h.
2525 2525 */
2526 2526 fscsi->scsi_fc_status = FC_INVALID_LUN;
2527 2527 cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
2528 2528 " method 0x%02x with LUN number 0x%016" PRIx64,
2529 2529 (uint8_t)(belun >> 62), belun);
2530 2530 ret = ENXIO;
2531 2531 } else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
2532 2532 (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
2533 2533 /*
2534 2534 * This is a SCSI target, but no LUN at this
2535 2535 * address.
2536 2536 *
2537 2537 * In the future, we may want to send this to
2538 2538 * the target, and let it respond
2539 2539 * appropriately
2540 2540 */
2541 2541 ret = ENXIO;
2542 2542 }
2543 2543 mutex_exit(&pptr->port_mutex);
2544 2544 }
2545 2545
2546 2546 /*
2547 2547 * Finished grabbing external resources
2548 2548 * Allocate internal packet (icmd)
2549 2549 */
2550 2550 if (ret == 0) {
2551 2551 /*
2552 2552 * Calc rsp len assuming rsp info included
2553 2553 */
2554 2554 rsp_len = sizeof (struct fcp_rsp) +
2555 2555 sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;
2556 2556
2557 2557 icmd = fcp_icmd_alloc(pptr, ptgt,
2558 2558 sizeof (struct fcp_cmd),
2559 2559 rsp_len,
2560 2560 fscsi->scsi_buflen,
2561 2561 nodma,
2562 2562 lcount, /* ipkt_link_cnt */
2563 2563 tcount, /* ipkt_change_cnt */
2564 2564 0, /* cause */
2565 2565 FC_INVALID_RSCN_COUNT); /* invalidate the count */
2566 2566
2567 2567 if (icmd == NULL) {
2568 2568 ret = ENOMEM;
2569 2569 } else {
2570 2570 /*
2571 2571 * Setup internal packet as sema sync
2572 2572 */
2573 2573 fcp_ipkt_sema_init(icmd);
2574 2574 }
2575 2575 }
2576 2576
2577 2577 if (ret == 0) {
2578 2578 /*
2579 2579 * Init fpkt pointer for use.
2580 2580 */
2581 2581
2582 2582 fpkt = icmd->ipkt_fpkt;
2583 2583
2584 2584 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
2585 2585 fpkt->pkt_tran_type = FC_PKT_FCP_READ; /* only rd for now */
2586 2586 fpkt->pkt_timeout = fscsi->scsi_timeout;
2587 2587
2588 2588 /*
2589 2589 * Init fcmd pointer for use by SCSI command
2590 2590 */
2591 2591
2592 2592 if (nodma) {
2593 2593 fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
2594 2594 } else {
2595 2595 fcmd = &fcp_cmd;
2596 2596 }
2597 2597 bzero(fcmd, sizeof (struct fcp_cmd));
2598 2598 ptgt = plun->lun_tgt;
2599 2599
2600 2600 lun_string = (uchar_t *)&fscsi->scsi_lun;
2601 2601
2602 2602 fcmd->fcp_ent_addr.ent_addr_0 =
2603 2603 BE_16(*(uint16_t *)&(lun_string[0]));
2604 2604 fcmd->fcp_ent_addr.ent_addr_1 =
2605 2605 BE_16(*(uint16_t *)&(lun_string[2]));
2606 2606 fcmd->fcp_ent_addr.ent_addr_2 =
2607 2607 BE_16(*(uint16_t *)&(lun_string[4]));
2608 2608 fcmd->fcp_ent_addr.ent_addr_3 =
2609 2609 BE_16(*(uint16_t *)&(lun_string[6]));
2610 2610
2611 2611 /*
2612 2612 * Setup internal packet(icmd)
2613 2613 */
2614 2614 icmd->ipkt_lun = plun;
2615 2615 icmd->ipkt_restart = 0;
2616 2616 icmd->ipkt_retries = 0;
2617 2617 icmd->ipkt_opcode = 0;
2618 2618
2619 2619 /*
2620 2620 * Init the frame HEADER Pointer for use
2621 2621 */
2622 2622 hp = &fpkt->pkt_cmd_fhdr;
2623 2623
2624 2624 hp->s_id = pptr->port_id;
2625 2625 hp->d_id = ptgt->tgt_d_id;
2626 2626 hp->r_ctl = R_CTL_COMMAND;
2627 2627 hp->type = FC_TYPE_SCSI_FCP;
2628 2628 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
2629 2629 hp->rsvd = 0;
2630 2630 hp->seq_id = 0;
2631 2631 hp->seq_cnt = 0;
2632 2632 hp->ox_id = 0xffff;
2633 2633 hp->rx_id = 0xffff;
2634 2634 hp->ro = 0;
2635 2635
2636 2636 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
2637 2637 fcmd->fcp_cntl.cntl_read_data = 1; /* only rd for now */
2638 2638 fcmd->fcp_cntl.cntl_write_data = 0;
2639 2639 fcmd->fcp_data_len = fscsi->scsi_buflen;
2640 2640
2641 2641 scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
2642 2642 bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
2643 2643 fscsi->scsi_cdblen);
2644 2644
2645 2645 if (!nodma) {
2646 2646 FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
2647 2647 fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
2648 2648 }
2649 2649
2650 2650 /*
2651 2651 * Send SCSI command to FC transport
2652 2652 */
2653 2653
2654 2654 if (ret == 0) {
2655 2655 mutex_enter(&ptgt->tgt_mutex);
2656 2656
2657 2657 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
2658 2658 mutex_exit(&ptgt->tgt_mutex);
2659 2659 fscsi->scsi_fc_status = xport_retval =
2660 2660 fc_ulp_transport(pptr->port_fp_handle,
2661 2661 fpkt);
2662 2662 if (fscsi->scsi_fc_status != FC_SUCCESS) {
2663 2663 ret = EIO;
2664 2664 }
2665 2665 } else {
2666 2666 mutex_exit(&ptgt->tgt_mutex);
2667 2667 ret = EBUSY;
2668 2668 }
2669 2669 }
2670 2670 }
2671 2671
2672 2672 /*
2673 2673 * Wait for completion only if fc_ulp_transport was called and it
2674 2674 * returned a success. This is the only time callback will happen.
2675 2675 * Otherwise, there is no point in waiting
2676 2676 */
2677 2677 if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2678 2678 ret = fcp_ipkt_sema_wait(icmd);
2679 2679 }
2680 2680
2681 2681 /*
2682 2682 * Copy data to IOCTL data structures
2683 2683 */
2684 2684 rsp = NULL;
2685 2685 if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2686 2686 rsp = (struct fcp_rsp *)fpkt->pkt_resp;
2687 2687
2688 2688 if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
2689 2689 fcp_log(CE_WARN, pptr->port_dip,
2690 2690 "!SCSI command to d_id=0x%x lun=0x%x"
2691 2691 " failed, Bad FCP response values:"
2692 2692 " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
2693 2693 " sts-rsvd2=%x, rsplen=%x, senselen=%x",
2694 2694 ptgt->tgt_d_id, plun->lun_num,
2695 2695 rsp->reserved_0, rsp->reserved_1,
2696 2696 rsp->fcp_u.fcp_status.reserved_0,
2697 2697 rsp->fcp_u.fcp_status.reserved_1,
2698 2698 rsp->fcp_response_len, rsp->fcp_sense_len);
2699 2699
2700 2700 ret = EIO;
2701 2701 }
2702 2702 }
2703 2703
2704 2704 if ((ret == 0) && (rsp != NULL)) {
2705 2705 /*
2706 2706 * Calc response lengths
2707 2707 */
2708 2708 sense_len = 0;
2709 2709 info_len = 0;
2710 2710
2711 2711 if (rsp->fcp_u.fcp_status.rsp_len_set) {
2712 2712 info_len = rsp->fcp_response_len;
2713 2713 }
2714 2714
2715 2715 rsp_info = (struct fcp_rsp_info *)
2716 2716 ((uint8_t *)rsp + sizeof (struct fcp_rsp));
2717 2717
2718 2718 /*
2719 2719 * Get SCSI status
2720 2720 */
2721 2721 fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
2722 2722 /*
2723 2723 * If a lun was just added or removed and the next command
2724 2724 * comes through this interface, we need to capture the check
2725 2725 * condition so we can discover the new topology.
2726 2726 */
2727 2727 if (fscsi->scsi_bufstatus != STATUS_GOOD &&
2728 2728 rsp->fcp_u.fcp_status.sense_len_set) {
2729 2729 sense_len = rsp->fcp_sense_len;
2730 2730 rsp_sense = (caddr_t)((uint8_t *)rsp_info + info_len);
2731 2731 sense_to = (struct scsi_extended_sense *)rsp_sense;
2732 2732 if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
2733 2733 (FCP_SENSE_NO_LUN(sense_to))) {
2734 2734 reconfig_lun = TRUE;
2735 2735 }
2736 2736 }
2737 2737
2738 2738 if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
2739 2739 (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
2740 2740 if (reconfig_lun == FALSE) {
2741 2741 reconfig_status =
2742 2742 fcp_is_reconfig_needed(ptgt, fpkt);
2743 2743 }
2744 2744
2745 2745 if ((reconfig_lun == TRUE) ||
2746 2746 (reconfig_status == TRUE)) {
2747 2747 mutex_enter(&ptgt->tgt_mutex);
2748 2748 if (ptgt->tgt_tid == NULL) {
2749 2749 /*
2750 2750 * Either we've been notified the
2751 2751 * REPORT_LUN data has changed, or
2752 2752 * we've determined on our own that
2753 2753 * we're out of date. Kick off
2754 2754 * rediscovery.
2755 2755 */
2756 2756 tid = timeout(fcp_reconfigure_luns,
2757 2757 (caddr_t)ptgt, drv_usectohz(1));
2758 2758
2759 2759 ptgt->tgt_tid = tid;
2760 2760 ptgt->tgt_state |= FCP_TGT_BUSY;
2761 2761 ret = EBUSY;
2762 2762 reconfig_pending = TRUE;
2763 2763 }
2764 2764 mutex_exit(&ptgt->tgt_mutex);
2765 2765 }
2766 2766 }
2767 2767
2768 2768 /*
2769 2769 * Calc residuals and buffer lengths
2770 2770 */
2771 2771
2772 2772 if (ret == 0) {
2773 2773 buf_len = fscsi->scsi_buflen;
2774 2774 fscsi->scsi_bufresid = 0;
2775 2775 if (rsp->fcp_u.fcp_status.resid_under) {
2776 2776 if (rsp->fcp_resid <= fscsi->scsi_buflen) {
2777 2777 fscsi->scsi_bufresid = rsp->fcp_resid;
2778 2778 } else {
2779 2779 cmn_err(CE_WARN, "fcp: bad residue %x "
2780 2780 "for txfer len %x", rsp->fcp_resid,
2781 2781 fscsi->scsi_buflen);
2782 2782 fscsi->scsi_bufresid =
2783 2783 fscsi->scsi_buflen;
2784 2784 }
2785 2785 buf_len -= fscsi->scsi_bufresid;
2786 2786 }
2787 2787 if (rsp->fcp_u.fcp_status.resid_over) {
2788 2788 fscsi->scsi_bufresid = -rsp->fcp_resid;
2789 2789 }
2790 2790
2791 2791 fscsi->scsi_rqresid = fscsi->scsi_rqlen - sense_len;
2792 2792 if (fscsi->scsi_rqlen < sense_len) {
2793 2793 sense_len = fscsi->scsi_rqlen;
2794 2794 }
2795 2795
2796 2796 fscsi->scsi_fc_rspcode = 0;
2797 2797 if (rsp->fcp_u.fcp_status.rsp_len_set) {
2798 2798 fscsi->scsi_fc_rspcode = rsp_info->rsp_code;
2799 2799 }
2800 2800 fscsi->scsi_pkt_state = fpkt->pkt_state;
2801 2801 fscsi->scsi_pkt_action = fpkt->pkt_action;
2802 2802 fscsi->scsi_pkt_reason = fpkt->pkt_reason;
2803 2803
2804 2804 /*
2805 2805 * Copy data and request sense
2806 2806 *
2807 2807 * Data must be copied by using the FCP_CP_IN macro.
2808 2808 * This will ensure the proper byte order since the data
2809 2809 * is being copied directly from the memory mapped
2810 2810 * device register.
2811 2811 *
2812 2812 * The response (and request sense) will be in the
2813 2813 * correct byte order. No special copy is necessary.
2814 2814 */
2815 2815
2816 2816 if (buf_len) {
2817 2817 FCP_CP_IN(fpkt->pkt_data,
2818 2818 fscsi->scsi_bufaddr,
2819 2819 fpkt->pkt_data_acc,
2820 2820 buf_len);
2821 2821 }
2822 2822 bcopy((void *)rsp_sense,
2823 2823 (void *)fscsi->scsi_rqbufaddr,
2824 2824 sense_len);
2825 2825 }
2826 2826 }
2827 2827
2828 2828 /*
2829 2829 * Cleanup transport data structures if icmd was alloc-ed
2830 2830 * So, cleanup happens in the same thread that icmd was alloc-ed
2831 2831 */
2832 2832 if (icmd != NULL) {
2833 2833 fcp_ipkt_sema_cleanup(icmd);
2834 2834 }
2835 2835
2836 2836 /* restore pm busy/idle status */
2837 2837 if (port_busy) {
2838 2838 fc_ulp_idle_port(pptr->port_fp_handle);
2839 2839 }
2840 2840
2841 2841 /*
2842 2842 * Cleanup target. if a reconfig is pending, don't clear the BUSY
2843 2843 * flag, it'll be cleared when the reconfig is complete.
2844 2844 */
2845 2845 if ((ptgt != NULL) && !reconfig_pending) {
2846 2846 /*
2847 2847 * If target was created,
2848 2848 */
2849 2849 if (target_created) {
2850 2850 mutex_enter(&ptgt->tgt_mutex);
2851 2851 ptgt->tgt_state &= ~FCP_TGT_BUSY;
2852 2852 mutex_exit(&ptgt->tgt_mutex);
2853 2853 } else {
2854 2854 /*
2855 2855 * De-mark target as busy
2856 2856 */
2857 2857 mutex_enter(&ptgt->tgt_mutex);
2858 2858 ptgt->tgt_state &= ~FCP_TGT_BUSY;
2859 2859 mutex_exit(&ptgt->tgt_mutex);
2860 2860 }
2861 2861 }
2862 2862 return (ret);
2863 2863 }
2864 2864
2865 2865
/*
 * Function: fcp_is_reconfig_needed
 *
 * Description: Compares the REPORT_LUN response data carried in fpkt with
 *		the set of LUNs currently known for the target and decides
 *		whether a LUN rediscovery is required.  LUNs masked via
 *		fcp_lun_blacklist are counted as known/expected.
 *
 * Argument:	ptgt	Target whose LUN inventory is being checked.
 *		fpkt	Completed FC packet holding REPORT_LUN response data.
 *
 * Return Value: TRUE if reconfiguration is needed, FALSE otherwise.
 */
static int
fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
    fc_packet_t *fpkt)
{
	uchar_t		*lun_string;
	uint16_t	lun_num, i;
	int		num_luns;
	int		actual_luns;
	int		num_masked_luns;
	int		lun_buflen;
	struct fcp_lun	*plun = NULL;
	struct fcp_reportlun_resp	*report_lun;
	uint8_t		reconfig_needed = FALSE;
	uint8_t		lun_exists = FALSE;
	/* NOTE(review): pptr appears unused below -- confirm before removing */
	fcp_port_t	*pptr = ptgt->tgt_port;

	/* Copy the REPORT_LUN payload out of the device-accessible buffer */
	report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);

	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
	    fpkt->pkt_datalen);

	/* get number of luns (which is supplied as LUNS * 8) */
	num_luns = BE_32(report_lun->num_lun) >> 3;

	/*
	 * Figure out exactly how many lun strings our response buffer
	 * can hold.
	 */
	lun_buflen = (fpkt->pkt_datalen -
	    2 * sizeof (uint32_t)) / sizeof (longlong_t);

	/*
	 * Is our response buffer full or not? We don't want to
	 * potentially walk beyond the number of luns we have.
	 */
	if (num_luns <= lun_buflen) {
		actual_luns = num_luns;
	} else {
		actual_luns = lun_buflen;
	}

	mutex_enter(&ptgt->tgt_mutex);

	/* Scan each lun to see if we have masked it. */
	num_masked_luns = 0;
	if (fcp_lun_blacklist != NULL) {
		for (i = 0; i < actual_luns; i++) {
			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
			switch (lun_string[0] & 0xC0) {
			case FCP_LUN_ADDRESSING:
			case FCP_PD_ADDRESSING:
			case FCP_VOLUME_ADDRESSING:
				/* LUN number: low 6 bits of byte 0 + byte 1 */
				lun_num = ((lun_string[0] & 0x3F) << 8)
				    | lun_string[1];
				if (fcp_should_mask(&ptgt->tgt_port_wwn,
				    lun_num) == TRUE) {
					num_masked_luns++;
				}
				break;
			default:
				break;
			}
		}
	}

	/*
	 * The quick and easy check. If the number of LUNs reported
	 * doesn't match the number we currently know about, we need
	 * to reconfigure.
	 */
	if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
		mutex_exit(&ptgt->tgt_mutex);
		kmem_free(report_lun, fpkt->pkt_datalen);
		return (TRUE);
	}

	/*
	 * If the quick and easy check doesn't turn up anything, we walk
	 * the list of luns from the REPORT_LUN response and look for
	 * any luns we don't know about. If we find one, we know we need
	 * to reconfigure. We will skip LUNs that are masked because of the
	 * blacklist.
	 */
	for (i = 0; i < actual_luns; i++) {
		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
		lun_exists = FALSE;
		switch (lun_string[0] & 0xC0) {
		case FCP_LUN_ADDRESSING:
		case FCP_PD_ADDRESSING:
		case FCP_VOLUME_ADDRESSING:
			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];

			/* A masked LUN is deliberately hidden: treat as known */
			if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
			    &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
				lun_exists = TRUE;
				break;
			}

			for (plun = ptgt->tgt_lun; plun;
			    plun = plun->lun_next) {
				if (plun->lun_num == lun_num) {
					lun_exists = TRUE;
					break;
				}
			}
			break;
		default:
			break;
		}

		if (lun_exists == FALSE) {
			reconfig_needed = TRUE;
			break;
		}
	}

	mutex_exit(&ptgt->tgt_mutex);
	kmem_free(report_lun, fpkt->pkt_datalen);

	return (reconfig_needed);
}
2987 2987
2988 2988 /*
2989 2989 * This function is called by fcp_handle_page83 and uses inquiry response data
2990 2990 * stored in plun->lun_inq to determine whether or not a device is a member of
2991 2991 * the table fcp_symmetric_disk_table_size. We return 0 if it is in the table,
2992 2992 * otherwise 1.
2993 2993 */
2994 2994 static int
2995 2995 fcp_symmetric_device_probe(struct fcp_lun *plun)
2996 2996 {
2997 2997 struct scsi_inquiry *stdinq = &plun->lun_inq;
2998 2998 char *devidptr;
2999 2999 int i, len;
3000 3000
3001 3001 for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
3002 3002 devidptr = fcp_symmetric_disk_table[i];
3003 3003 len = (int)strlen(devidptr);
3004 3004
3005 3005 if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
3006 3006 return (0);
3007 3007 }
3008 3008 }
3009 3009 return (1);
3010 3010 }
3011 3011
3012 3012
3013 3013 /*
3014 3014 * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl
3015 3015 * It basically returns the current count of # of state change callbacks
3016 3016 * i.e the value of tgt_change_cnt.
3017 3017 *
3018 3018 * INPUT:
3019 3019 * fcp_ioctl.fp_minor -> The minor # of the fp port
3020 3020 * fcp_ioctl.listlen -> 1
3021 3021 * fcp_ioctl.list -> Pointer to a 32 bit integer
3022 3022 */
3023 3023 /*ARGSUSED2*/
static int
fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
{
	int			ret;
	uint32_t		link_cnt;
	struct fcp_ioctl	fioctl;
	struct fcp_port		*pptr = NULL;

	/* Copy the ioctl request in (handles ILP32 callers) and locate port */
	if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
	    &pptr)) != 0) {
		return (ret);
	}

	ASSERT(pptr != NULL);

	/* The caller must supply room for exactly one 32-bit counter */
	if (fioctl.listlen != 1) {
		return (EINVAL);
	}

	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & FCP_STATE_OFFLINE) {
		mutex_exit(&pptr->port_mutex);
		return (ENXIO);
	}

	/*
	 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
	 * When the fcp initially attaches to the port and there are nothing
	 * hanging out of the port or if there was a repeat offline state change
	 * callback (refer fcp_statec_callback() FC_STATE_OFFLINE case).
	 * In the latter case, port_tmp_cnt will be non-zero and that is how we
	 * will differentiate the 2 cases.
	 */
	if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
		mutex_exit(&pptr->port_mutex);
		return (ENXIO);
	}

	/* Snapshot the link count under port_mutex, then copy it out */
	link_cnt = pptr->port_link_cnt;
	mutex_exit(&pptr->port_mutex);

	if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
		return (EFAULT);
	}

#ifdef	_MULTI_DATAMODEL
	/* Copy the (unchanged) request structure back out to the caller */
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct fcp32_ioctl f32_ioctl;

		f32_ioctl.fp_minor = fioctl.fp_minor;
		f32_ioctl.listlen = fioctl.listlen;
		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
		    sizeof (struct fcp32_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyout((void *)&fioctl, (void *)data,
		    sizeof (struct fcp_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
#else	/* _MULTI_DATAMODEL */

	if (ddi_copyout((void *)&fioctl, (void *)data,
	    sizeof (struct fcp_ioctl), mode)) {
		return (EFAULT);
	}
#endif	/* _MULTI_DATAMODEL */

	return (0);
}
3100 3100
3101 3101 /*
3102 3102 * This function copies the fcp_ioctl structure passed in from user land
3103 3103 * into kernel land. Handles 32 bit applications.
3104 3104 */
3105 3105 /*ARGSUSED*/
3106 3106 static int
3107 3107 fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
3108 3108 struct fcp_ioctl *fioctl, struct fcp_port **pptr)
3109 3109 {
3110 3110 struct fcp_port *t_pptr;
3111 3111
3112 3112 #ifdef _MULTI_DATAMODEL
3113 3113 switch (ddi_model_convert_from(mode & FMODELS)) {
3114 3114 case DDI_MODEL_ILP32: {
3115 3115 struct fcp32_ioctl f32_ioctl;
3116 3116
3117 3117 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
3118 3118 sizeof (struct fcp32_ioctl), mode)) {
3119 3119 return (EFAULT);
3120 3120 }
3121 3121 fioctl->fp_minor = f32_ioctl.fp_minor;
3122 3122 fioctl->listlen = f32_ioctl.listlen;
3123 3123 fioctl->list = (caddr_t)(long)f32_ioctl.list;
3124 3124 break;
3125 3125 }
3126 3126 case DDI_MODEL_NONE:
3127 3127 if (ddi_copyin((void *)data, (void *)fioctl,
3128 3128 sizeof (struct fcp_ioctl), mode)) {
3129 3129 return (EFAULT);
3130 3130 }
3131 3131 break;
3132 3132 }
3133 3133
3134 3134 #else /* _MULTI_DATAMODEL */
3135 3135 if (ddi_copyin((void *)data, (void *)fioctl,
3136 3136 sizeof (struct fcp_ioctl), mode)) {
3137 3137 return (EFAULT);
3138 3138 }
3139 3139 #endif /* _MULTI_DATAMODEL */
3140 3140
3141 3141 /*
3142 3142 * Right now we can assume that the minor number matches with
3143 3143 * this instance of fp. If this changes we will need to
3144 3144 * revisit this logic.
3145 3145 */
3146 3146 mutex_enter(&fcp_global_mutex);
3147 3147 t_pptr = fcp_port_head;
3148 3148 while (t_pptr) {
3149 3149 if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
3150 3150 break;
3151 3151 } else {
3152 3152 t_pptr = t_pptr->port_next;
3153 3153 }
3154 3154 }
3155 3155 *pptr = t_pptr;
3156 3156 mutex_exit(&fcp_global_mutex);
3157 3157 if (t_pptr == NULL) {
3158 3158 return (ENXIO);
3159 3159 }
3160 3160
3161 3161 return (0);
3162 3162 }
3163 3163
3164 3164 /*
3165 3165 * Function: fcp_port_create_tgt
3166 3166 *
3167 3167 * Description: As the name suggest this function creates the target context
3168 3168 * specified by the the WWN provided by the caller. If the
3169 3169 * creation goes well and the target is known by fp/fctl a PLOGI
3170 3170 * followed by a PRLI are issued.
3171 3171 *
3172 3172 * Argument: pptr fcp port structure
3173 3173 * pwwn WWN of the target
3174 3174 * ret_val Address of the return code. It could be:
3175 3175 * EIO, ENOMEM or 0.
3176 3176 * fc_status PLOGI or PRLI status completion
3177 3177 * fc_pkt_state PLOGI or PRLI state completion
3178 3178 * fc_pkt_reason PLOGI or PRLI reason completion
3179 3179 * fc_pkt_action PLOGI or PRLI action completion
3180 3180 *
3181 3181 * Return Value: NULL if it failed
3182 3182 * Target structure address if it succeeds
3183 3183 */
static struct fcp_tgt *
fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
    int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
{
	struct fcp_tgt	*ptgt = NULL;
	fc_portmap_t	devlist;
	int		lcount;
	int		error;

	*ret_val = 0;

	/*
	 * Check FC port device & get port map
	 */
	if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
	    &error, 1) == NULL) {
		*ret_val = EIO;
	} else {
		if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
		    &devlist) != FC_SUCCESS) {
			*ret_val = EIO;
		}
	}

	/*
	 * Set port map flags.
	 * NOTE(review): this writes devlist even when the lookups above
	 * failed and devlist was never filled in; harmless since devlist is
	 * only consumed below when *ret_val == 0, but confirm intent.
	 */
	devlist.map_type = PORT_DEVICE_USER_CREATE;

	/* Allocate target */
	if (*ret_val == 0) {
		lcount = pptr->port_link_cnt;
		ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			*ret_val = ENOMEM;
		} else {
			/* Setup target: copy identity from the port map */
			mutex_enter(&ptgt->tgt_mutex);

			ptgt->tgt_statec_cause = FCP_CAUSE_TGT_CHANGE;
			ptgt->tgt_tmp_cnt = 1;
			ptgt->tgt_d_id = devlist.map_did.port_id;
			ptgt->tgt_hard_addr =
			    devlist.map_hard_addr.hard_addr;
			ptgt->tgt_pd_handle = devlist.map_pd;
			ptgt->tgt_fca_dev = NULL;

			bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
			    FC_WWN_SIZE);
			bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
			    FC_WWN_SIZE);

			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	/*
	 * Release global mutex for PLOGI and PRLI.  The caller must enter
	 * fcp_global_mutex before calling this function; it is dropped here
	 * so the (blocking) login can proceed, and re-acquired below.
	 */
	mutex_exit(&fcp_global_mutex);

	/* Send PLOGI (If necessary) */
	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}

	/* Send PRLI (If necessary) */
	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_prli(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}

	mutex_enter(&fcp_global_mutex);

	return (ptgt);
}
3259 3259
3260 3260 /*
3261 3261 * Function: fcp_tgt_send_plogi
3262 3262 *
3263 3263 * Description: This function sends a PLOGI to the target specified by the
3264 3264 * caller and waits till it completes.
3265 3265 *
3266 3266 * Argument: ptgt Target to send the plogi to.
3267 3267 * fc_status Status returned by fp/fctl in the PLOGI request.
3268 3268 * fc_pkt_state State returned by fp/fctl in the PLOGI request.
3269 3269 * fc_pkt_reason Reason returned by fp/fctl in the PLOGI request.
3270 3270 * fc_pkt_action Action returned by fp/fctl in the PLOGI request.
3271 3271 *
3272 3272 * Return Value: 0
3273 3273 * ENOMEM
3274 3274 * EIO
3275 3275 *
3276 3276 * Context: User context.
3277 3277 */
static int
fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
    int *fc_pkt_reason, int *fc_pkt_action)
{
	struct fcp_port	*pptr;
	struct fcp_ipkt	*icmd;
	struct fc_packet	*fpkt;
	fc_frame_hdr_t	*hp;
	struct la_els_logi	logi;
	int	tcount;
	int	lcount;
	int	ret, login_retval = ~FC_SUCCESS;

	ret = 0;

	pptr = ptgt->tgt_port;

	/* Snapshot link/target change counts for the internal packet */
	lcount = pptr->port_link_cnt;
	tcount = ptgt->tgt_change_cnt;

	/* Alloc internal packet */
	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
	    sizeof (la_els_logi_t), 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT);

	if (icmd == NULL) {
		ret = ENOMEM;
	} else {
		/*
		 * Setup internal packet as sema sync
		 */
		fcp_ipkt_sema_init(icmd);

		/*
		 * Setup internal packet (icmd)
		 */
		icmd->ipkt_lun = NULL;
		icmd->ipkt_restart = 0;
		icmd->ipkt_retries = 0;
		icmd->ipkt_opcode = LA_ELS_PLOGI;

		/*
		 * Setup fc_packet
		 */
		fpkt = icmd->ipkt_fpkt;

		fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
		fpkt->pkt_timeout = FCP_ELS_TIMEOUT;

		/*
		 * Setup FC frame header
		 */
		hp = &fpkt->pkt_cmd_fhdr;

		hp->s_id = pptr->port_id;	/* source ID */
		hp->d_id = ptgt->tgt_d_id;	/* dest ID */
		hp->r_ctl = R_CTL_ELS_REQ;
		hp->type = FC_TYPE_EXTENDED_LS;
		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->seq_id = 0;
		hp->rsvd = 0;
		hp->df_ctl = 0;
		hp->seq_cnt = 0;
		hp->ox_id = 0xffff;		/* i.e. none */
		hp->rx_id = 0xffff;		/* i.e. none */
		hp->ro = 0;

		/*
		 * Setup PLOGI
		 */
		bzero(&logi, sizeof (struct la_els_logi));
		logi.ls_code.ls_code = LA_ELS_PLOGI;

		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));

		/*
		 * Send PLOGI
		 */
		*fc_status = login_retval =
		    fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
		if (*fc_status != FC_SUCCESS) {
			ret = EIO;
		}
	}

	/*
	 * Wait for completion.  Only reached when the transport accepted
	 * the login (login_retval == FC_SUCCESS), which is the only case in
	 * which the completion callback will post the semaphore.  fpkt is
	 * valid here since icmd allocation succeeded on this path.
	 */
	if ((ret == 0) && (login_retval == FC_SUCCESS)) {
		ret = fcp_ipkt_sema_wait(icmd);

		*fc_pkt_state = fpkt->pkt_state;
		*fc_pkt_reason = fpkt->pkt_reason;
		*fc_pkt_action = fpkt->pkt_action;
	}

	/*
	 * Cleanup transport data structures if icmd was alloc-ed.  By this
	 * point either fc_ulp_login() failed (no callback will ever come) or
	 * fcp_ipkt_sema_wait() has already returned, which only happens after
	 * the completion callback posted the semaphore -- so it is safe to
	 * tear the internal packet down here in either case.
	 */
	if (icmd != NULL) {
		fcp_ipkt_sema_cleanup(icmd);
	}

	return (ret);
}
3388 3388
3389 3389 /*
3390 3390 * Function: fcp_tgt_send_prli
3391 3391 *
3392 3392 * Description: Does nothing as of today.
3393 3393 *
3394 3394 * Argument: ptgt Target to send the prli to.
3395 3395 * fc_status Status returned by fp/fctl in the PRLI request.
3396 3396 * fc_pkt_state State returned by fp/fctl in the PRLI request.
3397 3397 * fc_pkt_reason Reason returned by fp/fctl in the PRLI request.
3398 3398 * fc_pkt_action Action returned by fp/fctl in the PRLI request.
3399 3399 *
3400 3400 * Return Value: 0
3401 3401 */
3402 3402 /*ARGSUSED*/
static int
fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
    int *fc_pkt_reason, int *fc_pkt_action)
{
	/* PRLI is currently a no-op; the output parameters are not written */
	return (0);
}
3409 3409
3410 3410 /*
3411 3411 * Function: fcp_ipkt_sema_init
3412 3412 *
3413 3413 * Description: Initializes the semaphore contained in the internal packet.
3414 3414 *
3415 3415 * Argument: icmd Internal packet the semaphore of which must be
3416 3416 * initialized.
3417 3417 *
3418 3418 * Return Value: None
3419 3419 *
3420 3420 * Context: User context only.
3421 3421 */
static void
fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
{
	struct fc_packet	*fpkt;

	fpkt = icmd->ipkt_fpkt;

	/* Create semaphore for sync; initial count 0 so sema_p() blocks */
	sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);

	/* Setup the completion callback, which will post the semaphore */
	fpkt->pkt_comp = fcp_ipkt_sema_callback;
}
3435 3435
3436 3436 /*
3437 3437 * Function: fcp_ipkt_sema_wait
3438 3438 *
3439 3439 * Description: Wait on the semaphore embedded in the internal packet. The
3440 3440 * semaphore is released in the callback.
3441 3441 *
3442 3442 * Argument: icmd Internal packet to wait on for completion.
3443 3443 *
3444 3444 * Return Value: 0
3445 3445 * EIO
3446 3446 * EBUSY
3447 3447 * EAGAIN
3448 3448 *
3449 3449 * Context: User context only.
3450 3450 *
3451 3451 * This function does a conversion between the field pkt_state of the fc_packet
3452 3452 * embedded in the internal packet (icmd) and the code it returns.
3453 3453 */
3454 3454 static int
3455 3455 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3456 3456 {
3457 3457 struct fc_packet *fpkt;
3458 3458 int ret;
3459 3459
3460 3460 ret = EIO;
3461 3461 fpkt = icmd->ipkt_fpkt;
3462 3462
3463 3463 /*
3464 3464 * Wait on semaphore
3465 3465 */
3466 3466 sema_p(&(icmd->ipkt_sema));
3467 3467
3468 3468 /*
3469 3469 * Check the status of the FC packet
3470 3470 */
3471 3471 switch (fpkt->pkt_state) {
3472 3472 case FC_PKT_SUCCESS:
3473 3473 ret = 0;
3474 3474 break;
3475 3475 case FC_PKT_LOCAL_RJT:
3476 3476 switch (fpkt->pkt_reason) {
3477 3477 case FC_REASON_SEQ_TIMEOUT:
3478 3478 case FC_REASON_RX_BUF_TIMEOUT:
3479 3479 ret = EAGAIN;
3480 3480 break;
3481 3481 case FC_REASON_PKT_BUSY:
3482 3482 ret = EBUSY;
3483 3483 break;
3484 3484 }
3485 3485 break;
3486 3486 case FC_PKT_TIMEOUT:
3487 3487 ret = EAGAIN;
3488 3488 break;
3489 3489 case FC_PKT_LOCAL_BSY:
3490 3490 case FC_PKT_TRAN_BSY:
3491 3491 case FC_PKT_NPORT_BSY:
3492 3492 case FC_PKT_FABRIC_BSY:
3493 3493 ret = EBUSY;
3494 3494 break;
3495 3495 case FC_PKT_LS_RJT:
3496 3496 case FC_PKT_BA_RJT:
3497 3497 switch (fpkt->pkt_reason) {
3498 3498 case FC_REASON_LOGICAL_BSY:
3499 3499 ret = EBUSY;
3500 3500 break;
3501 3501 }
3502 3502 break;
3503 3503 case FC_PKT_FS_RJT:
3504 3504 switch (fpkt->pkt_reason) {
3505 3505 case FC_REASON_FS_LOGICAL_BUSY:
3506 3506 ret = EBUSY;
3507 3507 break;
3508 3508 }
3509 3509 break;
3510 3510 }
3511 3511
3512 3512 return (ret);
3513 3513 }
3514 3514
3515 3515 /*
3516 3516 * Function: fcp_ipkt_sema_callback
3517 3517 *
3518 3518 * Description: Registered as the completion callback function for the FC
3519 3519 * transport when the ipkt semaphore is used for sync. This will
3520 3520 * cleanup the used data structures, if necessary and wake up
3521 3521 * the user thread to complete the transaction.
3522 3522 *
3523 3523 * Argument: fpkt FC packet (points to the icmd)
3524 3524 *
3525 3525 * Return Value: None
3526 3526 *
3527 3527 * Context: User context only
3528 3528 */
static void
fcp_ipkt_sema_callback(struct fc_packet *fpkt)
{
	struct fcp_ipkt	*icmd;

	/* The internal packet is stashed in the FC packet's ULP-private field */
	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;

	/*
	 * Wake up user thread blocked in fcp_ipkt_sema_wait()
	 */
	sema_v(&(icmd->ipkt_sema));
}
3541 3541
3542 3542 /*
3543 3543 * Function: fcp_ipkt_sema_cleanup
3544 3544 *
3545 3545 * Description: Called to cleanup (if necessary) the data structures used
3546 3546 * when ipkt sema is used for sync. This function will detect
3547 3547 * whether the caller is the last thread (via counter) and
3548 3548 * cleanup only if necessary.
3549 3549 *
3550 3550 * Argument: icmd Internal command packet
3551 3551 *
3552 3552 * Return Value: None
3553 3553 *
3554 3554 * Context: User context only
3555 3555 */
3556 3556 static void
3557 3557 fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
3558 3558 {
3559 3559 struct fcp_tgt *ptgt;
3560 3560 struct fcp_port *pptr;
3561 3561
3562 3562 ptgt = icmd->ipkt_tgt;
3563 3563 pptr = icmd->ipkt_port;
3564 3564
3565 3565 /*
3566 3566 * Acquire data structure
3567 3567 */
3568 3568 mutex_enter(&ptgt->tgt_mutex);
3569 3569
3570 3570 /*
3571 3571 * Destroy semaphore
3572 3572 */
3573 3573 sema_destroy(&(icmd->ipkt_sema));
3574 3574
3575 3575 /*
3576 3576 * Cleanup internal packet
3577 3577 */
3578 3578 mutex_exit(&ptgt->tgt_mutex);
3579 3579 fcp_icmd_free(pptr, icmd);
3580 3580 }
3581 3581
3582 3582 /*
3583 3583 * Function: fcp_port_attach
3584 3584 *
3585 3585 * Description: Called by the transport framework to resume, suspend or
3586 3586 * attach a new port.
3587 3587 *
3588 3588 * Argument: ulph Port handle
3589 3589 * *pinfo Port information
3590 3590 * cmd Command
3591 3591 * s_id Port ID
3592 3592 *
3593 3593 * Return Value: FC_FAILURE or FC_SUCCESS
3594 3594 */
3595 3595 /*ARGSUSED*/
3596 3596 static int
3597 3597 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3598 3598 fc_attach_cmd_t cmd, uint32_t s_id)
3599 3599 {
3600 3600 int instance;
3601 3601 int res = FC_FAILURE; /* default result */
3602 3602
3603 3603 ASSERT(pinfo != NULL);
3604 3604
3605 3605 instance = ddi_get_instance(pinfo->port_dip);
3606 3606
3607 3607 switch (cmd) {
3608 3608 case FC_CMD_ATTACH:
3609 3609 /*
3610 3610 * this port instance attaching for the first time (or after
3611 3611 * being detached before)
3612 3612 */
3613 3613 if (fcp_handle_port_attach(ulph, pinfo, s_id,
3614 3614 instance) == DDI_SUCCESS) {
3615 3615 res = FC_SUCCESS;
3616 3616 } else {
3617 3617 ASSERT(ddi_get_soft_state(fcp_softstate,
3618 3618 instance) == NULL);
3619 3619 }
3620 3620 break;
3621 3621
3622 3622 case FC_CMD_RESUME:
3623 3623 case FC_CMD_POWER_UP:
3624 3624 /*
3625 3625 * this port instance was attached and the suspended and
3626 3626 * will now be resumed
3627 3627 */
3628 3628 if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3629 3629 instance) == DDI_SUCCESS) {
3630 3630 res = FC_SUCCESS;
3631 3631 }
3632 3632 break;
3633 3633
3634 3634 default:
3635 3635 /* shouldn't happen */
3636 3636 FCP_TRACE(fcp_logq, "fcp",
3637 3637 fcp_trace, FCP_BUF_LEVEL_2, 0,
3638 3638 "port_attach: unknown cmdcommand: %d", cmd);
3639 3639 break;
3640 3640 }
3641 3641
3642 3642 /* return result */
3643 3643 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3644 3644 FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3645 3645
3646 3646 return (res);
3647 3647 }
3648 3648
3649 3649
3650 3650 /*
3651 3651 * detach or suspend this port instance
3652 3652 *
3653 3653 * acquires and releases the global mutex
3654 3654 *
3655 3655 * acquires and releases the mutex for this port
3656 3656 *
3657 3657 * acquires and releases the hotplug mutex for this port
3658 3658 */
3659 3659 /*ARGSUSED*/
3660 3660 static int
3661 3661 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
3662 3662 fc_detach_cmd_t cmd)
3663 3663 {
3664 3664 int flag;
3665 3665 int instance;
3666 3666 struct fcp_port *pptr;
3667 3667
3668 3668 instance = ddi_get_instance(info->port_dip);
3669 3669 pptr = ddi_get_soft_state(fcp_softstate, instance);
3670 3670
3671 3671 switch (cmd) {
3672 3672 case FC_CMD_SUSPEND:
3673 3673 FCP_DTRACE(fcp_logq, "fcp",
3674 3674 fcp_trace, FCP_BUF_LEVEL_8, 0,
3675 3675 "port suspend called for port %d", instance);
3676 3676 flag = FCP_STATE_SUSPENDED;
3677 3677 break;
3678 3678
3679 3679 case FC_CMD_POWER_DOWN:
3680 3680 FCP_DTRACE(fcp_logq, "fcp",
3681 3681 fcp_trace, FCP_BUF_LEVEL_8, 0,
3682 3682 "port power down called for port %d", instance);
3683 3683 flag = FCP_STATE_POWER_DOWN;
3684 3684 break;
3685 3685
3686 3686 case FC_CMD_DETACH:
3687 3687 FCP_DTRACE(fcp_logq, "fcp",
3688 3688 fcp_trace, FCP_BUF_LEVEL_8, 0,
3689 3689 "port detach called for port %d", instance);
3690 3690 flag = FCP_STATE_DETACHING;
3691 3691 break;
3692 3692
3693 3693 default:
3694 3694 /* shouldn't happen */
3695 3695 return (FC_FAILURE);
3696 3696 }
3697 3697 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3698 3698 FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");
3699 3699
3700 3700 return (fcp_handle_port_detach(pptr, flag, instance));
3701 3701 }
3702 3702
3703 3703
/*
 * called for ioctls on the transport's devctl interface, and the transport
 * has passed it to us
 *
 * this will only be called for device control ioctls (i.e. hotplugging stuff)
 *
 * return FC_SUCCESS if we decide to claim the ioctl,
 * else return FC_UNCLAIMED
 *
 * *rval is set iff we decide to claim the ioctl
 *
 * Locking: for the DEVCTL_DEVICE_* commands we enter the parent devinfo
 * (mdi_devi_enter() for mpxio ports, ndi_devi_enter() otherwise) while
 * resolving the child; the OFFLINE/REMOVE/ONLINE paths drop that enter
 * again before blocking in fcp_pass_to_hp_and_wait() — presumably so the
 * hotplug thread can take it itself (NOTE(review): verify against
 * fcp_pass_to_hp_and_wait()).  Any path that breaks out with
 * devi_entered still set is unwound at the "out" label.
 */
/*ARGSUSED*/
static int
fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
    intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
{
	int			retval = FC_UNCLAIMED;	/* return value */
	struct fcp_port		*pptr = NULL;		/* our soft state */
	struct devctl_iocdata	*dcp = NULL;		/* for devctl */
	dev_info_t		*cdip;
	mdi_pathinfo_t		*pip = NULL;
	char			*ndi_nm;		/* NDI name */
	char			*ndi_addr;		/* NDI addr */
	int			is_mpxio, circ;
	int			devi_entered = 0;	/* devinfo enter held? */
	clock_t			end_time;

	ASSERT(rval != NULL);

	FCP_DTRACE(fcp_logq, "fcp",
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);

	/* if already claimed then forget it */
	if (claimed) {
		/*
		 * for now, if this ioctl has already been claimed, then
		 * we just ignore it
		 */
		return (retval);
	}

	/* get our port info */
	if ((pptr = fcp_get_port(port_handle)) == NULL) {
		fcp_log(CE_WARN, NULL,
		    "!fcp:Invalid port handle handle in ioctl");
		*rval = ENXIO;
		return (retval);
	}
	is_mpxio = pptr->port_mpxio;

	/*
	 * First pass: allocate the devctl handle and, for device-level
	 * commands, resolve the child devinfo (or mdi pathinfo) named in
	 * the ioctl.  The actual command is processed in the second
	 * switch below.
	 */
	switch (cmd) {
	case DEVCTL_BUS_GETSTATE:
	case DEVCTL_BUS_QUIESCE:
	case DEVCTL_BUS_UNQUIESCE:
	case DEVCTL_BUS_RESET:
	case DEVCTL_BUS_RESETALL:

	case DEVCTL_BUS_DEV_CREATE:
		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
			return (retval);
		}
		break;

	case DEVCTL_DEVICE_GETSTATE:
	case DEVCTL_DEVICE_OFFLINE:
	case DEVCTL_DEVICE_ONLINE:
	case DEVCTL_DEVICE_REMOVE:
	case DEVCTL_DEVICE_RESET:
		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
			return (retval);
		}

		ASSERT(dcp != NULL);

		/* ensure we have a name and address */
		if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
		    ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "ioctl: can't get name (%s) or addr (%s)",
			    ndi_nm ? ndi_nm : "<null ptr>",
			    ndi_addr ? ndi_addr : "<null ptr>");
			ndi_dc_freehdl(dcp);
			return (retval);
		}


		/* get our child's DIP */
		ASSERT(pptr != NULL);
		if (is_mpxio) {
			mdi_devi_enter(pptr->port_dip, &circ);
		} else {
			ndi_devi_enter(pptr->port_dip, &circ);
		}
		devi_entered = 1;

		if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
		    ndi_addr)) == NULL) {
			/* Look for virtually enumerated devices. */
			pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
			if (pip == NULL ||
			    ((cdip = mdi_pi_get_client(pip)) == NULL)) {
				*rval = ENXIO;
				goto out;
			}
		}
		break;

	default:
		*rval = ENOTTY;
		return (retval);
	}

	/* this ioctl is ours -- process it */

	retval = FC_SUCCESS; /* just means we claim the ioctl */

	/* we assume it will be a success; else we'll set error value */
	*rval = 0;


	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "ioctl: claiming this one");

	/* handle ioctls now */
	switch (cmd) {
	case DEVCTL_DEVICE_GETSTATE:
		ASSERT(cdip != NULL);
		ASSERT(dcp != NULL);
		if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
			*rval = EFAULT;
		}
		break;

	case DEVCTL_DEVICE_REMOVE:
	case DEVCTL_DEVICE_OFFLINE: {
		int			flag = 0;	/* NDI offline flags */
		int			lcount;		/* link count snapshot */
		int			tcount;		/* tgt change snapshot */
		struct fcp_pkt	*head = NULL;
		struct fcp_lun	*plun;
		child_info_t	*cip = CIP(cdip);
		int			all = 1;	/* all LUNs offline? */
		struct fcp_lun	*tplun;
		struct fcp_tgt	*ptgt;

		ASSERT(pptr != NULL);
		ASSERT(cdip != NULL);

		mutex_enter(&pptr->port_mutex);
		if (pip != NULL) {
			cip = CIP(pip);
		}
		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
			mutex_exit(&pptr->port_mutex);
			*rval = ENXIO;
			break;
		}

		/* Abort any commands still queued against this LUN. */
		head = fcp_scan_commands(plun);
		if (head != NULL) {
			fcp_abort_commands(head, LUN_PORT);
		}
		/*
		 * Snapshot the link/target generation counts under
		 * port_mutex; fcp_pass_to_hp_and_wait() uses them to
		 * detect a state change that would invalidate this
		 * request.
		 */
		lcount = pptr->port_link_cnt;
		tcount = plun->lun_tgt->tgt_change_cnt;
		mutex_exit(&pptr->port_mutex);

		if (cmd == DEVCTL_DEVICE_REMOVE) {
			flag = NDI_DEVI_REMOVE;
			if (is_mpxio)
				flag |= NDI_USER_REQ;
		}

		/* Drop the devinfo enter before blocking on the hotplug. */
		if (is_mpxio) {
			mdi_devi_exit(pptr->port_dip, circ);
		} else {
			ndi_devi_exit(pptr->port_dip, circ);
		}
		devi_entered = 0;

		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
		    FCP_OFFLINE, lcount, tcount, flag);

		if (*rval != NDI_SUCCESS) {
			*rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
			break;
		}

		/* Offline succeeded; record the new LUN state. */
		fcp_update_offline_flags(plun);

		/*
		 * If every LUN behind this target is now offline, mark
		 * the target node gone and, on fabric topologies with
		 * auto-configuration enabled, require a manual (user)
		 * action to bring it back.
		 */
		ptgt = plun->lun_tgt;
		mutex_enter(&ptgt->tgt_mutex);
		for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
		    tplun->lun_next) {
			mutex_enter(&tplun->lun_mutex);
			if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
				all = 0;
			}
			mutex_exit(&tplun->lun_mutex);
		}

		if (all) {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
			/*
			 * The user is unconfiguring/offlining the device.
			 * If fabric and the auto configuration is set
			 * then make sure the user is the only one who
			 * can reconfigure the device.
			 */
			if (FC_TOP_EXTERNAL(pptr->port_topology) &&
			    fcp_enable_auto_configuration) {
				ptgt->tgt_manual_config_only = 1;
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
		break;
	}

	case DEVCTL_DEVICE_ONLINE: {
		int			lcount;		/* link count snapshot */
		int			tcount;		/* tgt change snapshot */
		struct fcp_lun	*plun;
		child_info_t	*cip = CIP(cdip);

		ASSERT(cdip != NULL);
		ASSERT(pptr != NULL);

		mutex_enter(&pptr->port_mutex);
		if (pip != NULL) {
			cip = CIP(pip);
		}
		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
			mutex_exit(&pptr->port_mutex);
			*rval = ENXIO;
			break;
		}
		lcount = pptr->port_link_cnt;
		tcount = plun->lun_tgt->tgt_change_cnt;
		mutex_exit(&pptr->port_mutex);

		/*
		 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
		 * to allow the device attach to occur when the device is
		 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
		 * from the scsi_probe()).
		 */
		mutex_enter(&LUN_TGT->tgt_mutex);
		plun->lun_state |= FCP_LUN_ONLINING;
		mutex_exit(&LUN_TGT->tgt_mutex);

		/* Drop the devinfo enter before blocking on the hotplug. */
		if (is_mpxio) {
			mdi_devi_exit(pptr->port_dip, circ);
		} else {
			ndi_devi_exit(pptr->port_dip, circ);
		}
		devi_entered = 0;

		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
		    FCP_ONLINE, lcount, tcount, 0);

		if (*rval != NDI_SUCCESS) {
			/* Reset the FCP_LUN_ONLINING bit */
			mutex_enter(&LUN_TGT->tgt_mutex);
			plun->lun_state &= ~FCP_LUN_ONLINING;
			mutex_exit(&LUN_TGT->tgt_mutex);
			*rval = EIO;
			break;
		}
		/* Online succeeded: clear the transitional state bits. */
		mutex_enter(&LUN_TGT->tgt_mutex);
		plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
		    FCP_LUN_ONLINING);
		mutex_exit(&LUN_TGT->tgt_mutex);
		break;
	}

	case DEVCTL_BUS_DEV_CREATE: {
		uchar_t			*bytes = NULL;	/* PWWN property */
		uint_t			nbytes;
		struct fcp_tgt		*ptgt = NULL;
		struct fcp_lun		*plun = NULL;
		dev_info_t		*useless_dip = NULL;

		/*
		 * Create a placeholder devinfo just to carry the
		 * caller-supplied properties; it is freed below once the
		 * PWWN has been extracted.
		 */
		*rval = ndi_dc_devi_create(dcp, pptr->port_dip,
		    DEVCTL_CONSTRUCT, &useless_dip);
		if (*rval != 0 || useless_dip == NULL) {
			break;
		}

		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
		    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
			*rval = EINVAL;
			(void) ndi_devi_free(useless_dip);
			if (bytes != NULL) {
				ddi_prop_free(bytes);
			}
			break;
		}

		*rval = fcp_create_on_demand(pptr, bytes);
		if (*rval == 0) {
			mutex_enter(&pptr->port_mutex);
			ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
			if (ptgt) {
				/*
				 * We now have a pointer to the target that
				 * was created. Lets point to the first LUN on
				 * this new target.
				 */
				mutex_enter(&ptgt->tgt_mutex);

				plun = ptgt->tgt_lun;
				/*
				 * There may be stale/offline LUN entries on
				 * this list (this is by design) and so we have
				 * to make sure we point to the first online
				 * LUN
				 */
				while (plun &&
				    plun->lun_state & FCP_LUN_OFFLINE) {
					plun = plun->lun_next;
				}

				mutex_exit(&ptgt->tgt_mutex);
			}
			mutex_exit(&pptr->port_mutex);
		}

		if (*rval == 0 && ptgt && plun) {
			mutex_enter(&plun->lun_mutex);
			/*
			 * Allow up to fcp_lun_ready_retry seconds to
			 * configure all the luns behind the target.
			 *
			 * The intent here is to allow targets with long
			 * reboot/reset-recovery times to become available
			 * while limiting the maximum wait time for an
			 * unresponsive target.
			 */
			end_time = ddi_get_lbolt() +
			    SEC_TO_TICK(fcp_lun_ready_retry);

			while (ddi_get_lbolt() < end_time) {
				retval = FC_SUCCESS;

				/*
				 * The new ndi interfaces for on-demand creation
				 * are inflexible, Do some more work to pass on
				 * a path name of some LUN (design is broken !)
				 */
				if (plun->lun_cip) {
					if (plun->lun_mpxio == 0) {
						cdip = DIP(plun->lun_cip);
					} else {
						cdip = mdi_pi_get_client(
						    PIP(plun->lun_cip));
					}
					if (cdip == NULL) {
						*rval = ENXIO;
						break;
					}

					if (!i_ddi_devi_attached(cdip)) {
						/*
						 * Not attached yet: drop the
						 * lun mutex, sleep 1s, retry.
						 */
						mutex_exit(&plun->lun_mutex);
						delay(drv_usectohz(1000000));
						mutex_enter(&plun->lun_mutex);
					} else {
						/*
						 * This Lun is ready, lets
						 * check the next one.
						 */
						mutex_exit(&plun->lun_mutex);
						plun = plun->lun_next;
						while (plun && (plun->lun_state
						    & FCP_LUN_OFFLINE)) {
							plun = plun->lun_next;
						}
						if (!plun) {
							break;
						}
						mutex_enter(&plun->lun_mutex);
					}
				} else {
					/*
					 * lun_cip field for a valid lun
					 * should never be NULL. Fail the
					 * command.
					 */
					*rval = ENXIO;
					break;
				}
			}
			if (plun) {
				/* Timed out (or failed) mid-list. */
				mutex_exit(&plun->lun_mutex);
			} else {
				/*
				 * All LUNs attached: hand the path name of
				 * the last examined child back to the caller.
				 */
				char devnm[MAXNAMELEN];
				int nmlen;

				nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
				    ddi_node_name(cdip),
				    ddi_get_name_addr(cdip));

				if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
				    0) {
					*rval = EFAULT;
				}
			}
		} else {
			/* Creation failed: log the PWWN we attempted. */
			int i;
			char buf[25];

			for (i = 0; i < FC_WWN_SIZE; i++) {
				(void) sprintf(&buf[i << 1], "%02x", bytes[i]);
			}

			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to create nodes for pwwn=%s; error=%x",
			    buf, *rval);
		}

		(void) ndi_devi_free(useless_dip);
		ddi_prop_free(bytes);
		break;
	}

	case DEVCTL_DEVICE_RESET: {
		struct fcp_lun	*plun;
		child_info_t	*cip = CIP(cdip);

		ASSERT(cdip != NULL);
		ASSERT(pptr != NULL);
		mutex_enter(&pptr->port_mutex);
		if (pip != NULL) {
			cip = CIP(pip);
		}
		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
			mutex_exit(&pptr->port_mutex);
			*rval = ENXIO;
			break;
		}
		mutex_exit(&pptr->port_mutex);

		/*
		 * Verify (under tgt_mutex) that the LUN is initialized
		 * and has a scsi_device before attempting the reset.
		 */
		mutex_enter(&plun->lun_tgt->tgt_mutex);
		if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
			mutex_exit(&plun->lun_tgt->tgt_mutex);

			*rval = ENXIO;
			break;
		}

		if (plun->lun_sd == NULL) {
			mutex_exit(&plun->lun_tgt->tgt_mutex);

			*rval = ENXIO;
			break;
		}
		mutex_exit(&plun->lun_tgt->tgt_mutex);

		/*
		 * set up ap so that fcp_reset can figure out
		 * which target to reset
		 */
		if (fcp_scsi_reset(&plun->lun_sd->sd_address,
		    RESET_TARGET) == FALSE) {
			*rval = EIO;
		}
		break;
	}

	case DEVCTL_BUS_GETSTATE:
		ASSERT(dcp != NULL);
		ASSERT(pptr != NULL);
		ASSERT(pptr->port_dip != NULL);
		if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
		    NDI_SUCCESS) {
			*rval = EFAULT;
		}
		break;

	case DEVCTL_BUS_QUIESCE:
	case DEVCTL_BUS_UNQUIESCE:
		*rval = ENOTSUP;
		break;

	case DEVCTL_BUS_RESET:
	case DEVCTL_BUS_RESETALL:
		ASSERT(pptr != NULL);
		(void) fcp_linkreset(pptr, NULL, KM_SLEEP);
		break;

	default:
		ASSERT(dcp != NULL);
		*rval = ENOTTY;
		break;
	}

	/* all done -- clean up and return */
out:	if (devi_entered) {
		if (is_mpxio) {
			mdi_devi_exit(pptr->port_dip, circ);
		} else {
			ndi_devi_exit(pptr->port_dip, circ);
		}
	}

	if (dcp != NULL) {
		ndi_dc_freehdl(dcp);
	}

	return (retval);
}
4217 4217
4218 4218
4219 4219 /*ARGSUSED*/
4220 4220 static int
4221 4221 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4222 4222 uint32_t claimed)
4223 4223 {
4224 4224 uchar_t r_ctl;
4225 4225 uchar_t ls_code;
4226 4226 struct fcp_port *pptr;
4227 4227
4228 4228 if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4229 4229 return (FC_UNCLAIMED);
4230 4230 }
4231 4231
4232 4232 mutex_enter(&pptr->port_mutex);
4233 4233 if (pptr->port_state & (FCP_STATE_DETACHING |
4234 4234 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4235 4235 mutex_exit(&pptr->port_mutex);
4236 4236 return (FC_UNCLAIMED);
4237 4237 }
4238 4238 mutex_exit(&pptr->port_mutex);
4239 4239
4240 4240 r_ctl = buf->ub_frame.r_ctl;
4241 4241
4242 4242 switch (r_ctl & R_CTL_ROUTING) {
4243 4243 case R_CTL_EXTENDED_SVC:
4244 4244 if (r_ctl == R_CTL_ELS_REQ) {
4245 4245 ls_code = buf->ub_buffer[0];
4246 4246
4247 4247 switch (ls_code) {
4248 4248 case LA_ELS_PRLI:
4249 4249 /*
4250 4250 * We really don't care if something fails.
4251 4251 * If the PRLI was not sent out, then the
4252 4252 * other end will time it out.
4253 4253 */
4254 4254 if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4255 4255 return (FC_SUCCESS);
4256 4256 }
4257 4257 return (FC_UNCLAIMED);
4258 4258 /* NOTREACHED */
4259 4259
4260 4260 default:
4261 4261 break;
4262 4262 }
4263 4263 }
4264 4264 /* FALLTHROUGH */
4265 4265
4266 4266 default:
4267 4267 return (FC_UNCLAIMED);
4268 4268 }
4269 4269 }
4270 4270
4271 4271
4272 4272 /*ARGSUSED*/
4273 4273 static int
4274 4274 fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4275 4275 uint32_t claimed)
4276 4276 {
4277 4277 return (FC_UNCLAIMED);
4278 4278 }
4279 4279
/*
 * Function: fcp_statec_callback
 *
 * Description: The purpose of this function is to handle a port state change.
 *		It is called from fp/fctl and, in a few instances, internally.
 *
 * Argument:	ulph		fp/fctl port handle
 *		port_handle	fcp_port structure
 *		port_state	Physical state of the port
 *		port_top	Topology
 *		*devlist	Pointer to the first entry of a table
 *				containing the remote ports that can be
 *				reached.
 *		dev_cnt		Number of entries pointed by devlist.
 *		port_sid	Port ID of the local port.
 *
 * Return Value: None
 *
 * Locking: port_mutex is taken early and held across the state switch;
 *	every branch of the switch is responsible for dropping it before
 *	returning/breaking.
 */
/*ARGSUSED*/
static void
fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
    uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
    uint32_t dev_cnt, uint32_t port_sid)
{
	uint32_t		link_count;
	int			map_len = 0;
	struct fcp_port	*pptr;
	fcp_map_tag_t	*map_tag = NULL;

	if ((pptr = fcp_get_port(port_handle)) == NULL) {
		fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
		return;			/* nothing to work with! */
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "fcp_statec_callback: port state/dev_cnt/top ="
	    "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
	    dev_cnt, port_top);

	mutex_enter(&pptr->port_mutex);

	/*
	 * If a thread is in detach, don't do anything.
	 */
	if (pptr->port_state & (FCP_STATE_DETACHING |
	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
		mutex_exit(&pptr->port_mutex);
		return;
	}

	/*
	 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
	 * init_pkt is called, it knows whether or not the target's status
	 * (or pd) might be changing.
	 */

	if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
		pptr->port_state |= FCP_STATE_IN_CB_DEVC;
	}

	/*
	 * the transport doesn't allocate or probe unless being
	 * asked to by either the applications or ULPs
	 *
	 * in cases where the port is OFFLINE at the time of port
	 * attach callback and the link comes ONLINE later, for
	 * easier automatic node creation (i.e. without you having to
	 * go out and run the utility to perform LOGINs) the
	 * following conditional is helpful
	 */
	pptr->port_phys_state = port_state;

	if (dev_cnt) {
		/*
		 * Allocate one change-count tag per devlist entry.  The
		 * port_mutex is dropped around the KM_NOSLEEP allocation;
		 * on failure the whole state change is abandoned (after
		 * clearing IN_CB_DEVC).
		 */
		mutex_exit(&pptr->port_mutex);

		map_len = sizeof (*map_tag) * dev_cnt;
		map_tag = kmem_alloc(map_len, KM_NOSLEEP);
		if (map_tag == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!fcp%d: failed to allocate for map tags; "
			    " state change will not be processed",
			    pptr->port_instance);

			mutex_enter(&pptr->port_mutex);
			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
			mutex_exit(&pptr->port_mutex);

			return;
		}

		mutex_enter(&pptr->port_mutex);
	}

	if (pptr->port_id != port_sid) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
		    port_sid);
		/*
		 * The local port changed ID. It is the first time a port ID
		 * is assigned or something drastic happened.  We might have
		 * been unplugged and replugged on another loop or fabric port
		 * or somebody grabbed the AL_PA we had or somebody rezoned
		 * the fabric we were plugged into.
		 */
		pptr->port_id = port_sid;
	}

	switch (FC_PORT_STATE_MASK(port_state)) {
	case FC_STATE_OFFLINE:
	case FC_STATE_RESET_REQUESTED:
		/*
		 * link has gone from online to offline -- just update the
		 * state of this port to BUSY and MARKed to go offline
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "link went offline");
		if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
			/*
			 * We were offline a while ago and this one
			 * seems to indicate that the loop has gone
			 * dead forever.
			 */
			pptr->port_tmp_cnt += dev_cnt;
			pptr->port_state &= ~FCP_STATE_OFFLINE;
			pptr->port_state |= FCP_STATE_INIT;
			link_count = pptr->port_link_cnt;
			fcp_handle_devices(pptr, devlist, dev_cnt,
			    link_count, map_tag, FCP_CAUSE_LINK_DOWN);
		} else {
			pptr->port_link_cnt++;
			ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
			fcp_update_state(pptr, (FCP_LUN_BUSY |
			    FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
			if (pptr->port_mpxio) {
				fcp_update_mpxio_path_verifybusy(pptr);
			}
			pptr->port_state |= FCP_STATE_OFFLINE;
			pptr->port_state &=
			    ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
			pptr->port_tmp_cnt = 0;
		}
		mutex_exit(&pptr->port_mutex);
		break;

	case FC_STATE_ONLINE:
	case FC_STATE_LIP:
	case FC_STATE_LIP_LBIT_SET:
		/*
		 * link has gone from offline to online
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "link went online");

		pptr->port_link_cnt++;

		/*
		 * Wait for in-flight internal packets to drain before
		 * reworking the target/lun state (1s poll, port_mutex
		 * dropped while sleeping).
		 */
		while (pptr->port_ipkt_cnt) {
			mutex_exit(&pptr->port_mutex);
			delay(drv_usectohz(1000000));
			mutex_enter(&pptr->port_mutex);
		}

		pptr->port_topology = port_top;

		/*
		 * The state of the targets and luns accessible through this
		 * port is updated.
		 */
		fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
		    FCP_CAUSE_LINK_CHANGE);

		pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
		pptr->port_state |= FCP_STATE_ONLINING;
		pptr->port_tmp_cnt = dev_cnt;
		link_count = pptr->port_link_cnt;

		pptr->port_deadline = fcp_watchdog_time +
		    FCP_ICMD_DEADLINE;

		if (!dev_cnt) {
			/*
			 * We go directly to the online state if no remote
			 * ports were discovered.
			 */
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "No remote ports discovered");

			pptr->port_state &= ~FCP_STATE_ONLINING;
			pptr->port_state |= FCP_STATE_ONLINE;
		}

		switch (port_top) {
		case FC_TOP_FABRIC:
		case FC_TOP_PUBLIC_LOOP:
		case FC_TOP_PRIVATE_LOOP:
		case FC_TOP_PT_PT:

			/* Retry a failed name-server registration first. */
			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
				fcp_retry_ns_registry(pptr, port_sid);
			}

			fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
			    map_tag, FCP_CAUSE_LINK_CHANGE);
			break;

		default:
			/*
			 * We got here because we were provided with an unknown
			 * topology.
			 */
			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
				pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
			}

			pptr->port_tmp_cnt -= dev_cnt;
			fcp_log(CE_WARN, pptr->port_dip,
			    "!unknown/unsupported topology (0x%x)", port_top);
			break;
		}
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "Notify ssd of the reset to reinstate the reservations");

		scsi_hba_reset_notify_callback(&pptr->port_mutex,
		    &pptr->port_reset_notify_listf);

		mutex_exit(&pptr->port_mutex);

		break;

	case FC_STATE_RESET:
		ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "RESET state, waiting for Offline/Online state_cb");
		mutex_exit(&pptr->port_mutex);
		break;

	case FC_STATE_DEVICE_CHANGE:
		/*
		 * We come here when an application has requested
		 * Dynamic node creation/deletion in Fabric connectivity.
		 */
		if (pptr->port_state & (FCP_STATE_OFFLINE |
		    FCP_STATE_INIT)) {
			/*
			 * This case can happen when the FCTL is in the
			 * process of giving us on online and the host on
			 * the other side issues a PLOGI/PLOGO. Ideally
			 * the state changes should be serialized unless
			 * they are opposite (online-offline).
			 * The transport will give us a final state change
			 * so we can ignore this for the time being.
			 */
			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
			mutex_exit(&pptr->port_mutex);
			break;
		}

		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
			fcp_retry_ns_registry(pptr, port_sid);
		}

		/*
		 * Extend the deadline under steady state conditions
		 * to provide more time for the device-change-commands
		 */
		if (!pptr->port_ipkt_cnt) {
			pptr->port_deadline = fcp_watchdog_time +
			    FCP_ICMD_DEADLINE;
		}

		/*
		 * There is another race condition here, where if we were
		 * in ONLINEING state and a devices in the map logs out,
		 * fp will give another state change as DEVICE_CHANGE
		 * and OLD. This will result in that target being offlined.
		 * The pd_handle is freed. If from the first statec callback
		 * we were going to fire a PLOGI/PRLI, the system will
		 * panic in fc_ulp_transport with invalid pd_handle.
		 * The fix is to check for the link_cnt before issuing
		 * any command down.
		 */
		fcp_update_targets(pptr, devlist, dev_cnt,
		    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);

		link_count = pptr->port_link_cnt;

		fcp_handle_devices(pptr, devlist, dev_cnt,
		    link_count, map_tag, FCP_CAUSE_TGT_CHANGE);

		pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;

		mutex_exit(&pptr->port_mutex);
		break;

	case FC_STATE_TARGET_PORT_RESET:
		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
			fcp_retry_ns_registry(pptr, port_sid);
		}

		/* Do nothing else */
		mutex_exit(&pptr->port_mutex);
		break;

	default:
		fcp_log(CE_WARN, pptr->port_dip,
		    "!Invalid state change=0x%x", port_state);
		mutex_exit(&pptr->port_mutex);
		break;
	}

	if (map_tag) {
		kmem_free(map_tag, map_len);
	}
}
4600 4600
4601 4601 /*
4602 4602 * Function: fcp_handle_devices
4603 4603 *
4604 4604 * Description: This function updates the devices currently known by
4605 4605 * walking the list provided by the caller. The list passed
4606 4606 * by the caller is supposed to be the list of reachable
4607 4607 * devices.
4608 4608 *
4609 4609 * Argument: *pptr Fcp port structure.
4610 4610 * *devlist Pointer to the first entry of a table
4611 4611 * containing the remote ports that can be
4612 4612 * reached.
4613 4613 * dev_cnt Number of entries pointed by devlist.
4614 4614 * link_cnt Link state count.
4615 4615 * *map_tag Array of fcp_map_tag_t structures.
4616 4616 * cause What caused this function to be called.
4617 4617 *
4618 4618 * Return Value: None
4619 4619 *
4620 4620 * Notes: The pptr->port_mutex must be held.
4621 4621 */
4622 4622 static void
4623 4623 fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
4624 4624 uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
4625 4625 {
4626 4626 int i;
4627 4627 int check_finish_init = 0;
4628 4628 fc_portmap_t *map_entry;
4629 4629 struct fcp_tgt *ptgt = NULL;
4630 4630
4631 4631 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4632 4632 fcp_trace, FCP_BUF_LEVEL_3, 0,
4633 4633 "fcp_handle_devices: called for %d dev(s)", dev_cnt);
4634 4634
4635 4635 if (dev_cnt) {
4636 4636 ASSERT(map_tag != NULL);
4637 4637 }
4638 4638
4639 4639 /*
4640 4640 * The following code goes through the list of remote ports that are
4641 4641 * accessible through this (pptr) local port (The list walked is the
4642 4642 * one provided by the caller which is the list of the remote ports
4643 4643 * currently reachable). It checks if any of them was already
4644 4644 * known by looking for the corresponding target structure based on
4645 4645 * the world wide name. If a target is part of the list it is tagged
4646 4646 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
4647 4647 *
4648 4648 * Old comment
4649 4649 * -----------
4650 4650 * Before we drop port mutex; we MUST get the tags updated; This
4651 4651 * two step process is somewhat slow, but more reliable.
4652 4652 */
4653 4653 for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4654 4654 map_entry = &(devlist[i]);
4655 4655
4656 4656 /*
4657 4657 * get ptr to this map entry in our port's
4658 4658 * list (if any)
4659 4659 */
4660 4660 ptgt = fcp_lookup_target(pptr,
4661 4661 (uchar_t *)&(map_entry->map_pwwn));
4662 4662
4663 4663 if (ptgt) {
4664 4664 map_tag[i] = ptgt->tgt_change_cnt;
4665 4665 if (cause == FCP_CAUSE_LINK_CHANGE) {
4666 4666 ptgt->tgt_aux_state = FCP_TGT_TAGGED;
4667 4667 }
4668 4668 }
4669 4669 }
4670 4670
4671 4671 /*
4672 4672 * At this point we know which devices of the new list were already
4673 4673 * known (The field tgt_aux_state of the target structure has been
4674 4674 * set to FCP_TGT_TAGGED).
4675 4675 *
4676 4676 * The following code goes through the list of targets currently known
4677 4677 * by the local port (the list is actually a hashing table). If a
4678 4678 * target is found and is not tagged, it means the target cannot
4679 4679 * be reached anymore through the local port (pptr). It is offlined.
4680 4680 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
4681 4681 */
4682 4682 for (i = 0; i < FCP_NUM_HASH; i++) {
4683 4683 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
4684 4684 ptgt = ptgt->tgt_next) {
4685 4685 mutex_enter(&ptgt->tgt_mutex);
4686 4686 if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
4687 4687 (cause == FCP_CAUSE_LINK_CHANGE) &&
4688 4688 !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4689 4689 fcp_offline_target_now(pptr, ptgt,
4690 4690 link_cnt, ptgt->tgt_change_cnt, 0);
4691 4691 }
4692 4692 mutex_exit(&ptgt->tgt_mutex);
4693 4693 }
4694 4694 }
4695 4695
4696 4696 /*
4697 4697 * At this point, the devices that were known but cannot be reached
4698 4698 * anymore, have most likely been offlined.
4699 4699 *
4700 4700 * The following section of code seems to go through the list of
4701 4701 * remote ports that can now be reached. For every single one it
4702 4702 * checks if it is already known or if it is a new port.
4703 4703 */
4704 4704 for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4705 4705
4706 4706 if (check_finish_init) {
4707 4707 ASSERT(i > 0);
4708 4708 (void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4709 4709 map_tag[i - 1], cause);
4710 4710 check_finish_init = 0;
4711 4711 }
4712 4712
4713 4713 /* get a pointer to this map entry */
4714 4714 map_entry = &(devlist[i]);
4715 4715
4716 4716 /*
4717 4717 * Check for the duplicate map entry flag. If we have marked
4718 4718 * this entry as a duplicate we skip it since the correct
4719 4719 * (perhaps even same) state change will be encountered
4720 4720 * later in the list.
4721 4721 */
4722 4722 if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
4723 4723 continue;
4724 4724 }
4725 4725
4726 4726 /* get ptr to this map entry in our port's list (if any) */
4727 4727 ptgt = fcp_lookup_target(pptr,
4728 4728 (uchar_t *)&(map_entry->map_pwwn));
4729 4729
4730 4730 if (ptgt) {
4731 4731 /*
4732 4732 * This device was already known. The field
4733 4733 * tgt_aux_state is reset (was probably set to
4734 4734 * FCP_TGT_TAGGED previously in this routine).
4735 4735 */
4736 4736 ptgt->tgt_aux_state = 0;
4737 4737 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4738 4738 fcp_trace, FCP_BUF_LEVEL_3, 0,
4739 4739 "handle_devices: map did/state/type/flags = "
4740 4740 "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
4741 4741 "tgt_state=%d",
4742 4742 map_entry->map_did.port_id, map_entry->map_state,
4743 4743 map_entry->map_type, map_entry->map_flags,
4744 4744 ptgt->tgt_d_id, ptgt->tgt_state);
4745 4745 }
4746 4746
4747 4747 if (map_entry->map_type == PORT_DEVICE_OLD ||
4748 4748 map_entry->map_type == PORT_DEVICE_NEW ||
4749 4749 map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED ||
4750 4750 map_entry->map_type == PORT_DEVICE_CHANGED) {
4751 4751 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4752 4752 fcp_trace, FCP_BUF_LEVEL_2, 0,
4753 4753 "map_type=%x, did = %x",
4754 4754 map_entry->map_type,
4755 4755 map_entry->map_did.port_id);
4756 4756 }
4757 4757
4758 4758 switch (map_entry->map_type) {
4759 4759 case PORT_DEVICE_NOCHANGE:
4760 4760 case PORT_DEVICE_USER_CREATE:
4761 4761 case PORT_DEVICE_USER_LOGIN:
4762 4762 case PORT_DEVICE_NEW:
4763 4763 case PORT_DEVICE_REPORTLUN_CHANGED:
4764 4764 FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);
4765 4765
4766 4766 if (fcp_handle_mapflags(pptr, ptgt, map_entry,
4767 4767 link_cnt, (ptgt) ? map_tag[i] : 0,
4768 4768 cause) == TRUE) {
4769 4769
4770 4770 FCP_TGT_TRACE(ptgt, map_tag[i],
4771 4771 FCP_TGT_TRACE_2);
4772 4772 check_finish_init++;
4773 4773 }
4774 4774 break;
4775 4775
4776 4776 case PORT_DEVICE_OLD:
4777 4777 if (ptgt != NULL) {
4778 4778 FCP_TGT_TRACE(ptgt, map_tag[i],
4779 4779 FCP_TGT_TRACE_3);
4780 4780
4781 4781 mutex_enter(&ptgt->tgt_mutex);
4782 4782 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4783 4783 /*
4784 4784 * Must do an in-line wait for I/Os
4785 4785 * to get drained
4786 4786 */
4787 4787 mutex_exit(&ptgt->tgt_mutex);
4788 4788 mutex_exit(&pptr->port_mutex);
4789 4789
4790 4790 mutex_enter(&ptgt->tgt_mutex);
4791 4791 while (ptgt->tgt_ipkt_cnt ||
4792 4792 fcp_outstanding_lun_cmds(ptgt)
4793 4793 == FC_SUCCESS) {
4794 4794 mutex_exit(&ptgt->tgt_mutex);
4795 4795 delay(drv_usectohz(1000000));
4796 4796 mutex_enter(&ptgt->tgt_mutex);
4797 4797 }
4798 4798 mutex_exit(&ptgt->tgt_mutex);
4799 4799
4800 4800 mutex_enter(&pptr->port_mutex);
4801 4801 mutex_enter(&ptgt->tgt_mutex);
4802 4802
4803 4803 (void) fcp_offline_target(pptr, ptgt,
4804 4804 link_cnt, map_tag[i], 0, 0);
4805 4805 }
4806 4806 mutex_exit(&ptgt->tgt_mutex);
4807 4807 }
4808 4808 check_finish_init++;
4809 4809 break;
4810 4810
4811 4811 case PORT_DEVICE_USER_DELETE:
4812 4812 case PORT_DEVICE_USER_LOGOUT:
4813 4813 if (ptgt != NULL) {
4814 4814 FCP_TGT_TRACE(ptgt, map_tag[i],
4815 4815 FCP_TGT_TRACE_4);
4816 4816
4817 4817 mutex_enter(&ptgt->tgt_mutex);
4818 4818 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4819 4819 (void) fcp_offline_target(pptr, ptgt,
4820 4820 link_cnt, map_tag[i], 1, 0);
4821 4821 }
4822 4822 mutex_exit(&ptgt->tgt_mutex);
4823 4823 }
4824 4824 check_finish_init++;
4825 4825 break;
4826 4826
4827 4827 case PORT_DEVICE_CHANGED:
4828 4828 if (ptgt != NULL) {
4829 4829 FCP_TGT_TRACE(ptgt, map_tag[i],
4830 4830 FCP_TGT_TRACE_5);
4831 4831
4832 4832 if (fcp_device_changed(pptr, ptgt,
4833 4833 map_entry, link_cnt, map_tag[i],
4834 4834 cause) == TRUE) {
4835 4835 check_finish_init++;
4836 4836 }
4837 4837 } else {
4838 4838 if (fcp_handle_mapflags(pptr, ptgt,
4839 4839 map_entry, link_cnt, 0, cause) == TRUE) {
4840 4840 check_finish_init++;
4841 4841 }
4842 4842 }
4843 4843 break;
4844 4844
4845 4845 default:
4846 4846 fcp_log(CE_WARN, pptr->port_dip,
4847 4847 "!Invalid map_type=0x%x", map_entry->map_type);
4848 4848 check_finish_init++;
4849 4849 break;
4850 4850 }
4851 4851 }
4852 4852
4853 4853 if (check_finish_init && pptr->port_link_cnt == link_cnt) {
4854 4854 ASSERT(i > 0);
4855 4855 (void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4856 4856 map_tag[i-1], cause);
4857 4857 } else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
4858 4858 fcp_offline_all(pptr, link_cnt, cause);
4859 4859 }
4860 4860 }
4861 4861
/*
 * Function: fcp_handle_reportlun_changed
 *
 * Description: Handles the PORT_DEVICE_REPORTLUN_CHANGED map type by
 *		re-probing the target's LUN inventory: LUN 0 is looked up
 *		(or allocated if it does not exist yet), marked busy, and a
 *		REPORT LUN command is sent to the target.  The rest of the
 *		rediscovery happens in the REPORT LUN completion path.
 *
 * Argument:	*ptgt		Target whose LUN inventory changed.
 *		cause		What caused this function to be called.
 *
 * Return Value: TRUE	Failed (LUN 0 allocation or REPORT LUN send failed).
 *		 FALSE	Succeeded (REPORT LUN was sent).
 */
static int
fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause)
{
	struct fcp_lun	*plun;
	struct fcp_port	*pptr;
	int		rscn_count;
	int		lun0_newalloc;
	int		ret = TRUE;

	ASSERT(ptgt);
	pptr = ptgt->tgt_port;
	lun0_newalloc = 0;
	if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
		/*
		 * no LUN struct for LUN 0 yet exists,
		 * so create one
		 */
		plun = fcp_alloc_lun(ptgt);
		if (plun == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to allocate lun 0 for"
			    " D_ID=%x", ptgt->tgt_d_id);
			return (ret);
		}
		lun0_newalloc = 1;
	}

	mutex_enter(&ptgt->tgt_mutex);
	/*
	 * consider lun 0 as device not connected if it is
	 * offlined or newly allocated
	 */
	if ((plun->lun_state & FCP_LUN_OFFLINE) || lun0_newalloc) {
		plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
	}
	/*
	 * Mark LUN 0 busy and reset the target's LUN accounting; the
	 * REPORT LUN response will rebuild the rest of the inventory.
	 */
	plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
	plun->lun_state &= ~FCP_LUN_OFFLINE;
	ptgt->tgt_lun_cnt = 1;
	ptgt->tgt_report_lun_cnt = 0;
	mutex_exit(&ptgt->tgt_mutex);

	rscn_count = fc_ulp_get_rscn_count(pptr->port_fp_handle);
	if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
	    sizeof (struct fcp_reportlun_resp), pptr->port_link_cnt,
	    ptgt->tgt_change_cnt, cause, rscn_count) != DDI_SUCCESS) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!Failed to send REPORTLUN "
		    "to D_ID=%x", ptgt->tgt_d_id);
	} else {
		ret = FALSE;
	}

	return (ret);
}
4916 4916
/*
 * Function: fcp_handle_mapflags
 *
 * Description: This function creates a target structure if the ptgt passed
 *		is NULL. It also kicks off the PLOGI if we are not logged
 *		into the target yet or the PRLI if we are logged into the
 *		target already. The rest of the treatment is done in the
 *		callbacks of the PLOGI or PRLI.
 *
 * Argument:	*pptr		FCP Port structure.
 *		*ptgt		Target structure.
 *		*map_entry	Array of fc_portmap_t structures.
 *		link_cnt	Link state count.
 *		tgt_cnt		Target state count.
 *		cause		What caused this function to be called.
 *
 * Return Value: TRUE	Failed
 *		 FALSE	Succeeded
 *
 * Notes:	pptr->port_mutex must be owned.  The mutex is dropped and
 *		reacquired at several points below (target allocation,
 *		REPORT LUN handling, ELS submission); it is always held
 *		again on return.
 */
static int
fcp_handle_mapflags(struct fcp_port	*pptr, struct fcp_tgt	*ptgt,
    fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
{
	int			lcount;
	int			tcount;
	int			ret = TRUE;
	int			alloc;
	struct fcp_ipkt	*icmd;
	struct fcp_lun	*pseq_lun = NULL;
	uchar_t			opcode;
	int			valid_ptgt_was_passed = FALSE;

	ASSERT(mutex_owned(&pptr->port_mutex));

	/*
	 * This case is possible where the FCTL has come up and done discovery
	 * before FCP was loaded and attached. FCTL would have discovered the
	 * devices and later the ULP came online. In this case ULP's would get
	 * PORT_DEVICE_NOCHANGE but target would be NULL.
	 */
	if (ptgt == NULL) {
		/* don't already have a target */
		mutex_exit(&pptr->port_mutex);
		ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
		mutex_enter(&pptr->port_mutex);

		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			return (ret);
		}
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_statec_cause = cause;
		ptgt->tgt_tmp_cnt = 1;
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		valid_ptgt_was_passed = TRUE;
	}

	/*
	 * Copy in the target parameters
	 */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_d_id = map_entry->map_did.port_id;
	ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
	ptgt->tgt_pd_handle = map_entry->map_pd;
	ptgt->tgt_fca_dev = NULL;

	/* Copy port and node WWNs */
	bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
	    FC_WWN_SIZE);
	bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
	    FC_WWN_SIZE);

	/*
	 * For a pre-existing, logged-in target with no state change, a
	 * target holding online tape (sequential) LUNs is left alone:
	 * only the MARK bits are cleared and TRUE is returned so the
	 * caller skips rediscovery of this target.
	 */
	if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
	    (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
	    (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
	    valid_ptgt_was_passed) {
		/*
		 * determine if there are any tape LUNs on this target
		 */
		for (pseq_lun = ptgt->tgt_lun;
		    pseq_lun != NULL;
		    pseq_lun = pseq_lun->lun_next) {
			if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
			    !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
				fcp_update_tgt_state(ptgt, FCP_RESET,
				    FCP_LUN_MARK);
				mutex_exit(&ptgt->tgt_mutex);
				return (ret);
			}
		}
	}

	/*
	 * if UA'REPORT_LUN_CHANGED received,
	 * send out REPORT LUN promptly, skip PLOGI/PRLI process
	 */
	if (map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED) {
		ptgt->tgt_state &= ~(FCP_TGT_OFFLINE | FCP_TGT_MARK);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		ret = fcp_handle_reportlun_changed(ptgt, cause);

		mutex_enter(&pptr->port_mutex);
		return (ret);
	}

	/*
	 * If ptgt was NULL when this function was entered, then tgt_node_state
	 * was never specifically initialized but zeroed out which means
	 * FCP_TGT_NODE_NONE.
	 */
	switch (ptgt->tgt_node_state) {
	case FCP_TGT_NODE_NONE:
	case FCP_TGT_NODE_ON_DEMAND:
		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    !fcp_enable_auto_configuration &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    fcp_enable_auto_configuration &&
		    (ptgt->tgt_manual_config_only == 1) &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			/*
			 * If auto configuration is set and
			 * the tgt_manual_config_only flag is set then
			 * we only want the user to be able to change
			 * the state through create_on_demand.
			 */
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
		break;

	case FCP_TGT_NODE_PRESENT:
		break;
	}
	/*
	 * If we are booting from a fabric device, make sure we
	 * mark the node state appropriately for this target to be
	 * enumerated
	 */
	if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
		if (bcmp((caddr_t)pptr->port_boot_wwn,
		    (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
		    sizeof (ptgt->tgt_port_wwn)) == 0) {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
	}
	mutex_exit(&ptgt->tgt_mutex);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
	    map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
	    map_entry->map_rscn_info.ulp_rscn_count);

	mutex_enter(&ptgt->tgt_mutex);

	/*
	 * Reset target OFFLINE state and mark the target BUSY
	 */
	ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
	ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);

	tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
	lcount = link_cnt;

	/* Drop both locks for the (potentially sleeping) icmd allocation. */
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/*
	 * if we are already logged in, then we do a PRLI, else
	 * we do a PLOGI first (to get logged in)
	 *
	 * We will not check if we are the PLOGI initiator
	 */
	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;

	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));

	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA, lcount, tcount,
	    cause, map_entry->map_rscn_info.ulp_rscn_count);

	if (icmd == NULL) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
		/*
		 * We've exited port_mutex before calling fcp_icmd_alloc,
		 * we need to make sure we reacquire it before returning.
		 */
		mutex_enter(&pptr->port_mutex);
		return (FALSE);
	}

	/* TRUE is only returned while target is intended skipped */
	ret = FALSE;
	/* discover info about this target */
	if ((fcp_send_els(pptr, ptgt, icmd, opcode,
	    lcount, tcount, cause)) == DDI_SUCCESS) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
	} else {
		/* fcp_send_els() did not consume the icmd; release it. */
		fcp_icmd_free(pptr, icmd);
		ret = TRUE;
	}
	mutex_enter(&pptr->port_mutex);

	return (ret);
}
5132 5132
/*
 * Function: fcp_send_els
 *
 * Description: Sends an ELS to the target specified by the caller. Supports
 *		PLOGI and PRLI.
 *
 * Argument:	*pptr		Fcp port.
 *		*ptgt		Target to send the ELS to.
 *		*icmd		Internal packet
 *		opcode		ELS opcode
 *		lcount		Link state change counter
 *		tcount		Target state change counter
 *		cause		What caused the call
 *
 * Return Value: DDI_SUCCESS
 *		 Others
 *
 * Notes:	If icmd is NULL an internal packet is allocated here; on
 *		failure such an internally-allocated packet is freed before
 *		returning, whereas a caller-supplied icmd is left for the
 *		caller to release.
 */
static int
fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
{
	fc_packet_t	*fpkt;
	fc_frame_hdr_t	*hp;
	int		internal = 0;	/* non-zero: icmd allocated here */
	int		alloc;
	int		cmd_len;
	int		resp_len;
	int		res = DDI_FAILURE; /* default result */
	int		rval = DDI_FAILURE;

	ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
	ASSERT(ptgt->tgt_port == pptr);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
	    (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");

	if (opcode == LA_ELS_PLOGI) {
		cmd_len = sizeof (la_els_logi_t);
		resp_len = sizeof (la_els_logi_t);
	} else {
		ASSERT(opcode == LA_ELS_PRLI);
		cmd_len = sizeof (la_els_prli_t);
		resp_len = sizeof (la_els_prli_t);
	}

	if (icmd == NULL) {
		alloc = FCP_MAX(sizeof (la_els_logi_t),
		    sizeof (la_els_prli_t));
		icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
		    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
		    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
		if (icmd == NULL) {
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
			return (res);
		}
		internal++;
	}
	fpkt = icmd->ipkt_fpkt;

	fpkt->pkt_cmdlen = cmd_len;
	fpkt->pkt_rsplen = resp_len;
	fpkt->pkt_datalen = 0;
	icmd->ipkt_retries = 0;

	/* fill in fpkt info */
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;

	/* get ptr to frame hdr in fpkt */
	hp = &fpkt->pkt_cmd_fhdr;

	/*
	 * fill in frame hdr
	 */
	hp->r_ctl = R_CTL_ELS_REQ;
	hp->s_id = pptr->port_id;	/* source ID */
	hp->d_id = ptgt->tgt_d_id;	/* dest ID */
	hp->type = FC_TYPE_EXTENDED_LS;
	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
	hp->seq_id = 0;
	hp->rsvd = 0;
	hp->df_ctl = 0;
	hp->seq_cnt = 0;
	hp->ox_id = 0xffff;		/* i.e. none */
	hp->rx_id = 0xffff;		/* i.e. none */
	hp->ro = 0;

	/*
	 * at this point we have a filled in cmd pkt
	 *
	 * fill in the respective info, then use the transport to send
	 * the packet
	 *
	 * for a PLOGI call fc_ulp_login(), and
	 * for a PRLI call fc_ulp_issue_els()
	 */
	switch (opcode) {
	case LA_ELS_PLOGI: {
		struct la_els_logi logi;

		bzero(&logi, sizeof (struct la_els_logi));

		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;
		logi.ls_code.ls_code = LA_ELS_PLOGI;
		logi.ls_code.mbz = 0;

		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));

		icmd->ipkt_opcode = LA_ELS_PLOGI;

		/*
		 * Only submit the PLOGI if no link/target state change
		 * occurred since the icmd was built; the check must be
		 * made under port_mutex.
		 */
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

			mutex_exit(&pptr->port_mutex);

			rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}

			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);

			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PLOGI");
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "fcp_send_els1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
		}
		break;
	}

	case LA_ELS_PRLI: {
		struct la_els_prli	prli;
		struct fcp_prli		*fprli;

		bzero(&prli, sizeof (struct la_els_prli));

		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;

		/* fill in PRLI cmd ELS fields */
		prli.ls_code = LA_ELS_PRLI;
		prli.page_length = 0x10;	/* huh? */
		prli.payload_length = sizeof (struct la_els_prli);

		icmd->ipkt_opcode = LA_ELS_PRLI;

		/* get ptr to PRLI service params */
		fprli = (struct fcp_prli *)prli.service_params;

		/* fill in service params */
		fprli->type = 0x08;
		fprli->resvd1 = 0;
		fprli->orig_process_assoc_valid = 0;
		fprli->resp_process_assoc_valid = 0;
		fprli->establish_image_pair = 1;
		fprli->resvd2 = 0;
		fprli->resvd3 = 0;
		fprli->obsolete_1 = 0;
		fprli->obsolete_2 = 0;
		fprli->data_overlay_allowed = 0;
		fprli->initiator_fn = 1;
		fprli->confirmed_compl_allowed = 1;

		/* advertise target function only if the target ULP exists */
		if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
			fprli->target_fn = 1;
		} else {
			fprli->target_fn = 0;
		}

		fprli->retry = 1;
		fprli->read_xfer_rdy_disabled = 1;
		fprli->write_xfer_rdy_disabled = 0;

		FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));

		/* issue the PRLI request */

		/*
		 * As for PLOGI above: verify under port_mutex that no
		 * state change occurred before handing off the packet.
		 */
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

			mutex_exit(&pptr->port_mutex);

			rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}

			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);

			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PRLI");
		} else {
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
		}
		break;
	}

	default:
		fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
		break;
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: returning %d", res);

	/* on failure, free the icmd only if it was allocated here */
	if (res != DDI_SUCCESS) {
		if (internal) {
			fcp_icmd_free(pptr, icmd);
		}
	}

	return (res);
}
5361 5361
5362 5362
5363 5363 /*
5364 5364 * called internally update the state of all of the tgts and each LUN
5365 5365 * for this port (i.e. each target known to be attached to this port)
5366 5366 * if they are not already offline
5367 5367 *
5368 5368 * must be called with the port mutex owned
5369 5369 *
5370 5370 * acquires and releases the target mutexes for each target attached
5371 5371 * to this port
5372 5372 */
5373 5373 void
5374 5374 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5375 5375 {
5376 5376 int i;
5377 5377 struct fcp_tgt *ptgt;
5378 5378
5379 5379 ASSERT(mutex_owned(&pptr->port_mutex));
5380 5380
5381 5381 for (i = 0; i < FCP_NUM_HASH; i++) {
5382 5382 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5383 5383 ptgt = ptgt->tgt_next) {
5384 5384 mutex_enter(&ptgt->tgt_mutex);
5385 5385 fcp_update_tgt_state(ptgt, FCP_SET, state);
5386 5386 ptgt->tgt_change_cnt++;
5387 5387 ptgt->tgt_statec_cause = cause;
5388 5388 ptgt->tgt_tmp_cnt = 1;
5389 5389 ptgt->tgt_done = 0;
5390 5390 mutex_exit(&ptgt->tgt_mutex);
5391 5391 }
5392 5392 }
5393 5393 }
5394 5394
5395 5395
5396 5396 static void
5397 5397 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5398 5398 {
5399 5399 int i;
5400 5400 int ndevs;
5401 5401 struct fcp_tgt *ptgt;
5402 5402
5403 5403 ASSERT(mutex_owned(&pptr->port_mutex));
5404 5404
5405 5405 for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5406 5406 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5407 5407 ptgt = ptgt->tgt_next) {
5408 5408 ndevs++;
5409 5409 }
5410 5410 }
5411 5411
5412 5412 if (ndevs == 0) {
5413 5413 return;
5414 5414 }
5415 5415 pptr->port_tmp_cnt = ndevs;
5416 5416
5417 5417 for (i = 0; i < FCP_NUM_HASH; i++) {
5418 5418 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5419 5419 ptgt = ptgt->tgt_next) {
5420 5420 (void) fcp_call_finish_init_held(pptr, ptgt,
5421 5421 lcount, ptgt->tgt_change_cnt, cause);
5422 5422 }
5423 5423 }
5424 5424 }
5425 5425
5426 5426 /*
5427 5427 * Function: fcp_update_tgt_state
5428 5428 *
5429 5429 * Description: This function updates the field tgt_state of a target. That
5430 5430 * field is a bitmap and which bit can be set or reset
5431 5431 * individually. The action applied to the target state is also
5432 5432 * applied to all the LUNs belonging to the target (provided the
5433 5433 * LUN is not offline). A side effect of applying the state
5434 5434 * modification to the target and the LUNs is the field tgt_trace
5435 5435 * of the target and lun_trace of the LUNs is set to zero.
5436 5436 *
5437 5437 *
5438 5438 * Argument: *ptgt Target structure.
5439 5439 * flag Flag indication what action to apply (set/reset).
5440 5440 * state State bits to update.
5441 5441 *
5442 5442 * Return Value: None
5443 5443 *
5444 5444 * Context: Interrupt, Kernel or User context.
5445 5445 * The mutex of the target (ptgt->tgt_mutex) must be owned when
5446 5446 * calling this function.
5447 5447 */
5448 5448 void
5449 5449 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5450 5450 {
5451 5451 struct fcp_lun *plun;
5452 5452
5453 5453 ASSERT(mutex_owned(&ptgt->tgt_mutex));
5454 5454
5455 5455 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5456 5456 /* The target is not offline. */
5457 5457 if (flag == FCP_SET) {
5458 5458 ptgt->tgt_state |= state;
5459 5459 ptgt->tgt_trace = 0;
5460 5460 } else {
5461 5461 ptgt->tgt_state &= ~state;
5462 5462 }
5463 5463
5464 5464 for (plun = ptgt->tgt_lun; plun != NULL;
5465 5465 plun = plun->lun_next) {
5466 5466 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5467 5467 /* The LUN is not offline. */
5468 5468 if (flag == FCP_SET) {
5469 5469 plun->lun_state |= state;
5470 5470 plun->lun_trace = 0;
5471 5471 } else {
5472 5472 plun->lun_state &= ~state;
5473 5473 }
5474 5474 }
5475 5475 }
5476 5476 }
5477 5477 }
5478 5478
/*
 * Function: fcp_update_lun_state
 *
 * Description: This function updates the field lun_state of a LUN. That
 *		field is a bitmap and which bit can be set or reset
 *		individually.
 *
 * Argument:	*plun		LUN structure.
 *		flag		Flag indication what action to apply (set/reset).
 *		state		State bits to update.
 *
 * Return Value: None
 *
 * Context:	Interrupt, Kernel or User context.
 *		The mutex of the target (ptgt->tgt_mutex) must be owned when
 *		calling this function.
 */
void
fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
{
	struct fcp_tgt	*ptgt = plun->lun_tgt;

	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	/*
	 * NOTE(review): lun_state is tested against FCP_TGT_OFFLINE here
	 * rather than FCP_LUN_OFFLINE; verify whether the two flags share
	 * a value or this is a latent typo before changing it.
	 */
	if (!(plun->lun_state & FCP_TGT_OFFLINE)) {
		if (flag == FCP_SET) {
			plun->lun_state |= state;
		} else {
			plun->lun_state &= ~state;
		}
	}
}
5511 5511
5512 5512 /*
5513 5513 * Function: fcp_get_port
5514 5514 *
5515 5515 * Description: This function returns the fcp_port structure from the opaque
5516 5516 * handle passed by the caller. That opaque handle is the handle
5517 5517 * used by fp/fctl to identify a particular local port. That
5518 5518 * handle has been stored in the corresponding fcp_port
5519 5519 * structure. This function is going to walk the global list of
5520 5520 * fcp_port structures till one has a port_fp_handle that matches
5521 5521 * the handle passed by the caller. This function enters the
5522 5522 * mutex fcp_global_mutex while walking the global list and then
5523 5523 * releases it.
5524 5524 *
5525 5525 * Argument: port_handle Opaque handle that fp/fctl uses to identify a
5526 5526 * particular port.
5527 5527 *
5528 5528 * Return Value: NULL Not found.
5529 5529 * Not NULL Pointer to the fcp_port structure.
5530 5530 *
5531 5531 * Context: Interrupt, Kernel or User context.
5532 5532 */
5533 5533 static struct fcp_port *
5534 5534 fcp_get_port(opaque_t port_handle)
5535 5535 {
5536 5536 struct fcp_port *pptr;
5537 5537
5538 5538 ASSERT(port_handle != NULL);
5539 5539
5540 5540 mutex_enter(&fcp_global_mutex);
5541 5541 for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5542 5542 if (pptr->port_fp_handle == port_handle) {
5543 5543 break;
5544 5544 }
5545 5545 }
5546 5546 mutex_exit(&fcp_global_mutex);
5547 5547
5548 5548 return (pptr);
5549 5549 }
5550 5550
5551 5551
5552 5552 static void
5553 5553 fcp_unsol_callback(fc_packet_t *fpkt)
5554 5554 {
5555 5555 struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5556 5556 struct fcp_port *pptr = icmd->ipkt_port;
5557 5557
5558 5558 if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5559 5559 caddr_t state, reason, action, expln;
5560 5560
5561 5561 (void) fc_ulp_pkt_error(fpkt, &state, &reason,
5562 5562 &action, &expln);
5563 5563
5564 5564 fcp_log(CE_WARN, pptr->port_dip,
5565 5565 "!couldn't post response to unsolicited request: "
5566 5566 " state=%s reason=%s rx_id=%x ox_id=%x",
5567 5567 state, reason, fpkt->pkt_cmd_fhdr.ox_id,
5568 5568 fpkt->pkt_cmd_fhdr.rx_id);
5569 5569 }
5570 5570 fcp_icmd_free(pptr, icmd);
5571 5571 }
5572 5572
5573 5573
/*
 * Function: fcp_unsol_resp_init
 *
 * Description: Performs general purpose preparation of a response to an
 *		unsolicited request: the command frame header is filled in
 *		from the unsolicited buffer so the response is sent back
 *		within the originator's exchange, and the completion routine
 *		is set to fcp_unsol_callback().
 *
 * Argument:	*pkt	Outbound response packet to initialize.
 *		*buf	Unsolicited buffer that carried the request.
 *		r_ctl	R_CTL value to use for the response frame.
 *		type	FC-4 type to use for the response frame.
 *
 * Return Value: None
 */
static void
fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
    uchar_t r_ctl, uchar_t type)
{
	pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
	/* Swap source/destination so the reply goes back to the sender. */
	pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
	pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
	pkt->pkt_cmd_fhdr.type = type;
	pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
	/* Carry over the originator's sequence and exchange identifiers. */
	pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
	pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl;
	pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
	pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
	pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
	pkt->pkt_cmd_fhdr.ro = 0;
	pkt->pkt_cmd_fhdr.rsvd = 0;
	pkt->pkt_comp = fcp_unsol_callback;
	pkt->pkt_pd = NULL;
	pkt->pkt_ub_resp_token = (opaque_t)buf;
}
5597 5597
5598 5598
/*
 * Handle an unsolicited PRLI request from a remote port by building and
 * issuing a PRLI ACC response via an internal packet.
 *
 * Returns FC_SUCCESS if the response was issued (or queued for retry on a
 * busy/offline link), FC_FAILURE otherwise.  On FC_SUCCESS the unsolicited
 * buffer is released back to the transport; on FC_FAILURE the caller keeps
 * ownership of it.
 */
/*ARGSUSED*/
static int
fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
{
	fc_packet_t	*fpkt;
	struct la_els_prli	prli;
	struct fcp_prli	*fprli;
	struct fcp_ipkt	*icmd;
	struct la_els_prli	*from;
	struct fcp_prli	*orig;
	struct fcp_tgt	*ptgt;
	int	tcount = 0;
	int	lcount;

	from = (struct la_els_prli *)buf->ub_buffer;
	orig = (struct fcp_prli *)from->service_params;

	/*
	 * Snapshot the target and link state-change counters; the internal
	 * packet is tied to them so a later fabric or target state change
	 * can be detected.  The sender may be unknown to us (no target
	 * struct), in which case tcount stays 0 and ptgt is NULL.
	 */
	if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
	    NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		tcount = ptgt->tgt_change_cnt;
		mutex_exit(&ptgt->tgt_mutex);
	}

	mutex_enter(&pptr->port_mutex);
	lcount = pptr->port_link_cnt;
	mutex_exit(&pptr->port_mutex);

	if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
	    sizeof (la_els_prli_t), 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT)) == NULL) {
		return (FC_FAILURE);
	}

	fpkt = icmd->ipkt_fpkt;
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
	fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
	fpkt->pkt_rsplen = 0;
	fpkt->pkt_datalen = 0;

	icmd->ipkt_opcode = LA_ELS_PRLI;

	/* build the PRLI accept payload */
	bzero(&prli, sizeof (struct la_els_prli));
	fprli = (struct fcp_prli *)prli.service_params;
	prli.ls_code = LA_ELS_ACC;
	prli.page_length = 0x10;
	prli.payload_length = sizeof (struct la_els_prli);

	/* fill in service params (type 0x08 == FCP) */
	fprli->type = 0x08;
	fprli->resvd1 = 0;
	/* echo the originator's process-associator settings */
	fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
	fprli->orig_process_associator = orig->orig_process_associator;
	fprli->resp_process_assoc_valid = 0;
	fprli->establish_image_pair = 1;
	fprli->resvd2 = 0;
	fprli->resvd3 = 0;
	fprli->obsolete_1 = 0;
	fprli->obsolete_2 = 0;
	fprli->data_overlay_allowed = 0;
	fprli->initiator_fn = 1;
	fprli->confirmed_compl_allowed = 1;

	/* advertise target function only if the target-mode ULP is loaded */
	if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
		fprli->target_fn = 1;
	} else {
		fprli->target_fn = 0;
	}

	fprli->retry = 1;
	fprli->read_xfer_rdy_disabled = 1;
	fprli->write_xfer_rdy_disabled = 0;

	/* save the unsol prli payload first */
	FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
	    fpkt->pkt_resp_acc, sizeof (struct la_els_prli));

	FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
	    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));

	fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);

	/*
	 * Only issue the response if the link did not change state since
	 * the internal packet was stamped with lcount/tcount.
	 */
	mutex_enter(&pptr->port_mutex);
	if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
		int rval;
		mutex_exit(&pptr->port_mutex);

		if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
		    FC_SUCCESS) {
			/*
			 * A busy/offline link is transient: queue the packet
			 * for re-issue (only if the sender is a known
			 * target).  Anything else is a hard failure.
			 */
			if ((rval == FC_STATEC_BUSY || rval == FC_OFFLINE) &&
			    ptgt != NULL) {
				fcp_queue_ipkt(pptr, fpkt);
				return (FC_SUCCESS);
			}
			/* Let it timeout */
			fcp_icmd_free(pptr, icmd);
			return (FC_FAILURE);
		}
	} else {
		mutex_exit(&pptr->port_mutex);
		fcp_icmd_free(pptr, icmd);
		return (FC_FAILURE);
	}

	/* response is in flight; hand the unsolicited buffer back to fp */
	(void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);

	return (FC_SUCCESS);
}
5709 5709
/*
 * Function: fcp_icmd_alloc
 *
 * Description: This function allocates a fcp_ipkt structure. The pkt_comp
 *		field is initialized to fcp_icmd_callback. Sometimes it is
 *		modified by the caller (such as fcp_send_scsi). The
 *		structure is also tied to the state of the link and of the
 *		target at a particular time. That link is established by
 *		setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
 *		and tcount which came respectively from pptr->link_cnt and
 *		ptgt->tgt_change_cnt.
 *
 * Argument:	*pptr		Fcp port.
 *		*ptgt		Target (destination of the command).
 *		cmd_len		Length of the command.
 *		resp_len	Length of the expected response.
 *		data_len	Length of the data.
 *		nodma		Indicates whether the command and response
 *				will be transferred through DMA or not.
 *		lcount		Link state change counter.
 *		tcount		Target state change counter.
 *		cause		Reason that lead to this call.
 *		rscn_count	RSCN count to pass down to the transport,
 *				or FC_INVALID_RSCN_COUNT if none.
 *
 * Return Value: NULL		Failed.
 *		 Not NULL	Internal packet address.
 */
static struct fcp_ipkt *
fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
    int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
    uint32_t rscn_count)
{
	int	dma_setup = 0;
	fc_packet_t	*fpkt;
	struct fcp_ipkt	*icmd = NULL;

	/*
	 * One allocation holds the ipkt, the DMA cookie array and the FCA
	 * private area, laid out in that order (see pointer setup below).
	 */
	icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
	    pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
	    KM_NOSLEEP);
	if (icmd == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!internal packet allocation failed");
		return (NULL);
	}

	/*
	 * initialize the allocated packet
	 */
	icmd->ipkt_nodma = nodma;
	icmd->ipkt_next = icmd->ipkt_prev = NULL;
	icmd->ipkt_lun = NULL;

	/* tie the packet to the link/target state it was created under */
	icmd->ipkt_link_cnt = lcount;
	icmd->ipkt_change_cnt = tcount;
	icmd->ipkt_cause = cause;

	mutex_enter(&pptr->port_mutex);
	icmd->ipkt_port = pptr;
	mutex_exit(&pptr->port_mutex);

	/* keep track of amt of data to be sent in pkt */
	icmd->ipkt_cmdlen = cmd_len;
	icmd->ipkt_resplen = resp_len;
	icmd->ipkt_datalen = data_len;

	/* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
	icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);

	/* set pkt's private ptr to point to cmd pkt */
	icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;

	/* set FCA private ptr to memory just beyond */
	icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
	    ((char *)icmd + sizeof (struct fcp_ipkt) +
	    pptr->port_dmacookie_sz);

	/* get ptr to fpkt substruct and fill it in */
	fpkt = icmd->ipkt_fpkt;
	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
	    sizeof (struct fcp_ipkt));

	if (ptgt != NULL) {
		icmd->ipkt_tgt = ptgt;
		fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
	}

	fpkt->pkt_comp = fcp_icmd_callback;
	fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
	fpkt->pkt_cmdlen = cmd_len;
	fpkt->pkt_rsplen = resp_len;
	fpkt->pkt_datalen = data_len;

	/*
	 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
	 * rscn_count as fcp knows down to the transport. If a valid count was
	 * passed into this function, we allocate memory to actually pass down
	 * this info.
	 *
	 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
	 * basically mean that fcp will not be able to help transport
	 * distinguish if a new RSCN has come after fcp was last informed about
	 * it. In such cases, it might lead to the problem mentioned in CR/bug #
	 * 5068068 where the device might end up going offline in case of RSCN
	 * storms.
	 */
	fpkt->pkt_ulp_rscn_infop = NULL;
	if (rscn_count != FC_INVALID_RSCN_COUNT) {
		fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
		    sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
		if (fpkt->pkt_ulp_rscn_infop == NULL) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "Failed to alloc memory to pass rscn info");
		}
	}

	if (fpkt->pkt_ulp_rscn_infop != NULL) {
		fc_ulp_rscn_info_t *rscnp;

		rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
		rscnp->ulp_rscn_count = rscn_count;
	}

	if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
		goto fail;
	}
	dma_setup++;

	/*
	 * Must hold target mutex across setting of pkt_pd and call to
	 * fc_ulp_init_packet to ensure the handle to the target doesn't go
	 * away while we're not looking.
	 */
	if (ptgt != NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		fpkt->pkt_pd = ptgt->tgt_pd_handle;

		/* ask transport to do its initialization on this pkt */
		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
		    != FC_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fc_ulp_init_packet failed");
			mutex_exit(&ptgt->tgt_mutex);
			goto fail;
		}
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
		    != FC_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fc_ulp_init_packet failed");
			goto fail;
		}
	}

	/*
	 * Bail out if the port started going away while we were setting
	 * the packet up; undo the transport initialization first.
	 */
	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & (FCP_STATE_DETACHING |
	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
		int rval;

		mutex_exit(&pptr->port_mutex);

		rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
		ASSERT(rval == FC_SUCCESS);

		goto fail;
	}

	/* account for the outstanding internal packet */
	if (ptgt != NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_ipkt_cnt++;
		mutex_exit(&ptgt->tgt_mutex);
	}

	pptr->port_ipkt_cnt++;

	mutex_exit(&pptr->port_mutex);

	return (icmd);

fail:
	/* unwind in reverse order of setup */
	if (fpkt->pkt_ulp_rscn_infop != NULL) {
		kmem_free(fpkt->pkt_ulp_rscn_infop,
		    sizeof (fc_ulp_rscn_info_t));
		fpkt->pkt_ulp_rscn_infop = NULL;
	}

	if (dma_setup) {
		fcp_free_dma(pptr, icmd);
	}
	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
	    (size_t)pptr->port_dmacookie_sz);

	return (NULL);
}
5906 5906
/*
 * Function: fcp_icmd_free
 *
 * Description: Frees the internal command passed by the caller.  Undoes
 *		fcp_icmd_alloc: uninitializes the packet with the transport,
 *		releases the RSCN info and DMA resources, frees the combined
 *		allocation, and drops the port/target outstanding-packet
 *		counts.
 *
 * Argument:	*pptr		Fcp port.
 *		*icmd		Internal packet to free.
 *
 * Return Value: None
 */
static void
fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
{
	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;

	/* Let the underlying layers do their cleanup. */
	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
	    icmd->ipkt_fpkt);

	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
		kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
		    sizeof (fc_ulp_rscn_info_t));
	}

	fcp_free_dma(pptr, icmd);

	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
	    (size_t)pptr->port_dmacookie_sz);

	/*
	 * Drop the counters last; lock order is port_mutex then tgt_mutex,
	 * matching the accounting done in fcp_icmd_alloc.
	 */
	mutex_enter(&pptr->port_mutex);

	if (ptgt) {
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_ipkt_cnt--;
		mutex_exit(&ptgt->tgt_mutex);
	}

	pptr->port_ipkt_cnt--;
	mutex_exit(&pptr->port_mutex);
}
5947 5947
/*
 * Function: fcp_alloc_dma
 *
 * Description: Allocates the DMA resources required for the internal
 *		packet: the command/response buffers (DMA or plain kernel
 *		memory depending on nodma) and, if there is a data phase
 *		and the FCA supports DMA, the data buffer with its bound
 *		cookies.  On failure everything allocated so far is undone.
 *
 * Argument:	*pptr		FCP port.
 *		*icmd		Internal FCP packet.
 *		nodma		Indicates if the Cmd and Resp will be DMAed.
 *		flags		Allocation flags (Sleep or NoSleep).
 *
 * Return Value: FC_SUCCESS
 *		 FC_NOMEM
 */
static int
fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
    int nodma, int flags)
{
	int	rval;
	size_t	real_size;
	uint_t	ccount;
	int	bound = 0;
	int	cmd_resp = 0;
	fc_packet_t	*fpkt;
	ddi_dma_cookie_t	pkt_data_cookie;
	ddi_dma_cookie_t	*cp;
	uint32_t	cnt;

	fpkt = &icmd->ipkt_fc_packet;

	ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
	    fpkt->pkt_resp_dma == NULL);

	icmd->ipkt_nodma = nodma;

	if (nodma) {
		/* no-DVMA FCA: cmd/resp live in ordinary kernel memory */
		fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
		if (fpkt->pkt_cmd == NULL) {
			goto fail;
		}

		fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
		if (fpkt->pkt_resp == NULL) {
			goto fail;
		}
	} else {
		ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);

		rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
		if (rval == FC_FAILURE) {
			ASSERT(fpkt->pkt_cmd_dma == NULL &&
			    fpkt->pkt_resp_dma == NULL);
			goto fail;
		}
		/* cmd_resp flags that fcp_free_cmd_resp is needed on fail */
		cmd_resp++;
	}

	if ((fpkt->pkt_datalen != 0) &&
	    !(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
		/*
		 * set up DMA handle and memory for the data in this packet
		 */
		if (ddi_dma_alloc_handle(pptr->port_dip,
		    &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
		    NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
			goto fail;
		}

		if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
		    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
		    &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
			goto fail;
		}

		/* was DMA mem size gotten < size asked for/needed ?? */
		if (real_size < fpkt->pkt_datalen) {
			goto fail;
		}

		/* bind DMA address and handle together */
		if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
		    NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
		    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
		    &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
			goto fail;
		}
		bound++;

		/* more cookies than the FCA can handle is a failure */
		if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
			goto fail;
		}

		fpkt->pkt_data_cookie_cnt = ccount;

		/*
		 * Copy all cookies into the cookie array that was laid out
		 * just after the ipkt by fcp_icmd_alloc.
		 */
		cp = fpkt->pkt_data_cookie;
		*cp = pkt_data_cookie;
		cp++;

		for (cnt = 1; cnt < ccount; cnt++, cp++) {
			ddi_dma_nextcookie(fpkt->pkt_data_dma,
			    &pkt_data_cookie);
			*cp = pkt_data_cookie;
		}

	} else if (fpkt->pkt_datalen != 0) {
		/*
		 * If it's a pseudo FCA, then it can't support DMA even in
		 * SCSI data phase.
		 */
		fpkt->pkt_data = kmem_alloc(fpkt->pkt_datalen, flags);
		if (fpkt->pkt_data == NULL) {
			goto fail;
		}

	}

	return (FC_SUCCESS);

fail:
	/* unwind whatever was set up, in reverse order */
	if (bound) {
		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
	}

	if (fpkt->pkt_data_dma) {
		if (fpkt->pkt_data) {
			ddi_dma_mem_free(&fpkt->pkt_data_acc);
		}
		ddi_dma_free_handle(&fpkt->pkt_data_dma);
	} else {
		if (fpkt->pkt_data) {
			kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
		}
	}

	if (nodma) {
		if (fpkt->pkt_cmd) {
			kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
		}
		if (fpkt->pkt_resp) {
			kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
		}
	} else {
		if (cmd_resp) {
			fcp_free_cmd_resp(pptr, fpkt);
		}
	}

	return (FC_NOMEM);
}
6098 6098
6099 6099
6100 6100 static void
6101 6101 fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
6102 6102 {
6103 6103 fc_packet_t *fpkt = icmd->ipkt_fpkt;
6104 6104
6105 6105 if (fpkt->pkt_data_dma) {
6106 6106 (void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6107 6107 if (fpkt->pkt_data) {
6108 6108 ddi_dma_mem_free(&fpkt->pkt_data_acc);
6109 6109 }
6110 6110 ddi_dma_free_handle(&fpkt->pkt_data_dma);
6111 6111 } else {
6112 6112 if (fpkt->pkt_data) {
6113 6113 kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6114 6114 }
6115 6115 /*
6116 6116 * Need we reset pkt_* to zero???
6117 6117 */
6118 6118 }
6119 6119
6120 6120 if (icmd->ipkt_nodma) {
6121 6121 if (fpkt->pkt_cmd) {
6122 6122 kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
6123 6123 }
6124 6124 if (fpkt->pkt_resp) {
6125 6125 kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
6126 6126 }
6127 6127 } else {
6128 6128 ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
6129 6129
6130 6130 fcp_free_cmd_resp(pptr, fpkt);
6131 6131 }
6132 6132 }
6133 6133
6134 6134 /*
6135 6135 * Function: fcp_lookup_target
6136 6136 *
6137 6137 * Description: Finds a target given a WWN.
6138 6138 *
6139 6139 * Argument: *pptr FCP port.
6140 6140 * *wwn World Wide Name of the device to look for.
6141 6141 *
6142 6142 * Return Value: NULL No target found
6143 6143 * Not NULL Target structure
6144 6144 *
6145 6145 * Context: Interrupt context.
6146 6146 * The mutex pptr->port_mutex must be owned.
6147 6147 */
6148 6148 /* ARGSUSED */
6149 6149 static struct fcp_tgt *
6150 6150 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
6151 6151 {
6152 6152 int hash;
6153 6153 struct fcp_tgt *ptgt;
6154 6154
6155 6155 ASSERT(mutex_owned(&pptr->port_mutex));
6156 6156
6157 6157 hash = FCP_HASH(wwn);
6158 6158
6159 6159 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
6160 6160 ptgt = ptgt->tgt_next) {
6161 6161 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
6162 6162 bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
6163 6163 sizeof (ptgt->tgt_port_wwn)) == 0) {
6164 6164 break;
6165 6165 }
6166 6166 }
6167 6167
6168 6168 return (ptgt);
6169 6169 }
6170 6170
6171 6171
6172 6172 /*
6173 6173 * Find target structure given a port identifier
6174 6174 */
6175 6175 static struct fcp_tgt *
6176 6176 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6177 6177 {
6178 6178 fc_portid_t port_id;
6179 6179 la_wwn_t pwwn;
6180 6180 struct fcp_tgt *ptgt = NULL;
6181 6181
6182 6182 port_id.priv_lilp_posit = 0;
6183 6183 port_id.port_id = d_id;
6184 6184 if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6185 6185 &pwwn) == FC_SUCCESS) {
6186 6186 mutex_enter(&pptr->port_mutex);
6187 6187 ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6188 6188 mutex_exit(&pptr->port_mutex);
6189 6189 }
6190 6190
6191 6191 return (ptgt);
6192 6192 }
6193 6193
6194 6194
/*
 * the packet completion callback routine for info cmd pkts
 *
 * this means fpkt pts to a response to either a PLOGI or a PRLI
 *
 * if there is an error an attempt is made to call a routine to resend
 * the command that failed
 *
 * NOTE(review): this callback assumes icmd->ipkt_tgt is non-NULL (it is
 * dereferenced unconditionally); internal PLOGI/PRLI packets are always
 * allocated with a target -- confirm for any new caller.
 */
static void
fcp_icmd_callback(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct la_els_prli	*prli;
	struct la_els_prli	prli_s;
	struct fcp_prli	*fprli;
	struct fcp_lun	*plun;
	int	free_pkt = 1;
	int	rval;
	ls_code_t	resp;
	uchar_t	prli_acc = 0;
	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
	int	lun0_newalloc;

	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;

	/* get ptrs to the port and target structs for the cmd */
	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;

	FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));

	if (icmd->ipkt_opcode == LA_ELS_PRLI) {
		/*
		 * For PRLI, also check the ls_code saved in the command
		 * buffer (fcp_unsol_prli stores LA_ELS_ACC there).
		 */
		FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
		    sizeof (prli_s));
		prli_acc = (prli_s.ls_code == LA_ELS_ACC);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "ELS (%x) callback state=0x%x reason=0x%x for %x",
	    icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
	    ptgt->tgt_d_id);

	if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
	    ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {

		mutex_enter(&ptgt->tgt_mutex);
		if (ptgt->tgt_pd_handle == NULL) {
			/*
			 * in a fabric environment the port device handles
			 * get created only after successful LOGIN into the
			 * transport, so the transport makes this port
			 * device (pd) handle available in this packet, so
			 * save it now
			 */
			ASSERT(fpkt->pkt_pd != NULL);
			ptgt->tgt_pd_handle = fpkt->pkt_pd;
		}
		mutex_exit(&ptgt->tgt_mutex);

		/* which ELS cmd is this response for ?? */
		switch (icmd->ipkt_opcode) {
		case LA_ELS_PLOGI:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
			    ptgt->tgt_d_id,
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));

			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
			    FCP_TGT_TRACE_15);

			/* Note that we are not allocating a new icmd */
			if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause) != DDI_SUCCESS) {
				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_16);
				goto fail;
			}
			break;

		case LA_ELS_PRLI:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);

			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
			    FCP_TGT_TRACE_17);

			prli = &prli_s;

			FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
			    sizeof (prli_s));

			fprli = (struct fcp_prli *)prli->service_params;

			/* record the capabilities the target advertised */
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_icap = fprli->initiator_fn;
			ptgt->tgt_tcap = fprli->target_fn;
			mutex_exit(&ptgt->tgt_mutex);

			if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
				/*
				 * this FCP device does not support target mode
				 */
				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_18);
				goto fail;
			}
			if (fprli->retry == 1) {
				fc_ulp_disable_relogin(pptr->port_fp_handle,
				    &ptgt->tgt_port_wwn);
			}

			/* target is no longer offline */
			mutex_enter(&pptr->port_mutex);
			mutex_enter(&ptgt->tgt_mutex);
			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
				    FCP_TGT_MARK);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,1: state change "
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				goto fail;
			}
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);

			/*
			 * lun 0 should always respond to inquiry, so
			 * get the LUN struct for LUN 0
			 *
			 * Currently we deal with first level of addressing.
			 * If / when we start supporting 0x device types
			 * (DTYPE_ARRAY_CTRL, i.e. array controllers)
			 * this logic will need revisiting.
			 */
			lun0_newalloc = 0;
			if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
				/*
				 * no LUN struct for LUN 0 yet exists,
				 * so create one
				 */
				plun = fcp_alloc_lun(ptgt);
				if (plun == NULL) {
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Failed to allocate lun 0 for"
					    " D_ID=%x", ptgt->tgt_d_id);
					goto fail;
				}
				lun0_newalloc = 1;
			}

			/* fill in LUN info */
			mutex_enter(&ptgt->tgt_mutex);
			/*
			 * consider lun 0 as device not connected if it is
			 * offlined or newly allocated
			 */
			if ((plun->lun_state & FCP_LUN_OFFLINE) ||
			    lun0_newalloc) {
				plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
			}
			plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
			plun->lun_state &= ~FCP_LUN_OFFLINE;
			ptgt->tgt_lun_cnt = 1;
			ptgt->tgt_report_lun_cnt = 0;
			mutex_exit(&ptgt->tgt_mutex);

			/* Retrieve the rscn count (if a valid one exists) */
			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
				rscn_count = ((fc_ulp_rscn_info_t *)
				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
				    ->ulp_rscn_count;
			} else {
				rscn_count = FC_INVALID_RSCN_COUNT;
			}

			/* send Report Lun request to target */
			if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
			    sizeof (struct fcp_reportlun_resp),
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
				mutex_enter(&pptr->port_mutex);
				if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Failed to send REPORT LUN to"
					    " D_ID=%x", ptgt->tgt_d_id);
				} else {
					FCP_TRACE(fcp_logq,
					    pptr->port_instbuf, fcp_trace,
					    FCP_BUF_LEVEL_5, 0,
					    "fcp_icmd_callback,2:state change"
					    " occured for D_ID=0x%x",
					    ptgt->tgt_d_id);
				}
				mutex_exit(&pptr->port_mutex);

				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_19);

				goto fail;
			} else {
				/*
				 * REPORT LUN owns the discovery now; free
				 * this icmd here and skip the fail path.
				 */
				free_pkt = 0;
				fcp_icmd_free(pptr, icmd);
			}
			break;

		default:
			fcp_log(CE_WARN, pptr->port_dip,
			    "!fcp_icmd_callback Invalid opcode");
			goto fail;
		}

		return;
	}


	/*
	 * Other PLOGI failures are not retried as the
	 * transport does it already
	 */
	if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
		if (fcp_is_retryable(icmd) &&
		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {

			if (FCP_MUST_RETRY(fpkt)) {
				fcp_queue_ipkt(pptr, fpkt);
				return;
			}

			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "ELS PRLI is retried for d_id=0x%x, state=%x,"
			    " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
			    fpkt->pkt_reason);

			/*
			 * Retry by recalling the routine that
			 * originally queued this packet
			 */
			mutex_enter(&pptr->port_mutex);
			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				caddr_t msg;

				mutex_exit(&pptr->port_mutex);

				ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);

				/* give a timed-out packet more time */
				if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
					fpkt->pkt_timeout +=
					    FCP_TIMEOUT_DELTA;
				}

				rval = fc_ulp_issue_els(pptr->port_fp_handle,
				    fpkt);
				if (rval == FC_SUCCESS) {
					return;
				}

				if (rval == FC_STATEC_BUSY ||
				    rval == FC_OFFLINE) {
					fcp_queue_ipkt(pptr, fpkt);
					return;
				}
				(void) fc_ulp_error(rval, &msg);

				fcp_log(CE_NOTE, pptr->port_dip,
				    "!ELS 0x%x failed to d_id=0x%x;"
				    " %s", icmd->ipkt_opcode,
				    ptgt->tgt_d_id, msg);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,3: state change "
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&pptr->port_mutex);
			}
		}
	} else {
		/* failed PLOGI: only re-queue if the transport demands it */
		if (fcp_is_retryable(icmd) &&
		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
			if (FCP_MUST_RETRY(fpkt)) {
				fcp_queue_ipkt(pptr, fpkt);
				return;
			}
		}
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
		    fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
			mutex_exit(&pptr->port_mutex);
			fcp_print_error(fpkt);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_icmd_callback,4: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
		}
	}

fail:
	/*
	 * Finish-init accounting and free the icmd unless ownership was
	 * handed off to the REPORT LUN path above (free_pkt == 0).
	 */
	if (free_pkt) {
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
	}
}
6511 6511
6512 6512
6513 6513 /*
6514 6514 * called internally to send an info cmd using the transport
6515 6515 *
6516 6516 * sends either an INQ or a REPORT_LUN
6517 6517 *
6518 6518 * when the packet is completed fcp_scsi_callback is called
6519 6519 */
static int
fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
    int lcount, int tcount, int cause, uint32_t rscn_count)
{
	int			nodma;
	struct fcp_ipkt		*icmd;
	struct fcp_tgt		*ptgt;
	struct fcp_port		*pptr;
	fc_frame_hdr_t		*hp;
	fc_packet_t		*fpkt;
	struct fcp_cmd		fcp_cmd;
	struct fcp_cmd		*fcmd;
	union scsi_cdb		*scsi_cdb;

	ASSERT(plun != NULL);

	ptgt = plun->lun_tgt;
	ASSERT(ptgt != NULL);

	pptr = ptgt->tgt_port;
	ASSERT(pptr != NULL);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);

	/*
	 * When the FCA has no DVMA space the command payload is built
	 * directly in fpkt->pkt_cmd; otherwise it is staged in the local
	 * fcp_cmd and copied out through the DMA access handle below.
	 */
	nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
	    FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
	    rscn_count);

	if (icmd == NULL) {
		return (DDI_FAILURE);
	}

	fpkt = icmd->ipkt_fpkt;
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	icmd->ipkt_retries = 0;
	icmd->ipkt_opcode = opcode;
	icmd->ipkt_lun = plun;

	if (nodma) {
		fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
	} else {
		fcmd = &fcp_cmd;
	}
	bzero(fcmd, sizeof (struct fcp_cmd));

	fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;

	/* Build the FC frame header for a class-3 FCP command. */
	hp = &fpkt->pkt_cmd_fhdr;

	hp->s_id = pptr->port_id;
	hp->d_id = ptgt->tgt_d_id;
	hp->r_ctl = R_CTL_COMMAND;
	hp->type = FC_TYPE_SCSI_FCP;
	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
	hp->rsvd = 0;
	hp->seq_id = 0;
	hp->seq_cnt = 0;
	hp->ox_id = 0xffff;
	hp->rx_id = 0xffff;
	hp->ro = 0;

	/* Address the command at the LUN this packet is destined for. */
	bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);

	/*
	 * Request SCSI target for expedited processing
	 */

	/*
	 * Set up for untagged queuing because we do not
	 * know if the fibre device supports queuing.
	 */
	fcmd->fcp_cntl.cntl_reserved_0 = 0;
	fcmd->fcp_cntl.cntl_reserved_1 = 0;
	fcmd->fcp_cntl.cntl_reserved_2 = 0;
	fcmd->fcp_cntl.cntl_reserved_3 = 0;
	fcmd->fcp_cntl.cntl_reserved_4 = 0;
	fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;

	/*
	 * Build the CDB for the one internally-generated command being
	 * sent; all three are read-type commands completed by
	 * fcp_scsi_callback().
	 */
	switch (opcode) {
	case SCMD_INQUIRY_PAGE83:
		/*
		 * Prepare to get the Inquiry VPD page 83 information
		 */
		fcmd->fcp_cntl.cntl_read_data = 1;
		fcmd->fcp_cntl.cntl_write_data = 0;
		fcmd->fcp_data_len = alloc_len;

		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
		fpkt->pkt_comp = fcp_scsi_callback;

		scsi_cdb->scc_cmd = SCMD_INQUIRY;
		scsi_cdb->g0_addr2 = 0x01;	/* EVPD bit */
		scsi_cdb->g0_addr1 = 0x83;	/* device-id page code */
		scsi_cdb->g0_count0 = (uchar_t)alloc_len;
		break;

	case SCMD_INQUIRY:
		fcmd->fcp_cntl.cntl_read_data = 1;
		fcmd->fcp_cntl.cntl_write_data = 0;
		fcmd->fcp_data_len = alloc_len;

		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
		fpkt->pkt_comp = fcp_scsi_callback;

		scsi_cdb->scc_cmd = SCMD_INQUIRY;
		scsi_cdb->g0_count0 = SUN_INQSIZE;
		break;

	case SCMD_REPORT_LUN: {
		fc_portid_t	d_id;
		opaque_t	fca_dev;

		ASSERT(alloc_len >= 16);

		d_id.priv_lilp_posit = 0;
		d_id.port_id = ptgt->tgt_d_id;

		fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);

		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_fca_dev = fca_dev;
		mutex_exit(&ptgt->tgt_mutex);

		fcmd->fcp_cntl.cntl_read_data = 1;
		fcmd->fcp_cntl.cntl_write_data = 0;
		fcmd->fcp_data_len = alloc_len;

		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
		fpkt->pkt_comp = fcp_scsi_callback;

		/* REPORT LUNS allocation length is big-endian in the CDB */
		scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
		scsi_cdb->scc5_count0 = alloc_len & 0xff;
		scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
		scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
		scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
		break;
	}

	default:
		/*
		 * NOTE(review): after this warning the packet is still
		 * transported with a zeroed CDB — confirm this is the
		 * intended behavior for an invalid opcode.
		 */
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp_send_scsi Invalid opcode");
		break;
	}

	if (!nodma) {
		/* Stage the command into the DMA-mapped buffer. */
		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
	}

	/*
	 * Only send the command if the target state has not changed
	 * since this internal packet was allocated; port_mutex is
	 * dropped before the transport call.
	 */
	mutex_enter(&pptr->port_mutex);
	if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

		mutex_exit(&pptr->port_mutex);
		if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
		    FC_SUCCESS) {
			fcp_icmd_free(pptr, icmd);
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	} else {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_send_scsi,1: state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&pptr->port_mutex);
		fcp_icmd_free(pptr, icmd);
		return (DDI_FAILURE);
	}
}
6693 6693
6694 6694
6695 6695 /*
6696 6696 * called by fcp_scsi_callback to check to handle the case where
6697 6697 * REPORT_LUN returns ILLEGAL REQUEST or a UNIT ATTENTION
6698 6698 */
6699 6699 static int
6700 6700 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
6701 6701 {
6702 6702 uchar_t rqlen;
6703 6703 int rval = DDI_FAILURE;
6704 6704 struct scsi_extended_sense sense_info, *sense;
6705 6705 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
6706 6706 fpkt->pkt_ulp_private;
6707 6707 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
6708 6708 struct fcp_port *pptr = ptgt->tgt_port;
6709 6709
6710 6710 ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6711 6711
6712 6712 if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6713 6713 /*
6714 6714 * SCSI-II Reserve Release support. Some older FC drives return
6715 6715 * Reservation conflict for Report Luns command.
6716 6716 */
6717 6717 if (icmd->ipkt_nodma) {
6718 6718 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6719 6719 rsp->fcp_u.fcp_status.sense_len_set = 0;
6720 6720 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6721 6721 } else {
6722 6722 fcp_rsp_t new_resp;
6723 6723
6724 6724 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6725 6725 fpkt->pkt_resp_acc, sizeof (new_resp));
6726 6726
6727 6727 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6728 6728 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6729 6729 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6730 6730
6731 6731 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6732 6732 fpkt->pkt_resp_acc, sizeof (new_resp));
6733 6733 }
6734 6734
6735 6735 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6736 6736 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6737 6737
6738 6738 return (DDI_SUCCESS);
6739 6739 }
6740 6740
6741 6741 sense = &sense_info;
6742 6742 if (!rsp->fcp_u.fcp_status.sense_len_set) {
6743 6743 /* no need to continue if sense length is not set */
6744 6744 return (rval);
6745 6745 }
6746 6746
6747 6747 /* casting 64-bit integer to 8-bit */
6748 6748 rqlen = (uchar_t)min(rsp->fcp_sense_len,
6749 6749 sizeof (struct scsi_extended_sense));
6750 6750
6751 6751 if (rqlen < 14) {
6752 6752 /* no need to continue if request length isn't long enough */
6753 6753 return (rval);
6754 6754 }
6755 6755
6756 6756 if (icmd->ipkt_nodma) {
6757 6757 /*
6758 6758 * We can safely use fcp_response_len here since the
6759 6759 * only path that calls fcp_check_reportlun,
6760 6760 * fcp_scsi_callback, has already called
6761 6761 * fcp_validate_fcp_response.
6762 6762 */
6763 6763 sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6764 6764 sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6765 6765 } else {
6766 6766 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6767 6767 rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6768 6768 sizeof (struct scsi_extended_sense));
6769 6769 }
6770 6770
6771 6771 if (!FCP_SENSE_NO_LUN(sense)) {
6772 6772 mutex_enter(&ptgt->tgt_mutex);
6773 6773 /* clear the flag if any */
6774 6774 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6775 6775 mutex_exit(&ptgt->tgt_mutex);
6776 6776 }
6777 6777
6778 6778 if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6779 6779 (sense->es_add_code == 0x20)) {
6780 6780 if (icmd->ipkt_nodma) {
6781 6781 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6782 6782 rsp->fcp_u.fcp_status.sense_len_set = 0;
6783 6783 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6784 6784 } else {
6785 6785 fcp_rsp_t new_resp;
6786 6786
6787 6787 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6788 6788 fpkt->pkt_resp_acc, sizeof (new_resp));
6789 6789
6790 6790 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6791 6791 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6792 6792 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6793 6793
6794 6794 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6795 6795 fpkt->pkt_resp_acc, sizeof (new_resp));
6796 6796 }
6797 6797
6798 6798 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6799 6799 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6800 6800
6801 6801 return (DDI_SUCCESS);
6802 6802 }
6803 6803
6804 6804 /*
6805 6805 * This is for the STK library which returns a check condition,
6806 6806 * to indicate device is not ready, manual assistance needed.
6807 6807 * This is to a report lun command when the door is open.
6808 6808 */
6809 6809 if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6810 6810 if (icmd->ipkt_nodma) {
6811 6811 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6812 6812 rsp->fcp_u.fcp_status.sense_len_set = 0;
6813 6813 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6814 6814 } else {
6815 6815 fcp_rsp_t new_resp;
6816 6816
6817 6817 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6818 6818 fpkt->pkt_resp_acc, sizeof (new_resp));
6819 6819
6820 6820 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6821 6821 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6822 6822 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6823 6823
6824 6824 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6825 6825 fpkt->pkt_resp_acc, sizeof (new_resp));
6826 6826 }
6827 6827
6828 6828 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6829 6829 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6830 6830
6831 6831 return (DDI_SUCCESS);
6832 6832 }
6833 6833
6834 6834 if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6835 6835 (FCP_SENSE_NO_LUN(sense))) {
6836 6836 mutex_enter(&ptgt->tgt_mutex);
6837 6837 if ((FCP_SENSE_NO_LUN(sense)) &&
6838 6838 (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6839 6839 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6840 6840 mutex_exit(&ptgt->tgt_mutex);
6841 6841 /*
6842 6842 * reconfig was triggred by ILLEGAL REQUEST but
6843 6843 * got ILLEGAL REQUEST again
6844 6844 */
6845 6845 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6846 6846 fcp_trace, FCP_BUF_LEVEL_3, 0,
6847 6847 "!FCP: Unable to obtain Report Lun data"
6848 6848 " target=%x", ptgt->tgt_d_id);
6849 6849 } else {
6850 6850 if (ptgt->tgt_tid == NULL) {
6851 6851 timeout_id_t tid;
6852 6852 /*
6853 6853 * REPORT LUN data has changed. Kick off
6854 6854 * rediscovery
6855 6855 */
6856 6856 tid = timeout(fcp_reconfigure_luns,
6857 6857 (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6858 6858
6859 6859 ptgt->tgt_tid = tid;
6860 6860 ptgt->tgt_state |= FCP_TGT_BUSY;
6861 6861 }
6862 6862 if (FCP_SENSE_NO_LUN(sense)) {
6863 6863 ptgt->tgt_state |= FCP_TGT_ILLREQ;
6864 6864 }
6865 6865 mutex_exit(&ptgt->tgt_mutex);
6866 6866 if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6867 6867 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6868 6868 fcp_trace, FCP_BUF_LEVEL_3, 0,
6869 6869 "!FCP:Report Lun Has Changed"
6870 6870 " target=%x", ptgt->tgt_d_id);
6871 6871 } else if (FCP_SENSE_NO_LUN(sense)) {
6872 6872 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6873 6873 fcp_trace, FCP_BUF_LEVEL_3, 0,
6874 6874 "!FCP:LU Not Supported"
6875 6875 " target=%x", ptgt->tgt_d_id);
6876 6876 }
6877 6877 }
6878 6878 rval = DDI_SUCCESS;
6879 6879 }
6880 6880
6881 6881 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6882 6882 fcp_trace, FCP_BUF_LEVEL_5, 0,
6883 6883 "D_ID=%x, sense=%x, status=%x",
6884 6884 fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6885 6885 rsp->fcp_u.fcp_status.scsi_status);
6886 6886
6887 6887 return (rval);
6888 6888 }
6889 6889
6890 6890 /*
6891 6891 * Function: fcp_scsi_callback
6892 6892 *
6893 6893 * Description: This is the callback routine set by fcp_send_scsi() after
6894 6894 * it calls fcp_icmd_alloc(). The SCSI command completed here
6895 6895 * and autogenerated by FCP are: REPORT_LUN, INQUIRY and
6896 6896 * INQUIRY_PAGE83.
6897 6897 *
6898 6898 * Argument: *fpkt FC packet used to convey the command
6899 6899 *
6900 6900 * Return Value: None
6901 6901 */
6902 6902 static void
6903 6903 fcp_scsi_callback(fc_packet_t *fpkt)
6904 6904 {
6905 6905 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
6906 6906 fpkt->pkt_ulp_private;
6907 6907 struct fcp_rsp_info fcp_rsp_err, *bep;
6908 6908 struct fcp_port *pptr;
6909 6909 struct fcp_tgt *ptgt;
6910 6910 struct fcp_lun *plun;
6911 6911 struct fcp_rsp response, *rsp;
6912 6912
6913 6913 ptgt = icmd->ipkt_tgt;
6914 6914 pptr = ptgt->tgt_port;
6915 6915 plun = icmd->ipkt_lun;
6916 6916
6917 6917 if (icmd->ipkt_nodma) {
6918 6918 rsp = (struct fcp_rsp *)fpkt->pkt_resp;
6919 6919 } else {
6920 6920 rsp = &response;
6921 6921 FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
6922 6922 sizeof (struct fcp_rsp));
6923 6923 }
6924 6924
6925 6925 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6926 6926 fcp_trace, FCP_BUF_LEVEL_2, 0,
6927 6927 "SCSI callback state=0x%x for %x, op_code=0x%x, "
6928 6928 "status=%x, lun num=%x",
6929 6929 fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
6930 6930 rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);
6931 6931
6932 6932 /*
6933 6933 * Pre-init LUN GUID with NWWN if it is not a device that
6934 6934 * supports multiple luns and we know it's not page83
6935 6935 * compliant. Although using a NWWN is not lun unique,
6936 6936 * we will be fine since there is only one lun behind the taget
6937 6937 * in this case.
6938 6938 */
6939 6939 if ((plun->lun_guid_size == 0) &&
6940 6940 (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6941 6941 (fcp_symmetric_device_probe(plun) == 0)) {
6942 6942
6943 6943 char ascii_wwn[FC_WWN_SIZE*2+1];
6944 6944 fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
6945 6945 (void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
6946 6946 }
6947 6947
6948 6948 /*
6949 6949 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
6950 6950 * when thay have more data than what is asked in CDB. An overrun
6951 6951 * is really when FCP_DL is smaller than the data length in CDB.
6952 6952 * In the case here we know that REPORT LUN command we formed within
6953 6953 * this binary has correct FCP_DL. So this OVERRUN is due to bad device
6954 6954 * behavior. In reality this is FC_SUCCESS.
6955 6955 */
6956 6956 if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
6957 6957 (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
6958 6958 (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
6959 6959 fpkt->pkt_state = FC_PKT_SUCCESS;
6960 6960 }
6961 6961
6962 6962 if (fpkt->pkt_state != FC_PKT_SUCCESS) {
6963 6963 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6964 6964 fcp_trace, FCP_BUF_LEVEL_2, 0,
6965 6965 "icmd failed with state=0x%x for %x", fpkt->pkt_state,
6966 6966 ptgt->tgt_d_id);
6967 6967
6968 6968 if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
6969 6969 /*
6970 6970 * Inquiry VPD page command on A5K SES devices would
6971 6971 * result in data CRC errors.
6972 6972 */
6973 6973 if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
6974 6974 (void) fcp_handle_page83(fpkt, icmd, 1);
6975 6975 return;
6976 6976 }
6977 6977 }
6978 6978 if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
6979 6979 FCP_MUST_RETRY(fpkt)) {
6980 6980 fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
6981 6981 fcp_retry_scsi_cmd(fpkt);
6982 6982 return;
6983 6983 }
6984 6984
6985 6985 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6986 6986 FCP_TGT_TRACE_20);
6987 6987
6988 6988 mutex_enter(&pptr->port_mutex);
6989 6989 mutex_enter(&ptgt->tgt_mutex);
6990 6990 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6991 6991 mutex_exit(&ptgt->tgt_mutex);
6992 6992 mutex_exit(&pptr->port_mutex);
6993 6993 fcp_print_error(fpkt);
6994 6994 } else {
6995 6995 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6996 6996 fcp_trace, FCP_BUF_LEVEL_2, 0,
6997 6997 "fcp_scsi_callback,1: state change occured"
6998 6998 " for D_ID=0x%x", ptgt->tgt_d_id);
6999 6999 mutex_exit(&ptgt->tgt_mutex);
7000 7000 mutex_exit(&pptr->port_mutex);
7001 7001 }
7002 7002 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7003 7003 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7004 7004 fcp_icmd_free(pptr, icmd);
7005 7005 return;
7006 7006 }
7007 7007
7008 7008 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);
7009 7009
7010 7010 mutex_enter(&pptr->port_mutex);
7011 7011 mutex_enter(&ptgt->tgt_mutex);
7012 7012 if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7013 7013 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7014 7014 fcp_trace, FCP_BUF_LEVEL_2, 0,
7015 7015 "fcp_scsi_callback,2: state change occured"
7016 7016 " for D_ID=0x%x", ptgt->tgt_d_id);
7017 7017 mutex_exit(&ptgt->tgt_mutex);
7018 7018 mutex_exit(&pptr->port_mutex);
7019 7019 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7020 7020 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7021 7021 fcp_icmd_free(pptr, icmd);
7022 7022 return;
7023 7023 }
7024 7024 ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7025 7025
7026 7026 mutex_exit(&ptgt->tgt_mutex);
7027 7027 mutex_exit(&pptr->port_mutex);
7028 7028
7029 7029 if (icmd->ipkt_nodma) {
7030 7030 bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
7031 7031 sizeof (struct fcp_rsp));
7032 7032 } else {
7033 7033 bep = &fcp_rsp_err;
7034 7034 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
7035 7035 fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
7036 7036 }
7037 7037
7038 7038 if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
7039 7039 fcp_retry_scsi_cmd(fpkt);
7040 7040 return;
7041 7041 }
7042 7042
7043 7043 if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
7044 7044 FCP_NO_FAILURE) {
7045 7045 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7046 7046 fcp_trace, FCP_BUF_LEVEL_2, 0,
7047 7047 "rsp_code=0x%x, rsp_len_set=0x%x",
7048 7048 bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
7049 7049 fcp_retry_scsi_cmd(fpkt);
7050 7050 return;
7051 7051 }
7052 7052
7053 7053 if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
7054 7054 rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
7055 7055 fcp_queue_ipkt(pptr, fpkt);
7056 7056 return;
7057 7057 }
7058 7058
7059 7059 /*
7060 7060 * Devices that do not support INQUIRY_PAGE83, return check condition
7061 7061 * with illegal request as per SCSI spec.
7062 7062 * Crossbridge is one such device and Daktari's SES node is another.
7063 7063 * We want to ideally enumerate these devices as a non-mpxio devices.
7064 7064 * SES nodes (Daktari only currently) are an exception to this.
7065 7065 */
7066 7066 if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
7067 7067 (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {
7068 7068
7069 7069 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7070 7070 fcp_trace, FCP_BUF_LEVEL_3, 0,
7071 7071 "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
7072 7072 "check condition. May enumerate as non-mpxio device",
7073 7073 ptgt->tgt_d_id, plun->lun_type);
7074 7074
7075 7075 /*
7076 7076 * If we let Daktari's SES be enumerated as a non-mpxio
7077 7077 * device, there will be a discrepency in that the other
7078 7078 * internal FC disks will get enumerated as mpxio devices.
7079 7079 * Applications like luxadm expect this to be consistent.
7080 7080 *
7081 7081 * So, we put in a hack here to check if this is an SES device
7082 7082 * and handle it here.
7083 7083 */
7084 7084 if (plun->lun_type == DTYPE_ESI) {
7085 7085 /*
7086 7086 * Since, pkt_state is actually FC_PKT_SUCCESS
7087 7087 * at this stage, we fake a failure here so that
7088 7088 * fcp_handle_page83 will create a device path using
7089 7089 * the WWN instead of the GUID which is not there anyway
7090 7090 */
7091 7091 fpkt->pkt_state = FC_PKT_LOCAL_RJT;
7092 7092 (void) fcp_handle_page83(fpkt, icmd, 1);
7093 7093 return;
7094 7094 }
7095 7095
7096 7096 mutex_enter(&ptgt->tgt_mutex);
7097 7097 plun->lun_state &= ~(FCP_LUN_OFFLINE |
7098 7098 FCP_LUN_MARK | FCP_LUN_BUSY);
7099 7099 mutex_exit(&ptgt->tgt_mutex);
7100 7100
7101 7101 (void) fcp_call_finish_init(pptr, ptgt,
7102 7102 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7103 7103 icmd->ipkt_cause);
7104 7104 fcp_icmd_free(pptr, icmd);
7105 7105 return;
7106 7106 }
7107 7107
7108 7108 if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7109 7109 int rval = DDI_FAILURE;
7110 7110
7111 7111 /*
7112 7112 * handle cases where report lun isn't supported
7113 7113 * by faking up our own REPORT_LUN response or
7114 7114 * UNIT ATTENTION
7115 7115 */
7116 7116 if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7117 7117 rval = fcp_check_reportlun(rsp, fpkt);
7118 7118
7119 7119 /*
7120 7120 * fcp_check_reportlun might have modified the
7121 7121 * FCP response. Copy it in again to get an updated
7122 7122 * FCP response
7123 7123 */
7124 7124 if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
7125 7125 rsp = &response;
7126 7126
7127 7127 FCP_CP_IN(fpkt->pkt_resp, rsp,
7128 7128 fpkt->pkt_resp_acc,
7129 7129 sizeof (struct fcp_rsp));
7130 7130 }
7131 7131 }
7132 7132
7133 7133 if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7134 7134 if (rval == DDI_SUCCESS) {
7135 7135 (void) fcp_call_finish_init(pptr, ptgt,
7136 7136 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7137 7137 icmd->ipkt_cause);
7138 7138 fcp_icmd_free(pptr, icmd);
7139 7139 } else {
7140 7140 fcp_retry_scsi_cmd(fpkt);
7141 7141 }
7142 7142
7143 7143 return;
7144 7144 }
7145 7145 } else {
7146 7146 if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7147 7147 mutex_enter(&ptgt->tgt_mutex);
7148 7148 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
7149 7149 mutex_exit(&ptgt->tgt_mutex);
7150 7150 }
7151 7151 }
7152 7152
7153 7153 ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
7154 7154 if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
7155 7155 (void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0,
7156 7156 DDI_DMA_SYNC_FORCPU);
7157 7157 }
7158 7158
7159 7159 switch (icmd->ipkt_opcode) {
7160 7160 case SCMD_INQUIRY:
7161 7161 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
7162 7162 fcp_handle_inquiry(fpkt, icmd);
7163 7163 break;
7164 7164
7165 7165 case SCMD_REPORT_LUN:
7166 7166 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
7167 7167 FCP_TGT_TRACE_22);
7168 7168 fcp_handle_reportlun(fpkt, icmd);
7169 7169 break;
7170 7170
7171 7171 case SCMD_INQUIRY_PAGE83:
7172 7172 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
7173 7173 (void) fcp_handle_page83(fpkt, icmd, 0);
7174 7174 break;
7175 7175
7176 7176 default:
7177 7177 fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
7178 7178 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7179 7179 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7180 7180 fcp_icmd_free(pptr, icmd);
7181 7181 break;
7182 7182 }
7183 7183 }
7184 7184
7185 7185
7186 7186 static void
7187 7187 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7188 7188 {
7189 7189 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
7190 7190 fpkt->pkt_ulp_private;
7191 7191 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
7192 7192 struct fcp_port *pptr = ptgt->tgt_port;
7193 7193
7194 7194 if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7195 7195 fcp_is_retryable(icmd)) {
7196 7196 mutex_enter(&pptr->port_mutex);
7197 7197 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7198 7198 mutex_exit(&pptr->port_mutex);
7199 7199 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7200 7200 fcp_trace, FCP_BUF_LEVEL_3, 0,
7201 7201 "Retrying %s to %x; state=%x, reason=%x",
7202 7202 (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7203 7203 "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7204 7204 fpkt->pkt_state, fpkt->pkt_reason);
7205 7205
7206 7206 fcp_queue_ipkt(pptr, fpkt);
7207 7207 } else {
7208 7208 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7209 7209 fcp_trace, FCP_BUF_LEVEL_3, 0,
7210 7210 "fcp_retry_scsi_cmd,1: state change occured"
7211 7211 " for D_ID=0x%x", ptgt->tgt_d_id);
7212 7212 mutex_exit(&pptr->port_mutex);
7213 7213 (void) fcp_call_finish_init(pptr, ptgt,
7214 7214 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7215 7215 icmd->ipkt_cause);
7216 7216 fcp_icmd_free(pptr, icmd);
7217 7217 }
7218 7218 } else {
7219 7219 fcp_print_error(fpkt);
7220 7220 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7221 7221 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7222 7222 fcp_icmd_free(pptr, icmd);
7223 7223 }
7224 7224 }
7225 7225
7226 7226 /*
7227 7227 * Function: fcp_handle_page83
7228 7228 *
7229 7229 * Description: Treats the response to INQUIRY_PAGE83.
7230 7230 *
7231 7231 * Argument: *fpkt FC packet used to convey the command.
7232 7232 * *icmd Original fcp_ipkt structure.
7233 7233 * ignore_page83_data
7234 7234 * if it's 1, that means it's a special devices's
7235 7235 * page83 response, it should be enumerated under mpxio
7236 7236 *
7237 7237 * Return Value: None
7238 7238 */
7239 7239 static void
7240 7240 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7241 7241 int ignore_page83_data)
7242 7242 {
7243 7243 struct fcp_port *pptr;
7244 7244 struct fcp_lun *plun;
7245 7245 struct fcp_tgt *ptgt;
7246 7246 uchar_t dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7247 7247 int fail = 0;
7248 7248 ddi_devid_t devid;
7249 7249 char *guid = NULL;
7250 7250 int ret;
7251 7251
7252 7252 ASSERT(icmd != NULL && fpkt != NULL);
7253 7253
7254 7254 pptr = icmd->ipkt_port;
7255 7255 ptgt = icmd->ipkt_tgt;
7256 7256 plun = icmd->ipkt_lun;
7257 7257
7258 7258 if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7259 7259 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7260 7260
7261 7261 FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7262 7262 SCMD_MAX_INQUIRY_PAGE83_SIZE);
7263 7263
7264 7264 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7265 7265 fcp_trace, FCP_BUF_LEVEL_5, 0,
7266 7266 "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7267 7267 "dtype=0x%x, lun num=%x",
7268 7268 pptr->port_instance, ptgt->tgt_d_id,
7269 7269 dev_id_page[0], plun->lun_num);
7270 7270
7271 7271 ret = ddi_devid_scsi_encode(
7272 7272 DEVID_SCSI_ENCODE_VERSION_LATEST,
7273 7273 NULL, /* driver name */
7274 7274 (unsigned char *) &plun->lun_inq, /* standard inquiry */
7275 7275 sizeof (plun->lun_inq), /* size of standard inquiry */
7276 7276 NULL, /* page 80 data */
7277 7277 0, /* page 80 len */
7278 7278 dev_id_page, /* page 83 data */
7279 7279 SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7280 7280 &devid);
7281 7281
7282 7282 if (ret == DDI_SUCCESS) {
7283 7283
7284 7284 guid = ddi_devid_to_guid(devid);
7285 7285
7286 7286 if (guid) {
7287 7287 /*
7288 7288 * Check our current guid. If it's non null
7289 7289 * and it has changed, we need to copy it into
7290 7290 * lun_old_guid since we might still need it.
7291 7291 */
7292 7292 if (plun->lun_guid &&
7293 7293 strcmp(guid, plun->lun_guid)) {
7294 7294 unsigned int len;
7295 7295
7296 7296 /*
7297 7297 * If the guid of the LUN changes,
7298 7298 * reconfiguration should be triggered
7299 7299 * to reflect the changes.
7300 7300 * i.e. we should offline the LUN with
7301 7301 * the old guid, and online the LUN with
7302 7302 * the new guid.
7303 7303 */
7304 7304 plun->lun_state |= FCP_LUN_CHANGED;
7305 7305
7306 7306 if (plun->lun_old_guid) {
7307 7307 kmem_free(plun->lun_old_guid,
7308 7308 plun->lun_old_guid_size);
7309 7309 }
7310 7310
7311 7311 len = plun->lun_guid_size;
7312 7312 plun->lun_old_guid_size = len;
7313 7313
7314 7314 plun->lun_old_guid = kmem_zalloc(len,
7315 7315 KM_NOSLEEP);
7316 7316
7317 7317 if (plun->lun_old_guid) {
7318 7318 /*
7319 7319 * The alloc was successful then
7320 7320 * let's do the copy.
7321 7321 */
7322 7322 bcopy(plun->lun_guid,
7323 7323 plun->lun_old_guid, len);
7324 7324 } else {
7325 7325 fail = 1;
7326 7326 plun->lun_old_guid_size = 0;
7327 7327 }
7328 7328 }
7329 7329 if (!fail) {
7330 7330 if (fcp_copy_guid_2_lun_block(
7331 7331 plun, guid)) {
7332 7332 fail = 1;
7333 7333 }
7334 7334 }
7335 7335 ddi_devid_free_guid(guid);
7336 7336
7337 7337 } else {
7338 7338 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7339 7339 fcp_trace, FCP_BUF_LEVEL_2, 0,
7340 7340 "fcp_handle_page83: unable to create "
7341 7341 "GUID");
7342 7342
7343 7343 /* couldn't create good guid from devid */
7344 7344 fail = 1;
7345 7345 }
7346 7346 ddi_devid_free(devid);
7347 7347
7348 7348 } else if (ret == DDI_NOT_WELL_FORMED) {
7349 7349 /* NULL filled data for page 83 */
7350 7350 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7351 7351 fcp_trace, FCP_BUF_LEVEL_2, 0,
7352 7352 "fcp_handle_page83: retry GUID");
7353 7353
7354 7354 icmd->ipkt_retries = 0;
7355 7355 fcp_retry_scsi_cmd(fpkt);
7356 7356 return;
7357 7357 } else {
7358 7358 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7359 7359 fcp_trace, FCP_BUF_LEVEL_2, 0,
7360 7360 "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7361 7361 ret);
7362 7362 /*
7363 7363 * Since the page83 validation
7364 7364 * introduced late, we are being
7365 7365 * tolerant to the existing devices
7366 7366 * that already found to be working
7367 7367 * under mpxio, like A5200's SES device,
7368 7368 * its page83 response will not be standard-compliant,
7369 7369 * but we still want it to be enumerated under mpxio.
7370 7370 */
7371 7371 if (fcp_symmetric_device_probe(plun) != 0) {
7372 7372 fail = 1;
7373 7373 }
7374 7374 }
7375 7375
7376 7376 } else {
7377 7377 /* bad packet state */
7378 7378 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7379 7379
7380 7380 /*
7381 7381 * For some special devices (A5K SES and Daktari's SES devices),
7382 7382 * they should be enumerated under mpxio
7383 7383 * or "luxadm dis" will fail
7384 7384 */
7385 7385 if (ignore_page83_data) {
7386 7386 fail = 0;
7387 7387 } else {
7388 7388 fail = 1;
7389 7389 }
7390 7390 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7391 7391 fcp_trace, FCP_BUF_LEVEL_2, 0,
7392 7392 "!Devid page cmd failed. "
7393 7393 "fpkt_state: %x fpkt_reason: %x",
7394 7394 "ignore_page83: %d",
7395 7395 fpkt->pkt_state, fpkt->pkt_reason,
7396 7396 ignore_page83_data);
7397 7397 }
7398 7398
7399 7399 mutex_enter(&pptr->port_mutex);
7400 7400 mutex_enter(&plun->lun_mutex);
7401 7401 /*
7402 7402 * If lun_cip is not NULL, then we needn't update lun_mpxio to avoid
7403 7403 * mismatch between lun_cip and lun_mpxio.
7404 7404 */
7405 7405 if (plun->lun_cip == NULL) {
7406 7406 /*
7407 7407 * If we don't have a guid for this lun it's because we were
7408 7408 * unable to glean one from the page 83 response. Set the
7409 7409 * control flag to 0 here to make sure that we don't attempt to
7410 7410 * enumerate it under mpxio.
7411 7411 */
7412 7412 if (fail || pptr->port_mpxio == 0) {
7413 7413 plun->lun_mpxio = 0;
7414 7414 } else {
7415 7415 plun->lun_mpxio = 1;
7416 7416 }
7417 7417 }
7418 7418 mutex_exit(&plun->lun_mutex);
7419 7419 mutex_exit(&pptr->port_mutex);
7420 7420
7421 7421 mutex_enter(&ptgt->tgt_mutex);
7422 7422 plun->lun_state &=
7423 7423 ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7424 7424 mutex_exit(&ptgt->tgt_mutex);
7425 7425
7426 7426 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7427 7427 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7428 7428
7429 7429 fcp_icmd_free(pptr, icmd);
7430 7430 }
7431 7431
7432 7432 /*
7433 7433 * Function: fcp_handle_inquiry
7434 7434 *
7435 7435 * Description: Called by fcp_scsi_callback to handle the response to an
7436 7436 * INQUIRY request.
7437 7437 *
7438 7438 * Argument: *fpkt FC packet used to convey the command.
7439 7439 * *icmd Original fcp_ipkt structure.
7440 7440 *
7441 7441 * Return Value: None
7442 7442 */
static void
fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
{
	struct fcp_port	*pptr;
	struct fcp_lun	*plun;
	struct fcp_tgt	*ptgt;
	uchar_t		dtype;
	uchar_t		pqual;
	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;

	ASSERT(icmd != NULL && fpkt != NULL);

	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;
	plun = icmd->ipkt_lun;

	/* Copy the INQUIRY response out of the FC packet into the LUN. */
	FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
	    sizeof (struct scsi_inquiry));

	/* Byte 0 of INQUIRY data: low 5 bits device type, top 3 qualifier. */
	dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
	pqual = plun->lun_inq.inq_dtype >> 5;

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
	    "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
	    plun->lun_num, dtype, pqual);

	if (pqual != 0) {
		/*
		 * Non-zero peripheral qualifier: the LUN is not usable as
		 * reported; log it, account for this LUN in the discovery
		 * countdown, and release the internal packet.
		 */
		fcp_log(CE_CONT, pptr->port_dip,
		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
		    "Device type=0x%x Peripheral qual=0x%x\n",
		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
		    "Device type=0x%x Peripheral qual=0x%x\n",
		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);

		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);

		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/*
	 * If the device is already initialized, check the dtype
	 * for a change. If it has changed then update the flags
	 * so the create_luns will offline the old device and
	 * create the new device. Refer to bug: 4764752
	 */
	if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
		plun->lun_state |= FCP_LUN_CHANGED;
	}
	plun->lun_type = plun->lun_inq.inq_dtype;

	/*
	 * This code is setting/initializing the throttling in the FCA
	 * driver. Done at most once per port (port_notify latch).
	 * "pid" is presumably a file-scope product-id string compared
	 * against the INQUIRY product field -- not visible in this chunk.
	 */
	mutex_enter(&pptr->port_mutex);
	if (!pptr->port_notify) {
		if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
			uint32_t cmd = 0;
			/*
			 * NOTE(review): because of C operator precedence
			 * (>> binds tighter than &) and cmd being 0, this
			 * expression reduces to
			 * FC_NOTIFY_THROTTLE | (FCP_SVE_THROTTLE << 8).
			 */
			cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
			    ((cmd & 0xFFFFFF00 >> 8) |
			    FCP_SVE_THROTTLE << 8));
			pptr->port_notify = 1;
			/* port_mutex dropped across the upcall to the FCA. */
			mutex_exit(&pptr->port_mutex);
			(void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
			mutex_enter(&pptr->port_mutex);
		}
	}

	/* Bail out if a link reset/target change raced with this command. */
	if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_handle_inquiry,1:state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&pptr->port_mutex);

		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
		(void) fcp_call_finish_init(pptr, ptgt,
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}
	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
	mutex_exit(&pptr->port_mutex);

	/* Retrieve the rscn count (if a valid one exists) */
	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
		rscn_count = ((fc_ulp_rscn_info_t *)
		    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
	} else {
		rscn_count = FC_INVALID_RSCN_COUNT;
	}

	/*
	 * Issue Inquiry VPD Page 0x83 to uniquely identify this logical
	 * unit. On failure the discovery countdown is decremented here;
	 * on success the page-83 callback does that later.
	 */
	if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
	    SCMD_MAX_INQUIRY_PAGE83_SIZE,
	    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
	    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
		fcp_log(CE_WARN, NULL, "!failed to send page 83");
		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
		(void) fcp_call_finish_init(pptr, ptgt,
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause);
	}

	/* The original INQUIRY's internal packet is done in every path. */
	fcp_icmd_free(pptr, icmd);
}
7565 7565
/*
 * Function: fcp_handle_reportlun
 *
 * Description: Called by fcp_scsi_callback to handle the response to a
 *		REPORT_LUN request. Parses the LUN list, reissues a larger
 *		REPORT_LUN if the buffer was too small, flags LUNs that
 *		disappeared/reappeared, and kicks off an INQUIRY per LUN.
 *
 * Argument: *fpkt		FC packet used to convey the command.
 *		*icmd		Original fcp_ipkt structure.
 *
 * Return Value: None
 */
static void
fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
{
	int				i;
	int				nluns_claimed;
	int				nluns_bufmax;
	int				len;
	uint16_t			lun_num;
	uint32_t			rscn_count = FC_INVALID_RSCN_COUNT;
	struct fcp_port			*pptr;
	struct fcp_tgt			*ptgt;
	struct fcp_lun			*plun;
	struct fcp_reportlun_resp	*report_lun;

	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;
	len = fpkt->pkt_datalen;

	/*
	 * Response too small for even the REPORT LUNS header, or no memory
	 * to copy it: still do the discovery accounting and free the ipkt.
	 */
	if ((len < FCP_LUN_HEADER) ||
	    ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
	    fpkt->pkt_datalen);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
	    pptr->port_instance, ptgt->tgt_d_id);

	/*
	 * Get the number of luns (which is supplied as LUNS * 8) the
	 * device claims it has.
	 */
	nluns_claimed = BE_32(report_lun->num_lun) >> 3;

	/*
	 * Get the maximum number of luns the buffer submitted can hold.
	 */
	nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;

	/*
	 * Due to limitations of certain hardware, we support only 16 bit LUNs
	 */
	if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
		kmem_free(report_lun, len);

		fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
		    " 0x%x number of LUNs for target=%x", nluns_claimed,
		    ptgt->tgt_d_id);

		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/*
	 * If there are more LUNs than we have allocated memory for,
	 * allocate more space and send down yet another report lun if
	 * the maximum number of attempts hasn't been reached.
	 */
	mutex_enter(&ptgt->tgt_mutex);

	if ((nluns_claimed > nluns_bufmax) &&
	    (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {

		/* Intentionally shadows the outer plun; ASSERT use only. */
		struct fcp_lun	*plun;

		ptgt->tgt_report_lun_cnt++;
		plun = ptgt->tgt_lun;
		ASSERT(plun != NULL);
		mutex_exit(&ptgt->tgt_mutex);

		kmem_free(report_lun, len);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "!Dynamically discovered %d LUNs for D_ID=%x",
		    nluns_claimed, ptgt->tgt_d_id);

		/* Retrieve the rscn count (if a valid one exists) */
		if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
			rscn_count = ((fc_ulp_rscn_info_t *)
			    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
			    ulp_rscn_count;
		} else {
			rscn_count = FC_INVALID_RSCN_COUNT;
		}

		/* Reissue REPORT_LUN with a buffer sized for all LUNs. */
		if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
		    FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
			(void) fcp_call_finish_init(pptr, ptgt,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause);
		}

		fcp_icmd_free(pptr, icmd);
		return;
	}

	/* Retry budget exhausted: clamp to what the buffer actually holds. */
	if (nluns_claimed > nluns_bufmax) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "Target=%x:%x:%x:%x:%x:%x:%x:%x"
		    " Number of LUNs lost=%x",
		    ptgt->tgt_port_wwn.raw_wwn[0],
		    ptgt->tgt_port_wwn.raw_wwn[1],
		    ptgt->tgt_port_wwn.raw_wwn[2],
		    ptgt->tgt_port_wwn.raw_wwn[3],
		    ptgt->tgt_port_wwn.raw_wwn[4],
		    ptgt->tgt_port_wwn.raw_wwn[5],
		    ptgt->tgt_port_wwn.raw_wwn[6],
		    ptgt->tgt_port_wwn.raw_wwn[7],
		    nluns_claimed - nluns_bufmax);

		nluns_claimed = nluns_bufmax;
	}
	ptgt->tgt_lun_cnt = nluns_claimed;

	/*
	 * Identify missing LUNs and print warning messages
	 */
	for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
		int offline;
		int exists = 0;

		offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;

		/* Look for this known LUN in the freshly reported list. */
		for (i = 0; i < nluns_claimed && exists == 0; i++) {
			uchar_t *lun_string;

			lun_string = (uchar_t *)&(report_lun->lun_string[i]);

			switch (lun_string[0] & 0xC0) {
			case FCP_LUN_ADDRESSING:
			case FCP_PD_ADDRESSING:
			case FCP_VOLUME_ADDRESSING:
				lun_num = ((lun_string[0] & 0x3F) << 8) |
				    lun_string[1];
				if (plun->lun_num == lun_num) {
					exists++;
					break;
				}
				break;

			default:
				break;
			}
		}

		if (!exists && !offline) {
			/*
			 * Known, online LUN no longer reported. Re-take the
			 * locks in port->tgt order before re-checking state.
			 */
			mutex_exit(&ptgt->tgt_mutex);

			mutex_enter(&pptr->port_mutex);
			mutex_enter(&ptgt->tgt_mutex);
			if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
				/*
				 * set disappear flag when device was connected
				 */
				if (!(plun->lun_state &
				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
					plun->lun_state |= FCP_LUN_DISAPPEARED;
				}
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				if (!(plun->lun_state &
				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun=%x for target=%x disappeared",
					    plun->lun_num, ptgt->tgt_d_id);
				}
				mutex_enter(&ptgt->tgt_mutex);
			} else {
				/* Raced with a link/target change: abort. */
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_5, 0,
				    "fcp_handle_reportlun,1: state change"
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				kmem_free(report_lun, len);
				(void) fcp_call_finish_init(pptr, ptgt,
				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
				    icmd->ipkt_cause);
				fcp_icmd_free(pptr, icmd);
				return;
			}
		} else if (exists) {
			/*
			 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
			 * actually exists in REPORT_LUN response
			 */
			if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
				plun->lun_state &=
				    ~FCP_LUN_DEVICE_NOT_CONNECTED;
			}
			if (offline || plun->lun_num == 0) {
				if (plun->lun_state & FCP_LUN_DISAPPEARED) {
					plun->lun_state &= ~FCP_LUN_DISAPPEARED;
					mutex_exit(&ptgt->tgt_mutex);
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun=%x for target=%x reappeared",
					    plun->lun_num, ptgt->tgt_d_id);
					mutex_enter(&ptgt->tgt_mutex);
				}
			}
		}
	}

	/* Number of per-LUN discoveries left before the target is done. */
	ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
	mutex_exit(&ptgt->tgt_mutex);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
	    pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);

	/* scan each lun */
	for (i = 0; i < nluns_claimed; i++) {
		uchar_t	*lun_string;

		lun_string = (uchar_t *)&(report_lun->lun_string[i]);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
		    " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
		    lun_string[0]);

		switch (lun_string[0] & 0xC0) {
		case FCP_LUN_ADDRESSING:
		case FCP_PD_ADDRESSING:
		case FCP_VOLUME_ADDRESSING:
			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];

			/* We will skip masked LUNs because of the blacklist. */
			if (fcp_lun_blacklist != NULL) {
				mutex_enter(&ptgt->tgt_mutex);
				if (fcp_should_mask(&ptgt->tgt_port_wwn,
				    lun_num) == TRUE) {
					ptgt->tgt_lun_cnt--;
					mutex_exit(&ptgt->tgt_mutex);
					break;
				}
				mutex_exit(&ptgt->tgt_mutex);
			}

			/* see if this LUN is already allocated */
			if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
				plun = fcp_alloc_lun(ptgt);
				if (plun == NULL) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun allocation failed"
					    " target=%x lun=%x",
					    ptgt->tgt_d_id, lun_num);
					break;
				}
			}

			mutex_enter(&plun->lun_tgt->tgt_mutex);
			/* convert to LUN */
			plun->lun_addr.ent_addr_0 =
			    BE_16(*(uint16_t *)&(lun_string[0]));
			plun->lun_addr.ent_addr_1 =
			    BE_16(*(uint16_t *)&(lun_string[2]));
			plun->lun_addr.ent_addr_2 =
			    BE_16(*(uint16_t *)&(lun_string[4]));
			plun->lun_addr.ent_addr_3 =
			    BE_16(*(uint16_t *)&(lun_string[6]));

			plun->lun_num = lun_num;
			plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
			plun->lun_state &= ~FCP_LUN_OFFLINE;
			mutex_exit(&plun->lun_tgt->tgt_mutex);

			/* Retrieve the rscn count (if a valid one exists) */
			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
				rscn_count = ((fc_ulp_rscn_info_t *)
				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
				    ulp_rscn_count;
			} else {
				rscn_count = FC_INVALID_RSCN_COUNT;
			}

			/*
			 * Kick off the per-LUN INQUIRY. On success the
			 * INQUIRY callback decrements tgt_tmp_cnt, so we
			 * skip the decrement at the bottom of the loop.
			 */
			if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
				mutex_enter(&pptr->port_mutex);
				mutex_enter(&plun->lun_tgt->tgt_mutex);
				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!failed to send INQUIRY"
					    " target=%x lun=%x",
					    ptgt->tgt_d_id, plun->lun_num);
				} else {
					FCP_TRACE(fcp_logq,
					    pptr->port_instbuf, fcp_trace,
					    FCP_BUF_LEVEL_5, 0,
					    "fcp_handle_reportlun,2: state"
					    " change occured for D_ID=0x%x",
					    ptgt->tgt_d_id);
				}
				mutex_exit(&plun->lun_tgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
			} else {
				continue;
			}
			break;

		default:
			fcp_log(CE_WARN, NULL,
			    "!Unsupported LUN Addressing method %x "
			    "in response to REPORT_LUN", lun_string[0]);
			break;
		}

		/*
		 * each time through this loop we should decrement
		 * the tmp_cnt by one -- since we go through this loop
		 * one time for each LUN, the tmp_cnt should never be <=0
		 */
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
	}

	/* Target reported zero LUNs: still retire one tmp_cnt credit. */
	if (i == 0) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
	}

	kmem_free(report_lun, len);
	fcp_icmd_free(pptr, icmd);
}
7918 7918
7919 7919
7920 7920 /*
7921 7921 * called internally to return a LUN given a target and a LUN number
7922 7922 */
7923 7923 static struct fcp_lun *
7924 7924 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7925 7925 {
7926 7926 struct fcp_lun *plun;
7927 7927
7928 7928 mutex_enter(&ptgt->tgt_mutex);
7929 7929 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7930 7930 if (plun->lun_num == lun_num) {
7931 7931 mutex_exit(&ptgt->tgt_mutex);
7932 7932 return (plun);
7933 7933 }
7934 7934 }
7935 7935 mutex_exit(&ptgt->tgt_mutex);
7936 7936
7937 7937 return (NULL);
7938 7938 }
7939 7939
7940 7940
/*
 * handle finishing one target for fcp_finish_init
 *
 * return true (non-zero) if we want finish_init to continue with the
 * next target
 *
 * called with the port mutex held
 */
/*ARGSUSED*/
static int
fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int cause)
{
	int	rval = 1;
	ASSERT(pptr != NULL);
	ASSERT(ptgt != NULL);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
	    ptgt->tgt_state);

	ASSERT(mutex_owned(&pptr->port_mutex));

	/*
	 * Generation check: a mismatch on either counter means a newer
	 * link reset or target change superseded this discovery pass,
	 * so stop iterating (return 0).
	 */
	if ((pptr->port_link_cnt != link_cnt) ||
	    (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
		/*
		 * oh oh -- another link reset or target change
		 * must have occurred while we are in here
		 */
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);

		return (0);
	} else {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
	}

	mutex_enter(&ptgt->tgt_mutex);

	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
		/*
		 * tgt is not offline -- is it marked (i.e. needs
		 * to be offlined) ??
		 */
		if (ptgt->tgt_state & FCP_TGT_MARK) {
			/*
			 * this target not offline *and*
			 * marked
			 */
			ptgt->tgt_state &= ~FCP_TGT_MARK;
			rval = fcp_offline_target(pptr, ptgt, link_cnt,
			    tgt_cnt, 0, 0);
		} else {
			ptgt->tgt_state &= ~FCP_TGT_BUSY;

			/* create the LUNs */
			if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
				ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
				fcp_create_luns(ptgt, link_cnt, tgt_cnt,
				    cause);
				ptgt->tgt_device_created = 1;
			} else {
				/* On-demand node: just clear the busy mark. */
				fcp_update_tgt_state(ptgt, FCP_RESET,
				    FCP_LUN_BUSY);
			}
		}
	}

	mutex_exit(&ptgt->tgt_mutex);

	return (rval);
}
8013 8013
8014 8014
8015 8015 /*
8016 8016 * this routine is called to finish port initialization
8017 8017 *
8018 8018 * Each port has a "temp" counter -- when a state change happens (e.g.
8019 8019 * port online), the temp count is set to the number of devices in the map.
8020 8020 * Then, as each device gets "discovered", the temp counter is decremented
8021 8021 * by one. When this count reaches zero we know that all of the devices
8022 8022 * in the map have been discovered (or an error has occurred), so we can
8023 8023 * then finish initialization -- which is done by this routine (well, this
8024 8024 * and fcp-finish_tgt())
8025 8025 *
8026 8026 * acquires and releases the global mutex
8027 8027 *
8028 8028 * called with the port mutex owned
8029 8029 */
8030 8030 static void
8031 8031 fcp_finish_init(struct fcp_port *pptr)
8032 8032 {
8033 8033 #ifdef DEBUG
8034 8034 bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
8035 8035 pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
8036 8036 FCP_STACK_DEPTH);
8037 8037 #endif /* DEBUG */
8038 8038
8039 8039 ASSERT(mutex_owned(&pptr->port_mutex));
8040 8040
8041 8041 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8042 8042 fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
8043 8043 " entering; ipkt count=%d", pptr->port_ipkt_cnt);
8044 8044
8045 8045 if ((pptr->port_state & FCP_STATE_ONLINING) &&
8046 8046 !(pptr->port_state & (FCP_STATE_SUSPENDED |
8047 8047 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
8048 8048 pptr->port_state &= ~FCP_STATE_ONLINING;
8049 8049 pptr->port_state |= FCP_STATE_ONLINE;
8050 8050 }
8051 8051
8052 8052 /* Wake up threads waiting on config done */
8053 8053 cv_broadcast(&pptr->port_config_cv);
8054 8054 }
8055 8055
8056 8056
/*
 * called from fcp_finish_init to create the LUNs for a target
 *
 * called with the port mutex owned
 * NOTE(review): the ASSERT below checks tgt_mutex, not port_mutex; the
 * caller (fcp_finish_tgt) holds both -- confirm before relying on this
 * comment alone.
 */
static void
fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
{
	struct fcp_lun	*plun;
	struct fcp_port	*pptr;
	child_info_t	*cip = NULL;

	ASSERT(ptgt != NULL);
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	pptr = ptgt->tgt_port;

	ASSERT(pptr != NULL);

	/* scan all LUNs for this target */
	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
		if (plun->lun_state & FCP_LUN_OFFLINE) {
			continue;
		}

		/* Marked LUNs were not re-reported: offline them now. */
		if (plun->lun_state & FCP_LUN_MARK) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_create_luns: offlining marked LUN!");
			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
			continue;
		}

		plun->lun_state &= ~FCP_LUN_BUSY;

		/*
		 * There are conditions in which FCP_LUN_INIT flag is cleared
		 * but we have a valid plun->lun_cip. To cover this case also
		 * CLEAR_BUSY whenever we have a valid lun_cip.
		 */
		if (plun->lun_mpxio && plun->lun_cip &&
		    (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
		    0, 0))) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_create_luns: enable lun %p failed!",
			    plun);
		}

		/* Already initialized and unchanged: nothing to create. */
		if (plun->lun_state & FCP_LUN_INIT &&
		    !(plun->lun_state & FCP_LUN_CHANGED)) {
			continue;
		}

		if (cause == FCP_CAUSE_USER_CREATE) {
			continue;
		}

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_6, 0,
		    "create_luns: passing ONLINE elem to HP thread");

		/*
		 * If lun has changed, prepare for offlining the old path.
		 * Do not offline the old path right now, since it may be
		 * still opened.
		 */
		if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
			fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
		}

		/* pass an ONLINE element to the hotplug thread */
		if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
		    link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {

			/*
			 * We can not synchronous attach (i.e pass
			 * NDI_ONLINE_ATTACH) here as we might be
			 * coming from an interrupt or callback
			 * thread.
			 */
			if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
			    link_cnt, tgt_cnt, 0, 0)) {
				fcp_log(CE_CONT, pptr->port_dip,
				    "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
				    plun->lun_tgt->tgt_d_id, plun->lun_num);
			}
		}
	}
}
8148 8148
8149 8149
8150 8150 /*
8151 8151 * function to online/offline devices
8152 8152 */
8153 8153 static int
8154 8154 fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
8155 8155 int online, int lcount, int tcount, int flags)
8156 8156 {
8157 8157 int rval = NDI_FAILURE;
8158 8158 int circ;
8159 8159 child_info_t *ccip;
8160 8160 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
8161 8161 int is_mpxio = pptr->port_mpxio;
8162 8162
8163 8163 if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
8164 8164 /*
8165 8165 * When this event gets serviced, lun_cip and lun_mpxio
8166 8166 * has changed, so it should be invalidated now.
8167 8167 */
8168 8168 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
8169 8169 FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
8170 8170 "plun: %p, cip: %p, what:%d", plun, cip, online);
8171 8171 return (rval);
8172 8172 }
8173 8173
8174 8174 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8175 8175 fcp_trace, FCP_BUF_LEVEL_2, 0,
8176 8176 "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
8177 8177 "flags=%x mpxio=%x\n",
8178 8178 plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
8179 8179 plun->lun_mpxio);
8180 8180
8181 8181 /*
8182 8182 * lun_mpxio needs checking here because we can end up in a race
8183 8183 * condition where this task has been dispatched while lun_mpxio is
8184 8184 * set, but an earlier FCP_ONLINE task for the same LUN tried to
8185 8185 * enable MPXIO for the LUN, but was unable to, and hence cleared
8186 8186 * the flag. We rely on the serialization of the tasks here. We return
8187 8187 * NDI_SUCCESS so any callers continue without reporting spurious
8188 8188 * errors, and the still think we're an MPXIO LUN.
8189 8189 */
8190 8190
8191 8191 if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
8192 8192 online == FCP_MPXIO_PATH_SET_BUSY) {
8193 8193 if (plun->lun_mpxio) {
8194 8194 rval = fcp_update_mpxio_path(plun, cip, online);
8195 8195 } else {
8196 8196 rval = NDI_SUCCESS;
8197 8197 }
8198 8198 return (rval);
8199 8199 }
8200 8200
8201 8201 if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
8202 8202 return (NDI_FAILURE);
8203 8203 }
8204 8204
8205 8205 if (is_mpxio) {
↓ open down ↓ |
8205 lines elided |
↑ open up ↑ |
8206 8206 mdi_devi_enter(pptr->port_dip, &circ);
8207 8207 } else {
8208 8208 ndi_devi_enter(pptr->port_dip, &circ);
8209 8209 }
8210 8210
8211 8211 mutex_enter(&pptr->port_mutex);
8212 8212 mutex_enter(&plun->lun_mutex);
8213 8213
8214 8214 if (online == FCP_ONLINE) {
8215 8215 ccip = fcp_get_cip(plun, cip, lcount, tcount);
8216 - if (ccip == NULL) {
8217 - goto fail;
8218 - }
8216 + if (ccip == NULL)
8217 + goto skip;
8219 8218 } else {
8220 - if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
8221 - goto fail;
8222 - }
8219 + if (fcp_is_child_present(plun, cip) != FC_SUCCESS)
8220 + goto skip;
8223 8221 ccip = cip;
8224 8222 }
8225 8223
8226 8224 if (online == FCP_ONLINE) {
8227 8225 rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
8228 8226 &circ);
8229 - fc_ulp_log_device_event(pptr->port_fp_handle,
8230 - FC_ULP_DEVICE_ONLINE);
8231 8227 } else {
8232 8228 rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
8233 8229 &circ);
8234 - fc_ulp_log_device_event(pptr->port_fp_handle,
8235 - FC_ULP_DEVICE_OFFLINE);
8236 8230 }
8237 8231
8238 -fail: mutex_exit(&plun->lun_mutex);
8232 +skip:
8233 + mutex_exit(&plun->lun_mutex);
8239 8234 mutex_exit(&pptr->port_mutex);
8240 8235
8236 + if (rval == NDI_SUCCESS) {
8237 + fc_ulp_log_device_event(pptr->port_fp_handle,
8238 + online == FCP_ONLINE ?
8239 + FC_ULP_DEVICE_ONLINE : FC_ULP_DEVICE_OFFLINE);
8240 + }
8241 +
8241 8242 if (is_mpxio) {
8242 8243 mdi_devi_exit(pptr->port_dip, circ);
8243 8244 } else {
8244 8245 ndi_devi_exit(pptr->port_dip, circ);
8245 8246 }
8246 8247
8247 8248 fc_ulp_idle_port(pptr->port_fp_handle);
8248 8249
8249 8250 return (rval);
8250 8251 }
8251 8252
8252 8253
/*
 * take a target offline by taking all of its LUNs offline
 *
 * Called with both port_mutex and tgt_mutex held. Returns 0 if a newer
 * link reset/target change makes this request stale, 1 otherwise.
 */
/*ARGSUSED*/
static int
fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int nowait, int flags)
{
	struct fcp_tgt_elem	*elem;

	ASSERT(mutex_owned(&pptr->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));

	/* Stale request: a newer link/target generation exists. */
	if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
	    ptgt->tgt_change_cnt)) {
		/* tgt_mutex dropped around the trace macro. */
		mutex_exit(&ptgt->tgt_mutex);
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
		mutex_enter(&ptgt->tgt_mutex);

		return (0);
	}

	ptgt->tgt_pd_handle = NULL;
	mutex_exit(&ptgt->tgt_mutex);
	FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
	mutex_enter(&ptgt->tgt_mutex);

	tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;

	/*
	 * If the target supports tagged commands (tgt_tcap) and we can
	 * allocate a deferral element, queue the offline on the port's
	 * offline-target list (delayed by fcp_offline_delay unless nowait);
	 * otherwise offline it immediately.
	 */
	if (ptgt->tgt_tcap &&
	    (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
		elem->flags = flags;
		elem->time = fcp_watchdog_time;
		if (nowait == 0) {
			elem->time += fcp_offline_delay;
		}
		elem->ptgt = ptgt;
		elem->link_cnt = link_cnt;
		elem->tgt_cnt = tgt_cnt;
		elem->next = pptr->port_offline_tgts;
		pptr->port_offline_tgts = elem;
	} else {
		fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
	}

	return (1);
}
8302 8303
8303 8304
/*
 * Immediately offline a target: re-enable fabric relogin for its WWN,
 * mark the target offline, drop its port-device handle, and offline all
 * of its LUNs. Called with port_mutex and tgt_mutex held.
 */
static void
fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int flags)
{
	ASSERT(mutex_owned(&pptr->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
	ptgt->tgt_state = FCP_TGT_OFFLINE;
	ptgt->tgt_pd_handle = NULL;
	fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
}
8316 8317
8317 8318
8318 8319 static void
8319 8320 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
8320 8321 int flags)
8321 8322 {
8322 8323 struct fcp_lun *plun;
8323 8324
8324 8325 ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
8325 8326 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8326 8327
8327 8328 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8328 8329 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
8329 8330 fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
8330 8331 }
8331 8332 }
8332 8333 }
8333 8334
8334 8335
8335 8336 /*
8336 8337 * take a LUN offline
8337 8338 *
8338 8339 * enters and leaves with the target mutex held, releasing it in the process
8339 8340 *
8340 8341 * allocates memory in non-sleep mode
8341 8342 */
8342 8343 static void
8343 8344 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8344 8345 int nowait, int flags)
8345 8346 {
8346 8347 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
8347 8348 struct fcp_lun_elem *elem;
8348 8349
8349 8350 ASSERT(plun != NULL);
8350 8351 ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8351 8352
8352 8353 if (nowait) {
8353 8354 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8354 8355 return;
8355 8356 }
8356 8357
8357 8358 if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8358 8359 elem->flags = flags;
8359 8360 elem->time = fcp_watchdog_time;
8360 8361 if (nowait == 0) {
8361 8362 elem->time += fcp_offline_delay;
8362 8363 }
8363 8364 elem->plun = plun;
8364 8365 elem->link_cnt = link_cnt;
8365 8366 elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
8366 8367 elem->next = pptr->port_offline_luns;
8367 8368 pptr->port_offline_luns = elem;
8368 8369 } else {
8369 8370 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8370 8371 }
8371 8372 }
8372 8373
8373 8374
/*
 * Prepare a LUN for offlining: abort all commands still queued against
 * it and, for an MPxIO LUN with a valid path, clear the path-busy state
 * and disable the path. Enters and leaves with the target mutex held,
 * dropping it around the command scan and the MPxIO path disable.
 */
static void
fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
{
	struct fcp_pkt	*head = NULL;

	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	mutex_exit(&LUN_TGT->tgt_mutex);

	/* Collect and abort every outstanding command on this LUN. */
	head = fcp_scan_commands(plun);
	if (head != NULL) {
		fcp_abort_commands(head, LUN_PORT);
	}

	mutex_enter(&LUN_TGT->tgt_mutex);

	if (plun->lun_cip && plun->lun_mpxio) {
		/*
		 * Intimate MPxIO lun busy is cleared
		 */
		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
		    0, 0)) {
			fcp_log(CE_NOTE, LUN_PORT->port_dip,
			    "Can not ENABLE LUN; D_ID=%x, LUN=%x",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
		/*
		 * Intimate MPxIO that the lun is now marked for offline
		 */
		mutex_exit(&LUN_TGT->tgt_mutex);
		(void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
		mutex_enter(&LUN_TGT->tgt_mutex);
	}
}
8409 8410
/*
 * Take a LUN offline immediately: update its offline state flags,
 * abort its queued commands, and pass an FCP_OFFLINE work element to
 * the hotplug thread so the child node/path is torn down.
 *
 * Called and returns with the target's tgt_mutex held; the mutex is
 * dropped around fcp_update_offline_flags(), which acquires it itself.
 */
static void
fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
    int flags)
{
	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	/* fcp_update_offline_flags() takes tgt_mutex on its own. */
	mutex_exit(&LUN_TGT->tgt_mutex);
	fcp_update_offline_flags(plun);
	mutex_enter(&LUN_TGT->tgt_mutex);

	fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);

	FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_4, 0,
	    "offline_lun: passing OFFLINE elem to HP thread");

	/* Only LUNs with an attached child node need the HP thread. */
	if (plun->lun_cip) {
		fcp_log(CE_NOTE, LUN_PORT->port_dip,
		    "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
		    plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
		    LUN_TGT->tgt_trace);

		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
		    link_cnt, tgt_cnt, flags, 0)) {
			fcp_log(CE_CONT, LUN_PORT->port_dip,
			    "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
	}
}
8440 8441
/*
 * Walk the port's list of delayed LUN-offline requests and execute the
 * ones whose delay has expired.  An expired element is acted upon only
 * if neither the link state counter nor the target generation counter
 * changed since it was queued (otherwise the request is stale) and the
 * LUN is not already offline.  Expired elements are always unlinked
 * and freed, acted upon or not.
 *
 * NOTE(review): lun_state is tested against FCP_TGT_OFFLINE here, not
 * FCP_LUN_OFFLINE - presumably the two flags share the same bit value;
 * verify against fcp.h.
 *
 * Called with the port_mutex held.
 */
static void
fcp_scan_offline_luns(struct fcp_port *pptr)
{
	struct fcp_lun_elem *elem;
	struct fcp_lun_elem *prev;
	struct fcp_lun_elem *next;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	prev = NULL;
	elem = pptr->port_offline_luns;
	while (elem) {
		next = elem->next;
		if (elem->time <= fcp_watchdog_time) {
			int changed = 1;
			struct fcp_tgt *ptgt = elem->plun->lun_tgt;

			mutex_enter(&ptgt->tgt_mutex);
			if (pptr->port_link_cnt == elem->link_cnt &&
			    ptgt->tgt_change_cnt == elem->tgt_cnt) {
				changed = 0;
			}

			if (!changed &&
			    !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
				fcp_offline_lun_now(elem->plun,
				    elem->link_cnt, elem->tgt_cnt, elem->flags);
			}
			mutex_exit(&ptgt->tgt_mutex);

			kmem_free(elem, sizeof (*elem));

			/* Unlink the consumed element from the list. */
			if (prev) {
				prev->next = next;
			} else {
				pptr->port_offline_luns = next;
			}
		} else {
			prev = elem;
		}
		elem = next;
	}
}
8484 8485
8485 8486
/*
 * Walk the port's list of delayed target-offline requests and offline
 * the targets whose delay has expired and whose request is still
 * current.  A request is current if either nothing changed on the
 * target since it was queued, or the single intervening event was the
 * local port going offline - in that case fp will not ask us again to
 * offline the target, so we must do it now.  Expired elements are
 * always unlinked and freed, acted upon or not.
 *
 * Called with the port_mutex held.
 */
static void
fcp_scan_offline_tgts(struct fcp_port *pptr)
{
	struct fcp_tgt_elem *elem;
	struct fcp_tgt_elem *prev;
	struct fcp_tgt_elem *next;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	prev = NULL;
	elem = pptr->port_offline_tgts;
	while (elem) {
		next = elem->next;
		if (elem->time <= fcp_watchdog_time) {
			int outdated = 1;
			struct fcp_tgt *ptgt = elem->ptgt;

			mutex_enter(&ptgt->tgt_mutex);

			if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
				/* No change on tgt since elem was created. */
				outdated = 0;
			} else if (ptgt->tgt_change_cnt == elem->tgt_cnt + 1 &&
			    pptr->port_link_cnt == elem->link_cnt + 1 &&
			    ptgt->tgt_statec_cause == FCP_CAUSE_LINK_DOWN) {
				/*
				 * Exactly one thing happened to the target
				 * inbetween: the local port went offline.
				 * For fp the remote port is already gone so
				 * it will not tell us again to offline the
				 * target. We must offline it now.
				 */
				outdated = 0;
			}

			if (!outdated && !(ptgt->tgt_state &
			    FCP_TGT_OFFLINE)) {
				fcp_offline_target_now(pptr,
				    ptgt, elem->link_cnt, elem->tgt_cnt,
				    elem->flags);
			}

			mutex_exit(&ptgt->tgt_mutex);

			kmem_free(elem, sizeof (*elem));

			/* Unlink the consumed element from the list. */
			if (prev) {
				prev->next = next;
			} else {
				pptr->port_offline_tgts = next;
			}
		} else {
			prev = elem;
		}
		elem = next;
	}
}
8543 8544
8544 8545
/*
 * Mark a LUN offline in its state flags and, if a child node was
 * created for it (FCP_SCSI_LUN_TGT_INIT), deliver FCAL_REMOVE_EVENT to
 * the child dev_info so the target driver learns the device is gone.
 *
 * Must be entered with neither tgt_mutex nor lun_mutex held: both are
 * acquired here and released on every path (note the asymmetric
 * release order in the two branches below).
 */
static void
fcp_update_offline_flags(struct fcp_lun *plun)
{
	struct fcp_port *pptr = LUN_PORT;
	ASSERT(plun != NULL);

	mutex_enter(&LUN_TGT->tgt_mutex);
	plun->lun_state |= FCP_LUN_OFFLINE;
	plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);

	mutex_enter(&plun->lun_mutex);
	if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
		dev_info_t *cdip = NULL;

		mutex_exit(&LUN_TGT->tgt_mutex);

		/* Resolve the child dev_info (direct or via MPxIO client). */
		if (plun->lun_mpxio == 0) {
			cdip = DIP(plun->lun_cip);
		} else if (plun->lun_cip) {
			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
		}

		mutex_exit(&plun->lun_mutex);
		if (cdip) {
			(void) ndi_event_retrieve_cookie(
			    pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
			    &fcp_remove_eid, NDI_EVENT_NOPASS);
			(void) ndi_event_run_callbacks(
			    pptr->port_ndi_event_hdl, cdip,
			    fcp_remove_eid, NULL);
		}
	} else {
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&LUN_TGT->tgt_mutex);
	}
}
8581 8582
8582 8583
8583 8584 /*
8584 8585 * Scan all of the command pkts for this port, moving pkts that
8585 8586 * match our LUN onto our own list (headed by "head")
8586 8587 */
8587 8588 static struct fcp_pkt *
8588 8589 fcp_scan_commands(struct fcp_lun *plun)
8589 8590 {
8590 8591 struct fcp_port *pptr = LUN_PORT;
8591 8592
8592 8593 struct fcp_pkt *cmd = NULL; /* pkt cmd ptr */
8593 8594 struct fcp_pkt *ncmd = NULL; /* next pkt ptr */
8594 8595 struct fcp_pkt *pcmd = NULL; /* the previous command */
8595 8596
8596 8597 struct fcp_pkt *head = NULL; /* head of our list */
8597 8598 struct fcp_pkt *tail = NULL; /* tail of our list */
8598 8599
8599 8600 int cmds_found = 0;
8600 8601
8601 8602 mutex_enter(&pptr->port_pkt_mutex);
8602 8603 for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
8603 8604 struct fcp_lun *tlun =
8604 8605 ADDR2LUN(&cmd->cmd_pkt->pkt_address);
8605 8606
8606 8607 ncmd = cmd->cmd_next; /* set next command */
8607 8608
8608 8609 /*
8609 8610 * if this pkt is for a different LUN or the
8610 8611 * command is sent down, skip it.
8611 8612 */
8612 8613 if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
8613 8614 (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
8614 8615 pcmd = cmd;
8615 8616 continue;
8616 8617 }
8617 8618 cmds_found++;
8618 8619 if (pcmd != NULL) {
8619 8620 ASSERT(pptr->port_pkt_head != cmd);
8620 8621 pcmd->cmd_next = cmd->cmd_next;
8621 8622 } else {
8622 8623 ASSERT(cmd == pptr->port_pkt_head);
8623 8624 pptr->port_pkt_head = cmd->cmd_next;
8624 8625 }
8625 8626
8626 8627 if (cmd == pptr->port_pkt_tail) {
8627 8628 pptr->port_pkt_tail = pcmd;
8628 8629 if (pcmd) {
8629 8630 pcmd->cmd_next = NULL;
8630 8631 }
8631 8632 }
8632 8633
8633 8634 if (head == NULL) {
8634 8635 head = tail = cmd;
8635 8636 } else {
8636 8637 ASSERT(tail != NULL);
8637 8638
8638 8639 tail->cmd_next = cmd;
8639 8640 tail = cmd;
8640 8641 }
8641 8642 cmd->cmd_next = NULL;
8642 8643 }
8643 8644 mutex_exit(&pptr->port_pkt_mutex);
8644 8645
8645 8646 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8646 8647 fcp_trace, FCP_BUF_LEVEL_8, 0,
8647 8648 "scan commands: %d cmd(s) found", cmds_found);
8648 8649
8649 8650 return (head);
8650 8651 }
8651 8652
8652 8653
8653 8654 /*
8654 8655 * Abort all the commands in the command queue
8655 8656 */
8656 8657 static void
8657 8658 fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
8658 8659 {
8659 8660 struct fcp_pkt *cmd = NULL; /* pkt cmd ptr */
8660 8661 struct fcp_pkt *ncmd = NULL; /* next pkt ptr */
8661 8662
8662 8663 ASSERT(mutex_owned(&pptr->port_mutex));
8663 8664
8664 8665 /* scan through the pkts and invalid them */
8665 8666 for (cmd = head; cmd != NULL; cmd = ncmd) {
8666 8667 struct scsi_pkt *pkt = cmd->cmd_pkt;
8667 8668
8668 8669 ncmd = cmd->cmd_next;
8669 8670 ASSERT(pkt != NULL);
8670 8671
8671 8672 /*
8672 8673 * The lun is going to be marked offline. Indicate
8673 8674 * the target driver not to requeue or retry this command
8674 8675 * as the device is going to be offlined pretty soon.
8675 8676 */
8676 8677 pkt->pkt_reason = CMD_DEV_GONE;
8677 8678 pkt->pkt_statistics = 0;
8678 8679 pkt->pkt_state = 0;
8679 8680
8680 8681 /* reset cmd flags/state */
8681 8682 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
8682 8683 cmd->cmd_state = FCP_PKT_IDLE;
8683 8684
8684 8685 /*
8685 8686 * ensure we have a packet completion routine,
8686 8687 * then call it.
8687 8688 */
8688 8689 ASSERT(pkt->pkt_comp != NULL);
8689 8690
8690 8691 mutex_exit(&pptr->port_mutex);
8691 8692 fcp_post_callback(cmd);
8692 8693 mutex_enter(&pptr->port_mutex);
8693 8694 }
8694 8695 }
8695 8696
8696 8697
8697 8698 /*
8698 8699 * the pkt_comp callback for command packets
8699 8700 */
8700 8701 static void
8701 8702 fcp_cmd_callback(fc_packet_t *fpkt)
8702 8703 {
8703 8704 struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
8704 8705 struct scsi_pkt *pkt = cmd->cmd_pkt;
8705 8706 struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
8706 8707
8707 8708 ASSERT(cmd->cmd_state != FCP_PKT_IDLE);
8708 8709
8709 8710 if (cmd->cmd_state == FCP_PKT_IDLE) {
8710 8711 cmn_err(CE_PANIC, "Packet already completed %p",
8711 8712 (void *)cmd);
8712 8713 }
8713 8714
8714 8715 /*
8715 8716 * Watch thread should be freeing the packet, ignore the pkt.
8716 8717 */
8717 8718 if (cmd->cmd_state == FCP_PKT_ABORTING) {
8718 8719 fcp_log(CE_CONT, pptr->port_dip,
8719 8720 "!FCP: Pkt completed while aborting\n");
8720 8721 return;
8721 8722 }
8722 8723 cmd->cmd_state = FCP_PKT_IDLE;
8723 8724
8724 8725 fcp_complete_pkt(fpkt);
8725 8726
8726 8727 #ifdef DEBUG
8727 8728 mutex_enter(&pptr->port_pkt_mutex);
8728 8729 pptr->port_npkts--;
8729 8730 mutex_exit(&pptr->port_pkt_mutex);
8730 8731 #endif /* DEBUG */
8731 8732
8732 8733 fcp_post_callback(cmd);
8733 8734 }
8734 8735
8735 8736
/*
 * Translate the FCA/transport completion status in fpkt into scsi_pkt
 * pkt_state/pkt_reason/pkt_statistics/pkt_resid values that SCSI
 * target drivers understand.
 *
 * On transport success (FC_PKT_SUCCESS) the FCP response IU is decoded:
 * residuals are propagated, FCP response-info errors are reported, and
 * autosense data is copied into the scsi_arq_status area; sense keys
 * indicating "report luns changed"/"LU not supported" kick off LUN
 * rediscovery.  On transport failure, each fc pkt_state/pkt_reason
 * pair is mapped onto the closest scsi_pkt equivalent.
 */
static void
fcp_complete_pkt(fc_packet_t *fpkt)
{
	int error = 0;
	struct fcp_pkt *cmd = (struct fcp_pkt *)
	    fpkt->pkt_ulp_private;
	struct scsi_pkt *pkt = cmd->cmd_pkt;
	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
	struct fcp_lun *plun;
	struct fcp_tgt *ptgt;
	struct fcp_rsp *rsp;
	struct scsi_address save;

#ifdef	DEBUG
	/* Saved only to assert the address is not clobbered below. */
	save = pkt->pkt_address;
#endif /* DEBUG */

	rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;

	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
		if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));
		}

		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS;

		pkt->pkt_resid = 0;

		if (fpkt->pkt_datalen) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
			if (fpkt->pkt_data_resid) {
				error++;
			}
		}

		if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
		    rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
			/*
			 * The next two checks make sure that if there
			 * is no sense data or a valid response and
			 * the command came back with check condition,
			 * the command should be retried.
			 */
			if (!rsp->fcp_u.fcp_status.rsp_len_set &&
			    !rsp->fcp_u.fcp_status.sense_len_set) {
				pkt->pkt_state &= ~STATE_XFERRED_DATA;
				pkt->pkt_resid = cmd->cmd_dmacount;
			}
		}

		/* Clean success with no residual: nothing more to decode. */
		if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
			return;
		}

		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		/*
		 * Update the transfer resid, if appropriate
		 */
		if (rsp->fcp_u.fcp_status.resid_over ||
		    rsp->fcp_u.fcp_status.resid_under) {
			pkt->pkt_resid = rsp->fcp_resid;
		}

		/*
		 * First see if we got a FCP protocol error.
		 */
		if (rsp->fcp_u.fcp_status.rsp_len_set) {
			struct fcp_rsp_info *bep;
			bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
			    sizeof (struct fcp_rsp));

			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len, rsp->fcp_sense_len);

				return;
			}

			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp), bep,
				    fpkt->pkt_resp_acc,
				    sizeof (struct fcp_rsp_info));
			}

			if (bep->rsp_code != FCP_NO_FAILURE) {
				child_info_t *cip;

				pkt->pkt_reason = CMD_TRAN_ERR;

				mutex_enter(&plun->lun_mutex);
				cip = plun->lun_cip;
				mutex_exit(&plun->lun_mutex);

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "FCP response error on cmd=%p"
				    " target=0x%x, cip=%p", cmd,
				    ptgt->tgt_d_id, cip);
			}
		}

		/*
		 * See if we got a SCSI error with sense data
		 */
		if (rsp->fcp_u.fcp_status.sense_len_set) {
			uchar_t rqlen;
			caddr_t sense_from;
			child_info_t *cip;
			timeout_id_t tid;
			struct scsi_arq_status *arq;
			struct scsi_extended_sense *sense_to;

			arq = (struct scsi_arq_status *)pkt->pkt_scbp;
			sense_to = &arq->sts_sensedata;

			rqlen = (uchar_t)min(rsp->fcp_sense_len,
			    sizeof (struct scsi_extended_sense));

			sense_from = (caddr_t)fpkt->pkt_resp +
			    sizeof (struct fcp_rsp) + rsp->fcp_response_len;

			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len, rsp->fcp_sense_len);

				return;
			}

			/*
			 * copy in sense information
			 */
			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(sense_from, sense_to,
				    fpkt->pkt_resp_acc, rqlen);
			} else {
				bcopy(sense_from, sense_to, rqlen);
			}

			/*
			 * Sense data saying the LUN inventory changed:
			 * schedule a reconfiguration unless one is
			 * already pending (tgt_tid non-NULL).
			 */
			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
			    (FCP_SENSE_NO_LUN(sense_to))) {
				mutex_enter(&ptgt->tgt_mutex);
				if (ptgt->tgt_tid == NULL) {
					/*
					 * Kick off rediscovery
					 */
					tid = timeout(fcp_reconfigure_luns,
					    (caddr_t)ptgt, drv_usectohz(1));

					ptgt->tgt_tid = tid;
					ptgt->tgt_state |= FCP_TGT_BUSY;
				}
				mutex_exit(&ptgt->tgt_mutex);
				if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
					FCP_TRACE(fcp_logq, pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: Report Lun Has Changed"
					    " target=%x", ptgt->tgt_d_id);
				} else if (FCP_SENSE_NO_LUN(sense_to)) {
					FCP_TRACE(fcp_logq, pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: LU Not Supported"
					    " target=%x", ptgt->tgt_d_id);
				}
			}
			ASSERT(pkt->pkt_scbp != NULL);

			/* Fill in the auto-request-sense status area. */
			pkt->pkt_state |= STATE_ARQ_DONE;

			arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;

			*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
			arq->sts_rqpkt_reason = 0;
			arq->sts_rqpkt_statistics = 0;

			arq->sts_rqpkt_state = STATE_GOT_BUS |
			    STATE_GOT_TARGET | STATE_SENT_CMD |
			    STATE_GOT_STATUS | STATE_ARQ_DONE |
			    STATE_XFERRED_DATA;

			mutex_enter(&plun->lun_mutex);
			cip = plun->lun_cip;
			mutex_exit(&plun->lun_mutex);

			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_8, 0,
			    "SCSI Check condition on cmd=%p target=0x%x"
			    " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
			    " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
			    cmd->cmd_fcp_cmd.fcp_cdb[0],
			    rsp->fcp_u.fcp_status.scsi_status,
			    sense_to->es_key, sense_to->es_add_code,
			    sense_to->es_qual_code);
		}
	} else {
		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		/*
		 * Work harder to translate errors into target driver
		 * understandable ones. Note with despair that the target
		 * drivers don't decode pkt_state and pkt_reason exhaustively
		 * They resort to using the big hammer most often, which
		 * may not get fixed in the life time of this driver.
		 */
		pkt->pkt_state = 0;
		pkt->pkt_statistics = 0;

		switch (fpkt->pkt_state) {
		case FC_PKT_TRAN_ERROR:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OVERRUN:
				pkt->pkt_reason = CMD_CMD_OVR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_XCHG_BSY: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT;	/* Lie */

				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_ABORTED:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_ABORT_FAILED:
				pkt->pkt_reason = CMD_ABORT_FAIL;
				break;

			case FC_REASON_NO_SEQ_INIT:
			case FC_REASON_CRC_ERROR:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;
			default:
				pkt->pkt_reason = CMD_TRAN_ERR;
				break;
			}
			break;

		case FC_PKT_PORT_OFFLINE: {
			dev_info_t *cdip = NULL;
			caddr_t ptr;

			if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_8, 0,
				    "SCSI cmd; LOGIN REQUIRED from FCA for %x",
				    ptgt->tgt_d_id);
			}

			/* Tell the child node the device went away. */
			mutex_enter(&plun->lun_mutex);
			if (plun->lun_mpxio == 0) {
				cdip = DIP(plun->lun_cip);
			} else if (plun->lun_cip) {
				cdip = mdi_pi_get_client(PIP(plun->lun_cip));
			}

			mutex_exit(&plun->lun_mutex);

			if (cdip) {
				(void) ndi_event_retrieve_cookie(
				    pptr->port_ndi_event_hdl, cdip,
				    FCAL_REMOVE_EVENT, &fcp_remove_eid,
				    NDI_EVENT_NOPASS);
				(void) ndi_event_run_callbacks(
				    pptr->port_ndi_event_hdl, cdip,
				    fcp_remove_eid, NULL);
			}

			/*
			 * If the link goes off-line for a lip,
			 * this will cause a error to the ST SG
			 * SGEN drivers. By setting BUSY we will
			 * give the drivers the chance to retry
			 * before it blows of the job. ST will
			 * remember how many times it has retried.
			 */

			if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
			    (plun->lun_type == DTYPE_CHANGER)) {
				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
			} else {
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;
			}
			break;
		}

		case FC_PKT_TRAN_BSY:
			/*
			 * Use the ssd Qfull handling here.
			 */
			*pkt->pkt_scbp = STATUS_INTERMEDIATE;
			pkt->pkt_state = STATE_GOT_BUS;
			break;

		case FC_PKT_TIMEOUT:
			pkt->pkt_reason = CMD_TIMEOUT;
			if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
				pkt->pkt_statistics |= STAT_TIMEOUT;
			} else {
				pkt->pkt_statistics |= STAT_ABORTED;
			}
			break;

		case FC_PKT_LOCAL_RJT:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OFFLINE: {
				dev_info_t *cdip = NULL;

				mutex_enter(&plun->lun_mutex);
				if (plun->lun_mpxio == 0) {
					cdip = DIP(plun->lun_cip);
				} else if (plun->lun_cip) {
					cdip = mdi_pi_get_client(
					    PIP(plun->lun_cip));
				}
				mutex_exit(&plun->lun_mutex);

				if (cdip) {
					(void) ndi_event_retrieve_cookie(
					    pptr->port_ndi_event_hdl, cdip,
					    FCAL_REMOVE_EVENT,
					    &fcp_remove_eid,
					    NDI_EVENT_NOPASS);
					(void) ndi_event_run_callbacks(
					    pptr->port_ndi_event_hdl,
					    cdip, fcp_remove_eid, NULL);
				}

				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;

				break;
			}

			case FC_REASON_NOMEM:
			case FC_REASON_QFULL: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_DMA_ERROR:
				pkt->pkt_reason = CMD_DMA_DERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_CRC_ERROR:
			case FC_REASON_UNDERRUN: {
				uchar_t status;
				/*
				 * Work around for Bugid: 4240945.
				 * IB on A5k doesn't set the Underrun bit
				 * in the fcp status, when it is transferring
				 * less than requested amount of data. Work
				 * around the ses problem to keep luxadm
				 * happy till ibfirmware is fixed.
				 */
				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
					FCP_CP_IN(fpkt->pkt_resp, rsp,
					    fpkt->pkt_resp_acc,
					    sizeof (struct fcp_rsp));
				}
				status = rsp->fcp_u.fcp_status.scsi_status;
				if (((plun->lun_type & DTYPE_MASK) ==
				    DTYPE_ESI) && (status == STATUS_GOOD)) {
					pkt->pkt_reason = CMD_CMPLT;
					*pkt->pkt_scbp = status;
					pkt->pkt_resid = 0;
				} else {
					pkt->pkt_reason = CMD_TRAN_ERR;
					pkt->pkt_statistics |= STAT_ABORTED;
				}
				break;
			}

			case FC_REASON_NO_CONNECTION:
			case FC_REASON_UNSUPPORTED:
			case FC_REASON_ILLEGAL_REQ:
			case FC_REASON_BAD_SID:
			case FC_REASON_DIAG_BUSY:
			case FC_REASON_FCAL_OPN_FAIL:
			case FC_REASON_BAD_XID:
			default:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			}
			break;

		case FC_PKT_NPORT_RJT:
		case FC_PKT_FABRIC_RJT:
		case FC_PKT_NPORT_BSY:
		case FC_PKT_FABRIC_BSY:
		default:
			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_8, 0,
			    "FC Status 0x%x, reason 0x%x",
			    fpkt->pkt_state, fpkt->pkt_reason);
			pkt->pkt_reason = CMD_TRAN_ERR;
			pkt->pkt_statistics |= STAT_ABORTED;
			break;
		}

		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_9, 0,
		    "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
		    " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
		    fpkt->pkt_reason);
	}

	ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
}
9199 9200
9200 9201
9201 9202 static int
9202 9203 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9203 9204 {
9204 9205 if (rsp->reserved_0 || rsp->reserved_1 ||
9205 9206 rsp->fcp_u.fcp_status.reserved_0 ||
9206 9207 rsp->fcp_u.fcp_status.reserved_1) {
9207 9208 /*
9208 9209 * These reserved fields should ideally be zero. FCP-2 does say
9209 9210 * that the recipient need not check for reserved fields to be
9210 9211 * zero. If they are not zero, we will not make a fuss about it
9211 9212 * - just log it (in debug to both trace buffer and messages
9212 9213 * file and to trace buffer only in non-debug) and move on.
9213 9214 *
9214 9215 * Non-zero reserved fields were seen with minnows.
9215 9216 *
9216 9217 * qlc takes care of some of this but we cannot assume that all
9217 9218 * FCAs will do so.
9218 9219 */
9219 9220 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9220 9221 FCP_BUF_LEVEL_5, 0,
9221 9222 "Got fcp response packet with non-zero reserved fields "
9222 9223 "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9223 9224 "status.reserved_0:0x%x, status.reserved_1:0x%x",
9224 9225 rsp->reserved_0, rsp->reserved_1,
9225 9226 rsp->fcp_u.fcp_status.reserved_0,
9226 9227 rsp->fcp_u.fcp_status.reserved_1);
9227 9228 }
9228 9229
9229 9230 if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9230 9231 (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9231 9232 return (FC_FAILURE);
9232 9233 }
9233 9234
9234 9235 if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9235 9236 (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9236 9237 sizeof (struct fcp_rsp))) {
9237 9238 return (FC_FAILURE);
9238 9239 }
9239 9240
9240 9241 return (FC_SUCCESS);
9241 9242 }
9242 9243
9243 9244
9244 9245 /*
9245 9246 * This is called when there is a change the in device state. The case we're
9246 9247 * handling here is, if the d_id s does not match, offline this tgt and online
9247 9248 * a new tgt with the new d_id. called from fcp_handle_devices with
9248 9249 * port_mutex held.
9249 9250 */
9250 9251 static int
9251 9252 fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
9252 9253 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
9253 9254 {
9254 9255 ASSERT(mutex_owned(&pptr->port_mutex));
9255 9256
9256 9257 FCP_TRACE(fcp_logq, pptr->port_instbuf,
9257 9258 fcp_trace, FCP_BUF_LEVEL_3, 0,
9258 9259 "Starting fcp_device_changed...");
9259 9260
9260 9261 /*
9261 9262 * The two cases where the port_device_changed is called is
9262 9263 * either it changes it's d_id or it's hard address.
9263 9264 */
9264 9265 if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
9265 9266 (FC_TOP_EXTERNAL(pptr->port_topology) &&
9266 9267 (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {
9267 9268
9268 9269 /* offline this target */
9269 9270 mutex_enter(&ptgt->tgt_mutex);
9270 9271 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
9271 9272 (void) fcp_offline_target(pptr, ptgt, link_cnt,
9272 9273 0, 1, NDI_DEVI_REMOVE);
9273 9274 }
9274 9275 mutex_exit(&ptgt->tgt_mutex);
9275 9276
9276 9277 fcp_log(CE_NOTE, pptr->port_dip,
9277 9278 "Change in target properties: Old D_ID=%x New D_ID=%x"
9278 9279 " Old HA=%x New HA=%x", ptgt->tgt_d_id,
9279 9280 map_entry->map_did.port_id, ptgt->tgt_hard_addr,
9280 9281 map_entry->map_hard_addr.hard_addr);
9281 9282 }
9282 9283
9283 9284 return (fcp_handle_mapflags(pptr, ptgt, map_entry,
9284 9285 link_cnt, tgt_cnt, cause));
9285 9286 }
9286 9287
9287 9288 /*
9288 9289 * Function: fcp_alloc_lun
9289 9290 *
9290 9291 * Description: Creates a new lun structure and adds it to the list
9291 9292 * of luns of the target.
9292 9293 *
9293 9294 * Argument: ptgt Target the lun will belong to.
9294 9295 *
9295 9296 * Return Value: NULL Failed
9296 9297 * Not NULL Succeeded
9297 9298 *
9298 9299 * Context: Kernel context
9299 9300 */
9300 9301 static struct fcp_lun *
9301 9302 fcp_alloc_lun(struct fcp_tgt *ptgt)
9302 9303 {
9303 9304 struct fcp_lun *plun;
9304 9305
9305 9306 plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9306 9307 if (plun != NULL) {
9307 9308 /*
9308 9309 * Initialize the mutex before putting in the target list
9309 9310 * especially before releasing the target mutex.
9310 9311 */
9311 9312 mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9312 9313 plun->lun_tgt = ptgt;
9313 9314
9314 9315 mutex_enter(&ptgt->tgt_mutex);
9315 9316 plun->lun_next = ptgt->tgt_lun;
9316 9317 ptgt->tgt_lun = plun;
9317 9318 plun->lun_old_guid = NULL;
9318 9319 plun->lun_old_guid_size = 0;
9319 9320 mutex_exit(&ptgt->tgt_mutex);
9320 9321 }
9321 9322
9322 9323 return (plun);
9323 9324 }
9324 9325
9325 9326 /*
9326 9327 * Function: fcp_dealloc_lun
9327 9328 *
9328 9329 * Description: Frees the LUN structure passed by the caller.
9329 9330 *
9330 9331 * Argument: plun LUN structure to free.
9331 9332 *
9332 9333 * Return Value: None
9333 9334 *
9334 9335 * Context: Kernel context.
9335 9336 */
9336 9337 static void
9337 9338 fcp_dealloc_lun(struct fcp_lun *plun)
9338 9339 {
9339 9340 mutex_enter(&plun->lun_mutex);
9340 9341 if (plun->lun_cip) {
9341 9342 fcp_remove_child(plun);
9342 9343 }
9343 9344 mutex_exit(&plun->lun_mutex);
9344 9345
9345 9346 mutex_destroy(&plun->lun_mutex);
9346 9347 if (plun->lun_guid) {
9347 9348 kmem_free(plun->lun_guid, plun->lun_guid_size);
9348 9349 }
9349 9350 if (plun->lun_old_guid) {
9350 9351 kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
9351 9352 }
9352 9353 kmem_free(plun, sizeof (*plun));
9353 9354 }
9354 9355
9355 9356 /*
9356 9357 * Function: fcp_alloc_tgt
9357 9358 *
9358 9359 * Description: Creates a new target structure and adds it to the port
9359 9360 * hash list.
9360 9361 *
9361 9362 * Argument: pptr fcp port structure
9362 9363 * *map_entry entry describing the target to create
9363 9364 * link_cnt Link state change counter
9364 9365 *
9365 9366 * Return Value: NULL Failed
9366 9367 * Not NULL Succeeded
9367 9368 *
9368 9369 * Context: Kernel context.
9369 9370 */
9370 9371 static struct fcp_tgt *
9371 9372 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
9372 9373 {
9373 9374 int hash;
9374 9375 uchar_t *wwn;
9375 9376 struct fcp_tgt *ptgt;
9376 9377
9377 9378 ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
9378 9379 if (ptgt != NULL) {
9379 9380 mutex_enter(&pptr->port_mutex);
9380 9381 if (link_cnt != pptr->port_link_cnt) {
9381 9382 /*
9382 9383 * oh oh -- another link reset
9383 9384 * in progress -- give up
9384 9385 */
9385 9386 mutex_exit(&pptr->port_mutex);
9386 9387 kmem_free(ptgt, sizeof (*ptgt));
9387 9388 ptgt = NULL;
9388 9389 } else {
9389 9390 /*
9390 9391 * initialize the mutex before putting in the port
9391 9392 * wwn list, especially before releasing the port
9392 9393 * mutex.
9393 9394 */
9394 9395 mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);
9395 9396
9396 9397 /* add new target entry to the port's hash list */
9397 9398 wwn = (uchar_t *)&map_entry->map_pwwn;
9398 9399 hash = FCP_HASH(wwn);
9399 9400
9400 9401 ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
9401 9402 pptr->port_tgt_hash_table[hash] = ptgt;
9402 9403
9403 9404 /* save cross-ptr */
9404 9405 ptgt->tgt_port = pptr;
9405 9406
9406 9407 ptgt->tgt_change_cnt = 1;
9407 9408
9408 9409 /* initialize the target manual_config_only flag */
9409 9410 if (fcp_enable_auto_configuration) {
9410 9411 ptgt->tgt_manual_config_only = 0;
9411 9412 } else {
9412 9413 ptgt->tgt_manual_config_only = 1;
9413 9414 }
9414 9415
9415 9416 mutex_exit(&pptr->port_mutex);
9416 9417 }
9417 9418 }
9418 9419
9419 9420 return (ptgt);
9420 9421 }
9421 9422
9422 9423 /*
9423 9424 * Function: fcp_dealloc_tgt
9424 9425 *
9425 9426 * Description: Frees the target structure passed by the caller.
9426 9427 *
9427 9428 * Argument: ptgt Target structure to free.
9428 9429 *
9429 9430 * Return Value: None
9430 9431 *
9431 9432 * Context: Kernel context.
9432 9433 */
9433 9434 static void
9434 9435 fcp_dealloc_tgt(struct fcp_tgt *ptgt)
9435 9436 {
9436 9437 mutex_destroy(&ptgt->tgt_mutex);
9437 9438 kmem_free(ptgt, sizeof (*ptgt));
9438 9439 }
9439 9440
9440 9441
/*
 * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry
 *
 * Device discovery commands will not be retried for-ever as
 * this will have repercussions on other devices that need to
 * be submitted to the hotplug thread. After a quick glance
 * at the SCSI-3 spec, it was found that the spec doesn't
 * mandate a forever retry, rather recommends a delayed retry.
 *
 * Since Photon IB is single threaded, STATUS_BUSY is common
 * in a 4+initiator environment. Make sure the total time
 * spent on retries (including command timeout) does not
 * exceed 60 seconds
 */
static void
fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
{
	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
	struct fcp_tgt *ptgt = icmd->ipkt_tgt;

	/* Lock order: port mutex first, then target mutex. */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
		/*
		 * A link/target state change happened since this internal
		 * packet was issued; drop it instead of queueing a retry.
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_queue_ipkt,1:state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}
	mutex_exit(&ptgt->tgt_mutex);

	/*
	 * Schedule the retry; the delay grows by one tick with each
	 * successive retry of this packet.
	 */
	icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;

	if (pptr->port_ipkt_list != NULL) {
		/* add pkt to front of doubly-linked list */
		pptr->port_ipkt_list->ipkt_prev = icmd;
		icmd->ipkt_next = pptr->port_ipkt_list;
		pptr->port_ipkt_list = icmd;
		icmd->ipkt_prev = NULL;
	} else {
		/* this is the first/only pkt on the list */
		pptr->port_ipkt_list = icmd;
		icmd->ipkt_next = NULL;
		icmd->ipkt_prev = NULL;
	}
	mutex_exit(&pptr->port_mutex);
}
9493 9494
/*
 * Function: fcp_transport
 *
 * Description: This function submits the Fibre Channel packet to the transport
 *		layer by calling fc_ulp_transport(). If fc_ulp_transport()
 *		fails the submission, the treatment depends on the value of
 *		the variable internal.
 *
 * Argument:	port_handle	fp/fctl port handle.
 *		*fpkt		Packet to submit to the transport layer.
 *		internal	Not zero when it's an internal packet.
 *
 * Return Value: FC_TRAN_BUSY
 *		FC_STATEC_BUSY
 *		FC_OFFLINE
 *		FC_LOGINREQ
 *		FC_DEVICE_BUSY
 *		FC_SUCCESS
 */
static int
fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
{
	int	rval;

	rval = fc_ulp_transport(port_handle, fpkt);
	if (rval == FC_SUCCESS) {
		return (rval);
	}

	/*
	 * LUN isn't marked BUSY or OFFLINE, so we got here to transport
	 * a command, if the underlying modules see that there is a state
	 * change, or if a port is OFFLINE, that means, that state change
	 * hasn't reached FCP yet, so re-queue the command for deferred
	 * submission.
	 */
	if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
	    (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
	    (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
		/*
		 * Defer packet re-submission. Life hang is possible on
		 * internal commands if the port driver sends FC_STATEC_BUSY
		 * for ever, but that shouldn't happen in a good environment.
		 * Limiting re-transport for internal commands is probably a
		 * good idea..
		 * A race condition can happen when a port sees barrage of
		 * link transitions offline to online. If the FCTL has
		 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
		 * internal commands should be queued to do the discovery.
		 * The race condition is when an online comes and FCP starts
		 * its internal discovery and the link goes offline. It is
		 * possible that the statec_callback has not reached FCP
		 * and FCP is carrying on with its internal discovery.
		 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
		 * that the link has gone offline. At this point FCP should
		 * drop all the internal commands and wait for the
		 * statec_callback. It will be facilitated by incrementing
		 * port_link_cnt.
		 *
		 * For external commands, the (FC)pkt_timeout is decremented
		 * by the QUEUE Delay added by our driver, Care is taken to
		 * ensure that it doesn't become zero (zero means no timeout)
		 * If the time expires right inside driver queue itself,
		 * the watch thread will return it to the original caller
		 * indicating that the command has timed-out.
		 */
		if (internal) {
			char *op;
			struct fcp_ipkt *icmd;

			/* Pick a human-readable name for error reporting. */
			icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
			switch (icmd->ipkt_opcode) {
			case SCMD_REPORT_LUN:
				op = "REPORT LUN";
				break;

			case SCMD_INQUIRY:
				op = "INQUIRY";
				break;

			case SCMD_INQUIRY_PAGE83:
				op = "INQUIRY-83";
				break;

			default:
				op = "Internal SCSI COMMAND";
				break;
			}

			if (fcp_handle_ipkt_errors(icmd->ipkt_port,
			    icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
				rval = FC_SUCCESS;
			}
		} else {
			struct fcp_pkt *cmd;
			struct fcp_port *pptr;

			cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
			cmd->cmd_state = FCP_PKT_IDLE;
			pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);

			if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
				/*
				 * Caller asked for no queueing; report the
				 * transport as busy instead of deferring.
				 */
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_9, 0,
				    "fcp_transport: xport busy for pkt %p",
				    cmd->cmd_pkt);
				rval = FC_TRAN_BUSY;
			} else {
				fcp_queue_pkt(pptr, cmd);
				rval = FC_SUCCESS;
			}
		}
	}

	return (rval);
}
9610 9611
9611 9612 /*VARARGS3*/
9612 9613 static void
9613 9614 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9614 9615 {
9615 9616 char buf[256];
9616 9617 va_list ap;
9617 9618
9618 9619 if (dip == NULL) {
9619 9620 dip = fcp_global_dip;
9620 9621 }
9621 9622
9622 9623 va_start(ap, fmt);
9623 9624 (void) vsprintf(buf, fmt, ap);
9624 9625 va_end(ap);
9625 9626
9626 9627 scsi_log(dip, "fcp", level, buf);
9627 9628 }
9628 9629
9629 9630 /*
9630 9631 * This function retries NS registry of FC4 type.
9631 9632 * It assumes that fcp_mutex is held.
9632 9633 * The function does nothing if topology is not fabric
9633 9634 * So, the topology has to be set before this function can be called
9634 9635 */
9635 9636 static void
9636 9637 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9637 9638 {
9638 9639 int rval;
9639 9640
9640 9641 ASSERT(MUTEX_HELD(&pptr->port_mutex));
9641 9642
9642 9643 if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9643 9644 ((pptr->port_topology != FC_TOP_FABRIC) &&
9644 9645 (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9645 9646 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9646 9647 pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9647 9648 }
9648 9649 return;
9649 9650 }
9650 9651 mutex_exit(&pptr->port_mutex);
9651 9652 rval = fcp_do_ns_registry(pptr, s_id);
9652 9653 mutex_enter(&pptr->port_mutex);
9653 9654
9654 9655 if (rval == 0) {
9655 9656 /* Registry successful. Reset flag */
9656 9657 pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9657 9658 }
9658 9659 }
9659 9660
9660 9661 /*
9661 9662 * This function registers the ULP with the switch by calling transport i/f
9662 9663 */
9663 9664 static int
9664 9665 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9665 9666 {
9666 9667 fc_ns_cmd_t ns_cmd;
9667 9668 ns_rfc_type_t rfc;
9668 9669 uint32_t types[8];
9669 9670
9670 9671 /*
9671 9672 * Prepare the Name server structure to
9672 9673 * register with the transport in case of
9673 9674 * Fabric configuration.
9674 9675 */
9675 9676 bzero(&rfc, sizeof (rfc));
9676 9677 bzero(types, sizeof (types));
9677 9678
9678 9679 types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9679 9680 (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
9680 9681
9681 9682 rfc.rfc_port_id.port_id = s_id;
9682 9683 bcopy(types, rfc.rfc_types, sizeof (types));
9683 9684
9684 9685 ns_cmd.ns_flags = 0;
9685 9686 ns_cmd.ns_cmd = NS_RFT_ID;
9686 9687 ns_cmd.ns_req_len = sizeof (rfc);
9687 9688 ns_cmd.ns_req_payload = (caddr_t)&rfc;
9688 9689 ns_cmd.ns_resp_len = 0;
9689 9690 ns_cmd.ns_resp_payload = NULL;
9690 9691
9691 9692 /*
9692 9693 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9693 9694 */
9694 9695 if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9695 9696 fcp_log(CE_WARN, pptr->port_dip,
9696 9697 "!ns_registry: failed name server registration");
9697 9698 return (1);
9698 9699 }
9699 9700
9700 9701 return (0);
9701 9702 }
9702 9703
/*
 * Function: fcp_handle_port_attach
 *
 * Description: This function is called from fcp_port_attach() to attach a
 *		new port. This routine does the following:
 *
 *		1) Allocates an fcp_port structure and initializes it.
 *		2) Tries to register the new FC-4 (FCP) capability with the
 *		   name server.
 *		3) Kicks off the enumeration of the targets/luns visible
 *		   through this new port.  That is done by calling
 *		   fcp_statec_callback() if the port is online.
 *
 * Argument:	ulph		fp/fctl port handle.
 *		*pinfo		Port information.
 *		s_id		Port ID.
 *		instance	Device instance number for the local port
 *				(returned by ddi_get_instance()).
 *
 * Return Value: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context:	User and Kernel context.
 */
/*ARGSUSED*/
int
fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
    uint32_t s_id, int instance)
{
	int			res = DDI_FAILURE;
	scsi_hba_tran_t		*tran;
	int			mutex_initted = FALSE;
	int			hba_attached = FALSE;
	int			soft_state_linked = FALSE;
	int			event_bind = FALSE;
	struct fcp_port		*pptr;
	fc_portmap_t		*tmp_list = NULL;
	uint32_t		max_cnt, alloc_cnt;
	uchar_t			*boot_wwn = NULL;
	uint_t			nbytes;
	int			manual_cfg;

	/*
	 * this port instance attaching for the first time (or after
	 * being detached before)
	 */
	FCP_TRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);

	if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fcp: Softstate struct alloc failed"
		    "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
		    instance);
		return (res);
	}

	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
		/* this shouldn't happen */
		ddi_soft_state_free(fcp_softstate, instance);
		cmn_err(CE_WARN, "fcp: bad soft state");
		return (res);
	}

	(void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);

	/*
	 * Make a copy of ulp_port_info as fctl allocates
	 * a temp struct.
	 */
	(void) fcp_cp_pinfo(pptr, pinfo);

	/*
	 * Check for manual_configuration_only property.
	 * Enable manual configuration if the property is
	 * set to 1, otherwise disable manual configuration.
	 */
	if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
	    MANUAL_CFG_ONLY,
	    -1)) != -1) {
		if (manual_cfg == 1) {
			char	*pathname;
			pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
			(void) ddi_pathname(pptr->port_dip, pathname);
			cmn_err(CE_NOTE,
			    "%s (%s%d) %s is enabled via %s.conf.",
			    pathname,
			    ddi_driver_name(pptr->port_dip),
			    ddi_get_instance(pptr->port_dip),
			    MANUAL_CFG_ONLY,
			    ddi_driver_name(pptr->port_dip));
			fcp_enable_auto_configuration = 0;
			kmem_free(pathname, MAXPATHLEN);
		}
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
	pptr->port_link_cnt = 1;
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
	pptr->port_id = s_id;
	pptr->port_instance = instance;
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state));
	pptr->port_state = FCP_STATE_INIT;
	if (pinfo->port_acc_attr == NULL) {
		/*
		 * The corresponding FCA doesn't support DMA at all
		 */
		pptr->port_state |= FCP_STATE_FCA_IS_NODMA;
	}

	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state));

	if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
		/*
		 * If FCA supports DMA in SCSI data phase, we need preallocate
		 * dma cookie, so stash the cookie size
		 */
		pptr->port_dmacookie_sz = sizeof (ddi_dma_cookie_t) *
		    pptr->port_data_dma_attr.dma_attr_sgllen;
	}

	/*
	 * The two mutexes of fcp_port are initialized. The variable
	 * mutex_initted is incremented to remember that fact. That variable
	 * is checked when the routine fails and the mutexes have to be
	 * destroyed.
	 */
	mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_initted++;

	/*
	 * The SCSI tran structure is allocated and initialized now.
	 */
	if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: scsi_hba_tran_alloc failed", instance);
		goto fail;
	}

	/* link in the transport structure then fill it in */
	pptr->port_tran = tran;
	tran->tran_hba_private		= pptr;
	tran->tran_tgt_init		= fcp_scsi_tgt_init;
	tran->tran_tgt_probe		= NULL;
	tran->tran_tgt_free		= fcp_scsi_tgt_free;
	tran->tran_start		= fcp_scsi_start;
	tran->tran_reset		= fcp_scsi_reset;
	tran->tran_abort		= fcp_scsi_abort;
	tran->tran_getcap		= fcp_scsi_getcap;
	tran->tran_setcap		= fcp_scsi_setcap;
	tran->tran_init_pkt		= NULL;
	tran->tran_destroy_pkt		= NULL;
	tran->tran_dmafree		= NULL;
	tran->tran_sync_pkt		= NULL;
	tran->tran_reset_notify		= fcp_scsi_reset_notify;
	tran->tran_get_bus_addr		= fcp_scsi_get_bus_addr;
	tran->tran_get_name		= fcp_scsi_get_name;
	tran->tran_clear_aca		= NULL;
	tran->tran_clear_task_set	= NULL;
	tran->tran_terminate_task	= NULL;
	tran->tran_get_eventcookie	= fcp_scsi_bus_get_eventcookie;
	tran->tran_add_eventcall	= fcp_scsi_bus_add_eventcall;
	tran->tran_remove_eventcall	= fcp_scsi_bus_remove_eventcall;
	tran->tran_post_event		= fcp_scsi_bus_post_event;
	tran->tran_quiesce		= NULL;
	tran->tran_unquiesce		= NULL;
	tran->tran_bus_reset		= NULL;
	tran->tran_bus_config		= fcp_scsi_bus_config;
	tran->tran_bus_unconfig		= fcp_scsi_bus_unconfig;
	tran->tran_bus_power		= NULL;
	tran->tran_interconnect_type	= INTERCONNECT_FABRIC;

	tran->tran_pkt_constructor	= fcp_kmem_cache_constructor;
	tran->tran_pkt_destructor	= fcp_kmem_cache_destructor;
	tran->tran_setup_pkt		= fcp_pkt_setup;
	tran->tran_teardown_pkt		= fcp_pkt_teardown;
	tran->tran_hba_len		= pptr->port_priv_pkt_len +
	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
	if (pptr->port_state & FCP_STATE_FCA_IS_NODMA) {
		/*
		 * If FCA don't support DMA, then we use different vectors to
		 * minimize the effects on DMA code flow path
		 */
		tran->tran_start	   = fcp_pseudo_start;
		tran->tran_init_pkt	   = fcp_pseudo_init_pkt;
		tran->tran_destroy_pkt	   = fcp_pseudo_destroy_pkt;
		tran->tran_sync_pkt	   = fcp_pseudo_sync_pkt;
		tran->tran_dmafree	   = fcp_pseudo_dmafree;
		tran->tran_setup_pkt	   = NULL;
		tran->tran_teardown_pkt	   = NULL;
		tran->tran_pkt_constructor = NULL;
		tran->tran_pkt_destructor  = NULL;
		pptr->port_data_dma_attr   = pseudo_fca_dma_attr;
	}

	/*
	 * Allocate an ndi event handle
	 */
	pptr->port_ndi_event_defs = (ndi_event_definition_t *)
	    kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);

	bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
	    sizeof (fcp_ndi_event_defs));

	(void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
	    &pptr->port_ndi_event_hdl, NDI_SLEEP);

	pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
	pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
	pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;

	if (DEVI_IS_ATTACHING(pptr->port_dip) &&
	    (ndi_event_bind_set(pptr->port_ndi_event_hdl,
	    &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
		goto fail;
	}
	event_bind++;	/* Checked in fail case */

	if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
	    tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
	    != DDI_SUCCESS) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: scsi_hba_attach_setup failed", instance);
		goto fail;
	}
	hba_attached++;	/* Checked in fail case */

	pptr->port_mpxio = 0;
	if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
	    MDI_SUCCESS) {
		pptr->port_mpxio++;
	}

	/*
	 * The following code is putting the new port structure in the global
	 * list of ports and, if it is the first port to attach, it starts the
	 * fcp_watchdog_tick.
	 *
	 * Why put this new port in the global before we are done attaching it?
	 * We are actually making the structure globally known before we are
	 * done attaching it. The reason for that is: because of the code that
	 * follows. At this point the resources to handle the port are
	 * allocated. This function is now going to do the following:
	 *
	 * 1) It is going to try to register with the name server advertising
	 *    the new FCP capability of the port.
	 * 2) It is going to play the role of the fp/fctl layer by building
	 *    a list of worldwide names reachable through this port and call
	 *    itself on fcp_statec_callback().	That requires the port to
	 *    be part of the global list.
	 */
	mutex_enter(&fcp_global_mutex);
	if (fcp_port_head == NULL) {
		fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
	}
	pptr->port_next = fcp_port_head;
	fcp_port_head = pptr;
	soft_state_linked++;

	if (fcp_watchdog_init++ == 0) {
		fcp_watchdog_tick = fcp_watchdog_timeout *
		    drv_usectohz(1000000);
		fcp_watchdog_id = timeout(fcp_watch, NULL,
		    fcp_watchdog_tick);
	}
	mutex_exit(&fcp_global_mutex);

	/*
	 * Here an attempt is made to register with the name server, the new
	 * FCP capability. That is done using an RFT_ID to the name server.
	 * It is done synchronously. The function fcp_do_ns_registry()
	 * doesn't return till the name server responded.
	 * On failures, just ignore it for now and it will get retried during
	 * state change callbacks. We'll set a flag to show this failure
	 */
	if (fcp_do_ns_registry(pptr, s_id)) {
		mutex_enter(&pptr->port_mutex);
		pptr->port_state |= FCP_STATE_NS_REG_FAILED;
		mutex_exit(&pptr->port_mutex);
	} else {
		mutex_enter(&pptr->port_mutex);
		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
		mutex_exit(&pptr->port_mutex);
	}

	/*
	 * Lookup for boot WWN property
	 */
	if (modrootloaded != 1) {
		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
		    ddi_get_parent(pinfo->port_dip),
		    DDI_PROP_DONTPASS, OBP_BOOT_WWN,
		    &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
		    (nbytes == FC_WWN_SIZE)) {
			bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
		}
		if (boot_wwn) {
			ddi_prop_free(boot_wwn);
		}
	}

	/*
	 * Handle various topologies and link states.
	 */
	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
	case FC_STATE_OFFLINE:

		/*
		 * we're attaching a port where the link is offline
		 *
		 * Wait for ONLINE, at which time a state
		 * change will cause a statec_callback
		 *
		 * in the mean time, do not do anything
		 */
		res = DDI_SUCCESS;
		pptr->port_state |= FCP_STATE_OFFLINE;
		break;

	case FC_STATE_ONLINE: {
		if (pptr->port_topology == FC_TOP_UNKNOWN) {
			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
			res = DDI_SUCCESS;
			break;
		}
		/*
		 * discover devices and create nodes (a private
		 * loop or point-to-point)
		 */
		ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);

		/*
		 * At this point we are going to build a list of all the ports
		 * that can be reached through this local port. It looks like
		 * we cannot handle more than FCP_MAX_DEVICES per local port
		 * (128).
		 */
		if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
		    sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
		    KM_NOSLEEP)) == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!fcp%d: failed to allocate portmap",
			    instance);
			goto fail;
		}

		/*
		 * fc_ulp_getportmap() is going to provide us with the list of
		 * remote ports in the buffer we just allocated.  The way the
		 * list is going to be retrieved depends on the topology.
		 * However, if we are connected to a Fabric, a name server
		 * request may be sent to get the list of FCP capable ports.
		 * It should be noted that in that case the request is
		 * synchronous.  This means we are stuck here till the name
		 * server replies.  A lot of things can change during that time
		 * and including, may be, being called on
		 * fcp_statec_callback() for different reasons. I'm not sure
		 * the code can handle that.
		 */
		max_cnt = FCP_MAX_DEVICES;
		alloc_cnt = FCP_MAX_DEVICES;
		if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
		    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
		    FC_SUCCESS) {
			caddr_t msg;

			(void) fc_ulp_error(res, &msg);

			/*
			 * this just means the transport is
			 * busy perhaps building a portmap so,
			 * for now, succeed this port attach
			 * when the transport has a new map,
			 * it'll send us a state change then
			 */
			fcp_log(CE_WARN, pptr->port_dip,
			    "!failed to get port map : %s", msg);

			res = DDI_SUCCESS;
			break;	/* go return result */
		}
		if (max_cnt > alloc_cnt) {
			alloc_cnt = max_cnt;
		}

		/*
		 * We are now going to call fcp_statec_callback() ourselves.
		 * By issuing this call we are trying to kick off the enumera-
		 * tion process.
		 */
		/*
		 * let the state change callback do the SCSI device
		 * discovery and create the devinfos
		 */
		fcp_statec_callback(ulph, pptr->port_fp_handle,
		    pptr->port_phys_state, pptr->port_topology, tmp_list,
		    max_cnt, pptr->port_id);

		res = DDI_SUCCESS;
		break;
	}

	default:
		/* unknown port state */
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: invalid port state at attach=0x%x",
		    instance, pptr->port_phys_state);

		/*
		 * NOTE(review): an FCP_STATE_* value is stored into
		 * port_phys_state, which otherwise holds FC_STATE_*
		 * values -- looks like FC_STATE_OFFLINE may have been
		 * intended; confirm before changing.
		 */
		mutex_enter(&pptr->port_mutex);
		pptr->port_phys_state = FCP_STATE_OFFLINE;
		mutex_exit(&pptr->port_mutex);

		res = DDI_SUCCESS;
		break;
	}

	/* free temp list if used */
	if (tmp_list != NULL) {
		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
	}

	/* note the attach time */
	pptr->port_attach_time = ddi_get_lbolt64();

	/* all done */
	return (res);

	/* a failure we have to clean up after */
fail:
	fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");

	if (soft_state_linked) {
		/* remove this fcp_port from the linked list */
		(void) fcp_soft_state_unlink(pptr);
	}

	/* unbind and free event set */
	if (pptr->port_ndi_event_hdl) {
		if (event_bind) {
			(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
			    &pptr->port_ndi_events, NDI_SLEEP);
		}
		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
	}

	if (pptr->port_ndi_event_defs) {
		(void) kmem_free(pptr->port_ndi_event_defs,
		    sizeof (fcp_ndi_event_defs));
	}

	/*
	 * Clean up mpxio stuff
	 */
	if (pptr->port_mpxio) {
		(void) mdi_phci_unregister(pptr->port_dip, 0);
		pptr->port_mpxio--;
	}

	/* undo SCSI HBA setup */
	if (hba_attached) {
		(void) scsi_hba_detach(pptr->port_dip);
	}
	if (pptr->port_tran != NULL) {
		scsi_hba_tran_free(pptr->port_tran);
	}

	mutex_enter(&fcp_global_mutex);

	/*
	 * We check soft_state_linked, because it is incremented right before
	 * we call increment fcp_watchdog_init. Therefore, we know if
	 * soft_state_linked is still FALSE, we do not want to decrement
	 * fcp_watchdog_init or possibly call untimeout.
	 */

	if (soft_state_linked) {
		if (--fcp_watchdog_init == 0) {
			timeout_id_t	tid = fcp_watchdog_id;

			mutex_exit(&fcp_global_mutex);
			(void) untimeout(tid);
		} else {
			mutex_exit(&fcp_global_mutex);
		}
	} else {
		mutex_exit(&fcp_global_mutex);
	}

	if (mutex_initted) {
		mutex_destroy(&pptr->port_mutex);
		mutex_destroy(&pptr->port_pkt_mutex);
	}

	if (tmp_list != NULL) {
		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
	}

	/* this makes pptr invalid */
	ddi_soft_state_free(fcp_softstate, instance);

	return (DDI_FAILURE);
}
10205 10206
10206 10207
/*
 * Function: fcp_handle_port_detach
 *
 * Description: Handles suspend/power-down/detach of a port. Marks the port
 *		with the requested state flag, waits (bounded) for in-flight
 *		reconfiguration/internal packets to drain, takes the port
 *		offline and, for a full detach, unlinks and cleans up the
 *		port structure.
 *
 * Argument:	pptr		fcp port structure
 *		flag		FCP_STATE_DETACHING, or a suspend/power-down
 *				state flag
 *		instance	local port instance number
 *
 * Return Value: FC_SUCCESS
 *		 FC_FAILURE	port still busy or in MDI pathinfo state change
 *
 * Context:	Kernel context.
 */
static int
fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
{
	int count = 0;

	mutex_enter(&pptr->port_mutex);

	/*
	 * if the port is powered down or suspended, nothing else
	 * to do; just return.
	 */
	if (flag != FCP_STATE_DETACHING) {
		if (pptr->port_state & (FCP_STATE_POWER_DOWN |
		    FCP_STATE_SUSPENDED)) {
			pptr->port_state |= flag;
			mutex_exit(&pptr->port_mutex);
			return (FC_SUCCESS);
		}
	}

	/* Cannot proceed while an MDI pathinfo state change is in flight. */
	if (pptr->port_state & FCP_STATE_IN_MDI) {
		mutex_exit(&pptr->port_mutex);
		return (FC_FAILURE);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "fcp_handle_port_detach: port is detaching");

	pptr->port_state |= flag;

	/*
	 * Wait for any ongoing reconfig/ipkt to complete, that
	 * ensures the freeing to targets/luns is safe.
	 * No more ref to this port should happen from statec/ioctl
	 * after that as it was removed from the global port list.
	 */
	while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
		/*
		 * Let's give sufficient time for reconfig/ipkt
		 * to complete.
		 */
		if (count++ >= FCP_ICMD_DEADLINE) {
			break;
		}
		/* Drop the lock for the 1-second sleep between polls. */
		mutex_exit(&pptr->port_mutex);
		delay(drv_usectohz(1000000));
		mutex_enter(&pptr->port_mutex);
	}

	/*
	 * if the driver is still busy then fail to
	 * suspend/power down.
	 */
	if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
		pptr->port_state &= ~flag;
		mutex_exit(&pptr->port_mutex);
		return (FC_FAILURE);
	}

	if (flag == FCP_STATE_DETACHING) {
		/* Remove the port from the global list before teardown. */
		pptr = fcp_soft_state_unlink(pptr);
		ASSERT(pptr != NULL);
	}

	/* Invalidate outstanding discovery and mark the port offline. */
	pptr->port_link_cnt++;
	pptr->port_state |= FCP_STATE_OFFLINE;
	pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);

	fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
	    FCP_CAUSE_LINK_DOWN);
	mutex_exit(&pptr->port_mutex);

	/* kill watch dog timer if we're the last */
	mutex_enter(&fcp_global_mutex);
	if (--fcp_watchdog_init == 0) {
		timeout_id_t tid = fcp_watchdog_id;
		mutex_exit(&fcp_global_mutex);
		(void) untimeout(tid);
	} else {
		mutex_exit(&fcp_global_mutex);
	}

	/* clean up the port structures */
	if (flag == FCP_STATE_DETACHING) {
		fcp_cleanup_port(pptr, instance);
	}

	return (FC_SUCCESS);
}
10299 10300
10300 10301
/*
 * Function: fcp_cleanup_port
 *
 * Description: Releases all resources held by a port as the final step
 *		of detach: NDI event set and handle, targets/luns and
 *		their devinfos, the MPxIO pHCI registration, the SCSA
 *		transport, the port mutexes, and finally the soft state
 *		itself.  'pptr' is invalid on return.
 *
 * Argument:	*pptr		FCP port to tear down.
 *		instance	Soft state instance to free.
 *
 * Return Value: None
 */
static void
fcp_cleanup_port(struct fcp_port *pptr, int instance)
{
	ASSERT(pptr != NULL);

	/* unbind and free event set */
	if (pptr->port_ndi_event_hdl) {
		(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
		    &pptr->port_ndi_events, NDI_SLEEP);
		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
	}

	if (pptr->port_ndi_event_defs) {
		(void) kmem_free(pptr->port_ndi_event_defs,
		    sizeof (fcp_ndi_event_defs));
	}

	/* free the lun/target structures and devinfos */
	fcp_free_targets(pptr);

	/*
	 * Clean up mpxio stuff
	 */
	if (pptr->port_mpxio) {
		(void) mdi_phci_unregister(pptr->port_dip, 0);
		pptr->port_mpxio--;
	}

	/* clean up SCSA stuff */
	(void) scsi_hba_detach(pptr->port_dip);
	if (pptr->port_tran != NULL) {
		scsi_hba_tran_free(pptr->port_tran);
	}

#ifdef KSTATS_CODE
	/* clean up kstats */
	if (pptr->fcp_ksp != NULL) {
		kstat_delete(pptr->fcp_ksp);
	}
#endif

	/* clean up soft state mutexes/condition variables */
	mutex_destroy(&pptr->port_mutex);
	mutex_destroy(&pptr->port_pkt_mutex);

	/* all done with soft state */
	ddi_soft_state_free(fcp_softstate, instance);
}
10349 10350
10350 10351 /*
10351 10352 * Function: fcp_kmem_cache_constructor
10352 10353 *
10353 10354 * Description: This function allocates and initializes the resources required
10354 10355 * to build a scsi_pkt structure the target driver. The result
10355 10356 * of the allocation and initialization will be cached in the
10356 10357 * memory cache. As DMA resources may be allocated here, that
10357 10358 * means DMA resources will be tied up in the cache manager.
10358 10359 * This is a tradeoff that has been made for performance reasons.
10359 10360 *
10360 10361 * Argument: *buf Memory to preinitialize.
10361 10362 * *arg FCP port structure (fcp_port).
10362 10363 * kmflags Value passed to kmem_cache_alloc() and
10363 10364 * propagated to the constructor.
10364 10365 *
10365 10366 * Return Value: 0 Allocation/Initialization was successful.
10366 10367 * -1 Allocation or Initialization failed.
10367 10368 *
10368 10369 *
10369 10370 * If the returned value is 0, the buffer is initialized like this:
10370 10371 *
10371 10372 * +================================+
10372 10373 * +----> | struct scsi_pkt |
10373 10374 * | | |
10374 10375 * | +--- | pkt_ha_private |
10375 10376 * | | | |
10376 10377 * | | +================================+
10377 10378 * | |
10378 10379 * | | +================================+
10379 10380 * | +--> | struct fcp_pkt | <---------+
10380 10381 * | | | |
10381 10382 * +----- | cmd_pkt | |
10382 10383 * | cmd_fp_pkt | ---+ |
10383 10384 * +-------->| cmd_fcp_rsp[] | | |
10384 10385 * | +--->| cmd_fcp_cmd[] | | |
10385 10386 * | | |--------------------------------| | |
10386 10387 * | | | struct fc_packet | <--+ |
10387 10388 * | | | | |
10388 10389 * | | | pkt_ulp_private | ----------+
10389 10390 * | | | pkt_fca_private | -----+
10390 10391 * | | | pkt_data_cookie | ---+ |
10391 10392 * | | | pkt_cmdlen | | |
10392 10393 * | |(a) | pkt_rsplen | | |
10393 10394 * | +----| .......... pkt_cmd ........... | ---|-|---------------+
10394 10395 * | (b) | pkt_cmd_cookie | ---|-|----------+ |
10395 10396 * +---------| .......... pkt_resp .......... | ---|-|------+ | |
10396 10397 * | pkt_resp_cookie | ---|-|--+ | | |
10397 10398 * | pkt_cmd_dma | | | | | | |
10398 10399 * | pkt_cmd_acc | | | | | | |
10399 10400 * +================================+ | | | | | |
10400 10401 * | dma_cookies | <--+ | | | | |
10401 10402 * | | | | | | |
10402 10403 * +================================+ | | | | |
10403 10404 * | fca_private | <----+ | | | |
10404 10405 * | | | | | |
10405 10406 * +================================+ | | | |
10406 10407 * | | | |
10407 10408 * | | | |
10408 10409 * +================================+ (d) | | | |
10409 10410 * | fcp_resp cookies | <-------+ | | |
10410 10411 * | | | | |
10411 10412 * +================================+ | | |
10412 10413 * | | |
10413 10414 * +================================+ (d) | | |
10414 10415 * | fcp_resp | <-----------+ | |
10415 10416 * | (DMA resources associated) | | |
10416 10417 * +================================+ | |
10417 10418 * | |
10418 10419 * | |
10419 10420 * | |
10420 10421 * +================================+ (c) | |
10421 10422 * | fcp_cmd cookies | <---------------+ |
10422 10423 * | | |
10423 10424 * +================================+ |
10424 10425 * |
10425 10426 * +================================+ (c) |
10426 10427 * | fcp_cmd | <--------------------+
10427 10428 * | (DMA resources associated) |
10428 10429 * +================================+
10429 10430 *
10430 10431 * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10431 10432 * (b) Only if DMA is NOT used for the FCP_RESP buffer
10432 10433 * (c) Only if DMA is used for the FCP_CMD buffer.
10433 10434 * (d) Only if DMA is used for the FCP_RESP buffer
10434 10435 */
static int
fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
    int kmflags)
{
	struct fcp_pkt *cmd;
	struct fcp_port *pptr;
	fc_packet_t *fpkt;

	pptr = (struct fcp_port *)tran->tran_hba_private;
	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
	/* The HBA-private area holds the fcp_pkt plus its trailing data. */
	bzero(cmd, tran->tran_hba_len);

	/* Cross-link the scsi_pkt, fcp_pkt and fc_packet structures. */
	cmd->cmd_pkt = pkt;
	pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
	fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
	cmd->cmd_fp_pkt = fpkt;

	cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
	cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
	/*
	 * The FCA-private area lives past the fcp_pkt and the data cookie
	 * array (port_dmacookie_sz bytes) within the same allocation --
	 * see the layout diagram in the block comment above.
	 */
	cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);

	/* The data cookie array immediately follows the fcp_pkt. */
	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
	    sizeof (struct fcp_pkt));

	fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
	fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;

	if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
		/*
		 * The underlying HBA doesn't want to DMA the fcp_cmd or
		 * fcp_resp. The transfer of information will be done by
		 * bcopy.
		 * The naming of the flags (that is actually a value) is
		 * unfortunate. FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
		 * DMA" but instead "NO DMA".
		 */
		fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
		fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
		fpkt->pkt_resp = cmd->cmd_fcp_rsp;
	} else {
		/*
		 * The underlying HBA will DMA the fcp_cmd and fcp_resp
		 * buffers.  A DMA-able buffer is allocated and bound for
		 * each of them through the ddi_dma_* interfaces.
		 */
		if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
			return (-1);
		}
	}

	return (0);
}
10488 10489
10489 10490 /*
10490 10491 * Function: fcp_kmem_cache_destructor
10491 10492 *
10492 10493 * Description: Called by the destructor of the cache managed by SCSA.
10493 10494 * All the resources pre-allocated in fcp_pkt_constructor
10494 10495 * and the data also pre-initialized in fcp_pkt_constructor
10495 10496 * are freed and uninitialized here.
10496 10497 *
10497 10498 * Argument: *buf Memory to uninitialize.
10498 10499 * *arg FCP port structure (fcp_port).
10499 10500 *
10500 10501 * Return Value: None
10501 10502 *
10502 10503 * Context: kernel
10503 10504 */
10504 10505 static void
10505 10506 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10506 10507 {
10507 10508 struct fcp_pkt *cmd;
10508 10509 struct fcp_port *pptr;
10509 10510
10510 10511 pptr = (struct fcp_port *)(tran->tran_hba_private);
10511 10512 cmd = pkt->pkt_ha_private;
10512 10513
10513 10514 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10514 10515 /*
10515 10516 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10516 10517 * buffer and DMA resources allocated to do so are released.
10517 10518 */
10518 10519 fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10519 10520 }
10520 10521 }
10521 10522
10522 10523 /*
10523 10524 * Function: fcp_alloc_cmd_resp
10524 10525 *
10525 10526 * Description: This function allocated an FCP_CMD and FCP_RESP buffer that
10526 10527 * will be DMAed by the HBA. The buffer is allocated applying
10527 10528 * the DMA requirements for the HBA. The buffers allocated will
10528 10529 * also be bound. DMA resources are allocated in the process.
10529 10530 * They will be released by fcp_free_cmd_resp().
10530 10531 *
10531 10532 * Argument: *pptr FCP port.
10532 10533 * *fpkt fc packet for which the cmd and resp packet should be
10533 10534 * allocated.
10534 10535 * flags Allocation flags.
10535 10536 *
10536 10537 * Return Value: FC_FAILURE
10537 10538 * FC_SUCCESS
10538 10539 *
10539 10540 * Context: User or Kernel context only if flags == KM_SLEEP.
10540 10541 * Interrupt context if the KM_SLEEP is not specified.
10541 10542 */
10542 10543 static int
10543 10544 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10544 10545 {
10545 10546 int rval;
10546 10547 int cmd_len;
10547 10548 int resp_len;
10548 10549 ulong_t real_len;
10549 10550 int (*cb) (caddr_t);
10550 10551 ddi_dma_cookie_t pkt_cookie;
10551 10552 ddi_dma_cookie_t *cp;
10552 10553 uint32_t cnt;
10553 10554
10554 10555 cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10555 10556
10556 10557 cmd_len = fpkt->pkt_cmdlen;
10557 10558 resp_len = fpkt->pkt_rsplen;
10558 10559
10559 10560 ASSERT(fpkt->pkt_cmd_dma == NULL);
10560 10561
10561 10562 /* Allocation of a DMA handle used in subsequent calls. */
10562 10563 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10563 10564 cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10564 10565 return (FC_FAILURE);
10565 10566 }
10566 10567
10567 10568 /* A buffer is allocated that satisfies the DMA requirements. */
10568 10569 rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10569 10570 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10570 10571 (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10571 10572
10572 10573 if (rval != DDI_SUCCESS) {
10573 10574 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10574 10575 return (FC_FAILURE);
10575 10576 }
10576 10577
10577 10578 if (real_len < cmd_len) {
10578 10579 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10579 10580 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10580 10581 return (FC_FAILURE);
10581 10582 }
10582 10583
10583 10584 /* The buffer allocated is DMA bound. */
10584 10585 rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10585 10586 fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10586 10587 cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10587 10588
10588 10589 if (rval != DDI_DMA_MAPPED) {
10589 10590 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10590 10591 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10591 10592 return (FC_FAILURE);
10592 10593 }
10593 10594
10594 10595 if (fpkt->pkt_cmd_cookie_cnt >
10595 10596 pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10596 10597 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10597 10598 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10598 10599 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10599 10600 return (FC_FAILURE);
10600 10601 }
10601 10602
10602 10603 ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10603 10604
10604 10605 /*
10605 10606 * The buffer where the scatter/gather list is going to be built is
10606 10607 * allocated.
10607 10608 */
10608 10609 cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10609 10610 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10610 10611 KM_NOSLEEP);
10611 10612
10612 10613 if (cp == NULL) {
10613 10614 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10614 10615 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10615 10616 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10616 10617 return (FC_FAILURE);
10617 10618 }
10618 10619
10619 10620 /*
10620 10621 * The scatter/gather list for the buffer we just allocated is built
10621 10622 * here.
10622 10623 */
10623 10624 *cp = pkt_cookie;
10624 10625 cp++;
10625 10626
10626 10627 for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10627 10628 ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10628 10629 &pkt_cookie);
10629 10630 *cp = pkt_cookie;
10630 10631 }
10631 10632
10632 10633 ASSERT(fpkt->pkt_resp_dma == NULL);
10633 10634 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10634 10635 cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10635 10636 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10636 10637 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10637 10638 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10638 10639 return (FC_FAILURE);
10639 10640 }
10640 10641
10641 10642 rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10642 10643 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10643 10644 (caddr_t *)&fpkt->pkt_resp, &real_len,
10644 10645 &fpkt->pkt_resp_acc);
10645 10646
10646 10647 if (rval != DDI_SUCCESS) {
10647 10648 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10648 10649 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10649 10650 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10650 10651 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10651 10652 kmem_free(fpkt->pkt_cmd_cookie,
10652 10653 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10653 10654 return (FC_FAILURE);
10654 10655 }
10655 10656
10656 10657 if (real_len < resp_len) {
10657 10658 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10658 10659 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10659 10660 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10660 10661 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10661 10662 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10662 10663 kmem_free(fpkt->pkt_cmd_cookie,
10663 10664 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10664 10665 return (FC_FAILURE);
10665 10666 }
10666 10667
10667 10668 rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10668 10669 fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10669 10670 cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10670 10671
10671 10672 if (rval != DDI_DMA_MAPPED) {
10672 10673 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10673 10674 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10674 10675 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10675 10676 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10676 10677 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10677 10678 kmem_free(fpkt->pkt_cmd_cookie,
10678 10679 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10679 10680 return (FC_FAILURE);
10680 10681 }
10681 10682
10682 10683 if (fpkt->pkt_resp_cookie_cnt >
10683 10684 pptr->port_resp_dma_attr.dma_attr_sgllen) {
10684 10685 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10685 10686 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10686 10687 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10687 10688 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10688 10689 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10689 10690 kmem_free(fpkt->pkt_cmd_cookie,
10690 10691 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10691 10692 return (FC_FAILURE);
10692 10693 }
10693 10694
10694 10695 ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10695 10696
10696 10697 cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10697 10698 fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10698 10699 KM_NOSLEEP);
10699 10700
10700 10701 if (cp == NULL) {
10701 10702 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10702 10703 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10703 10704 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10704 10705 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10705 10706 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10706 10707 kmem_free(fpkt->pkt_cmd_cookie,
10707 10708 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10708 10709 return (FC_FAILURE);
10709 10710 }
10710 10711
10711 10712 *cp = pkt_cookie;
10712 10713 cp++;
10713 10714
10714 10715 for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10715 10716 ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10716 10717 &pkt_cookie);
10717 10718 *cp = pkt_cookie;
10718 10719 }
10719 10720
10720 10721 return (FC_SUCCESS);
10721 10722 }
10722 10723
10723 10724 /*
10724 10725 * Function: fcp_free_cmd_resp
10725 10726 *
10726 10727 * Description: This function releases the FCP_CMD and FCP_RESP buffer
10727 10728 * allocated by fcp_alloc_cmd_resp() and all the resources
10728 10729 * associated with them. That includes the DMA resources and the
10729 10730 * buffer allocated for the cookies of each one of them.
10730 10731 *
10731 10732 * Argument: *pptr FCP port context.
10732 10733 * *fpkt fc packet containing the cmd and resp packet
10733 10734 * to be released.
10734 10735 *
10735 10736 * Return Value: None
10736 10737 *
10737 10738 * Context: Interrupt, User and Kernel context.
10738 10739 */
/* ARGSUSED */
static void
fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
{
	/*
	 * NOTE(review): this ASSERT requires both handles to be non-NULL,
	 * yet each teardown below is individually guarded by a NULL check;
	 * one of the two is redundant -- confirm which invariant callers
	 * actually guarantee.
	 */
	ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);

	if (fpkt->pkt_resp_dma) {
		/* Unbind before freeing the backing memory and the handle. */
		(void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
	}

	if (fpkt->pkt_resp_cookie) {
		/* Free the scatter/gather list built at allocation time. */
		kmem_free(fpkt->pkt_resp_cookie,
		    fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
		fpkt->pkt_resp_cookie = NULL;
	}

	if (fpkt->pkt_cmd_dma) {
		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
	}

	if (fpkt->pkt_cmd_cookie) {
		kmem_free(fpkt->pkt_cmd_cookie,
		    fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
		fpkt->pkt_cmd_cookie = NULL;
	}
}
10769 10770
10770 10771
10771 10772 /*
10772 10773 * called by the transport to do our own target initialization
10773 10774 *
10774 10775 * can acquire and release the global mutex
10775 10776 */
/* ARGSUSED */
static int
fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	uchar_t *bytes;
	uint_t nbytes;
	uint16_t lun_num;
	struct fcp_tgt *ptgt;
	struct fcp_lun *plun;
	struct fcp_port *pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;

	ASSERT(pptr != NULL);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
	    FCP_BUF_LEVEL_8, 0,
	    "fcp_phys_tgt_init: called for %s (instance %d)",
	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));

	/* get our port WWN property */
	bytes = NULL;
	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
	    (nbytes != FC_WWN_SIZE)) {
		/* no port WWN property */
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
		    " for %s (instance %d): bytes=%p nbytes=%x",
		    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
		    nbytes);

		/* the lookup may have succeeded with the wrong size */
		if (bytes != NULL) {
			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		}

		return (DDI_NOT_WELL_FORMED);
	}
	ASSERT(bytes != NULL);

	/* 0xFFFF is the "property not found" sentinel for the LUN number. */
	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
	    LUN_PROP, 0xFFFF);
	if (lun_num == 0xFFFF) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_NOT_WELL_FORMED);
	}

	/*
	 * port_mutex is held across the lookup and the lun update below so
	 * the lun can't be taken apart underneath us.
	 */
	mutex_enter(&pptr->port_mutex);
	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
		mutex_exit(&pptr->port_mutex);
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_FAILURE);
	}

	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
	    FC_WWN_SIZE) == 0);
	ASSERT(plun->lun_num == lun_num);

	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);

	ptgt = plun->lun_tgt;

	/* Record the scsi_device on the lun and take an init reference. */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_tgt_count++;
	scsi_device_hba_private_set(sd, plun);
	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
	plun->lun_sd = sd;
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	return (DDI_SUCCESS);
}
10861 10862
/*ARGSUSED*/
static int
fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	uchar_t *bytes;
	uint_t nbytes;
	uint16_t lun_num;
	struct fcp_tgt *ptgt;
	struct fcp_lun *plun;
	struct fcp_port *pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;
	child_info_t *cip;

	ASSERT(pptr != NULL);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
	    " (tgt_dip %p)", ddi_get_name(tgt_dip),
	    ddi_get_instance(tgt_dip), hba_dip, tgt_dip);

	/* A virtual (MPxIO client) child must carry pathinfo. */
	cip = (child_info_t *)sd->sd_pathinfo;
	if (cip == NULL) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		return (DDI_NOT_WELL_FORMED);
	}

	/* get our port WWN property */
	bytes = NULL;
	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
	    (nbytes != FC_WWN_SIZE)) {
		/* the lookup may have succeeded with the wrong size */
		if (bytes) {
			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		}
		return (DDI_NOT_WELL_FORMED);
	}

	ASSERT(bytes != NULL);

	/* 0xFFFF is the "property not found" sentinel for the LUN number. */
	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
	    LUN_PROP, 0xFFFF);
	if (lun_num == 0xFFFF) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_NOT_WELL_FORMED);
	}

	/*
	 * port_mutex is held across the lookup and the lun update below so
	 * the lun can't be taken apart underneath us.
	 */
	mutex_enter(&pptr->port_mutex);
	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
		mutex_exit(&pptr->port_mutex);
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_FAILURE);
	}

	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
	    FC_WWN_SIZE) == 0);
	ASSERT(plun->lun_num == lun_num);

	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);

	ptgt = plun->lun_tgt;

	/* Record the scsi_device on the lun and take an init reference. */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_tgt_count++;
	scsi_device_hba_private_set(sd, plun);
	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
	plun->lun_sd = sd;
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	return (DDI_SUCCESS);
}
10952 10953
10953 10954
10954 10955 /*
10955 10956 * called by the transport to do our own target initialization
10956 10957 *
10957 10958 * can acquire and release the global mutex
10958 10959 */
10959 10960 /* ARGSUSED */
10960 10961 static int
10961 10962 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10962 10963 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10963 10964 {
10964 10965 struct fcp_port *pptr = (struct fcp_port *)
10965 10966 hba_tran->tran_hba_private;
10966 10967 int rval;
10967 10968
10968 10969 ASSERT(pptr != NULL);
10969 10970
10970 10971 /*
10971 10972 * Child node is getting initialized. Look at the mpxio component
10972 10973 * type on the child device to see if this device is mpxio managed
10973 10974 * or not.
10974 10975 */
10975 10976 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
10976 10977 rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10977 10978 } else {
10978 10979 rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10979 10980 }
10980 10981
10981 10982 return (rval);
10982 10983 }
10983 10984
10984 10985
/* ARGSUSED */
static void
fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	struct fcp_lun *plun = scsi_device_hba_private_get(sd);
	struct fcp_tgt *ptgt;

	/*
	 * NOTE(review): LUN_PORT presumably dereferences plun, which is
	 * only checked for NULL below -- confirm the macro (or its
	 * compiled-out form) is safe when plun is NULL.
	 */
	FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
	    ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));

	if (plun == NULL) {
		return;
	}
	ptgt = plun->lun_tgt;

	ASSERT(ptgt != NULL);

	mutex_enter(&ptgt->tgt_mutex);
	ASSERT(plun->lun_tgt_count > 0);

	/* Drop the init reference; clear TGT_INIT when the last one goes. */
	if (--plun->lun_tgt_count == 0) {
		plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
	}
	plun->lun_sd = NULL;
	mutex_exit(&ptgt->tgt_mutex);
}
11015 11016
11016 11017 /*
11017 11018 * Function: fcp_scsi_start
11018 11019 *
11019 11020 * Description: This function is called by the target driver to request a
11020 11021 * command to be sent.
11021 11022 *
11022 11023 * Argument: *ap SCSI address of the device.
11023 11024 * *pkt SCSI packet containing the cmd to send.
11024 11025 *
11025 11026 * Return Value: TRAN_ACCEPT
11026 11027 * TRAN_BUSY
11027 11028 * TRAN_BADPKT
11028 11029 * TRAN_FATAL_ERROR
11029 11030 */
11030 11031 static int
11031 11032 fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
11032 11033 {
11033 11034 struct fcp_port *pptr = ADDR2FCP(ap);
11034 11035 struct fcp_lun *plun = ADDR2LUN(ap);
11035 11036 struct fcp_pkt *cmd = PKT2CMD(pkt);
11036 11037 struct fcp_tgt *ptgt = plun->lun_tgt;
↓ open down ↓ |
2786 lines elided |
↑ open up ↑ |
11037 11038 int rval;
11038 11039
11039 11040 /* ensure command isn't already issued */
11040 11041 ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
11041 11042
11042 11043 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11043 11044 fcp_trace, FCP_BUF_LEVEL_9, 0,
11044 11045 "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);
11045 11046
11046 11047 /*
11047 - * It is strange that we enter the fcp_port mutex and the target
11048 - * mutex to check the lun state (which has a mutex of its own).
11049 - */
11050 - mutex_enter(&pptr->port_mutex);
11051 - mutex_enter(&ptgt->tgt_mutex);
11052 -
11053 - /*
11054 11048 * If the device is offline and is not in the process of coming
11055 11049 * online, fail the request.
11056 11050 */
11057 -
11051 + mutex_enter(&plun->lun_mutex);
11058 11052 if ((plun->lun_state & FCP_LUN_OFFLINE) &&
11059 11053 !(plun->lun_state & FCP_LUN_ONLINING)) {
11060 - mutex_exit(&ptgt->tgt_mutex);
11061 - mutex_exit(&pptr->port_mutex);
11062 -
11063 - if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
11054 + mutex_exit(&plun->lun_mutex);
11055 + if (cmd->cmd_fp_pkt->pkt_pd == NULL)
11064 11056 pkt->pkt_reason = CMD_DEV_GONE;
11065 - }
11066 -
11067 11057 return (TRAN_FATAL_ERROR);
11068 11058 }
11059 + mutex_exit(&plun->lun_mutex);
11060 +
11069 11061 cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;
11070 11062
11071 11063 /*
11072 11064 * If we are suspended, kernel is trying to dump, so don't
11073 11065 * block, fail or defer requests - send them down right away.
11074 11066 * NOTE: If we are in panic (i.e. trying to dump), we can't
11075 11067 * assume we have been suspended. There is hardware such as
11076 11068 * the v880 that doesn't do PM. Thus, the check for
11077 11069 * ddi_in_panic.
11078 11070 *
11079 11071 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
11080 11072 * of changing. So, if we can queue the packet, do it. Eventually,
11081 11073 * either the device will have gone away or changed and we can fail
11082 11074 * the request, or we can proceed if the device didn't change.
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
11083 11075 *
11084 11076 * If the pd in the target or the packet is NULL it's probably
11085 11077 * because the device has gone away, we allow the request to be
11086 11078 * put on the internal queue here in case the device comes back within
11087 11079 * the offline timeout. fctl will fix up the pd's if the tgt_pd_handle
11088 11080 * has gone NULL, while fcp deals cases where pkt_pd is NULL. pkt_pd
11089 11081 * could be NULL because the device was disappearing during or since
11090 11082 * packet initialization.
11091 11083 */
11092 11084
11085 + mutex_enter(&pptr->port_mutex);
11086 + mutex_enter(&ptgt->tgt_mutex);
11087 +
11093 11088 if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
11094 11089 FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
11095 11090 (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
11096 11091 (ptgt->tgt_pd_handle == NULL) ||
11097 11092 (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
11098 11093 /*
11099 11094 * If ((LUN is busy AND
11100 11095 * LUN not suspended AND
11101 11096 * The system is not in panic state) OR
11102 11097 * (The port is coming up))
11103 11098 *
11104 11099 * We check to see if the any of the flags FLAG_NOINTR or
11105 11100 * FLAG_NOQUEUE is set. If one of them is set the value
11106 11101 * returned will be TRAN_BUSY. If not, the request is queued.
11107 11102 */
11108 11103 mutex_exit(&ptgt->tgt_mutex);
11109 11104 mutex_exit(&pptr->port_mutex);
11110 11105
11111 11106 /* see if using interrupts is allowed (so queueing'll work) */
11112 11107 if (pkt->pkt_flags & FLAG_NOINTR) {
11113 11108 pkt->pkt_resid = 0;
11114 11109 return (TRAN_BUSY);
11115 11110 }
11116 11111 if (pkt->pkt_flags & FLAG_NOQUEUE) {
11117 11112 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11118 11113 fcp_trace, FCP_BUF_LEVEL_9, 0,
11119 11114 "fcp_scsi_start: lun busy for pkt %p", pkt);
11120 11115 return (TRAN_BUSY);
11121 11116 }
11122 11117 #ifdef DEBUG
11123 11118 mutex_enter(&pptr->port_pkt_mutex);
11124 11119 pptr->port_npkts++;
11125 11120 mutex_exit(&pptr->port_pkt_mutex);
11126 11121 #endif /* DEBUG */
11127 11122
11128 11123 /* got queue up the pkt for later */
11129 11124 fcp_queue_pkt(pptr, cmd);
11130 11125 return (TRAN_ACCEPT);
11131 11126 }
11132 11127 cmd->cmd_state = FCP_PKT_ISSUED;
11133 11128
11134 11129 mutex_exit(&ptgt->tgt_mutex);
11135 11130 mutex_exit(&pptr->port_mutex);
11136 11131
11137 11132 /*
11138 11133 * Now that we released the mutexes, what was protected by them can
11139 11134 * change.
11140 11135 */
11141 11136
11142 11137 /*
11143 11138 * If there is a reconfiguration in progress, wait for it to complete.
11144 11139 */
11145 11140 fcp_reconfig_wait(pptr);
11146 11141
11147 11142 cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
11148 11143 pkt->pkt_time : 0;
11149 11144
11150 11145 /* prepare the packet */
11151 11146
11152 11147 fcp_prepare_pkt(pptr, cmd, plun);
11153 11148
11154 11149 if (cmd->cmd_pkt->pkt_time) {
11155 11150 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11156 11151 } else {
11157 11152 cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11158 11153 }
11159 11154
11160 11155 /*
11161 11156 * if interrupts aren't allowed (e.g. at dump time) then we'll
11162 11157 * have to do polled I/O
11163 11158 */
11164 11159 if (pkt->pkt_flags & FLAG_NOINTR) {
11165 11160 cmd->cmd_state &= ~FCP_PKT_ISSUED;
11166 11161 return (fcp_dopoll(pptr, cmd));
11167 11162 }
11168 11163
11169 11164 #ifdef DEBUG
11170 11165 mutex_enter(&pptr->port_pkt_mutex);
11171 11166 pptr->port_npkts++;
11172 11167 mutex_exit(&pptr->port_pkt_mutex);
11173 11168 #endif /* DEBUG */
11174 11169
11175 11170 rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
11176 11171 if (rval == FC_SUCCESS) {
11177 11172 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11178 11173 fcp_trace, FCP_BUF_LEVEL_9, 0,
11179 11174 "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
11180 11175 return (TRAN_ACCEPT);
11181 11176 }
11182 11177
11183 11178 cmd->cmd_state = FCP_PKT_IDLE;
11184 11179
11185 11180 #ifdef DEBUG
11186 11181 mutex_enter(&pptr->port_pkt_mutex);
11187 11182 pptr->port_npkts--;
11188 11183 mutex_exit(&pptr->port_pkt_mutex);
11189 11184 #endif /* DEBUG */
11190 11185
11191 11186 /*
11192 11187 * For lack of clearer definitions, choose
11193 11188 * between TRAN_BUSY and TRAN_FATAL_ERROR.
11194 11189 */
11195 11190
11196 11191 if (rval == FC_TRAN_BUSY) {
11197 11192 pkt->pkt_resid = 0;
11198 11193 rval = TRAN_BUSY;
11199 11194 } else {
11200 11195 mutex_enter(&ptgt->tgt_mutex);
11201 11196 if (plun->lun_state & FCP_LUN_OFFLINE) {
11202 11197 child_info_t *cip;
11203 11198
11204 11199 mutex_enter(&plun->lun_mutex);
11205 11200 cip = plun->lun_cip;
11206 11201 mutex_exit(&plun->lun_mutex);
11207 11202
11208 11203 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11209 11204 fcp_trace, FCP_BUF_LEVEL_6, 0,
11210 11205 "fcp_transport failed 2 for %x: %x; dip=%p",
11211 11206 plun->lun_tgt->tgt_d_id, rval, cip);
11212 11207
11213 11208 rval = TRAN_FATAL_ERROR;
11214 11209 } else {
11215 11210 if (pkt->pkt_flags & FLAG_NOQUEUE) {
11216 11211 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11217 11212 fcp_trace, FCP_BUF_LEVEL_9, 0,
11218 11213 "fcp_scsi_start: FC_BUSY for pkt %p",
11219 11214 pkt);
11220 11215 rval = TRAN_BUSY;
11221 11216 } else {
11222 11217 rval = TRAN_ACCEPT;
11223 11218 fcp_queue_pkt(pptr, cmd);
11224 11219 }
11225 11220 }
11226 11221 mutex_exit(&ptgt->tgt_mutex);
11227 11222 }
11228 11223
11229 11224 return (rval);
11230 11225 }
11231 11226
11232 11227 /*
11233 11228 * called by the transport to abort a packet
11234 11229 */
11235 11230 /*ARGSUSED*/
11236 11231 static int
11237 11232 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11238 11233 {
11239 11234 int tgt_cnt;
11240 11235 struct fcp_port *pptr = ADDR2FCP(ap);
11241 11236 struct fcp_lun *plun = ADDR2LUN(ap);
11242 11237 struct fcp_tgt *ptgt = plun->lun_tgt;
11243 11238
11244 11239 if (pkt == NULL) {
11245 11240 if (ptgt) {
11246 11241 mutex_enter(&ptgt->tgt_mutex);
11247 11242 tgt_cnt = ptgt->tgt_change_cnt;
11248 11243 mutex_exit(&ptgt->tgt_mutex);
11249 11244 fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11250 11245 return (TRUE);
11251 11246 }
11252 11247 }
11253 11248 return (FALSE);
11254 11249 }
11255 11250
11256 11251
11257 11252 /*
11258 11253 * Perform reset
11259 11254 */
11260 11255 int
11261 11256 fcp_scsi_reset(struct scsi_address *ap, int level)
11262 11257 {
11263 11258 int rval = 0;
11264 11259 struct fcp_port *pptr = ADDR2FCP(ap);
11265 11260 struct fcp_lun *plun = ADDR2LUN(ap);
11266 11261 struct fcp_tgt *ptgt = plun->lun_tgt;
11267 11262
11268 11263 if (level == RESET_ALL) {
11269 11264 if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
11270 11265 rval = 1;
11271 11266 }
11272 11267 } else if (level == RESET_TARGET || level == RESET_LUN) {
11273 11268 /*
11274 11269 * If we are in the middle of discovery, return
11275 11270 * SUCCESS as this target will be rediscovered
11276 11271 * anyway
11277 11272 */
11278 11273 mutex_enter(&ptgt->tgt_mutex);
11279 11274 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11280 11275 mutex_exit(&ptgt->tgt_mutex);
11281 11276 return (1);
11282 11277 }
11283 11278 mutex_exit(&ptgt->tgt_mutex);
11284 11279
11285 11280 if (fcp_reset_target(ap, level) == FC_SUCCESS) {
11286 11281 rval = 1;
11287 11282 }
11288 11283 }
11289 11284 return (rval);
11290 11285 }
11291 11286
11292 11287
/*
 * Called by the framework to get a SCSI capability.
 * Thin wrapper: delegates to fcp_commoncap() with doset == 0.
 */
static int
fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
{
	/* val is ignored on a get, so pass 0. */
	return (fcp_commoncap(ap, cap, 0, whom, 0));
}
11301 11296
11302 11297
/*
 * Called by the framework to set a SCSI capability.
 * Thin wrapper: delegates to fcp_commoncap() with doset == 1.
 */
static int
fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	return (fcp_commoncap(ap, cap, value, whom, 1));
}
11311 11306
11312 11307 /*
11313 11308 * Function: fcp_pkt_setup
11314 11309 *
11315 11310 * Description: This function sets up the scsi_pkt structure passed by the
11316 11311 * caller. This function assumes fcp_pkt_constructor has been
11317 11312 * called previously for the packet passed by the caller. If
11318 11313 * successful this call will have the following results:
11319 11314 *
11320 11315 * - The resources needed that will be constant through out
11321 11316 * the whole transaction are allocated.
11322 11317 * - The fields that will be constant through out the whole
11323 11318 * transaction are initialized.
11324 11319 * - The scsi packet will be linked to the LUN structure
11325 11320 * addressed by the transaction.
11326 11321 *
11327 11322 * Argument:
11328 11323 * *pkt Pointer to a scsi_pkt structure.
11329 11324 * callback
11330 11325 * arg
11331 11326 *
11332 11327 * Return Value: 0 Success
11333 11328 * !0 Failure
11334 11329 *
11335 11330 * Context: Kernel context or interrupt context
11336 11331 */
11337 11332 /* ARGSUSED */
11338 11333 static int
11339 11334 fcp_pkt_setup(struct scsi_pkt *pkt,
11340 11335 int (*callback)(caddr_t arg),
11341 11336 caddr_t arg)
11342 11337 {
11343 11338 struct fcp_pkt *cmd;
11344 11339 struct fcp_port *pptr;
11345 11340 struct fcp_lun *plun;
11346 11341 struct fcp_tgt *ptgt;
11347 11342 int kf;
11348 11343 fc_packet_t *fpkt;
11349 11344 fc_frame_hdr_t *hp;
11350 11345
11351 11346 pptr = ADDR2FCP(&pkt->pkt_address);
11352 11347 plun = ADDR2LUN(&pkt->pkt_address);
11353 11348 ptgt = plun->lun_tgt;
11354 11349
11355 11350 cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11356 11351 fpkt = cmd->cmd_fp_pkt;
11357 11352
11358 11353 /*
11359 11354 * this request is for dma allocation only
11360 11355 */
11361 11356 /*
11362 11357 * First step of fcp_scsi_init_pkt: pkt allocation
11363 11358 * We determine if the caller is willing to wait for the
11364 11359 * resources.
11365 11360 */
11366 11361 kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
11367 11362
11368 11363 /*
11369 11364 * Selective zeroing of the pkt.
11370 11365 */
11371 11366 cmd->cmd_back = NULL;
11372 11367 cmd->cmd_next = NULL;
11373 11368
11374 11369 /*
11375 11370 * Zero out fcp command
11376 11371 */
11377 11372 bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));
11378 11373
11379 11374 cmd->cmd_state = FCP_PKT_IDLE;
11380 11375
11381 11376 fpkt = cmd->cmd_fp_pkt;
11382 11377 fpkt->pkt_data_acc = NULL;
11383 11378
11384 11379 /*
11385 11380 * When port_state is FCP_STATE_OFFLINE, remote_port (tgt_pd_handle)
11386 11381 * could be destroyed. We need fail pkt_setup.
11387 11382 */
11388 11383 if (pptr->port_state & FCP_STATE_OFFLINE) {
11389 11384 return (-1);
11390 11385 }
11391 11386
11392 11387 mutex_enter(&ptgt->tgt_mutex);
11393 11388 fpkt->pkt_pd = ptgt->tgt_pd_handle;
11394 11389
11395 11390 if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
11396 11391 != FC_SUCCESS) {
11397 11392 mutex_exit(&ptgt->tgt_mutex);
11398 11393 return (-1);
11399 11394 }
11400 11395
11401 11396 mutex_exit(&ptgt->tgt_mutex);
11402 11397
11403 11398 /* Fill in the Fabric Channel Header */
11404 11399 hp = &fpkt->pkt_cmd_fhdr;
11405 11400 hp->r_ctl = R_CTL_COMMAND;
11406 11401 hp->rsvd = 0;
11407 11402 hp->type = FC_TYPE_SCSI_FCP;
11408 11403 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
11409 11404 hp->seq_id = 0;
11410 11405 hp->df_ctl = 0;
11411 11406 hp->seq_cnt = 0;
11412 11407 hp->ox_id = 0xffff;
11413 11408 hp->rx_id = 0xffff;
11414 11409 hp->ro = 0;
11415 11410
11416 11411 /*
11417 11412 * A doubly linked list (cmd_forw, cmd_back) is built
11418 11413 * out of every allocated packet on a per-lun basis
11419 11414 *
11420 11415 * The packets are maintained in the list so as to satisfy
11421 11416 * scsi_abort() requests. At present (which is unlikely to
11422 11417 * change in the future) nobody performs a real scsi_abort
11423 11418 * in the SCSI target drivers (as they don't keep the packets
11424 11419 * after doing scsi_transport - so they don't know how to
11425 11420 * abort a packet other than sending a NULL to abort all
11426 11421 * outstanding packets)
11427 11422 */
11428 11423 mutex_enter(&plun->lun_mutex);
11429 11424 if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
11430 11425 plun->lun_pkt_head->cmd_back = cmd;
11431 11426 } else {
11432 11427 plun->lun_pkt_tail = cmd;
11433 11428 }
11434 11429 plun->lun_pkt_head = cmd;
11435 11430 mutex_exit(&plun->lun_mutex);
11436 11431 return (0);
11437 11432 }
11438 11433
11439 11434 /*
11440 11435 * Function: fcp_pkt_teardown
11441 11436 *
11442 11437 * Description: This function releases a scsi_pkt structure and all the
11443 11438 * resources attached to it.
11444 11439 *
11445 11440 * Argument: *pkt Pointer to a scsi_pkt structure.
11446 11441 *
11447 11442 * Return Value: None
11448 11443 *
11449 11444 * Context: User, Kernel or Interrupt context.
11450 11445 */
11451 11446 static void
11452 11447 fcp_pkt_teardown(struct scsi_pkt *pkt)
11453 11448 {
11454 11449 struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
11455 11450 struct fcp_lun *plun = ADDR2LUN(&pkt->pkt_address);
11456 11451 struct fcp_pkt *cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11457 11452
11458 11453 /*
11459 11454 * Remove the packet from the per-lun list
11460 11455 */
11461 11456 mutex_enter(&plun->lun_mutex);
11462 11457 if (cmd->cmd_back) {
11463 11458 ASSERT(cmd != plun->lun_pkt_head);
11464 11459 cmd->cmd_back->cmd_forw = cmd->cmd_forw;
11465 11460 } else {
11466 11461 ASSERT(cmd == plun->lun_pkt_head);
11467 11462 plun->lun_pkt_head = cmd->cmd_forw;
11468 11463 }
11469 11464
11470 11465 if (cmd->cmd_forw) {
11471 11466 cmd->cmd_forw->cmd_back = cmd->cmd_back;
11472 11467 } else {
11473 11468 ASSERT(cmd == plun->lun_pkt_tail);
11474 11469 plun->lun_pkt_tail = cmd->cmd_back;
11475 11470 }
11476 11471
11477 11472 mutex_exit(&plun->lun_mutex);
11478 11473
11479 11474 (void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
11480 11475 }
11481 11476
11482 11477 /*
11483 11478 * Routine for reset notification setup, to register or cancel.
11484 11479 * This function is called by SCSA
11485 11480 */
11486 11481 /*ARGSUSED*/
11487 11482 static int
11488 11483 fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
11489 11484 void (*callback)(caddr_t), caddr_t arg)
11490 11485 {
11491 11486 struct fcp_port *pptr = ADDR2FCP(ap);
11492 11487
11493 11488 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
11494 11489 &pptr->port_mutex, &pptr->port_reset_notify_listf));
11495 11490 }
11496 11491
11497 11492
11498 11493 static int
11499 11494 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11500 11495 ddi_eventcookie_t *event_cookiep)
11501 11496 {
11502 11497 struct fcp_port *pptr = fcp_dip2port(dip);
11503 11498
11504 11499 if (pptr == NULL) {
11505 11500 return (DDI_FAILURE);
11506 11501 }
11507 11502
11508 11503 return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11509 11504 event_cookiep, NDI_EVENT_NOPASS));
11510 11505 }
11511 11506
11512 11507
11513 11508 static int
11514 11509 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11515 11510 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11516 11511 ddi_callback_id_t *cb_id)
11517 11512 {
11518 11513 struct fcp_port *pptr = fcp_dip2port(dip);
11519 11514
11520 11515 if (pptr == NULL) {
11521 11516 return (DDI_FAILURE);
11522 11517 }
11523 11518
11524 11519 return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11525 11520 eventid, callback, arg, NDI_SLEEP, cb_id));
11526 11521 }
11527 11522
11528 11523
11529 11524 static int
11530 11525 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11531 11526 {
11532 11527
11533 11528 struct fcp_port *pptr = fcp_dip2port(dip);
11534 11529
11535 11530 if (pptr == NULL) {
11536 11531 return (DDI_FAILURE);
11537 11532 }
11538 11533 return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11539 11534 }
11540 11535
11541 11536
11542 11537 /*
11543 11538 * called by the transport to post an event
11544 11539 */
11545 11540 static int
11546 11541 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11547 11542 ddi_eventcookie_t eventid, void *impldata)
11548 11543 {
11549 11544 struct fcp_port *pptr = fcp_dip2port(dip);
11550 11545
11551 11546 if (pptr == NULL) {
11552 11547 return (DDI_FAILURE);
11553 11548 }
11554 11549
11555 11550 return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11556 11551 eventid, impldata));
11557 11552 }
11558 11553
11559 11554
11560 11555 /*
11561 11556 * A target in in many cases in Fibre Channel has a one to one relation
11562 11557 * with a port identifier (which is also known as D_ID and also as AL_PA
11563 11558 * in private Loop) On Fibre Channel-to-SCSI bridge boxes a target reset
11564 11559 * will most likely result in resetting all LUNs (which means a reset will
11565 11560 * occur on all the SCSI devices connected at the other end of the bridge)
11566 11561 * That is the latest favorite topic for discussion, for, one can debate as
11567 11562 * hot as one likes and come up with arguably a best solution to one's
11568 11563 * satisfaction
11569 11564 *
11570 11565 * To stay on track and not digress much, here are the problems stated
11571 11566 * briefly:
11572 11567 *
11573 11568 * SCSA doesn't define RESET_LUN, It defines RESET_TARGET, but the
11574 11569 * target drivers use RESET_TARGET even if their instance is on a
11575 11570 * LUN. Doesn't that sound a bit broken ?
11576 11571 *
11577 11572 * FCP SCSI (the current spec) only defines RESET TARGET in the
11578 11573 * control fields of an FCP_CMND structure. It should have been
11579 11574 * fixed right there, giving flexibility to the initiators to
11580 11575 * minimize havoc that could be caused by resetting a target.
11581 11576 */
11582 11577 static int
11583 11578 fcp_reset_target(struct scsi_address *ap, int level)
11584 11579 {
11585 11580 int rval = FC_FAILURE;
11586 11581 char lun_id[25];
11587 11582 struct fcp_port *pptr = ADDR2FCP(ap);
11588 11583 struct fcp_lun *plun = ADDR2LUN(ap);
11589 11584 struct fcp_tgt *ptgt = plun->lun_tgt;
11590 11585 struct scsi_pkt *pkt;
11591 11586 struct fcp_pkt *cmd;
11592 11587 struct fcp_rsp *rsp;
11593 11588 uint32_t tgt_cnt;
11594 11589 struct fcp_rsp_info *rsp_info;
11595 11590 struct fcp_reset_elem *p;
11596 11591 int bval;
11597 11592
11598 11593 if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
11599 11594 KM_NOSLEEP)) == NULL) {
11600 11595 return (rval);
11601 11596 }
11602 11597
11603 11598 mutex_enter(&ptgt->tgt_mutex);
11604 11599 if (level == RESET_TARGET) {
11605 11600 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11606 11601 mutex_exit(&ptgt->tgt_mutex);
11607 11602 kmem_free(p, sizeof (struct fcp_reset_elem));
11608 11603 return (rval);
11609 11604 }
11610 11605 fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
11611 11606 (void) strcpy(lun_id, " ");
11612 11607 } else {
11613 11608 if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
11614 11609 mutex_exit(&ptgt->tgt_mutex);
11615 11610 kmem_free(p, sizeof (struct fcp_reset_elem));
11616 11611 return (rval);
11617 11612 }
11618 11613 fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
11619 11614
11620 11615 (void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
11621 11616 }
11622 11617 tgt_cnt = ptgt->tgt_change_cnt;
11623 11618
11624 11619 mutex_exit(&ptgt->tgt_mutex);
11625 11620
11626 11621 if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
11627 11622 0, 0, NULL, 0)) == NULL) {
11628 11623 kmem_free(p, sizeof (struct fcp_reset_elem));
11629 11624 mutex_enter(&ptgt->tgt_mutex);
11630 11625 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11631 11626 mutex_exit(&ptgt->tgt_mutex);
11632 11627 return (rval);
11633 11628 }
11634 11629 pkt->pkt_time = FCP_POLL_TIMEOUT;
11635 11630
11636 11631 /* fill in cmd part of packet */
11637 11632 cmd = PKT2CMD(pkt);
11638 11633 if (level == RESET_TARGET) {
11639 11634 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
11640 11635 } else {
11641 11636 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
11642 11637 }
11643 11638 cmd->cmd_fp_pkt->pkt_comp = NULL;
11644 11639 cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
11645 11640
11646 11641 /* prepare a packet for transport */
11647 11642 fcp_prepare_pkt(pptr, cmd, plun);
11648 11643
11649 11644 if (cmd->cmd_pkt->pkt_time) {
11650 11645 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11651 11646 } else {
11652 11647 cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11653 11648 }
11654 11649
11655 11650 (void) fc_ulp_busy_port(pptr->port_fp_handle);
11656 11651 bval = fcp_dopoll(pptr, cmd);
11657 11652 fc_ulp_idle_port(pptr->port_fp_handle);
11658 11653
11659 11654 /* submit the packet */
11660 11655 if (bval == TRAN_ACCEPT) {
11661 11656 int error = 3;
11662 11657
11663 11658 rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
11664 11659 rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
11665 11660 sizeof (struct fcp_rsp));
11666 11661
11667 11662 if (rsp->fcp_u.fcp_status.rsp_len_set) {
11668 11663 if (fcp_validate_fcp_response(rsp, pptr) ==
11669 11664 FC_SUCCESS) {
11670 11665 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
11671 11666 FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
11672 11667 sizeof (struct fcp_rsp), rsp_info,
11673 11668 cmd->cmd_fp_pkt->pkt_resp_acc,
11674 11669 sizeof (struct fcp_rsp_info));
11675 11670 }
11676 11671 if (rsp_info->rsp_code == FCP_NO_FAILURE) {
11677 11672 rval = FC_SUCCESS;
11678 11673 error = 0;
11679 11674 } else {
11680 11675 error = 1;
11681 11676 }
11682 11677 } else {
11683 11678 error = 2;
11684 11679 }
11685 11680 }
11686 11681
11687 11682 switch (error) {
11688 11683 case 0:
11689 11684 fcp_log(CE_WARN, pptr->port_dip,
11690 11685 "!FCP: WWN 0x%08x%08x %s reset successfully",
11691 11686 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11692 11687 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11693 11688 break;
11694 11689
11695 11690 case 1:
11696 11691 fcp_log(CE_WARN, pptr->port_dip,
11697 11692 "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11698 11693 " response code=%x",
11699 11694 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11700 11695 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11701 11696 rsp_info->rsp_code);
11702 11697 break;
11703 11698
11704 11699 case 2:
11705 11700 fcp_log(CE_WARN, pptr->port_dip,
11706 11701 "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11707 11702 " Bad FCP response values: rsvd1=%x,"
11708 11703 " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
11709 11704 " rsplen=%x, senselen=%x",
11710 11705 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11711 11706 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11712 11707 rsp->reserved_0, rsp->reserved_1,
11713 11708 rsp->fcp_u.fcp_status.reserved_0,
11714 11709 rsp->fcp_u.fcp_status.reserved_1,
11715 11710 rsp->fcp_response_len, rsp->fcp_sense_len);
11716 11711 break;
11717 11712
11718 11713 default:
11719 11714 fcp_log(CE_WARN, pptr->port_dip,
11720 11715 "!FCP: Reset to WWN 0x%08x%08x %s failed",
11721 11716 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11722 11717 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11723 11718 break;
11724 11719 }
11725 11720 }
11726 11721 scsi_destroy_pkt(pkt);
11727 11722
11728 11723 if (rval == FC_FAILURE) {
11729 11724 mutex_enter(&ptgt->tgt_mutex);
11730 11725 if (level == RESET_TARGET) {
11731 11726 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11732 11727 } else {
11733 11728 fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
11734 11729 }
11735 11730 mutex_exit(&ptgt->tgt_mutex);
11736 11731 kmem_free(p, sizeof (struct fcp_reset_elem));
11737 11732 return (rval);
11738 11733 }
11739 11734
11740 11735 mutex_enter(&pptr->port_mutex);
11741 11736 if (level == RESET_TARGET) {
11742 11737 p->tgt = ptgt;
11743 11738 p->lun = NULL;
11744 11739 } else {
11745 11740 p->tgt = NULL;
11746 11741 p->lun = plun;
11747 11742 }
11748 11743 p->tgt = ptgt;
11749 11744 p->tgt_cnt = tgt_cnt;
11750 11745 p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
11751 11746 p->next = pptr->port_reset_list;
11752 11747 pptr->port_reset_list = p;
11753 11748
11754 11749 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11755 11750 fcp_trace, FCP_BUF_LEVEL_3, 0,
11756 11751 "Notify ssd of the reset to reinstate the reservations");
11757 11752
11758 11753 scsi_hba_reset_notify_callback(&pptr->port_mutex,
11759 11754 &pptr->port_reset_notify_listf);
11760 11755
11761 11756 mutex_exit(&pptr->port_mutex);
11762 11757
11763 11758 return (rval);
11764 11759 }
11765 11760
11766 11761
11767 11762 /*
11768 11763 * called by fcp_getcap and fcp_setcap to get and set (respectively)
11769 11764 * SCSI capabilities
11770 11765 */
11771 11766 /* ARGSUSED */
11772 11767 static int
11773 11768 fcp_commoncap(struct scsi_address *ap, char *cap,
11774 11769 int val, int tgtonly, int doset)
11775 11770 {
11776 11771 struct fcp_port *pptr = ADDR2FCP(ap);
11777 11772 struct fcp_lun *plun = ADDR2LUN(ap);
11778 11773 struct fcp_tgt *ptgt = plun->lun_tgt;
11779 11774 int cidx;
11780 11775 int rval = FALSE;
11781 11776
11782 11777 if (cap == (char *)0) {
11783 11778 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11784 11779 fcp_trace, FCP_BUF_LEVEL_3, 0,
11785 11780 "fcp_commoncap: invalid arg");
11786 11781 return (rval);
11787 11782 }
11788 11783
11789 11784 if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
11790 11785 return (UNDEFINED);
11791 11786 }
11792 11787
11793 11788 /*
11794 11789 * Process setcap request.
11795 11790 */
11796 11791 if (doset) {
11797 11792 /*
11798 11793 * At present, we can only set binary (0/1) values
11799 11794 */
11800 11795 switch (cidx) {
11801 11796 case SCSI_CAP_ARQ:
11802 11797 if (val == 0) {
11803 11798 rval = FALSE;
11804 11799 } else {
11805 11800 rval = TRUE;
11806 11801 }
11807 11802 break;
11808 11803
11809 11804 case SCSI_CAP_LUN_RESET:
11810 11805 if (val) {
11811 11806 plun->lun_cap |= FCP_LUN_CAP_RESET;
11812 11807 } else {
11813 11808 plun->lun_cap &= ~FCP_LUN_CAP_RESET;
11814 11809 }
11815 11810 rval = TRUE;
11816 11811 break;
11817 11812
11818 11813 case SCSI_CAP_SECTOR_SIZE:
11819 11814 rval = TRUE;
11820 11815 break;
11821 11816 default:
11822 11817 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11823 11818 fcp_trace, FCP_BUF_LEVEL_4, 0,
11824 11819 "fcp_setcap: unsupported %d", cidx);
11825 11820 rval = UNDEFINED;
11826 11821 break;
11827 11822 }
11828 11823
11829 11824 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11830 11825 fcp_trace, FCP_BUF_LEVEL_5, 0,
11831 11826 "set cap: cap=%s, val/tgtonly/doset/rval = "
11832 11827 "0x%x/0x%x/0x%x/%d",
11833 11828 cap, val, tgtonly, doset, rval);
11834 11829
11835 11830 } else {
11836 11831 /*
11837 11832 * Process getcap request.
11838 11833 */
11839 11834 switch (cidx) {
11840 11835 case SCSI_CAP_DMA_MAX:
11841 11836 rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;
11842 11837
11843 11838 /*
11844 11839 * Need to make an adjustment qlc is uint_t 64
11845 11840 * st is int, so we will make the adjustment here
11846 11841 * being as nobody wants to touch this.
11847 11842 * It still leaves the max single block length
11848 11843 * of 2 gig. This should last .
11849 11844 */
11850 11845
11851 11846 if (rval == -1) {
11852 11847 rval = MAX_INT_DMA;
11853 11848 }
11854 11849
11855 11850 break;
11856 11851
11857 11852 case SCSI_CAP_INITIATOR_ID:
11858 11853 rval = pptr->port_id;
11859 11854 break;
11860 11855
11861 11856 case SCSI_CAP_ARQ:
11862 11857 case SCSI_CAP_RESET_NOTIFICATION:
11863 11858 case SCSI_CAP_TAGGED_QING:
11864 11859 rval = TRUE;
11865 11860 break;
11866 11861
11867 11862 case SCSI_CAP_SCSI_VERSION:
11868 11863 rval = 3;
11869 11864 break;
11870 11865
11871 11866 case SCSI_CAP_INTERCONNECT_TYPE:
11872 11867 if (FC_TOP_EXTERNAL(pptr->port_topology) ||
11873 11868 (ptgt->tgt_hard_addr == 0)) {
11874 11869 rval = INTERCONNECT_FABRIC;
11875 11870 } else {
11876 11871 rval = INTERCONNECT_FIBRE;
11877 11872 }
11878 11873 break;
11879 11874
11880 11875 case SCSI_CAP_LUN_RESET:
11881 11876 rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
11882 11877 TRUE : FALSE;
11883 11878 break;
11884 11879
11885 11880 default:
11886 11881 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11887 11882 fcp_trace, FCP_BUF_LEVEL_4, 0,
11888 11883 "fcp_getcap: unsupported %d", cidx);
11889 11884 rval = UNDEFINED;
11890 11885 break;
11891 11886 }
11892 11887
11893 11888 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11894 11889 fcp_trace, FCP_BUF_LEVEL_8, 0,
11895 11890 "get cap: cap=%s, val/tgtonly/doset/rval = "
11896 11891 "0x%x/0x%x/0x%x/%d",
11897 11892 cap, val, tgtonly, doset, rval);
11898 11893 }
11899 11894
11900 11895 return (rval);
11901 11896 }
11902 11897
11903 11898 /*
11904 11899 * called by the transport to get the port-wwn and lun
11905 11900 * properties of this device, and to create a "name" based on them
11906 11901 *
11907 11902 * these properties don't exist on sun4m
11908 11903 *
11909 11904 * return 1 for success else return 0
11910 11905 */
11911 11906 /* ARGSUSED */
11912 11907 static int
11913 11908 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11914 11909 {
11915 11910 int i;
11916 11911 int *lun;
11917 11912 int numChars;
11918 11913 uint_t nlun;
11919 11914 uint_t count;
11920 11915 uint_t nbytes;
11921 11916 uchar_t *bytes;
11922 11917 uint16_t lun_num;
11923 11918 uint32_t tgt_id;
11924 11919 char **conf_wwn;
11925 11920 char tbuf[(FC_WWN_SIZE << 1) + 1];
11926 11921 uchar_t barray[FC_WWN_SIZE];
11927 11922 dev_info_t *tgt_dip;
11928 11923 struct fcp_tgt *ptgt;
11929 11924 struct fcp_port *pptr;
11930 11925 struct fcp_lun *plun;
11931 11926
11932 11927 ASSERT(sd != NULL);
11933 11928 ASSERT(name != NULL);
11934 11929
11935 11930 tgt_dip = sd->sd_dev;
11936 11931 pptr = ddi_get_soft_state(fcp_softstate,
11937 11932 ddi_get_instance(ddi_get_parent(tgt_dip)));
11938 11933 if (pptr == NULL) {
11939 11934 return (0);
11940 11935 }
11941 11936
11942 11937 ASSERT(tgt_dip != NULL);
11943 11938
11944 11939 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11945 11940 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11946 11941 LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11947 11942 name[0] = '\0';
11948 11943 return (0);
11949 11944 }
11950 11945
11951 11946 if (nlun == 0) {
11952 11947 ddi_prop_free(lun);
11953 11948 return (0);
11954 11949 }
11955 11950
11956 11951 lun_num = lun[0];
11957 11952 ddi_prop_free(lun);
11958 11953
11959 11954 /*
11960 11955 * Lookup for .conf WWN property
11961 11956 */
11962 11957 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
11963 11958 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
11964 11959 &conf_wwn, &count) == DDI_PROP_SUCCESS) {
11965 11960 ASSERT(count >= 1);
11966 11961
11967 11962 fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
11968 11963 ddi_prop_free(conf_wwn);
11969 11964 mutex_enter(&pptr->port_mutex);
11970 11965 if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
11971 11966 mutex_exit(&pptr->port_mutex);
11972 11967 return (0);
11973 11968 }
11974 11969 ptgt = plun->lun_tgt;
11975 11970 mutex_exit(&pptr->port_mutex);
11976 11971
11977 11972 (void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
11978 11973 tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
11979 11974
11980 11975 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
11981 11976 ptgt->tgt_hard_addr != 0) {
11982 11977 tgt_id = (uint32_t)fcp_alpa_to_switch[
11983 11978 ptgt->tgt_hard_addr];
11984 11979 } else {
11985 11980 tgt_id = ptgt->tgt_d_id;
11986 11981 }
11987 11982
11988 11983 (void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
11989 11984 TARGET_PROP, tgt_id);
11990 11985 }
11991 11986
11992 11987 /* get the our port-wwn property */
11993 11988 bytes = NULL;
11994 11989 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
11995 11990 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
11996 11991 &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
11997 11992 if (bytes != NULL) {
11998 11993 ddi_prop_free(bytes);
11999 11994 }
12000 11995 return (0);
12001 11996 }
12002 11997
12003 11998 for (i = 0; i < FC_WWN_SIZE; i++) {
12004 11999 (void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
12005 12000 }
12006 12001
12007 12002 /* Stick in the address of the form "wWWN,LUN" */
12008 12003 numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
12009 12004
12010 12005 ASSERT(numChars < len);
12011 12006 if (numChars >= len) {
12012 12007 fcp_log(CE_WARN, pptr->port_dip,
12013 12008 "!fcp_scsi_get_name: "
12014 12009 "name parameter length too small, it needs to be %d",
12015 12010 numChars+1);
12016 12011 }
12017 12012
12018 12013 ddi_prop_free(bytes);
12019 12014
12020 12015 return (1);
12021 12016 }
12022 12017
12023 12018
12024 12019 /*
12025 12020 * called by the transport to get the SCSI target id value, returning
12026 12021 * it in "name"
12027 12022 *
12028 12023 * this isn't needed/used on sun4m
12029 12024 *
12030 12025 * return 1 for success else return 0
12031 12026 */
12032 12027 /* ARGSUSED */
12033 12028 static int
12034 12029 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
12035 12030 {
12036 12031 struct fcp_lun *plun = ADDR2LUN(&sd->sd_address);
12037 12032 struct fcp_tgt *ptgt;
12038 12033 int numChars;
12039 12034
12040 12035 if (plun == NULL) {
12041 12036 return (0);
12042 12037 }
12043 12038
12044 12039 if ((ptgt = plun->lun_tgt) == NULL) {
12045 12040 return (0);
12046 12041 }
12047 12042
12048 12043 numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
12049 12044
12050 12045 ASSERT(numChars < len);
12051 12046 if (numChars >= len) {
12052 12047 fcp_log(CE_WARN, NULL,
12053 12048 "!fcp_scsi_get_bus_addr: "
12054 12049 "name parameter length too small, it needs to be %d",
12055 12050 numChars+1);
12056 12051 }
12057 12052
12058 12053 return (1);
12059 12054 }
12060 12055
12061 12056
12062 12057 /*
12063 12058 * called internally to reset the link where the specified port lives
12064 12059 */
static int
fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
{
	la_wwn_t		wwn;
	struct fcp_lun	*plun;
	struct fcp_tgt	*ptgt;

	/* disable restart of lip if we're suspended */
	mutex_enter(&pptr->port_mutex);

	if (pptr->port_state & (FCP_STATE_SUSPENDED |
	    FCP_STATE_POWER_DOWN)) {
		mutex_exit(&pptr->port_mutex);
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_linkreset, fcp%d: link reset "
		    "disabled due to DDI_SUSPEND",
		    ddi_get_instance(pptr->port_dip));
		return (FC_FAILURE);
	}

	/*
	 * Nothing to reset if the port is already offline or is in the
	 * process of coming online; report success without doing anything.
	 */
	if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
		mutex_exit(&pptr->port_mutex);
		return (FC_SUCCESS);
	}

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");

	/*
	 * If ap == NULL assume local link reset.
	 */
	/*
	 * On an external (fabric/public loop) topology with a specific
	 * address, target that port's WWN; otherwise a zeroed WWN asks
	 * the transport for a local link reset.
	 */
	if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
		plun = ADDR2LUN(ap);
		ptgt = plun->lun_tgt;
		bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
	} else {
		bzero((caddr_t)&wwn, sizeof (wwn));
	}
	mutex_exit(&pptr->port_mutex);

	/* Hand the actual reset off to the FC transport layer. */
	return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
}
12108 12103
12109 12104
12110 12105 /*
12111 12106 * called from fcp_port_attach() to resume a port
12112 12107 * return DDI_* success/failure status
12113 12108 * acquires and releases the global mutex
12114 12109 * acquires and releases the port mutex
12115 12110 */
12116 12111 /*ARGSUSED*/
12117 12112
static int
fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
    uint32_t s_id, fc_attach_cmd_t cmd, int instance)
{
	int			res = DDI_FAILURE; /* default result */
	struct fcp_port		*pptr;	/* port state ptr */
	uint32_t		alloc_cnt;
	uint32_t		max_cnt;
	fc_portmap_t		*tmp_list = NULL;

	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
	    instance);

	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
		cmn_err(CE_WARN, "fcp: bad soft state");
		return (res);
	}

	/* Clear the suspend/power-down flag corresponding to 'cmd'. */
	mutex_enter(&pptr->port_mutex);
	switch (cmd) {
	case FC_CMD_RESUME:
		ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
		pptr->port_state &= ~FCP_STATE_SUSPENDED;
		break;

	case FC_CMD_POWER_UP:
		/*
		 * If the port is DDI_SUSPENded, defer rediscovery
		 * until DDI_RESUME occurs
		 */
		if (pptr->port_state & FCP_STATE_SUSPENDED) {
			pptr->port_state &= ~FCP_STATE_POWER_DOWN;
			mutex_exit(&pptr->port_mutex);
			return (DDI_SUCCESS);
		}
		pptr->port_state &= ~FCP_STATE_POWER_DOWN;
		/* FC_CMD_POWER_UP is the last case: control falls out */
	}
	pptr->port_id = s_id;
	pptr->port_state = FCP_STATE_INIT;
	mutex_exit(&pptr->port_mutex);

	/*
	 * Make a copy of ulp_port_info as fctl allocates
	 * a temp struct.
	 */
	(void) fcp_cp_pinfo(pptr, pinfo);

	/* (Re)start the global watchdog if this is the first active port. */
	mutex_enter(&fcp_global_mutex);
	if (fcp_watchdog_init++ == 0) {
		fcp_watchdog_tick = fcp_watchdog_timeout *
		    drv_usectohz(1000000);
		fcp_watchdog_id = timeout(fcp_watch,
		    NULL, fcp_watchdog_tick);
	}
	mutex_exit(&fcp_global_mutex);

	/*
	 * Handle various topologies and link states.
	 */
	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
	case FC_STATE_OFFLINE:
		/*
		 * Wait for ONLINE, at which time a state
		 * change will cause a statec_callback
		 */
		res = DDI_SUCCESS;
		break;

	case FC_STATE_ONLINE:

		if (pptr->port_topology == FC_TOP_UNKNOWN) {
			/* Force a LIP so the topology gets determined. */
			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
			res = DDI_SUCCESS;
			break;
		}

		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    !fcp_enable_auto_configuration) {
			/* Fabric with manual config: use the saved map. */
			tmp_list = fcp_construct_map(pptr, &alloc_cnt);
			if (tmp_list == NULL) {
				if (!alloc_cnt) {
					res = DDI_SUCCESS;
				}
				break;
			}
			max_cnt = alloc_cnt;
		} else {
			/* Otherwise ask the transport for the port map. */
			ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);

			alloc_cnt = FCP_MAX_DEVICES;

			if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
			    (sizeof (fc_portmap_t)) * alloc_cnt,
			    KM_NOSLEEP)) == NULL) {
				fcp_log(CE_WARN, pptr->port_dip,
				    "!fcp%d: failed to allocate portmap",
				    instance);
				break;
			}

			max_cnt = alloc_cnt;
			if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
			    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
			    FC_SUCCESS) {
				caddr_t msg;

				(void) fc_ulp_error(res, &msg);

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "resume failed getportmap: reason=0x%x",
				    res);

				fcp_log(CE_WARN, pptr->port_dip,
				    "!failed to get port map : %s", msg);
				break;
			}
			/*
			 * fc_ulp_getportmap() may have reallocated the
			 * list; track the larger size for the kmem_free.
			 */
			if (max_cnt > alloc_cnt) {
				alloc_cnt = max_cnt;
			}
		}

		/*
		 * do the SCSI device discovery and create
		 * the devinfos
		 */
		fcp_statec_callback(ulph, pptr->port_fp_handle,
		    pptr->port_phys_state, pptr->port_topology, tmp_list,
		    max_cnt, pptr->port_id);

		res = DDI_SUCCESS;
		break;

	default:
		/* Unknown physical state: mark the port offline. */
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: invalid port state at attach=0x%x",
		    instance, pptr->port_phys_state);

		mutex_enter(&pptr->port_mutex);
		pptr->port_phys_state = FCP_STATE_OFFLINE;
		mutex_exit(&pptr->port_mutex);
		res = DDI_SUCCESS;

		break;
	}

	if (tmp_list != NULL) {
		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
	}

	return (res);
}
12271 12266
12272 12267
12273 12268 static void
12274 12269 fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
12275 12270 {
12276 12271 pptr->port_fp_modlinkage = *pinfo->port_linkage;
12277 12272 pptr->port_dip = pinfo->port_dip;
12278 12273 pptr->port_fp_handle = pinfo->port_handle;
12279 12274 if (pinfo->port_acc_attr != NULL) {
12280 12275 /*
12281 12276 * FCA supports DMA
12282 12277 */
12283 12278 pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
12284 12279 pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
12285 12280 pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
12286 12281 pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
12287 12282 }
12288 12283 pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
12289 12284 pptr->port_max_exch = pinfo->port_fca_max_exch;
12290 12285 pptr->port_phys_state = pinfo->port_state;
12291 12286 pptr->port_topology = pinfo->port_flags;
12292 12287 pptr->port_reset_action = pinfo->port_reset_action;
12293 12288 pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
12294 12289 pptr->port_fcp_dma = pinfo->port_fcp_dma;
12295 12290 bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
12296 12291 bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
12297 12292
12298 12293 /* Clear FMA caps to avoid fm-capability ereport */
12299 12294 if (pptr->port_cmd_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12300 12295 pptr->port_cmd_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12301 12296 if (pptr->port_data_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12302 12297 pptr->port_data_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12303 12298 if (pptr->port_resp_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12304 12299 pptr->port_resp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12305 12300 }
12306 12301
/*
 * If the element's wait field is set to 1, another thread is waiting for
 * the operation to complete. Once it is complete, the waiting thread is
 * signaled, and the element is freed by that waiting thread. If the
 * element's wait field is set to 0, the element is freed here.
 */
12314 12309 static void
12315 12310 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12316 12311 {
12317 12312 ASSERT(elem != NULL);
12318 12313 mutex_enter(&elem->mutex);
12319 12314 elem->result = result;
12320 12315 if (elem->wait) {
12321 12316 elem->wait = 0;
12322 12317 cv_signal(&elem->cv);
12323 12318 mutex_exit(&elem->mutex);
12324 12319 } else {
12325 12320 mutex_exit(&elem->mutex);
12326 12321 cv_destroy(&elem->cv);
12327 12322 mutex_destroy(&elem->mutex);
12328 12323 kmem_free(elem, sizeof (struct fcp_hp_elem));
12329 12324 }
12330 12325 }
12331 12326
12332 12327 /*
12333 12328 * This function is invoked from the taskq thread to allocate
12334 12329 * devinfo nodes and to online/offline them.
12335 12330 */
static void
fcp_hp_task(void *arg)
{
	struct fcp_hp_elem	*elem = (struct fcp_hp_elem *)arg;
	struct fcp_lun	*plun = elem->lun;
	struct fcp_port	*pptr = elem->port;
	int	result;

	ASSERT(elem->what == FCP_ONLINE ||
	    elem->what == FCP_OFFLINE ||
	    elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
	    elem->what == FCP_MPXIO_PATH_SET_BUSY);

	/*
	 * Fail the request without touching the device tree if the
	 * online/offline request is stale (the LUN's event count moved
	 * on since the element was queued) or the port is suspended,
	 * detaching, or powered down.
	 */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);
	if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
	    plun->lun_event_count != elem->event_cnt) ||
	    pptr->port_state & (FCP_STATE_SUSPENDED |
	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);
		fcp_process_elem(elem, NDI_FAILURE);
		return;
	}
	mutex_exit(&plun->lun_mutex);
	mutex_exit(&pptr->port_mutex);

	/*
	 * Perform the actual online/offline (or MPxIO path busy state
	 * change) with no fcp locks held, then complete the element so
	 * any waiter is signaled (or the element is freed).
	 */
	result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
	    elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
	fcp_process_elem(elem, result);
}
12367 12362
12368 12363
12369 12364 static child_info_t *
12370 12365 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12371 12366 int tcount)
12372 12367 {
12373 12368 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12374 12369
12375 12370 if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12376 12371 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12377 12372
12378 12373 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12379 12374 /*
12380 12375 * Child has not been created yet. Create the child device
12381 12376 * based on the per-Lun flags.
12382 12377 */
12383 12378 if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12384 12379 plun->lun_cip =
12385 12380 CIP(fcp_create_dip(plun, lcount, tcount));
12386 12381 plun->lun_mpxio = 0;
12387 12382 } else {
12388 12383 plun->lun_cip =
12389 12384 CIP(fcp_create_pip(plun, lcount, tcount));
12390 12385 plun->lun_mpxio = 1;
12391 12386 }
12392 12387 } else {
12393 12388 plun->lun_cip = cip;
12394 12389 }
12395 12390
12396 12391 return (plun->lun_cip);
12397 12392 }
12398 12393
12399 12394
12400 12395 static int
12401 12396 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12402 12397 {
12403 12398 int rval = FC_FAILURE;
12404 12399 dev_info_t *pdip;
12405 12400 struct dev_info *dip;
12406 12401 int circular;
12407 12402
12408 12403 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12409 12404
12410 12405 pdip = plun->lun_tgt->tgt_port->port_dip;
12411 12406
12412 12407 if (plun->lun_cip == NULL) {
12413 12408 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12414 12409 fcp_trace, FCP_BUF_LEVEL_3, 0,
12415 12410 "fcp_is_dip_present: plun->lun_cip is NULL: "
12416 12411 "plun: %p lun state: %x num: %d target state: %x",
12417 12412 plun, plun->lun_state, plun->lun_num,
12418 12413 plun->lun_tgt->tgt_port->port_state);
12419 12414 return (rval);
12420 12415 }
12421 12416 ndi_devi_enter(pdip, &circular);
12422 12417 dip = DEVI(pdip)->devi_child;
12423 12418 while (dip) {
12424 12419 if (dip == DEVI(cdip)) {
12425 12420 rval = FC_SUCCESS;
12426 12421 break;
12427 12422 }
12428 12423 dip = dip->devi_sibling;
12429 12424 }
12430 12425 ndi_devi_exit(pdip, circular);
12431 12426 return (rval);
12432 12427 }
12433 12428
12434 12429 static int
12435 12430 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12436 12431 {
12437 12432 int rval = FC_FAILURE;
12438 12433
12439 12434 ASSERT(plun != NULL);
12440 12435 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12441 12436
12442 12437 if (plun->lun_mpxio == 0) {
12443 12438 rval = fcp_is_dip_present(plun, DIP(cip));
12444 12439 } else {
12445 12440 rval = fcp_is_pip_present(plun, PIP(cip));
12446 12441 }
12447 12442
12448 12443 return (rval);
12449 12444 }
12450 12445
12451 12446 /*
12452 12447 * Function: fcp_create_dip
12453 12448 *
12454 12449 * Description: Creates a dev_info_t structure for the LUN specified by the
12455 12450 * caller.
12456 12451 *
12457 12452 * Argument: plun Lun structure
12458 12453 * link_cnt Link state count.
12459 12454 * tgt_cnt Target state change count.
12460 12455 *
12461 12456 * Return Value: NULL if it failed
12462 12457 * dev_info_t structure address if it succeeded
12463 12458 *
12464 12459 * Context: Kernel context
12465 12460 */
static dev_info_t *
fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
{
	int			failure = 0;
	uint32_t		tgt_id;
	uint64_t		sam_lun;
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	struct fcp_port	*pptr = ptgt->tgt_port;
	dev_info_t		*pdip = pptr->port_dip;
	dev_info_t		*cdip = NULL;
	dev_info_t		*old_dip = DIP(plun->lun_cip);
	char			*nname = NULL;
	char			**compatible = NULL;
	int			ncompatible;
	char			*scsi_binding_set;
	char			t_pwwn[17];

	ASSERT(MUTEX_HELD(&plun->lun_mutex));
	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	/* get the 'scsi-binding-set' property */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
	    &scsi_binding_set) != DDI_PROP_SUCCESS) {
		scsi_binding_set = NULL;
	}

	/* determine the node name and compatible */
	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
	if (scsi_binding_set) {
		ddi_prop_free(scsi_binding_set);
	}

	/* No node name means no driver binding exists for this device. */
	if (nname == NULL) {
#ifdef DEBUG
		cmn_err(CE_WARN, "%s%d: no driver for "
		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
		    " compatible: %s",
		    ddi_driver_name(pdip), ddi_get_instance(pdip),
		    ptgt->tgt_port_wwn.raw_wwn[0],
		    ptgt->tgt_port_wwn.raw_wwn[1],
		    ptgt->tgt_port_wwn.raw_wwn[2],
		    ptgt->tgt_port_wwn.raw_wwn[3],
		    ptgt->tgt_port_wwn.raw_wwn[4],
		    ptgt->tgt_port_wwn.raw_wwn[5],
		    ptgt->tgt_port_wwn.raw_wwn[6],
		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
		    *compatible);
#endif /* DEBUG */
		failure++;
		goto end_of_fcp_create_dip;
	}

	cdip = fcp_find_existing_dip(plun, pdip, nname);

	/*
	 * if the old_dip does not match the cdip, that means there is
	 * some property change. since we'll be using the cdip, we need
	 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
	 * then the dtype for the device has been updated. Offline the
	 * the old device and create a new device with the new device type
	 * Refer to bug: 4764752
	 */
	if (old_dip && (cdip != old_dip ||
	    plun->lun_state & FCP_LUN_CHANGED)) {
		plun->lun_state &= ~(FCP_LUN_INIT);
		/*
		 * Both locks must be dropped before queueing the offline
		 * of the stale node; fcp_pass_to_hp() is called with only
		 * tgt_mutex held.
		 */
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		mutex_enter(&ptgt->tgt_mutex);
		(void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
		    link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
		mutex_exit(&ptgt->tgt_mutex);

#ifdef DEBUG
		if (cdip != NULL) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "Old dip=%p; New dip=%p don't match", old_dip,
			    cdip);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "Old dip=%p; New dip=NULL don't match", old_dip);
		}
#endif

		/* Reacquire in the original order: port, then lun. */
		mutex_enter(&pptr->port_mutex);
		mutex_enter(&plun->lun_mutex);
	}

	/* Allocate a fresh node if none exists or the dtype changed. */
	if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
		plun->lun_state &= ~(FCP_LUN_CHANGED);
		if (ndi_devi_alloc(pptr->port_dip, nname,
		    DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
			failure++;
			goto end_of_fcp_create_dip;
		}
	}

	/*
	 * Previously all the properties for the devinfo were destroyed here
	 * with a call to ndi_prop_remove_all(). Since this may cause loss of
	 * the devid property (and other properties established by the target
	 * driver or framework) which the code does not always recreate, this
	 * call was removed.
	 * This opens a theoretical possibility that we may return with a
	 * stale devid on the node if the scsi entity behind the fibre channel
	 * lun has changed.
	 */

	/* decorate the node with compatible */
	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	/* Target port WWN as a NUL-terminated ASCII hex string. */
	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
	t_pwwn[16] = '\0';
	if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
	    != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	/*
	 * If there is no hard address - We might have to deal with
	 * that by using WWN - Having said that it is important to
	 * recognize this problem early so ssd can be informed of
	 * the right interconnect type.
	 */
	if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
		tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
	} else {
		tgt_id = ptgt->tgt_d_id;
	}

	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
	    tgt_id) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
	    (int)plun->lun_num) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}
	/* SAM LUN is the raw 8-byte FCP LUN address as a 64-bit int. */
	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
	if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
	    sam_lun) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

end_of_fcp_create_dip:
	scsi_hba_nodename_compatible_free(nname, compatible);

	/* On any failure, undo the node allocation and return NULL. */
	if (cdip != NULL && failure) {
		(void) ndi_prop_remove_all(cdip);
		(void) ndi_devi_free(cdip);
		cdip = NULL;
	}

	return (cdip);
}
12646 12641
12647 12642 /*
12648 12643 * Function: fcp_create_pip
12649 12644 *
12650 12645 * Description: Creates a Path Id for the LUN specified by the caller.
12651 12646 *
12652 12647 * Argument: plun Lun structure
12653 12648 * link_cnt Link state count.
12654 12649 * tgt_cnt Target state count.
12655 12650 *
12656 12651 * Return Value: NULL if it failed
12657 12652 * mdi_pathinfo_t structure address if it succeeded
12658 12653 *
12659 12654 * Context: Kernel context
12660 12655 */
12661 12656 static mdi_pathinfo_t *
12662 12657 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12663 12658 {
12664 12659 int i;
12665 12660 char buf[MAXNAMELEN];
12666 12661 char uaddr[MAXNAMELEN];
12667 12662 int failure = 0;
12668 12663 uint32_t tgt_id;
12669 12664 uint64_t sam_lun;
12670 12665 struct fcp_tgt *ptgt = plun->lun_tgt;
12671 12666 struct fcp_port *pptr = ptgt->tgt_port;
12672 12667 dev_info_t *pdip = pptr->port_dip;
12673 12668 mdi_pathinfo_t *pip = NULL;
12674 12669 mdi_pathinfo_t *old_pip = PIP(plun->lun_cip);
12675 12670 char *nname = NULL;
12676 12671 char **compatible = NULL;
12677 12672 int ncompatible;
12678 12673 char *scsi_binding_set;
12679 12674 char t_pwwn[17];
12680 12675
12681 12676 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12682 12677 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12683 12678
12684 12679 scsi_binding_set = "vhci";
12685 12680
12686 12681 /* determine the node name and compatible */
12687 12682 scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12688 12683 plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12689 12684
12690 12685 if (nname == NULL) {
12691 12686 #ifdef DEBUG
12692 12687 cmn_err(CE_WARN, "fcp_create_dip: %s%d: no driver for "
12693 12688 "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12694 12689 " compatible: %s",
12695 12690 ddi_driver_name(pdip), ddi_get_instance(pdip),
12696 12691 ptgt->tgt_port_wwn.raw_wwn[0],
12697 12692 ptgt->tgt_port_wwn.raw_wwn[1],
12698 12693 ptgt->tgt_port_wwn.raw_wwn[2],
12699 12694 ptgt->tgt_port_wwn.raw_wwn[3],
12700 12695 ptgt->tgt_port_wwn.raw_wwn[4],
12701 12696 ptgt->tgt_port_wwn.raw_wwn[5],
12702 12697 ptgt->tgt_port_wwn.raw_wwn[6],
12703 12698 ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12704 12699 *compatible);
12705 12700 #endif /* DEBUG */
12706 12701 failure++;
12707 12702 goto end_of_fcp_create_pip;
12708 12703 }
12709 12704
12710 12705 pip = fcp_find_existing_pip(plun, pdip);
12711 12706
12712 12707 /*
12713 12708 * if the old_dip does not match the cdip, that means there is
12714 12709 * some property change. since we'll be using the cdip, we need
12715 12710 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12716 12711 * then the dtype for the device has been updated. Offline the
12717 12712 * the old device and create a new device with the new device type
12718 12713 * Refer to bug: 4764752
12719 12714 */
12720 12715 if (old_pip && (pip != old_pip ||
12721 12716 plun->lun_state & FCP_LUN_CHANGED)) {
12722 12717 plun->lun_state &= ~(FCP_LUN_INIT);
12723 12718 mutex_exit(&plun->lun_mutex);
12724 12719 mutex_exit(&pptr->port_mutex);
12725 12720
12726 12721 mutex_enter(&ptgt->tgt_mutex);
12727 12722 (void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12728 12723 FCP_OFFLINE, lcount, tcount,
12729 12724 NDI_DEVI_REMOVE, 0);
12730 12725 mutex_exit(&ptgt->tgt_mutex);
12731 12726
12732 12727 if (pip != NULL) {
12733 12728 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12734 12729 fcp_trace, FCP_BUF_LEVEL_2, 0,
12735 12730 "Old pip=%p; New pip=%p don't match",
12736 12731 old_pip, pip);
12737 12732 } else {
12738 12733 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12739 12734 fcp_trace, FCP_BUF_LEVEL_2, 0,
12740 12735 "Old pip=%p; New pip=NULL don't match",
12741 12736 old_pip);
12742 12737 }
12743 12738
12744 12739 mutex_enter(&pptr->port_mutex);
12745 12740 mutex_enter(&plun->lun_mutex);
12746 12741 }
12747 12742
12748 12743 /*
12749 12744 * Since FC_WWN_SIZE is 8 bytes and its not like the
12750 12745 * lun_guid_size which is dependent on the target, I don't
12751 12746 * believe the same trancation happens here UNLESS the standards
12752 12747 * change the FC_WWN_SIZE value to something larger than
12753 12748 * MAXNAMELEN(currently 255 bytes).
12754 12749 */
12755 12750
12756 12751 for (i = 0; i < FC_WWN_SIZE; i++) {
12757 12752 (void) sprintf(&buf[i << 1], "%02x",
12758 12753 ptgt->tgt_port_wwn.raw_wwn[i]);
12759 12754 }
12760 12755
12761 12756 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12762 12757 buf, plun->lun_num);
12763 12758
12764 12759 if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12765 12760 /*
12766 12761 * Release the locks before calling into
12767 12762 * mdi_pi_alloc_compatible() since this can result in a
12768 12763 * callback into fcp which can result in a deadlock
12769 12764 * (see bug # 4870272).
12770 12765 *
12771 12766 * Basically, what we are trying to avoid is the scenario where
12772 12767 * one thread does ndi_devi_enter() and tries to grab
12773 12768 * fcp_mutex and another does it the other way round.
12774 12769 *
12775 12770 * But before we do that, make sure that nobody releases the
12776 12771 * port in the meantime. We can do this by setting a flag.
12777 12772 */
12778 12773 plun->lun_state &= ~(FCP_LUN_CHANGED);
12779 12774 pptr->port_state |= FCP_STATE_IN_MDI;
12780 12775 mutex_exit(&plun->lun_mutex);
12781 12776 mutex_exit(&pptr->port_mutex);
12782 12777 if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12783 12778 uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12784 12779 fcp_log(CE_WARN, pptr->port_dip,
12785 12780 "!path alloc failed:0x%x", plun);
12786 12781 mutex_enter(&pptr->port_mutex);
12787 12782 mutex_enter(&plun->lun_mutex);
12788 12783 pptr->port_state &= ~FCP_STATE_IN_MDI;
12789 12784 failure++;
12790 12785 goto end_of_fcp_create_pip;
12791 12786 }
12792 12787 mutex_enter(&pptr->port_mutex);
12793 12788 mutex_enter(&plun->lun_mutex);
12794 12789 pptr->port_state &= ~FCP_STATE_IN_MDI;
12795 12790 } else {
12796 12791 (void) mdi_prop_remove(pip, NULL);
12797 12792 }
12798 12793
12799 12794 mdi_pi_set_phci_private(pip, (caddr_t)plun);
12800 12795
12801 12796 if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12802 12797 ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12803 12798 != DDI_PROP_SUCCESS) {
12804 12799 failure++;
12805 12800 goto end_of_fcp_create_pip;
12806 12801 }
12807 12802
12808 12803 if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12809 12804 ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12810 12805 != DDI_PROP_SUCCESS) {
12811 12806 failure++;
12812 12807 goto end_of_fcp_create_pip;
12813 12808 }
12814 12809
12815 12810 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12816 12811 t_pwwn[16] = '\0';
12817 12812 if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12818 12813 != DDI_PROP_SUCCESS) {
12819 12814 failure++;
12820 12815 goto end_of_fcp_create_pip;
12821 12816 }
12822 12817
12823 12818 /*
12824 12819 * If there is no hard address - We might have to deal with
12825 12820 * that by using WWN - Having said that it is important to
12826 12821 * recognize this problem early so ssd can be informed of
12827 12822 * the right interconnect type.
12828 12823 */
12829 12824 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12830 12825 ptgt->tgt_hard_addr != 0) {
12831 12826 tgt_id = (uint32_t)
12832 12827 fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12833 12828 } else {
12834 12829 tgt_id = ptgt->tgt_d_id;
12835 12830 }
12836 12831
12837 12832 if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12838 12833 != DDI_PROP_SUCCESS) {
12839 12834 failure++;
12840 12835 goto end_of_fcp_create_pip;
12841 12836 }
12842 12837
12843 12838 if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12844 12839 != DDI_PROP_SUCCESS) {
12845 12840 failure++;
12846 12841 goto end_of_fcp_create_pip;
12847 12842 }
12848 12843 bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12849 12844 if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12850 12845 != DDI_PROP_SUCCESS) {
12851 12846 failure++;
12852 12847 goto end_of_fcp_create_pip;
12853 12848 }
12854 12849
12855 12850 end_of_fcp_create_pip:
12856 12851 scsi_hba_nodename_compatible_free(nname, compatible);
12857 12852
12858 12853 if (pip != NULL && failure) {
12859 12854 (void) mdi_prop_remove(pip, NULL);
12860 12855 mutex_exit(&plun->lun_mutex);
12861 12856 mutex_exit(&pptr->port_mutex);
12862 12857 (void) mdi_pi_free(pip, 0);
12863 12858 mutex_enter(&pptr->port_mutex);
12864 12859 mutex_enter(&plun->lun_mutex);
12865 12860 pip = NULL;
12866 12861 }
12867 12862
12868 12863 return (pip);
12869 12864 }
12870 12865
12871 12866 static dev_info_t *
12872 12867 fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
12873 12868 {
12874 12869 uint_t nbytes;
12875 12870 uchar_t *bytes;
12876 12871 uint_t nwords;
12877 12872 uint32_t tgt_id;
12878 12873 int *words;
12879 12874 dev_info_t *cdip;
12880 12875 dev_info_t *ndip;
12881 12876 struct fcp_tgt *ptgt = plun->lun_tgt;
12882 12877 struct fcp_port *pptr = ptgt->tgt_port;
12883 12878 int circular;
12884 12879
12885 12880 ndi_devi_enter(pdip, &circular);
12886 12881
12887 12882 ndip = (dev_info_t *)DEVI(pdip)->devi_child;
12888 12883 while ((cdip = ndip) != NULL) {
12889 12884 ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;
12890 12885
12891 12886 if (strcmp(DEVI(cdip)->devi_node_name, name)) {
12892 12887 continue;
12893 12888 }
12894 12889
12895 12890 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12896 12891 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
12897 12892 &nbytes) != DDI_PROP_SUCCESS) {
12898 12893 continue;
12899 12894 }
12900 12895
12901 12896 if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12902 12897 if (bytes != NULL) {
12903 12898 ddi_prop_free(bytes);
12904 12899 }
12905 12900 continue;
12906 12901 }
12907 12902 ASSERT(bytes != NULL);
12908 12903
12909 12904 if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
12910 12905 ddi_prop_free(bytes);
12911 12906 continue;
12912 12907 }
12913 12908
12914 12909 ddi_prop_free(bytes);
12915 12910
12916 12911 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12917 12912 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12918 12913 &nbytes) != DDI_PROP_SUCCESS) {
12919 12914 continue;
12920 12915 }
12921 12916
12922 12917 if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12923 12918 if (bytes != NULL) {
12924 12919 ddi_prop_free(bytes);
12925 12920 }
12926 12921 continue;
12927 12922 }
12928 12923 ASSERT(bytes != NULL);
12929 12924
12930 12925 if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
12931 12926 ddi_prop_free(bytes);
12932 12927 continue;
12933 12928 }
12934 12929
12935 12930 ddi_prop_free(bytes);
12936 12931
12937 12932 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12938 12933 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
12939 12934 &nwords) != DDI_PROP_SUCCESS) {
12940 12935 continue;
12941 12936 }
12942 12937
12943 12938 if (nwords != 1 || words == NULL) {
12944 12939 if (words != NULL) {
12945 12940 ddi_prop_free(words);
12946 12941 }
12947 12942 continue;
12948 12943 }
12949 12944 ASSERT(words != NULL);
12950 12945
12951 12946 /*
12952 12947 * If there is no hard address - We might have to deal with
12953 12948 * that by using WWN - Having said that it is important to
12954 12949 * recognize this problem early so ssd can be informed of
12955 12950 * the right interconnect type.
12956 12951 */
12957 12952 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12958 12953 ptgt->tgt_hard_addr != 0) {
12959 12954 tgt_id =
12960 12955 (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12961 12956 } else {
12962 12957 tgt_id = ptgt->tgt_d_id;
12963 12958 }
12964 12959
12965 12960 if (tgt_id != (uint32_t)*words) {
12966 12961 ddi_prop_free(words);
12967 12962 continue;
12968 12963 }
12969 12964 ddi_prop_free(words);
12970 12965
12971 12966 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12972 12967 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
12973 12968 &nwords) != DDI_PROP_SUCCESS) {
12974 12969 continue;
12975 12970 }
12976 12971
12977 12972 if (nwords != 1 || words == NULL) {
12978 12973 if (words != NULL) {
12979 12974 ddi_prop_free(words);
12980 12975 }
12981 12976 continue;
12982 12977 }
12983 12978 ASSERT(words != NULL);
12984 12979
12985 12980 if (plun->lun_num == (uint16_t)*words) {
12986 12981 ddi_prop_free(words);
12987 12982 break;
12988 12983 }
12989 12984 ddi_prop_free(words);
12990 12985 }
12991 12986 ndi_devi_exit(pdip, circular);
12992 12987
12993 12988 return (cdip);
12994 12989 }
12995 12990
12996 12991
12997 12992 static int
12998 12993 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
12999 12994 {
13000 12995 dev_info_t *pdip;
13001 12996 char buf[MAXNAMELEN];
13002 12997 char uaddr[MAXNAMELEN];
13003 12998 int rval = FC_FAILURE;
13004 12999
13005 13000 ASSERT(MUTEX_HELD(&plun->lun_mutex));
13006 13001
13007 13002 pdip = plun->lun_tgt->tgt_port->port_dip;
13008 13003
13009 13004 /*
13010 13005 * Check if pip (and not plun->lun_cip) is NULL. plun->lun_cip can be
13011 13006 * non-NULL even when the LUN is not there as in the case when a LUN is
13012 13007 * configured and then deleted on the device end (for T3/T4 case). In
13013 13008 * such cases, pip will be NULL.
13014 13009 *
13015 13010 * If the device generates an RSCN, it will end up getting offlined when
13016 13011 * it disappeared and a new LUN will get created when it is rediscovered
13017 13012 * on the device. If we check for lun_cip here, the LUN will not end
13018 13013 * up getting onlined since this function will end up returning a
13019 13014 * FC_SUCCESS.
13020 13015 *
13021 13016 * The behavior is different on other devices. For instance, on a HDS,
13022 13017 * there was no RSCN generated by the device but the next I/O generated
13023 13018 * a check condition and rediscovery got triggered that way. So, in
13024 13019 * such cases, this path will not be exercised
13025 13020 */
13026 13021 if (pip == NULL) {
13027 13022 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
13028 13023 fcp_trace, FCP_BUF_LEVEL_4, 0,
13029 13024 "fcp_is_pip_present: plun->lun_cip is NULL: "
13030 13025 "plun: %p lun state: %x num: %d target state: %x",
13031 13026 plun, plun->lun_state, plun->lun_num,
13032 13027 plun->lun_tgt->tgt_port->port_state);
13033 13028 return (rval);
13034 13029 }
13035 13030
13036 13031 fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
13037 13032
13038 13033 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13039 13034
13040 13035 if (mdi_pi_find(pdip, NULL, uaddr) == pip) {
13041 13036 rval = FC_SUCCESS;
13042 13037 }
13043 13038
13044 13039 return (rval);
13045 13040 }
13046 13041
13047 13042 static mdi_pathinfo_t *
13048 13043 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
13049 13044 {
13050 13045 char buf[MAXNAMELEN];
13051 13046 char uaddr[MAXNAMELEN];
13052 13047 mdi_pathinfo_t *pip;
13053 13048 struct fcp_tgt *ptgt = plun->lun_tgt;
13054 13049 struct fcp_port *pptr = ptgt->tgt_port;
13055 13050
13056 13051 ASSERT(MUTEX_HELD(&pptr->port_mutex));
13057 13052
13058 13053 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
13059 13054 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13060 13055
13061 13056 pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
13062 13057
13063 13058 return (pip);
13064 13059 }
13065 13060
13066 13061
/*
 * fcp_online_child
 *
 * Online the child node attached to plun: a devinfo node via
 * ndi_devi_online/ndi_devi_bind_driver when MPxIO is off (lun_mpxio == 0),
 * or a pathinfo node via mdi_pi_online when MPxIO is on.  On success the
 * LUN is marked FCP_LUN_INIT and an FCAL insert event is posted.  If MDI
 * reports MDI_NOT_SUPPORTED, the LUN is re-enumerated in legacy (non-MPxIO)
 * mode and the online is retried via the `again' label.
 *
 * Entered and exited with both port_mutex and lun_mutex held; both are
 * dropped around the NDI/MDI calls, so lun state may change while they
 * are released.  Returns NDI_SUCCESS or NDI_FAILURE.
 */
static int
fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags, int *circ)
{
	int rval;
	struct fcp_port *pptr = plun->lun_tgt->tgt_port;
	struct fcp_tgt *ptgt = plun->lun_tgt;
	dev_info_t *cdip = NULL;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));
	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	/* No child node to online. */
	if (plun->lun_cip == NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_online_child: plun->lun_cip is NULL: "
		    "plun: %p state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (NDI_FAILURE);
	}
again:
	if (plun->lun_mpxio == 0) {
		/* Legacy (non-MPxIO) path: online the devinfo node. */
		cdip = DIP(cip);
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "!Invoking ndi_devi_online for %s: target=%x lun=%x",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);

		/*
		 * We could check for FCP_LUN_INIT here but chances
		 * of getting here when it's already in FCP_LUN_INIT
		 * is rare and a duplicate ndi_devi_online wouldn't
		 * hurt either (as the node would already have been
		 * in CF2)
		 */
		if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
			rval = ndi_devi_bind_driver(cdip, flags);
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "!Invoking ndi_devi_bind_driver: rval=%d", rval);
		} else {
			rval = ndi_devi_online(cdip, flags);
		}

		/*
		 * We log the message into trace buffer if the device
		 * is "ses" and into syslog for any other device
		 * type. This is to prevent the ndi_devi_online failure
		 * message that appears for V880/A5K ses devices.
		 */
		if (rval == NDI_SUCCESS) {
			mutex_enter(&ptgt->tgt_mutex);
			plun->lun_state |= FCP_LUN_INIT;
			mutex_exit(&ptgt->tgt_mutex);
		} else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
			fcp_log(CE_NOTE, pptr->port_dip,
			    "!ndi_devi_online:"
			    " failed for %s: target=%x lun=%x %x",
			    ddi_get_name(cdip), ptgt->tgt_d_id,
			    plun->lun_num, rval);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    " !ndi_devi_online:"
			    " failed for %s: target=%x lun=%x %x",
			    ddi_get_name(cdip), ptgt->tgt_d_id,
			    plun->lun_num, rval);
		}
	} else {
		/* MPxIO path: online the pathinfo node. */
		cdip = mdi_pi_get_client(PIP(cip));
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "!Invoking mdi_pi_online for %s: target=%x lun=%x",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);

		/*
		 * Hold path and exit phci to avoid deadlock with power
		 * management code during mdi_pi_online.
		 */
		mdi_hold_path(PIP(cip));
		mdi_devi_exit_phci(pptr->port_dip, *circ);

		rval = mdi_pi_online(PIP(cip), flags);

		mdi_devi_enter_phci(pptr->port_dip, circ);
		mdi_rele_path(PIP(cip));

		if (rval == MDI_SUCCESS) {
			mutex_enter(&ptgt->tgt_mutex);
			plun->lun_state |= FCP_LUN_INIT;
			mutex_exit(&ptgt->tgt_mutex);

			/*
			 * Clear MPxIO path permanent disable in case
			 * fcp hotplug dropped the offline event.
			 */
			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);

		} else if (rval == MDI_NOT_SUPPORTED) {
			child_info_t *old_cip = cip;

			/*
			 * MPxIO does not support this device yet.
			 * Enumerate in legacy mode.
			 */
			mutex_enter(&pptr->port_mutex);
			mutex_enter(&plun->lun_mutex);
			plun->lun_mpxio = 0;
			plun->lun_cip = NULL;
			cdip = fcp_create_dip(plun, lcount, tcount);
			plun->lun_cip = cip = CIP(cdip);
			if (cip == NULL) {
				fcp_log(CE_WARN, pptr->port_dip,
				    "!fcp_online_child: "
				    "Create devinfo failed for LU=%p", plun);
				mutex_exit(&plun->lun_mutex);

				mutex_enter(&ptgt->tgt_mutex);
				plun->lun_state |= FCP_LUN_OFFLINE;
				mutex_exit(&ptgt->tgt_mutex);

				mutex_exit(&pptr->port_mutex);

				/*
				 * free the mdi_pathinfo node
				 */
				(void) mdi_pi_free(PIP(old_cip), 0);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_3, 0,
				    "fcp_online_child: creating devinfo "
				    "node 0x%p for plun 0x%p",
				    cip, plun);
				mutex_exit(&plun->lun_mutex);
				mutex_exit(&pptr->port_mutex);
				/*
				 * free the mdi_pathinfo node
				 */
				(void) mdi_pi_free(PIP(old_cip), 0);
				mutex_enter(&pptr->port_mutex);
				mutex_enter(&plun->lun_mutex);
				/* Retry the online with the new devinfo. */
				goto again;
			}
		} else {
			if (cdip) {
				fcp_log(CE_NOTE, pptr->port_dip,
				    "!fcp_online_child: mdi_pi_online:"
				    " failed for %s: target=%x lun=%x %x",
				    ddi_get_name(cdip), ptgt->tgt_d_id,
				    plun->lun_num, rval);
			}
		}
		/* Map the MDI result onto the NDI return convention. */
		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
	}

	/* Post the FCAL insert event only after a successful online. */
	if (rval == NDI_SUCCESS) {
		if (cdip) {
			(void) ndi_event_retrieve_cookie(
			    pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
			    &fcp_insert_eid, NDI_EVENT_NOPASS);
			(void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
			    cdip, fcp_insert_eid, NULL);
		}
	}
	/* Re-take the locks in the order the caller expects on return. */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);
	return (rval);
}
13242 13237
13243 13238 /* ARGSUSED */
/*
 * fcp_offline_child
 *
 * Offline the child node attached to plun: the devinfo node via
 * ndi_devi_offline when MPxIO is off, or the pathinfo node via
 * mdi_pi_offline when MPxIO is on.  Clears FCP_LUN_INIT regardless of the
 * outcome.  On success with NDI_DEVI_REMOVE set, drops the lun's reference
 * to the child (and any stale old-guid bookkeeping) before the node is
 * freed, so other threads never see a dangling lun_cip.
 *
 * Entered and exited with both lun_mutex and port_mutex held; both are
 * dropped around the NDI/MDI calls.  Returns NDI_SUCCESS or NDI_FAILURE.
 */
/* ARGSUSED */
static int
fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags, int *circ)
{
	int rval;
	int lun_mpxio;
	struct fcp_port *pptr = plun->lun_tgt->tgt_port;
	struct fcp_tgt *ptgt = plun->lun_tgt;
	dev_info_t *cdip;

	ASSERT(MUTEX_HELD(&plun->lun_mutex));
	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	if (plun->lun_cip == NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_offline_child: plun->lun_cip is NULL: "
		    "plun: %p lun state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (NDI_FAILURE);
	}

	/*
	 * We will use this value twice. Make a copy to be sure we use
	 * the same value in both places.
	 */
	lun_mpxio = plun->lun_mpxio;

	if (lun_mpxio == 0) {
		/* Legacy path: offline the devinfo node. */
		cdip = DIP(cip);
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);
		rval = ndi_devi_offline(DIP(cip), NDI_DEVFS_CLEAN | flags);
		if (rval != NDI_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_offline_child: ndi_devi_offline failed "
			    "rval=%x cip=%p", rval, cip);
		}
	} else {
		/* MPxIO path: offline the pathinfo node. */
		cdip = mdi_pi_get_client(PIP(cip));
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		/*
		 * Exit phci to avoid deadlock with power management code
		 * during mdi_pi_offline
		 */
		mdi_hold_path(PIP(cip));
		mdi_devi_exit_phci(pptr->port_dip, *circ);

		rval = mdi_pi_offline(PIP(cip), flags & ~NDI_DEVI_REMOVE);

		mdi_devi_enter_phci(pptr->port_dip, circ);
		mdi_rele_path(PIP(cip));

		/* Map the MDI result onto the NDI return convention. */
		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
	}

	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_state &= ~FCP_LUN_INIT;
	mutex_exit(&ptgt->tgt_mutex);

	if (rval == NDI_SUCCESS) {
		/* cdip is also used below as the "offline failed" flag. */
		cdip = NULL;
		if (flags & NDI_DEVI_REMOVE) {
			mutex_enter(&plun->lun_mutex);
			/*
			 * If the guid of the LUN changes, lun_cip will not
			 * equal to cip, and after offlining the LUN with the
			 * old guid, we should keep lun_cip since it's the cip
			 * of the LUN with the new guid.
			 * Otherwise remove our reference to child node.
			 *
			 * This must be done before the child node is freed,
			 * otherwise other threads could see a stale lun_cip
			 * pointer.
			 */
			if (plun->lun_cip == cip) {
				plun->lun_cip = NULL;
			}
			if (plun->lun_old_guid) {
				kmem_free(plun->lun_old_guid,
				    plun->lun_old_guid_size);
				plun->lun_old_guid = NULL;
				plun->lun_old_guid_size = 0;
			}
			mutex_exit(&plun->lun_mutex);
		}
	}

	if (lun_mpxio != 0) {
		if (rval == NDI_SUCCESS) {
			/*
			 * Clear MPxIO path permanent disable as the path is
			 * already offlined.
			 */
			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);

			if (flags & NDI_DEVI_REMOVE) {
				(void) mdi_pi_free(PIP(cip), 0);
			}
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_offline_child: mdi_pi_offline failed "
			    "rval=%x cip=%p", rval, cip);
		}
	}

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);

	/* Non-NULL cdip here means the offline did not succeed; trace it. */
	if (cdip) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
		    " target=%x lun=%x", "ndi_offline",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
	}

	return (rval);
}
13367 13362
/*
 * fcp_remove_child
 *
 * Free plun's child node and clear lun_cip.  In the legacy case the
 * devinfo node is freed directly; in the MPxIO case the pathinfo node is
 * offlined and freed with all three locks (port, tgt, lun) temporarily
 * dropped.  Since the mutex_exit calls below release port_mutex and
 * tgt_mutex unconditionally, the caller must hold all three locks on
 * entry (only lun_mutex is ASSERTed here); all three are re-held on
 * return.
 */
static void
fcp_remove_child(struct fcp_lun *plun)
{
	child_info_t *cip;
	int circ;

	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
		if (plun->lun_mpxio == 0) {
			/* Legacy devinfo child: strip properties and free. */
			(void) ndi_prop_remove_all(DIP(plun->lun_cip));
			(void) ndi_devi_free(DIP(plun->lun_cip));
			plun->lun_cip = NULL;
		} else {
			/*
			 * Clear reference to the child node in the lun.
			 * This must be done before freeing it with mdi_pi_free
			 * and with lun_mutex held so that other threads always
			 * see either valid lun_cip or NULL when holding
			 * lun_mutex. We keep a copy in cip.
			 */
			cip = plun->lun_cip;
			plun->lun_cip = NULL;

			/* Drop locks in reverse order before MDI calls. */
			mutex_exit(&plun->lun_mutex);
			mutex_exit(&plun->lun_tgt->tgt_mutex);
			mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);

			mdi_devi_enter(plun->lun_tgt->tgt_port->port_dip,
			    &circ);

			/*
			 * Exit phci to avoid deadlock with power management
			 * code during mdi_pi_offline
			 */
			mdi_hold_path(PIP(cip));
			mdi_devi_exit_phci(plun->lun_tgt->tgt_port->port_dip,
			    circ);
			(void) mdi_pi_offline(PIP(cip), 0);
			mdi_devi_enter_phci(plun->lun_tgt->tgt_port->port_dip,
			    &circ);
			mdi_rele_path(PIP(cip));

			mdi_devi_exit(plun->lun_tgt->tgt_port->port_dip, circ);

			FCP_TRACE(fcp_logq,
			    plun->lun_tgt->tgt_port->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "lun=%p pip freed %p", plun, cip);

			(void) mdi_prop_remove(PIP(cip), NULL);
			(void) mdi_pi_free(PIP(cip), 0);

			/* Re-take the locks in lock order for the caller. */
			mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
			mutex_enter(&plun->lun_tgt->tgt_mutex);
			mutex_enter(&plun->lun_mutex);
		}
	} else {
		/* Child already gone; just drop our stale reference. */
		plun->lun_cip = NULL;
	}
}
13429 13424
/*
 * fcp_watch
 *
 * called when a timeout occurs
 *
 * can be scheduled during an attach or resume (if not already running)
 *
 * one timeout is set up for all ports
 *
 * acquires and releases the global mutex
 *
 * For each port this routine: handles pending target/LUN offlines and
 * reset-delay expirations, fails or retransports SCSI packets that have
 * timed out, and retries or fails timed-out internal packets (PLOGI/PRLI/
 * commands).  It reschedules itself via timeout(9F) while
 * fcp_watchdog_init > 0.
 */
/*ARGSUSED*/
static void
fcp_watch(void *arg)
{
	struct fcp_port *pptr;
	struct fcp_ipkt *icmd;
	struct fcp_ipkt *nicmd;
	struct fcp_pkt *cmd;
	struct fcp_pkt *ncmd;
	struct fcp_pkt *tail;
	struct fcp_pkt *pcmd;
	struct fcp_pkt *save_head;
	struct fcp_port *save_port;

	/* increment global watchdog time */
	fcp_watchdog_time += fcp_watchdog_timeout;

	mutex_enter(&fcp_global_mutex);

	/* scan each port in our list */
	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
		save_port = fcp_port_head;
		pptr->port_state |= FCP_STATE_IN_WATCHDOG;
		mutex_exit(&fcp_global_mutex);

		mutex_enter(&pptr->port_mutex);
		/* Skip ports that are quiescing and have no internal pkts. */
		if (pptr->port_ipkt_list == NULL &&
		    (pptr->port_state & (FCP_STATE_SUSPENDED |
		    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
			pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
			mutex_exit(&pptr->port_mutex);
			mutex_enter(&fcp_global_mutex);
			goto end_of_watchdog;
		}

		/*
		 * We check if a list of targets need to be offlined.
		 */
		if (pptr->port_offline_tgts) {
			fcp_scan_offline_tgts(pptr);
		}

		/*
		 * We check if a list of luns need to be offlined.
		 */
		if (pptr->port_offline_luns) {
			fcp_scan_offline_luns(pptr);
		}

		/*
		 * We check if a list of targets or luns need to be reset.
		 */
		if (pptr->port_reset_list) {
			fcp_check_reset_delay(pptr);
		}

		mutex_exit(&pptr->port_mutex);

		/*
		 * This is where the pending commands (pkt) are checked for
		 * timeout.
		 */
		mutex_enter(&pptr->port_pkt_mutex);
		tail = pptr->port_pkt_tail;

		for (pcmd = NULL, cmd = pptr->port_pkt_head;
		    cmd != NULL; cmd = ncmd) {
			ncmd = cmd->cmd_next;
			/*
			 * If a command is in this queue the bit CFLAG_IN_QUEUE
			 * must be set.
			 */
			ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
			/*
			 * FCP_INVALID_TIMEOUT will be set for those
			 * command that need to be failed. Mostly those
			 * cmds that could not be queued down for the
			 * "timeout" value. cmd->cmd_timeout is used
			 * to try and requeue the command regularly.
			 */
			if (cmd->cmd_timeout >= fcp_watchdog_time) {
				/*
				 * This command hasn't timed out yet. Let's
				 * go to the next one.
				 */
				pcmd = cmd;
				goto end_of_loop;
			}

			/* Unlink the timed-out command from the queue. */
			if (cmd == pptr->port_pkt_head) {
				ASSERT(pcmd == NULL);
				pptr->port_pkt_head = cmd->cmd_next;
			} else {
				ASSERT(pcmd != NULL);
				pcmd->cmd_next = cmd->cmd_next;
			}

			if (cmd == pptr->port_pkt_tail) {
				ASSERT(cmd->cmd_next == NULL);
				pptr->port_pkt_tail = pcmd;
				if (pcmd) {
					pcmd->cmd_next = NULL;
				}
			}
			cmd->cmd_next = NULL;

			/*
			 * save the current head before dropping the
			 * mutex - If the head doesn't remain the
			 * same after re acquiring the mutex, just
			 * bail out and revisit on next tick.
			 *
			 * PS: The tail pointer can change as the commands
			 * get requeued after failure to retransport
			 */
			save_head = pptr->port_pkt_head;
			mutex_exit(&pptr->port_pkt_mutex);

			if (cmd->cmd_fp_pkt->pkt_timeout ==
			    FCP_INVALID_TIMEOUT) {
				struct scsi_pkt *pkt = cmd->cmd_pkt;
				struct fcp_lun *plun;
				struct fcp_tgt *ptgt;

				plun = ADDR2LUN(&pkt->pkt_address);
				ptgt = plun->lun_tgt;

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "SCSI cmd 0x%x to D_ID=%x timed out",
				    pkt->pkt_cdbp[0], ptgt->tgt_d_id);

				/*
				 * Fail the packet; the completion reason
				 * depends on whether an abort was already
				 * in progress for it.
				 */
				cmd->cmd_state == FCP_PKT_ABORTING ?
				    fcp_fail_cmd(cmd, CMD_RESET,
				    STAT_DEV_RESET) : fcp_fail_cmd(cmd,
				    CMD_TIMEOUT, STAT_ABORTED);
			} else {
				fcp_retransport_cmd(pptr, cmd);
			}
			mutex_enter(&pptr->port_pkt_mutex);
			if (save_head && save_head != pptr->port_pkt_head) {
				/*
				 * Looks like linked list got changed (mostly
				 * happens when an an OFFLINE LUN code starts
				 * returning overflow queue commands in
				 * parallel. So bail out and revisit during
				 * next tick
				 */
				break;
			}
		end_of_loop:
			/*
			 * Scan only upto the previously known tail pointer
			 * to avoid excessive processing - lots of new packets
			 * could have been added to the tail or the old ones
			 * re-queued.
			 */
			if (cmd == tail) {
				break;
			}
		}
		mutex_exit(&pptr->port_pkt_mutex);

		/* Now check the internal packet (ipkt) list for timeouts. */
		mutex_enter(&pptr->port_mutex);
		for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
			struct fcp_tgt *ptgt = icmd->ipkt_tgt;

			nicmd = icmd->ipkt_next;
			if ((icmd->ipkt_restart != 0) &&
			    (icmd->ipkt_restart >= fcp_watchdog_time)) {
				/* packet has not timed out */
				continue;
			}

			/* time for packet re-transport */
			if (icmd == pptr->port_ipkt_list) {
				pptr->port_ipkt_list = icmd->ipkt_next;
				if (pptr->port_ipkt_list) {
					pptr->port_ipkt_list->ipkt_prev =
					    NULL;
				}
			} else {
				icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
				if (icmd->ipkt_next) {
					icmd->ipkt_next->ipkt_prev =
					    icmd->ipkt_prev;
				}
			}
			icmd->ipkt_next = NULL;
			icmd->ipkt_prev = NULL;
			mutex_exit(&pptr->port_mutex);

			if (fcp_is_retryable(icmd)) {
				fc_ulp_rscn_info_t *rscnp =
				    (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
				    pkt_ulp_rscn_infop;

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "%x to D_ID=%x Retrying..",
				    icmd->ipkt_opcode,
				    icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);

				/*
				 * Update the RSCN count in the packet
				 * before resending.
				 */

				if (rscnp != NULL) {
					rscnp->ulp_rscn_count =
					    fc_ulp_get_rscn_count(pptr->
					    port_fp_handle);
				}

				mutex_enter(&pptr->port_mutex);
				mutex_enter(&ptgt->tgt_mutex);
				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
					mutex_exit(&ptgt->tgt_mutex);
					mutex_exit(&pptr->port_mutex);
					switch (icmd->ipkt_opcode) {
					int rval;
					case LA_ELS_PLOGI:
						if ((rval = fc_ulp_login(
						    pptr->port_fp_handle,
						    &icmd->ipkt_fpkt, 1)) ==
						    FC_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						if (fcp_handle_ipkt_errors(
						    pptr, ptgt, icmd, rval,
						    "PLOGI") == DDI_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						break;

					case LA_ELS_PRLI:
						if ((rval = fc_ulp_issue_els(
						    pptr->port_fp_handle,
						    icmd->ipkt_fpkt)) ==
						    FC_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						if (fcp_handle_ipkt_errors(
						    pptr, ptgt, icmd, rval,
						    "PRLI") == DDI_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						break;

					default:
						if ((rval = fcp_transport(
						    pptr->port_fp_handle,
						    icmd->ipkt_fpkt, 1)) ==
						    FC_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						/*
						 * NOTE(review): the "PRLI"
						 * label on this default arm
						 * looks copy-pasted from the
						 * PRLI case above — confirm
						 * it is intentional.
						 */
						if (fcp_handle_ipkt_errors(
						    pptr, ptgt, icmd, rval,
						    "PRLI") == DDI_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						break;
					}
				} else {
					mutex_exit(&ptgt->tgt_mutex);
					mutex_exit(&pptr->port_mutex);
				}
			} else {
				fcp_print_error(icmd->ipkt_fpkt);
			}

			/* Retry failed or not retryable: finish and free. */
			(void) fcp_call_finish_init(pptr, ptgt,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause);
			fcp_icmd_free(pptr, icmd);
			mutex_enter(&pptr->port_mutex);
		}

		pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
		mutex_exit(&pptr->port_mutex);
		mutex_enter(&fcp_global_mutex);

	end_of_watchdog:
		/*
		 * Bail out early before getting into trouble
		 */
		if (save_port != fcp_port_head) {
			break;
		}
	}

	if (fcp_watchdog_init > 0) {
		/* reschedule timeout to go again */
		fcp_watchdog_id =
		    timeout(fcp_watch, NULL, fcp_watchdog_tick);
	}
	mutex_exit(&fcp_global_mutex);
}
13749 13744
13750 13745
/*
 * fcp_check_reset_delay
 *
 * Walk the port's reset-delay list, unlink and free the elements selected
 * by the timeout comparison below, clear the FCP_RESET/FCP_LUN_BUSY state
 * on the corresponding target or LUN (if its change count still matches),
 * and abort any outstanding packets for it.  Called from fcp_watch() with
 * port_mutex held; port_mutex is dropped around fcp_abort_all(), which is
 * why the list head must be re-derived rather than trusted.
 */
static void
fcp_check_reset_delay(struct fcp_port *pptr)
{
	uint32_t tgt_cnt;
	int level;
	struct fcp_tgt *ptgt;
	struct fcp_lun *plun;
	struct fcp_reset_elem *cur = NULL;
	struct fcp_reset_elem *next = NULL;
	struct fcp_reset_elem *prev = NULL;

	ASSERT(mutex_owned(&pptr->port_mutex));

	next = pptr->port_reset_list;
	while ((cur = next) != NULL) {
		next = cur->next;

		/*
		 * NOTE(review): this skips elements with cur->timeout <
		 * fcp_watchdog_time and processes the others.  Confirm the
		 * comparison direction against how `timeout' is initialized
		 * by the code that queues reset elements — it is not visible
		 * here.
		 */
		if (cur->timeout < fcp_watchdog_time) {
			prev = cur;
			continue;
		}

		ptgt = cur->tgt;
		plun = cur->lun;
		tgt_cnt = cur->tgt_cnt;

		/* A target element means a target-level reset; else LUN. */
		if (ptgt) {
			level = RESET_TARGET;
		} else {
			ASSERT(plun != NULL);
			level = RESET_LUN;
			ptgt = plun->lun_tgt;
		}
		if (prev) {
			prev->next = next;
		} else {
			/*
			 * Because we drop port mutex while doing aborts for
			 * packets, we can't rely on reset_list pointing to
			 * our head
			 */
			if (cur == pptr->port_reset_list) {
				pptr->port_reset_list = next;
			} else {
				struct fcp_reset_elem *which;

				which = pptr->port_reset_list;
				while (which && which->next != cur) {
					which = which->next;
				}
				ASSERT(which != NULL);

				which->next = next;
				prev = which;
			}
		}

		kmem_free(cur, sizeof (*cur));

		/* Only act if no new state change superseded this reset. */
		if (tgt_cnt == ptgt->tgt_change_cnt) {
			mutex_enter(&ptgt->tgt_mutex);
			if (level == RESET_TARGET) {
				fcp_update_tgt_state(ptgt,
				    FCP_RESET, FCP_LUN_BUSY);
			} else {
				fcp_update_lun_state(plun,
				    FCP_RESET, FCP_LUN_BUSY);
			}
			mutex_exit(&ptgt->tgt_mutex);

			/* port_mutex dropped across the abort. */
			mutex_exit(&pptr->port_mutex);
			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
			mutex_enter(&pptr->port_mutex);
		}
	}
}
13827 13822
13828 13823
/*
 * Abort all commands for a target (ttgt) or a single LUN (rlun) after a
 * reset.  Two phases:
 *
 *  1) Drain matching commands from the port's overflow queue and complete
 *     them back to the target driver with CMD_RESET.
 *  2) Unless the FCA returns everything itself (FC_RESET_RETURN_ALL),
 *     walk each LUN's issued-packet list and ask the transport to abort
 *     each FCP_PKT_ISSUED packet; on abort failure the packet is parked
 *     on the overflow queue for the watchdog to time out.
 *
 * tgt_cnt is the target generation at reset time; work stops if the
 * target changes underneath us.
 */
static void
fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
    struct fcp_lun *rlun, int tgt_cnt)
{
	int rval;
	struct fcp_lun *tlun, *nlun;
	struct fcp_pkt *pcmd = NULL, *ncmd = NULL,
	    *cmd = NULL, *head = NULL,
	    *tail = NULL;

	/* Phase 1: unlink matching packets from the overflow queue. */
	mutex_enter(&pptr->port_pkt_mutex);
	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
		struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
		struct fcp_tgt *ptgt = plun->lun_tgt;

		ncmd = cmd->cmd_next;

		/* Keep packets for unrelated targets/LUNs in the queue. */
		if (ptgt != ttgt && plun != rlun) {
			pcmd = cmd;
			continue;
		}

		/* Unlink cmd; pcmd is the last packet we kept. */
		if (pcmd != NULL) {
			ASSERT(pptr->port_pkt_head != cmd);
			pcmd->cmd_next = ncmd;
		} else {
			ASSERT(cmd == pptr->port_pkt_head);
			pptr->port_pkt_head = ncmd;
		}
		if (pptr->port_pkt_tail == cmd) {
			ASSERT(cmd->cmd_next == NULL);
			pptr->port_pkt_tail = pcmd;
			if (pcmd != NULL) {
				pcmd->cmd_next = NULL;
			}
		}

		/* Append cmd to the local head/tail list. */
		if (head == NULL) {
			head = tail = cmd;
		} else {
			ASSERT(tail != NULL);
			tail->cmd_next = cmd;
			tail = cmd;
		}
		cmd->cmd_next = NULL;
	}
	mutex_exit(&pptr->port_pkt_mutex);

	/* Complete the drained packets back to SCSA with CMD_RESET. */
	for (cmd = head; cmd != NULL; cmd = ncmd) {
		struct scsi_pkt *pkt = cmd->cmd_pkt;

		ncmd = cmd->cmd_next;
		ASSERT(pkt != NULL);

		/*
		 * NOTE(review): ttgt is dereferenced here but may be NULL
		 * in the RESET_LUN case (the NULL check happens further
		 * down) -- confirm rlun-only callers cannot reach this
		 * with queued packets.
		 */
		mutex_enter(&pptr->port_mutex);
		if (ttgt->tgt_change_cnt == tgt_cnt) {
			mutex_exit(&pptr->port_mutex);
			cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
			pkt->pkt_reason = CMD_RESET;
			pkt->pkt_statistics |= STAT_DEV_RESET;
			cmd->cmd_state = FCP_PKT_IDLE;
			fcp_post_callback(cmd);
		} else {
			mutex_exit(&pptr->port_mutex);
		}
	}

	/*
	 * If the FCA will return all the commands in its queue then our
	 * work is easy, just return.
	 */

	if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
		return;
	}

	/*
	 * For RESET_LUN get hold of target pointer
	 */
	if (ttgt == NULL) {
		ASSERT(rlun != NULL);

		ttgt = rlun->lun_tgt;

		ASSERT(ttgt != NULL);
	}

	/*
	 * There are some severe race conditions here.
	 * While we are trying to abort the pkt, it might be completing
	 * so mark it aborted and if the abort does not succeed then
	 * handle it in the watch thread.
	 */
	mutex_enter(&ttgt->tgt_mutex);
	nlun = ttgt->tgt_lun;
	mutex_exit(&ttgt->tgt_mutex);
	while ((tlun = nlun) != NULL) {
		int restart = 0;
		/* For a LUN-level reset, skip every other LUN. */
		if (rlun && rlun != tlun) {
			mutex_enter(&ttgt->tgt_mutex);
			nlun = tlun->lun_next;
			mutex_exit(&ttgt->tgt_mutex);
			continue;
		}
		mutex_enter(&tlun->lun_mutex);
		cmd = tlun->lun_pkt_head;
		while (cmd != NULL) {
			if (cmd->cmd_state == FCP_PKT_ISSUED) {
				struct scsi_pkt *pkt;

				restart = 1;
				cmd->cmd_state = FCP_PKT_ABORTING;
				/* lun_mutex dropped across the abort call. */
				mutex_exit(&tlun->lun_mutex);
				rval = fc_ulp_abort(pptr->port_fp_handle,
				    cmd->cmd_fp_pkt, KM_SLEEP);
				if (rval == FC_SUCCESS) {
					pkt = cmd->cmd_pkt;
					pkt->pkt_reason = CMD_RESET;
					pkt->pkt_statistics |= STAT_DEV_RESET;
					cmd->cmd_state = FCP_PKT_IDLE;
					fcp_post_callback(cmd);
				} else {
					caddr_t msg;

					(void) fc_ulp_error(rval, &msg);

					/*
					 * This part is tricky. The abort
					 * failed and now the command could
					 * be completing. The cmd_state ==
					 * FCP_PKT_ABORTING should save
					 * us in fcp_cmd_callback. If we
					 * are already aborting ignore the
					 * command in fcp_cmd_callback.
					 * Here we leave this packet for 20
					 * sec to be aborted in the
					 * fcp_watch thread.
					 */
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Abort failed after reset %s",
					    msg);

					cmd->cmd_timeout =
					    fcp_watchdog_time +
					    cmd->cmd_pkt->pkt_time +
					    FCP_FAILED_DELAY;

					cmd->cmd_fp_pkt->pkt_timeout =
					    FCP_INVALID_TIMEOUT;
					/*
					 * This is a hack, cmd is put in the
					 * overflow queue so that it can be
					 * timed out finally
					 */
					cmd->cmd_flags |= CFLAG_IN_QUEUE;

					mutex_enter(&pptr->port_pkt_mutex);
					if (pptr->port_pkt_head) {
						ASSERT(pptr->port_pkt_tail
						    != NULL);
						pptr->port_pkt_tail->cmd_next
						    = cmd;
						pptr->port_pkt_tail = cmd;
					} else {
						ASSERT(pptr->port_pkt_tail
						    == NULL);
						pptr->port_pkt_head =
						    pptr->port_pkt_tail
						    = cmd;
					}
					cmd->cmd_next = NULL;
					mutex_exit(&pptr->port_pkt_mutex);
				}
				/*
				 * The list may have changed while unlocked;
				 * restart from the head.
				 */
				mutex_enter(&tlun->lun_mutex);
				cmd = tlun->lun_pkt_head;
			} else {
				cmd = cmd->cmd_forw;
			}
		}
		mutex_exit(&tlun->lun_mutex);

		/* If we restarted this LUN, rescan from the first LUN. */
		mutex_enter(&ttgt->tgt_mutex);
		restart == 1 ? (nlun = ttgt->tgt_lun) : (nlun = tlun->lun_next);
		mutex_exit(&ttgt->tgt_mutex);

		/* Give up if the target generation moved on. */
		mutex_enter(&pptr->port_mutex);
		if (tgt_cnt != ttgt->tgt_change_cnt) {
			mutex_exit(&pptr->port_mutex);
			return;
		} else {
			mutex_exit(&pptr->port_mutex);
		}
	}
}
14023 14018
14024 14019
14025 14020 /*
14026 14021 * unlink the soft state, returning the soft state found (if any)
14027 14022 *
14028 14023 * acquires and releases the global mutex
14029 14024 */
14030 14025 struct fcp_port *
14031 14026 fcp_soft_state_unlink(struct fcp_port *pptr)
14032 14027 {
14033 14028 struct fcp_port *hptr; /* ptr index */
14034 14029 struct fcp_port *tptr; /* prev hptr */
14035 14030
14036 14031 mutex_enter(&fcp_global_mutex);
14037 14032 for (hptr = fcp_port_head, tptr = NULL;
14038 14033 hptr != NULL;
14039 14034 tptr = hptr, hptr = hptr->port_next) {
14040 14035 if (hptr == pptr) {
14041 14036 /* we found a match -- remove this item */
14042 14037 if (tptr == NULL) {
14043 14038 /* we're at the head of the list */
14044 14039 fcp_port_head = hptr->port_next;
14045 14040 } else {
14046 14041 tptr->port_next = hptr->port_next;
14047 14042 }
14048 14043 break; /* success */
14049 14044 }
14050 14045 }
14051 14046 if (fcp_port_head == NULL) {
14052 14047 fcp_cleanup_blacklist(&fcp_lun_blacklist);
14053 14048 }
14054 14049 mutex_exit(&fcp_global_mutex);
14055 14050 return (hptr);
14056 14051 }
14057 14052
14058 14053
14059 14054 /*
14060 14055 * called by fcp_scsi_hba_tgt_init to find a LUN given a
14061 14056 * WWN and a LUN number
14062 14057 */
14063 14058 /* ARGSUSED */
14064 14059 static struct fcp_lun *
14065 14060 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
14066 14061 {
14067 14062 int hash;
14068 14063 struct fcp_tgt *ptgt;
14069 14064 struct fcp_lun *plun;
14070 14065
14071 14066 ASSERT(mutex_owned(&pptr->port_mutex));
14072 14067
14073 14068 hash = FCP_HASH(wwn);
14074 14069 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
14075 14070 ptgt = ptgt->tgt_next) {
14076 14071 if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
14077 14072 sizeof (ptgt->tgt_port_wwn)) == 0) {
14078 14073 mutex_enter(&ptgt->tgt_mutex);
14079 14074 for (plun = ptgt->tgt_lun;
14080 14075 plun != NULL;
14081 14076 plun = plun->lun_next) {
14082 14077 if (plun->lun_num == lun) {
14083 14078 mutex_exit(&ptgt->tgt_mutex);
14084 14079 return (plun);
14085 14080 }
14086 14081 }
14087 14082 mutex_exit(&ptgt->tgt_mutex);
14088 14083 return (NULL);
14089 14084 }
14090 14085 }
14091 14086 return (NULL);
14092 14087 }
14093 14088
14094 14089 /*
14095 14090 * Function: fcp_prepare_pkt
14096 14091 *
14097 14092 * Description: This function prepares the SCSI cmd pkt, passed by the caller,
14098 14093 * for fcp_start(). It binds the data or partially maps it.
14099 14094 * Builds the FCP header and starts the initialization of the
14100 14095 * Fibre Channel header.
14101 14096 *
14102 14097 * Argument: *pptr FCP port.
14103 14098 * *cmd FCP packet.
14104 14099 * *plun LUN the command will be sent to.
14105 14100 *
14106 14101 * Context: User, Kernel and Interrupt context.
14107 14102 */
static void
fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
    struct fcp_lun *plun)
{
	fc_packet_t *fpkt = cmd->cmd_fp_pkt;
	struct fcp_tgt *ptgt = plun->lun_tgt;
	struct fcp_cmd *fcmd = &cmd->cmd_fcp_cmd;

	/* A completion routine is mandatory unless polling (FLAG_NOINTR). */
	ASSERT(cmd->cmd_pkt->pkt_comp ||
	    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));

	if (cmd->cmd_pkt->pkt_numcookies) {
		/* Data phase: direction follows the DMA binding flags. */
		if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
			fcmd->fcp_cntl.cntl_read_data = 1;
			fcmd->fcp_cntl.cntl_write_data = 0;
			fpkt->pkt_tran_type = FC_PKT_FCP_READ;
		} else {
			fcmd->fcp_cntl.cntl_read_data = 0;
			fcmd->fcp_cntl.cntl_write_data = 1;
			fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
		}

		/* Hand the SCSA-built DMA cookie list to the FC packet. */
		fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;

		fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
		ASSERT(fpkt->pkt_data_cookie_cnt <=
		    pptr->port_data_dma_attr.dma_attr_sgllen);

		cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;

		/* FCA needs pkt_datalen to be set */
		fpkt->pkt_datalen = cmd->cmd_dmacount;
		fcmd->fcp_data_len = cmd->cmd_dmacount;
	} else {
		/* No data to move: command/response exchange only. */
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 0;
		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
		fpkt->pkt_datalen = 0;
		fcmd->fcp_data_len = 0;
	}

	/* set up the Tagged Queuing type */
	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
	} else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
	} else {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	}

	fcmd->fcp_ent_addr = plun->lun_addr;

	/* Copy the FCP_CMND into FCA-mapped DVMA space when present. */
	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
	} else {
		ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
	}

	/* Reset the SCSI packet's completion bookkeeping for (re)issue. */
	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
	cmd->cmd_pkt->pkt_state = 0;
	cmd->cmd_pkt->pkt_statistics = 0;
	cmd->cmd_pkt->pkt_resid = 0;

	cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;

	if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
		/* Polled I/O: no completion callback. */
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
		fpkt->pkt_comp = NULL;
	} else {
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
		if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
		}
		fpkt->pkt_comp = fcp_cmd_callback;
	}

	/* Flag dump-time I/O so the FCA treats it specially. */
	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & FCP_STATE_SUSPENDED) {
		fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
	}
	mutex_exit(&pptr->port_mutex);

	/* Start the FC frame header: destination and source IDs. */
	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;

	/*
	 * Save a few kernel cycles here
	 */
#ifndef	__lock_lint
	fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
#endif /* __lock_lint */
}
14203 14198
/*
 * Complete a command back to the target driver by invoking the scsi_pkt
 * completion callback through the SCSA framework.
 */
static void
fcp_post_callback(struct fcp_pkt *cmd)
{
	scsi_hba_pkt_comp(cmd->cmd_pkt);
}
14209 14204
14210 14205
14211 14206 /*
14212 14207 * called to do polled I/O by fcp_start()
14213 14208 *
14214 14209 * return a transport status value, i.e. TRAN_ACCECPT for success
14215 14210 */
14216 14211 static int
14217 14212 fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
14218 14213 {
14219 14214 int rval;
14220 14215
14221 14216 #ifdef DEBUG
14222 14217 mutex_enter(&pptr->port_pkt_mutex);
14223 14218 pptr->port_npkts++;
14224 14219 mutex_exit(&pptr->port_pkt_mutex);
14225 14220 #endif /* DEBUG */
14226 14221
14227 14222 if (cmd->cmd_fp_pkt->pkt_timeout) {
14228 14223 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
14229 14224 } else {
14230 14225 cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
14231 14226 }
14232 14227
14233 14228 ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);
14234 14229
14235 14230 cmd->cmd_state = FCP_PKT_ISSUED;
14236 14231
14237 14232 rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);
14238 14233
14239 14234 #ifdef DEBUG
14240 14235 mutex_enter(&pptr->port_pkt_mutex);
14241 14236 pptr->port_npkts--;
14242 14237 mutex_exit(&pptr->port_pkt_mutex);
14243 14238 #endif /* DEBUG */
14244 14239
14245 14240 cmd->cmd_state = FCP_PKT_IDLE;
14246 14241
14247 14242 switch (rval) {
14248 14243 case FC_SUCCESS:
14249 14244 if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
14250 14245 fcp_complete_pkt(cmd->cmd_fp_pkt);
14251 14246 rval = TRAN_ACCEPT;
14252 14247 } else {
14253 14248 rval = TRAN_FATAL_ERROR;
14254 14249 }
14255 14250 break;
14256 14251
14257 14252 case FC_TRAN_BUSY:
14258 14253 rval = TRAN_BUSY;
14259 14254 cmd->cmd_pkt->pkt_resid = 0;
14260 14255 break;
14261 14256
14262 14257 case FC_BADPACKET:
14263 14258 rval = TRAN_BADPKT;
14264 14259 break;
14265 14260
14266 14261 default:
14267 14262 rval = TRAN_FATAL_ERROR;
14268 14263 break;
14269 14264 }
14270 14265
14271 14266 return (rval);
14272 14267 }
14273 14268
14274 14269
14275 14270 /*
14276 14271 * called by some of the following transport-called routines to convert
14277 14272 * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14278 14273 */
14279 14274 static struct fcp_port *
14280 14275 fcp_dip2port(dev_info_t *dip)
14281 14276 {
14282 14277 int instance;
14283 14278
14284 14279 instance = ddi_get_instance(dip);
14285 14280 return (ddi_get_soft_state(fcp_softstate, instance));
14286 14281 }
14287 14282
14288 14283
14289 14284 /*
14290 14285 * called internally to return a LUN given a dip
14291 14286 */
14292 14287 struct fcp_lun *
14293 14288 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14294 14289 {
14295 14290 struct fcp_tgt *ptgt;
14296 14291 struct fcp_lun *plun;
14297 14292 int i;
14298 14293
14299 14294
14300 14295 ASSERT(mutex_owned(&pptr->port_mutex));
14301 14296
14302 14297 for (i = 0; i < FCP_NUM_HASH; i++) {
14303 14298 for (ptgt = pptr->port_tgt_hash_table[i];
14304 14299 ptgt != NULL;
14305 14300 ptgt = ptgt->tgt_next) {
14306 14301 mutex_enter(&ptgt->tgt_mutex);
14307 14302 for (plun = ptgt->tgt_lun; plun != NULL;
14308 14303 plun = plun->lun_next) {
14309 14304 mutex_enter(&plun->lun_mutex);
14310 14305 if (plun->lun_cip == cip) {
14311 14306 mutex_exit(&plun->lun_mutex);
14312 14307 mutex_exit(&ptgt->tgt_mutex);
14313 14308 return (plun); /* match found */
14314 14309 }
14315 14310 mutex_exit(&plun->lun_mutex);
14316 14311 }
14317 14312 mutex_exit(&ptgt->tgt_mutex);
14318 14313 }
14319 14314 }
14320 14315 return (NULL); /* no LUN found */
14321 14316 }
14322 14317
14323 14318 /*
14324 14319 * pass an element to the hotplug list, kick the hotplug thread
14325 14320 * and wait for the element to get processed by the hotplug thread.
14326 14321 * on return the element is freed.
14327 14322 *
14328 14323 * return zero success and non-zero on failure
14329 14324 *
14330 14325 * acquires/releases the target mutex
14331 14326 *
14332 14327 */
14333 14328 static int
14334 14329 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14335 14330 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14336 14331 {
14337 14332 struct fcp_hp_elem *elem;
14338 14333 int rval;
14339 14334
14340 14335 mutex_enter(&plun->lun_tgt->tgt_mutex);
14341 14336 if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14342 14337 what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14343 14338 mutex_exit(&plun->lun_tgt->tgt_mutex);
14344 14339 fcp_log(CE_CONT, pptr->port_dip,
14345 14340 "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14346 14341 what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14347 14342 return (NDI_FAILURE);
14348 14343 }
14349 14344 mutex_exit(&plun->lun_tgt->tgt_mutex);
14350 14345 mutex_enter(&elem->mutex);
14351 14346 if (elem->wait) {
14352 14347 while (elem->wait) {
14353 14348 cv_wait(&elem->cv, &elem->mutex);
14354 14349 }
14355 14350 }
14356 14351 rval = (elem->result);
14357 14352 mutex_exit(&elem->mutex);
14358 14353 mutex_destroy(&elem->mutex);
14359 14354 cv_destroy(&elem->cv);
14360 14355 kmem_free(elem, sizeof (struct fcp_hp_elem));
14361 14356 return (rval);
14362 14357 }
14363 14358
14364 14359 /*
14365 14360 * pass an element to the hotplug list, and then
14366 14361 * kick the hotplug thread
14367 14362 *
14368 14363 * return Boolean success, i.e. non-zero if all goes well, else zero on error
14369 14364 *
14370 14365 * acquires/releases the hotplug mutex
14371 14366 *
14372 14367 * called with the target mutex owned
14373 14368 *
14374 14369 * memory acquired in NOSLEEP mode
14375 14370 * NOTE: if wait is set to 1 then the caller is responsible for waiting on
14376 14371 * for the hp daemon to process the request and is responsible for
14377 14372 * freeing the element
14378 14373 */
14379 14374 static struct fcp_hp_elem *
14380 14375 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14381 14376 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14382 14377 {
14383 14378 struct fcp_hp_elem *elem;
14384 14379 dev_info_t *pdip;
14385 14380
14386 14381 ASSERT(pptr != NULL);
14387 14382 ASSERT(plun != NULL);
14388 14383 ASSERT(plun->lun_tgt != NULL);
14389 14384 ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14390 14385
14391 14386 /* create space for a hotplug element */
14392 14387 if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14393 14388 == NULL) {
14394 14389 fcp_log(CE_WARN, NULL,
14395 14390 "!can't allocate memory for hotplug element");
14396 14391 return (NULL);
14397 14392 }
14398 14393
14399 14394 /* fill in hotplug element */
14400 14395 elem->port = pptr;
14401 14396 elem->lun = plun;
14402 14397 elem->cip = cip;
14403 14398 elem->old_lun_mpxio = plun->lun_mpxio;
14404 14399 elem->what = what;
14405 14400 elem->flags = flags;
14406 14401 elem->link_cnt = link_cnt;
14407 14402 elem->tgt_cnt = tgt_cnt;
14408 14403 elem->wait = wait;
14409 14404 mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14410 14405 cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14411 14406
14412 14407 /* schedule the hotplug task */
14413 14408 pdip = pptr->port_dip;
14414 14409 mutex_enter(&plun->lun_mutex);
14415 14410 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14416 14411 plun->lun_event_count++;
14417 14412 elem->event_cnt = plun->lun_event_count;
14418 14413 }
14419 14414 mutex_exit(&plun->lun_mutex);
14420 14415 if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14421 14416 (void *)elem, KM_NOSLEEP) == NULL) {
14422 14417 mutex_enter(&plun->lun_mutex);
14423 14418 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14424 14419 plun->lun_event_count--;
14425 14420 }
14426 14421 mutex_exit(&plun->lun_mutex);
14427 14422 kmem_free(elem, sizeof (*elem));
14428 14423 return (0);
14429 14424 }
14430 14425
14431 14426 return (elem);
14432 14427 }
14433 14428
14434 14429
/*
 * Attempt to re-issue a previously queued command.  If the LUN is usable
 * (not BUSY/OFFLINE) and the port is not in the middle of onlining, the
 * packet is prepared and handed back to the transport; otherwise (or on
 * transport failure) it goes back on the port's overflow queue.
 */
static void
fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
{
	int rval;
	struct scsi_address *ap;
	struct fcp_lun *plun;
	struct fcp_tgt *ptgt;
	fc_packet_t *fpkt;

	ap = &cmd->cmd_pkt->pkt_address;
	plun = ADDR2LUN(ap);
	ptgt = plun->lun_tgt;

	/* Only packets sitting on the overflow queue come through here. */
	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);

	cmd->cmd_state = FCP_PKT_IDLE;

	/* Lock order: port_mutex before tgt_mutex. */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
	    (!(pptr->port_state & FCP_STATE_ONLINING))) {
		fc_ulp_rscn_info_t *rscnp;

		cmd->cmd_state = FCP_PKT_ISSUED;

		/*
		 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
		 * originally NULL, hence we try to set it to the pd pointed
		 * to by the SCSI device we're trying to get to.
		 */

		fpkt = cmd->cmd_fp_pkt;
		if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
			fpkt->pkt_pd = ptgt->tgt_pd_handle;
			/*
			 * We need to notify the transport that we now have a
			 * reference to the remote port handle.
			 */
			fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
		}

		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);

		/* prepare the packet */

		fcp_prepare_pkt(pptr, cmd, plun);

		rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
		    pkt_ulp_rscn_infop;

		/* zero pkt_time means no watchdog deadline */
		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
		    fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;

		/* stamp the current RSCN generation for staleness checks */
		if (rscnp != NULL) {
			rscnp->ulp_rscn_count =
			    fc_ulp_get_rscn_count(pptr->
			    port_fp_handle);
		}

		rval = fcp_transport(pptr->port_fp_handle,
		    cmd->cmd_fp_pkt, 0);

		if (rval == FC_SUCCESS) {
			return;
		}
		/*
		 * NOTE(review): cmd_state is assigned discrete values
		 * elsewhere, yet here FCP_PKT_ISSUED is cleared with a
		 * bitwise and-not -- confirm this is the intended way to
		 * return the packet to the idle state.
		 */
		cmd->cmd_state &= ~FCP_PKT_ISSUED;
	} else {
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
	}

	/* Transport failed or LUN not ready: park on the overflow queue. */
	fcp_queue_pkt(pptr, cmd);
}
14511 14506
14512 14507
14513 14508 static void
14514 14509 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14515 14510 {
14516 14511 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14517 14512
14518 14513 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14519 14514 cmd->cmd_state = FCP_PKT_IDLE;
14520 14515
14521 14516 cmd->cmd_pkt->pkt_reason = reason;
14522 14517 cmd->cmd_pkt->pkt_state = 0;
14523 14518 cmd->cmd_pkt->pkt_statistics = statistics;
14524 14519
14525 14520 fcp_post_callback(cmd);
14526 14521 }
14527 14522
14528 14523 /*
14529 14524 * Function: fcp_queue_pkt
14530 14525 *
14531 14526 * Description: This function queues the packet passed by the caller into
14532 14527 * the list of packets of the FCP port.
14533 14528 *
14534 14529 * Argument: *pptr FCP port.
14535 14530 * *cmd FCP packet to queue.
14536 14531 *
14537 14532 * Return Value: None
14538 14533 *
14539 14534 * Context: User, Kernel and Interrupt context.
14540 14535 */
14541 14536 static void
14542 14537 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14543 14538 {
14544 14539 ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == NULL);
14545 14540
14546 14541 mutex_enter(&pptr->port_pkt_mutex);
14547 14542 cmd->cmd_flags |= CFLAG_IN_QUEUE;
14548 14543 ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14549 14544 cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14550 14545
14551 14546 /*
14552 14547 * zero pkt_time means hang around for ever
14553 14548 */
14554 14549 if (cmd->cmd_pkt->pkt_time) {
14555 14550 if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14556 14551 cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14557 14552 } else {
14558 14553 /*
14559 14554 * Indicate the watch thread to fail the
14560 14555 * command by setting it to highest value
14561 14556 */
14562 14557 cmd->cmd_timeout = fcp_watchdog_time;
14563 14558 cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14564 14559 }
14565 14560 }
14566 14561
14567 14562 if (pptr->port_pkt_head) {
14568 14563 ASSERT(pptr->port_pkt_tail != NULL);
14569 14564
14570 14565 pptr->port_pkt_tail->cmd_next = cmd;
14571 14566 pptr->port_pkt_tail = cmd;
14572 14567 } else {
14573 14568 ASSERT(pptr->port_pkt_tail == NULL);
14574 14569
14575 14570 pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14576 14571 }
14577 14572 cmd->cmd_next = NULL;
14578 14573 mutex_exit(&pptr->port_pkt_mutex);
14579 14574 }
14580 14575
14581 14576 /*
14582 14577 * Function: fcp_update_targets
14583 14578 *
14584 14579 * Description: This function applies the specified change of state to all
14585 14580 * the targets listed. The operation applied is 'set'.
14586 14581 *
14587 14582 * Argument: *pptr FCP port.
14588 14583 * *dev_list Array of fc_portmap_t structures.
14589 14584 * count Length of dev_list.
14590 14585 * state State bits to update.
14591 14586 * cause Reason for the update.
14592 14587 *
14593 14588 * Return Value: None
14594 14589 *
14595 14590 * Context: User, Kernel and Interrupt context.
14596 14591 * The mutex pptr->port_mutex must be held.
14597 14592 */
14598 14593 static void
14599 14594 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14600 14595 uint32_t count, uint32_t state, int cause)
14601 14596 {
14602 14597 fc_portmap_t *map_entry;
14603 14598 struct fcp_tgt *ptgt;
14604 14599
14605 14600 ASSERT(MUTEX_HELD(&pptr->port_mutex));
14606 14601
14607 14602 while (count--) {
14608 14603 map_entry = &(dev_list[count]);
14609 14604 ptgt = fcp_lookup_target(pptr,
14610 14605 (uchar_t *)&(map_entry->map_pwwn));
14611 14606 if (ptgt == NULL) {
14612 14607 continue;
14613 14608 }
14614 14609
14615 14610 mutex_enter(&ptgt->tgt_mutex);
14616 14611 ptgt->tgt_trace = 0;
14617 14612 ptgt->tgt_change_cnt++;
14618 14613 ptgt->tgt_statec_cause = cause;
14619 14614 ptgt->tgt_tmp_cnt = 1;
14620 14615 fcp_update_tgt_state(ptgt, FCP_SET, state);
14621 14616 mutex_exit(&ptgt->tgt_mutex);
14622 14617 }
14623 14618 }
14624 14619
14625 14620 static int
14626 14621 fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14627 14622 int lcount, int tcount, int cause)
14628 14623 {
14629 14624 int rval;
14630 14625
14631 14626 mutex_enter(&pptr->port_mutex);
14632 14627 rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
14633 14628 mutex_exit(&pptr->port_mutex);
14634 14629
14635 14630 return (rval);
14636 14631 }
14637 14632
14638 14633
/*
 * Bookkeeping for discovery completion, called with port_mutex held.
 * Decrements the per-target and per-port temporary counters and, when the
 * relevant count drains (or a generation mismatch is seen on a link
 * change/down), drives fcp_finish_tgt()/fcp_finish_init().
 *
 * Returns FCP_NO_CHANGE, or FCP_DEV_CHANGE when the target generation
 * (tcount) no longer matches.
 */
static int
fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int lcount, int tcount, int cause)
{
	int	finish_init = 0;
	int	finish_tgt = 0;
	int	do_finish_init = 0;
	int	rval = FCP_NO_CHANGE;

	/* Only link events drive the port-level finish path. */
	if (cause == FCP_CAUSE_LINK_CHANGE ||
	    cause == FCP_CAUSE_LINK_DOWN) {
		do_finish_init = 1;
	}

	if (ptgt != NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0,
		    "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
		    " cause = %d, d_id = 0x%x, tgt_done = %d",
		    pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
		    pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
		    ptgt->tgt_d_id, ptgt->tgt_done);

		mutex_enter(&ptgt->tgt_mutex);

		if (tcount && (ptgt->tgt_change_cnt != tcount)) {
			/*
			 * The target generation moved on since the caller
			 * started; report the change.  tgt_done ensures the
			 * port-level finish runs at most once per target.
			 */
			rval = FCP_DEV_CHANGE;
			if (do_finish_init && ptgt->tgt_done == 0) {
				ptgt->tgt_done++;
				finish_init = 1;
			}
		} else {
			/* Same generation: drain the target's tmp count. */
			if (--ptgt->tgt_tmp_cnt <= 0) {
				ptgt->tgt_tmp_cnt = 0;
				finish_tgt = 1;

				if (do_finish_init) {
					finish_init = 1;
				}
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0,
		    "Call Finish Init for NO target");

		if (do_finish_init) {
			finish_init = 1;
		}
	}

	if (finish_tgt) {
		ASSERT(ptgt != NULL);

		mutex_enter(&ptgt->tgt_mutex);
#ifdef	DEBUG
		/* record who drained the tmp count, for post-mortem use */
		bzero(ptgt->tgt_tmp_cnt_stack,
		    sizeof (ptgt->tgt_tmp_cnt_stack));

		ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
		    FCP_STACK_DEPTH);
#endif /* DEBUG */
		mutex_exit(&ptgt->tgt_mutex);

		(void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
	}

	/* Port-level finish only if the link generation still matches. */
	if (finish_init && lcount == pptr->port_link_cnt) {
		ASSERT(pptr->port_tmp_cnt > 0);
		if (--pptr->port_tmp_cnt == 0) {
			fcp_finish_init(pptr);
		}
	} else if (lcount != pptr->port_link_cnt) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_call_finish_init_held,1: state change occured"
		    " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
	}

	return (rval);
}
14721 14716
/*
 * Timeout callback (scheduled via ptgt->tgt_tid) that re-drives LUN
 * discovery for a single target by injecting a synthetic
 * PORT_DEVICE_REPORTLUN_CHANGED state-change event into
 * fcp_statec_callback(), as if the transport had reported it.
 *
 * tgt_handle is the struct fcp_tgt * the timeout was armed against.
 */
static void
fcp_reconfigure_luns(void * tgt_handle)
{
	uint32_t	dev_cnt;
	fc_portmap_t	*devlist;
	struct fcp_tgt	*ptgt = (struct fcp_tgt *)tgt_handle;
	struct fcp_port	*pptr = ptgt->tgt_port;

	/*
	 * If the timer that fires this off got canceled too late, the
	 * target could have been destroyed.
	 *
	 * NOTE(review): tgt_tid is read here without holding tgt_mutex;
	 * fcp_free_target() NULLs it (under tgt_mutex) before calling
	 * untimeout(), which in turn waits for this callback to finish.
	 * Presumably that ordering makes the unlocked read safe — confirm
	 * against untimeout(9F) semantics.
	 */

	if (ptgt->tgt_tid == NULL) {
		return;
	}

	/* KM_NOSLEEP: we are in timeout context and must not block. */
	devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
	if (devlist == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: failed to allocate for portmap",
		    pptr->port_instance);
		return;
	}

	/* Build a one-entry portmap describing this target as logged in. */
	dev_cnt = 1;
	devlist->map_pd = ptgt->tgt_pd_handle;
	devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
	devlist->map_did.port_id = ptgt->tgt_d_id;

	bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
	bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);

	devlist->map_state = PORT_DEVICE_LOGGED_IN;
	devlist->map_type = PORT_DEVICE_REPORTLUN_CHANGED;
	devlist->map_flags = 0;

	/* Hand the synthetic event to the normal state-change path. */
	fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
	    pptr->port_topology, devlist, dev_cnt, pptr->port_id);

	/*
	 * Clear the tgt_tid after no more references to
	 * the fcp_tgt
	 */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_tid = NULL;
	mutex_exit(&ptgt->tgt_mutex);

	kmem_free(devlist, sizeof (*devlist));
}
14772 14767
14773 14768
14774 14769 static void
14775 14770 fcp_free_targets(struct fcp_port *pptr)
14776 14771 {
14777 14772 int i;
14778 14773 struct fcp_tgt *ptgt;
14779 14774
14780 14775 mutex_enter(&pptr->port_mutex);
14781 14776 for (i = 0; i < FCP_NUM_HASH; i++) {
14782 14777 ptgt = pptr->port_tgt_hash_table[i];
14783 14778 while (ptgt != NULL) {
14784 14779 struct fcp_tgt *next_tgt = ptgt->tgt_next;
14785 14780
14786 14781 fcp_free_target(ptgt);
14787 14782 ptgt = next_tgt;
14788 14783 }
14789 14784 }
14790 14785 mutex_exit(&pptr->port_mutex);
14791 14786 }
14792 14787
14793 14788
/*
 * Free a single target: cancel any pending reconfigure timeout,
 * deallocate every LUN on the target, then deallocate the target
 * itself. Caller holds port_mutex (see fcp_free_targets()).
 */
static void
fcp_free_target(struct fcp_tgt *ptgt)
{
	struct fcp_lun	*plun;
	timeout_id_t	tid;

	mutex_enter(&ptgt->tgt_mutex);
	tid = ptgt->tgt_tid;

	/*
	 * Cancel any pending timeouts for this target.
	 */

	if (tid != NULL) {
		/*
		 * Set tgt_tid to NULL first to avoid a race in the callback.
		 * If tgt_tid is NULL, the callback will simply return.
		 */
		ptgt->tgt_tid = NULL;
		/*
		 * tgt_mutex must be dropped around untimeout(): the callback
		 * (fcp_reconfigure_luns) takes tgt_mutex, and untimeout()
		 * blocks until an in-flight callback completes.
		 */
		mutex_exit(&ptgt->tgt_mutex);
		(void) untimeout(tid);
		mutex_enter(&ptgt->tgt_mutex);
	}

	/* Free the LUN list; capture lun_next before each dealloc. */
	plun = ptgt->tgt_lun;
	while (plun != NULL) {
		struct fcp_lun *next_lun = plun->lun_next;

		fcp_dealloc_lun(plun);
		plun = next_lun;
	}

	mutex_exit(&ptgt->tgt_mutex);
	fcp_dealloc_tgt(ptgt);
}
14829 14824
14830 14825 /*
14831 14826 * Function: fcp_is_retryable
14832 14827 *
14833 14828 * Description: Indicates if the internal packet is retryable.
14834 14829 *
14835 14830 * Argument: *icmd FCP internal packet.
14836 14831 *
14837 14832 * Return Value: 0 Not retryable
14838 14833 * 1 Retryable
14839 14834 *
14840 14835 * Context: User, Kernel and Interrupt context
14841 14836 */
14842 14837 static int
14843 14838 fcp_is_retryable(struct fcp_ipkt *icmd)
14844 14839 {
14845 14840 if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14846 14841 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
14847 14842 return (0);
14848 14843 }
14849 14844
14850 14845 return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14851 14846 icmd->ipkt_port->port_deadline) ? 1 : 0);
14852 14847 }
14853 14848
/*
 * Function: fcp_create_on_demand
 *
 * Argument:	*pptr		FCP port.
 *		*pwwn		Port WWN.
 *
 * Return Value: 0		Success
 *		 EIO
 *		 ENOMEM
 *		 EBUSY
 *		 EINVAL
 *
 * Context:	User and Kernel context
 *
 * Creates (or re-discovers) a target at user request (luxadm/cfgadm
 * ioctl path): asks fp/fctl for the remote port, drives discovery via
 * fcp_handle_mapflags(), then polls until the target's LUNs appear and
 * onlines each one through the hotplug daemon.
 */
static int
fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
{
	int			wait_ms;
	int			tcount;
	int			lcount;
	int			ret;
	int			error;
	int			rval = EIO;	/* pessimistic default */
	int			ntries;
	fc_portmap_t		*devlist;
	opaque_t		pd;
	struct fcp_lun		*plun;
	struct fcp_tgt		*ptgt;
	int			old_manual = 0;

	/* Allocates the fc_portmap_t structure. */
	devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);

	/*
	 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown
	 * in the commented statement below:
	 *
	 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
	 *
	 * Below, the deadline for the discovery process is set.
	 */
	mutex_enter(&pptr->port_mutex);
	pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
	mutex_exit(&pptr->port_mutex);

	/*
	 * We try to find the remote port based on the WWN provided by the
	 * caller. We actually ask fp/fctl if it has it.
	 */
	pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
	    (la_wwn_t *)pwwn, &error, 1);

	if (pd == NULL) {
		kmem_free(devlist, sizeof (*devlist));
		return (rval);
	}

	/*
	 * The remote port was found. We ask fp/fctl to update our
	 * fc_portmap_t structure.
	 */
	ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
	    (la_wwn_t *)pwwn, devlist);
	if (ret != FC_SUCCESS) {
		kmem_free(devlist, sizeof (*devlist));
		return (rval);
	}

	/*
	 * The map flag field is set to indicates that the creation is being
	 * done at the user request (Ioclt probably luxadm or cfgadm).
	 */
	devlist->map_type = PORT_DEVICE_USER_CREATE;

	/*
	 * port_mutex is held from here through the discovery kick-off and
	 * the polling loop below (dropped/retaken around the delay()).
	 */
	mutex_enter(&pptr->port_mutex);

	/*
	 * We check to see if fcp already has a target that describes the
	 * device being created. If not it is created.
	 */
	ptgt = fcp_lookup_target(pptr, pwwn);
	if (ptgt == NULL) {
		lcount = pptr->port_link_cnt;
		mutex_exit(&pptr->port_mutex);

		ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			return (ENOMEM);
		}

		mutex_enter(&pptr->port_mutex);
	}

	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
	ptgt->tgt_tmp_cnt = 1;
	ptgt->tgt_device_created = 0;
	/*
	 * If fabric and auto config is set but the target was
	 * manually unconfigured then reset to the manual_config_only to
	 * 0 so the device will get configured.
	 */
	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
	    fcp_enable_auto_configuration &&
	    ptgt->tgt_manual_config_only == 1) {
		old_manual = 1;
		ptgt->tgt_manual_config_only = 0;
	}
	mutex_exit(&ptgt->tgt_mutex);

	fcp_update_targets(pptr, devlist, 1,
	    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);

	/* Snapshot the generation counters to detect concurrent changes. */
	lcount = pptr->port_link_cnt;
	tcount = ptgt->tgt_change_cnt;

	if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
	    tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
		/* Discovery failed to start; restore manual-config state. */
		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    fcp_enable_auto_configuration && old_manual) {
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_manual_config_only = 1;
			mutex_exit(&ptgt->tgt_mutex);
		}

		if (pptr->port_link_cnt != lcount ||
		    ptgt->tgt_change_cnt != tcount) {
			rval = EBUSY;
		}
		mutex_exit(&pptr->port_mutex);

		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_create_on_demand: mapflags ptgt=%x, "
		    "lcount=%x::port_link_cnt=%x, "
		    "tcount=%x: tgt_change_cnt=%x, rval=%x",
		    ptgt, lcount, pptr->port_link_cnt,
		    tcount, ptgt->tgt_change_cnt, rval);
		/* NOTE(review): devlist is not freed on this path — confirm. */
		return (rval);
	}

	/*
	 * Due to lack of synchronization mechanisms, we perform
	 * periodic monitoring of our request; Because requests
	 * get dropped when another one supercedes (either because
	 * of a link change or a target change), it is difficult to
	 * provide a clean synchronization mechanism (such as a
	 * semaphore or a conditional variable) without exhaustively
	 * rewriting the mainline discovery code of this driver.
	 */
	wait_ms = 500;

	ntries = fcp_max_target_retries;

	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
	    FCP_BUF_LEVEL_3, 0,
	    "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
	    "lcount=%x::port_link_cnt=%x, "
	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
	    "tgt_tmp_cnt =%x",
	    ntries, ptgt, lcount, pptr->port_link_cnt,
	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
	    ptgt->tgt_tmp_cnt);

	/*
	 * Poll until the device shows up, the retry budget runs out, or a
	 * link/target generation change invalidates the request. Both
	 * mutexes are dropped around the delay().
	 */
	mutex_enter(&ptgt->tgt_mutex);
	while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
	    ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		delay(drv_usectohz(wait_ms * 1000));

		mutex_enter(&pptr->port_mutex);
		mutex_enter(&ptgt->tgt_mutex);
	}


	if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
		rval = EBUSY;
	} else {
		if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
		    FCP_TGT_NODE_PRESENT) {
			rval = 0;
		}
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
	    FCP_BUF_LEVEL_3, 0,
	    "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
	    "lcount=%x::port_link_cnt=%x, "
	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
	    "tgt_tmp_cnt =%x",
	    ntries, ptgt, lcount, pptr->port_link_cnt,
	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
	    ptgt->tgt_tmp_cnt);

	if (rval) {
		/* Discovery failed or was superseded; undo and bail out. */
		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    fcp_enable_auto_configuration && old_manual) {
			ptgt->tgt_manual_config_only = 1;
		}
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
		kmem_free(devlist, sizeof (*devlist));

		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
		    "lcount=%x::port_link_cnt=%x, "
		    "tcount=%x::tgt_change_cnt=%x, rval=%x, "
		    "tgt_device_created=%x, tgt D_ID=%x",
		    ntries, ptgt, lcount, pptr->port_link_cnt,
		    tcount, ptgt->tgt_change_cnt, rval,
		    ptgt->tgt_device_created, ptgt->tgt_d_id);
		return (rval);
	}

	/* Re-snapshot counters; EINVAL if the target ended up with no LUNs. */
	if ((plun = ptgt->tgt_lun) != NULL) {
		tcount = plun->lun_tgt->tgt_change_cnt;
	} else {
		rval = EINVAL;
	}
	lcount = pptr->port_link_cnt;

	/*
	 * Configuring the target with no LUNs will fail. We
	 * should reset the node state so that it is not
	 * automatically configured when the LUNs are added
	 * to this target.
	 */
	if (ptgt->tgt_lun_cnt == 0) {
		ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
	}
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/* Online each LUN that is not already offline. */
	while (plun) {
		child_info_t	*cip;

		mutex_enter(&plun->lun_mutex);
		cip = plun->lun_cip;
		mutex_exit(&plun->lun_mutex);

		mutex_enter(&ptgt->tgt_mutex);
		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
			mutex_exit(&ptgt->tgt_mutex);

			rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
			    FCP_ONLINE, lcount, tcount,
			    NDI_ONLINE_ATTACH);
			if (rval != NDI_SUCCESS) {
				FCP_TRACE(fcp_logq,
				    pptr->port_instbuf, fcp_trace,
				    FCP_BUF_LEVEL_3, 0,
				    "fcp_create_on_demand: "
				    "pass_to_hp_and_wait failed "
				    "rval=%x", rval);
				rval = EIO;
			} else {
				/* Online succeeded; clear transient flags. */
				mutex_enter(&LUN_TGT->tgt_mutex);
				plun->lun_state &= ~(FCP_LUN_OFFLINE |
				    FCP_LUN_BUSY);
				mutex_exit(&LUN_TGT->tgt_mutex);
			}
			mutex_enter(&ptgt->tgt_mutex);
		}

		plun = plun->lun_next;
		mutex_exit(&ptgt->tgt_mutex);
	}

	kmem_free(devlist, sizeof (*devlist));

	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
	    fcp_enable_auto_configuration && old_manual) {
		mutex_enter(&ptgt->tgt_mutex);
		/* if successful then set manual to 0 */
		if (rval == 0) {
			ptgt->tgt_manual_config_only = 0;
		} else {
			/* reset to 1 so the user has to do the config */
			ptgt->tgt_manual_config_only = 1;
		}
		mutex_exit(&ptgt->tgt_mutex);
	}

	return (rval);
}
15144 15139
15145 15140
15146 15141 static void
15147 15142 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
15148 15143 {
15149 15144 int count;
15150 15145 uchar_t byte;
15151 15146
15152 15147 count = 0;
15153 15148 while (*string) {
15154 15149 byte = FCP_ATOB(*string); string++;
15155 15150 byte = byte << 4 | FCP_ATOB(*string); string++;
15156 15151 bytes[count++] = byte;
15157 15152
15158 15153 if (count >= byte_len) {
15159 15154 break;
15160 15155 }
15161 15156 }
15162 15157 }
15163 15158
15164 15159 static void
15165 15160 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
15166 15161 {
15167 15162 int i;
15168 15163
15169 15164 for (i = 0; i < FC_WWN_SIZE; i++) {
15170 15165 (void) sprintf(string + (i * 2),
15171 15166 "%02x", wwn[i]);
15172 15167 }
15173 15168
15174 15169 }
15175 15170
/*
 * Log a human-readable description of a failed internal packet.
 *
 * Builds a printf-style format string in a scratch buffer (note the
 * "%%x" escapes: they become literal "%x" conversions for the later
 * fcp_log() call) and appends detail depending on whether the failure
 * was a SCSI command with FCP response/sense data or an ELS/transport
 * error.
 */
static void
fcp_print_error(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
	    fpkt->pkt_ulp_private;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	caddr_t		buf;
	int		scsi_cmd = 0;	/* set for SCSI opcodes below */

	ptgt = icmd->ipkt_tgt;
	plun = icmd->ipkt_lun;
	pptr = ptgt->tgt_port;

	/* May run in interrupt context: must not sleep for the buffer. */
	buf = kmem_zalloc(256, KM_NOSLEEP);
	if (buf == NULL) {
		return;
	}

	switch (icmd->ipkt_opcode) {
	case SCMD_REPORT_LUN:
		(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY_PAGE83:
		(void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY:
		(void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case LA_ELS_PLOGI:
		(void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
		break;

	case LA_ELS_PRLI:
		(void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
		break;
	}

	if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
		/* SCSI command completed at the FC level: decode the FCP_RSP. */
		struct fcp_rsp response, *rsp;
		uchar_t asc, ascq;
		caddr_t sense_key = NULL;
		struct fcp_rsp_info fcp_rsp_err, *bep;

		if (icmd->ipkt_nodma) {
			/* Response is directly addressable. */
			rsp = (struct fcp_rsp *)fpkt->pkt_resp;
			bep = (struct fcp_rsp_info *)((caddr_t)rsp +
			    sizeof (struct fcp_rsp));
		} else {
			/* Response lives in DMA memory: copy it in. */
			rsp = &response;
			bep = &fcp_rsp_err;

			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));

			FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
			    bep, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp_info));
		}


		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
			(void) sprintf(buf + strlen(buf),
			    " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
			    " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
			    " senselen=%%x. Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
			    rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
			    rsp->fcp_u.fcp_status.reserved_1,
			    rsp->fcp_response_len, rsp->fcp_sense_len);

			kmem_free(buf, 256);
			return;
		}

		/* Append the FCP response code if one was supplied. */
		if (rsp->fcp_u.fcp_status.rsp_len_set &&
		    bep->rsp_code != FCP_NO_FAILURE) {
			(void) sprintf(buf + strlen(buf),
			    " FCP Response code = 0x%x", bep->rsp_code);
		}

		if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
			/* CHECK CONDITION: decode the sense data. */
			struct scsi_extended_sense sense_info, *sense_ptr;

			if (icmd->ipkt_nodma) {
				sense_ptr = (struct scsi_extended_sense *)
				    ((caddr_t)fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len);
			} else {
				sense_ptr = &sense_info;

				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len, &sense_info,
				    fpkt->pkt_resp_acc,
				    sizeof (struct scsi_extended_sense));
			}

			if (sense_ptr->es_key < NUM_SENSE_KEYS +
			    NUM_IMPL_SENSE_KEYS) {
				sense_key = sense_keys[sense_ptr->es_key];
			} else {
				sense_key = "Undefined";
			}

			asc = sense_ptr->es_add_code;
			ascq = sense_ptr->es_qual_code;

			(void) sprintf(buf + strlen(buf),
			    ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
			    " Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, sense_key,
			    asc, ascq);
		} else {
			/* Non-CHECK SCSI status: just report it. */
			(void) sprintf(buf + strlen(buf),
			    " : SCSI status=%%x. Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num,
			    rsp->fcp_u.fcp_status.scsi_status);
		}
	} else {
		/* Transport-level failure: ask fp/fctl to explain it. */
		caddr_t state, reason, action, expln;

		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
		    &action, &expln);

		(void) sprintf(buf + strlen(buf), ": State:%%s,"
		    " Reason:%%s. Giving up");

		if (scsi_cmd) {
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, state, reason);
		} else {
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, state, reason);
		}
	}

	kmem_free(buf, 256);
}
15332 15327
15333 15328
/*
 * Decide what to do with an internal packet that failed with transport
 * error `rval`: requeue it for retry, convert a failed PRLI into a
 * PLOGI, or give up and log the error. `op` names the operation for
 * the log message.
 *
 * Returns DDI_SUCCESS if the packet was requeued/resent (caller must
 * not free it), DDI_FAILURE otherwise.
 */
static int
fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, int rval, caddr_t op)
{
	int	ret = DDI_FAILURE;
	char	*error;

	switch (rval) {
	case FC_DEVICE_BUSY_NEW_RSCN:
		/*
		 * This means that there was a new RSCN that the transport
		 * knows about (which the ULP *may* know about too) but the
		 * pkt that was sent down was related to an older RSCN. So, we
		 * are just going to reset the retry count and deadline and
		 * continue to retry. The idea is that transport is currently
		 * working on the new RSCN and will soon let the ULPs know
		 * about it and when it does the existing logic will kick in
		 * where it will change the tcount to indicate that something
		 * changed on the target. So, rediscovery will start and there
		 * will not be an infinite retry.
		 *
		 * For a full flow of how the RSCN info is transferred back and
		 * forth, see fp.c
		 */
		icmd->ipkt_retries = 0;
		/*
		 * NOTE(review): port_deadline is written here without
		 * port_mutex; other writers take the lock — confirm this
		 * is intentional (it is a monotonic extension).
		 */
		icmd->ipkt_port->port_deadline = fcp_watchdog_time +
		    FCP_ICMD_DEADLINE;

		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		/* FALLTHROUGH */

	case FC_STATEC_BUSY:
	case FC_DEVICE_BUSY:
	case FC_PBUSY:
	case FC_FBUSY:
	case FC_TRAN_BUSY:
	case FC_OFFLINE:
		/* Transient busy conditions: requeue while retry budget
		 * and port deadline allow. */
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
		    fcp_is_retryable(icmd)) {
			fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
			ret = DDI_SUCCESS;
		}
		break;

	case FC_LOGINREQ:
		/*
		 * FC_LOGINREQ used to be handled just like all the cases
		 * above. It has been changed to handled a PRLI that fails
		 * with FC_LOGINREQ different than other ipkts that fail
		 * with FC_LOGINREQ. If a PRLI fails with FC_LOGINREQ it is
		 * a simple matter to turn it into a PLOGI instead, so that's
		 * exactly what we do here.
		 */
		if (icmd->ipkt_opcode == LA_ELS_PRLI) {
			ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
			    icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
			    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
			    FCP_BUF_LEVEL_3, 0,
			    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
			    rval, ptgt->tgt_d_id);
			if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
			    fcp_is_retryable(icmd)) {
				fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
				ret = DDI_SUCCESS;
			}
		}
		break;

	default:
		/*
		 * Unrecoverable error: log it, but only if the port/target
		 * generation hasn't changed (otherwise the failure is stale
		 * and rediscovery is already handling it).
		 */
		mutex_enter(&pptr->port_mutex);
		mutex_enter(&ptgt->tgt_mutex);
		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);

			(void) fc_ulp_error(rval, &error);
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to send %s to D_ID=%x error=%s",
			    op, ptgt->tgt_d_id, error);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_handle_ipkt_errors,1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
		}
		break;
	}

	return (ret);
}
15435 15430
15436 15431
15437 15432 /*
15438 15433 * Check of outstanding commands on any LUN for this target
15439 15434 */
15440 15435 static int
15441 15436 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15442 15437 {
15443 15438 struct fcp_lun *plun;
15444 15439 struct fcp_pkt *cmd;
15445 15440
15446 15441 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15447 15442 mutex_enter(&plun->lun_mutex);
15448 15443 for (cmd = plun->lun_pkt_head; cmd != NULL;
15449 15444 cmd = cmd->cmd_forw) {
15450 15445 if (cmd->cmd_state == FCP_PKT_ISSUED) {
15451 15446 mutex_exit(&plun->lun_mutex);
15452 15447 return (FC_SUCCESS);
15453 15448 }
15454 15449 }
15455 15450 mutex_exit(&plun->lun_mutex);
15456 15451 }
15457 15452
15458 15453 return (FC_FAILURE);
15459 15454 }
15460 15455
/*
 * Build a portmap array describing every non-orphan target on the
 * port.
 *
 * Two passes under port_mutex: first count the eligible targets to
 * size the allocation, then fill one fc_portmap_t per target —
 * preferring fp/fctl's view via fc_ulp_pwwn_to_portmap() and falling
 * back to a PORT_DEVICE_INVALID/PORT_DEVICE_OLD entry built from the
 * fcp_tgt itself.
 *
 * On success *dev_cnt is the entry count and the returned array (of
 * *dev_cnt elements) must be freed by the caller; returns NULL on
 * allocation failure.
 */
static fc_portmap_t *
fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
{
	int		i;
	fc_portmap_t	*devlist;
	fc_portmap_t	*devptr = NULL;
	struct fcp_tgt	*ptgt;

	mutex_enter(&pptr->port_mutex);
	/* Pass 1: count non-orphan targets across all hash buckets. */
	for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
				++*dev_cnt;
			}
		}
	}

	/*
	 * NOTE(review): if *dev_cnt is 0 this requests a zero-byte
	 * allocation — confirm kmem_zalloc(0, KM_NOSLEEP) is acceptable
	 * here or that callers never reach this with no targets.
	 */
	devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
	    KM_NOSLEEP);
	if (devlist == NULL) {
		mutex_exit(&pptr->port_mutex);
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: failed to allocate for portmap for construct map",
		    pptr->port_instance);
		return (devptr);
	}

	/* Pass 2: fill an entry per target (mutex held throughout, so the
	 * count from pass 1 is still valid). */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
				int ret;

				ret = fc_ulp_pwwn_to_portmap(
				    pptr->port_fp_handle,
				    (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
				    devlist);

				if (ret == FC_SUCCESS) {
					devlist++;
					continue;
				}

				/* fp/fctl doesn't know it: synthesize an
				 * invalid/old entry from our own state. */
				devlist->map_pd = NULL;
				devlist->map_did.port_id = ptgt->tgt_d_id;
				devlist->map_hard_addr.hard_addr =
				    ptgt->tgt_hard_addr;

				devlist->map_state = PORT_DEVICE_INVALID;
				devlist->map_type = PORT_DEVICE_OLD;

				bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
				    &devlist->map_nwwn, FC_WWN_SIZE);

				bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
				    &devlist->map_pwwn, FC_WWN_SIZE);

				devlist++;
			}
		}
	}

	mutex_exit(&pptr->port_mutex);

	return (devptr);
}
/*
 * Inimate MPxIO that the lun is busy and cannot accept regular IO
 */
static void
fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
{
	int i;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;

	/*
	 * Walk every target/LUN on the port; for each MPxIO-managed LUN
	 * marked FCP_LUN_BUSY, ask the hotplug task to set the path busy.
	 * Each bucket's targets are examined under that target's tgt_mutex
	 * (note fcp_pass_to_hp() is called with tgt_mutex held).
	 */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			mutex_enter(&ptgt->tgt_mutex);
			for (plun = ptgt->tgt_lun; plun != NULL;
			    plun = plun->lun_next) {
				if (plun->lun_mpxio &&
				    plun->lun_state & FCP_LUN_BUSY) {
					if (!fcp_pass_to_hp(pptr, plun,
					    plun->lun_cip,
					    FCP_MPXIO_PATH_SET_BUSY,
					    pptr->port_link_cnt,
					    ptgt->tgt_change_cnt, 0, 0)) {
						/* Best effort: just trace. */
						FCP_TRACE(fcp_logq,
						    pptr->port_instbuf,
						    fcp_trace,
						    FCP_BUF_LEVEL_2, 0,
						    "path_verifybusy: "
						    "disable lun %p failed!",
						    plun);
					}
				}
			}
			mutex_exit(&ptgt->tgt_mutex);
		}
	}
}
15565 15560
/*
 * Enable or disable an MPxIO path for a LUN.
 *
 * `what` selects the operation: FCP_MPXIO_PATH_CLEAR_BUSY re-enables
 * the path for I/O; any other value (i.e. FCP_MPXIO_PATH_SET_BUSY)
 * disables it. Returns NDI_FAILURE if `cip` is no longer a child of
 * the LUN, NDI_SUCCESS otherwise.
 */
static int
fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
{
	dev_info_t	*cdip = NULL;
	dev_info_t	*pdip = NULL;

	ASSERT(plun);

	/* Verify cip still belongs to this LUN before touching it. */
	mutex_enter(&plun->lun_mutex);
	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
		mutex_exit(&plun->lun_mutex);
		return (NDI_FAILURE);
	}
	mutex_exit(&plun->lun_mutex);
	/*
	 * NOTE(review): cip is used after lun_mutex is dropped; presumably
	 * the pathinfo node cannot disappear between the check above and
	 * the mdi_pi_* calls below — confirm against the caller's hold.
	 */
	cdip = mdi_pi_get_client(PIP(cip));
	pdip = mdi_pi_get_phci(PIP(cip));

	/* cdip/pdip are only sanity-checked; DEBUG-build validation. */
	ASSERT(cdip != NULL);
	ASSERT(pdip != NULL);

	if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
		/* LUN ready for IO */
		(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
	} else {
		/* LUN busy to accept IO */
		(void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
	}
	return (NDI_SUCCESS);
}
15595 15590
/*
 * Caller must free the returned string of MAXPATHLEN len
 * If the device is offline (-1 instance number) NULL
 * will be returned.
 *
 * Builds the devinfo path for a LUN's child node. For MPxIO LUNs the
 * pathinfo node is held across the lookup so the client dip cannot be
 * deallocated while lun_mutex is dropped.
 */
static char *
fcp_get_lun_path(struct fcp_lun *plun)
{
	dev_info_t	*dip = NULL;
	char		*path = NULL;
	mdi_pathinfo_t	*pip = NULL;

	if (plun == NULL) {
		return (NULL);
	}

	mutex_enter(&plun->lun_mutex);
	if (plun->lun_mpxio == 0) {
		/* Non-MPxIO: lun_cip is the child dip itself. */
		dip = DIP(plun->lun_cip);
		mutex_exit(&plun->lun_mutex);
	} else {
		/*
		 * lun_cip must be accessed with lun_mutex held. Here
		 * plun->lun_cip either points to a valid node or it is NULL.
		 * Make a copy so that we can release lun_mutex.
		 */
		pip = PIP(plun->lun_cip);

		/*
		 * Increase ref count on the path so that we can release
		 * lun_mutex and still be sure that the pathinfo node (and thus
		 * also the client) is not deallocated. If pip is NULL, this
		 * has no effect.
		 */
		mdi_hold_path(pip);

		mutex_exit(&plun->lun_mutex);

		/* Get the client. If pip is NULL, we get NULL. */
		dip = mdi_pi_get_client(pip);
	}

	if (dip == NULL)
		goto out;
	/* Negative instance number means the device is offline. */
	if (ddi_get_instance(dip) < 0)
		goto out;

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (path == NULL)
		goto out;

	(void) ddi_pathname(dip, path);

	/* Clean up. */
out:
	if (pip != NULL)
		mdi_rele_path(pip);

	/*
	 * In reality, the user wants a fully valid path (one they can open)
	 * but this string is lacking the mount point, and the minor node.
	 * It would be nice if we could "figure these out" somehow
	 * and fill them in. Otherwise, the userland code has to understand
	 * driver specific details of which minor node is the "best" or
	 * "right" one to expose. (Ex: which slice is the whole disk, or
	 * which tape doesn't rewind)
	 */
	return (path);
}
15665 15660
/*
 * scsi_hba bus_config(9E) entry point for fcp.
 *
 * For BUS_CONFIG_ONE the configuration is retried up to
 * fcp_max_bus_config_retries times so fabric devices needed for root
 * have a chance to appear.  For BUS_CONFIG_DRIVER/BUS_CONFIG_ALL we
 * first wait (up to FCP_INIT_WAIT_TIMEOUT measured from port attach)
 * for all discovered devices to report in before configuring.
 */
static int
fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int64_t reset_delay;
	int rval, retry = 0;
	struct fcp_port *pptr = fcp_dip2port(parent);

	/*
	 * Remaining wait budget: FCP_INIT_WAIT_TIMEOUT minus however long
	 * the port has already been attached.  Clamp at zero.
	 */
	reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
	    (ddi_get_lbolt64() - pptr->port_attach_time);
	if (reset_delay < 0) {
		reset_delay = 0;
	}

	if (fcp_bus_config_debug) {
		flag |= NDI_DEVI_DEBUG;
	}

	switch (op) {
	case BUS_CONFIG_ONE:
		/*
		 * Retry the command since we need to ensure
		 * the fabric devices are available for root
		 */
		while (retry++ < fcp_max_bus_config_retries) {
			rval = (ndi_busop_bus_config(parent,
			    flag | NDI_MDI_FALLBACK, op,
			    arg, childp, (clock_t)reset_delay));
			if (rval == 0) {
				return (rval);
			}
		}

		/*
		 * drain taskq to make sure nodes are created and then
		 * try again.
		 */
		taskq_wait(DEVI(parent)->devi_taskq);
		return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
		    op, arg, childp, 0));

	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL: {
		/*
		 * delay till all devices report in (port_tmp_cnt == 0)
		 * or FCP_INIT_WAIT_TIMEOUT
		 */
		mutex_enter(&pptr->port_mutex);
		while ((reset_delay > 0) && pptr->port_tmp_cnt) {
			(void) cv_timedwait(&pptr->port_config_cv,
			    &pptr->port_mutex,
			    ddi_get_lbolt() + (clock_t)reset_delay);
			/* Recompute the remaining wait budget. */
			reset_delay =
			    (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
			    (ddi_get_lbolt64() - pptr->port_attach_time);
		}
		mutex_exit(&pptr->port_mutex);
		/* drain taskq to make sure nodes are created */
		taskq_wait(DEVI(parent)->devi_taskq);
		return (ndi_busop_bus_config(parent, flag, op,
		    arg, childp, 0));
	}

	default:
		return (NDI_FAILURE);
	}
	/*NOTREACHED*/
}
15734 15729
15735 15730 static int
15736 15731 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15737 15732 ddi_bus_config_op_t op, void *arg)
15738 15733 {
15739 15734 if (fcp_bus_config_debug) {
15740 15735 flag |= NDI_DEVI_DEBUG;
15741 15736 }
15742 15737
15743 15738 return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15744 15739 }
15745 15740
15746 15741
15747 15742 /*
15748 15743 * Routine to copy GUID into the lun structure.
15749 15744 * returns 0 if copy was successful and 1 if encountered a
15750 15745 * failure and did not copy the guid.
15751 15746 */
15752 15747 static int
15753 15748 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
15754 15749 {
15755 15750
15756 15751 int retval = 0;
15757 15752
15758 15753 /* add one for the null terminator */
15759 15754 const unsigned int len = strlen(guidp) + 1;
15760 15755
15761 15756 if ((guidp == NULL) || (plun == NULL)) {
15762 15757 return (1);
15763 15758 }
15764 15759
15765 15760 /*
15766 15761 * if the plun->lun_guid already has been allocated,
15767 15762 * then check the size. if the size is exact, reuse
15768 15763 * it....if not free it an allocate the required size.
15769 15764 * The reallocation should NOT typically happen
15770 15765 * unless the GUIDs reported changes between passes.
15771 15766 * We free up and alloc again even if the
15772 15767 * size was more than required. This is due to the
15773 15768 * fact that the field lun_guid_size - serves
15774 15769 * dual role of indicating the size of the wwn
15775 15770 * size and ALSO the allocation size.
15776 15771 */
15777 15772 if (plun->lun_guid) {
15778 15773 if (plun->lun_guid_size != len) {
15779 15774 /*
15780 15775 * free the allocated memory and
15781 15776 * initialize the field
15782 15777 * lun_guid_size to 0.
15783 15778 */
15784 15779 kmem_free(plun->lun_guid, plun->lun_guid_size);
15785 15780 plun->lun_guid = NULL;
15786 15781 plun->lun_guid_size = 0;
15787 15782 }
15788 15783 }
15789 15784 /*
15790 15785 * alloc only if not already done.
15791 15786 */
15792 15787 if (plun->lun_guid == NULL) {
15793 15788 plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15794 15789 if (plun->lun_guid == NULL) {
15795 15790 cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block:"
15796 15791 "Unable to allocate"
15797 15792 "Memory for GUID!!! size %d", len);
15798 15793 retval = 1;
15799 15794 } else {
15800 15795 plun->lun_guid_size = len;
15801 15796 }
15802 15797 }
15803 15798 if (plun->lun_guid) {
15804 15799 /*
15805 15800 * now copy the GUID
15806 15801 */
15807 15802 bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15808 15803 }
15809 15804 return (retval);
15810 15805 }
15811 15806
15812 15807 /*
15813 15808 * fcp_reconfig_wait
15814 15809 *
15815 15810 * Wait for a rediscovery/reconfiguration to complete before continuing.
15816 15811 */
15817 15812
15818 15813 static void
15819 15814 fcp_reconfig_wait(struct fcp_port *pptr)
15820 15815 {
15821 15816 clock_t reconfig_start, wait_timeout;
15822 15817
15823 15818 /*
15824 15819 * Quick check. If pptr->port_tmp_cnt is 0, there is no
15825 15820 * reconfiguration in progress.
15826 15821 */
15827 15822
15828 15823 mutex_enter(&pptr->port_mutex);
15829 15824 if (pptr->port_tmp_cnt == 0) {
15830 15825 mutex_exit(&pptr->port_mutex);
15831 15826 return;
15832 15827 }
15833 15828 mutex_exit(&pptr->port_mutex);
15834 15829
15835 15830 /*
15836 15831 * If we cause a reconfig by raising power, delay until all devices
15837 15832 * report in (port_tmp_cnt returns to 0)
15838 15833 */
15839 15834
15840 15835 reconfig_start = ddi_get_lbolt();
15841 15836 wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);
15842 15837
15843 15838 mutex_enter(&pptr->port_mutex);
15844 15839
15845 15840 while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
15846 15841 pptr->port_tmp_cnt) {
15847 15842
15848 15843 (void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
15849 15844 reconfig_start + wait_timeout);
15850 15845 }
15851 15846
15852 15847 mutex_exit(&pptr->port_mutex);
15853 15848
15854 15849 /*
15855 15850 * Even if fcp_tmp_count isn't 0, continue without error. The port
15856 15851 * we want may still be ok. If not, it will error out later
15857 15852 */
15858 15853 }
15859 15854
15860 15855 /*
15861 15856 * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
15862 15857 * We rely on the fcp_global_mutex to provide protection against changes to
15863 15858 * the fcp_lun_blacklist.
15864 15859 *
15865 15860 * You can describe a list of target port WWNs and LUN numbers which will
15866 15861 * not be configured. LUN numbers will be interpreted as decimal. White
15867 15862 * spaces and ',' can be used in the list of LUN numbers.
15868 15863 *
15869 15864 * To prevent LUNs 1 and 2 from being configured for target
15870 15865 * port 510000f010fd92a1 and target port 510000e012079df1, set:
15871 15866 *
15872 15867 * pwwn-lun-blacklist=
15873 15868 * "510000f010fd92a1,1,2",
15874 15869 * "510000e012079df1,1,2";
15875 15870 */
15876 15871 static void
15877 15872 fcp_read_blacklist(dev_info_t *dip,
15878 15873 struct fcp_black_list_entry **pplun_blacklist)
15879 15874 {
15880 15875 char **prop_array = NULL;
15881 15876 char *curr_pwwn = NULL;
15882 15877 char *curr_lun = NULL;
15883 15878 uint32_t prop_item = 0;
15884 15879 int idx = 0;
15885 15880 int len = 0;
15886 15881
15887 15882 ASSERT(mutex_owned(&fcp_global_mutex));
15888 15883 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
15889 15884 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
15890 15885 LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
15891 15886 return;
15892 15887 }
15893 15888
15894 15889 for (idx = 0; idx < prop_item; idx++) {
15895 15890
15896 15891 curr_pwwn = prop_array[idx];
15897 15892 while (*curr_pwwn == ' ') {
15898 15893 curr_pwwn++;
15899 15894 }
15900 15895 if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
15901 15896 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15902 15897 ", please check.", curr_pwwn);
15903 15898 continue;
15904 15899 }
15905 15900 if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
15906 15901 (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
15907 15902 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15908 15903 ", please check.", curr_pwwn);
15909 15904 continue;
15910 15905 }
15911 15906 for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
15912 15907 if (isxdigit(curr_pwwn[len]) != TRUE) {
15913 15908 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
15914 15909 "blacklist, please check.", curr_pwwn);
15915 15910 break;
15916 15911 }
15917 15912 }
15918 15913 if (len != sizeof (la_wwn_t) * 2) {
15919 15914 continue;
15920 15915 }
15921 15916
15922 15917 curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
15923 15918 *(curr_lun - 1) = '\0';
15924 15919 fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
15925 15920 }
15926 15921
15927 15922 ddi_prop_free(prop_array);
15928 15923 }
15929 15924
/*
 * Get the masking info about one remote target port designated by wwn.
 * Lun ids could be separated by ',' or white spaces.
 *
 * curr_lun is scanned token by token: "offset" marks the start of the
 * current token, "idx" its length.  Each numeric token is converted to
 * a LUN id and added to the blacklist; malformed tokens are logged and
 * skipped.
 */
static void
fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
    struct fcp_black_list_entry **pplun_blacklist)
{
	int idx = 0;
	uint32_t offset = 0;
	unsigned long lun_id = 0;
	char lunid_buf[16];
	char *pend = NULL;
	int illegal_digit = 0;

	while (offset < strlen(curr_lun)) {
		/*
		 * Advance idx to the end of the current token (a token is
		 * terminated by ',', ' ' or NUL), counting any non-digit
		 * characters seen along the way.
		 */
		while ((curr_lun[offset + idx] != ',') &&
		    (curr_lun[offset + idx] != '\0') &&
		    (curr_lun[offset + idx] != ' ')) {
			if (isdigit(curr_lun[offset + idx]) == 0) {
				illegal_digit++;
			}
			idx++;
		}
		/* Token contained non-digits: log it and move past it. */
		if (illegal_digit > 0) {
			offset += (idx+1);	/* To the start of next lun */
			idx = 0;
			illegal_digit = 0;
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check digits.",
			    curr_lun, curr_pwwn);
			continue;
		}
		/*
		 * Token too long for lunid_buf (would overflow it plus its
		 * terminator); give up on the rest of this list.
		 */
		if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check the length of LUN#.",
			    curr_lun, curr_pwwn);
			break;
		}
		if (idx == 0) { /* ignore ' ' or ',' or '\0' */
			offset++;
			continue;
		}

		/* Copy the token out, NUL-terminate, and convert (base 10). */
		bcopy(curr_lun + offset, lunid_buf, idx);
		lunid_buf[idx] = '\0';
		if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
			fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
		} else {
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check %s.",
			    curr_lun, curr_pwwn, lunid_buf);
		}
		offset += (idx+1);	/* To the start of next lun */
		idx = 0;
	}
}
15987 15982
15988 15983 /*
15989 15984 * Add one masking record
15990 15985 */
15991 15986 static void
15992 15987 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
15993 15988 struct fcp_black_list_entry **pplun_blacklist)
15994 15989 {
15995 15990 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
15996 15991 struct fcp_black_list_entry *new_entry = NULL;
15997 15992 la_wwn_t wwn;
15998 15993
15999 15994 fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
16000 15995 while (tmp_entry) {
16001 15996 if ((bcmp(&tmp_entry->wwn, &wwn,
16002 15997 sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
16003 15998 return;
16004 15999 }
16005 16000
16006 16001 tmp_entry = tmp_entry->next;
16007 16002 }
16008 16003
16009 16004 /* add to black list */
16010 16005 new_entry = (struct fcp_black_list_entry *)kmem_zalloc
16011 16006 (sizeof (struct fcp_black_list_entry), KM_SLEEP);
16012 16007 bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
16013 16008 new_entry->lun = lun_id;
16014 16009 new_entry->masked = 0;
16015 16010 new_entry->next = *pplun_blacklist;
16016 16011 *pplun_blacklist = new_entry;
16017 16012 }
16018 16013
16019 16014 /*
16020 16015 * Check if we should mask the specified lun of this fcp_tgt
16021 16016 */
16022 16017 static int
16023 16018 fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id)
16024 16019 {
16025 16020 struct fcp_black_list_entry *remote_port;
16026 16021
16027 16022 remote_port = fcp_lun_blacklist;
16028 16023 while (remote_port != NULL) {
16029 16024 if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
16030 16025 if (remote_port->lun == lun_id) {
16031 16026 remote_port->masked++;
16032 16027 if (remote_port->masked == 1) {
16033 16028 fcp_log(CE_NOTE, NULL, "LUN %d of port "
16034 16029 "%02x%02x%02x%02x%02x%02x%02x%02x "
16035 16030 "is masked due to black listing.\n",
16036 16031 lun_id, wwn->raw_wwn[0],
16037 16032 wwn->raw_wwn[1], wwn->raw_wwn[2],
16038 16033 wwn->raw_wwn[3], wwn->raw_wwn[4],
16039 16034 wwn->raw_wwn[5], wwn->raw_wwn[6],
16040 16035 wwn->raw_wwn[7]);
16041 16036 }
16042 16037 return (TRUE);
16043 16038 }
16044 16039 }
16045 16040 remote_port = remote_port->next;
16046 16041 }
16047 16042 return (FALSE);
16048 16043 }
16049 16044
16050 16045 /*
16051 16046 * Release all allocated resources
16052 16047 */
16053 16048 static void
16054 16049 fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist)
16055 16050 {
16056 16051 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
16057 16052 struct fcp_black_list_entry *current_entry = NULL;
16058 16053
16059 16054 ASSERT(mutex_owned(&fcp_global_mutex));
16060 16055 /*
16061 16056 * Traverse all luns
16062 16057 */
16063 16058 while (tmp_entry) {
16064 16059 current_entry = tmp_entry;
16065 16060 tmp_entry = tmp_entry->next;
16066 16061 kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
16067 16062 }
16068 16063 *pplun_blacklist = NULL;
16069 16064 }
16070 16065
/*
 * In fcp module,
 * pkt@scsi_pkt, cmd@fcp_pkt, icmd@fcp_ipkt, fpkt@fc_packet, pptr@fcp_port
 *
 * tran_init_pkt(9E) implementation for pseudo FC-HBAs: allocates (or
 * reuses) a scsi_pkt with the fcp_pkt_t and FCA private area appended,
 * links the embedded fc_packet to it, and maps in the data buffer.
 * Returns NULL if packet allocation fails.
 */
static struct scsi_pkt *
fcp_pseudo_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
	fcp_port_t *pptr = ADDR2FCP(ap);
	fcp_pkt_t *cmd = NULL;
	fc_frame_hdr_t *hp;

	/*
	 * First step: get the packet
	 */
	if (pkt == NULL) {
		/*
		 * The extra tran-private space holds the fcp_pkt_t plus
		 * the FCA's per-packet private data.
		 */
		pkt = scsi_hba_pkt_alloc(pptr->port_dip, ap, cmdlen, statuslen,
		    tgtlen, sizeof (fcp_pkt_t) + pptr->port_priv_pkt_len,
		    callback, arg);
		if (pkt == NULL) {
			return (NULL);
		}

		/*
		 * All fields in scsi_pkt will be initialized properly or
		 * set to zero. We need do nothing for scsi_pkt.
		 */
		/*
		 * But it's our responsibility to link other related data
		 * structures. Their initialization will be done, just
		 * before the scsi_pkt will be sent to FCA.
		 */
		cmd = PKT2CMD(pkt);
		cmd->cmd_pkt = pkt;
		cmd->cmd_fp_pkt = &cmd->cmd_fc_packet;
		/*
		 * fc_packet_t
		 */
		cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
		/* FCA private area sits immediately after the fcp_pkt. */
		cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
		    sizeof (struct fcp_pkt));
		cmd->cmd_fp_pkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
		cmd->cmd_fp_pkt->pkt_cmdlen = sizeof (struct fcp_cmd);
		cmd->cmd_fp_pkt->pkt_resp = cmd->cmd_fcp_rsp;
		cmd->cmd_fp_pkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
		/*
		 * Fill in the Fabric Channel Header
		 */
		hp = &cmd->cmd_fp_pkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_COMMAND;
		hp->rsvd = 0;
		hp->type = FC_TYPE_SCSI_FCP;
		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->seq_id = 0;
		hp->df_ctl = 0;
		hp->seq_cnt = 0;
		hp->ox_id = 0xffff;
		hp->rx_id = 0xffff;
		hp->ro = 0;
	} else {
		/*
		 * We need think if we should reset any elements in
		 * related data structures.
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_6, 0,
		    "reusing pkt, flags %d", flags);
		cmd = PKT2CMD(pkt);
		/* Drop any stale port-device handle from a prior use. */
		if (cmd->cmd_fp_pkt->pkt_pd) {
			cmd->cmd_fp_pkt->pkt_pd = NULL;
		}
	}

	/*
	 * Second step: dma allocation/move
	 */
	if (bp && bp->b_bcount != 0) {
		/*
		 * Mark if it's read or write
		 */
		if (bp->b_flags & B_READ) {
			cmd->cmd_flags |= CFLAG_IS_READ;
		} else {
			cmd->cmd_flags &= ~CFLAG_IS_READ;
		}

		/* Map the buffer into kernel virtual address space. */
		bp_mapin(bp);
		cmd->cmd_fp_pkt->pkt_data = bp->b_un.b_addr;
		cmd->cmd_fp_pkt->pkt_datalen = bp->b_bcount;
		cmd->cmd_fp_pkt->pkt_data_resid = 0;
	} else {
		/*
		 * It seldom happens, except when CLUSTER or SCSI_VHCI wants
		 * to send zero-length read/write.
		 */
		cmd->cmd_fp_pkt->pkt_data = NULL;
		cmd->cmd_fp_pkt->pkt_datalen = 0;
	}

	return (pkt);
}
16173 16168
16174 16169 static void
16175 16170 fcp_pseudo_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16176 16171 {
16177 16172 fcp_port_t *pptr = ADDR2FCP(ap);
16178 16173
16179 16174 /*
16180 16175 * First we let FCA to uninitilize private part.
16181 16176 */
16182 16177 (void) fc_ulp_uninit_packet(pptr->port_fp_handle,
16183 16178 PKT2CMD(pkt)->cmd_fp_pkt);
16184 16179
16185 16180 /*
16186 16181 * Then we uninitialize fc_packet.
16187 16182 */
16188 16183
16189 16184 /*
16190 16185 * Thirdly, we uninitializae fcp_pkt.
16191 16186 */
16192 16187
16193 16188 /*
16194 16189 * In the end, we free scsi_pkt.
16195 16190 */
16196 16191 scsi_hba_pkt_free(ap, pkt);
16197 16192 }
16198 16193
/*
 * tran_start(9E) implementation for pseudo FC-HBAs: finish initializing
 * the fcp_cmd, fc_packet and scsi_pkt for this command and hand it to
 * the transport.  Returns TRAN_ACCEPT on success, TRAN_BUSY or
 * TRAN_FATAL_ERROR on transport failure; FLAG_NOINTR packets are run
 * to completion via polled I/O instead.
 */
static int
fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	fcp_port_t *pptr = ADDR2FCP(ap);
	fcp_lun_t *plun = ADDR2LUN(ap);
	fcp_tgt_t *ptgt = plun->lun_tgt;
	fcp_pkt_t *cmd = PKT2CMD(pkt);
	fcp_cmd_t *fcmd = &cmd->cmd_fcp_cmd;
	fc_packet_t *fpkt = cmd->cmd_fp_pkt;
	int rval;

	/* Bind the packet to the target's port device. */
	fpkt->pkt_pd = ptgt->tgt_pd_handle;
	(void) fc_ulp_init_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt, 1);

	/*
	 * Firstly, we need initialize fcp_pkt_t
	 * Secondly, we need initialize fcp_cmd_t.
	 */
	bcopy(pkt->pkt_cdbp, fcmd->fcp_cdb, pkt->pkt_cdblen);
	fcmd->fcp_data_len = fpkt->pkt_datalen;
	fcmd->fcp_ent_addr = plun->lun_addr;
	/* Map the SCSA tag flags onto the FCP task queue type. */
	if (pkt->pkt_flags & FLAG_HTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
	} else if (pkt->pkt_flags & FLAG_OTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
	} else if (pkt->pkt_flags & FLAG_STAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
	} else {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	}

	if (cmd->cmd_flags & CFLAG_IS_READ) {
		fcmd->fcp_cntl.cntl_read_data = 1;
		fcmd->fcp_cntl.cntl_write_data = 0;
	} else {
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 1;
	}

	/*
	 * Then we need initialize fc_packet_t too.
	 */
	/* Pad the FC timeout so the target gets to time out first. */
	fpkt->pkt_timeout = pkt->pkt_time + 2;
	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
	if (cmd->cmd_flags & CFLAG_IS_READ) {
		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
	} else {
		fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
	}

	if (pkt->pkt_flags & FLAG_NOINTR) {
		/* Polled mode: no completion callback will be invoked. */
		fpkt->pkt_comp = NULL;
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
	} else {
		fpkt->pkt_comp = fcp_cmd_callback;
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
		if (pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
		}
	}

	/*
	 * Lastly, we need initialize scsi_pkt
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;
	pkt->pkt_resid = 0;

	/*
	 * if interrupts aren't allowed (e.g. at dump time) then we'll
	 * have to do polled I/O
	 */
	if (pkt->pkt_flags & FLAG_NOINTR) {
		return (fcp_dopoll(pptr, cmd));
	}

	cmd->cmd_state = FCP_PKT_ISSUED;
	rval = fcp_transport(pptr->port_fp_handle, fpkt, 0);
	if (rval == FC_SUCCESS) {
		return (TRAN_ACCEPT);
	}

	/*
	 * Need more consideration
	 *
	 * pkt->pkt_flags & FLAG_NOQUEUE could abort other pkt
	 */
	/* Transport refused the packet; roll the state back to idle. */
	cmd->cmd_state = FCP_PKT_IDLE;
	if (rval == FC_TRAN_BUSY) {
		return (TRAN_BUSY);
	} else {
		return (TRAN_FATAL_ERROR);
	}
}
16295 16290
/*
 * scsi_poll will always call tran_sync_pkt for pseudo FC-HBAs
 * SCSA will initialize it to scsi_sync_cache_pkt for physical FC-HBAs
 *
 * This implementation performs no work other than emitting a trace
 * record of the call.
 */
static void
fcp_pseudo_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	FCP_TRACE(fcp_logq, "fcp_pseudo_sync_pkt", fcp_trace,
	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
}
16306 16301
/*
 * scsi_dmafree will always call tran_dmafree, when STATE_ARQ_DONE
 *
 * This implementation performs no work other than emitting a trace
 * record of the call.
 */
static void
fcp_pseudo_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	FCP_TRACE(fcp_logq, "fcp_pseudo_dmafree", fcp_trace,
	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
}
↓ open down ↓ |
5213 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX