1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * Copyright 2018 Nexenta Systems, Inc.
28 */
29
30 /*
31 * Fibre Channel SCSI ULP Mapping driver
32 */
33
34 #include <sys/scsi/scsi.h>
35 #include <sys/types.h>
36 #include <sys/varargs.h>
37 #include <sys/devctl.h>
38 #include <sys/thread.h>
39 #include <sys/thread.h>
40 #include <sys/open.h>
41 #include <sys/file.h>
42 #include <sys/sunndi.h>
43 #include <sys/console.h>
44 #include <sys/proc.h>
45 #include <sys/time.h>
46 #include <sys/utsname.h>
47 #include <sys/scsi/impl/scsi_reset_notify.h>
48 #include <sys/ndi_impldefs.h>
49 #include <sys/byteorder.h>
50 #include <sys/ctype.h>
51 #include <sys/sunmdi.h>
52
53 #include <sys/fibre-channel/fc.h>
54 #include <sys/fibre-channel/impl/fc_ulpif.h>
55 #include <sys/fibre-channel/ulp/fcpvar.h>
56
57 /*
58 * Discovery Process
59 * =================
60 *
61 * The discovery process is a major function of FCP. In order to help
62 * understand that function a flow diagram is given here. This diagram
63 * doesn't claim to cover all the cases and the events that can occur during
64 * the discovery process nor the subtleties of the code. The code paths shown
65 * are simplified. Its purpose is to help the reader (and potentially bug
66 * fixer) have an overall view of the logic of the code. For that reason the
 * diagram covers the simple case of the line coming up cleanly or of a new
 * port attaching to FCP while the link is up.  The reader must keep in
 * mind that:
70 *
71 * - There are special cases where bringing devices online and offline
72 * is driven by Ioctl.
73 *
74 * - The behavior of the discovery process can be modified through the
75 * .conf file.
76 *
77 * - The line can go down and come back up at any time during the
78 * discovery process which explains some of the complexity of the code.
79 *
80 * ............................................................................
81 *
82 * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
83 *
84 *
85 * +-------------------------+
86 * fp/fctl module --->| fcp_port_attach |
87 * +-------------------------+
88 * | |
89 * | |
90 * | v
91 * | +-------------------------+
92 * | | fcp_handle_port_attach |
93 * | +-------------------------+
94 * | |
95 * | |
96 * +--------------------+ |
97 * | |
98 * v v
99 * +-------------------------+
100 * | fcp_statec_callback |
101 * +-------------------------+
102 * |
103 * |
104 * v
105 * +-------------------------+
106 * | fcp_handle_devices |
107 * +-------------------------+
108 * |
109 * |
110 * v
111 * +-------------------------+
112 * | fcp_handle_mapflags |
113 * +-------------------------+
114 * |
115 * |
116 * v
117 * +-------------------------+
118 * | fcp_send_els |
119 * | |
120 * | PLOGI or PRLI To all the|
121 * | reachable devices. |
122 * +-------------------------+
123 *
124 *
125 * ............................................................................
126 *
127 * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
128 * STEP 1 are called (it is actually the same function).
129 *
130 *
131 * +-------------------------+
132 * | fcp_icmd_callback |
133 * fp/fctl module --->| |
134 * | callback for PLOGI and |
135 * | PRLI. |
136 * +-------------------------+
137 * |
138 * |
139 * Received PLOGI Accept /-\ Received PRLI Accept
140 * _ _ _ _ _ _ / \_ _ _ _ _ _
141 * | \ / |
142 * | \-/ |
143 * | |
144 * v v
145 * +-------------------------+ +-------------------------+
146 * | fcp_send_els | | fcp_send_scsi |
147 * | | | |
148 * | PRLI | | REPORT_LUN |
149 * +-------------------------+ +-------------------------+
150 *
151 * ............................................................................
152 *
153 * STEP 3: The callback functions of the SCSI commands issued by FCP are called
154 * (It is actually the same function).
155 *
156 *
157 * +-------------------------+
158 * fp/fctl module ------->| fcp_scsi_callback |
159 * +-------------------------+
160 * |
161 * |
162 * |
163 * Receive REPORT_LUN reply /-\ Receive INQUIRY PAGE83 reply
164 * _ _ _ _ _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ _ _ _ _
165 * | \ / |
166 * | \-/ |
167 * | | |
168 * | Receive INQUIRY reply| |
169 * | | |
170 * v v v
171 * +------------------------+ +----------------------+ +----------------------+
172 * | fcp_handle_reportlun | | fcp_handle_inquiry | | fcp_handle_page83 |
173 * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
174 * +------------------------+ +----------------------+ +----------------------+
175 * | | |
176 * | | |
177 * | | |
178 * v v |
179 * +-----------------+ +-----------------+ |
180 * | fcp_send_scsi | | fcp_send_scsi | |
181 * | | | | |
182 * | INQUIRY | | INQUIRY PAGE83 | |
183 * | (To each LUN) | +-----------------+ |
184 * +-----------------+ |
185 * |
186 * v
187 * +------------------------+
188 * | fcp_call_finish_init |
189 * +------------------------+
190 * |
191 * v
192 * +-----------------------------+
193 * | fcp_call_finish_init_held |
194 * +-----------------------------+
195 * |
196 * |
197 * All LUNs scanned /-\
198 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ / \
199 * | \ /
200 * | \-/
201 * v |
202 * +------------------+ |
203 * | fcp_finish_tgt | |
204 * +------------------+ |
205 * | Target Not Offline and |
206 * Target Not Offline and | not marked and tgt_node_state |
207 * marked /-\ not FCP_TGT_NODE_ON_DEMAND |
208 * _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ |
209 * | \ / | |
210 * | \-/ | |
211 * v v |
212 * +----------------------------+ +-------------------+ |
213 * | fcp_offline_target | | fcp_create_luns | |
214 * | | +-------------------+ |
215 * | A structure fcp_tgt_elem | | |
216 * | is created and queued in | v |
217 * | the FCP port list | +-------------------+ |
218 * | port_offline_tgts. It | | fcp_pass_to_hp | |
219 * | will be unqueued by the | | | |
220 * | watchdog timer. | | Called for each | |
221 * +----------------------------+ | LUN. Dispatches | |
222 * | | fcp_hp_task | |
223 * | +-------------------+ |
224 * | | |
225 * | | |
226 * | | |
227 * | +---------------->|
228 * | |
229 * +---------------------------------------------->|
230 * |
231 * |
232 * All the targets (devices) have been scanned /-\
233 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ / \
234 * | \ /
235 * | \-/
236 * +-------------------------------------+ |
237 * | fcp_finish_init | |
238 * | | |
239 * | Signal broadcasts the condition | |
240 * | variable port_config_cv of the FCP | |
241 * | port. One potential code sequence | |
242 * | waiting on the condition variable | |
243 * | the code sequence handling | |
244 * | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER| |
245 * | The other is in the function | |
246 * | fcp_reconfig_wait which is called | |
247 * | in the transmit path preventing IOs | |
248 * | from going through till the disco- | |
249 * | very process is over. | |
250 * +-------------------------------------+ |
251 * | |
252 * | |
253 * +--------------------------------->|
254 * |
255 * v
256 * Return
257 *
258 * ............................................................................
259 *
260 * STEP 4: The hot plug task is called (for each fcp_hp_elem).
261 *
262 *
263 * +-------------------------+
264 * | fcp_hp_task |
265 * +-------------------------+
266 * |
267 * |
268 * v
269 * +-------------------------+
270 * | fcp_trigger_lun |
271 * +-------------------------+
272 * |
273 * |
274 * v
275 * Bring offline /-\ Bring online
276 * _ _ _ _ _ _ _ _ _/ \_ _ _ _ _ _ _ _ _ _
277 * | \ / |
278 * | \-/ |
279 * v v
280 * +---------------------+ +-----------------------+
281 * | fcp_offline_child | | fcp_get_cip |
282 * +---------------------+ | |
283 * | Creates a dev_info_t |
284 * | or a mdi_pathinfo_t |
285 * | depending on whether |
286 * | mpxio is on or off. |
287 * +-----------------------+
288 * |
289 * |
290 * v
291 * +-----------------------+
292 * | fcp_online_child |
293 * | |
294 * | Set device online |
295 * | using NDI or MDI. |
296 * +-----------------------+
297 *
298 * ............................................................................
299 *
 * STEP 5: The watchdog timer expires.  The watchdog timer does much more than
 *         what is described here.  We only show the target offline path.
302 *
303 *
304 * +--------------------------+
305 * | fcp_watch |
306 * +--------------------------+
307 * |
308 * |
309 * v
310 * +--------------------------+
311 * | fcp_scan_offline_tgts |
312 * +--------------------------+
313 * |
314 * |
315 * v
316 * +--------------------------+
317 * | fcp_offline_target_now |
318 * +--------------------------+
319 * |
320 * |
321 * v
322 * +--------------------------+
323 * | fcp_offline_tgt_luns |
324 * +--------------------------+
325 * |
326 * |
327 * v
328 * +--------------------------+
329 * | fcp_offline_lun |
330 * +--------------------------+
331 * |
332 * |
333 * v
334 * +----------------------------------+
335 * | fcp_offline_lun_now |
336 * | |
337 * | A request (or two if mpxio) is |
338 * | sent to the hot plug task using |
339 * | a fcp_hp_elem structure. |
340 * +----------------------------------+
341 */
342
343 /*
344 * Functions registered with DDI framework
345 */
346 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
347 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
348 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
349 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
350 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
351 cred_t *credp, int *rval);
352
353 /*
354 * Functions registered with FC Transport framework
355 */
356 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
357 fc_attach_cmd_t cmd, uint32_t s_id);
358 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
359 fc_detach_cmd_t cmd);
360 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
361 int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
362 uint32_t claimed);
363 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
364 fc_unsol_buf_t *buf, uint32_t claimed);
365 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
366 fc_unsol_buf_t *buf, uint32_t claimed);
367 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
368 uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
369 uint32_t dev_cnt, uint32_t port_sid);
370
371 /*
372 * Functions registered with SCSA framework
373 */
374 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
375 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
376 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
377 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
378 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
379 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
380 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
381 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
382 static int fcp_scsi_reset(struct scsi_address *ap, int level);
383 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
384 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
385 int whom);
386 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
387 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
388 void (*callback)(caddr_t), caddr_t arg);
389 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
390 char *name, ddi_eventcookie_t *event_cookiep);
391 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
392 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
393 ddi_callback_id_t *cb_id);
394 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
395 ddi_callback_id_t cb_id);
396 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
397 ddi_eventcookie_t eventid, void *impldata);
398 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
399 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
400 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
401 ddi_bus_config_op_t op, void *arg);
402
403 /*
404 * Internal functions
405 */
406 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
407 int mode, int *rval);
408
409 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
410 int mode, int *rval);
411 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
412 struct fcp_scsi_cmd *fscsi, int mode);
413 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
414 caddr_t base_addr, int mode);
415 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
416
417 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
418 la_wwn_t *pwwn, int *ret_val, int *fc_status, int *fc_pkt_state,
419 int *fc_pkt_reason, int *fc_pkt_action);
420 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
421 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
422 static int fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status,
423 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
424 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
425 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
426 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
427 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
428
429 static void fcp_handle_devices(struct fcp_port *pptr,
430 fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
431 fcp_map_tag_t *map_tag, int cause);
432 static int fcp_handle_mapflags(struct fcp_port *pptr,
433 struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
434 int tgt_cnt, int cause);
435 static int fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause);
436 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
437 struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
438 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
439 int cause);
440 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
441 uint32_t state);
442 static struct fcp_port *fcp_get_port(opaque_t port_handle);
443 static void fcp_unsol_callback(fc_packet_t *fpkt);
444 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
445 uchar_t r_ctl, uchar_t type);
446 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
447 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
448 struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
449 int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
450 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
451 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
452 int nodma, int flags);
453 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
454 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
455 uchar_t *wwn);
456 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
457 uint32_t d_id);
458 static void fcp_icmd_callback(fc_packet_t *fpkt);
459 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
460 int len, int lcount, int tcount, int cause, uint32_t rscn_count);
461 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
462 static void fcp_scsi_callback(fc_packet_t *fpkt);
463 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
464 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
465 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
466 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
467 uint16_t lun_num);
468 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
469 int link_cnt, int tgt_cnt, int cause);
470 static void fcp_finish_init(struct fcp_port *pptr);
471 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
472 int tgt_cnt, int cause);
473 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
474 int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags);
475 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
476 int link_cnt, int tgt_cnt, int nowait, int flags);
477 static void fcp_offline_target_now(struct fcp_port *pptr,
478 struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
479 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
480 int tgt_cnt, int flags);
481 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
482 int nowait, int flags);
483 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
484 int tgt_cnt);
485 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
486 int tgt_cnt, int flags);
487 static void fcp_scan_offline_luns(struct fcp_port *pptr);
488 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
489 static void fcp_update_offline_flags(struct fcp_lun *plun);
490 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
491 static void fcp_abort_commands(struct fcp_pkt *head, struct
492 fcp_port *pptr);
493 static void fcp_cmd_callback(fc_packet_t *fpkt);
494 static void fcp_complete_pkt(fc_packet_t *fpkt);
495 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
496 struct fcp_port *pptr);
497 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
498 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
499 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
500 static void fcp_dealloc_lun(struct fcp_lun *plun);
501 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
502 fc_portmap_t *map_entry, int link_cnt);
503 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
504 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
505 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
506 int internal);
507 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
508 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
509 uint32_t s_id, int instance);
510 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
511 int instance);
512 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
513 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
514 int);
515 static void fcp_kmem_cache_destructor(struct scsi_pkt *, scsi_hba_tran_t *);
516 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
517 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
518 int flags);
519 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
520 static int fcp_reset_target(struct scsi_address *ap, int level);
521 static int fcp_commoncap(struct scsi_address *ap, char *cap,
522 int val, int tgtonly, int doset);
523 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
524 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
525 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
526 int sleep);
527 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
528 uint32_t s_id, fc_attach_cmd_t cmd, int instance);
529 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
530 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
531 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
532 int lcount, int tcount);
533 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
534 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
535 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
536 int tgt_cnt);
537 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
538 dev_info_t *pdip, caddr_t name);
539 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
540 int lcount, int tcount, int flags, int *circ);
541 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
542 int lcount, int tcount, int flags, int *circ);
543 static void fcp_remove_child(struct fcp_lun *plun);
544 static void fcp_watch(void *arg);
545 static void fcp_check_reset_delay(struct fcp_port *pptr);
546 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
547 struct fcp_lun *rlun, int tgt_cnt);
548 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
549 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
550 uchar_t *wwn, uint16_t lun);
551 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
552 struct fcp_lun *plun);
553 static void fcp_post_callback(struct fcp_pkt *cmd);
554 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
555 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
556 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
557 child_info_t *cip);
558 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
559 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
560 int tgt_cnt, int flags);
561 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
562 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
563 int tgt_cnt, int flags, int wait);
564 static void fcp_retransport_cmd(struct fcp_port *pptr,
565 struct fcp_pkt *cmd);
566 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
567 uint_t statistics);
568 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
569 static void fcp_update_targets(struct fcp_port *pptr,
570 fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
571 static int fcp_call_finish_init(struct fcp_port *pptr,
572 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
573 static int fcp_call_finish_init_held(struct fcp_port *pptr,
574 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
575 static void fcp_reconfigure_luns(void * tgt_handle);
576 static void fcp_free_targets(struct fcp_port *pptr);
577 static void fcp_free_target(struct fcp_tgt *ptgt);
578 static int fcp_is_retryable(struct fcp_ipkt *icmd);
579 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
580 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
581 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
582 static void fcp_print_error(fc_packet_t *fpkt);
583 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
584 struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
585 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
586 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
587 uint32_t *dev_cnt);
588 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
589 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
590 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
591 struct fcp_ioctl *, struct fcp_port **);
592 static char *fcp_get_lun_path(struct fcp_lun *plun);
593 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
594 int *rval);
595 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
596 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
597 static char *fcp_get_lun_path(struct fcp_lun *plun);
598 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
599 int *rval);
600 static void fcp_reconfig_wait(struct fcp_port *pptr);
601
602 /*
603 * New functions added for mpxio support
604 */
605 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
606 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
607 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
608 int tcount);
609 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
610 dev_info_t *pdip);
611 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
612 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
613 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
614 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
615 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
616 int what);
617 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
618 fc_packet_t *fpkt);
619 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
620
621 /*
622 * New functions added for lun masking support
623 */
624 static void fcp_read_blacklist(dev_info_t *dip,
625 struct fcp_black_list_entry **pplun_blacklist);
626 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
627 struct fcp_black_list_entry **pplun_blacklist);
628 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
629 struct fcp_black_list_entry **pplun_blacklist);
630 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
631 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
632
633 /*
634 * New functions to support software FCA (like fcoei)
635 */
636 static struct scsi_pkt *fcp_pseudo_init_pkt(
637 struct scsi_address *ap, struct scsi_pkt *pkt,
638 struct buf *bp, int cmdlen, int statuslen,
639 int tgtlen, int flags, int (*callback)(), caddr_t arg);
640 static void fcp_pseudo_destroy_pkt(
641 struct scsi_address *ap, struct scsi_pkt *pkt);
642 static void fcp_pseudo_sync_pkt(
643 struct scsi_address *ap, struct scsi_pkt *pkt);
644 static int fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt);
645 static void fcp_pseudo_dmafree(
646 struct scsi_address *ap, struct scsi_pkt *pkt);
647
648 extern struct mod_ops mod_driverops;
649 /*
650 * This variable is defined in modctl.c and set to '1' after the root driver
651 * and fs are loaded. It serves as an indication that the root filesystem can
652 * be used.
653 */
654 extern int modrootloaded;
655 /*
656 * This table contains strings associated with the SCSI sense key codes. It
657 * is used by FCP to print a clear explanation of the code returned in the
658 * sense information by a device.
659 */
660 extern char *sense_keys[];
661 /*
662 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
663 * under this device that the paths to a physical device are created when
664 * MPxIO is used.
665 */
666 extern dev_info_t *scsi_vhci_dip;
667
668 /*
669 * Report lun processing
670 */
/*
 * Report lun processing
 *
 * Values of the addressing-method field (bits 7-6 of the first byte of
 * each LUN entry returned by REPORT LUNS; see SAM/SPC).
 */
#define	FCP_LUN_ADDRESSING		0x80	/* logical unit addressing */
#define	FCP_PD_ADDRESSING		0x00	/* peripheral device addressing */
#define	FCP_VOLUME_ADDRESSING		0x40	/* flat space addressing */

#define	FCP_SVE_THROTTLE		0x28 /* Vicom */
#define	MAX_INT_DMA			0x7fffffff
/*
 * Property definitions
 *
 * The property-name strings themselves are defined elsewhere in this
 * file; the casts to (char *) satisfy DDI property interfaces.
 */
#define	NODE_WWN_PROP	(char *)fcp_node_wwn_prop
#define	PORT_WWN_PROP	(char *)fcp_port_wwn_prop
#define	TARGET_PROP	(char *)fcp_target_prop
#define	LUN_PROP	(char *)fcp_lun_prop
#define	SAM_LUN_PROP	(char *)fcp_sam_lun_prop
#define	CONF_WWN_PROP	(char *)fcp_conf_wwn_prop
#define	OBP_BOOT_WWN	(char *)fcp_obp_boot_wwn
#define	MANUAL_CFG_ONLY	(char *)fcp_manual_config_only
#define	INIT_PORT_PROP	(char *)fcp_init_port_prop
#define	TGT_PORT_PROP	(char *)fcp_tgt_port_prop
#define	LUN_BLACKLIST_PROP	(char *)fcp_lun_blacklist_prop
/*
 * Short hand macros.  Both expect a local variable named "plun"
 * (struct fcp_lun *) to be in scope at the point of use.
 */
#define	LUN_PORT	(plun->lun_tgt->tgt_port)
#define	LUN_TGT		(plun->lun_tgt)
696
/*
 * Driver private macros
 */
/*
 * Convert a single ASCII hex digit ('0'-'9', 'a'-'f', 'A'-'F') to its
 * numeric value.  NOTE(review): the argument is evaluated multiple
 * times, so it must be side-effect free (no x++).
 */
#define	FCP_ATOB(x)	(((x) >= '0' && (x) <= '9') ? ((x) - '0') : \
			((x) >= 'a' && (x) <= 'f') ? \
			((x) - 'a' + 10) : ((x) - 'A' + 10))

/* Classic MAX; both arguments may be evaluated twice -- no side effects. */
#define	FCP_MAX(a, b)	((a) > (b) ? (a) : (b))

/* Entry count of the fcp_ndi_event_defs table (defined later in the file). */
#define	FCP_N_NDI_EVENTS	\
	(sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))

/* True if the link bounced since internal packet (c) was constructed. */
#define	FCP_LINK_STATE_CHANGED(p, c)	\
	((p)->port_link_cnt != (c)->ipkt_link_cnt)

/* True if target (t) changed since internal packet (c) was constructed. */
#define	FCP_TGT_STATE_CHANGED(t, c)	\
	((t)->tgt_change_cnt != (c)->ipkt_change_cnt)

/* Combined check; currently only the target counter matters, (p) unused. */
#define	FCP_STATE_CHANGED(p, t, c)	\
	(FCP_TGT_STATE_CHANGED(t, c))

/*
 * Transient transport/fabric busy or offline conditions after which a
 * packet should be retried rather than failed outright.
 */
#define	FCP_MUST_RETRY(fpkt)	\
	((fpkt)->pkt_state == FC_PKT_LOCAL_BSY || \
	(fpkt)->pkt_state == FC_PKT_LOCAL_RJT || \
	(fpkt)->pkt_state == FC_PKT_TRAN_BSY || \
	(fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS || \
	(fpkt)->pkt_state == FC_PKT_NPORT_BSY || \
	(fpkt)->pkt_state == FC_PKT_FABRIC_BSY || \
	(fpkt)->pkt_state == FC_PKT_PORT_OFFLINE || \
	(fpkt)->pkt_reason == FC_REASON_OFFLINE)

/*
 * UNIT ATTENTION with ASC/ASCQ 3F/0E: "REPORTED LUNS DATA HAS CHANGED"
 * (SPC) -- the target's LUN inventory must be rediscovered.
 */
#define	FCP_SENSE_REPORTLUN_CHANGED(es) \
	((es)->es_key == KEY_UNIT_ATTENTION && \
	(es)->es_add_code == 0x3f && \
	(es)->es_qual_code == 0x0e)

/*
 * ILLEGAL REQUEST with ASC/ASCQ 25/00: "LOGICAL UNIT NOT SUPPORTED" (SPC).
 */
#define	FCP_SENSE_NO_LUN(es)	\
	((es)->es_key == KEY_ILLEGAL_REQUEST && \
	(es)->es_add_code == 0x25 && \
	(es)->es_qual_code == 0x0)

#define	FCP_VERSION		"20091208-1.192"
#define	FCP_NAME_VERSION	"SunFC FCP v" FCP_VERSION

/* Element count of a true array -- NOT valid on a pointer parameter. */
#define	FCP_NUM_ELEMENTS(array) \
	(sizeof (array) / sizeof ((array)[0]))
743
/*
 * Debugging, Error reporting, and tracing
 */
/*
 * Size in bytes of the trace buffer.  Parenthesized so that the macro
 * is safe inside larger expressions (e.g. "x / FCP_LOG_SIZE" would
 * otherwise parse as "x / 1024 * 1024").
 */
#define	FCP_LOG_SIZE		(1024 * 1024)

/* Bit-mask trace levels; combined with FC_TRACE_LOG_* routing flags below. */
#define	FCP_LEVEL_1		0x00001		/* attach/detach PM CPR */
#define	FCP_LEVEL_2		0x00002		/* failures/Invalid data */
#define	FCP_LEVEL_3		0x00004		/* state change, discovery */
#define	FCP_LEVEL_4		0x00008		/* ULP messages */
#define	FCP_LEVEL_5		0x00010		/* ELS/SCSI cmds */
#define	FCP_LEVEL_6		0x00020		/* Transport failures */
#define	FCP_LEVEL_7		0x00040
#define	FCP_LEVEL_8		0x00080		/* I/O tracing */
#define	FCP_LEVEL_9		0x00100		/* I/O tracing */
758
759
760
761 /*
762 * Log contents to system messages file
763 */
764 #define FCP_MSG_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
765 #define FCP_MSG_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
766 #define FCP_MSG_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
767 #define FCP_MSG_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
768 #define FCP_MSG_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
769 #define FCP_MSG_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
770 #define FCP_MSG_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
771 #define FCP_MSG_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
772 #define FCP_MSG_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_MSG)
773
774
775 /*
776 * Log contents to trace buffer
777 */
778 #define FCP_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
779 #define FCP_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
780 #define FCP_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
781 #define FCP_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
782 #define FCP_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
783 #define FCP_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
784 #define FCP_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
785 #define FCP_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
786 #define FCP_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF)
787
788
789 /*
790 * Log contents to both system messages file and trace buffer
791 */
792 #define FCP_MSG_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF | \
793 FC_TRACE_LOG_MSG)
794 #define FCP_MSG_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF | \
795 FC_TRACE_LOG_MSG)
796 #define FCP_MSG_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF | \
797 FC_TRACE_LOG_MSG)
798 #define FCP_MSG_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF | \
799 FC_TRACE_LOG_MSG)
800 #define FCP_MSG_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF | \
801 FC_TRACE_LOG_MSG)
802 #define FCP_MSG_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF | \
803 FC_TRACE_LOG_MSG)
804 #define FCP_MSG_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF | \
805 FC_TRACE_LOG_MSG)
806 #define FCP_MSG_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF | \
807 FC_TRACE_LOG_MSG)
808 #define FCP_MSG_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF | \
809 FC_TRACE_LOG_MSG)
810 #ifdef DEBUG
811 #define FCP_DTRACE fc_trace_debug
812 #else
813 #define FCP_DTRACE
814 #endif
815
816 #define FCP_TRACE fc_trace_debug
817
/*
 * Character device switch table for the FCP pseudo device.  Only
 * open(9E), close(9E) and ioctl(9E) are implemented; every other
 * entry point is nodev.
 */
static struct cb_ops fcp_cb_ops = {
	fcp_open,			/* open */
	fcp_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	fcp_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};
838
839
/*
 * Device operations vector.  fcp is a pseudo driver: getinfo is the
 * stock one-to-one mapping and there are no bus or power operations.
 */
static struct dev_ops fcp_ops = {
	DEVO_REV,
	0,
	ddi_getinfo_1to1,
	nulldev,		/* identify */
	nulldev,		/* probe */
	fcp_attach,		/* attach and detach are mandatory */
	fcp_detach,
	nodev,			/* reset */
	&fcp_cb_ops,		/* cb_ops */
	NULL,			/* bus_ops */
	NULL,			/* power */
};
853
854
char *fcp_version = FCP_NAME_VERSION;

/* Loadable-module linkage for a device driver (modldrv(9S)). */
static struct modldrv modldrv = {
	&mod_driverops,
	FCP_NAME_VERSION,
	&fcp_ops
};


static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
869
870
/*
 * ULP registration information handed to the FC transport via
 * fc_ulp_add() in _init().  The transport calls back through these
 * entry points for port attach/detach, ioctls, ELS/data events and
 * link state changes.
 */
static fc_ulp_modinfo_t fcp_modinfo = {
	&fcp_modinfo,		/* ulp_handle */
	FCTL_ULP_MODREV_4,	/* ulp_rev */
	FC4_SCSI_FCP,		/* ulp_type */
	"fcp",			/* ulp_name */
	FCP_STATEC_MASK,	/* ulp_statec_mask */
	fcp_port_attach,	/* ulp_port_attach */
	fcp_port_detach,	/* ulp_port_detach */
	fcp_port_ioctl,		/* ulp_port_ioctl */
	fcp_els_callback,	/* ulp_els_callback */
	fcp_data_callback,	/* ulp_data_callback */
	fcp_statec_callback	/* ulp_statec_callback */
};
884
885 #ifdef DEBUG
886 #define FCP_TRACE_DEFAULT (FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
887 FCP_LEVEL_2 | FCP_LEVEL_3 | \
888 FCP_LEVEL_4 | FCP_LEVEL_5 | \
889 FCP_LEVEL_6 | FCP_LEVEL_7)
890 #else
891 #define FCP_TRACE_DEFAULT (FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
892 FCP_LEVEL_2 | FCP_LEVEL_3 | \
893 FCP_LEVEL_4 | FCP_LEVEL_5 | \
894 FCP_LEVEL_6 | FCP_LEVEL_7)
895 #endif
896
/* FCP global variables */
int fcp_bus_config_debug = 0;		/* extra BUS_CONFIG debugging */
static int fcp_log_size = FCP_LOG_SIZE;	/* size of the trace log queue */
static int fcp_trace = FCP_TRACE_DEFAULT; /* active trace level mask */
static fc_trace_logq_t *fcp_logq = NULL; /* trace log queue; set in _init() */
static struct fcp_black_list_entry *fcp_lun_blacklist = NULL;
/*
 * The auto-configuration is set by default. The only way of disabling it is
 * through the property MANUAL_CFG_ONLY in the fcp.conf file.
 */
static int fcp_enable_auto_configuration = 1;
static int fcp_max_bus_config_retries = 4;
static int fcp_lun_ready_retry = 300;
/*
 * The value assigned to the following variable has changed several times due
 * to a problem with the data underruns reporting of some firmware(s). The
 * current value of 50 gives a timeout value of 25 seconds for a max number
 * of 256 LUNs.
 */
static int fcp_max_target_retries = 50;
917 /*
918 * Watchdog variables
919 * ------------------
920 *
921 * fcp_watchdog_init
922 *
923 * Indicates if the watchdog timer is running or not. This is actually
924 * a counter of the number of Fibre Channel ports that attached. When
925 * the first port attaches the watchdog is started. When the last port
926 * detaches the watchdog timer is stopped.
927 *
928 * fcp_watchdog_time
929 *
930 * This is the watchdog clock counter. It is incremented by
931 * fcp_watchdog_time each time the watchdog timer expires.
932 *
933 * fcp_watchdog_timeout
934 *
935 * Increment value of the variable fcp_watchdog_time as well as the
936 * the timeout value of the watchdog timer. The unit is 1 second. It
937 * is strange that this is not a #define but a variable since the code
938 * never changes this value. The reason why it can be said that the
939 * unit is 1 second is because the number of ticks for the watchdog
940 * timer is determined like this:
941 *
942 * fcp_watchdog_tick = fcp_watchdog_timeout *
943 * drv_usectohz(1000000);
944 *
945 * The value 1000000 is hard coded in the code.
946 *
947 * fcp_watchdog_tick
948 *
949 * Watchdog timer value in ticks.
950 */
951 static int fcp_watchdog_init = 0;
952 static int fcp_watchdog_time = 0;
953 static int fcp_watchdog_timeout = 1;
954 static int fcp_watchdog_tick;
955
956 /*
957 * fcp_offline_delay is a global variable to enable customisation of
958 * the timeout on link offlines or RSCNs. The default value is set
959 * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
960 * specified in FCP4 Chapter 11 (see www.t10.org).
961 *
962 * The variable fcp_offline_delay is specified in SECONDS.
963 *
964 * If we made this a static var then the user would not be able to
965 * change it. This variable is set in fcp_attach().
966 */
967 unsigned int fcp_offline_delay = FCP_OFFLINE_DELAY;
968
969 static void *fcp_softstate = NULL; /* for soft state */
970 static uchar_t fcp_oflag = FCP_IDLE; /* open flag */
971 static kmutex_t fcp_global_mutex;
972 static kmutex_t fcp_ioctl_mutex;
973 static dev_info_t *fcp_global_dip = NULL;
974 static timeout_id_t fcp_watchdog_id;
975 const char *fcp_lun_prop = "lun";
976 const char *fcp_sam_lun_prop = "sam-lun";
977 const char *fcp_target_prop = "target";
978 /*
979 * NOTE: consumers of "node-wwn" property include stmsboot in ON
980 * consolidation.
981 */
982 const char *fcp_node_wwn_prop = "node-wwn";
983 const char *fcp_port_wwn_prop = "port-wwn";
984 const char *fcp_conf_wwn_prop = "fc-port-wwn";
985 const char *fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
986 const char *fcp_manual_config_only = "manual_configuration_only";
987 const char *fcp_init_port_prop = "initiator-port";
988 const char *fcp_tgt_port_prop = "target-port";
989 const char *fcp_lun_blacklist_prop = "pwwn-lun-blacklist";
990
991 static struct fcp_port *fcp_port_head = NULL;
992 static ddi_eventcookie_t fcp_insert_eid;
993 static ddi_eventcookie_t fcp_remove_eid;
994
/* NDI event definitions for FC-AL device insertion and removal. */
static ndi_event_definition_t fcp_ndi_event_defs[] = {
	{ FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
	{ FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
};

/*
 * List of valid commands for the scsi_ioctl call
 */
static uint8_t scsi_ioctl_list[] = {
	SCMD_INQUIRY,
	SCMD_REPORT_LUN,
	SCMD_READ_CAPACITY
};
1008
1009 /*
1010 * this is used to dummy up a report lun response for cases
1011 * where the target doesn't support it
1012 */
1013 static uchar_t fcp_dummy_lun[] = {
1014 0x00, /* MSB length (length = no of luns * 8) */
1015 0x00,
1016 0x00,
1017 0x08, /* LSB length */
1018 0x00, /* MSB reserved */
1019 0x00,
1020 0x00,
1021 0x00, /* LSB reserved */
1022 FCP_PD_ADDRESSING,
1023 0x00, /* LUN is ZERO at the first level */
1024 0x00,
1025 0x00, /* second level is zero */
1026 0x00,
1027 0x00, /* third level is zero */
1028 0x00,
1029 0x00 /* fourth level is zero */
1030 };
1031
/*
 * Lookup table indexed by arbitrated-loop physical address (AL_PA);
 * entries of 0x00 (other than index 0 itself) mark AL_PA values that
 * are not valid on the loop.
 */
static uchar_t fcp_alpa_to_switch[] = {
	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * Product-id string matching the "SESS01" (VICOM SVE box) entry in
 * fcp_symmetric_disk_table; presumably compared against INQUIRY
 * product-id data -- confirm against the users of 'pid'.
 */
static caddr_t pid = "SESS01 ";
1060
1061 #if !defined(lint)
1062
1063 _NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
1064 fcp_port::fcp_next fcp_watchdog_id))
1065
1066 _NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))
1067
1068 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1069 fcp_insert_eid
1070 fcp_remove_eid
1071 fcp_watchdog_time))
1072
1073 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1074 fcp_cb_ops
1075 fcp_ops
1076 callb_cpr))
1077
1078 #endif /* lint */
1079
1080 /*
1081 * This table is used to determine whether or not it's safe to copy in
1082 * the target node name for a lun. Since all luns behind the same target
1083 * have the same wwnn, only tagets that do not support multiple luns are
1084 * eligible to be enumerated under mpxio if they aren't page83 compliant.
1085 */
1086
1087 char *fcp_symmetric_disk_table[] = {
1088 "SEAGATE ST",
1089 "IBM DDYFT",
1090 "SUNW SUNWGS", /* Daktari enclosure */
1091 "SUN SENA", /* SES device */
1092 "SUN SESS01" /* VICOM SVE box */
1093 };
1094
1095 int fcp_symmetric_disk_table_size =
1096 sizeof (fcp_symmetric_disk_table)/sizeof (char *);
1097
1098 /*
1099 * This structure is bogus. scsi_hba_attach_setup() requires, as in the kernel
1100 * will panic if you don't pass this in to the routine, this information.
1101 * Need to determine what the actual impact to the system is by providing
1102 * this information if any. Since dma allocation is done in pkt_init it may
1103 * not have any impact. These values are straight from the Writing Device
1104 * Driver manual.
1105 */
1106 static ddi_dma_attr_t pseudo_fca_dma_attr = {
1107 DMA_ATTR_V0, /* ddi_dma_attr version */
1108 0, /* low address */
1109 0xffffffff, /* high address */
1110 0x00ffffff, /* counter upper bound */
1111 1, /* alignment requirements */
1112 0x3f, /* burst sizes */
1113 1, /* minimum DMA access */
1114 0xffffffff, /* maximum DMA access */
1115 (1 << 24) - 1, /* segment boundary restrictions */
1116 1, /* scater/gather list length */
1117 512, /* device granularity */
1118 0 /* DMA flags */
1119 };
1120
1121 /*
1122 * The _init(9e) return value should be that of mod_install(9f). Under
1123 * some circumstances, a failure may not be related mod_install(9f) and
1124 * one would then require a return value to indicate the failure. Looking
1125 * at mod_install(9f), it is expected to return 0 for success and non-zero
1126 * for failure. mod_install(9f) for device drivers, further goes down the
1127 * calling chain and ends up in ddi_installdrv(), whose return values are
1128 * DDI_SUCCESS and DDI_FAILURE - There are also other functions in the
1129 * calling chain of mod_install(9f) which return values like EINVAL and
1130 * in some even return -1.
1131 *
1132 * To work around the vagaries of the mod_install() calling chain, return
1133 * either 0 or ENODEV depending on the success or failure of mod_install()
1134 */
1135 int
1136 _init(void)
1137 {
1138 int rval;
1139
1140 /*
1141 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
1142 * before registering with the transport first.
1143 */
1144 if (ddi_soft_state_init(&fcp_softstate,
1145 sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1146 return (EINVAL);
1147 }
1148
1149 mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1150 mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1151
1152 if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1153 cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1154 mutex_destroy(&fcp_global_mutex);
1155 mutex_destroy(&fcp_ioctl_mutex);
1156 ddi_soft_state_fini(&fcp_softstate);
1157 return (ENODEV);
1158 }
1159
1160 fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1161
1162 if ((rval = mod_install(&modlinkage)) != 0) {
1163 fc_trace_free_logq(fcp_logq);
1164 (void) fc_ulp_remove(&fcp_modinfo);
1165 mutex_destroy(&fcp_global_mutex);
1166 mutex_destroy(&fcp_ioctl_mutex);
1167 ddi_soft_state_fini(&fcp_softstate);
1168 rval = ENODEV;
1169 }
1170
1171 return (rval);
1172 }
1173
1174
1175 /*
1176 * the system is done with us as a driver, so clean up
1177 */
1178 int
1179 _fini(void)
1180 {
1181 int rval;
1182
1183 /*
1184 * don't start cleaning up until we know that the module remove
1185 * has worked -- if this works, then we know that each instance
1186 * has successfully been DDI_DETACHed
1187 */
1188 if ((rval = mod_remove(&modlinkage)) != 0) {
1189 return (rval);
1190 }
1191
1192 (void) fc_ulp_remove(&fcp_modinfo);
1193
1194 ddi_soft_state_fini(&fcp_softstate);
1195 mutex_destroy(&fcp_global_mutex);
1196 mutex_destroy(&fcp_ioctl_mutex);
1197 fc_trace_free_logq(fcp_logq);
1198
1199 return (rval);
1200 }
1201
1202
1203 int
1204 _info(struct modinfo *modinfop)
1205 {
1206 return (mod_info(&modlinkage, modinfop));
1207 }
1208
1209
1210 /*
1211 * attach the module
1212 */
1213 static int
1214 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1215 {
1216 int rval = DDI_SUCCESS;
1217
1218 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1219 FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1220
1221 if (cmd == DDI_ATTACH) {
1222 /* The FCP pseudo device is created here. */
1223 mutex_enter(&fcp_global_mutex);
1224 fcp_global_dip = devi;
1225 mutex_exit(&fcp_global_mutex);
1226
1227 if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1228 0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1229 ddi_report_dev(fcp_global_dip);
1230 } else {
1231 cmn_err(CE_WARN, "FCP: Cannot create minor node");
1232 mutex_enter(&fcp_global_mutex);
1233 fcp_global_dip = NULL;
1234 mutex_exit(&fcp_global_mutex);
1235
1236 rval = DDI_FAILURE;
1237 }
1238 /*
1239 * We check the fcp_offline_delay property at this
1240 * point. This variable is global for the driver,
1241 * not specific to an instance.
1242 *
1243 * We do not recommend setting the value to less
1244 * than 10 seconds (RA_TOV_els), or greater than
1245 * 60 seconds.
1246 */
1247 fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1248 devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1249 "fcp_offline_delay", FCP_OFFLINE_DELAY);
1250 if ((fcp_offline_delay < 10) ||
1251 (fcp_offline_delay > 60)) {
1252 cmn_err(CE_WARN, "Setting fcp_offline_delay "
1253 "to %d second(s). This is outside the "
1254 "recommended range of 10..60 seconds.",
1255 fcp_offline_delay);
1256 }
1257 }
1258
1259 return (rval);
1260 }
1261
1262
1263 /*ARGSUSED*/
1264 static int
1265 fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1266 {
1267 int res = DDI_SUCCESS;
1268
1269 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1270 FCP_BUF_LEVEL_8, 0, "module detach: cmd=0x%x", cmd);
1271
1272 if (cmd == DDI_DETACH) {
1273 /*
1274 * Check if there are active ports/threads. If there
1275 * are any, we will fail, else we will succeed (there
1276 * should not be much to clean up)
1277 */
1278 mutex_enter(&fcp_global_mutex);
1279 FCP_DTRACE(fcp_logq, "fcp",
1280 fcp_trace, FCP_BUF_LEVEL_8, 0, "port_head=%p",
1281 (void *) fcp_port_head);
1282
1283 if (fcp_port_head == NULL) {
1284 ddi_remove_minor_node(fcp_global_dip, NULL);
1285 fcp_global_dip = NULL;
1286 mutex_exit(&fcp_global_mutex);
1287 } else {
1288 mutex_exit(&fcp_global_mutex);
1289 res = DDI_FAILURE;
1290 }
1291 }
1292 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1293 FCP_BUF_LEVEL_8, 0, "module detach returning %d", res);
1294
1295 return (res);
1296 }
1297
1298
1299 /* ARGSUSED */
1300 static int
1301 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1302 {
1303 if (otype != OTYP_CHR) {
1304 return (EINVAL);
1305 }
1306
1307 /*
1308 * Allow only root to talk;
1309 */
1310 if (drv_priv(credp)) {
1311 return (EPERM);
1312 }
1313
1314 mutex_enter(&fcp_global_mutex);
1315 if (fcp_oflag & FCP_EXCL) {
1316 mutex_exit(&fcp_global_mutex);
1317 return (EBUSY);
1318 }
1319
1320 if (flag & FEXCL) {
1321 if (fcp_oflag & FCP_OPEN) {
1322 mutex_exit(&fcp_global_mutex);
1323 return (EBUSY);
1324 }
1325 fcp_oflag |= FCP_EXCL;
1326 }
1327 fcp_oflag |= FCP_OPEN;
1328 mutex_exit(&fcp_global_mutex);
1329
1330 return (0);
1331 }
1332
1333
1334 /* ARGSUSED */
1335 static int
1336 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1337 {
1338 if (otype != OTYP_CHR) {
1339 return (EINVAL);
1340 }
1341
1342 mutex_enter(&fcp_global_mutex);
1343 if (!(fcp_oflag & FCP_OPEN)) {
1344 mutex_exit(&fcp_global_mutex);
1345 return (ENODEV);
1346 }
1347 fcp_oflag = FCP_IDLE;
1348 mutex_exit(&fcp_global_mutex);
1349
1350 return (0);
1351 }
1352
1353
1354 /*
1355 * fcp_ioctl
1356 * Entry point for the FCP ioctls
1357 *
1358 * Input:
1359 * See ioctl(9E)
1360 *
1361 * Output:
1362 * See ioctl(9E)
1363 *
1364 * Returns:
1365 * See ioctl(9E)
1366 *
1367 * Context:
1368 * Kernel context.
1369 */
1370 /* ARGSUSED */
1371 static int
1372 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1373 int *rval)
1374 {
1375 int ret = 0;
1376
1377 mutex_enter(&fcp_global_mutex);
1378 if (!(fcp_oflag & FCP_OPEN)) {
1379 mutex_exit(&fcp_global_mutex);
1380 return (ENXIO);
1381 }
1382 mutex_exit(&fcp_global_mutex);
1383
1384 switch (cmd) {
1385 case FCP_TGT_INQUIRY:
1386 case FCP_TGT_CREATE:
1387 case FCP_TGT_DELETE:
1388 ret = fcp_setup_device_data_ioctl(cmd,
1389 (struct fcp_ioctl *)data, mode, rval);
1390 break;
1391
1392 case FCP_TGT_SEND_SCSI:
1393 mutex_enter(&fcp_ioctl_mutex);
1394 ret = fcp_setup_scsi_ioctl(
1395 (struct fcp_scsi_cmd *)data, mode, rval);
1396 mutex_exit(&fcp_ioctl_mutex);
1397 break;
1398
1399 case FCP_STATE_COUNT:
1400 ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1401 mode, rval);
1402 break;
1403 case FCP_GET_TARGET_MAPPINGS:
1404 ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1405 mode, rval);
1406 break;
1407 default:
1408 fcp_log(CE_WARN, NULL,
1409 "!Invalid ioctl opcode = 0x%x", cmd);
1410 ret = EINVAL;
1411 }
1412
1413 return (ret);
1414 }
1415
1416
1417 /*
1418 * fcp_setup_device_data_ioctl
1419 * Setup handler for the "device data" style of
1420 * ioctl for FCP. See "fcp_util.h" for data structure
1421 * definition.
1422 *
1423 * Input:
1424 * cmd = FCP ioctl command
1425 * data = ioctl data
1426 * mode = See ioctl(9E)
1427 *
1428 * Output:
1429 * data = ioctl data
1430 * rval = return value - see ioctl(9E)
1431 *
1432 * Returns:
1433 * See ioctl(9E)
1434 *
1435 * Context:
1436 * Kernel context.
1437 */
1438 /* ARGSUSED */
1439 static int
1440 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1441 int *rval)
1442 {
1443 struct fcp_port *pptr;
1444 struct device_data *dev_data;
1445 uint32_t link_cnt;
1446 la_wwn_t *wwn_ptr = NULL;
1447 struct fcp_tgt *ptgt = NULL;
1448 struct fcp_lun *plun = NULL;
1449 int i, error;
1450 struct fcp_ioctl fioctl;
1451
1452 #ifdef _MULTI_DATAMODEL
1453 switch (ddi_model_convert_from(mode & FMODELS)) {
1454 case DDI_MODEL_ILP32: {
1455 struct fcp32_ioctl f32_ioctl;
1456
1457 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1458 sizeof (struct fcp32_ioctl), mode)) {
1459 return (EFAULT);
1460 }
1461 fioctl.fp_minor = f32_ioctl.fp_minor;
1462 fioctl.listlen = f32_ioctl.listlen;
1463 fioctl.list = (caddr_t)(long)f32_ioctl.list;
1464 break;
1465 }
1466 case DDI_MODEL_NONE:
1467 if (ddi_copyin((void *)data, (void *)&fioctl,
1468 sizeof (struct fcp_ioctl), mode)) {
1469 return (EFAULT);
1470 }
1471 break;
1472 }
1473
1474 #else /* _MULTI_DATAMODEL */
1475 if (ddi_copyin((void *)data, (void *)&fioctl,
1476 sizeof (struct fcp_ioctl), mode)) {
1477 return (EFAULT);
1478 }
1479 #endif /* _MULTI_DATAMODEL */
1480
1481 /*
1482 * Right now we can assume that the minor number matches with
1483 * this instance of fp. If this changes we will need to
1484 * revisit this logic.
1485 */
1486 mutex_enter(&fcp_global_mutex);
1487 pptr = fcp_port_head;
1488 while (pptr) {
1489 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1490 break;
1491 } else {
1492 pptr = pptr->port_next;
1493 }
1494 }
1495 mutex_exit(&fcp_global_mutex);
1496 if (pptr == NULL) {
1497 return (ENXIO);
1498 }
1499 mutex_enter(&pptr->port_mutex);
1500
1501
1502 if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1503 fioctl.listlen, KM_NOSLEEP)) == NULL) {
1504 mutex_exit(&pptr->port_mutex);
1505 return (ENOMEM);
1506 }
1507
1508 if (ddi_copyin(fioctl.list, dev_data,
1509 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1510 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1511 mutex_exit(&pptr->port_mutex);
1512 return (EFAULT);
1513 }
1514 link_cnt = pptr->port_link_cnt;
1515
1516 if (cmd == FCP_TGT_INQUIRY) {
1517 wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1518 if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1519 sizeof (wwn_ptr->raw_wwn)) == 0) {
1520 /* This ioctl is requesting INQ info of local HBA */
1521 mutex_exit(&pptr->port_mutex);
1522 dev_data[0].dev0_type = DTYPE_UNKNOWN;
1523 dev_data[0].dev_status = 0;
1524 if (ddi_copyout(dev_data, fioctl.list,
1525 (sizeof (struct device_data)) * fioctl.listlen,
1526 mode)) {
1527 kmem_free(dev_data,
1528 sizeof (*dev_data) * fioctl.listlen);
1529 return (EFAULT);
1530 }
1531 kmem_free(dev_data,
1532 sizeof (*dev_data) * fioctl.listlen);
1533 #ifdef _MULTI_DATAMODEL
1534 switch (ddi_model_convert_from(mode & FMODELS)) {
1535 case DDI_MODEL_ILP32: {
1536 struct fcp32_ioctl f32_ioctl;
1537 f32_ioctl.fp_minor = fioctl.fp_minor;
1538 f32_ioctl.listlen = fioctl.listlen;
1539 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1540 if (ddi_copyout((void *)&f32_ioctl,
1541 (void *)data,
1542 sizeof (struct fcp32_ioctl), mode)) {
1543 return (EFAULT);
1544 }
1545 break;
1546 }
1547 case DDI_MODEL_NONE:
1548 if (ddi_copyout((void *)&fioctl, (void *)data,
1549 sizeof (struct fcp_ioctl), mode)) {
1550 return (EFAULT);
1551 }
1552 break;
1553 }
1554 #else /* _MULTI_DATAMODEL */
1555 if (ddi_copyout((void *)&fioctl, (void *)data,
1556 sizeof (struct fcp_ioctl), mode)) {
1557 return (EFAULT);
1558 }
1559 #endif /* _MULTI_DATAMODEL */
1560 return (0);
1561 }
1562 }
1563
1564 if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1565 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1566 mutex_exit(&pptr->port_mutex);
1567 return (ENXIO);
1568 }
1569
1570 for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1571 i++) {
1572 wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1573
1574 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1575
1576
1577 dev_data[i].dev_status = ENXIO;
1578
1579 if ((ptgt = fcp_lookup_target(pptr,
1580 (uchar_t *)wwn_ptr)) == NULL) {
1581 mutex_exit(&pptr->port_mutex);
1582 if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1583 wwn_ptr, &error, 0) == NULL) {
1584 dev_data[i].dev_status = ENODEV;
1585 mutex_enter(&pptr->port_mutex);
1586 continue;
1587 } else {
1588
1589 dev_data[i].dev_status = EAGAIN;
1590
1591 mutex_enter(&pptr->port_mutex);
1592 continue;
1593 }
1594 } else {
1595 mutex_enter(&ptgt->tgt_mutex);
1596 if (ptgt->tgt_state & (FCP_TGT_MARK |
1597 FCP_TGT_BUSY)) {
1598 dev_data[i].dev_status = EAGAIN;
1599 mutex_exit(&ptgt->tgt_mutex);
1600 continue;
1601 }
1602
1603 if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1604 if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1605 dev_data[i].dev_status = ENOTSUP;
1606 } else {
1607 dev_data[i].dev_status = ENXIO;
1608 }
1609 mutex_exit(&ptgt->tgt_mutex);
1610 continue;
1611 }
1612
1613 switch (cmd) {
1614 case FCP_TGT_INQUIRY:
1615 /*
1616 * The reason we give device type of
1617 * lun 0 only even though in some
1618 * cases(like maxstrat) lun 0 device
1619 * type may be 0x3f(invalid) is that
1620 * for bridge boxes target will appear
1621 * as luns and the first lun could be
1622 * a device that utility may not care
1623 * about (like a tape device).
1624 */
1625 dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1626 dev_data[i].dev_status = 0;
1627 mutex_exit(&ptgt->tgt_mutex);
1628
1629 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1630 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1631 } else {
1632 dev_data[i].dev0_type = plun->lun_type;
1633 }
1634 mutex_enter(&ptgt->tgt_mutex);
1635 break;
1636
1637 case FCP_TGT_CREATE:
1638 mutex_exit(&ptgt->tgt_mutex);
1639 mutex_exit(&pptr->port_mutex);
1640
1641 /*
1642 * serialize state change call backs.
1643 * only one call back will be handled
1644 * at a time.
1645 */
1646 mutex_enter(&fcp_global_mutex);
1647 if (fcp_oflag & FCP_BUSY) {
1648 mutex_exit(&fcp_global_mutex);
1649 if (dev_data) {
1650 kmem_free(dev_data,
1651 sizeof (*dev_data) *
1652 fioctl.listlen);
1653 }
1654 return (EBUSY);
1655 }
1656 fcp_oflag |= FCP_BUSY;
1657 mutex_exit(&fcp_global_mutex);
1658
1659 dev_data[i].dev_status =
1660 fcp_create_on_demand(pptr,
1661 wwn_ptr->raw_wwn);
1662
1663 if (dev_data[i].dev_status != 0) {
1664 char buf[25];
1665
1666 for (i = 0; i < FC_WWN_SIZE; i++) {
1667 (void) sprintf(&buf[i << 1],
1668 "%02x",
1669 wwn_ptr->raw_wwn[i]);
1670 }
1671
1672 fcp_log(CE_WARN, pptr->port_dip,
1673 "!Failed to create nodes for"
1674 " pwwn=%s; error=%x", buf,
1675 dev_data[i].dev_status);
1676 }
1677
1678 /* allow state change call backs again */
1679 mutex_enter(&fcp_global_mutex);
1680 fcp_oflag &= ~FCP_BUSY;
1681 mutex_exit(&fcp_global_mutex);
1682
1683 mutex_enter(&pptr->port_mutex);
1684 mutex_enter(&ptgt->tgt_mutex);
1685
1686 break;
1687
1688 case FCP_TGT_DELETE:
1689 break;
1690
1691 default:
1692 fcp_log(CE_WARN, pptr->port_dip,
1693 "!Invalid device data ioctl "
1694 "opcode = 0x%x", cmd);
1695 }
1696 mutex_exit(&ptgt->tgt_mutex);
1697 }
1698 }
1699 mutex_exit(&pptr->port_mutex);
1700
1701 if (ddi_copyout(dev_data, fioctl.list,
1702 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1703 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1704 return (EFAULT);
1705 }
1706 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1707
1708 #ifdef _MULTI_DATAMODEL
1709 switch (ddi_model_convert_from(mode & FMODELS)) {
1710 case DDI_MODEL_ILP32: {
1711 struct fcp32_ioctl f32_ioctl;
1712
1713 f32_ioctl.fp_minor = fioctl.fp_minor;
1714 f32_ioctl.listlen = fioctl.listlen;
1715 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1716 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1717 sizeof (struct fcp32_ioctl), mode)) {
1718 return (EFAULT);
1719 }
1720 break;
1721 }
1722 case DDI_MODEL_NONE:
1723 if (ddi_copyout((void *)&fioctl, (void *)data,
1724 sizeof (struct fcp_ioctl), mode)) {
1725 return (EFAULT);
1726 }
1727 break;
1728 }
1729 #else /* _MULTI_DATAMODEL */
1730
1731 if (ddi_copyout((void *)&fioctl, (void *)data,
1732 sizeof (struct fcp_ioctl), mode)) {
1733 return (EFAULT);
1734 }
1735 #endif /* _MULTI_DATAMODEL */
1736
1737 return (0);
1738 }
1739
1740 /*
1741 * Fetch the target mappings (path, etc.) for all LUNs
1742 * on this port.
1743 */
1744 /* ARGSUSED */
1745 static int
1746 fcp_get_target_mappings(struct fcp_ioctl *data,
1747 int mode, int *rval)
1748 {
1749 struct fcp_port *pptr;
1750 fc_hba_target_mappings_t *mappings;
1751 fc_hba_mapping_entry_t *map;
1752 struct fcp_tgt *ptgt = NULL;
1753 struct fcp_lun *plun = NULL;
1754 int i, mapIndex, mappingSize;
1755 int listlen;
1756 struct fcp_ioctl fioctl;
1757 char *path;
1758 fcp_ent_addr_t sam_lun_addr;
1759
1760 #ifdef _MULTI_DATAMODEL
1761 switch (ddi_model_convert_from(mode & FMODELS)) {
1762 case DDI_MODEL_ILP32: {
1763 struct fcp32_ioctl f32_ioctl;
1764
1765 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1766 sizeof (struct fcp32_ioctl), mode)) {
1767 return (EFAULT);
1768 }
1769 fioctl.fp_minor = f32_ioctl.fp_minor;
1770 fioctl.listlen = f32_ioctl.listlen;
1771 fioctl.list = (caddr_t)(long)f32_ioctl.list;
1772 break;
1773 }
1774 case DDI_MODEL_NONE:
1775 if (ddi_copyin((void *)data, (void *)&fioctl,
1776 sizeof (struct fcp_ioctl), mode)) {
1777 return (EFAULT);
1778 }
1779 break;
1780 }
1781
1782 #else /* _MULTI_DATAMODEL */
1783 if (ddi_copyin((void *)data, (void *)&fioctl,
1784 sizeof (struct fcp_ioctl), mode)) {
1785 return (EFAULT);
1786 }
1787 #endif /* _MULTI_DATAMODEL */
1788
1789 /*
1790 * Right now we can assume that the minor number matches with
1791 * this instance of fp. If this changes we will need to
1792 * revisit this logic.
1793 */
1794 mutex_enter(&fcp_global_mutex);
1795 pptr = fcp_port_head;
1796 while (pptr) {
1797 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1798 break;
1799 } else {
1800 pptr = pptr->port_next;
1801 }
1802 }
1803 mutex_exit(&fcp_global_mutex);
1804 if (pptr == NULL) {
1805 cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
1806 fioctl.fp_minor);
1807 return (ENXIO);
1808 }
1809
1810
1811 /* We use listlen to show the total buffer size */
1812 mappingSize = fioctl.listlen;
1813
1814 /* Now calculate how many mapping entries will fit */
1815 listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
1816 - sizeof (fc_hba_target_mappings_t);
1817 if (listlen <= 0) {
1818 cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
1819 return (ENXIO);
1820 }
1821 listlen = listlen / sizeof (fc_hba_mapping_entry_t);
1822
1823 if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
1824 return (ENOMEM);
1825 }
1826 mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;
1827
1828 /* Now get to work */
1829 mapIndex = 0;
1830
1831 mutex_enter(&pptr->port_mutex);
1832 /* Loop through all targets on this port */
1833 for (i = 0; i < FCP_NUM_HASH; i++) {
1834 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
1835 ptgt = ptgt->tgt_next) {
1836
1837 mutex_enter(&ptgt->tgt_mutex);
1838
1839 /* Loop through all LUNs on this target */
1840 for (plun = ptgt->tgt_lun; plun != NULL;
1841 plun = plun->lun_next) {
1842 if (plun->lun_state & FCP_LUN_OFFLINE) {
1843 continue;
1844 }
1845
1846 path = fcp_get_lun_path(plun);
1847 if (path == NULL) {
1848 continue;
1849 }
1850
1851 if (mapIndex >= listlen) {
1852 mapIndex ++;
1853 kmem_free(path, MAXPATHLEN);
1854 continue;
1855 }
1856 map = &mappings->entries[mapIndex++];
1857 bcopy(path, map->targetDriver,
1858 sizeof (map->targetDriver));
1859 map->d_id = ptgt->tgt_d_id;
1860 map->busNumber = 0;
1861 map->targetNumber = ptgt->tgt_d_id;
1862 map->osLUN = plun->lun_num;
1863
1864 /*
1865 * We had swapped lun when we stored it in
1866 * lun_addr. We need to swap it back before
1867 * returning it to user land
1868 */
1869
1870 sam_lun_addr.ent_addr_0 =
1871 BE_16(plun->lun_addr.ent_addr_0);
1872 sam_lun_addr.ent_addr_1 =
1873 BE_16(plun->lun_addr.ent_addr_1);
1874 sam_lun_addr.ent_addr_2 =
1875 BE_16(plun->lun_addr.ent_addr_2);
1876 sam_lun_addr.ent_addr_3 =
1877 BE_16(plun->lun_addr.ent_addr_3);
1878
1879 bcopy(&sam_lun_addr, &map->samLUN,
1880 FCP_LUN_SIZE);
1881 bcopy(ptgt->tgt_node_wwn.raw_wwn,
1882 map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
1883 bcopy(ptgt->tgt_port_wwn.raw_wwn,
1884 map->PortWWN.raw_wwn, sizeof (la_wwn_t));
1885
1886 if (plun->lun_guid) {
1887
1888 /* convert ascii wwn to bytes */
1889 fcp_ascii_to_wwn(plun->lun_guid,
1890 map->guid, sizeof (map->guid));
1891
1892 if ((sizeof (map->guid)) <
1893 plun->lun_guid_size / 2) {
1894 cmn_err(CE_WARN,
1895 "fcp_get_target_mappings:"
1896 "guid copy space "
1897 "insufficient."
1898 "Copy Truncation - "
1899 "available %d; need %d",
1900 (int)sizeof (map->guid),
1901 (int)
1902 plun->lun_guid_size / 2);
1903 }
1904 }
1905 kmem_free(path, MAXPATHLEN);
1906 }
1907 mutex_exit(&ptgt->tgt_mutex);
1908 }
1909 }
1910 mutex_exit(&pptr->port_mutex);
1911 mappings->numLuns = mapIndex;
1912
1913 if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
1914 kmem_free(mappings, mappingSize);
1915 return (EFAULT);
1916 }
1917 kmem_free(mappings, mappingSize);
1918
1919 #ifdef _MULTI_DATAMODEL
1920 switch (ddi_model_convert_from(mode & FMODELS)) {
1921 case DDI_MODEL_ILP32: {
1922 struct fcp32_ioctl f32_ioctl;
1923
1924 f32_ioctl.fp_minor = fioctl.fp_minor;
1925 f32_ioctl.listlen = fioctl.listlen;
1926 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1927 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1928 sizeof (struct fcp32_ioctl), mode)) {
1929 return (EFAULT);
1930 }
1931 break;
1932 }
1933 case DDI_MODEL_NONE:
1934 if (ddi_copyout((void *)&fioctl, (void *)data,
1935 sizeof (struct fcp_ioctl), mode)) {
1936 return (EFAULT);
1937 }
1938 break;
1939 }
1940 #else /* _MULTI_DATAMODEL */
1941
1942 if (ddi_copyout((void *)&fioctl, (void *)data,
1943 sizeof (struct fcp_ioctl), mode)) {
1944 return (EFAULT);
1945 }
1946 #endif /* _MULTI_DATAMODEL */
1947
1948 return (0);
1949 }
1950
1951 /*
1952 * fcp_setup_scsi_ioctl
1953 * Setup handler for the "scsi passthru" style of
1954 * ioctl for FCP. See "fcp_util.h" for data structure
1955 * definition.
1956 *
1957 * Input:
1958 * u_fscsi = ioctl data (user address space)
1959 * mode = See ioctl(9E)
1960 *
1961 * Output:
1962 * u_fscsi = ioctl data (user address space)
1963 * rval = return value - see ioctl(9E)
1964 *
1965 * Returns:
1966 * 0 = OK
1967 * EAGAIN = See errno.h
1968 * EBUSY = See errno.h
1969 * EFAULT = See errno.h
1970 * EINTR = See errno.h
1971 * EINVAL = See errno.h
1972 * EIO = See errno.h
1973 * ENOMEM = See errno.h
1974 * ENXIO = See errno.h
1975 *
1976 * Context:
1977 * Kernel context.
1978 */
/* ARGSUSED */
static int
fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
    int mode, int *rval)
{
	int			ret = 0;
	int			temp_ret;
	caddr_t			k_cdbbufaddr = NULL;
	caddr_t			k_bufaddr = NULL;
	caddr_t			k_rqbufaddr = NULL;
	caddr_t			u_cdbbufaddr;
	caddr_t			u_bufaddr;
	caddr_t			u_rqbufaddr;
	struct fcp_scsi_cmd	k_fscsi;

	/*
	 * Get fcp_scsi_cmd array element from user address space
	 */
	if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
	    != 0) {
		return (ret);
	}


	/*
	 * Even though kmem_alloc() checks the validity of the
	 * buffer length, this check is needed when the
	 * kmem_flags set and the zero buffer length is passed.
	 */
	if ((k_fscsi.scsi_cdblen <= 0) ||
	    (k_fscsi.scsi_buflen <= 0) ||
	    (k_fscsi.scsi_rqlen <= 0)) {
		return (EINVAL);
	}

	/*
	 * Allocate data for fcp_scsi_cmd pointer fields
	 *
	 * KM_NOSLEEP is used so an unreasonably large user-supplied
	 * length simply fails the allocation rather than blocking.
	 * From this point on, errors fall through (instead of
	 * returning early) so the kernel buffers are freed and the
	 * command status is still copied back to the caller below.
	 */
	if (ret == 0) {
		k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
		k_bufaddr = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
		k_rqbufaddr = kmem_alloc(k_fscsi.scsi_rqlen, KM_NOSLEEP);

		if (k_cdbbufaddr == NULL ||
		    k_bufaddr == NULL ||
		    k_rqbufaddr == NULL) {
			ret = ENOMEM;
		}
	}

	/*
	 * Get fcp_scsi_cmd pointer fields from user
	 * address space
	 *
	 * The user-space addresses are saved in u_* locals here so
	 * they can be restored into k_fscsi after the command runs;
	 * they are only ever read on paths where this block executed.
	 */
	if (ret == 0) {
		u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
		u_bufaddr = k_fscsi.scsi_bufaddr;
		u_rqbufaddr = k_fscsi.scsi_rqbufaddr;

		if (ddi_copyin(u_cdbbufaddr,
		    k_cdbbufaddr,
		    k_fscsi.scsi_cdblen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyin(u_bufaddr,
		    k_bufaddr,
		    k_fscsi.scsi_buflen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyin(u_rqbufaddr,
		    k_rqbufaddr,
		    k_fscsi.scsi_rqlen,
		    mode)) {
			ret = EFAULT;
		}
	}

	/*
	 * Send scsi command (blocking)
	 */
	if (ret == 0) {
		/*
		 * Prior to sending the scsi command, the
		 * fcp_scsi_cmd data structure must contain kernel,
		 * not user, addresses.
		 */
		k_fscsi.scsi_cdbbufaddr = k_cdbbufaddr;
		k_fscsi.scsi_bufaddr = k_bufaddr;
		k_fscsi.scsi_rqbufaddr = k_rqbufaddr;

		ret = fcp_send_scsi_ioctl(&k_fscsi);

		/*
		 * After sending the scsi command, the
		 * fcp_scsi_cmd data structure must contain user,
		 * not kernel, addresses, so the structure copied
		 * back to user land below carries no kernel pointers.
		 */
		k_fscsi.scsi_cdbbufaddr = u_cdbbufaddr;
		k_fscsi.scsi_bufaddr = u_bufaddr;
		k_fscsi.scsi_rqbufaddr = u_rqbufaddr;
	}

	/*
	 * Put fcp_scsi_cmd pointer fields to user address space
	 */
	if (ret == 0) {
		if (ddi_copyout(k_cdbbufaddr,
		    u_cdbbufaddr,
		    k_fscsi.scsi_cdblen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyout(k_bufaddr,
		    u_bufaddr,
		    k_fscsi.scsi_buflen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyout(k_rqbufaddr,
		    u_rqbufaddr,
		    k_fscsi.scsi_rqlen,
		    mode)) {
			ret = EFAULT;
		}
	}

	/*
	 * Free data for fcp_scsi_cmd pointer fields
	 * (any subset may be non-NULL if allocation partially failed)
	 */
	if (k_cdbbufaddr != NULL) {
		kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
	}
	if (k_bufaddr != NULL) {
		kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
	}
	if (k_rqbufaddr != NULL) {
		kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
	}

	/*
	 * Put fcp_scsi_cmd array element to user address space.
	 * This runs even on error so the caller sees the completion
	 * status fields; a copyout failure overrides earlier status.
	 */
	temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
	if (temp_ret != 0) {
		ret = temp_ret;
	}

	/*
	 * Return status
	 */
	return (ret);
}
2129
2130
2131 /*
2132 * fcp_copyin_scsi_cmd
2133 * Copy in fcp_scsi_cmd data structure from user address space.
2134 * The data may be in 32 bit or 64 bit modes.
2135 *
2136 * Input:
2137 * base_addr = from address (user address space)
2138 * mode = See ioctl(9E) and ddi_copyin(9F)
2139 *
2140 * Output:
2141 * fscsi = to address (kernel address space)
2142 *
2143 * Returns:
2144 * 0 = OK
2145 * EFAULT = Error
2146 *
2147 * Context:
2148 * Kernel context.
2149 */
2150 static int
2151 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2152 {
2153 #ifdef _MULTI_DATAMODEL
2154 struct fcp32_scsi_cmd f32scsi;
2155
2156 switch (ddi_model_convert_from(mode & FMODELS)) {
2157 case DDI_MODEL_ILP32:
2158 /*
2159 * Copy data from user address space
2160 */
2161 if (ddi_copyin((void *)base_addr,
2162 &f32scsi,
2163 sizeof (struct fcp32_scsi_cmd),
2164 mode)) {
2165 return (EFAULT);
2166 }
2167 /*
2168 * Convert from 32 bit to 64 bit
2169 */
2170 FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2171 break;
2172 case DDI_MODEL_NONE:
2173 /*
2174 * Copy data from user address space
2175 */
2176 if (ddi_copyin((void *)base_addr,
2177 fscsi,
2178 sizeof (struct fcp_scsi_cmd),
2179 mode)) {
2180 return (EFAULT);
2181 }
2182 break;
2183 }
2184 #else /* _MULTI_DATAMODEL */
2185 /*
2186 * Copy data from user address space
2187 */
2188 if (ddi_copyin((void *)base_addr,
2189 fscsi,
2190 sizeof (struct fcp_scsi_cmd),
2191 mode)) {
2192 return (EFAULT);
2193 }
2194 #endif /* _MULTI_DATAMODEL */
2195
2196 return (0);
2197 }
2198
2199
2200 /*
2201 * fcp_copyout_scsi_cmd
2202 * Copy out fcp_scsi_cmd data structure to user address space.
2203 * The data may be in 32 bit or 64 bit modes.
2204 *
2205 * Input:
 *	fscsi		= from address (kernel address space)
 *	mode		= See ioctl(9E) and ddi_copyout(9F)
 *
 * Output:
 *	base_addr	= to address (user address space)
2211 *
2212 * Returns:
2213 * 0 = OK
2214 * EFAULT = Error
2215 *
2216 * Context:
2217 * Kernel context.
2218 */
2219 static int
2220 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2221 {
2222 #ifdef _MULTI_DATAMODEL
2223 struct fcp32_scsi_cmd f32scsi;
2224
2225 switch (ddi_model_convert_from(mode & FMODELS)) {
2226 case DDI_MODEL_ILP32:
2227 /*
2228 * Convert from 64 bit to 32 bit
2229 */
2230 FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2231 /*
2232 * Copy data to user address space
2233 */
2234 if (ddi_copyout(&f32scsi,
2235 (void *)base_addr,
2236 sizeof (struct fcp32_scsi_cmd),
2237 mode)) {
2238 return (EFAULT);
2239 }
2240 break;
2241 case DDI_MODEL_NONE:
2242 /*
2243 * Copy data to user address space
2244 */
2245 if (ddi_copyout(fscsi,
2246 (void *)base_addr,
2247 sizeof (struct fcp_scsi_cmd),
2248 mode)) {
2249 return (EFAULT);
2250 }
2251 break;
2252 }
2253 #else /* _MULTI_DATAMODEL */
2254 /*
2255 * Copy data to user address space
2256 */
2257 if (ddi_copyout(fscsi,
2258 (void *)base_addr,
2259 sizeof (struct fcp_scsi_cmd),
2260 mode)) {
2261 return (EFAULT);
2262 }
2263 #endif /* _MULTI_DATAMODEL */
2264
2265 return (0);
2266 }
2267
2268
2269 /*
2270 * fcp_send_scsi_ioctl
2271 * Sends the SCSI command in blocking mode.
2272 *
2273 * Input:
2274 * fscsi = SCSI command data structure
2275 *
2276 * Output:
2277 * fscsi = SCSI command data structure
2278 *
2279 * Returns:
2280 * 0 = OK
2281 * EAGAIN = See errno.h
2282 * EBUSY = See errno.h
2283 * EINTR = See errno.h
2284 * EINVAL = See errno.h
2285 * EIO = See errno.h
2286 * ENOMEM = See errno.h
2287 * ENXIO = See errno.h
2288 *
2289 * Context:
2290 * Kernel context.
2291 */
static int
fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
{
	struct fcp_lun	*plun = NULL;
	struct fcp_port	*pptr = NULL;
	struct fcp_tgt	*ptgt = NULL;
	fc_packet_t	*fpkt = NULL;
	struct fcp_ipkt	*icmd = NULL;
	int		target_created = FALSE;
	fc_frame_hdr_t	*hp;
	struct fcp_cmd	fcp_cmd;
	struct fcp_cmd	*fcmd;
	union scsi_cdb	*scsi_cdb;
	la_wwn_t	*wwn_ptr;
	int		nodma;
	struct fcp_rsp	*rsp;
	struct fcp_rsp_info	*rsp_info;
	caddr_t		rsp_sense;
	int		buf_len;
	int		info_len;
	int		sense_len;
	struct scsi_extended_sense	*sense_to = NULL;
	timeout_id_t	tid;
	uint8_t		reconfig_lun = FALSE;
	uint8_t		reconfig_pending = FALSE;
	uint8_t		scsi_cmd;
	int		rsp_len;
	int		cmd_index;
	int		fc_status;
	int		pkt_state;
	int		pkt_action;
	int		pkt_reason;
	int		ret, xport_retval = ~FC_SUCCESS;
	int		lcount;
	int		tcount;
	int		reconfig_status;
	int		port_busy = FALSE;
	uchar_t		*lun_string;

	/*
	 * Check valid SCSI command
	 *
	 * Linear scan of the scsi_ioctl_list whitelist; ret stays
	 * EINVAL unless the opcode is found.
	 */
	scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
	ret = EINVAL;
	for (cmd_index = 0;
	    cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
	    ret != 0;
	    cmd_index++) {
		/*
		 * First byte of CDB is the SCSI command
		 */
		if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
			ret = 0;
		}
	}

	/*
	 * Check inputs (only read-direction commands are supported,
	 * see the FC_PKT_FCP_READ setup below)
	 */
	if (fscsi->scsi_flags != FCP_SCSI_READ) {
		ret = EINVAL;
	} else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
		/* no larger than */
		ret = EINVAL;
	}


	/*
	 * Find FC port
	 */
	if (ret == 0) {
		/*
		 * Acquire global mutex
		 */
		mutex_enter(&fcp_global_mutex);

		pptr = fcp_port_head;
		while (pptr) {
			if (pptr->port_instance ==
			    (uint32_t)fscsi->scsi_fc_port_num) {
				break;
			} else {
				pptr = pptr->port_next;
			}
		}

		if (pptr == NULL) {
			ret = ENXIO;
		} else {
			/*
			 * fc_ulp_busy_port can raise power
			 * so, we must not hold any mutexes involved in PM
			 */
			mutex_exit(&fcp_global_mutex);
			ret = fc_ulp_busy_port(pptr->port_fp_handle);
		}

		if (ret == 0) {

			/* remember port is busy, so we will release later */
			port_busy = TRUE;

			/*
			 * If there is a reconfiguration in progress, wait
			 * for it to complete.
			 */

			fcp_reconfig_wait(pptr);

			/*
			 * reacquire mutexes in order
			 * (lock order here is global, then port, then tgt)
			 */
			mutex_enter(&fcp_global_mutex);
			mutex_enter(&pptr->port_mutex);

			/*
			 * Will port accept DMA?
			 */
			nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
			    ? 1 : 0;

			/*
			 * If init or offline, device not known
			 *
			 * If we are discovering (onlining), we can
			 * NOT obviously provide reliable data about
			 * devices until it is complete
			 */
			if (pptr->port_state & (FCP_STATE_INIT |
			    FCP_STATE_OFFLINE)) {
				ret = ENXIO;
			} else if (pptr->port_state & FCP_STATE_ONLINING) {
				ret = EBUSY;
			} else {
				/*
				 * Find target from pwwn
				 *
				 * The wwn must be put into a local
				 * variable to ensure alignment.
				 */
				wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
				ptgt = fcp_lookup_target(pptr,
				    (uchar_t *)wwn_ptr);

				/*
				 * If target not found,
				 */
				if (ptgt == NULL) {
					/*
					 * Note: Still have global &
					 * port mutexes
					 */
					mutex_exit(&pptr->port_mutex);
					ptgt = fcp_port_create_tgt(pptr,
					    wwn_ptr, &ret, &fc_status,
					    &pkt_state, &pkt_action,
					    &pkt_reason);
					mutex_enter(&pptr->port_mutex);

					fscsi->scsi_fc_status = fc_status;
					fscsi->scsi_pkt_state =
					    (uchar_t)pkt_state;
					fscsi->scsi_pkt_reason = pkt_reason;
					fscsi->scsi_pkt_action =
					    (uchar_t)pkt_action;

					if (ptgt != NULL) {
						target_created = TRUE;
					} else if (ret == 0) {
						ret = ENOMEM;
					}
				}

				if (ret == 0) {
					/*
					 * Acquire target
					 */
					mutex_enter(&ptgt->tgt_mutex);

					/*
					 * If target is mark or busy,
					 * then target can not be used
					 */
					if (ptgt->tgt_state &
					    (FCP_TGT_MARK |
					    FCP_TGT_BUSY)) {
						ret = EBUSY;
					} else {
						/*
						 * Mark target as busy
						 */
						ptgt->tgt_state |=
						    FCP_TGT_BUSY;
					}

					/*
					 * Release target
					 *
					 * lcount/tcount snapshot the link and
					 * target change counters so a later
					 * state change can be detected.
					 */
					lcount = pptr->port_link_cnt;
					tcount = ptgt->tgt_change_cnt;
					mutex_exit(&ptgt->tgt_mutex);
				}
			}

			/*
			 * Release port
			 */
			mutex_exit(&pptr->port_mutex);
		}

		/*
		 * Release global mutex
		 */
		mutex_exit(&fcp_global_mutex);
	}

	if (ret == 0) {
		uint64_t belun = BE_64(fscsi->scsi_lun);

		/*
		 * If it's a target device, find lun from pwwn
		 * The wwn must be put into a local
		 * variable to ensure alignment.
		 */
		mutex_enter(&pptr->port_mutex);
		wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
		if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
			/* this is not a target */
			fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
			ret = ENXIO;
		} else if ((belun << 16) != 0) {
			/*
			 * Since fcp only support PD and LU addressing method
			 * so far, the last 6 bytes of a valid LUN are expected
			 * to be filled with 00h.
			 */
			fscsi->scsi_fc_status = FC_INVALID_LUN;
			cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
			    " method 0x%02x with LUN number 0x%016" PRIx64,
			    (uint8_t)(belun >> 62), belun);
			ret = ENXIO;
		} else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
		    (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
			/*
			 * This is a SCSI target, but no LUN at this
			 * address.
			 *
			 * In the future, we may want to send this to
			 * the target, and let it respond
			 * appropriately
			 */
			ret = ENXIO;
		}
		mutex_exit(&pptr->port_mutex);
	}

	/*
	 * Finished grabbing external resources
	 * Allocate internal packet (icmd)
	 */
	if (ret == 0) {
		/*
		 * Calc rsp len assuming rsp info included
		 */
		rsp_len = sizeof (struct fcp_rsp) +
		    sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;

		icmd = fcp_icmd_alloc(pptr, ptgt,
		    sizeof (struct fcp_cmd),
		    rsp_len,
		    fscsi->scsi_buflen,
		    nodma,
		    lcount,			/* ipkt_link_cnt */
		    tcount,			/* ipkt_change_cnt */
		    0,				/* cause */
		    FC_INVALID_RSCN_COUNT);	/* invalidate the count */

		if (icmd == NULL) {
			ret = ENOMEM;
		} else {
			/*
			 * Setup internal packet as sema sync
			 */
			fcp_ipkt_sema_init(icmd);
		}
	}

	if (ret == 0) {
		/*
		 * Init fpkt pointer for use.
		 */

		fpkt = icmd->ipkt_fpkt;

		fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
		fpkt->pkt_tran_type = FC_PKT_FCP_READ; /* only rd for now */
		fpkt->pkt_timeout = fscsi->scsi_timeout;

		/*
		 * Init fcmd pointer for use by SCSI command
		 *
		 * With DVMA the command is built in a local struct and
		 * copied out via FCP_CP_OUT below; without it we build
		 * directly in the packet's command buffer.
		 */

		if (nodma) {
			fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
		} else {
			fcmd = &fcp_cmd;
		}
		bzero(fcmd, sizeof (struct fcp_cmd));
		ptgt = plun->lun_tgt;

		lun_string = (uchar_t *)&fscsi->scsi_lun;

		fcmd->fcp_ent_addr.ent_addr_0 =
		    BE_16(*(uint16_t *)&(lun_string[0]));
		fcmd->fcp_ent_addr.ent_addr_1 =
		    BE_16(*(uint16_t *)&(lun_string[2]));
		fcmd->fcp_ent_addr.ent_addr_2 =
		    BE_16(*(uint16_t *)&(lun_string[4]));
		fcmd->fcp_ent_addr.ent_addr_3 =
		    BE_16(*(uint16_t *)&(lun_string[6]));

		/*
		 * Setup internal packet(icmd)
		 */
		icmd->ipkt_lun = plun;
		icmd->ipkt_restart = 0;
		icmd->ipkt_retries = 0;
		icmd->ipkt_opcode = 0;

		/*
		 * Init the frame HEADER Pointer for use
		 */
		hp = &fpkt->pkt_cmd_fhdr;

		hp->s_id = pptr->port_id;
		hp->d_id = ptgt->tgt_d_id;
		hp->r_ctl = R_CTL_COMMAND;
		hp->type = FC_TYPE_SCSI_FCP;
		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->rsvd = 0;
		hp->seq_id = 0;
		hp->seq_cnt = 0;
		hp->ox_id = 0xffff;
		hp->rx_id = 0xffff;
		hp->ro = 0;

		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
		fcmd->fcp_cntl.cntl_read_data = 1;	/* only rd for now */
		fcmd->fcp_cntl.cntl_write_data = 0;
		fcmd->fcp_data_len = fscsi->scsi_buflen;

		scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
		bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
		    fscsi->scsi_cdblen);

		if (!nodma) {
			FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
			    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
		}

		/*
		 * Send SCSI command to FC transport
		 */

		if (ret == 0) {
			mutex_enter(&ptgt->tgt_mutex);

			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				mutex_exit(&ptgt->tgt_mutex);
				fscsi->scsi_fc_status = xport_retval =
				    fc_ulp_transport(pptr->port_fp_handle,
				    fpkt);
				if (fscsi->scsi_fc_status != FC_SUCCESS) {
					ret = EIO;
				}
			} else {
				mutex_exit(&ptgt->tgt_mutex);
				ret = EBUSY;
			}
		}
	}

	/*
	 * Wait for completion only if fc_ulp_transport was called and it
	 * returned a success. This is the only time callback will happen.
	 * Otherwise, there is no point in waiting
	 */
	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
		ret = fcp_ipkt_sema_wait(icmd);
	}

	/*
	 * Copy data to IOCTL data structures
	 */
	rsp = NULL;
	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
		rsp = (struct fcp_rsp *)fpkt->pkt_resp;

		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!SCSI command to d_id=0x%x lun=0x%x"
			    " failed, Bad FCP response values:"
			    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
			    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
			    ptgt->tgt_d_id, plun->lun_num,
			    rsp->reserved_0, rsp->reserved_1,
			    rsp->fcp_u.fcp_status.reserved_0,
			    rsp->fcp_u.fcp_status.reserved_1,
			    rsp->fcp_response_len, rsp->fcp_sense_len);

			ret = EIO;
		}
	}

	if ((ret == 0) && (rsp != NULL)) {
		/*
		 * Calc response lengths
		 */
		sense_len = 0;
		info_len = 0;

		if (rsp->fcp_u.fcp_status.rsp_len_set) {
			info_len = rsp->fcp_response_len;
		}

		/* Response info (if any) immediately follows the rsp. */
		rsp_info = (struct fcp_rsp_info *)
		    ((uint8_t *)rsp + sizeof (struct fcp_rsp));

		/*
		 * Get SCSI status
		 */
		fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
		/*
		 * If a lun was just added or removed and the next command
		 * comes through this interface, we need to capture the check
		 * condition so we can discover the new topology.
		 */
		if (fscsi->scsi_bufstatus != STATUS_GOOD &&
		    rsp->fcp_u.fcp_status.sense_len_set) {
			sense_len = rsp->fcp_sense_len;
			rsp_sense = (caddr_t)((uint8_t *)rsp_info + info_len);
			sense_to = (struct scsi_extended_sense *)rsp_sense;
			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
			    (FCP_SENSE_NO_LUN(sense_to))) {
				reconfig_lun = TRUE;
			}
		}

		if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
		    (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
			if (reconfig_lun == FALSE) {
				reconfig_status =
				    fcp_is_reconfig_needed(ptgt, fpkt);
			}

			/*
			 * NOTE: short-circuit || guarantees reconfig_status
			 * is only read on the branch where it was assigned.
			 */
			if ((reconfig_lun == TRUE) ||
			    (reconfig_status == TRUE)) {
				mutex_enter(&ptgt->tgt_mutex);
				if (ptgt->tgt_tid == NULL) {
					/*
					 * Either we've been notified the
					 * REPORT_LUN data has changed, or
					 * we've determined on our own that
					 * we're out of date. Kick off
					 * rediscovery.
					 */
					tid = timeout(fcp_reconfigure_luns,
					    (caddr_t)ptgt, drv_usectohz(1));

					ptgt->tgt_tid = tid;
					ptgt->tgt_state |= FCP_TGT_BUSY;
					ret = EBUSY;
					reconfig_pending = TRUE;
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
		}

		/*
		 * Calc residuals and buffer lengths
		 */

		if (ret == 0) {
			buf_len = fscsi->scsi_buflen;
			fscsi->scsi_bufresid = 0;
			if (rsp->fcp_u.fcp_status.resid_under) {
				if (rsp->fcp_resid <= fscsi->scsi_buflen) {
					fscsi->scsi_bufresid = rsp->fcp_resid;
				} else {
					cmn_err(CE_WARN, "fcp: bad residue %x "
					    "for txfer len %x", rsp->fcp_resid,
					    fscsi->scsi_buflen);
					fscsi->scsi_bufresid =
					    fscsi->scsi_buflen;
				}
				buf_len -= fscsi->scsi_bufresid;
			}
			if (rsp->fcp_u.fcp_status.resid_over) {
				fscsi->scsi_bufresid = -rsp->fcp_resid;
			}

			/* Clamp sense data to the caller's buffer size. */
			fscsi->scsi_rqresid = fscsi->scsi_rqlen - sense_len;
			if (fscsi->scsi_rqlen < sense_len) {
				sense_len = fscsi->scsi_rqlen;
			}

			fscsi->scsi_fc_rspcode = 0;
			if (rsp->fcp_u.fcp_status.rsp_len_set) {
				fscsi->scsi_fc_rspcode = rsp_info->rsp_code;
			}
			fscsi->scsi_pkt_state = fpkt->pkt_state;
			fscsi->scsi_pkt_action = fpkt->pkt_action;
			fscsi->scsi_pkt_reason = fpkt->pkt_reason;

			/*
			 * Copy data and request sense
			 *
			 * Data must be copied by using the FCP_CP_IN macro.
			 * This will ensure the proper byte order since the data
			 * is being copied directly from the memory mapped
			 * device register.
			 *
			 * The response (and request sense) will be in the
			 * correct byte order. No special copy is necessary.
			 */

			if (buf_len) {
				FCP_CP_IN(fpkt->pkt_data,
				    fscsi->scsi_bufaddr,
				    fpkt->pkt_data_acc,
				    buf_len);
			}
			bcopy((void *)rsp_sense,
			    (void *)fscsi->scsi_rqbufaddr,
			    sense_len);
		}
	}

	/*
	 * Cleanup transport data structures if icmd was alloc-ed
	 * So, cleanup happens in the same thread that icmd was alloc-ed
	 */
	if (icmd != NULL) {
		fcp_ipkt_sema_cleanup(icmd);
	}

	/* restore pm busy/idle status */
	if (port_busy) {
		fc_ulp_idle_port(pptr->port_fp_handle);
	}

	/*
	 * Cleanup target. if a reconfig is pending, don't clear the BUSY
	 * flag, it'll be cleared when the reconfig is complete.
	 */
	if ((ptgt != NULL) && !reconfig_pending) {
		/*
		 * If target was created,
		 */
		if (target_created) {
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_state &= ~FCP_TGT_BUSY;
			mutex_exit(&ptgt->tgt_mutex);
		} else {
			/*
			 * De-mark target as busy
			 */
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_state &= ~FCP_TGT_BUSY;
			mutex_exit(&ptgt->tgt_mutex);
		}
	}
	return (ret);
}
2864
2865
2866 static int
2867 fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
2868 fc_packet_t *fpkt)
2869 {
2870 uchar_t *lun_string;
2871 uint16_t lun_num, i;
2872 int num_luns;
2873 int actual_luns;
2874 int num_masked_luns;
2875 int lun_buflen;
2876 struct fcp_lun *plun = NULL;
2877 struct fcp_reportlun_resp *report_lun;
2878 uint8_t reconfig_needed = FALSE;
2879 uint8_t lun_exists = FALSE;
2880 fcp_port_t *pptr = ptgt->tgt_port;
2881
2882 report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);
2883
2884 FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
2885 fpkt->pkt_datalen);
2886
2887 /* get number of luns (which is supplied as LUNS * 8) */
2888 num_luns = BE_32(report_lun->num_lun) >> 3;
2889
2890 /*
2891 * Figure out exactly how many lun strings our response buffer
2892 * can hold.
2893 */
2894 lun_buflen = (fpkt->pkt_datalen -
2895 2 * sizeof (uint32_t)) / sizeof (longlong_t);
2896
2897 /*
2898 * Is our response buffer full or not? We don't want to
2899 * potentially walk beyond the number of luns we have.
2900 */
2901 if (num_luns <= lun_buflen) {
2902 actual_luns = num_luns;
2903 } else {
2904 actual_luns = lun_buflen;
2905 }
2906
2907 mutex_enter(&ptgt->tgt_mutex);
2908
2909 /* Scan each lun to see if we have masked it. */
2910 num_masked_luns = 0;
2911 if (fcp_lun_blacklist != NULL) {
2912 for (i = 0; i < actual_luns; i++) {
2913 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2914 switch (lun_string[0] & 0xC0) {
2915 case FCP_LUN_ADDRESSING:
2916 case FCP_PD_ADDRESSING:
2917 case FCP_VOLUME_ADDRESSING:
2918 lun_num = ((lun_string[0] & 0x3F) << 8)
2919 | lun_string[1];
2920 if (fcp_should_mask(&ptgt->tgt_port_wwn,
2921 lun_num) == TRUE) {
2922 num_masked_luns++;
2923 }
2924 break;
2925 default:
2926 break;
2927 }
2928 }
2929 }
2930
2931 /*
2932 * The quick and easy check. If the number of LUNs reported
2933 * doesn't match the number we currently know about, we need
2934 * to reconfigure.
2935 */
2936 if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
2937 mutex_exit(&ptgt->tgt_mutex);
2938 kmem_free(report_lun, fpkt->pkt_datalen);
2939 return (TRUE);
2940 }
2941
2942 /*
2943 * If the quick and easy check doesn't turn up anything, we walk
2944 * the list of luns from the REPORT_LUN response and look for
2945 * any luns we don't know about. If we find one, we know we need
2946 * to reconfigure. We will skip LUNs that are masked because of the
2947 * blacklist.
2948 */
2949 for (i = 0; i < actual_luns; i++) {
2950 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2951 lun_exists = FALSE;
2952 switch (lun_string[0] & 0xC0) {
2953 case FCP_LUN_ADDRESSING:
2954 case FCP_PD_ADDRESSING:
2955 case FCP_VOLUME_ADDRESSING:
2956 lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
2957
2958 if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
2959 &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
2960 lun_exists = TRUE;
2961 break;
2962 }
2963
2964 for (plun = ptgt->tgt_lun; plun;
2965 plun = plun->lun_next) {
2966 if (plun->lun_num == lun_num) {
2967 lun_exists = TRUE;
2968 break;
2969 }
2970 }
2971 break;
2972 default:
2973 break;
2974 }
2975
2976 if (lun_exists == FALSE) {
2977 reconfig_needed = TRUE;
2978 break;
2979 }
2980 }
2981
2982 mutex_exit(&ptgt->tgt_mutex);
2983 kmem_free(report_lun, fpkt->pkt_datalen);
2984
2985 return (reconfig_needed);
2986 }
2987
2988 /*
2989 * This function is called by fcp_handle_page83 and uses inquiry response data
2990 * stored in plun->lun_inq to determine whether or not a device is a member of
 * the table fcp_symmetric_disk_table. We return 0 if it is in the table,
2992 * otherwise 1.
2993 */
2994 static int
2995 fcp_symmetric_device_probe(struct fcp_lun *plun)
2996 {
2997 struct scsi_inquiry *stdinq = &plun->lun_inq;
2998 char *devidptr;
2999 int i, len;
3000
3001 for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
3002 devidptr = fcp_symmetric_disk_table[i];
3003 len = (int)strlen(devidptr);
3004
3005 if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
3006 return (0);
3007 }
3008 }
3009 return (1);
3010 }
3011
3012
3013 /*
3014 * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl
3015 * It basically returns the current count of # of state change callbacks
3016 * i.e the value of tgt_change_cnt.
3017 *
3018 * INPUT:
3019 * fcp_ioctl.fp_minor -> The minor # of the fp port
3020 * fcp_ioctl.listlen -> 1
3021 * fcp_ioctl.list -> Pointer to a 32 bit integer
3022 */
3023 /*ARGSUSED2*/
3024 static int
3025 fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
3026 {
3027 int ret;
3028 uint32_t link_cnt;
3029 struct fcp_ioctl fioctl;
3030 struct fcp_port *pptr = NULL;
3031
3032 if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
3033 &pptr)) != 0) {
3034 return (ret);
3035 }
3036
3037 ASSERT(pptr != NULL);
3038
3039 if (fioctl.listlen != 1) {
3040 return (EINVAL);
3041 }
3042
3043 mutex_enter(&pptr->port_mutex);
3044 if (pptr->port_state & FCP_STATE_OFFLINE) {
3045 mutex_exit(&pptr->port_mutex);
3046 return (ENXIO);
3047 }
3048
3049 /*
3050 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
3051 * When the fcp initially attaches to the port and there are nothing
3052 * hanging out of the port or if there was a repeat offline state change
3053 * callback (refer fcp_statec_callback() FC_STATE_OFFLINE case).
3054 * In the latter case, port_tmp_cnt will be non-zero and that is how we
3055 * will differentiate the 2 cases.
3056 */
3057 if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
3058 mutex_exit(&pptr->port_mutex);
3059 return (ENXIO);
3060 }
3061
3062 link_cnt = pptr->port_link_cnt;
3063 mutex_exit(&pptr->port_mutex);
3064
3065 if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
3066 return (EFAULT);
3067 }
3068
3069 #ifdef _MULTI_DATAMODEL
3070 switch (ddi_model_convert_from(mode & FMODELS)) {
3071 case DDI_MODEL_ILP32: {
3072 struct fcp32_ioctl f32_ioctl;
3073
3074 f32_ioctl.fp_minor = fioctl.fp_minor;
3075 f32_ioctl.listlen = fioctl.listlen;
3076 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
3077 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
3078 sizeof (struct fcp32_ioctl), mode)) {
3079 return (EFAULT);
3080 }
3081 break;
3082 }
3083 case DDI_MODEL_NONE:
3084 if (ddi_copyout((void *)&fioctl, (void *)data,
3085 sizeof (struct fcp_ioctl), mode)) {
3086 return (EFAULT);
3087 }
3088 break;
3089 }
3090 #else /* _MULTI_DATAMODEL */
3091
3092 if (ddi_copyout((void *)&fioctl, (void *)data,
3093 sizeof (struct fcp_ioctl), mode)) {
3094 return (EFAULT);
3095 }
3096 #endif /* _MULTI_DATAMODEL */
3097
3098 return (0);
3099 }
3100
3101 /*
3102 * This function copies the fcp_ioctl structure passed in from user land
3103 * into kernel land. Handles 32 bit applications.
3104 */
3105 /*ARGSUSED*/
3106 static int
3107 fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
3108 struct fcp_ioctl *fioctl, struct fcp_port **pptr)
3109 {
3110 struct fcp_port *t_pptr;
3111
3112 #ifdef _MULTI_DATAMODEL
3113 switch (ddi_model_convert_from(mode & FMODELS)) {
3114 case DDI_MODEL_ILP32: {
3115 struct fcp32_ioctl f32_ioctl;
3116
3117 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
3118 sizeof (struct fcp32_ioctl), mode)) {
3119 return (EFAULT);
3120 }
3121 fioctl->fp_minor = f32_ioctl.fp_minor;
3122 fioctl->listlen = f32_ioctl.listlen;
3123 fioctl->list = (caddr_t)(long)f32_ioctl.list;
3124 break;
3125 }
3126 case DDI_MODEL_NONE:
3127 if (ddi_copyin((void *)data, (void *)fioctl,
3128 sizeof (struct fcp_ioctl), mode)) {
3129 return (EFAULT);
3130 }
3131 break;
3132 }
3133
3134 #else /* _MULTI_DATAMODEL */
3135 if (ddi_copyin((void *)data, (void *)fioctl,
3136 sizeof (struct fcp_ioctl), mode)) {
3137 return (EFAULT);
3138 }
3139 #endif /* _MULTI_DATAMODEL */
3140
3141 /*
3142 * Right now we can assume that the minor number matches with
3143 * this instance of fp. If this changes we will need to
3144 * revisit this logic.
3145 */
3146 mutex_enter(&fcp_global_mutex);
3147 t_pptr = fcp_port_head;
3148 while (t_pptr) {
3149 if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
3150 break;
3151 } else {
3152 t_pptr = t_pptr->port_next;
3153 }
3154 }
3155 *pptr = t_pptr;
3156 mutex_exit(&fcp_global_mutex);
3157 if (t_pptr == NULL) {
3158 return (ENXIO);
3159 }
3160
3161 return (0);
3162 }
3163
3164 /*
3165 * Function: fcp_port_create_tgt
3166 *
3167 * Description: As the name suggest this function creates the target context
3168 * specified by the the WWN provided by the caller. If the
3169 * creation goes well and the target is known by fp/fctl a PLOGI
3170 * followed by a PRLI are issued.
3171 *
3172 * Argument: pptr fcp port structure
3173 * pwwn WWN of the target
3174 * ret_val Address of the return code. It could be:
3175 * EIO, ENOMEM or 0.
3176 * fc_status PLOGI or PRLI status completion
3177 * fc_pkt_state PLOGI or PRLI state completion
3178 * fc_pkt_reason PLOGI or PRLI reason completion
3179 * fc_pkt_action PLOGI or PRLI action completion
3180 *
3181 * Return Value: NULL if it failed
3182 * Target structure address if it succeeds
3183 */
static struct fcp_tgt *
fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
    int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
{
	struct fcp_tgt	*ptgt = NULL;	/* target built here; NULL on error */
	fc_portmap_t	devlist;	/* port map entry for pwwn */
	int		lcount;		/* snapshot of port link count */
	int		error;

	*ret_val = 0;

	/*
	 * Check FC port device & get port map.  Both lookups must succeed
	 * before a target is allocated; either failure becomes EIO.
	 */
	if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
	    &error, 1) == NULL) {
		*ret_val = EIO;
	} else {
		if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
		    &devlist) != FC_SUCCESS) {
			*ret_val = EIO;
		}
	}

	/*
	 * Set port map flags.  Note that devlist is only fully populated on
	 * the success path above; this store is harmless but goes unused
	 * when *ret_val is already EIO.
	 */
	devlist.map_type = PORT_DEVICE_USER_CREATE;

	/* Allocate target */
	if (*ret_val == 0) {
		lcount = pptr->port_link_cnt;
		ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			*ret_val = ENOMEM;
		} else {
			/* Setup target state from the port map entry */
			mutex_enter(&ptgt->tgt_mutex);

			ptgt->tgt_statec_cause = FCP_CAUSE_TGT_CHANGE;
			ptgt->tgt_tmp_cnt = 1;
			ptgt->tgt_d_id = devlist.map_did.port_id;
			ptgt->tgt_hard_addr =
			    devlist.map_hard_addr.hard_addr;
			ptgt->tgt_pd_handle = devlist.map_pd;
			ptgt->tgt_fca_dev = NULL;

			bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
			    FC_WWN_SIZE);
			bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
			    FC_WWN_SIZE);

			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	/*
	 * Release global mutex for PLOGI and PRLI.  The caller holds
	 * fcp_global_mutex on entry (implied by the unpaired exit here); it
	 * is dropped because the login exchanges below block on completion.
	 */
	mutex_exit(&fcp_global_mutex);

	/* Send PLOGI (If necessary) */
	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}

	/* Send PRLI (If necessary) */
	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_prli(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}

	/* Reacquire the global mutex before returning to the caller */
	mutex_enter(&fcp_global_mutex);

	return (ptgt);
}
3259
3260 /*
3261 * Function: fcp_tgt_send_plogi
3262 *
3263 * Description: This function sends a PLOGI to the target specified by the
3264 * caller and waits till it completes.
3265 *
3266 * Argument: ptgt Target to send the plogi to.
3267 * fc_status Status returned by fp/fctl in the PLOGI request.
3268 * fc_pkt_state State returned by fp/fctl in the PLOGI request.
3269 * fc_pkt_reason Reason returned by fp/fctl in the PLOGI request.
3270 * fc_pkt_action Action returned by fp/fctl in the PLOGI request.
3271 *
3272 * Return Value: 0
3273 * ENOMEM
3274 * EIO
3275 *
3276 * Context: User context.
3277 */
static int
fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
    int *fc_pkt_reason, int *fc_pkt_action)
{
	struct fcp_port	*pptr;
	struct fcp_ipkt	*icmd;
	struct fc_packet	*fpkt;
	fc_frame_hdr_t	*hp;
	struct la_els_logi	logi;
	int	tcount;
	int	lcount;
	int	ret, login_retval = ~FC_SUCCESS;

	ret = 0;

	pptr = ptgt->tgt_port;

	/* Snapshot link/target generation counts for the icmd allocation */
	lcount = pptr->port_link_cnt;
	tcount = ptgt->tgt_change_cnt;

	/* Alloc internal packet */
	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
	    sizeof (la_els_logi_t), 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT);

	if (icmd == NULL) {
		ret = ENOMEM;
	} else {
		/*
		 * Setup internal packet as sema sync: the completion
		 * callback posts a semaphore this thread waits on below.
		 */
		fcp_ipkt_sema_init(icmd);

		/*
		 * Setup internal packet (icmd)
		 */
		icmd->ipkt_lun = NULL;
		icmd->ipkt_restart = 0;
		icmd->ipkt_retries = 0;
		icmd->ipkt_opcode = LA_ELS_PLOGI;

		/*
		 * Setup fc_packet
		 */
		fpkt = icmd->ipkt_fpkt;

		fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
		fpkt->pkt_timeout = FCP_ELS_TIMEOUT;

		/*
		 * Setup FC frame header
		 */
		hp = &fpkt->pkt_cmd_fhdr;

		hp->s_id = pptr->port_id;	/* source ID */
		hp->d_id = ptgt->tgt_d_id;	/* dest ID */
		hp->r_ctl = R_CTL_ELS_REQ;
		hp->type = FC_TYPE_EXTENDED_LS;
		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->seq_id = 0;
		hp->rsvd = 0;
		hp->df_ctl = 0;
		hp->seq_cnt = 0;
		hp->ox_id = 0xffff;		/* i.e. none */
		hp->rx_id = 0xffff;		/* i.e. none */
		hp->ro = 0;

		/*
		 * Setup PLOGI payload; only the ls_code is filled in here.
		 */
		bzero(&logi, sizeof (struct la_els_logi));
		logi.ls_code.ls_code = LA_ELS_PLOGI;

		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));

		/*
		 * Send PLOGI
		 */
		*fc_status = login_retval =
		    fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
		if (*fc_status != FC_SUCCESS) {
			ret = EIO;
		}
	}

	/*
	 * Wait for completion.  Only reached when fc_ulp_login() was
	 * accepted by the transport (login_retval == FC_SUCCESS), in which
	 * case the completion callback will post the semaphore.
	 */
	if ((ret == 0) && (login_retval == FC_SUCCESS)) {
		ret = fcp_ipkt_sema_wait(icmd);

		*fc_pkt_state = fpkt->pkt_state;
		*fc_pkt_reason = fpkt->pkt_reason;
		*fc_pkt_action = fpkt->pkt_action;
	}

	/*
	 * Cleanup transport data structures whenever an internal packet was
	 * allocated.  The completion callback (fcp_ipkt_sema_callback) only
	 * posts the semaphore and frees nothing, so teardown of the
	 * semaphore and the icmd itself is always done here.
	 */
	if (icmd != NULL) {
		fcp_ipkt_sema_cleanup(icmd);
	}

	return (ret);
}
3388
3389 /*
3390 * Function: fcp_tgt_send_prli
3391 *
3392 * Description: Does nothing as of today.
3393 *
3394 * Argument: ptgt Target to send the prli to.
3395 * fc_status Status returned by fp/fctl in the PRLI request.
3396 * fc_pkt_state State returned by fp/fctl in the PRLI request.
3397 * fc_pkt_reason Reason returned by fp/fctl in the PRLI request.
3398 * fc_pkt_action Action returned by fp/fctl in the PRLI request.
3399 *
3400 * Return Value: 0
3401 */
3402 /*ARGSUSED*/
3403 static int
3404 fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3405 int *fc_pkt_reason, int *fc_pkt_action)
3406 {
3407 return (0);
3408 }
3409
3410 /*
3411 * Function: fcp_ipkt_sema_init
3412 *
3413 * Description: Initializes the semaphore contained in the internal packet.
3414 *
3415 * Argument: icmd Internal packet the semaphore of which must be
3416 * initialized.
3417 *
3418 * Return Value: None
3419 *
3420 * Context: User context only.
3421 */
3422 static void
3423 fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
3424 {
3425 struct fc_packet *fpkt;
3426
3427 fpkt = icmd->ipkt_fpkt;
3428
3429 /* Create semaphore for sync */
3430 sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);
3431
3432 /* Setup the completion callback */
3433 fpkt->pkt_comp = fcp_ipkt_sema_callback;
3434 }
3435
3436 /*
3437 * Function: fcp_ipkt_sema_wait
3438 *
3439 * Description: Wait on the semaphore embedded in the internal packet. The
3440 * semaphore is released in the callback.
3441 *
3442 * Argument: icmd Internal packet to wait on for completion.
3443 *
3444 * Return Value: 0
3445 * EIO
3446 * EBUSY
3447 * EAGAIN
3448 *
3449 * Context: User context only.
3450 *
3451 * This function does a conversion between the field pkt_state of the fc_packet
3452 * embedded in the internal packet (icmd) and the code it returns.
3453 */
3454 static int
3455 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3456 {
3457 struct fc_packet *fpkt;
3458 int ret;
3459
3460 ret = EIO;
3461 fpkt = icmd->ipkt_fpkt;
3462
3463 /*
3464 * Wait on semaphore
3465 */
3466 sema_p(&(icmd->ipkt_sema));
3467
3468 /*
3469 * Check the status of the FC packet
3470 */
3471 switch (fpkt->pkt_state) {
3472 case FC_PKT_SUCCESS:
3473 ret = 0;
3474 break;
3475 case FC_PKT_LOCAL_RJT:
3476 switch (fpkt->pkt_reason) {
3477 case FC_REASON_SEQ_TIMEOUT:
3478 case FC_REASON_RX_BUF_TIMEOUT:
3479 ret = EAGAIN;
3480 break;
3481 case FC_REASON_PKT_BUSY:
3482 ret = EBUSY;
3483 break;
3484 }
3485 break;
3486 case FC_PKT_TIMEOUT:
3487 ret = EAGAIN;
3488 break;
3489 case FC_PKT_LOCAL_BSY:
3490 case FC_PKT_TRAN_BSY:
3491 case FC_PKT_NPORT_BSY:
3492 case FC_PKT_FABRIC_BSY:
3493 ret = EBUSY;
3494 break;
3495 case FC_PKT_LS_RJT:
3496 case FC_PKT_BA_RJT:
3497 switch (fpkt->pkt_reason) {
3498 case FC_REASON_LOGICAL_BSY:
3499 ret = EBUSY;
3500 break;
3501 }
3502 break;
3503 case FC_PKT_FS_RJT:
3504 switch (fpkt->pkt_reason) {
3505 case FC_REASON_FS_LOGICAL_BUSY:
3506 ret = EBUSY;
3507 break;
3508 }
3509 break;
3510 }
3511
3512 return (ret);
3513 }
3514
3515 /*
3516 * Function: fcp_ipkt_sema_callback
3517 *
3518 * Description: Registered as the completion callback function for the FC
3519 * transport when the ipkt semaphore is used for sync. This will
3520 * cleanup the used data structures, if necessary and wake up
3521 * the user thread to complete the transaction.
3522 *
3523 * Argument: fpkt FC packet (points to the icmd)
3524 *
3525 * Return Value: None
3526 *
3527 * Context: User context only
3528 */
3529 static void
3530 fcp_ipkt_sema_callback(struct fc_packet *fpkt)
3531 {
3532 struct fcp_ipkt *icmd;
3533
3534 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
3535
3536 /*
3537 * Wake up user thread
3538 */
3539 sema_v(&(icmd->ipkt_sema));
3540 }
3541
3542 /*
3543 * Function: fcp_ipkt_sema_cleanup
3544 *
3545 * Description: Called to cleanup (if necessary) the data structures used
3546 * when ipkt sema is used for sync. This function will detect
3547 * whether the caller is the last thread (via counter) and
3548 * cleanup only if necessary.
3549 *
3550 * Argument: icmd Internal command packet
3551 *
3552 * Return Value: None
3553 *
3554 * Context: User context only
3555 */
3556 static void
3557 fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
3558 {
3559 struct fcp_tgt *ptgt;
3560 struct fcp_port *pptr;
3561
3562 ptgt = icmd->ipkt_tgt;
3563 pptr = icmd->ipkt_port;
3564
3565 /*
3566 * Acquire data structure
3567 */
3568 mutex_enter(&ptgt->tgt_mutex);
3569
3570 /*
3571 * Destroy semaphore
3572 */
3573 sema_destroy(&(icmd->ipkt_sema));
3574
3575 /*
3576 * Cleanup internal packet
3577 */
3578 mutex_exit(&ptgt->tgt_mutex);
3579 fcp_icmd_free(pptr, icmd);
3580 }
3581
3582 /*
3583 * Function: fcp_port_attach
3584 *
3585 * Description: Called by the transport framework to resume, suspend or
3586 * attach a new port.
3587 *
3588 * Argument: ulph Port handle
3589 * *pinfo Port information
3590 * cmd Command
3591 * s_id Port ID
3592 *
3593 * Return Value: FC_FAILURE or FC_SUCCESS
3594 */
3595 /*ARGSUSED*/
3596 static int
3597 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3598 fc_attach_cmd_t cmd, uint32_t s_id)
3599 {
3600 int instance;
3601 int res = FC_FAILURE; /* default result */
3602
3603 ASSERT(pinfo != NULL);
3604
3605 instance = ddi_get_instance(pinfo->port_dip);
3606
3607 switch (cmd) {
3608 case FC_CMD_ATTACH:
3609 /*
3610 * this port instance attaching for the first time (or after
3611 * being detached before)
3612 */
3613 if (fcp_handle_port_attach(ulph, pinfo, s_id,
3614 instance) == DDI_SUCCESS) {
3615 res = FC_SUCCESS;
3616 } else {
3617 ASSERT(ddi_get_soft_state(fcp_softstate,
3618 instance) == NULL);
3619 }
3620 break;
3621
3622 case FC_CMD_RESUME:
3623 case FC_CMD_POWER_UP:
3624 /*
3625 * this port instance was attached and the suspended and
3626 * will now be resumed
3627 */
3628 if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3629 instance) == DDI_SUCCESS) {
3630 res = FC_SUCCESS;
3631 }
3632 break;
3633
3634 default:
3635 /* shouldn't happen */
3636 FCP_TRACE(fcp_logq, "fcp",
3637 fcp_trace, FCP_BUF_LEVEL_2, 0,
3638 "port_attach: unknown cmdcommand: %d", cmd);
3639 break;
3640 }
3641
3642 /* return result */
3643 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3644 FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3645
3646 return (res);
3647 }
3648
3649
3650 /*
3651 * detach or suspend this port instance
3652 *
3653 * acquires and releases the global mutex
3654 *
3655 * acquires and releases the mutex for this port
3656 *
3657 * acquires and releases the hotplug mutex for this port
3658 */
3659 /*ARGSUSED*/
3660 static int
3661 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
3662 fc_detach_cmd_t cmd)
3663 {
3664 int flag;
3665 int instance;
3666 struct fcp_port *pptr;
3667
3668 instance = ddi_get_instance(info->port_dip);
3669 pptr = ddi_get_soft_state(fcp_softstate, instance);
3670
3671 switch (cmd) {
3672 case FC_CMD_SUSPEND:
3673 FCP_DTRACE(fcp_logq, "fcp",
3674 fcp_trace, FCP_BUF_LEVEL_8, 0,
3675 "port suspend called for port %d", instance);
3676 flag = FCP_STATE_SUSPENDED;
3677 break;
3678
3679 case FC_CMD_POWER_DOWN:
3680 FCP_DTRACE(fcp_logq, "fcp",
3681 fcp_trace, FCP_BUF_LEVEL_8, 0,
3682 "port power down called for port %d", instance);
3683 flag = FCP_STATE_POWER_DOWN;
3684 break;
3685
3686 case FC_CMD_DETACH:
3687 FCP_DTRACE(fcp_logq, "fcp",
3688 fcp_trace, FCP_BUF_LEVEL_8, 0,
3689 "port detach called for port %d", instance);
3690 flag = FCP_STATE_DETACHING;
3691 break;
3692
3693 default:
3694 /* shouldn't happen */
3695 return (FC_FAILURE);
3696 }
3697 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3698 FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");
3699
3700 return (fcp_handle_port_detach(pptr, flag, instance));
3701 }
3702
3703
3704 /*
3705 * called for ioctls on the transport's devctl interface, and the transport
3706 * has passed it to us
3707 *
3708 * this will only be called for device control ioctls (i.e. hotplugging stuff)
3709 *
3710 * return FC_SUCCESS if we decide to claim the ioctl,
3711 * else return FC_UNCLAIMED
3712 *
3713 * *rval is set iff we decide to claim the ioctl
3714 */
3715 /*ARGSUSED*/
3716 static int
3717 fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
3718 intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
3719 {
3720 int retval = FC_UNCLAIMED; /* return value */
3721 struct fcp_port *pptr = NULL; /* our soft state */
3722 struct devctl_iocdata *dcp = NULL; /* for devctl */
3723 dev_info_t *cdip;
3724 mdi_pathinfo_t *pip = NULL;
3725 char *ndi_nm; /* NDI name */
3726 char *ndi_addr; /* NDI addr */
3727 int is_mpxio, circ;
3728 int devi_entered = 0;
3729 clock_t end_time;
3730
3731 ASSERT(rval != NULL);
3732
3733 FCP_DTRACE(fcp_logq, "fcp",
3734 fcp_trace, FCP_BUF_LEVEL_8, 0,
3735 "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);
3736
3737 /* if already claimed then forget it */
3738 if (claimed) {
3739 /*
3740 * for now, if this ioctl has already been claimed, then
3741 * we just ignore it
3742 */
3743 return (retval);
3744 }
3745
3746 /* get our port info */
3747 if ((pptr = fcp_get_port(port_handle)) == NULL) {
3748 fcp_log(CE_WARN, NULL,
3749 "!fcp:Invalid port handle handle in ioctl");
3750 *rval = ENXIO;
3751 return (retval);
3752 }
3753 is_mpxio = pptr->port_mpxio;
3754
3755 switch (cmd) {
3756 case DEVCTL_BUS_GETSTATE:
3757 case DEVCTL_BUS_QUIESCE:
3758 case DEVCTL_BUS_UNQUIESCE:
3759 case DEVCTL_BUS_RESET:
3760 case DEVCTL_BUS_RESETALL:
3761
3762 case DEVCTL_BUS_DEV_CREATE:
3763 if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3764 return (retval);
3765 }
3766 break;
3767
3768 case DEVCTL_DEVICE_GETSTATE:
3769 case DEVCTL_DEVICE_OFFLINE:
3770 case DEVCTL_DEVICE_ONLINE:
3771 case DEVCTL_DEVICE_REMOVE:
3772 case DEVCTL_DEVICE_RESET:
3773 if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3774 return (retval);
3775 }
3776
3777 ASSERT(dcp != NULL);
3778
3779 /* ensure we have a name and address */
3780 if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
3781 ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
3782 FCP_TRACE(fcp_logq, pptr->port_instbuf,
3783 fcp_trace, FCP_BUF_LEVEL_2, 0,
3784 "ioctl: can't get name (%s) or addr (%s)",
3785 ndi_nm ? ndi_nm : "<null ptr>",
3786 ndi_addr ? ndi_addr : "<null ptr>");
3787 ndi_dc_freehdl(dcp);
3788 return (retval);
3789 }
3790
3791
3792 /* get our child's DIP */
3793 ASSERT(pptr != NULL);
3794 if (is_mpxio) {
3795 mdi_devi_enter(pptr->port_dip, &circ);
3796 } else {
3797 ndi_devi_enter(pptr->port_dip, &circ);
3798 }
3799 devi_entered = 1;
3800
3801 if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
3802 ndi_addr)) == NULL) {
3803 /* Look for virtually enumerated devices. */
3804 pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
3805 if (pip == NULL ||
3806 ((cdip = mdi_pi_get_client(pip)) == NULL)) {
3807 *rval = ENXIO;
3808 goto out;
3809 }
3810 }
3811 break;
3812
3813 default:
3814 *rval = ENOTTY;
3815 return (retval);
3816 }
3817
3818 /* this ioctl is ours -- process it */
3819
3820 retval = FC_SUCCESS; /* just means we claim the ioctl */
3821
3822 /* we assume it will be a success; else we'll set error value */
3823 *rval = 0;
3824
3825
3826 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
3827 fcp_trace, FCP_BUF_LEVEL_8, 0,
3828 "ioctl: claiming this one");
3829
3830 /* handle ioctls now */
3831 switch (cmd) {
3832 case DEVCTL_DEVICE_GETSTATE:
3833 ASSERT(cdip != NULL);
3834 ASSERT(dcp != NULL);
3835 if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
3836 *rval = EFAULT;
3837 }
3838 break;
3839
3840 case DEVCTL_DEVICE_REMOVE:
3841 case DEVCTL_DEVICE_OFFLINE: {
3842 int flag = 0;
3843 int lcount;
3844 int tcount;
3845 struct fcp_pkt *head = NULL;
3846 struct fcp_lun *plun;
3847 child_info_t *cip = CIP(cdip);
3848 int all = 1;
3849 struct fcp_lun *tplun;
3850 struct fcp_tgt *ptgt;
3851
3852 ASSERT(pptr != NULL);
3853 ASSERT(cdip != NULL);
3854
3855 mutex_enter(&pptr->port_mutex);
3856 if (pip != NULL) {
3857 cip = CIP(pip);
3858 }
3859 if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3860 mutex_exit(&pptr->port_mutex);
3861 *rval = ENXIO;
3862 break;
3863 }
3864
3865 head = fcp_scan_commands(plun);
3866 if (head != NULL) {
3867 fcp_abort_commands(head, LUN_PORT);
3868 }
3869 lcount = pptr->port_link_cnt;
3870 tcount = plun->lun_tgt->tgt_change_cnt;
3871 mutex_exit(&pptr->port_mutex);
3872
3873 if (cmd == DEVCTL_DEVICE_REMOVE) {
3874 flag = NDI_DEVI_REMOVE;
3875 if (is_mpxio)
3876 flag |= NDI_USER_REQ;
3877 }
3878
3879 if (is_mpxio) {
3880 mdi_devi_exit(pptr->port_dip, circ);
3881 } else {
3882 ndi_devi_exit(pptr->port_dip, circ);
3883 }
3884 devi_entered = 0;
3885
3886 *rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3887 FCP_OFFLINE, lcount, tcount, flag);
3888
3889 if (*rval != NDI_SUCCESS) {
3890 *rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
3891 break;
3892 }
3893
3894 fcp_update_offline_flags(plun);
3895
3896 ptgt = plun->lun_tgt;
3897 mutex_enter(&ptgt->tgt_mutex);
3898 for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
3899 tplun->lun_next) {
3900 mutex_enter(&tplun->lun_mutex);
3901 if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
3902 all = 0;
3903 }
3904 mutex_exit(&tplun->lun_mutex);
3905 }
3906
3907 if (all) {
3908 ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
3909 /*
3910 * The user is unconfiguring/offlining the device.
3911 * If fabric and the auto configuration is set
3912 * then make sure the user is the only one who
3913 * can reconfigure the device.
3914 */
3915 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
3916 fcp_enable_auto_configuration) {
3917 ptgt->tgt_manual_config_only = 1;
3918 }
3919 }
3920 mutex_exit(&ptgt->tgt_mutex);
3921 break;
3922 }
3923
3924 case DEVCTL_DEVICE_ONLINE: {
3925 int lcount;
3926 int tcount;
3927 struct fcp_lun *plun;
3928 child_info_t *cip = CIP(cdip);
3929
3930 ASSERT(cdip != NULL);
3931 ASSERT(pptr != NULL);
3932
3933 mutex_enter(&pptr->port_mutex);
3934 if (pip != NULL) {
3935 cip = CIP(pip);
3936 }
3937 if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3938 mutex_exit(&pptr->port_mutex);
3939 *rval = ENXIO;
3940 break;
3941 }
3942 lcount = pptr->port_link_cnt;
3943 tcount = plun->lun_tgt->tgt_change_cnt;
3944 mutex_exit(&pptr->port_mutex);
3945
3946 /*
3947 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
3948 * to allow the device attach to occur when the device is
3949 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
3950 * from the scsi_probe()).
3951 */
3952 mutex_enter(&LUN_TGT->tgt_mutex);
3953 plun->lun_state |= FCP_LUN_ONLINING;
3954 mutex_exit(&LUN_TGT->tgt_mutex);
3955
3956 if (is_mpxio) {
3957 mdi_devi_exit(pptr->port_dip, circ);
3958 } else {
3959 ndi_devi_exit(pptr->port_dip, circ);
3960 }
3961 devi_entered = 0;
3962
3963 *rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3964 FCP_ONLINE, lcount, tcount, 0);
3965
3966 if (*rval != NDI_SUCCESS) {
3967 /* Reset the FCP_LUN_ONLINING bit */
3968 mutex_enter(&LUN_TGT->tgt_mutex);
3969 plun->lun_state &= ~FCP_LUN_ONLINING;
3970 mutex_exit(&LUN_TGT->tgt_mutex);
3971 *rval = EIO;
3972 break;
3973 }
3974 mutex_enter(&LUN_TGT->tgt_mutex);
3975 plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
3976 FCP_LUN_ONLINING);
3977 mutex_exit(&LUN_TGT->tgt_mutex);
3978 break;
3979 }
3980
3981 case DEVCTL_BUS_DEV_CREATE: {
3982 uchar_t *bytes = NULL;
3983 uint_t nbytes;
3984 struct fcp_tgt *ptgt = NULL;
3985 struct fcp_lun *plun = NULL;
3986 dev_info_t *useless_dip = NULL;
3987
3988 *rval = ndi_dc_devi_create(dcp, pptr->port_dip,
3989 DEVCTL_CONSTRUCT, &useless_dip);
3990 if (*rval != 0 || useless_dip == NULL) {
3991 break;
3992 }
3993
3994 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
3995 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
3996 &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
3997 *rval = EINVAL;
3998 (void) ndi_devi_free(useless_dip);
3999 if (bytes != NULL) {
4000 ddi_prop_free(bytes);
4001 }
4002 break;
4003 }
4004
4005 *rval = fcp_create_on_demand(pptr, bytes);
4006 if (*rval == 0) {
4007 mutex_enter(&pptr->port_mutex);
4008 ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
4009 if (ptgt) {
4010 /*
4011 * We now have a pointer to the target that
4012 * was created. Lets point to the first LUN on
4013 * this new target.
4014 */
4015 mutex_enter(&ptgt->tgt_mutex);
4016
4017 plun = ptgt->tgt_lun;
4018 /*
4019 * There may be stale/offline LUN entries on
4020 * this list (this is by design) and so we have
4021 * to make sure we point to the first online
4022 * LUN
4023 */
4024 while (plun &&
4025 plun->lun_state & FCP_LUN_OFFLINE) {
4026 plun = plun->lun_next;
4027 }
4028
4029 mutex_exit(&ptgt->tgt_mutex);
4030 }
4031 mutex_exit(&pptr->port_mutex);
4032 }
4033
4034 if (*rval == 0 && ptgt && plun) {
4035 mutex_enter(&plun->lun_mutex);
4036 /*
4037 * Allow up to fcp_lun_ready_retry seconds to
4038 * configure all the luns behind the target.
4039 *
4040 * The intent here is to allow targets with long
4041 * reboot/reset-recovery times to become available
4042 * while limiting the maximum wait time for an
4043 * unresponsive target.
4044 */
4045 end_time = ddi_get_lbolt() +
4046 SEC_TO_TICK(fcp_lun_ready_retry);
4047
4048 while (ddi_get_lbolt() < end_time) {
4049 retval = FC_SUCCESS;
4050
4051 /*
4052 * The new ndi interfaces for on-demand creation
4053 * are inflexible, Do some more work to pass on
4054 * a path name of some LUN (design is broken !)
4055 */
4056 if (plun->lun_cip) {
4057 if (plun->lun_mpxio == 0) {
4058 cdip = DIP(plun->lun_cip);
4059 } else {
4060 cdip = mdi_pi_get_client(
4061 PIP(plun->lun_cip));
4062 }
4063 if (cdip == NULL) {
4064 *rval = ENXIO;
4065 break;
4066 }
4067
4068 if (!i_ddi_devi_attached(cdip)) {
4069 mutex_exit(&plun->lun_mutex);
4070 delay(drv_usectohz(1000000));
4071 mutex_enter(&plun->lun_mutex);
4072 } else {
4073 /*
4074 * This Lun is ready, lets
4075 * check the next one.
4076 */
4077 mutex_exit(&plun->lun_mutex);
4078 plun = plun->lun_next;
4079 while (plun && (plun->lun_state
4080 & FCP_LUN_OFFLINE)) {
4081 plun = plun->lun_next;
4082 }
4083 if (!plun) {
4084 break;
4085 }
4086 mutex_enter(&plun->lun_mutex);
4087 }
4088 } else {
4089 /*
4090 * lun_cip field for a valid lun
4091 * should never be NULL. Fail the
4092 * command.
4093 */
4094 *rval = ENXIO;
4095 break;
4096 }
4097 }
4098 if (plun) {
4099 mutex_exit(&plun->lun_mutex);
4100 } else {
4101 char devnm[MAXNAMELEN];
4102 int nmlen;
4103
4104 nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
4105 ddi_node_name(cdip),
4106 ddi_get_name_addr(cdip));
4107
4108 if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
4109 0) {
4110 *rval = EFAULT;
4111 }
4112 }
4113 } else {
4114 int i;
4115 char buf[25];
4116
4117 for (i = 0; i < FC_WWN_SIZE; i++) {
4118 (void) sprintf(&buf[i << 1], "%02x", bytes[i]);
4119 }
4120
4121 fcp_log(CE_WARN, pptr->port_dip,
4122 "!Failed to create nodes for pwwn=%s; error=%x",
4123 buf, *rval);
4124 }
4125
4126 (void) ndi_devi_free(useless_dip);
4127 ddi_prop_free(bytes);
4128 break;
4129 }
4130
4131 case DEVCTL_DEVICE_RESET: {
4132 struct fcp_lun *plun;
4133 child_info_t *cip = CIP(cdip);
4134
4135 ASSERT(cdip != NULL);
4136 ASSERT(pptr != NULL);
4137 mutex_enter(&pptr->port_mutex);
4138 if (pip != NULL) {
4139 cip = CIP(pip);
4140 }
4141 if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
4142 mutex_exit(&pptr->port_mutex);
4143 *rval = ENXIO;
4144 break;
4145 }
4146 mutex_exit(&pptr->port_mutex);
4147
4148 mutex_enter(&plun->lun_tgt->tgt_mutex);
4149 if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
4150 mutex_exit(&plun->lun_tgt->tgt_mutex);
4151
4152 *rval = ENXIO;
4153 break;
4154 }
4155
4156 if (plun->lun_sd == NULL) {
4157 mutex_exit(&plun->lun_tgt->tgt_mutex);
4158
4159 *rval = ENXIO;
4160 break;
4161 }
4162 mutex_exit(&plun->lun_tgt->tgt_mutex);
4163
4164 /*
4165 * set up ap so that fcp_reset can figure out
4166 * which target to reset
4167 */
4168 if (fcp_scsi_reset(&plun->lun_sd->sd_address,
4169 RESET_TARGET) == FALSE) {
4170 *rval = EIO;
4171 }
4172 break;
4173 }
4174
4175 case DEVCTL_BUS_GETSTATE:
4176 ASSERT(dcp != NULL);
4177 ASSERT(pptr != NULL);
4178 ASSERT(pptr->port_dip != NULL);
4179 if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
4180 NDI_SUCCESS) {
4181 *rval = EFAULT;
4182 }
4183 break;
4184
4185 case DEVCTL_BUS_QUIESCE:
4186 case DEVCTL_BUS_UNQUIESCE:
4187 *rval = ENOTSUP;
4188 break;
4189
4190 case DEVCTL_BUS_RESET:
4191 case DEVCTL_BUS_RESETALL:
4192 ASSERT(pptr != NULL);
4193 (void) fcp_linkreset(pptr, NULL, KM_SLEEP);
4194 break;
4195
4196 default:
4197 ASSERT(dcp != NULL);
4198 *rval = ENOTTY;
4199 break;
4200 }
4201
4202 /* all done -- clean up and return */
4203 out: if (devi_entered) {
4204 if (is_mpxio) {
4205 mdi_devi_exit(pptr->port_dip, circ);
4206 } else {
4207 ndi_devi_exit(pptr->port_dip, circ);
4208 }
4209 }
4210
4211 if (dcp != NULL) {
4212 ndi_dc_freehdl(dcp);
4213 }
4214
4215 return (retval);
4216 }
4217
4218
4219 /*ARGSUSED*/
4220 static int
4221 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4222 uint32_t claimed)
4223 {
4224 uchar_t r_ctl;
4225 uchar_t ls_code;
4226 struct fcp_port *pptr;
4227
4228 if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4229 return (FC_UNCLAIMED);
4230 }
4231
4232 mutex_enter(&pptr->port_mutex);
4233 if (pptr->port_state & (FCP_STATE_DETACHING |
4234 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4235 mutex_exit(&pptr->port_mutex);
4236 return (FC_UNCLAIMED);
4237 }
4238 mutex_exit(&pptr->port_mutex);
4239
4240 r_ctl = buf->ub_frame.r_ctl;
4241
4242 switch (r_ctl & R_CTL_ROUTING) {
4243 case R_CTL_EXTENDED_SVC:
4244 if (r_ctl == R_CTL_ELS_REQ) {
4245 ls_code = buf->ub_buffer[0];
4246
4247 switch (ls_code) {
4248 case LA_ELS_PRLI:
4249 /*
4250 * We really don't care if something fails.
4251 * If the PRLI was not sent out, then the
4252 * other end will time it out.
4253 */
4254 if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4255 return (FC_SUCCESS);
4256 }
4257 return (FC_UNCLAIMED);
4258 /* NOTREACHED */
4259
4260 default:
4261 break;
4262 }
4263 }
4264 /* FALLTHROUGH */
4265
4266 default:
4267 return (FC_UNCLAIMED);
4268 }
4269 }
4270
4271
4272 /*ARGSUSED*/
4273 static int
4274 fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4275 uint32_t claimed)
4276 {
4277 return (FC_UNCLAIMED);
4278 }
4279
4280 /*
4281 * Function: fcp_statec_callback
4282 *
4283 * Description: The purpose of this function is to handle a port state change.
4284 * It is called from fp/fctl and, in a few instances, internally.
4285 *
4286 * Argument: ulph fp/fctl port handle
4287 * port_handle fcp_port structure
4288 * port_state Physical state of the port
4289 * port_top Topology
4290 * *devlist Pointer to the first entry of a table
4291 * containing the remote ports that can be
4292 * reached.
4293 * dev_cnt Number of entries pointed by devlist.
4294 * port_sid Port ID of the local port.
4295 *
4296 * Return Value: None
4297 */
4298 /*ARGSUSED*/
4299 static void
4300 fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
4301 uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
4302 uint32_t dev_cnt, uint32_t port_sid)
4303 {
4304 uint32_t link_count;
4305 int map_len = 0;
4306 struct fcp_port *pptr;
4307 fcp_map_tag_t *map_tag = NULL;
4308
4309 if ((pptr = fcp_get_port(port_handle)) == NULL) {
4310 fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
4311 return; /* nothing to work with! */
4312 }
4313
4314 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4315 fcp_trace, FCP_BUF_LEVEL_2, 0,
4316 "fcp_statec_callback: port state/dev_cnt/top ="
4317 "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
4318 dev_cnt, port_top);
4319
4320 mutex_enter(&pptr->port_mutex);
4321
4322 /*
4323 * If a thread is in detach, don't do anything.
4324 */
4325 if (pptr->port_state & (FCP_STATE_DETACHING |
4326 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4327 mutex_exit(&pptr->port_mutex);
4328 return;
4329 }
4330
4331 /*
4332 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
4333 * init_pkt is called, it knows whether or not the target's status
4334 * (or pd) might be changing.
4335 */
4336
4337 if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
4338 pptr->port_state |= FCP_STATE_IN_CB_DEVC;
4339 }
4340
4341 /*
4342 * the transport doesn't allocate or probe unless being
4343 * asked to by either the applications or ULPs
4344 *
4345 * in cases where the port is OFFLINE at the time of port
4346 * attach callback and the link comes ONLINE later, for
4347 * easier automatic node creation (i.e. without you having to
4348 * go out and run the utility to perform LOGINs) the
4349 * following conditional is helpful
4350 */
4351 pptr->port_phys_state = port_state;
4352
4353 if (dev_cnt) {
4354 mutex_exit(&pptr->port_mutex);
4355
4356 map_len = sizeof (*map_tag) * dev_cnt;
4357 map_tag = kmem_alloc(map_len, KM_NOSLEEP);
4358 if (map_tag == NULL) {
4359 fcp_log(CE_WARN, pptr->port_dip,
4360 "!fcp%d: failed to allocate for map tags; "
4361 " state change will not be processed",
4362 pptr->port_instance);
4363
4364 mutex_enter(&pptr->port_mutex);
4365 pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4366 mutex_exit(&pptr->port_mutex);
4367
4368 return;
4369 }
4370
4371 mutex_enter(&pptr->port_mutex);
4372 }
4373
4374 if (pptr->port_id != port_sid) {
4375 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4376 fcp_trace, FCP_BUF_LEVEL_3, 0,
4377 "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
4378 port_sid);
4379 /*
4380 * The local port changed ID. It is the first time a port ID
4381 * is assigned or something drastic happened. We might have
4382 * been unplugged and replugged on another loop or fabric port
4383 * or somebody grabbed the AL_PA we had or somebody rezoned
4384 * the fabric we were plugged into.
4385 */
4386 pptr->port_id = port_sid;
4387 }
4388
4389 switch (FC_PORT_STATE_MASK(port_state)) {
4390 case FC_STATE_OFFLINE:
4391 case FC_STATE_RESET_REQUESTED:
4392 /*
4393 * link has gone from online to offline -- just update the
4394 * state of this port to BUSY and MARKed to go offline
4395 */
4396 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4397 fcp_trace, FCP_BUF_LEVEL_3, 0,
4398 "link went offline");
4399 if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
4400 /*
4401 * We were offline a while ago and this one
4402 * seems to indicate that the loop has gone
4403 * dead forever.
4404 */
4405 pptr->port_tmp_cnt += dev_cnt;
4406 pptr->port_state &= ~FCP_STATE_OFFLINE;
4407 pptr->port_state |= FCP_STATE_INIT;
4408 link_count = pptr->port_link_cnt;
4409 fcp_handle_devices(pptr, devlist, dev_cnt,
4410 link_count, map_tag, FCP_CAUSE_LINK_DOWN);
4411 } else {
4412 pptr->port_link_cnt++;
4413 ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
4414 fcp_update_state(pptr, (FCP_LUN_BUSY |
4415 FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
4416 if (pptr->port_mpxio) {
4417 fcp_update_mpxio_path_verifybusy(pptr);
4418 }
4419 pptr->port_state |= FCP_STATE_OFFLINE;
4420 pptr->port_state &=
4421 ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
4422 pptr->port_tmp_cnt = 0;
4423 }
4424 mutex_exit(&pptr->port_mutex);
4425 break;
4426
4427 case FC_STATE_ONLINE:
4428 case FC_STATE_LIP:
4429 case FC_STATE_LIP_LBIT_SET:
4430 /*
4431 * link has gone from offline to online
4432 */
4433 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4434 fcp_trace, FCP_BUF_LEVEL_3, 0,
4435 "link went online");
4436
4437 pptr->port_link_cnt++;
4438
4439 while (pptr->port_ipkt_cnt) {
4440 mutex_exit(&pptr->port_mutex);
4441 delay(drv_usectohz(1000000));
4442 mutex_enter(&pptr->port_mutex);
4443 }
4444
4445 pptr->port_topology = port_top;
4446
4447 /*
4448 * The state of the targets and luns accessible through this
4449 * port is updated.
4450 */
4451 fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
4452 FCP_CAUSE_LINK_CHANGE);
4453
4454 pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
4455 pptr->port_state |= FCP_STATE_ONLINING;
4456 pptr->port_tmp_cnt = dev_cnt;
4457 link_count = pptr->port_link_cnt;
4458
4459 pptr->port_deadline = fcp_watchdog_time +
4460 FCP_ICMD_DEADLINE;
4461
4462 if (!dev_cnt) {
4463 /*
4464 * We go directly to the online state if no remote
4465 * ports were discovered.
4466 */
4467 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4468 fcp_trace, FCP_BUF_LEVEL_3, 0,
4469 "No remote ports discovered");
4470
4471 pptr->port_state &= ~FCP_STATE_ONLINING;
4472 pptr->port_state |= FCP_STATE_ONLINE;
4473 }
4474
4475 switch (port_top) {
4476 case FC_TOP_FABRIC:
4477 case FC_TOP_PUBLIC_LOOP:
4478 case FC_TOP_PRIVATE_LOOP:
4479 case FC_TOP_PT_PT:
4480
4481 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4482 fcp_retry_ns_registry(pptr, port_sid);
4483 }
4484
4485 fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
4486 map_tag, FCP_CAUSE_LINK_CHANGE);
4487 break;
4488
4489 default:
4490 /*
4491 * We got here because we were provided with an unknown
4492 * topology.
4493 */
4494 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4495 pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
4496 }
4497
4498 pptr->port_tmp_cnt -= dev_cnt;
4499 fcp_log(CE_WARN, pptr->port_dip,
4500 "!unknown/unsupported topology (0x%x)", port_top);
4501 break;
4502 }
4503 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4504 fcp_trace, FCP_BUF_LEVEL_3, 0,
4505 "Notify ssd of the reset to reinstate the reservations");
4506
4507 scsi_hba_reset_notify_callback(&pptr->port_mutex,
4508 &pptr->port_reset_notify_listf);
4509
4510 mutex_exit(&pptr->port_mutex);
4511
4512 break;
4513
4514 case FC_STATE_RESET:
4515 ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
4516 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4517 fcp_trace, FCP_BUF_LEVEL_3, 0,
4518 "RESET state, waiting for Offline/Online state_cb");
4519 mutex_exit(&pptr->port_mutex);
4520 break;
4521
4522 case FC_STATE_DEVICE_CHANGE:
4523 /*
4524 * We come here when an application has requested
4525 * Dynamic node creation/deletion in Fabric connectivity.
4526 */
4527 if (pptr->port_state & (FCP_STATE_OFFLINE |
4528 FCP_STATE_INIT)) {
4529 /*
4530 * This case can happen when the FCTL is in the
4531 * process of giving us on online and the host on
4532 * the other side issues a PLOGI/PLOGO. Ideally
4533 * the state changes should be serialized unless
4534 * they are opposite (online-offline).
4535 * The transport will give us a final state change
4536 * so we can ignore this for the time being.
4537 */
4538 pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4539 mutex_exit(&pptr->port_mutex);
4540 break;
4541 }
4542
4543 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4544 fcp_retry_ns_registry(pptr, port_sid);
4545 }
4546
4547 /*
4548 * Extend the deadline under steady state conditions
4549 * to provide more time for the device-change-commands
4550 */
4551 if (!pptr->port_ipkt_cnt) {
4552 pptr->port_deadline = fcp_watchdog_time +
4553 FCP_ICMD_DEADLINE;
4554 }
4555
4556 /*
4557 * There is another race condition here, where if we were
4558 * in ONLINEING state and a devices in the map logs out,
4559 * fp will give another state change as DEVICE_CHANGE
4560 * and OLD. This will result in that target being offlined.
4561 * The pd_handle is freed. If from the first statec callback
4562 * we were going to fire a PLOGI/PRLI, the system will
4563 * panic in fc_ulp_transport with invalid pd_handle.
4564 * The fix is to check for the link_cnt before issuing
4565 * any command down.
4566 */
4567 fcp_update_targets(pptr, devlist, dev_cnt,
4568 FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);
4569
4570 link_count = pptr->port_link_cnt;
4571
4572 fcp_handle_devices(pptr, devlist, dev_cnt,
4573 link_count, map_tag, FCP_CAUSE_TGT_CHANGE);
4574
4575 pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4576
4577 mutex_exit(&pptr->port_mutex);
4578 break;
4579
4580 case FC_STATE_TARGET_PORT_RESET:
4581 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4582 fcp_retry_ns_registry(pptr, port_sid);
4583 }
4584
4585 /* Do nothing else */
4586 mutex_exit(&pptr->port_mutex);
4587 break;
4588
4589 default:
4590 fcp_log(CE_WARN, pptr->port_dip,
4591 "!Invalid state change=0x%x", port_state);
4592 mutex_exit(&pptr->port_mutex);
4593 break;
4594 }
4595
4596 if (map_tag) {
4597 kmem_free(map_tag, map_len);
4598 }
4599 }
4600
4601 /*
4602 * Function: fcp_handle_devices
4603 *
4604 * Description: This function updates the devices currently known by
4605 * walking the list provided by the caller. The list passed
4606 * by the caller is supposed to be the list of reachable
4607 * devices.
4608 *
4609 * Argument: *pptr Fcp port structure.
4610 * *devlist Pointer to the first entry of a table
4611 * containing the remote ports that can be
4612 * reached.
4613 * dev_cnt Number of entries pointed by devlist.
4614 * link_cnt Link state count.
4615 * *map_tag Array of fcp_map_tag_t structures.
4616 * cause What caused this function to be called.
4617 *
4618 * Return Value: None
4619 *
4620 * Notes: The pptr->port_mutex must be held.
4621 */
static void
fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
    uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
{
	int			i;
	int			check_finish_init = 0;
	fc_portmap_t		*map_entry;
	struct fcp_tgt		*ptgt = NULL;

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "fcp_handle_devices: called for %d dev(s)", dev_cnt);

	/* The caller must have allocated one tag slot per devlist entry. */
	if (dev_cnt) {
		ASSERT(map_tag != NULL);
	}

	/*
	 * The following code goes through the list of remote ports that are
	 * accessible through this (pptr) local port (The list walked is the
	 * one provided by the caller which is the list of the remote ports
	 * currently reachable). It checks if any of them was already
	 * known by looking for the corresponding target structure based on
	 * the world wide name. If a target is part of the list it is tagged
	 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
	 *
	 * Old comment
	 * -----------
	 * Before we drop port mutex; we MUST get the tags updated; This
	 * two step process is somewhat slow, but more reliable.
	 *
	 * Note the loop bails out early if the link state count changes
	 * underneath us (another link event superseded this one).
	 */
	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
		map_entry = &(devlist[i]);

		/*
		 * get ptr to this map entry in our port's
		 * list (if any)
		 */
		ptgt = fcp_lookup_target(pptr,
		    (uchar_t *)&(map_entry->map_pwwn));

		if (ptgt) {
			/* Snapshot the change count for later staleness checks. */
			map_tag[i] = ptgt->tgt_change_cnt;
			if (cause == FCP_CAUSE_LINK_CHANGE) {
				ptgt->tgt_aux_state = FCP_TGT_TAGGED;
			}
		}
	}

	/*
	 * At this point we know which devices of the new list were already
	 * known (The field tgt_aux_state of the target structure has been
	 * set to FCP_TGT_TAGGED).
	 *
	 * The following code goes through the list of targets currently known
	 * by the local port (the list is actually a hashing table). If a
	 * target is found and is not tagged, it means the target cannot
	 * be reached anymore through the local port (pptr). It is offlined.
	 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
	 */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			mutex_enter(&ptgt->tgt_mutex);
			if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
			    (cause == FCP_CAUSE_LINK_CHANGE) &&
			    !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
				fcp_offline_target_now(pptr, ptgt,
				    link_cnt, ptgt->tgt_change_cnt, 0);
			}
			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	/*
	 * At this point, the devices that were known but cannot be reached
	 * anymore, have most likely been offlined.
	 *
	 * The following section of code seems to go through the list of
	 * remote ports that can now be reached. For every single one it
	 * checks if it is already known or if it is a new port.
	 */
	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {

		/*
		 * Finish the init work deferred from the previous iteration
		 * (check_finish_init is only ever set after i has been
		 * incremented, hence map_tag[i - 1]).
		 */
		if (check_finish_init) {
			ASSERT(i > 0);
			(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
			    map_tag[i - 1], cause);
			check_finish_init = 0;
		}

		/* get a pointer to this map entry */
		map_entry = &(devlist[i]);

		/*
		 * Check for the duplicate map entry flag. If we have marked
		 * this entry as a duplicate we skip it since the correct
		 * (perhaps even same) state change will be encountered
		 * later in the list.
		 */
		if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
			continue;
		}

		/* get ptr to this map entry in our port's list (if any) */
		ptgt = fcp_lookup_target(pptr,
		    (uchar_t *)&(map_entry->map_pwwn));

		if (ptgt) {
			/*
			 * This device was already known. The field
			 * tgt_aux_state is reset (was probably set to
			 * FCP_TGT_TAGGED previously in this routine).
			 */
			ptgt->tgt_aux_state = 0;
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "handle_devices: map did/state/type/flags = "
			    "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
			    "tgt_state=%d",
			    map_entry->map_did.port_id, map_entry->map_state,
			    map_entry->map_type, map_entry->map_flags,
			    ptgt->tgt_d_id, ptgt->tgt_state);
		}

		if (map_entry->map_type == PORT_DEVICE_OLD ||
		    map_entry->map_type == PORT_DEVICE_NEW ||
		    map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED ||
		    map_entry->map_type == PORT_DEVICE_CHANGED) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "map_type=%x, did = %x",
			    map_entry->map_type,
			    map_entry->map_did.port_id);
		}

		switch (map_entry->map_type) {
		case PORT_DEVICE_NOCHANGE:
		case PORT_DEVICE_USER_CREATE:
		case PORT_DEVICE_USER_LOGIN:
		case PORT_DEVICE_NEW:
		case PORT_DEVICE_REPORTLUN_CHANGED:
			FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);

			/*
			 * New or unchanged device: kick off discovery
			 * (PLOGI/PRLI or REPORT LUN). ptgt may be NULL here;
			 * fcp_handle_mapflags allocates the target if so.
			 */
			if (fcp_handle_mapflags(pptr, ptgt, map_entry,
			    link_cnt, (ptgt) ? map_tag[i] : 0,
			    cause) == TRUE) {

				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_2);
				check_finish_init++;
			}
			break;

		case PORT_DEVICE_OLD:
			if (ptgt != NULL) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_3);

				mutex_enter(&ptgt->tgt_mutex);
				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
					/*
					 * Must do an in-line wait for I/Os
					 * to get drained
					 */
					mutex_exit(&ptgt->tgt_mutex);
					mutex_exit(&pptr->port_mutex);

					/*
					 * Both mutexes are dropped while we
					 * poll for outstanding internal
					 * packets and LUN commands to drain;
					 * they are reacquired (port first,
					 * then target) before offlining.
					 */
					mutex_enter(&ptgt->tgt_mutex);
					while (ptgt->tgt_ipkt_cnt ||
					    fcp_outstanding_lun_cmds(ptgt)
					    == FC_SUCCESS) {
						mutex_exit(&ptgt->tgt_mutex);
						delay(drv_usectohz(1000000));
						mutex_enter(&ptgt->tgt_mutex);
					}
					mutex_exit(&ptgt->tgt_mutex);

					mutex_enter(&pptr->port_mutex);
					mutex_enter(&ptgt->tgt_mutex);

					(void) fcp_offline_target(pptr, ptgt,
					    link_cnt, map_tag[i], 0, 0);
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
			check_finish_init++;
			break;

		case PORT_DEVICE_USER_DELETE:
		case PORT_DEVICE_USER_LOGOUT:
			if (ptgt != NULL) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_4);

				mutex_enter(&ptgt->tgt_mutex);
				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
					(void) fcp_offline_target(pptr, ptgt,
					    link_cnt, map_tag[i], 1, 0);
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
			check_finish_init++;
			break;

		case PORT_DEVICE_CHANGED:
			if (ptgt != NULL) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_5);

				if (fcp_device_changed(pptr, ptgt,
				    map_entry, link_cnt, map_tag[i],
				    cause) == TRUE) {
					check_finish_init++;
				}
			} else {
				/* "Changed" but never seen: treat as new. */
				if (fcp_handle_mapflags(pptr, ptgt,
				    map_entry, link_cnt, 0, cause) == TRUE) {
					check_finish_init++;
				}
			}
			break;

		default:
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Invalid map_type=0x%x", map_entry->map_type);
			check_finish_init++;
			break;
		}
	}

	/*
	 * Flush any finish-init work left pending by the last iteration,
	 * but only if no newer link event has invalidated this pass.
	 */
	if (check_finish_init && pptr->port_link_cnt == link_cnt) {
		ASSERT(i > 0);
		(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
		    map_tag[i-1], cause);
	} else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
		/* Empty map: everything previously known goes offline. */
		fcp_offline_all(pptr, link_cnt, cause);
	}
}
4861
4862 static int
4863 fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause)
4864 {
4865 struct fcp_lun *plun;
4866 struct fcp_port *pptr;
4867 int rscn_count;
4868 int lun0_newalloc;
4869 int ret = TRUE;
4870
4871 ASSERT(ptgt);
4872 pptr = ptgt->tgt_port;
4873 lun0_newalloc = 0;
4874 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
4875 /*
4876 * no LUN struct for LUN 0 yet exists,
4877 * so create one
4878 */
4879 plun = fcp_alloc_lun(ptgt);
4880 if (plun == NULL) {
4881 fcp_log(CE_WARN, pptr->port_dip,
4882 "!Failed to allocate lun 0 for"
4883 " D_ID=%x", ptgt->tgt_d_id);
4884 return (ret);
4885 }
4886 lun0_newalloc = 1;
4887 }
4888
4889 mutex_enter(&ptgt->tgt_mutex);
4890 /*
4891 * consider lun 0 as device not connected if it is
4892 * offlined or newly allocated
4893 */
4894 if ((plun->lun_state & FCP_LUN_OFFLINE) || lun0_newalloc) {
4895 plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
4896 }
4897 plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
4898 plun->lun_state &= ~FCP_LUN_OFFLINE;
4899 ptgt->tgt_lun_cnt = 1;
4900 ptgt->tgt_report_lun_cnt = 0;
4901 mutex_exit(&ptgt->tgt_mutex);
4902
4903 rscn_count = fc_ulp_get_rscn_count(pptr->port_fp_handle);
4904 if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
4905 sizeof (struct fcp_reportlun_resp), pptr->port_link_cnt,
4906 ptgt->tgt_change_cnt, cause, rscn_count) != DDI_SUCCESS) {
4907 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4908 fcp_trace, FCP_BUF_LEVEL_3, 0, "!Failed to send REPORTLUN "
4909 "to D_ID=%x", ptgt->tgt_d_id);
4910 } else {
4911 ret = FALSE;
4912 }
4913
4914 return (ret);
4915 }
4916
4917 /*
4918 * Function: fcp_handle_mapflags
4919 *
4920 * Description: This function creates a target structure if the ptgt passed
4921 * is NULL. It also kicks off the PLOGI if we are not logged
4922 * into the target yet or the PRLI if we are logged into the
4923 * target already. The rest of the treatment is done in the
4924 * callbacks of the PLOGI or PRLI.
4925 *
4926 * Argument: *pptr FCP Port structure.
4927 * *ptgt Target structure.
4928 * *map_entry Array of fc_portmap_t structures.
4929 * link_cnt Link state count.
4930 * tgt_cnt Target state count.
4931 * cause What caused this function to be called.
4932 *
4933 * Return Value: TRUE Failed
4934 * FALSE Succeeded
4935 *
4936 * Notes: pptr->port_mutex must be owned.
4937 */
static int
fcp_handle_mapflags(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
{
	int		lcount;
	int		tcount;
	int		ret = TRUE;
	int		alloc;
	struct fcp_ipkt	*icmd;
	struct fcp_lun	*pseq_lun = NULL;
	uchar_t		opcode;
	int		valid_ptgt_was_passed = FALSE;

	ASSERT(mutex_owned(&pptr->port_mutex));

	/*
	 * This case is possible where the FCTL has come up and done discovery
	 * before FCP was loaded and attached. FCTL would have discovered the
	 * devices and later the ULP came online. In this case ULP's would get
	 * PORT_DEVICE_NOCHANGE but target would be NULL.
	 */
	if (ptgt == NULL) {
		/*
		 * don't already have a target; port_mutex is dropped around
		 * the allocation, so the port state may change underneath us.
		 */
		mutex_exit(&pptr->port_mutex);
		ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
		mutex_enter(&pptr->port_mutex);

		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			return (ret);
		}
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_statec_cause = cause;
		ptgt->tgt_tmp_cnt = 1;
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		valid_ptgt_was_passed = TRUE;
	}

	/*
	 * Copy in the target parameters (D_ID, hard address, pd handle)
	 * from the map entry provided by the transport.
	 */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_d_id = map_entry->map_did.port_id;
	ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
	ptgt->tgt_pd_handle = map_entry->map_pd;
	ptgt->tgt_fca_dev = NULL;

	/* Copy port and node WWNs */
	bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
	    FC_WWN_SIZE);
	bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
	    FC_WWN_SIZE);

	if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
	    (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
	    (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
	    valid_ptgt_was_passed) {
		/*
		 * determine if there are any tape LUNs on this target
		 *
		 * If a sequential-access (tape) LUN is online, skip full
		 * rediscovery: clear the MARK bits and return TRUE so the
		 * caller treats this target as intentionally skipped.
		 */
		for (pseq_lun = ptgt->tgt_lun;
		    pseq_lun != NULL;
		    pseq_lun = pseq_lun->lun_next) {
			if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
			    !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
				fcp_update_tgt_state(ptgt, FCP_RESET,
				    FCP_LUN_MARK);
				mutex_exit(&ptgt->tgt_mutex);
				return (ret);
			}
		}
	}

	/*
	 * if UA'REPORT_LUN_CHANGED received,
	 * send out REPORT LUN promptly, skip PLOGI/PRLI process
	 */
	if (map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED) {
		ptgt->tgt_state &= ~(FCP_TGT_OFFLINE | FCP_TGT_MARK);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		ret = fcp_handle_reportlun_changed(ptgt, cause);

		mutex_enter(&pptr->port_mutex);
		return (ret);
	}

	/*
	 * If ptgt was NULL when this function was entered, then tgt_node_state
	 * was never specifically initialized but zeroed out which means
	 * FCP_TGT_NODE_NONE.
	 */
	switch (ptgt->tgt_node_state) {
	case FCP_TGT_NODE_NONE:
	case FCP_TGT_NODE_ON_DEMAND:
		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    !fcp_enable_auto_configuration &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    fcp_enable_auto_configuration &&
		    (ptgt->tgt_manual_config_only == 1) &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			/*
			 * If auto configuration is set and
			 * the tgt_manual_config_only flag is set then
			 * we only want the user to be able to change
			 * the state through create_on_demand.
			 */
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
		break;

	case FCP_TGT_NODE_PRESENT:
		break;
	}
	/*
	 * If we are booting from a fabric device, make sure we
	 * mark the node state appropriately for this target to be
	 * enumerated
	 */
	if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
		if (bcmp((caddr_t)pptr->port_boot_wwn,
		    (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
		    sizeof (ptgt->tgt_port_wwn)) == 0) {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
	}
	mutex_exit(&ptgt->tgt_mutex);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
	    map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
	    map_entry->map_rscn_info.ulp_rscn_count);

	mutex_enter(&ptgt->tgt_mutex);

	/*
	 * Reset target OFFLINE state and mark the target BUSY
	 */
	ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
	ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);

	/* tgt_cnt == 0 means "use the target's current change count". */
	tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
	lcount = link_cnt;

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/*
	 * if we are already logged in, then we do a PRLI, else
	 * we do a PLOGI first (to get logged in)
	 *
	 * We will not check if we are the PLOGI initiator
	 */
	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;

	/* One buffer sized for the larger of the two possible payloads. */
	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));

	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA, lcount, tcount,
	    cause, map_entry->map_rscn_info.ulp_rscn_count);

	if (icmd == NULL) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
		/*
		 * We've exited port_mutex before calling fcp_icmd_alloc,
		 * we need to make sure we reacquire it before returning.
		 *
		 * NOTE(review): allocation failure returns FALSE here even
		 * though the header documents TRUE as the failure value;
		 * per the comment below, TRUE is reserved for "target
		 * intentionally skipped" — confirm callers rely on this.
		 */
		mutex_enter(&pptr->port_mutex);
		return (FALSE);
	}

	/* TRUE is only returned while target is intended skipped */
	ret = FALSE;
	/* discover info about this target */
	if ((fcp_send_els(pptr, ptgt, icmd, opcode,
	    lcount, tcount, cause)) == DDI_SUCCESS) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
	} else {
		/* Send failed: release the internal packet we allocated. */
		fcp_icmd_free(pptr, icmd);
		ret = TRUE;
	}
	mutex_enter(&pptr->port_mutex);

	return (ret);
}
5132
5133 /*
5134 * Function: fcp_send_els
5135 *
5136 * Description: Sends an ELS to the target specified by the caller. Supports
5137 * PLOGI and PRLI.
5138 *
5139 * Argument: *pptr Fcp port.
5140 * *ptgt Target to send the ELS to.
5141 * *icmd Internal packet
5142 * opcode ELS opcode
5143 * lcount Link state change counter
5144 * tcount Target state change counter
5145 * cause What caused the call
5146 *
5147 * Return Value: DDI_SUCCESS
5148 * Others
5149 */
static int
fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
{
	fc_packet_t	*fpkt;
	fc_frame_hdr_t	*hp;
	int	internal = 0;	/* nonzero if we allocated icmd ourselves */
	int	alloc;
	int	cmd_len;
	int	resp_len;
	int	res = DDI_FAILURE; /* default result */
	int	rval = DDI_FAILURE;

	ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
	ASSERT(ptgt->tgt_port == pptr);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
	    (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");

	/* Payload sizes depend on which ELS we are sending. */
	if (opcode == LA_ELS_PLOGI) {
		cmd_len = sizeof (la_els_logi_t);
		resp_len = sizeof (la_els_logi_t);
	} else {
		ASSERT(opcode == LA_ELS_PRLI);
		cmd_len = sizeof (la_els_prli_t);
		resp_len = sizeof (la_els_prli_t);
	}

	/*
	 * The caller may pass icmd == NULL, in which case we allocate an
	 * internal packet here and remember (via 'internal') that we own
	 * it and must free it on failure.
	 */
	if (icmd == NULL) {
		alloc = FCP_MAX(sizeof (la_els_logi_t),
		    sizeof (la_els_prli_t));
		icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
		    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
		    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
		if (icmd == NULL) {
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
			return (res);
		}
		internal++;
	}
	fpkt = icmd->ipkt_fpkt;

	fpkt->pkt_cmdlen = cmd_len;
	fpkt->pkt_rsplen = resp_len;
	fpkt->pkt_datalen = 0;
	icmd->ipkt_retries = 0;

	/* fill in fpkt info */
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;

	/* get ptr to frame hdr in fpkt */
	hp = &fpkt->pkt_cmd_fhdr;

	/*
	 * fill in frame hdr
	 */
	hp->r_ctl = R_CTL_ELS_REQ;
	hp->s_id = pptr->port_id;	/* source ID */
	hp->d_id = ptgt->tgt_d_id;	/* dest ID */
	hp->type = FC_TYPE_EXTENDED_LS;
	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
	hp->seq_id = 0;
	hp->rsvd = 0;
	hp->df_ctl = 0;
	hp->seq_cnt = 0;
	hp->ox_id = 0xffff;		/* i.e. none */
	hp->rx_id = 0xffff;		/* i.e. none */
	hp->ro = 0;

	/*
	 * at this point we have a filled in cmd pkt
	 *
	 * fill in the respective info, then use the transport to send
	 * the packet
	 *
	 * for a PLOGI call fc_ulp_login(), and
	 * for a PRLI call fc_ulp_issue_els()
	 */
	switch (opcode) {
	case LA_ELS_PLOGI: {
		struct la_els_logi logi;

		bzero(&logi, sizeof (struct la_els_logi));

		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;
		logi.ls_code.ls_code = LA_ELS_PLOGI;
		logi.ls_code.mbz = 0;

		/* Copy the payload into the (possibly DMA) command buffer. */
		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));

		icmd->ipkt_opcode = LA_ELS_PLOGI;

		/*
		 * Re-check under port_mutex that no link/target state change
		 * has invalidated this command before handing it to the
		 * transport.
		 */
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

			mutex_exit(&pptr->port_mutex);

			rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}

			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);

			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PLOGI");
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "fcp_send_els1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
		}
		break;
	}

	case LA_ELS_PRLI: {
		struct la_els_prli prli;
		struct fcp_prli *fprli;

		bzero(&prli, sizeof (struct la_els_prli));

		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;

		/* fill in PRLI cmd ELS fields */
		prli.ls_code = LA_ELS_PRLI;
		prli.page_length = 0x10; /* PRLI service param page is 16 bytes */
		prli.payload_length = sizeof (struct la_els_prli);

		icmd->ipkt_opcode = LA_ELS_PRLI;

		/* get ptr to PRLI service params */
		fprli = (struct fcp_prli *)prli.service_params;

		/* fill in service params */
		fprli->type = 0x08;	/* FC-4 type: SCSI-FCP */
		fprli->resvd1 = 0;
		fprli->orig_process_assoc_valid = 0;
		fprli->resp_process_assoc_valid = 0;
		fprli->establish_image_pair = 1;
		fprli->resvd2 = 0;
		fprli->resvd3 = 0;
		fprli->obsolete_1 = 0;
		fprli->obsolete_2 = 0;
		fprli->data_overlay_allowed = 0;
		fprli->initiator_fn = 1;
		fprli->confirmed_compl_allowed = 1;

		/* Advertise target function only if the target ULP exists. */
		if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
			fprli->target_fn = 1;
		} else {
			fprli->target_fn = 0;
		}

		fprli->retry = 1;
		fprli->read_xfer_rdy_disabled = 1;
		fprli->write_xfer_rdy_disabled = 0;

		FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));

		/* issue the PRLI request */

		/* Same state-change re-check as the PLOGI path above. */
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

			mutex_exit(&pptr->port_mutex);

			rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}

			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);

			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PRLI");
		} else {
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
		}
		break;
	}

	default:
		fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
		break;
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: returning %d", res);

	/* On failure, free the packet only if we allocated it here. */
	if (res != DDI_SUCCESS) {
		if (internal) {
			fcp_icmd_free(pptr, icmd);
		}
	}

	return (res);
}
5361
5362
5363 /*
5364 * called internally update the state of all of the tgts and each LUN
5365 * for this port (i.e. each target known to be attached to this port)
5366 * if they are not already offline
5367 *
5368 * must be called with the port mutex owned
5369 *
5370 * acquires and releases the target mutexes for each target attached
5371 * to this port
5372 */
5373 void
5374 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5375 {
5376 int i;
5377 struct fcp_tgt *ptgt;
5378
5379 ASSERT(mutex_owned(&pptr->port_mutex));
5380
5381 for (i = 0; i < FCP_NUM_HASH; i++) {
5382 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5383 ptgt = ptgt->tgt_next) {
5384 mutex_enter(&ptgt->tgt_mutex);
5385 fcp_update_tgt_state(ptgt, FCP_SET, state);
5386 ptgt->tgt_change_cnt++;
5387 ptgt->tgt_statec_cause = cause;
5388 ptgt->tgt_tmp_cnt = 1;
5389 ptgt->tgt_done = 0;
5390 mutex_exit(&ptgt->tgt_mutex);
5391 }
5392 }
5393 }
5394
5395
5396 static void
5397 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5398 {
5399 int i;
5400 int ndevs;
5401 struct fcp_tgt *ptgt;
5402
5403 ASSERT(mutex_owned(&pptr->port_mutex));
5404
5405 for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5406 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5407 ptgt = ptgt->tgt_next) {
5408 ndevs++;
5409 }
5410 }
5411
5412 if (ndevs == 0) {
5413 return;
5414 }
5415 pptr->port_tmp_cnt = ndevs;
5416
5417 for (i = 0; i < FCP_NUM_HASH; i++) {
5418 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5419 ptgt = ptgt->tgt_next) {
5420 (void) fcp_call_finish_init_held(pptr, ptgt,
5421 lcount, ptgt->tgt_change_cnt, cause);
5422 }
5423 }
5424 }
5425
5426 /*
5427 * Function: fcp_update_tgt_state
5428 *
5429 * Description: This function updates the field tgt_state of a target. That
5430 * field is a bitmap and which bit can be set or reset
5431 * individually. The action applied to the target state is also
5432 * applied to all the LUNs belonging to the target (provided the
5433 * LUN is not offline). A side effect of applying the state
5434 * modification to the target and the LUNs is the field tgt_trace
5435 * of the target and lun_trace of the LUNs is set to zero.
5436 *
5437 *
5438 * Argument: *ptgt Target structure.
5439 * flag Flag indication what action to apply (set/reset).
5440 * state State bits to update.
5441 *
5442 * Return Value: None
5443 *
5444 * Context: Interrupt, Kernel or User context.
5445 * The mutex of the target (ptgt->tgt_mutex) must be owned when
5446 * calling this function.
5447 */
5448 void
5449 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5450 {
5451 struct fcp_lun *plun;
5452
5453 ASSERT(mutex_owned(&ptgt->tgt_mutex));
5454
5455 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5456 /* The target is not offline. */
5457 if (flag == FCP_SET) {
5458 ptgt->tgt_state |= state;
5459 ptgt->tgt_trace = 0;
5460 } else {
5461 ptgt->tgt_state &= ~state;
5462 }
5463
5464 for (plun = ptgt->tgt_lun; plun != NULL;
5465 plun = plun->lun_next) {
5466 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5467 /* The LUN is not offline. */
5468 if (flag == FCP_SET) {
5469 plun->lun_state |= state;
5470 plun->lun_trace = 0;
5471 } else {
5472 plun->lun_state &= ~state;
5473 }
5474 }
5475 }
5476 }
5477 }
5478
5479 /*
 * Function:	fcp_update_lun_state
5481 *
5482 * Description: This function updates the field lun_state of a LUN. That
 *		field is a bitmap whose bits can be set or reset
5484 * individually.
5485 *
5486 * Argument: *plun LUN structure.
5487 * flag Flag indication what action to apply (set/reset).
5488 * state State bits to update.
5489 *
5490 * Return Value: None
5491 *
5492 * Context: Interrupt, Kernel or User context.
5493 * The mutex of the target (ptgt->tgt_mutex) must be owned when
5494 * calling this function.
5495 */
5496 void
5497 fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
5498 {
5499 struct fcp_tgt *ptgt = plun->lun_tgt;
5500
5501 ASSERT(mutex_owned(&ptgt->tgt_mutex));
5502
5503 if (!(plun->lun_state & FCP_TGT_OFFLINE)) {
5504 if (flag == FCP_SET) {
5505 plun->lun_state |= state;
5506 } else {
5507 plun->lun_state &= ~state;
5508 }
5509 }
5510 }
5511
5512 /*
5513 * Function: fcp_get_port
5514 *
5515 * Description: This function returns the fcp_port structure from the opaque
5516 * handle passed by the caller. That opaque handle is the handle
5517 * used by fp/fctl to identify a particular local port. That
5518 * handle has been stored in the corresponding fcp_port
5519 * structure. This function is going to walk the global list of
5520 * fcp_port structures till one has a port_fp_handle that matches
5521 * the handle passed by the caller. This function enters the
5522 * mutex fcp_global_mutex while walking the global list and then
5523 * releases it.
5524 *
5525 * Argument: port_handle Opaque handle that fp/fctl uses to identify a
5526 * particular port.
5527 *
5528 * Return Value: NULL Not found.
5529 * Not NULL Pointer to the fcp_port structure.
5530 *
5531 * Context: Interrupt, Kernel or User context.
5532 */
5533 static struct fcp_port *
5534 fcp_get_port(opaque_t port_handle)
5535 {
5536 struct fcp_port *pptr;
5537
5538 ASSERT(port_handle != NULL);
5539
5540 mutex_enter(&fcp_global_mutex);
5541 for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5542 if (pptr->port_fp_handle == port_handle) {
5543 break;
5544 }
5545 }
5546 mutex_exit(&fcp_global_mutex);
5547
5548 return (pptr);
5549 }
5550
5551
5552 static void
5553 fcp_unsol_callback(fc_packet_t *fpkt)
5554 {
5555 struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5556 struct fcp_port *pptr = icmd->ipkt_port;
5557
5558 if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5559 caddr_t state, reason, action, expln;
5560
5561 (void) fc_ulp_pkt_error(fpkt, &state, &reason,
5562 &action, &expln);
5563
5564 fcp_log(CE_WARN, pptr->port_dip,
5565 "!couldn't post response to unsolicited request: "
5566 " state=%s reason=%s rx_id=%x ox_id=%x",
5567 state, reason, fpkt->pkt_cmd_fhdr.ox_id,
5568 fpkt->pkt_cmd_fhdr.rx_id);
5569 }
5570 fcp_icmd_free(pptr, icmd);
5571 }
5572
5573
5574 /*
5575 * Perform general purpose preparation of a response to an unsolicited request
5576 */
5577 static void
5578 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
5579 uchar_t r_ctl, uchar_t type)
5580 {
5581 pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
5582 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
5583 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
5584 pkt->pkt_cmd_fhdr.type = type;
5585 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
5586 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
5587 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl;
5588 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
5589 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
5590 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
5591 pkt->pkt_cmd_fhdr.ro = 0;
5592 pkt->pkt_cmd_fhdr.rsvd = 0;
5593 pkt->pkt_comp = fcp_unsol_callback;
5594 pkt->pkt_pd = NULL;
5595 pkt->pkt_ub_resp_token = (opaque_t)buf;
5596 }
5597
5598
/*
 * Function:	fcp_unsol_prli
 *
 * Description:	Handles an unsolicited PRLI by building a PRLI accept
 *		(LS_ACC) advertising this port's FCP service parameters and
 *		sending it back to the originator via fc_ulp_issue_els().
 *		The response is tied to the current link/target generation
 *		counters; if the link state changed before the send, the
 *		packet is freed and FC_FAILURE returned.
 *
 * Argument:	*pptr	FCP port that received the request.
 *		*buf	Unsolicited buffer holding the PRLI payload.
 *
 * Return Value: FC_SUCCESS	Response issued (or queued for retry).
 *		 FC_FAILURE	Allocation failed or link state changed.
 */
/*ARGSUSED*/
static int
fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
{
	fc_packet_t		*fpkt;
	struct la_els_prli	prli;
	struct fcp_prli		*fprli;
	struct fcp_ipkt		*icmd;
	struct la_els_prli	*from;
	struct fcp_prli		*orig;
	struct fcp_tgt		*ptgt;
	int			tcount = 0;
	int			lcount;

	from = (struct la_els_prli *)buf->ub_buffer;
	orig = (struct fcp_prli *)from->service_params;
	/* Snapshot the target's change counter if we know the sender. */
	if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
	    NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		tcount = ptgt->tgt_change_cnt;
		mutex_exit(&ptgt->tgt_mutex);
	}

	/* Snapshot the link counter to detect later state changes. */
	mutex_enter(&pptr->port_mutex);
	lcount = pptr->port_link_cnt;
	mutex_exit(&pptr->port_mutex);

	if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
	    sizeof (la_els_prli_t), 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT)) == NULL) {
		return (FC_FAILURE);
	}

	fpkt = icmd->ipkt_fpkt;
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
	fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
	fpkt->pkt_rsplen = 0;
	fpkt->pkt_datalen = 0;

	icmd->ipkt_opcode = LA_ELS_PRLI;

	/* Build the PRLI accept payload. */
	bzero(&prli, sizeof (struct la_els_prli));
	fprli = (struct fcp_prli *)prli.service_params;
	prli.ls_code = LA_ELS_ACC;
	prli.page_length = 0x10;
	prli.payload_length = sizeof (struct la_els_prli);

	/* fill in service params */
	fprli->type = 0x08;
	fprli->resvd1 = 0;
	/* Echo the originator's process-associator information. */
	fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
	fprli->orig_process_associator = orig->orig_process_associator;
	fprli->resp_process_assoc_valid = 0;
	fprli->establish_image_pair = 1;
	fprli->resvd2 = 0;
	fprli->resvd3 = 0;
	fprli->obsolete_1 = 0;
	fprli->obsolete_2 = 0;
	fprli->data_overlay_allowed = 0;
	fprli->initiator_fn = 1;
	fprli->confirmed_compl_allowed = 1;

	/* Advertise target function only if the target-mode ULP is loaded. */
	if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
		fprli->target_fn = 1;
	} else {
		fprli->target_fn = 0;
	}

	fprli->retry = 1;
	fprli->read_xfer_rdy_disabled = 1;
	fprli->write_xfer_rdy_disabled = 0;

	/* save the unsol prli payload first */
	FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
	    fpkt->pkt_resp_acc, sizeof (struct la_els_prli));

	FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
	    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));

	fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);

	/* Only send if the link hasn't changed since the counters were read. */
	mutex_enter(&pptr->port_mutex);
	if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
		int rval;
		mutex_exit(&pptr->port_mutex);

		if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
		    FC_SUCCESS) {
			/* Transient conditions: queue for a later retry. */
			if ((rval == FC_STATEC_BUSY || rval == FC_OFFLINE) &&
			    ptgt != NULL) {
				fcp_queue_ipkt(pptr, fpkt);
				return (FC_SUCCESS);
			}
			/* Let it timeout */
			fcp_icmd_free(pptr, icmd);
			return (FC_FAILURE);
		}
	} else {
		mutex_exit(&pptr->port_mutex);
		fcp_icmd_free(pptr, icmd);
		return (FC_FAILURE);
	}

	/* Give the unsolicited buffer back to the transport. */
	(void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);

	return (FC_SUCCESS);
}
5709
5710 /*
5711 * Function: fcp_icmd_alloc
5712 *
 * Description:	This function allocates a fcp_ipkt structure. The pkt_comp
5714 * field is initialized to fcp_icmd_callback. Sometimes it is
5715 * modified by the caller (such as fcp_send_scsi). The
5716 * structure is also tied to the state of the line and of the
5717 * target at a particular time. That link is established by
5718 * setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
5719 * and tcount which came respectively from pptr->link_cnt and
5720 * ptgt->tgt_change_cnt.
5721 *
5722 * Argument: *pptr Fcp port.
5723 * *ptgt Target (destination of the command).
5724 * cmd_len Length of the command.
5725 * resp_len Length of the expected response.
5726 * data_len Length of the data.
 *		nodma		Indicates whether the command and response
 *				will be transferred through DMA or not.
5729 * lcount Link state change counter.
5730 * tcount Target state change counter.
5731 * cause Reason that lead to this call.
5732 *
5733 * Return Value: NULL Failed.
5734 * Not NULL Internal packet address.
5735 */
static struct fcp_ipkt *
fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
    int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
    uint32_t rscn_count)
{
	int dma_setup = 0;
	fc_packet_t *fpkt;
	struct fcp_ipkt *icmd = NULL;

	/*
	 * One allocation covers three consecutive regions: the fcp_ipkt
	 * itself, the DMA cookie array (port_dmacookie_sz) and the FCA
	 * private area (port_priv_pkt_len) -- carved up below.
	 */
	icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
	    pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
	    KM_NOSLEEP);
	if (icmd == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!internal packet allocation failed");
		return (NULL);
	}

	/*
	 * initialize the allocated packet
	 */
	icmd->ipkt_nodma = nodma;
	icmd->ipkt_next = icmd->ipkt_prev = NULL;
	icmd->ipkt_lun = NULL;

	/* Tie the packet to the current link/target generation. */
	icmd->ipkt_link_cnt = lcount;
	icmd->ipkt_change_cnt = tcount;
	icmd->ipkt_cause = cause;

	mutex_enter(&pptr->port_mutex);
	icmd->ipkt_port = pptr;
	mutex_exit(&pptr->port_mutex);

	/* keep track of amt of data to be sent in pkt */
	icmd->ipkt_cmdlen = cmd_len;
	icmd->ipkt_resplen = resp_len;
	icmd->ipkt_datalen = data_len;

	/* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
	icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);

	/* set pkt's private ptr to point to cmd pkt */
	icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;

	/* set FCA private ptr to memory just beyond */
	icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
	    ((char *)icmd + sizeof (struct fcp_ipkt) +
	    pptr->port_dmacookie_sz);

	/* get ptr to fpkt substruct and fill it in */
	fpkt = icmd->ipkt_fpkt;
	/* The cookie array sits directly after the fcp_ipkt. */
	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
	    sizeof (struct fcp_ipkt));

	if (ptgt != NULL) {
		icmd->ipkt_tgt = ptgt;
		fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
	}

	/* Default completion callback; some callers override it. */
	fpkt->pkt_comp = fcp_icmd_callback;
	fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
	fpkt->pkt_cmdlen = cmd_len;
	fpkt->pkt_rsplen = resp_len;
	fpkt->pkt_datalen = data_len;

	/*
	 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
	 * rscn_count as fcp knows down to the transport. If a valid count was
	 * passed into this function, we allocate memory to actually pass down
	 * this info.
	 *
	 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
	 * basically mean that fcp will not be able to help transport
	 * distinguish if a new RSCN has come after fcp was last informed about
	 * it. In such cases, it might lead to the problem mentioned in CR/bug #
	 * 5068068 where the device might end up going offline in case of RSCN
	 * storms.
	 */
	fpkt->pkt_ulp_rscn_infop = NULL;
	if (rscn_count != FC_INVALID_RSCN_COUNT) {
		fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
		    sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
		if (fpkt->pkt_ulp_rscn_infop == NULL) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "Failed to alloc memory to pass rscn info");
		}
	}

	if (fpkt->pkt_ulp_rscn_infop != NULL) {
		fc_ulp_rscn_info_t *rscnp;

		rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
		rscnp->ulp_rscn_count = rscn_count;
	}

	if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
		goto fail;
	}
	/* From here on, the fail path must also release DMA resources. */
	dma_setup++;

	/*
	 * Must hold target mutex across setting of pkt_pd and call to
	 * fc_ulp_init_packet to ensure the handle to the target doesn't go
	 * away while we're not looking.
	 */
	if (ptgt != NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		fpkt->pkt_pd = ptgt->tgt_pd_handle;

		/* ask transport to do its initialization on this pkt */
		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
		    != FC_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fc_ulp_init_packet failed");
			mutex_exit(&ptgt->tgt_mutex);
			goto fail;
		}
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
		    != FC_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fc_ulp_init_packet failed");
			goto fail;
		}
	}

	/*
	 * Refuse new internal packets when the port is on its way down;
	 * undo the transport initialization before bailing out.
	 */
	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & (FCP_STATE_DETACHING |
	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
		int rval;

		mutex_exit(&pptr->port_mutex);

		rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
		ASSERT(rval == FC_SUCCESS);

		goto fail;
	}

	/* Account for the outstanding packet on the target and the port. */
	if (ptgt != NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_ipkt_cnt++;
		mutex_exit(&ptgt->tgt_mutex);
	}

	pptr->port_ipkt_cnt++;

	mutex_exit(&pptr->port_mutex);

	return (icmd);

fail:
	/* Release everything acquired so far, in reverse order. */
	if (fpkt->pkt_ulp_rscn_infop != NULL) {
		kmem_free(fpkt->pkt_ulp_rscn_infop,
		    sizeof (fc_ulp_rscn_info_t));
		fpkt->pkt_ulp_rscn_infop = NULL;
	}

	if (dma_setup) {
		fcp_free_dma(pptr, icmd);
	}
	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
	    (size_t)pptr->port_dmacookie_sz);

	return (NULL);
}
5906
5907 /*
5908 * Function: fcp_icmd_free
5909 *
5910 * Description: Frees the internal command passed by the caller.
5911 *
5912 * Argument: *pptr Fcp port.
5913 * *icmd Internal packet to free.
5914 *
5915 * Return Value: None
5916 */
static void
fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
{
	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;

	/* Let the underlying layers do their cleanup. */
	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
	    icmd->ipkt_fpkt);

	/* Free the RSCN info block, if one was attached by fcp_icmd_alloc. */
	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
		kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
		    sizeof (fc_ulp_rscn_info_t));
	}

	fcp_free_dma(pptr, icmd);

	/* Release the combined ipkt + cookie array + FCA private area. */
	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
	    (size_t)pptr->port_dmacookie_sz);

	/* Drop the outstanding-packet counts taken in fcp_icmd_alloc. */
	mutex_enter(&pptr->port_mutex);

	if (ptgt) {
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_ipkt_cnt--;
		mutex_exit(&ptgt->tgt_mutex);
	}

	pptr->port_ipkt_cnt--;
	mutex_exit(&pptr->port_mutex);
}
5947
5948 /*
5949 * Function: fcp_alloc_dma
5950 *
 * Description:	Allocates the DMA resources required for the internal
5952 * packet.
5953 *
5954 * Argument: *pptr FCP port.
5955 * *icmd Internal FCP packet.
5956 * nodma Indicates if the Cmd and Resp will be DMAed.
5957 * flags Allocation flags (Sleep or NoSleep).
5958 *
5959 * Return Value: FC_SUCCESS
5960 * FC_NOMEM
5961 */
static int
fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
    int nodma, int flags)
{
	int		rval;
	size_t		real_size;
	uint_t		ccount;
	int		bound = 0;	/* data handle is DMA-bound */
	int		cmd_resp = 0;	/* cmd/resp DMA buffers allocated */
	fc_packet_t	*fpkt;
	ddi_dma_cookie_t	pkt_data_cookie;
	ddi_dma_cookie_t	*cp;
	uint32_t	cnt;

	fpkt = &icmd->ipkt_fc_packet;

	ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
	    fpkt->pkt_resp_dma == NULL);

	icmd->ipkt_nodma = nodma;

	if (nodma) {
		/* No DVMA space: plain kernel memory for cmd and resp. */
		fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
		if (fpkt->pkt_cmd == NULL) {
			goto fail;
		}

		fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
		if (fpkt->pkt_resp == NULL) {
			goto fail;
		}
	} else {
		ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);

		rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
		if (rval == FC_FAILURE) {
			ASSERT(fpkt->pkt_cmd_dma == NULL &&
			    fpkt->pkt_resp_dma == NULL);
			goto fail;
		}
		cmd_resp++;
	}

	if ((fpkt->pkt_datalen != 0) &&
	    !(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
		/*
		 * set up DMA handle and memory for the data in this packet
		 */
		if (ddi_dma_alloc_handle(pptr->port_dip,
		    &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
		    NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
			goto fail;
		}

		if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
		    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
		    &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
			goto fail;
		}

		/* was DMA mem size gotten < size asked for/needed ?? */
		if (real_size < fpkt->pkt_datalen) {
			goto fail;
		}

		/* bind DMA address and handle together */
		if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
		    NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
		    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
		    &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
			goto fail;
		}
		bound++;

		/* More cookies than the port's S/G list can take: fail. */
		if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
			goto fail;
		}

		fpkt->pkt_data_cookie_cnt = ccount;

		/* Copy all cookies into the array following the fcp_ipkt. */
		cp = fpkt->pkt_data_cookie;
		*cp = pkt_data_cookie;
		cp++;

		for (cnt = 1; cnt < ccount; cnt++, cp++) {
			ddi_dma_nextcookie(fpkt->pkt_data_dma,
			    &pkt_data_cookie);
			*cp = pkt_data_cookie;
		}

	} else if (fpkt->pkt_datalen != 0) {
		/*
		 * If it's a pseudo FCA, then it can't support DMA even in
		 * SCSI data phase.
		 */
		fpkt->pkt_data = kmem_alloc(fpkt->pkt_datalen, flags);
		if (fpkt->pkt_data == NULL) {
			goto fail;
		}

	}

	return (FC_SUCCESS);

fail:
	/* Tear down whatever was set up, tracked by bound/cmd_resp/ptrs. */
	if (bound) {
		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
	}

	if (fpkt->pkt_data_dma) {
		if (fpkt->pkt_data) {
			ddi_dma_mem_free(&fpkt->pkt_data_acc);
		}
		ddi_dma_free_handle(&fpkt->pkt_data_dma);
	} else {
		/* Data buffer (if any) came from kmem in the no-DMA case. */
		if (fpkt->pkt_data) {
			kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
		}
	}

	if (nodma) {
		if (fpkt->pkt_cmd) {
			kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
		}
		if (fpkt->pkt_resp) {
			kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
		}
	} else {
		if (cmd_resp) {
			fcp_free_cmd_resp(pptr, fpkt);
		}
	}

	return (FC_NOMEM);
}
6098
6099
/*
 * Function:	fcp_free_dma
 *
 * Description:	Releases the data, command and response resources of an
 *		internal packet, mirroring the allocation paths of
 *		fcp_alloc_dma() (DMA handles vs. plain kmem buffers,
 *		selected by pkt_data_dma and ipkt_nodma).
 *
 * Argument:	*pptr	FCP port.
 *		*icmd	Internal FCP packet.
 *
 * Return Value: None
 */
static void
fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
{
	fc_packet_t *fpkt = icmd->ipkt_fpkt;

	if (fpkt->pkt_data_dma) {
		/* Data buffer was DMA-bound: unbind, free mem, free handle. */
		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
		if (fpkt->pkt_data) {
			ddi_dma_mem_free(&fpkt->pkt_data_acc);
		}
		ddi_dma_free_handle(&fpkt->pkt_data_dma);
	} else {
		/* Pseudo-FCA (no DMA) case: data came from kmem_alloc. */
		if (fpkt->pkt_data) {
			kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
		}
		/*
		 * Need we reset pkt_* to zero???
		 */
	}

	if (icmd->ipkt_nodma) {
		/* Cmd/resp came from kmem; lengths saved in the ipkt. */
		if (fpkt->pkt_cmd) {
			kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
		}
		if (fpkt->pkt_resp) {
			kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
		}
	} else {
		ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);

		fcp_free_cmd_resp(pptr, fpkt);
	}
}
6133
6134 /*
6135 * Function: fcp_lookup_target
6136 *
6137 * Description: Finds a target given a WWN.
6138 *
6139 * Argument: *pptr FCP port.
6140 * *wwn World Wide Name of the device to look for.
6141 *
6142 * Return Value: NULL No target found
6143 * Not NULL Target structure
6144 *
6145 * Context: Interrupt context.
6146 * The mutex pptr->port_mutex must be owned.
6147 */
6148 /* ARGSUSED */
6149 static struct fcp_tgt *
6150 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
6151 {
6152 int hash;
6153 struct fcp_tgt *ptgt;
6154
6155 ASSERT(mutex_owned(&pptr->port_mutex));
6156
6157 hash = FCP_HASH(wwn);
6158
6159 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
6160 ptgt = ptgt->tgt_next) {
6161 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
6162 bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
6163 sizeof (ptgt->tgt_port_wwn)) == 0) {
6164 break;
6165 }
6166 }
6167
6168 return (ptgt);
6169 }
6170
6171
6172 /*
6173 * Find target structure given a port identifier
6174 */
6175 static struct fcp_tgt *
6176 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6177 {
6178 fc_portid_t port_id;
6179 la_wwn_t pwwn;
6180 struct fcp_tgt *ptgt = NULL;
6181
6182 port_id.priv_lilp_posit = 0;
6183 port_id.port_id = d_id;
6184 if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6185 &pwwn) == FC_SUCCESS) {
6186 mutex_enter(&pptr->port_mutex);
6187 ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6188 mutex_exit(&pptr->port_mutex);
6189 }
6190
6191 return (ptgt);
6192 }
6193
6194
6195 /*
6196 * the packet completion callback routine for info cmd pkts
6197 *
6198 * this means fpkt pts to a response to either a PLOGI or a PRLI
6199 *
6200 * if there is an error an attempt is made to call a routine to resend
6201 * the command that failed
6202 */
static void
fcp_icmd_callback(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct la_els_prli	*prli;
	struct la_els_prli	prli_s;
	struct fcp_prli		*fprli;
	struct fcp_lun	*plun;
	int		free_pkt = 1;	/* cleared when icmd is reused */
	int		rval;
	ls_code_t	resp;
	uchar_t		prli_acc = 0;
	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
	int		lun0_newalloc;

	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;

	/* get ptrs to the port and target structs for the cmd */
	pptr = icmd->ipkt_port;
	/* NOTE(review): ptgt is dereferenced unconditionally below, so ELS */
	/* ipkts are assumed to always carry a target -- confirm at callers. */
	ptgt = icmd->ipkt_tgt;

	FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));

	if (icmd->ipkt_opcode == LA_ELS_PRLI) {
		/*
		 * For a PRLI sent as an accept (fcp_unsol_prli), the ACC
		 * code sits in the command buffer rather than the response.
		 */
		FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
		    sizeof (prli_s));
		prli_acc = (prli_s.ls_code == LA_ELS_ACC);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "ELS (%x) callback state=0x%x reason=0x%x for %x",
	    icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
	    ptgt->tgt_d_id);

	if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
	    ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {

		mutex_enter(&ptgt->tgt_mutex);
		if (ptgt->tgt_pd_handle == NULL) {
			/*
			 * in a fabric environment the port device handles
			 * get created only after successful LOGIN into the
			 * transport, so the transport makes this port
			 * device (pd) handle available in this packet, so
			 * save it now
			 */
			ASSERT(fpkt->pkt_pd != NULL);
			ptgt->tgt_pd_handle = fpkt->pkt_pd;
		}
		mutex_exit(&ptgt->tgt_mutex);

		/* which ELS cmd is this response for ?? */
		switch (icmd->ipkt_opcode) {
		case LA_ELS_PLOGI:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
			    ptgt->tgt_d_id,
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));

			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
			    FCP_TGT_TRACE_15);

			/* Note that we are not allocating a new icmd */
			if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause) != DDI_SUCCESS) {
				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_16);
				goto fail;
			}
			break;

		case LA_ELS_PRLI:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);

			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
			    FCP_TGT_TRACE_17);

			prli = &prli_s;

			FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
			    sizeof (prli_s));

			fprli = (struct fcp_prli *)prli->service_params;

			/* Record the remote port's initiator/target roles. */
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_icap = fprli->initiator_fn;
			ptgt->tgt_tcap = fprli->target_fn;
			mutex_exit(&ptgt->tgt_mutex);

			if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
				/*
				 * this FCP device does not support target mode
				 */
				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_18);
				goto fail;
			}
			if (fprli->retry == 1) {
				fc_ulp_disable_relogin(pptr->port_fp_handle,
				    &ptgt->tgt_port_wwn);
			}

			/* target is no longer offline */
			mutex_enter(&pptr->port_mutex);
			mutex_enter(&ptgt->tgt_mutex);
			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
				    FCP_TGT_MARK);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,1: state change "
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				goto fail;
			}
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);

			/*
			 * lun 0 should always respond to inquiry, so
			 * get the LUN struct for LUN 0
			 *
			 * Currently we deal with first level of addressing.
			 * If / when we start supporting 0x device types
			 * (DTYPE_ARRAY_CTRL, i.e. array controllers)
			 * this logic will need revisiting.
			 */
			lun0_newalloc = 0;
			if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
				/*
				 * no LUN struct for LUN 0 yet exists,
				 * so create one
				 */
				plun = fcp_alloc_lun(ptgt);
				if (plun == NULL) {
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Failed to allocate lun 0 for"
					    " D_ID=%x", ptgt->tgt_d_id);
					goto fail;
				}
				lun0_newalloc = 1;
			}

			/* fill in LUN info */
			mutex_enter(&ptgt->tgt_mutex);
			/*
			 * consider lun 0 as device not connected if it is
			 * offlined or newly allocated
			 */
			if ((plun->lun_state & FCP_LUN_OFFLINE) ||
			    lun0_newalloc) {
				plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
			}
			plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
			plun->lun_state &= ~FCP_LUN_OFFLINE;
			ptgt->tgt_lun_cnt = 1;
			ptgt->tgt_report_lun_cnt = 0;
			mutex_exit(&ptgt->tgt_mutex);

			/* Retrieve the rscn count (if a valid one exists) */
			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
				rscn_count = ((fc_ulp_rscn_info_t *)
				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
				    ->ulp_rscn_count;
			} else {
				rscn_count = FC_INVALID_RSCN_COUNT;
			}

			/* send Report Lun request to target */
			if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
			    sizeof (struct fcp_reportlun_resp),
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
				mutex_enter(&pptr->port_mutex);
				if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Failed to send REPORT LUN to"
					    " D_ID=%x", ptgt->tgt_d_id);
				} else {
					FCP_TRACE(fcp_logq,
					    pptr->port_instbuf, fcp_trace,
					    FCP_BUF_LEVEL_5, 0,
					    "fcp_icmd_callback,2:state change"
					    " occured for D_ID=0x%x",
					    ptgt->tgt_d_id);
				}
				mutex_exit(&pptr->port_mutex);

				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_19);

				goto fail;
			} else {
				/* REPORT LUN owns discovery now; free icmd. */
				free_pkt = 0;
				fcp_icmd_free(pptr, icmd);
			}
			break;

		default:
			fcp_log(CE_WARN, pptr->port_dip,
			    "!fcp_icmd_callback Invalid opcode");
			goto fail;
		}

		return;
	}


	/*
	 * Other PLOGI failures are not retried as the
	 * transport does it already
	 */
	if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
		if (fcp_is_retryable(icmd) &&
		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {

			if (FCP_MUST_RETRY(fpkt)) {
				fcp_queue_ipkt(pptr, fpkt);
				return;
			}

			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "ELS PRLI is retried for d_id=0x%x, state=%x,"
			    " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
			    fpkt->pkt_reason);

			/*
			 * Retry by recalling the routine that
			 * originally queued this packet
			 */
			mutex_enter(&pptr->port_mutex);
			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				caddr_t msg;

				mutex_exit(&pptr->port_mutex);

				ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);

				/* Give a timed-out retry a little longer. */
				if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
					fpkt->pkt_timeout +=
					    FCP_TIMEOUT_DELTA;
				}

				rval = fc_ulp_issue_els(pptr->port_fp_handle,
				    fpkt);
				if (rval == FC_SUCCESS) {
					return;
				}

				if (rval == FC_STATEC_BUSY ||
				    rval == FC_OFFLINE) {
					fcp_queue_ipkt(pptr, fpkt);
					return;
				}
				(void) fc_ulp_error(rval, &msg);

				fcp_log(CE_NOTE, pptr->port_dip,
				    "!ELS 0x%x failed to d_id=0x%x;"
				    " %s", icmd->ipkt_opcode,
				    ptgt->tgt_d_id, msg);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,3: state change "
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&pptr->port_mutex);
			}
		}
	} else {
		/* Failed PLOGI: only queue when the transport demands it. */
		if (fcp_is_retryable(icmd) &&
		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
			if (FCP_MUST_RETRY(fpkt)) {
				fcp_queue_ipkt(pptr, fpkt);
				return;
			}
		}
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
		    fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
			mutex_exit(&pptr->port_mutex);
			fcp_print_error(fpkt);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_icmd_callback,4: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
		}
	}

fail:
	/* Unless ownership moved on (free_pkt == 0), finish and free here. */
	if (free_pkt) {
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
	}
}
6511
6512
6513 /*
6514 * called internally to send an info cmd using the transport
6515 *
6516 * sends either an INQ or a REPORT_LUN
6517 *
6518 * when the packet is completed fcp_scsi_callback is called
6519 */
static int
fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
    int lcount, int tcount, int cause, uint32_t rscn_count)
{
	int nodma;
	struct fcp_ipkt *icmd;
	struct fcp_tgt *ptgt;
	struct fcp_port *pptr;
	fc_frame_hdr_t *hp;
	fc_packet_t *fpkt;
	struct fcp_cmd fcp_cmd;
	struct fcp_cmd *fcmd;
	union scsi_cdb *scsi_cdb;

	ASSERT(plun != NULL);

	ptgt = plun->lun_tgt;
	ASSERT(ptgt != NULL);

	pptr = ptgt->tgt_port;
	ASSERT(pptr != NULL);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);

	/*
	 * nodma != 0 means the FCA provides no DVMA space: the FCP_CMD
	 * payload is then built directly in fpkt->pkt_cmd.  Otherwise it
	 * is staged in the local fcp_cmd and copied out with FCP_CP_OUT()
	 * just before transport.
	 */
	nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
	    FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
	    rscn_count);

	if (icmd == NULL) {
		return (DDI_FAILURE);
	}

	fpkt = icmd->ipkt_fpkt;
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	icmd->ipkt_retries = 0;
	icmd->ipkt_opcode = opcode;
	icmd->ipkt_lun = plun;

	if (nodma) {
		fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
	} else {
		fcmd = &fcp_cmd;
	}
	bzero(fcmd, sizeof (struct fcp_cmd));

	fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;

	/*
	 * Build the FC frame header for an unsolicited command frame
	 * addressed from this port (s_id) to the target (d_id).
	 */
	hp = &fpkt->pkt_cmd_fhdr;

	hp->s_id = pptr->port_id;
	hp->d_id = ptgt->tgt_d_id;
	hp->r_ctl = R_CTL_COMMAND;
	hp->type = FC_TYPE_SCSI_FCP;
	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
	hp->rsvd = 0;
	hp->seq_id = 0;
	hp->seq_cnt = 0;
	hp->ox_id = 0xffff;
	hp->rx_id = 0xffff;
	hp->ro = 0;

	/* Address the LUN within the target. */
	bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);

	/*
	 * Request SCSI target for expedited processing
	 */

	/*
	 * Set up for untagged queuing because we do not
	 * know if the fibre device supports queuing.
	 */
	fcmd->fcp_cntl.cntl_reserved_0 = 0;
	fcmd->fcp_cntl.cntl_reserved_1 = 0;
	fcmd->fcp_cntl.cntl_reserved_2 = 0;
	fcmd->fcp_cntl.cntl_reserved_3 = 0;
	fcmd->fcp_cntl.cntl_reserved_4 = 0;
	fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;

	switch (opcode) {
	case SCMD_INQUIRY_PAGE83:
		/*
		 * Prepare to get the Inquiry VPD page 83 information
		 */
		fcmd->fcp_cntl.cntl_read_data = 1;
		fcmd->fcp_cntl.cntl_write_data = 0;
		fcmd->fcp_data_len = alloc_len;

		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
		fpkt->pkt_comp = fcp_scsi_callback;

		/* INQUIRY with EVPD set (addr2) and page code 0x83 (addr1). */
		scsi_cdb->scc_cmd = SCMD_INQUIRY;
		scsi_cdb->g0_addr2 = 0x01;
		scsi_cdb->g0_addr1 = 0x83;
		scsi_cdb->g0_count0 = (uchar_t)alloc_len;
		break;

	case SCMD_INQUIRY:
		fcmd->fcp_cntl.cntl_read_data = 1;
		fcmd->fcp_cntl.cntl_write_data = 0;
		fcmd->fcp_data_len = alloc_len;

		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
		fpkt->pkt_comp = fcp_scsi_callback;

		scsi_cdb->scc_cmd = SCMD_INQUIRY;
		scsi_cdb->g0_count0 = SUN_INQSIZE;
		break;

	case SCMD_REPORT_LUN: {
		fc_portid_t d_id;
		opaque_t fca_dev;

		ASSERT(alloc_len >= 16);

		d_id.priv_lilp_posit = 0;
		d_id.port_id = ptgt->tgt_d_id;

		/* Cache the FCA device handle for this destination. */
		fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);

		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_fca_dev = fca_dev;
		mutex_exit(&ptgt->tgt_mutex);

		fcmd->fcp_cntl.cntl_read_data = 1;
		fcmd->fcp_cntl.cntl_write_data = 0;
		fcmd->fcp_data_len = alloc_len;

		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
		fpkt->pkt_comp = fcp_scsi_callback;

		/* REPORT LUNS allocation length, spread over CDB bytes. */
		scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
		scsi_cdb->scc5_count0 = alloc_len & 0xff;
		scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
		scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
		scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
		break;
	}

	default:
		/*
		 * NOTE(review): an unknown opcode is only logged here; the
		 * packet is still transported below with a zeroed CDB.
		 * Callers must only pass one of the three opcodes handled
		 * above.
		 */
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp_send_scsi Invalid opcode");
		break;
	}

	/* Flush the staged command into the DMA buffer if it was local. */
	if (!nodma) {
		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
	}

	mutex_enter(&pptr->port_mutex);
	if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

		mutex_exit(&pptr->port_mutex);
		if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
		    FC_SUCCESS) {
			fcp_icmd_free(pptr, icmd);
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	} else {
		/*
		 * A link/target state change raced with us; drop the
		 * command rather than sending it to a stale target.
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_send_scsi,1: state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&pptr->port_mutex);
		fcp_icmd_free(pptr, icmd);
		return (DDI_FAILURE);
	}
}
6693
6694
6695 /*
6696 * called by fcp_scsi_callback to check to handle the case where
6697 * REPORT_LUN returns ILLEGAL REQUEST or a UNIT ATTENTION
6698 */
6699 static int
6700 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
6701 {
6702 uchar_t rqlen;
6703 int rval = DDI_FAILURE;
6704 struct scsi_extended_sense sense_info, *sense;
6705 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
6706 fpkt->pkt_ulp_private;
6707 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
6708 struct fcp_port *pptr = ptgt->tgt_port;
6709
6710 ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6711
6712 if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6713 /*
6714 * SCSI-II Reserve Release support. Some older FC drives return
6715 * Reservation conflict for Report Luns command.
6716 */
6717 if (icmd->ipkt_nodma) {
6718 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6719 rsp->fcp_u.fcp_status.sense_len_set = 0;
6720 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6721 } else {
6722 fcp_rsp_t new_resp;
6723
6724 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6725 fpkt->pkt_resp_acc, sizeof (new_resp));
6726
6727 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6728 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6729 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6730
6731 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6732 fpkt->pkt_resp_acc, sizeof (new_resp));
6733 }
6734
6735 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6736 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6737
6738 return (DDI_SUCCESS);
6739 }
6740
6741 sense = &sense_info;
6742 if (!rsp->fcp_u.fcp_status.sense_len_set) {
6743 /* no need to continue if sense length is not set */
6744 return (rval);
6745 }
6746
6747 /* casting 64-bit integer to 8-bit */
6748 rqlen = (uchar_t)min(rsp->fcp_sense_len,
6749 sizeof (struct scsi_extended_sense));
6750
6751 if (rqlen < 14) {
6752 /* no need to continue if request length isn't long enough */
6753 return (rval);
6754 }
6755
6756 if (icmd->ipkt_nodma) {
6757 /*
6758 * We can safely use fcp_response_len here since the
6759 * only path that calls fcp_check_reportlun,
6760 * fcp_scsi_callback, has already called
6761 * fcp_validate_fcp_response.
6762 */
6763 sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6764 sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6765 } else {
6766 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6767 rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6768 sizeof (struct scsi_extended_sense));
6769 }
6770
6771 if (!FCP_SENSE_NO_LUN(sense)) {
6772 mutex_enter(&ptgt->tgt_mutex);
6773 /* clear the flag if any */
6774 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6775 mutex_exit(&ptgt->tgt_mutex);
6776 }
6777
6778 if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6779 (sense->es_add_code == 0x20)) {
6780 if (icmd->ipkt_nodma) {
6781 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6782 rsp->fcp_u.fcp_status.sense_len_set = 0;
6783 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6784 } else {
6785 fcp_rsp_t new_resp;
6786
6787 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6788 fpkt->pkt_resp_acc, sizeof (new_resp));
6789
6790 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6791 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6792 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6793
6794 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6795 fpkt->pkt_resp_acc, sizeof (new_resp));
6796 }
6797
6798 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6799 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6800
6801 return (DDI_SUCCESS);
6802 }
6803
6804 /*
6805 * This is for the STK library which returns a check condition,
6806 * to indicate device is not ready, manual assistance needed.
6807 * This is to a report lun command when the door is open.
6808 */
6809 if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6810 if (icmd->ipkt_nodma) {
6811 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6812 rsp->fcp_u.fcp_status.sense_len_set = 0;
6813 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6814 } else {
6815 fcp_rsp_t new_resp;
6816
6817 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6818 fpkt->pkt_resp_acc, sizeof (new_resp));
6819
6820 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6821 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6822 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6823
6824 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6825 fpkt->pkt_resp_acc, sizeof (new_resp));
6826 }
6827
6828 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6829 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6830
6831 return (DDI_SUCCESS);
6832 }
6833
6834 if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6835 (FCP_SENSE_NO_LUN(sense))) {
6836 mutex_enter(&ptgt->tgt_mutex);
6837 if ((FCP_SENSE_NO_LUN(sense)) &&
6838 (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6839 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6840 mutex_exit(&ptgt->tgt_mutex);
6841 /*
6842 * reconfig was triggred by ILLEGAL REQUEST but
6843 * got ILLEGAL REQUEST again
6844 */
6845 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6846 fcp_trace, FCP_BUF_LEVEL_3, 0,
6847 "!FCP: Unable to obtain Report Lun data"
6848 " target=%x", ptgt->tgt_d_id);
6849 } else {
6850 if (ptgt->tgt_tid == NULL) {
6851 timeout_id_t tid;
6852 /*
6853 * REPORT LUN data has changed. Kick off
6854 * rediscovery
6855 */
6856 tid = timeout(fcp_reconfigure_luns,
6857 (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6858
6859 ptgt->tgt_tid = tid;
6860 ptgt->tgt_state |= FCP_TGT_BUSY;
6861 }
6862 if (FCP_SENSE_NO_LUN(sense)) {
6863 ptgt->tgt_state |= FCP_TGT_ILLREQ;
6864 }
6865 mutex_exit(&ptgt->tgt_mutex);
6866 if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6867 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6868 fcp_trace, FCP_BUF_LEVEL_3, 0,
6869 "!FCP:Report Lun Has Changed"
6870 " target=%x", ptgt->tgt_d_id);
6871 } else if (FCP_SENSE_NO_LUN(sense)) {
6872 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6873 fcp_trace, FCP_BUF_LEVEL_3, 0,
6874 "!FCP:LU Not Supported"
6875 " target=%x", ptgt->tgt_d_id);
6876 }
6877 }
6878 rval = DDI_SUCCESS;
6879 }
6880
6881 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6882 fcp_trace, FCP_BUF_LEVEL_5, 0,
6883 "D_ID=%x, sense=%x, status=%x",
6884 fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6885 rsp->fcp_u.fcp_status.scsi_status);
6886
6887 return (rval);
6888 }
6889
6890 /*
6891 * Function: fcp_scsi_callback
6892 *
6893 * Description: This is the callback routine set by fcp_send_scsi() after
6894 * it calls fcp_icmd_alloc(). The SCSI command completed here
6895 * and autogenerated by FCP are: REPORT_LUN, INQUIRY and
6896 * INQUIRY_PAGE83.
6897 *
6898 * Argument: *fpkt FC packet used to convey the command
6899 *
6900 * Return Value: None
6901 */
static void
fcp_scsi_callback(fc_packet_t *fpkt)
{
	struct fcp_ipkt *icmd = (struct fcp_ipkt *)
	    fpkt->pkt_ulp_private;
	struct fcp_rsp_info fcp_rsp_err, *bep;
	struct fcp_port *pptr;
	struct fcp_tgt *ptgt;
	struct fcp_lun *plun;
	struct fcp_rsp response, *rsp;

	ptgt = icmd->ipkt_tgt;
	pptr = ptgt->tgt_port;
	plun = icmd->ipkt_lun;

	/*
	 * Get at the FCP response.  Without DVMA (ipkt_nodma) the response
	 * can be read in place; otherwise it is copied into a local
	 * through the DMA access handle.
	 */
	if (icmd->ipkt_nodma) {
		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
	} else {
		rsp = &response;
		FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
		    sizeof (struct fcp_rsp));
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "SCSI callback state=0x%x for %x, op_code=0x%x, "
	    "status=%x, lun num=%x",
	    fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
	    rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);

	/*
	 * Pre-init LUN GUID with NWWN if it is not a device that
	 * supports multiple luns and we know it's not page83
	 * compliant. Although using a NWWN is not lun unique,
	 * we will be fine since there is only one lun behind the target
	 * in this case.
	 */
	if ((plun->lun_guid_size == 0) &&
	    (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
	    (fcp_symmetric_device_probe(plun) == 0)) {

		char ascii_wwn[FC_WWN_SIZE*2+1];
		fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
		(void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
	}

	/*
	 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
	 * when they have more data than what is asked in CDB. An overrun
	 * is really when FCP_DL is smaller than the data length in CDB.
	 * In the case here we know that REPORT LUN command we formed within
	 * this binary has correct FCP_DL. So this OVERRUN is due to bad device
	 * behavior. In reality this is FC_SUCCESS.
	 */
	if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
	    (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
	    (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
		fpkt->pkt_state = FC_PKT_SUCCESS;
	}

	/* Transport-level failure: retry, or give up and free the icmd. */
	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "icmd failed with state=0x%x for %x", fpkt->pkt_state,
		    ptgt->tgt_d_id);

		if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
			/*
			 * Inquiry VPD page command on A5K SES devices would
			 * result in data CRC errors.
			 */
			if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
				/* fcp_handle_page83 consumes the icmd */
				(void) fcp_handle_page83(fpkt, icmd, 1);
				return;
			}
		}
		if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
		    FCP_MUST_RETRY(fpkt)) {
			/* fcp_retry_scsi_cmd consumes/requeues the icmd */
			fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
			fcp_retry_scsi_cmd(fpkt);
			return;
		}

		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
		    FCP_TGT_TRACE_20);

		/*
		 * Only report the error if the target/port state has not
		 * changed underneath us (otherwise the failure is expected).
		 * Lock order: port_mutex before tgt_mutex.
		 */
		mutex_enter(&pptr->port_mutex);
		mutex_enter(&ptgt->tgt_mutex);
		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
			fcp_print_error(fpkt);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_scsi_callback,1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
		}
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);

	/* Drop the command if discovery state moved on while it was out. */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_scsi_callback,2: state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}
	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/* Locate the FCP_RSP_INFO block that follows the FCP response. */
	if (icmd->ipkt_nodma) {
		bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
		    sizeof (struct fcp_rsp));
	} else {
		bep = &fcp_rsp_err;
		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
		    fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
	}

	if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
		fcp_retry_scsi_cmd(fpkt);
		return;
	}

	if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
	    FCP_NO_FAILURE) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "rsp_code=0x%x, rsp_len_set=0x%x",
		    bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
		fcp_retry_scsi_cmd(fpkt);
		return;
	}

	/* Device is temporarily busy: requeue rather than fail. */
	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
	    rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
		fcp_queue_ipkt(pptr, fpkt);
		return;
	}

	/*
	 * Devices that do not support INQUIRY_PAGE83, return check condition
	 * with illegal request as per SCSI spec.
	 * Crossbridge is one such device and Daktari's SES node is another.
	 * We want to ideally enumerate these devices as a non-mpxio devices.
	 * SES nodes (Daktari only currently) are an exception to this.
	 */
	if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
	    (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
		    "check condition. May enumerate as non-mpxio device",
		    ptgt->tgt_d_id, plun->lun_type);

		/*
		 * If we let Daktari's SES be enumerated as a non-mpxio
		 * device, there will be a discrepency in that the other
		 * internal FC disks will get enumerated as mpxio devices.
		 * Applications like luxadm expect this to be consistent.
		 *
		 * So, we put in a hack here to check if this is an SES device
		 * and handle it here.
		 */
		if (plun->lun_type == DTYPE_ESI) {
			/*
			 * Since, pkt_state is actually FC_PKT_SUCCESS
			 * at this stage, we fake a failure here so that
			 * fcp_handle_page83 will create a device path using
			 * the WWN instead of the GUID which is not there anyway
			 */
			fpkt->pkt_state = FC_PKT_LOCAL_RJT;
			(void) fcp_handle_page83(fpkt, icmd, 1);
			return;
		}

		mutex_enter(&ptgt->tgt_mutex);
		plun->lun_state &= ~(FCP_LUN_OFFLINE |
		    FCP_LUN_MARK | FCP_LUN_BUSY);
		mutex_exit(&ptgt->tgt_mutex);

		(void) fcp_call_finish_init(pptr, ptgt,
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
		int rval = DDI_FAILURE;

		/*
		 * handle cases where report lun isn't supported
		 * by faking up our own REPORT_LUN response or
		 * UNIT ATTENTION
		 */
		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
			rval = fcp_check_reportlun(rsp, fpkt);

			/*
			 * fcp_check_reportlun might have modified the
			 * FCP response. Copy it in again to get an updated
			 * FCP response
			 */
			if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
				rsp = &response;

				FCP_CP_IN(fpkt->pkt_resp, rsp,
				    fpkt->pkt_resp_acc,
				    sizeof (struct fcp_rsp));
			}
		}

		/* Still not good after any fixup: finish or retry. */
		if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
			if (rval == DDI_SUCCESS) {
				(void) fcp_call_finish_init(pptr, ptgt,
				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
				    icmd->ipkt_cause);
				fcp_icmd_free(pptr, icmd);
			} else {
				fcp_retry_scsi_cmd(fpkt);
			}

			return;
		}
	} else {
		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
			/* REPORT_LUN worked; clear any ILLEGAL REQUEST flag */
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
	/* Make the DMA'ed data visible to the CPU before parsing it. */
	if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
		(void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}

	/* Dispatch to the per-opcode handler; each consumes the icmd. */
	switch (icmd->ipkt_opcode) {
	case SCMD_INQUIRY:
		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
		fcp_handle_inquiry(fpkt, icmd);
		break;

	case SCMD_REPORT_LUN:
		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
		    FCP_TGT_TRACE_22);
		fcp_handle_reportlun(fpkt, icmd);
		break;

	case SCMD_INQUIRY_PAGE83:
		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
		(void) fcp_handle_page83(fpkt, icmd, 0);
		break;

	default:
		fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		break;
	}
}
7184
7185
7186 static void
7187 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7188 {
7189 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
7190 fpkt->pkt_ulp_private;
7191 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
7192 struct fcp_port *pptr = ptgt->tgt_port;
7193
7194 if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7195 fcp_is_retryable(icmd)) {
7196 mutex_enter(&pptr->port_mutex);
7197 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7198 mutex_exit(&pptr->port_mutex);
7199 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7200 fcp_trace, FCP_BUF_LEVEL_3, 0,
7201 "Retrying %s to %x; state=%x, reason=%x",
7202 (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7203 "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7204 fpkt->pkt_state, fpkt->pkt_reason);
7205
7206 fcp_queue_ipkt(pptr, fpkt);
7207 } else {
7208 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7209 fcp_trace, FCP_BUF_LEVEL_3, 0,
7210 "fcp_retry_scsi_cmd,1: state change occured"
7211 " for D_ID=0x%x", ptgt->tgt_d_id);
7212 mutex_exit(&pptr->port_mutex);
7213 (void) fcp_call_finish_init(pptr, ptgt,
7214 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7215 icmd->ipkt_cause);
7216 fcp_icmd_free(pptr, icmd);
7217 }
7218 } else {
7219 fcp_print_error(fpkt);
7220 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7221 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7222 fcp_icmd_free(pptr, icmd);
7223 }
7224 }
7225
7226 /*
7227 * Function: fcp_handle_page83
7228 *
7229 * Description: Treats the response to INQUIRY_PAGE83.
7230 *
7231 * Argument: *fpkt FC packet used to convey the command.
7232 * *icmd Original fcp_ipkt structure.
7233 * ignore_page83_data
7234 * if it's 1, that means it's a special devices's
7235 * page83 response, it should be enumerated under mpxio
7236 *
7237 * Return Value: None
7238 */
7239 static void
7240 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7241 int ignore_page83_data)
7242 {
7243 struct fcp_port *pptr;
7244 struct fcp_lun *plun;
7245 struct fcp_tgt *ptgt;
7246 uchar_t dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7247 int fail = 0;
7248 ddi_devid_t devid;
7249 char *guid = NULL;
7250 int ret;
7251
7252 ASSERT(icmd != NULL && fpkt != NULL);
7253
7254 pptr = icmd->ipkt_port;
7255 ptgt = icmd->ipkt_tgt;
7256 plun = icmd->ipkt_lun;
7257
7258 if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7259 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7260
7261 FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7262 SCMD_MAX_INQUIRY_PAGE83_SIZE);
7263
7264 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7265 fcp_trace, FCP_BUF_LEVEL_5, 0,
7266 "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7267 "dtype=0x%x, lun num=%x",
7268 pptr->port_instance, ptgt->tgt_d_id,
7269 dev_id_page[0], plun->lun_num);
7270
7271 ret = ddi_devid_scsi_encode(
7272 DEVID_SCSI_ENCODE_VERSION_LATEST,
7273 NULL, /* driver name */
7274 (unsigned char *) &plun->lun_inq, /* standard inquiry */
7275 sizeof (plun->lun_inq), /* size of standard inquiry */
7276 NULL, /* page 80 data */
7277 0, /* page 80 len */
7278 dev_id_page, /* page 83 data */
7279 SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7280 &devid);
7281
7282 if (ret == DDI_SUCCESS) {
7283
7284 guid = ddi_devid_to_guid(devid);
7285
7286 if (guid) {
7287 /*
7288 * Check our current guid. If it's non null
7289 * and it has changed, we need to copy it into
7290 * lun_old_guid since we might still need it.
7291 */
7292 if (plun->lun_guid &&
7293 strcmp(guid, plun->lun_guid)) {
7294 unsigned int len;
7295
7296 /*
7297 * If the guid of the LUN changes,
7298 * reconfiguration should be triggered
7299 * to reflect the changes.
7300 * i.e. we should offline the LUN with
7301 * the old guid, and online the LUN with
7302 * the new guid.
7303 */
7304 plun->lun_state |= FCP_LUN_CHANGED;
7305
7306 if (plun->lun_old_guid) {
7307 kmem_free(plun->lun_old_guid,
7308 plun->lun_old_guid_size);
7309 }
7310
7311 len = plun->lun_guid_size;
7312 plun->lun_old_guid_size = len;
7313
7314 plun->lun_old_guid = kmem_zalloc(len,
7315 KM_NOSLEEP);
7316
7317 if (plun->lun_old_guid) {
7318 /*
7319 * The alloc was successful then
7320 * let's do the copy.
7321 */
7322 bcopy(plun->lun_guid,
7323 plun->lun_old_guid, len);
7324 } else {
7325 fail = 1;
7326 plun->lun_old_guid_size = 0;
7327 }
7328 }
7329 if (!fail) {
7330 if (fcp_copy_guid_2_lun_block(
7331 plun, guid)) {
7332 fail = 1;
7333 }
7334 }
7335 ddi_devid_free_guid(guid);
7336
7337 } else {
7338 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7339 fcp_trace, FCP_BUF_LEVEL_2, 0,
7340 "fcp_handle_page83: unable to create "
7341 "GUID");
7342
7343 /* couldn't create good guid from devid */
7344 fail = 1;
7345 }
7346 ddi_devid_free(devid);
7347
7348 } else if (ret == DDI_NOT_WELL_FORMED) {
7349 /* NULL filled data for page 83 */
7350 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7351 fcp_trace, FCP_BUF_LEVEL_2, 0,
7352 "fcp_handle_page83: retry GUID");
7353
7354 icmd->ipkt_retries = 0;
7355 fcp_retry_scsi_cmd(fpkt);
7356 return;
7357 } else {
7358 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7359 fcp_trace, FCP_BUF_LEVEL_2, 0,
7360 "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7361 ret);
7362 /*
7363 * Since the page83 validation
7364 * introduced late, we are being
7365 * tolerant to the existing devices
7366 * that already found to be working
7367 * under mpxio, like A5200's SES device,
7368 * its page83 response will not be standard-compliant,
7369 * but we still want it to be enumerated under mpxio.
7370 */
7371 if (fcp_symmetric_device_probe(plun) != 0) {
7372 fail = 1;
7373 }
7374 }
7375
7376 } else {
7377 /* bad packet state */
7378 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7379
7380 /*
7381 * For some special devices (A5K SES and Daktari's SES devices),
7382 * they should be enumerated under mpxio
7383 * or "luxadm dis" will fail
7384 */
7385 if (ignore_page83_data) {
7386 fail = 0;
7387 } else {
7388 fail = 1;
7389 }
7390 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7391 fcp_trace, FCP_BUF_LEVEL_2, 0,
7392 "!Devid page cmd failed. "
7393 "fpkt_state: %x fpkt_reason: %x",
7394 "ignore_page83: %d",
7395 fpkt->pkt_state, fpkt->pkt_reason,
7396 ignore_page83_data);
7397 }
7398
7399 mutex_enter(&pptr->port_mutex);
7400 mutex_enter(&plun->lun_mutex);
7401 /*
7402 * If lun_cip is not NULL, then we needn't update lun_mpxio to avoid
7403 * mismatch between lun_cip and lun_mpxio.
7404 */
7405 if (plun->lun_cip == NULL) {
7406 /*
7407 * If we don't have a guid for this lun it's because we were
7408 * unable to glean one from the page 83 response. Set the
7409 * control flag to 0 here to make sure that we don't attempt to
7410 * enumerate it under mpxio.
7411 */
7412 if (fail || pptr->port_mpxio == 0) {
7413 plun->lun_mpxio = 0;
7414 } else {
7415 plun->lun_mpxio = 1;
7416 }
7417 }
7418 mutex_exit(&plun->lun_mutex);
7419 mutex_exit(&pptr->port_mutex);
7420
7421 mutex_enter(&ptgt->tgt_mutex);
7422 plun->lun_state &=
7423 ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7424 mutex_exit(&ptgt->tgt_mutex);
7425
7426 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7427 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7428
7429 fcp_icmd_free(pptr, icmd);
7430 }
7431
7432 /*
7433 * Function: fcp_handle_inquiry
7434 *
7435 * Description: Called by fcp_scsi_callback to handle the response to an
7436 * INQUIRY request.
7437 *
7438 * Argument: *fpkt FC packet used to convey the command.
7439 * *icmd Original fcp_ipkt structure.
7440 *
7441 * Return Value: None
7442 */
static void
fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
{
	struct fcp_port *pptr;
	struct fcp_lun *plun;
	struct fcp_tgt *ptgt;
	uchar_t dtype;
	uchar_t pqual;
	uint32_t rscn_count = FC_INVALID_RSCN_COUNT;

	ASSERT(icmd != NULL && fpkt != NULL);

	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;
	plun = icmd->ipkt_lun;

	/* Copy the standard INQUIRY data into the LUN structure. */
	FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
	    sizeof (struct scsi_inquiry));

	/* Peripheral device type (low 5 bits) and qualifier (top 3 bits). */
	dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
	pqual = plun->lun_inq.inq_dtype >> 5;

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
	    "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
	    plun->lun_num, dtype, pqual);

	if (pqual != 0) {
		/*
		 * Non-zero peripheral qualifier: the LUN is not connected
		 * or not capable; log it and finish without enumerating.
		 */
		fcp_log(CE_CONT, pptr->port_dip,
		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
		    "Device type=0x%x Peripheral qual=0x%x\n",
		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
		    "Device type=0x%x Peripheral qual=0x%x\n",
		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);

		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);

		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/*
	 * If the device is already initialized, check the dtype
	 * for a change. If it has changed then update the flags
	 * so the create_luns will offline the old device and
	 * create the new device. Refer to bug: 4764752
	 */
	if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
		plun->lun_state |= FCP_LUN_CHANGED;
	}
	plun->lun_type = plun->lun_inq.inq_dtype;

	/*
	 * This code is setting/initializing the throttling in the FCA
	 * driver.  "pid" is a product-id pattern declared elsewhere in
	 * this file (not visible in this chunk) that the INQUIRY data is
	 * matched against.
	 */
	mutex_enter(&pptr->port_mutex);
	if (!pptr->port_notify) {
		if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
			uint32_t cmd = 0;
			/*
			 * NOTE(review): because of C operator precedence
			 * (">>" before "&", "&" before "|") and cmd being 0,
			 * this expression reduces to
			 * FC_NOTIFY_THROTTLE | (FCP_SVE_THROTTLE << 8);
			 * the masks are no-ops.  Left as-is on purpose.
			 */
			cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
			    ((cmd & 0xFFFFFF00 >> 8) |
			    FCP_SVE_THROTTLE << 8));
			pptr->port_notify = 1;
			mutex_exit(&pptr->port_mutex);
			(void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
			mutex_enter(&pptr->port_mutex);
		}
	}

	if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_handle_inquiry,1:state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&pptr->port_mutex);

		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
		(void) fcp_call_finish_init(pptr, ptgt,
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}
	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
	mutex_exit(&pptr->port_mutex);

	/* Retrieve the rscn count (if a valid one exists) */
	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
		rscn_count = ((fc_ulp_rscn_info_t *)
		    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
	} else {
		rscn_count = FC_INVALID_RSCN_COUNT;
	}

	/*
	 * Read Inquiry VPD Page 0x83 to uniquely
	 * identify this logical unit.  The new command allocates its own
	 * internal packet, so this icmd is freed below regardless of the
	 * outcome.
	 */
	if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
	    SCMD_MAX_INQUIRY_PAGE83_SIZE,
	    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
	    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
		fcp_log(CE_WARN, NULL, "!failed to send page 83");
		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
		(void) fcp_call_finish_init(pptr, ptgt,
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause);
	}

	fcp_icmd_free(pptr, icmd);
}
7565
7566 /*
7567 * Function: fcp_handle_reportlun
7568 *
7569 * Description: Called by fcp_scsi_callback to handle the response to a
7570 * REPORT_LUN request.
7571 *
7572 * Argument: *fpkt FC packet used to convey the command.
7573 * *icmd Original fcp_ipkt structure.
7574 *
7575 * Return Value: None
7576 */
static void
fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
{
	int i;
	int nluns_claimed;	/* LUN count the target reports (capped below) */
	int nluns_bufmax;	/* LUN count our response buffer can hold */
	int len;
	uint16_t lun_num;
	uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
	struct fcp_port *pptr;
	struct fcp_tgt *ptgt;
	struct fcp_lun *plun;
	struct fcp_reportlun_resp *report_lun;

	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;
	len = fpkt->pkt_datalen;

	/*
	 * A response shorter than the REPORT_LUN header cannot be parsed.
	 * The copy buffer is allocated KM_NOSLEEP since we may be running
	 * in interrupt/callback context; on either failure, account for
	 * this discovery step and release the internal packet.
	 */
	if ((len < FCP_LUN_HEADER) ||
	    ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/* Copy the REPORT_LUN payload out of the DMA-mapped buffer. */
	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
	    fpkt->pkt_datalen);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
	    pptr->port_instance, ptgt->tgt_d_id);

	/*
	 * Get the number of luns (which is supplied as LUNS * 8) the
	 * device claims it has.
	 */
	nluns_claimed = BE_32(report_lun->num_lun) >> 3;

	/*
	 * Get the maximum number of luns the buffer submitted can hold.
	 */
	nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;

	/*
	 * Due to limitations of certain hardware, we support only 16 bit LUNs
	 */
	if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
		kmem_free(report_lun, len);

		fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
		    " 0x%x number of LUNs for target=%x", nluns_claimed,
		    ptgt->tgt_d_id);

		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/*
	 * If there are more LUNs than we have allocated memory for,
	 * allocate more space and send down yet another report lun if
	 * the maximum number of attempts hasn't been reached.
	 */
	mutex_enter(&ptgt->tgt_mutex);

	if ((nluns_claimed > nluns_bufmax) &&
	    (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {

		struct fcp_lun *plun;	/* intentionally shadows outer plun */

		ptgt->tgt_report_lun_cnt++;
		plun = ptgt->tgt_lun;
		ASSERT(plun != NULL);
		mutex_exit(&ptgt->tgt_mutex);

		kmem_free(report_lun, len);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "!Dynamically discovered %d LUNs for D_ID=%x",
		    nluns_claimed, ptgt->tgt_d_id);

		/* Retrieve the rscn count (if a valid one exists) */
		if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
			rscn_count = ((fc_ulp_rscn_info_t *)
			    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
			    ulp_rscn_count;
		} else {
			rscn_count = FC_INVALID_RSCN_COUNT;
		}

		/*
		 * Reissue REPORT_LUN with a buffer large enough for every
		 * LUN the target claimed.  On send failure, account for
		 * this discovery step now; otherwise the new command's
		 * completion will do so.
		 */
		if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
		    FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
			(void) fcp_call_finish_init(pptr, ptgt,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause);
		}

		fcp_icmd_free(pptr, icmd);
		return;
	}

	/*
	 * Retry budget exhausted but the response still overflows the
	 * buffer: process only what fits and log how many LUNs are lost.
	 */
	if (nluns_claimed > nluns_bufmax) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "Target=%x:%x:%x:%x:%x:%x:%x:%x"
		    " Number of LUNs lost=%x",
		    ptgt->tgt_port_wwn.raw_wwn[0],
		    ptgt->tgt_port_wwn.raw_wwn[1],
		    ptgt->tgt_port_wwn.raw_wwn[2],
		    ptgt->tgt_port_wwn.raw_wwn[3],
		    ptgt->tgt_port_wwn.raw_wwn[4],
		    ptgt->tgt_port_wwn.raw_wwn[5],
		    ptgt->tgt_port_wwn.raw_wwn[6],
		    ptgt->tgt_port_wwn.raw_wwn[7],
		    nluns_claimed - nluns_bufmax);

		nluns_claimed = nluns_bufmax;
	}
	ptgt->tgt_lun_cnt = nluns_claimed;

	/*
	 * Identify missing LUNs and print warning messages
	 */
	for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
		int offline;
		int exists = 0;

		offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;

		/* Look for this known LUN number in the new response. */
		for (i = 0; i < nluns_claimed && exists == 0; i++) {
			uchar_t *lun_string;

			lun_string = (uchar_t *)&(report_lun->lun_string[i]);

			switch (lun_string[0] & 0xC0) {
			case FCP_LUN_ADDRESSING:
			case FCP_PD_ADDRESSING:
			case FCP_VOLUME_ADDRESSING:
				lun_num = ((lun_string[0] & 0x3F) << 8) |
				    lun_string[1];
				if (plun->lun_num == lun_num) {
					exists++;
					break;
				}
				break;

			default:
				break;
			}
		}

		if (!exists && !offline) {
			/*
			 * A previously known LUN vanished from the
			 * response.  Re-take the locks in port->tgt
			 * order so we can check for an intervening
			 * link/target state change first.
			 */
			mutex_exit(&ptgt->tgt_mutex);

			mutex_enter(&pptr->port_mutex);
			mutex_enter(&ptgt->tgt_mutex);
			if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
				/*
				 * set disappear flag when device was connected
				 */
				if (!(plun->lun_state &
				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
					plun->lun_state |= FCP_LUN_DISAPPEARED;
				}
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				/* Log only if the device had been connected. */
				if (!(plun->lun_state &
				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun=%x for target=%x disappeared",
					    plun->lun_num, ptgt->tgt_d_id);
				}
				mutex_enter(&ptgt->tgt_mutex);
			} else {
				/*
				 * State changed under us: abandon this
				 * response entirely and account for the
				 * discovery step.
				 */
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_5, 0,
				    "fcp_handle_reportlun,1: state change"
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				kmem_free(report_lun, len);
				(void) fcp_call_finish_init(pptr, ptgt,
				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
				    icmd->ipkt_cause);
				fcp_icmd_free(pptr, icmd);
				return;
			}
		} else if (exists) {
			/*
			 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
			 * actually exists in REPORT_LUN response
			 */
			if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
				plun->lun_state &=
				    ~FCP_LUN_DEVICE_NOT_CONNECTED;
			}
			if (offline || plun->lun_num == 0) {
				if (plun->lun_state & FCP_LUN_DISAPPEARED) {
					plun->lun_state &= ~FCP_LUN_DISAPPEARED;
					mutex_exit(&ptgt->tgt_mutex);
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun=%x for target=%x reappeared",
					    plun->lun_num, ptgt->tgt_d_id);
					mutex_enter(&ptgt->tgt_mutex);
				}
			}
		}
	}

	/*
	 * One fcp_call_finish_init() accounting call will be made per
	 * LUN below (at least one even if no LUNs were reported).
	 */
	ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
	mutex_exit(&ptgt->tgt_mutex);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
	    pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);

	/* scan each lun */
	for (i = 0; i < nluns_claimed; i++) {
		uchar_t *lun_string;

		lun_string = (uchar_t *)&(report_lun->lun_string[i]);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
		    " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
		    lun_string[0]);

		switch (lun_string[0] & 0xC0) {
		case FCP_LUN_ADDRESSING:
		case FCP_PD_ADDRESSING:
		case FCP_VOLUME_ADDRESSING:
			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];

			/* We will skip masked LUNs because of the blacklist. */
			if (fcp_lun_blacklist != NULL) {
				mutex_enter(&ptgt->tgt_mutex);
				if (fcp_should_mask(&ptgt->tgt_port_wwn,
				    lun_num) == TRUE) {
					ptgt->tgt_lun_cnt--;
					mutex_exit(&ptgt->tgt_mutex);
					/* falls to the accounting call below */
					break;
				}
				mutex_exit(&ptgt->tgt_mutex);
			}

			/* see if this LUN is already allocated */
			if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
				plun = fcp_alloc_lun(ptgt);
				if (plun == NULL) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun allocation failed"
					    " target=%x lun=%x",
					    ptgt->tgt_d_id, lun_num);
					break;
				}
			}

			mutex_enter(&plun->lun_tgt->tgt_mutex);
			/* convert to LUN */
			plun->lun_addr.ent_addr_0 =
			    BE_16(*(uint16_t *)&(lun_string[0]));
			plun->lun_addr.ent_addr_1 =
			    BE_16(*(uint16_t *)&(lun_string[2]));
			plun->lun_addr.ent_addr_2 =
			    BE_16(*(uint16_t *)&(lun_string[4]));
			plun->lun_addr.ent_addr_3 =
			    BE_16(*(uint16_t *)&(lun_string[6]));

			plun->lun_num = lun_num;
			plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
			plun->lun_state &= ~FCP_LUN_OFFLINE;
			mutex_exit(&plun->lun_tgt->tgt_mutex);

			/* Retrieve the rscn count (if a valid one exists) */
			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
				rscn_count = ((fc_ulp_rscn_info_t *)
				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
				    ulp_rscn_count;
			} else {
				rscn_count = FC_INVALID_RSCN_COUNT;
			}

			/* Probe the LUN; INQUIRY completion continues init. */
			if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
				mutex_enter(&pptr->port_mutex);
				mutex_enter(&plun->lun_tgt->tgt_mutex);
				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!failed to send INQUIRY"
					    " target=%x lun=%x",
					    ptgt->tgt_d_id, plun->lun_num);
				} else {
					FCP_TRACE(fcp_logq,
					    pptr->port_instbuf, fcp_trace,
					    FCP_BUF_LEVEL_5, 0,
					    "fcp_handle_reportlun,2: state"
					    " change occured for D_ID=0x%x",
					    ptgt->tgt_d_id);
				}
				mutex_exit(&plun->lun_tgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
			} else {
				/*
				 * INQUIRY is in flight; its completion path
				 * owns the accounting call, so skip the one
				 * at the bottom of this loop.
				 */
				continue;
			}
			break;

		default:
			fcp_log(CE_WARN, NULL,
			    "!Unsupported LUN Addressing method %x "
			    "in response to REPORT_LUN", lun_string[0]);
			break;
		}

		/*
		 * each time through this loop we should decrement
		 * the tmp_cnt by one -- since we go through this loop
		 * one time for each LUN, the tmp_cnt should never be <=0
		 */
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
	}

	/* No LUNs at all: tgt_tmp_cnt was set to 1 above; account for it. */
	if (i == 0) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
	}

	kmem_free(report_lun, len);
	fcp_icmd_free(pptr, icmd);
}
7918
7919
7920 /*
7921 * called internally to return a LUN given a target and a LUN number
7922 */
7923 static struct fcp_lun *
7924 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7925 {
7926 struct fcp_lun *plun;
7927
7928 mutex_enter(&ptgt->tgt_mutex);
7929 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7930 if (plun->lun_num == lun_num) {
7931 mutex_exit(&ptgt->tgt_mutex);
7932 return (plun);
7933 }
7934 }
7935 mutex_exit(&ptgt->tgt_mutex);
7936
7937 return (NULL);
7938 }
7939
7940
7941 /*
7942 * handle finishing one target for fcp_finish_init
7943 *
7944 * return true (non-zero) if we want finish_init to continue with the
7945 * next target
7946 *
7947 * called with the port mutex held
7948 */
7949 /*ARGSUSED*/
static int
fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int cause)
{
	int	rval = 1;	/* default: let finish_init continue */

	ASSERT(pptr != NULL);
	ASSERT(ptgt != NULL);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
	    ptgt->tgt_state);

	ASSERT(mutex_owned(&pptr->port_mutex));

	/*
	 * Bail out if the link or target generation counters no longer
	 * match the ones captured when this work was started.
	 */
	if ((pptr->port_link_cnt != link_cnt) ||
	    (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
		/*
		 * oh oh -- another link reset or target change
		 * must have occurred while we are in here
		 */
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);

		return (0);
	} else {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
	}

	mutex_enter(&ptgt->tgt_mutex);

	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
		/*
		 * tgt is not offline -- is it marked (i.e. needs
		 * to be offlined) ??
		 */
		if (ptgt->tgt_state & FCP_TGT_MARK) {
			/*
			 * this target not offline *and*
			 * marked
			 */
			ptgt->tgt_state &= ~FCP_TGT_MARK;
			rval = fcp_offline_target(pptr, ptgt, link_cnt,
			    tgt_cnt, 0, 0);
		} else {
			ptgt->tgt_state &= ~FCP_TGT_BUSY;

			/* create the LUNs */
			if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
				ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
				fcp_create_luns(ptgt, link_cnt, tgt_cnt,
				    cause);
				ptgt->tgt_device_created = 1;
			} else {
				/* on-demand node: just clear LUN busy state */
				fcp_update_tgt_state(ptgt, FCP_RESET,
				    FCP_LUN_BUSY);
			}
		}
	}

	mutex_exit(&ptgt->tgt_mutex);

	return (rval);
}
8013
8014
8015 /*
8016 * this routine is called to finish port initialization
8017 *
8018 * Each port has a "temp" counter -- when a state change happens (e.g.
8019 * port online), the temp count is set to the number of devices in the map.
8020 * Then, as each device gets "discovered", the temp counter is decremented
8021 * by one. When this count reaches zero we know that all of the devices
8022 * in the map have been discovered (or an error has occurred), so we can
8023 * then finish initialization -- which is done by this routine (well, this
 * and fcp_finish_tgt())
8025 *
8026 * acquires and releases the global mutex
8027 *
8028 * called with the port mutex owned
8029 */
8030 static void
8031 fcp_finish_init(struct fcp_port *pptr)
8032 {
8033 #ifdef DEBUG
8034 bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
8035 pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
8036 FCP_STACK_DEPTH);
8037 #endif /* DEBUG */
8038
8039 ASSERT(mutex_owned(&pptr->port_mutex));
8040
8041 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8042 fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
8043 " entering; ipkt count=%d", pptr->port_ipkt_cnt);
8044
8045 if ((pptr->port_state & FCP_STATE_ONLINING) &&
8046 !(pptr->port_state & (FCP_STATE_SUSPENDED |
8047 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
8048 pptr->port_state &= ~FCP_STATE_ONLINING;
8049 pptr->port_state |= FCP_STATE_ONLINE;
8050 }
8051
8052 /* Wake up threads waiting on config done */
8053 cv_broadcast(&pptr->port_config_cv);
8054 }
8055
8056
8057 /*
8058 * called from fcp_finish_init to create the LUNs for a target
8059 *
8060 * called with the port mutex owned
8061 */
static void
fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
{
	struct fcp_lun	*plun;
	struct fcp_port	*pptr;
	child_info_t	*cip = NULL;

	ASSERT(ptgt != NULL);
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	pptr = ptgt->tgt_port;

	ASSERT(pptr != NULL);

	/* scan all LUNs for this target */
	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
		/* already offline -- nothing to bring up */
		if (plun->lun_state & FCP_LUN_OFFLINE) {
			continue;
		}

		/* marked LUNs were not seen in the last REPORT_LUN: offline */
		if (plun->lun_state & FCP_LUN_MARK) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_create_luns: offlining marked LUN!");
			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
			continue;
		}

		plun->lun_state &= ~FCP_LUN_BUSY;

		/*
		 * There are conditions in which FCP_LUN_INIT flag is cleared
		 * but we have a valid plun->lun_cip. To cover this case also
		 * CLEAR_BUSY whenever we have a valid lun_cip.
		 */
		if (plun->lun_mpxio && plun->lun_cip &&
		    (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
		    0, 0))) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_create_luns: enable lun %p failed!",
			    plun);
		}

		/* already initialized and unchanged -- leave it alone */
		if (plun->lun_state & FCP_LUN_INIT &&
		    !(plun->lun_state & FCP_LUN_CHANGED)) {
			continue;
		}

		/* user-driven creation is handled elsewhere */
		if (cause == FCP_CAUSE_USER_CREATE) {
			continue;
		}

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_6, 0,
		    "create_luns: passing ONLINE elem to HP thread");

		/*
		 * If lun has changed, prepare for offlining the old path.
		 * Do not offline the old path right now, since it may be
		 * still opened.
		 */
		if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
			fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
		}

		/* pass an ONLINE element to the hotplug thread */
		if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
		    link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {

			/*
			 * We can not synchronous attach (i.e pass
			 * NDI_ONLINE_ATTACH) here as we might be
			 * coming from an interrupt or callback
			 * thread.
			 */
			if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
			    link_cnt, tgt_cnt, 0, 0)) {
				fcp_log(CE_CONT, pptr->port_dip,
				    "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
				    plun->lun_tgt->tgt_d_id, plun->lun_num);
			}
		}
	}
}
8148
8149
8150 /*
8151 * function to online/offline devices
8152 */
static int
fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
    int online, int lcount, int tcount, int flags)
{
	int		rval = NDI_FAILURE;
	int		circ;
	child_info_t	*ccip;
	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
	int		is_mpxio = pptr->port_mpxio;

	if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
		/*
		 * When this event gets serviced, lun_cip and lun_mpxio
		 * has changed, so it should be invalidated now.
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
		    "plun: %p, cip: %p, what:%d", plun, cip, online);
		return (rval);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
	    "flags=%x mpxio=%x\n",
	    plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
	    plun->lun_mpxio);

	/*
	 * lun_mpxio needs checking here because we can end up in a race
	 * condition where this task has been dispatched while lun_mpxio is
	 * set, but an earlier FCP_ONLINE task for the same LUN tried to
	 * enable MPXIO for the LUN, but was unable to, and hence cleared
	 * the flag. We rely on the serialization of the tasks here. We
	 * return NDI_SUCCESS so any callers continue without reporting
	 * spurious errors, and so they still think we're an MPXIO LUN.
	 */

	if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
	    online == FCP_MPXIO_PATH_SET_BUSY) {
		if (plun->lun_mpxio) {
			rval = fcp_update_mpxio_path(plun, cip, online);
		} else {
			rval = NDI_SUCCESS;
		}
		return (rval);
	}

	/* Hold the port busy for the duration of the online/offline. */
	if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
		return (NDI_FAILURE);
	}

	/* Use the mdi or ndi devinfo-tree lock, matching the port type. */
	if (is_mpxio) {
		mdi_devi_enter(pptr->port_dip, &circ);
	} else {
		ndi_devi_enter(pptr->port_dip, &circ);
	}

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);

	if (online == FCP_ONLINE) {
		ccip = fcp_get_cip(plun, cip, lcount, tcount);
		if (ccip == NULL) {
			goto fail;
		}
	} else {
		/* offline: the child must still be present */
		if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
			goto fail;
		}
		ccip = cip;
	}

	if (online == FCP_ONLINE) {
		rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
		    &circ);
		fc_ulp_log_device_event(pptr->port_fp_handle,
		    FC_ULP_DEVICE_ONLINE);
	} else {
		rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
		    &circ);
		fc_ulp_log_device_event(pptr->port_fp_handle,
		    FC_ULP_DEVICE_OFFLINE);
	}

	/* common unwind: drop locks, exit the devinfo tree, idle the port */
fail:	mutex_exit(&plun->lun_mutex);
	mutex_exit(&pptr->port_mutex);

	if (is_mpxio) {
		mdi_devi_exit(pptr->port_dip, circ);
	} else {
		ndi_devi_exit(pptr->port_dip, circ);
	}

	fc_ulp_idle_port(pptr->port_fp_handle);

	return (rval);
}
8251
8252
8253 /*
8254 * take a target offline by taking all of its LUNs offline
8255 */
8256 /*ARGSUSED*/
static int
fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int nowait, int flags)
{
	struct fcp_tgt_elem *elem;

	ASSERT(mutex_owned(&pptr->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));

	/* Generation mismatch: another link/target event superseded us. */
	if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
	    ptgt->tgt_change_cnt)) {
		/* tgt_mutex dropped around tracing to respect lock rules */
		mutex_exit(&ptgt->tgt_mutex);
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
		mutex_enter(&ptgt->tgt_mutex);

		return (0);
	}

	ptgt->tgt_pd_handle = NULL;
	mutex_exit(&ptgt->tgt_mutex);
	FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
	mutex_enter(&ptgt->tgt_mutex);

	/* snapshot the current change count if the caller passed 0 */
	tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;

	/*
	 * For tape (tcap) targets, defer the offline via an element on the
	 * port's offline-target list (KM_NOSLEEP: may be interrupt context);
	 * when nowait is clear the watchdog waits fcp_offline_delay first.
	 * On allocation failure, or for non-tcap targets, offline now.
	 */
	if (ptgt->tgt_tcap &&
	    (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
		elem->flags = flags;
		elem->time = fcp_watchdog_time;
		if (nowait == 0) {
			elem->time += fcp_offline_delay;
		}
		elem->ptgt = ptgt;
		elem->link_cnt = link_cnt;
		elem->tgt_cnt = tgt_cnt;
		elem->next = pptr->port_offline_tgts;
		pptr->port_offline_tgts = elem;
	} else {
		fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
	}

	return (1);
}
8302
8303
/*
 * Immediately transition a target to OFFLINE: re-enable fp relogin for
 * its WWN, invalidate the port-device handle, then offline every LUN.
 *
 * Called with both the port mutex and the target mutex held.
 */
static void
fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int flags)
{
	ASSERT(mutex_owned(&pptr->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
	ptgt->tgt_state = FCP_TGT_OFFLINE;
	ptgt->tgt_pd_handle = NULL;
	fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
}
8316
8317
8318 static void
8319 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
8320 int flags)
8321 {
8322 struct fcp_lun *plun;
8323
8324 ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
8325 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8326
8327 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8328 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
8329 fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
8330 }
8331 }
8332 }
8333
8334
8335 /*
8336 * take a LUN offline
8337 *
8338 * enters and leaves with the target mutex held, releasing it in the process
8339 *
8340 * allocates memory in non-sleep mode
8341 */
8342 static void
8343 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8344 int nowait, int flags)
8345 {
8346 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
8347 struct fcp_lun_elem *elem;
8348
8349 ASSERT(plun != NULL);
8350 ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8351
8352 if (nowait) {
8353 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8354 return;
8355 }
8356
8357 if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8358 elem->flags = flags;
8359 elem->time = fcp_watchdog_time;
8360 if (nowait == 0) {
8361 elem->time += fcp_offline_delay;
8362 }
8363 elem->plun = plun;
8364 elem->link_cnt = link_cnt;
8365 elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
8366 elem->next = pptr->port_offline_luns;
8367 pptr->port_offline_luns = elem;
8368 } else {
8369 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8370 }
8371 }
8372
8373
/*
 * Abort all in-transport commands queued for this LUN and, for MPxIO
 * LUNs, clear path-busy and disable the path in anticipation of the
 * upcoming offline.
 *
 * Called with the target mutex held; the mutex is dropped and re-taken
 * around the command scan/abort and the mdi path-disable call.
 */
static void
fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
{
	struct fcp_pkt	*head = NULL;

	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	/* drop the lock: scanning/aborting takes other locks */
	mutex_exit(&LUN_TGT->tgt_mutex);

	head = fcp_scan_commands(plun);
	if (head != NULL) {
		fcp_abort_commands(head, LUN_PORT);
	}

	mutex_enter(&LUN_TGT->tgt_mutex);

	if (plun->lun_cip && plun->lun_mpxio) {
		/*
		 * Intimate MPxIO lun busy is cleared
		 */
		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
		    0, 0)) {
			fcp_log(CE_NOTE, LUN_PORT->port_dip,
			    "Can not ENABLE LUN; D_ID=%x, LUN=%x",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
		/*
		 * Intimate MPxIO that the lun is now marked for offline
		 */
		mutex_exit(&LUN_TGT->tgt_mutex);
		(void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
		mutex_enter(&LUN_TGT->tgt_mutex);
	}
}
8409
/*
 * Immediately offline a LUN: update its state flags, abort its queued
 * commands, and hand an OFFLINE element to the hotplug thread.
 *
 * Called with the target mutex held (dropped and re-taken internally).
 */
static void
fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
    int flags)
{
	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	/* flag update takes its own locks, so drop ours around it */
	mutex_exit(&LUN_TGT->tgt_mutex);
	fcp_update_offline_flags(plun);
	mutex_enter(&LUN_TGT->tgt_mutex);

	fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);

	FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_4, 0,
	    "offline_lun: passing OFFLINE elem to HP thread");

	/* only LUNs with an attached child need the hotplug hand-off */
	if (plun->lun_cip) {
		fcp_log(CE_NOTE, LUN_PORT->port_dip,
		    "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
		    plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
		    LUN_TGT->tgt_trace);

		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
		    link_cnt, tgt_cnt, flags, 0)) {
			fcp_log(CE_CONT, LUN_PORT->port_dip,
			    "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
	}
}
8440
8441 static void
8442 fcp_scan_offline_luns(struct fcp_port *pptr)
8443 {
8444 struct fcp_lun_elem *elem;
8445 struct fcp_lun_elem *prev;
8446 struct fcp_lun_elem *next;
8447
8448 ASSERT(MUTEX_HELD(&pptr->port_mutex));
8449
8450 prev = NULL;
8451 elem = pptr->port_offline_luns;
8452 while (elem) {
8453 next = elem->next;
8454 if (elem->time <= fcp_watchdog_time) {
8455 int changed = 1;
8456 struct fcp_tgt *ptgt = elem->plun->lun_tgt;
8457
8458 mutex_enter(&ptgt->tgt_mutex);
8459 if (pptr->port_link_cnt == elem->link_cnt &&
8460 ptgt->tgt_change_cnt == elem->tgt_cnt) {
8461 changed = 0;
8462 }
8463
8464 if (!changed &&
8465 !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
8466 fcp_offline_lun_now(elem->plun,
8467 elem->link_cnt, elem->tgt_cnt, elem->flags);
8468 }
8469 mutex_exit(&ptgt->tgt_mutex);
8470
8471 kmem_free(elem, sizeof (*elem));
8472
8473 if (prev) {
8474 prev->next = next;
8475 } else {
8476 pptr->port_offline_luns = next;
8477 }
8478 } else {
8479 prev = elem;
8480 }
8481 elem = next;
8482 }
8483 }
8484
8485
/*
 * Watchdog-driven sweep of the port's deferred-offline target list.
 * Expired elements are removed; a target is offlined only if it is not
 * already offline and the element is not outdated by later events.
 *
 * Called with the port mutex held.
 */
static void
fcp_scan_offline_tgts(struct fcp_port *pptr)
{
	struct fcp_tgt_elem	*elem;
	struct fcp_tgt_elem	*prev;
	struct fcp_tgt_elem	*next;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	prev = NULL;
	elem = pptr->port_offline_tgts;
	while (elem) {
		next = elem->next;
		if (elem->time <= fcp_watchdog_time) {
			int outdated = 1;
			struct fcp_tgt *ptgt = elem->ptgt;

			mutex_enter(&ptgt->tgt_mutex);

			if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
				/* No change on tgt since elem was created. */
				outdated = 0;
			} else if (ptgt->tgt_change_cnt == elem->tgt_cnt + 1 &&
			    pptr->port_link_cnt == elem->link_cnt + 1 &&
			    ptgt->tgt_statec_cause == FCP_CAUSE_LINK_DOWN) {
				/*
				 * Exactly one thing happened to the target
				 * inbetween: the local port went offline.
				 * For fp the remote port is already gone so
				 * it will not tell us again to offline the
				 * target. We must offline it now.
				 */
				outdated = 0;
			}

			if (!outdated && !(ptgt->tgt_state &
			    FCP_TGT_OFFLINE)) {
				fcp_offline_target_now(pptr,
				    ptgt, elem->link_cnt, elem->tgt_cnt,
				    elem->flags);
			}

			mutex_exit(&ptgt->tgt_mutex);

			kmem_free(elem, sizeof (*elem));

			/* unlink the expired element from the list */
			if (prev) {
				prev->next = next;
			} else {
				pptr->port_offline_tgts = next;
			}
		} else {
			prev = elem;
		}
		elem = next;
	}
}
8543
8544
/*
 * Mark a LUN offline (clearing INIT/BUSY/MARK) and, if a child node was
 * initialized for it, fire the FCAL remove event callbacks on that child.
 *
 * Acquires and releases the target mutex and the LUN mutex.  Note the
 * asymmetric unlock order: in the event branch the target mutex is
 * released before the LUN mutex, since resolving cdip only needs lun_cip.
 */
static void
fcp_update_offline_flags(struct fcp_lun *plun)
{
	struct fcp_port	*pptr = LUN_PORT;

	ASSERT(plun != NULL);

	mutex_enter(&LUN_TGT->tgt_mutex);
	plun->lun_state |= FCP_LUN_OFFLINE;
	plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);

	mutex_enter(&plun->lun_mutex);
	if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
		dev_info_t *cdip = NULL;

		mutex_exit(&LUN_TGT->tgt_mutex);

		/* resolve the child devinfo: direct for non-MPxIO, client for MPxIO */
		if (plun->lun_mpxio == 0) {
			cdip = DIP(plun->lun_cip);
		} else if (plun->lun_cip) {
			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
		}

		mutex_exit(&plun->lun_mutex);
		if (cdip) {
			/* notify interested children that the device is gone */
			(void) ndi_event_retrieve_cookie(
			    pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
			    &fcp_remove_eid, NDI_EVENT_NOPASS);
			(void) ndi_event_run_callbacks(
			    pptr->port_ndi_event_hdl, cdip,
			    fcp_remove_eid, NULL);
		}
	} else {
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&LUN_TGT->tgt_mutex);
	}
}
8581
8582
8583 /*
8584 * Scan all of the command pkts for this port, moving pkts that
8585 * match our LUN onto our own list (headed by "head")
8586 */
static struct fcp_pkt *
fcp_scan_commands(struct fcp_lun *plun)
{
	struct fcp_port	*pptr = LUN_PORT;

	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
	struct fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
	struct fcp_pkt	*pcmd = NULL;	/* the previous command */

	struct fcp_pkt	*head = NULL;	/* head of our list */
	struct fcp_pkt	*tail = NULL;	/* tail of our list */

	int cmds_found = 0;

	/* the port pkt queue is protected by its own mutex */
	mutex_enter(&pptr->port_pkt_mutex);
	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
		struct fcp_lun *tlun =
		    ADDR2LUN(&cmd->cmd_pkt->pkt_address);

		ncmd = cmd->cmd_next;	/* set next command */

		/*
		 * if this pkt is for a different LUN or the
		 * command is sent down, skip it.
		 */
		if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
		    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
			pcmd = cmd;
			continue;
		}
		cmds_found++;

		/* unlink cmd from the port queue (head case vs mid-list) */
		if (pcmd != NULL) {
			ASSERT(pptr->port_pkt_head != cmd);
			pcmd->cmd_next = cmd->cmd_next;
		} else {
			ASSERT(cmd == pptr->port_pkt_head);
			pptr->port_pkt_head = cmd->cmd_next;
		}

		/* removing the tail: back the tail pointer up */
		if (cmd == pptr->port_pkt_tail) {
			pptr->port_pkt_tail = pcmd;
			if (pcmd) {
				pcmd->cmd_next = NULL;
			}
		}

		/* append cmd to the list we return */
		if (head == NULL) {
			head = tail = cmd;
		} else {
			ASSERT(tail != NULL);

			tail->cmd_next = cmd;
			tail = cmd;
		}
		cmd->cmd_next = NULL;
	}
	mutex_exit(&pptr->port_pkt_mutex);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "scan commands: %d cmd(s) found", cmds_found);

	return (head);
}
8651
8652
8653 /*
8654 * Abort all the commands in the command queue
8655 */
static void
fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
{
	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
	struct fcp_pkt	*ncmd = NULL;	/* next pkt ptr */

	ASSERT(mutex_owned(&pptr->port_mutex));

	/* scan through the pkts and invalidate them */
	for (cmd = head; cmd != NULL; cmd = ncmd) {
		struct scsi_pkt *pkt = cmd->cmd_pkt;

		ncmd = cmd->cmd_next;
		ASSERT(pkt != NULL);

		/*
		 * The lun is going to be marked offline. Indicate
		 * the target driver not to requeue or retry this command
		 * as the device is going to be offlined pretty soon.
		 */
		pkt->pkt_reason = CMD_DEV_GONE;
		pkt->pkt_statistics = 0;
		pkt->pkt_state = 0;

		/* reset cmd flags/state */
		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
		cmd->cmd_state = FCP_PKT_IDLE;

		/*
		 * ensure we have a packet completion routine,
		 * then call it.
		 */
		ASSERT(pkt->pkt_comp != NULL);

		/* drop port_mutex around the callback to avoid deadlock */
		mutex_exit(&pptr->port_mutex);
		fcp_post_callback(cmd);
		mutex_enter(&pptr->port_mutex);
	}
}
8695
8696
8697 /*
8698 * the pkt_comp callback for command packets
8699 */
static void
fcp_cmd_callback(fc_packet_t *fpkt)
{
	struct fcp_pkt	*cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
	struct scsi_pkt	*pkt = cmd->cmd_pkt;
	struct fcp_port	*pptr = ADDR2FCP(&pkt->pkt_address);

	ASSERT(cmd->cmd_state != FCP_PKT_IDLE);

	/*
	 * Same check as the ASSERT above, but active on non-DEBUG
	 * kernels: a double completion is fatal.
	 */
	if (cmd->cmd_state == FCP_PKT_IDLE) {
		cmn_err(CE_PANIC, "Packet already completed %p",
		    (void *)cmd);
	}

	/*
	 * Watch thread should be freeing the packet, ignore the pkt.
	 */
	if (cmd->cmd_state == FCP_PKT_ABORTING) {
		fcp_log(CE_CONT, pptr->port_dip,
		    "!FCP: Pkt completed while aborting\n");
		return;
	}
	cmd->cmd_state = FCP_PKT_IDLE;

	fcp_complete_pkt(fpkt);

#ifdef DEBUG
	/* outstanding-packet accounting is maintained on DEBUG only */
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts--;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	fcp_post_callback(cmd);
}
8734
8735
/*
 * Function: fcp_complete_pkt
 *
 * Description: Translates the completion status of an FCP command carried
 *		by the fc_packet_t into the scsi_pkt fields the target
 *		drivers understand (pkt_state, pkt_reason, pkt_statistics,
 *		pkt_resid, the status byte and the auto-request-sense data).
 *
 *		On FC_PKT_SUCCESS the FCP_RSP IU is examined for residuals,
 *		FCP response-info errors and embedded sense data.  On any
 *		other fc packet state, the FC transport error is mapped to
 *		the closest scsi_pkt reason/statistics combination.
 *
 * Argument:	*fpkt	Completed fc packet; pkt_ulp_private points to the
 *			fcp_pkt being completed.
 *
 * Return Value: None
 */
static void
fcp_complete_pkt(fc_packet_t *fpkt)
{
	int error = 0;
	struct fcp_pkt *cmd = (struct fcp_pkt *)
	    fpkt->pkt_ulp_private;
	struct scsi_pkt *pkt = cmd->cmd_pkt;
	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
	struct fcp_lun *plun;
	struct fcp_tgt *ptgt;
	struct fcp_rsp *rsp;
	struct scsi_address save;

#ifdef DEBUG
	/*
	 * Snapshot the address so the trailing ASSERT can verify that the
	 * packet's HBA transport pointer was not corrupted while the
	 * command was outstanding.  'save' is only assigned under DEBUG;
	 * in non-DEBUG builds the ASSERT below compiles away, so it is
	 * never read uninitialized.
	 */
	save = pkt->pkt_address;
#endif /* DEBUG */

	rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;

	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
		/*
		 * If the FCA uses DVMA for the response, copy the FCP_RSP
		 * IU into the driver's local buffer first.
		 */
		if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));
		}

		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS;

		pkt->pkt_resid = 0;

		if (fpkt->pkt_datalen) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
			/* a data residual is treated as an error below */
			if (fpkt->pkt_data_resid) {
				error++;
			}
		}

		if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
		    rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
			/*
			 * The next two checks make sure that if there
			 * is no sense data or a valid response and
			 * the command came back with check condition,
			 * the command should be retried.
			 */
			if (!rsp->fcp_u.fcp_status.rsp_len_set &&
			    !rsp->fcp_u.fcp_status.sense_len_set) {
				pkt->pkt_state &= ~STATE_XFERRED_DATA;
				pkt->pkt_resid = cmd->cmd_dmacount;
			}
		}

		/*
		 * Fast path: no data residual, good SCSI status, no FCP
		 * residual -- nothing more to decode.
		 */
		if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
			return;
		}

		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		/*
		 * Update the transfer resid, if appropriate
		 */
		if (rsp->fcp_u.fcp_status.resid_over ||
		    rsp->fcp_u.fcp_status.resid_under) {
			pkt->pkt_resid = rsp->fcp_resid;
		}

		/*
		 * First see if we got a FCP protocol error.
		 */
		if (rsp->fcp_u.fcp_status.rsp_len_set) {
			struct fcp_rsp_info *bep;
			/* response info immediately follows the FCP_RSP */
			bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
			    sizeof (struct fcp_rsp));

			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				/*
				 * The response lengths are inconsistent;
				 * report CHECK CONDITION so the target
				 * driver retries the command.
				 */
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len, rsp->fcp_sense_len);

				return;
			}

			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp), bep,
				    fpkt->pkt_resp_acc,
				    sizeof (struct fcp_rsp_info));
			}

			if (bep->rsp_code != FCP_NO_FAILURE) {
				child_info_t *cip;

				pkt->pkt_reason = CMD_TRAN_ERR;

				mutex_enter(&plun->lun_mutex);
				cip = plun->lun_cip;
				mutex_exit(&plun->lun_mutex);

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "FCP response error on cmd=%p"
				    " target=0x%x, cip=%p", cmd,
				    ptgt->tgt_d_id, cip);
			}
		}

		/*
		 * See if we got a SCSI error with sense data
		 */
		if (rsp->fcp_u.fcp_status.sense_len_set) {
			uchar_t rqlen;
			caddr_t sense_from;
			child_info_t *cip;
			timeout_id_t tid;
			struct scsi_arq_status *arq;
			struct scsi_extended_sense *sense_to;

			arq = (struct scsi_arq_status *)pkt->pkt_scbp;
			sense_to = &arq->sts_sensedata;

			/* clamp to what scsi_extended_sense can hold */
			rqlen = (uchar_t)min(rsp->fcp_sense_len,
			    sizeof (struct scsi_extended_sense));

			/* sense data follows the FCP_RSP and response info */
			sense_from = (caddr_t)fpkt->pkt_resp +
			    sizeof (struct fcp_rsp) + rsp->fcp_response_len;

			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len, rsp->fcp_sense_len);

				return;
			}

			/*
			 * copy in sense information
			 */
			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(sense_from, sense_to,
				    fpkt->pkt_resp_acc, rqlen);
			} else {
				bcopy(sense_from, sense_to, rqlen);
			}

			/*
			 * REPORT LUNS DATA HAS CHANGED / LU NOT SUPPORTED
			 * sense indicates the LUN inventory changed behind
			 * our back; schedule a LUN reconfiguration unless
			 * one is already pending (tgt_tid non-NULL).
			 */
			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
			    (FCP_SENSE_NO_LUN(sense_to))) {
				mutex_enter(&ptgt->tgt_mutex);
				if (ptgt->tgt_tid == NULL) {
					/*
					 * Kick off rediscovery
					 */
					tid = timeout(fcp_reconfigure_luns,
					    (caddr_t)ptgt, drv_usectohz(1));

					ptgt->tgt_tid = tid;
					ptgt->tgt_state |= FCP_TGT_BUSY;
				}
				mutex_exit(&ptgt->tgt_mutex);
				if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
					FCP_TRACE(fcp_logq, pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: Report Lun Has Changed"
					    " target=%x", ptgt->tgt_d_id);
				} else if (FCP_SENSE_NO_LUN(sense_to)) {
					FCP_TRACE(fcp_logq, pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: LU Not Supported"
					    " target=%x", ptgt->tgt_d_id);
				}
			}
			ASSERT(pkt->pkt_scbp != NULL);

			/*
			 * Fill in the synthetic auto-request-sense packet
			 * status so the target driver sees a successful
			 * REQUEST SENSE.
			 */
			pkt->pkt_state |= STATE_ARQ_DONE;

			arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;

			*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
			arq->sts_rqpkt_reason = 0;
			arq->sts_rqpkt_statistics = 0;

			arq->sts_rqpkt_state = STATE_GOT_BUS |
			    STATE_GOT_TARGET | STATE_SENT_CMD |
			    STATE_GOT_STATUS | STATE_ARQ_DONE |
			    STATE_XFERRED_DATA;

			mutex_enter(&plun->lun_mutex);
			cip = plun->lun_cip;
			mutex_exit(&plun->lun_mutex);

			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_8, 0,
			    "SCSI Check condition on cmd=%p target=0x%x"
			    " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
			    " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
			    cmd->cmd_fcp_cmd.fcp_cdb[0],
			    rsp->fcp_u.fcp_status.scsi_status,
			    sense_to->es_key, sense_to->es_add_code,
			    sense_to->es_qual_code);
		}
	} else {
		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		/*
		 * Work harder to translate errors into target driver
		 * understandable ones. Note with despair that the target
		 * drivers don't decode pkt_state and pkt_reason exhaustively
		 * They resort to using the big hammer most often, which
		 * may not get fixed in the life time of this driver.
		 */
		pkt->pkt_state = 0;
		pkt->pkt_statistics = 0;

		switch (fpkt->pkt_state) {
		case FC_PKT_TRAN_ERROR:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OVERRUN:
				pkt->pkt_reason = CMD_CMD_OVR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_XCHG_BSY: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT;	/* Lie */

				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_ABORTED:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_ABORT_FAILED:
				pkt->pkt_reason = CMD_ABORT_FAIL;
				break;

			case FC_REASON_NO_SEQ_INIT:
			case FC_REASON_CRC_ERROR:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;
			default:
				pkt->pkt_reason = CMD_TRAN_ERR;
				break;
			}
			break;

		case FC_PKT_PORT_OFFLINE: {
			dev_info_t *cdip = NULL;
			caddr_t ptr;

			if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_8, 0,
				    "SCSI cmd; LOGIN REQUIRED from FCA for %x",
				    ptgt->tgt_d_id);
			}

			/*
			 * Resolve the child devinfo node; mpxio paths go
			 * through the mdi path-info client.
			 */
			mutex_enter(&plun->lun_mutex);
			if (plun->lun_mpxio == 0) {
				cdip = DIP(plun->lun_cip);
			} else if (plun->lun_cip) {
				cdip = mdi_pi_get_client(PIP(plun->lun_cip));
			}

			mutex_exit(&plun->lun_mutex);

			/* notify interested children of the removal event */
			if (cdip) {
				(void) ndi_event_retrieve_cookie(
				    pptr->port_ndi_event_hdl, cdip,
				    FCAL_REMOVE_EVENT, &fcp_remove_eid,
				    NDI_EVENT_NOPASS);
				(void) ndi_event_run_callbacks(
				    pptr->port_ndi_event_hdl, cdip,
				    fcp_remove_eid, NULL);
			}

			/*
			 * If the link goes off-line for a lip,
			 * this will cause a error to the ST SG
			 * SGEN drivers. By setting BUSY we will
			 * give the drivers the chance to retry
			 * before it blows of the job. ST will
			 * remember how many times it has retried.
			 */

			if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
			    (plun->lun_type == DTYPE_CHANGER)) {
				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
			} else {
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;
			}
			break;
		}

		case FC_PKT_TRAN_BSY:
			/*
			 * Use the ssd Qfull handling here.
			 */
			*pkt->pkt_scbp = STATUS_INTERMEDIATE;
			pkt->pkt_state = STATE_GOT_BUS;
			break;

		case FC_PKT_TIMEOUT:
			pkt->pkt_reason = CMD_TIMEOUT;
			if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
				pkt->pkt_statistics |= STAT_TIMEOUT;
			} else {
				pkt->pkt_statistics |= STAT_ABORTED;
			}
			break;

		case FC_PKT_LOCAL_RJT:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OFFLINE: {
				dev_info_t *cdip = NULL;

				mutex_enter(&plun->lun_mutex);
				if (plun->lun_mpxio == 0) {
					cdip = DIP(plun->lun_cip);
				} else if (plun->lun_cip) {
					cdip = mdi_pi_get_client(
					    PIP(plun->lun_cip));
				}
				mutex_exit(&plun->lun_mutex);

				if (cdip) {
					(void) ndi_event_retrieve_cookie(
					    pptr->port_ndi_event_hdl, cdip,
					    FCAL_REMOVE_EVENT,
					    &fcp_remove_eid,
					    NDI_EVENT_NOPASS);
					(void) ndi_event_run_callbacks(
					    pptr->port_ndi_event_hdl,
					    cdip, fcp_remove_eid, NULL);
				}

				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;

				break;
			}

			case FC_REASON_NOMEM:
			case FC_REASON_QFULL: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_DMA_ERROR:
				pkt->pkt_reason = CMD_DMA_DERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_CRC_ERROR:
			case FC_REASON_UNDERRUN: {
				uchar_t status;
				/*
				 * Work around for Bugid: 4240945.
				 * IB on A5k doesn't set the Underrun bit
				 * in the fcp status, when it is transferring
				 * less than requested amount of data. Work
				 * around the ses problem to keep luxadm
				 * happy till ibfirmware is fixed.
				 */
				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
					FCP_CP_IN(fpkt->pkt_resp, rsp,
					    fpkt->pkt_resp_acc,
					    sizeof (struct fcp_rsp));
				}
				status = rsp->fcp_u.fcp_status.scsi_status;
				if (((plun->lun_type & DTYPE_MASK) ==
				    DTYPE_ESI) && (status == STATUS_GOOD)) {
					pkt->pkt_reason = CMD_CMPLT;
					*pkt->pkt_scbp = status;
					pkt->pkt_resid = 0;
				} else {
					pkt->pkt_reason = CMD_TRAN_ERR;
					pkt->pkt_statistics |= STAT_ABORTED;
				}
				break;
			}

			case FC_REASON_NO_CONNECTION:
			case FC_REASON_UNSUPPORTED:
			case FC_REASON_ILLEGAL_REQ:
			case FC_REASON_BAD_SID:
			case FC_REASON_DIAG_BUSY:
			case FC_REASON_FCAL_OPN_FAIL:
			case FC_REASON_BAD_XID:
			default:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			}
			break;

		case FC_PKT_NPORT_RJT:
		case FC_PKT_FABRIC_RJT:
		case FC_PKT_NPORT_BSY:
		case FC_PKT_FABRIC_BSY:
		default:
			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_8, 0,
			    "FC Status 0x%x, reason 0x%x",
			    fpkt->pkt_state, fpkt->pkt_reason);
			pkt->pkt_reason = CMD_TRAN_ERR;
			pkt->pkt_statistics |= STAT_ABORTED;
			break;
		}

		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_9, 0,
		    "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
		    " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
		    fpkt->pkt_reason);
	}

	/* DEBUG-only: the packet address must not have changed under us */
	ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
}
9199
9200
9201 static int
9202 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9203 {
9204 if (rsp->reserved_0 || rsp->reserved_1 ||
9205 rsp->fcp_u.fcp_status.reserved_0 ||
9206 rsp->fcp_u.fcp_status.reserved_1) {
9207 /*
9208 * These reserved fields should ideally be zero. FCP-2 does say
9209 * that the recipient need not check for reserved fields to be
9210 * zero. If they are not zero, we will not make a fuss about it
9211 * - just log it (in debug to both trace buffer and messages
9212 * file and to trace buffer only in non-debug) and move on.
9213 *
9214 * Non-zero reserved fields were seen with minnows.
9215 *
9216 * qlc takes care of some of this but we cannot assume that all
9217 * FCAs will do so.
9218 */
9219 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9220 FCP_BUF_LEVEL_5, 0,
9221 "Got fcp response packet with non-zero reserved fields "
9222 "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9223 "status.reserved_0:0x%x, status.reserved_1:0x%x",
9224 rsp->reserved_0, rsp->reserved_1,
9225 rsp->fcp_u.fcp_status.reserved_0,
9226 rsp->fcp_u.fcp_status.reserved_1);
9227 }
9228
9229 if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9230 (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9231 return (FC_FAILURE);
9232 }
9233
9234 if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9235 (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9236 sizeof (struct fcp_rsp))) {
9237 return (FC_FAILURE);
9238 }
9239
9240 return (FC_SUCCESS);
9241 }
9242
9243
9244 /*
9245 * This is called when there is a change the in device state. The case we're
9246 * handling here is, if the d_id s does not match, offline this tgt and online
9247 * a new tgt with the new d_id. called from fcp_handle_devices with
9248 * port_mutex held.
9249 */
9250 static int
9251 fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
9252 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
9253 {
9254 ASSERT(mutex_owned(&pptr->port_mutex));
9255
9256 FCP_TRACE(fcp_logq, pptr->port_instbuf,
9257 fcp_trace, FCP_BUF_LEVEL_3, 0,
9258 "Starting fcp_device_changed...");
9259
9260 /*
9261 * The two cases where the port_device_changed is called is
9262 * either it changes it's d_id or it's hard address.
9263 */
9264 if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
9265 (FC_TOP_EXTERNAL(pptr->port_topology) &&
9266 (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {
9267
9268 /* offline this target */
9269 mutex_enter(&ptgt->tgt_mutex);
9270 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
9271 (void) fcp_offline_target(pptr, ptgt, link_cnt,
9272 0, 1, NDI_DEVI_REMOVE);
9273 }
9274 mutex_exit(&ptgt->tgt_mutex);
9275
9276 fcp_log(CE_NOTE, pptr->port_dip,
9277 "Change in target properties: Old D_ID=%x New D_ID=%x"
9278 " Old HA=%x New HA=%x", ptgt->tgt_d_id,
9279 map_entry->map_did.port_id, ptgt->tgt_hard_addr,
9280 map_entry->map_hard_addr.hard_addr);
9281 }
9282
9283 return (fcp_handle_mapflags(pptr, ptgt, map_entry,
9284 link_cnt, tgt_cnt, cause));
9285 }
9286
9287 /*
9288 * Function: fcp_alloc_lun
9289 *
9290 * Description: Creates a new lun structure and adds it to the list
9291 * of luns of the target.
9292 *
9293 * Argument: ptgt Target the lun will belong to.
9294 *
9295 * Return Value: NULL Failed
9296 * Not NULL Succeeded
9297 *
9298 * Context: Kernel context
9299 */
9300 static struct fcp_lun *
9301 fcp_alloc_lun(struct fcp_tgt *ptgt)
9302 {
9303 struct fcp_lun *plun;
9304
9305 plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9306 if (plun != NULL) {
9307 /*
9308 * Initialize the mutex before putting in the target list
9309 * especially before releasing the target mutex.
9310 */
9311 mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9312 plun->lun_tgt = ptgt;
9313
9314 mutex_enter(&ptgt->tgt_mutex);
9315 plun->lun_next = ptgt->tgt_lun;
9316 ptgt->tgt_lun = plun;
9317 plun->lun_old_guid = NULL;
9318 plun->lun_old_guid_size = 0;
9319 mutex_exit(&ptgt->tgt_mutex);
9320 }
9321
9322 return (plun);
9323 }
9324
9325 /*
9326 * Function: fcp_dealloc_lun
9327 *
9328 * Description: Frees the LUN structure passed by the caller.
9329 *
9330 * Argument: plun LUN structure to free.
9331 *
9332 * Return Value: None
9333 *
9334 * Context: Kernel context.
9335 */
9336 static void
9337 fcp_dealloc_lun(struct fcp_lun *plun)
9338 {
9339 mutex_enter(&plun->lun_mutex);
9340 if (plun->lun_cip) {
9341 fcp_remove_child(plun);
9342 }
9343 mutex_exit(&plun->lun_mutex);
9344
9345 mutex_destroy(&plun->lun_mutex);
9346 if (plun->lun_guid) {
9347 kmem_free(plun->lun_guid, plun->lun_guid_size);
9348 }
9349 if (plun->lun_old_guid) {
9350 kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
9351 }
9352 kmem_free(plun, sizeof (*plun));
9353 }
9354
9355 /*
9356 * Function: fcp_alloc_tgt
9357 *
9358 * Description: Creates a new target structure and adds it to the port
9359 * hash list.
9360 *
9361 * Argument: pptr fcp port structure
9362 * *map_entry entry describing the target to create
9363 * link_cnt Link state change counter
9364 *
9365 * Return Value: NULL Failed
9366 * Not NULL Succeeded
9367 *
9368 * Context: Kernel context.
9369 */
9370 static struct fcp_tgt *
9371 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
9372 {
9373 int hash;
9374 uchar_t *wwn;
9375 struct fcp_tgt *ptgt;
9376
9377 ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
9378 if (ptgt != NULL) {
9379 mutex_enter(&pptr->port_mutex);
9380 if (link_cnt != pptr->port_link_cnt) {
9381 /*
9382 * oh oh -- another link reset
9383 * in progress -- give up
9384 */
9385 mutex_exit(&pptr->port_mutex);
9386 kmem_free(ptgt, sizeof (*ptgt));
9387 ptgt = NULL;
9388 } else {
9389 /*
9390 * initialize the mutex before putting in the port
9391 * wwn list, especially before releasing the port
9392 * mutex.
9393 */
9394 mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);
9395
9396 /* add new target entry to the port's hash list */
9397 wwn = (uchar_t *)&map_entry->map_pwwn;
9398 hash = FCP_HASH(wwn);
9399
9400 ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
9401 pptr->port_tgt_hash_table[hash] = ptgt;
9402
9403 /* save cross-ptr */
9404 ptgt->tgt_port = pptr;
9405
9406 ptgt->tgt_change_cnt = 1;
9407
9408 /* initialize the target manual_config_only flag */
9409 if (fcp_enable_auto_configuration) {
9410 ptgt->tgt_manual_config_only = 0;
9411 } else {
9412 ptgt->tgt_manual_config_only = 1;
9413 }
9414
9415 mutex_exit(&pptr->port_mutex);
9416 }
9417 }
9418
9419 return (ptgt);
9420 }
9421
9422 /*
9423 * Function: fcp_dealloc_tgt
9424 *
9425 * Description: Frees the target structure passed by the caller.
9426 *
9427 * Argument: ptgt Target structure to free.
9428 *
9429 * Return Value: None
9430 *
9431 * Context: Kernel context.
9432 */
9433 static void
9434 fcp_dealloc_tgt(struct fcp_tgt *ptgt)
9435 {
9436 mutex_destroy(&ptgt->tgt_mutex);
9437 kmem_free(ptgt, sizeof (*ptgt));
9438 }
9439
9440
9441 /*
9442 * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry
9443 *
9444 * Device discovery commands will not be retried for-ever as
9445 * this will have repercussions on other devices that need to
9446 * be submitted to the hotplug thread. After a quick glance
9447 * at the SCSI-3 spec, it was found that the spec doesn't
9448 * mandate a forever retry, rather recommends a delayed retry.
9449 *
9450 * Since Photon IB is single threaded, STATUS_BUSY is common
9451 * in a 4+initiator environment. Make sure the total time
 * spent on retries (including command timeout) does not
 * exceed 60 seconds
9454 */
9455 static void
9456 fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
9457 {
9458 struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9459 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
9460
9461 mutex_enter(&pptr->port_mutex);
9462 mutex_enter(&ptgt->tgt_mutex);
9463 if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
9464 FCP_TRACE(fcp_logq, pptr->port_instbuf,
9465 fcp_trace, FCP_BUF_LEVEL_2, 0,
9466 "fcp_queue_ipkt,1:state change occured"
9467 " for D_ID=0x%x", ptgt->tgt_d_id);
9468 mutex_exit(&ptgt->tgt_mutex);
9469 mutex_exit(&pptr->port_mutex);
9470 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
9471 icmd->ipkt_change_cnt, icmd->ipkt_cause);
9472 fcp_icmd_free(pptr, icmd);
9473 return;
9474 }
9475 mutex_exit(&ptgt->tgt_mutex);
9476
9477 icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;
9478
9479 if (pptr->port_ipkt_list != NULL) {
9480 /* add pkt to front of doubly-linked list */
9481 pptr->port_ipkt_list->ipkt_prev = icmd;
9482 icmd->ipkt_next = pptr->port_ipkt_list;
9483 pptr->port_ipkt_list = icmd;
9484 icmd->ipkt_prev = NULL;
9485 } else {
9486 /* this is the first/only pkt on the list */
9487 pptr->port_ipkt_list = icmd;
9488 icmd->ipkt_next = NULL;
9489 icmd->ipkt_prev = NULL;
9490 }
9491 mutex_exit(&pptr->port_mutex);
9492 }
9493
9494 /*
9495 * Function: fcp_transport
9496 *
9497 * Description: This function submits the Fibre Channel packet to the transort
9498 * layer by calling fc_ulp_transport(). If fc_ulp_transport()
9499 * fails the submission, the treatment depends on the value of
9500 * the variable internal.
9501 *
9502 * Argument: port_handle fp/fctl port handle.
9503 * *fpkt Packet to submit to the transport layer.
9504 * internal Not zero when it's an internal packet.
9505 *
9506 * Return Value: FC_TRAN_BUSY
9507 * FC_STATEC_BUSY
9508 * FC_OFFLINE
9509 * FC_LOGINREQ
9510 * FC_DEVICE_BUSY
9511 * FC_SUCCESS
9512 */
9513 static int
9514 fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
9515 {
9516 int rval;
9517
9518 rval = fc_ulp_transport(port_handle, fpkt);
9519 if (rval == FC_SUCCESS) {
9520 return (rval);
9521 }
9522
9523 /*
9524 * LUN isn't marked BUSY or OFFLINE, so we got here to transport
9525 * a command, if the underlying modules see that there is a state
9526 * change, or if a port is OFFLINE, that means, that state change
9527 * hasn't reached FCP yet, so re-queue the command for deferred
9528 * submission.
9529 */
9530 if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
9531 (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
9532 (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
9533 /*
9534 * Defer packet re-submission. Life hang is possible on
9535 * internal commands if the port driver sends FC_STATEC_BUSY
9536 * for ever, but that shouldn't happen in a good environment.
9537 * Limiting re-transport for internal commands is probably a
9538 * good idea..
9539 * A race condition can happen when a port sees barrage of
9540 * link transitions offline to online. If the FCTL has
9541 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
9542 * internal commands should be queued to do the discovery.
9543 * The race condition is when an online comes and FCP starts
9544 * its internal discovery and the link goes offline. It is
9545 * possible that the statec_callback has not reached FCP
9546 * and FCP is carrying on with its internal discovery.
9547 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
9548 * that the link has gone offline. At this point FCP should
9549 * drop all the internal commands and wait for the
9550 * statec_callback. It will be facilitated by incrementing
9551 * port_link_cnt.
9552 *
9553 * For external commands, the (FC)pkt_timeout is decremented
9554 * by the QUEUE Delay added by our driver, Care is taken to
9555 * ensure that it doesn't become zero (zero means no timeout)
9556 * If the time expires right inside driver queue itself,
9557 * the watch thread will return it to the original caller
9558 * indicating that the command has timed-out.
9559 */
9560 if (internal) {
9561 char *op;
9562 struct fcp_ipkt *icmd;
9563
9564 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9565 switch (icmd->ipkt_opcode) {
9566 case SCMD_REPORT_LUN:
9567 op = "REPORT LUN";
9568 break;
9569
9570 case SCMD_INQUIRY:
9571 op = "INQUIRY";
9572 break;
9573
9574 case SCMD_INQUIRY_PAGE83:
9575 op = "INQUIRY-83";
9576 break;
9577
9578 default:
9579 op = "Internal SCSI COMMAND";
9580 break;
9581 }
9582
9583 if (fcp_handle_ipkt_errors(icmd->ipkt_port,
9584 icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
9585 rval = FC_SUCCESS;
9586 }
9587 } else {
9588 struct fcp_pkt *cmd;
9589 struct fcp_port *pptr;
9590
9591 cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
9592 cmd->cmd_state = FCP_PKT_IDLE;
9593 pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);
9594
9595 if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
9596 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9597 fcp_trace, FCP_BUF_LEVEL_9, 0,
9598 "fcp_transport: xport busy for pkt %p",
9599 cmd->cmd_pkt);
9600 rval = FC_TRAN_BUSY;
9601 } else {
9602 fcp_queue_pkt(pptr, cmd);
9603 rval = FC_SUCCESS;
9604 }
9605 }
9606 }
9607
9608 return (rval);
9609 }
9610
9611 /*VARARGS3*/
9612 static void
9613 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9614 {
9615 char buf[256];
9616 va_list ap;
9617
9618 if (dip == NULL) {
9619 dip = fcp_global_dip;
9620 }
9621
9622 va_start(ap, fmt);
9623 (void) vsprintf(buf, fmt, ap);
9624 va_end(ap);
9625
9626 scsi_log(dip, "fcp", level, buf);
9627 }
9628
9629 /*
9630 * This function retries NS registry of FC4 type.
9631 * It assumes that fcp_mutex is held.
9632 * The function does nothing if topology is not fabric
9633 * So, the topology has to be set before this function can be called
9634 */
9635 static void
9636 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9637 {
9638 int rval;
9639
9640 ASSERT(MUTEX_HELD(&pptr->port_mutex));
9641
9642 if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9643 ((pptr->port_topology != FC_TOP_FABRIC) &&
9644 (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9645 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9646 pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9647 }
9648 return;
9649 }
9650 mutex_exit(&pptr->port_mutex);
9651 rval = fcp_do_ns_registry(pptr, s_id);
9652 mutex_enter(&pptr->port_mutex);
9653
9654 if (rval == 0) {
9655 /* Registry successful. Reset flag */
9656 pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9657 }
9658 }
9659
9660 /*
9661 * This function registers the ULP with the switch by calling transport i/f
9662 */
9663 static int
9664 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9665 {
9666 fc_ns_cmd_t ns_cmd;
9667 ns_rfc_type_t rfc;
9668 uint32_t types[8];
9669
9670 /*
9671 * Prepare the Name server structure to
9672 * register with the transport in case of
9673 * Fabric configuration.
9674 */
9675 bzero(&rfc, sizeof (rfc));
9676 bzero(types, sizeof (types));
9677
9678 types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9679 (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
9680
9681 rfc.rfc_port_id.port_id = s_id;
9682 bcopy(types, rfc.rfc_types, sizeof (types));
9683
9684 ns_cmd.ns_flags = 0;
9685 ns_cmd.ns_cmd = NS_RFT_ID;
9686 ns_cmd.ns_req_len = sizeof (rfc);
9687 ns_cmd.ns_req_payload = (caddr_t)&rfc;
9688 ns_cmd.ns_resp_len = 0;
9689 ns_cmd.ns_resp_payload = NULL;
9690
9691 /*
9692 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9693 */
9694 if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9695 fcp_log(CE_WARN, pptr->port_dip,
9696 "!ns_registry: failed name server registration");
9697 return (1);
9698 }
9699
9700 return (0);
9701 }
9702
9703 /*
9704 * Function: fcp_handle_port_attach
9705 *
9706 * Description: This function is called from fcp_port_attach() to attach a
9707 * new port. This routine does the following:
9708 *
9709 * 1) Allocates an fcp_port structure and initializes it.
9710 * 2) Tries to register the new FC-4 (FCP) capablity with the name
9711 * server.
9712 * 3) Kicks off the enumeration of the targets/luns visible
9713 * through this new port. That is done by calling
9714 * fcp_statec_callback() if the port is online.
9715 *
9716 * Argument: ulph fp/fctl port handle.
9717 * *pinfo Port information.
9718 * s_id Port ID.
9719 * instance Device instance number for the local port
9720 * (returned by ddi_get_instance()).
9721 *
9722 * Return Value: DDI_SUCCESS
9723 * DDI_FAILURE
9724 *
9725 * Context: User and Kernel context.
9726 */
9727 /*ARGSUSED*/
9728 int
9729 fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
9730 uint32_t s_id, int instance)
9731 {
9732 int res = DDI_FAILURE;
9733 scsi_hba_tran_t *tran;
9734 int mutex_initted = FALSE;
9735 int hba_attached = FALSE;
9736 int soft_state_linked = FALSE;
9737 int event_bind = FALSE;
9738 struct fcp_port *pptr;
9739 fc_portmap_t *tmp_list = NULL;
9740 uint32_t max_cnt, alloc_cnt;
9741 uchar_t *boot_wwn = NULL;
9742 uint_t nbytes;
9743 int manual_cfg;
9744
9745 /*
9746 * this port instance attaching for the first time (or after
9747 * being detached before)
9748 */
9749 FCP_TRACE(fcp_logq, "fcp", fcp_trace,
9750 FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);
9751
9752 if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
9753 cmn_err(CE_WARN, "fcp: Softstate struct alloc failed"
9754 "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
9755 instance);
9756 return (res);
9757 }
9758
9759 if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
9760 /* this shouldn't happen */
9761 ddi_soft_state_free(fcp_softstate, instance);
9762 cmn_err(CE_WARN, "fcp: bad soft state");
9763 return (res);
9764 }
9765
9766 (void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);
9767
9768 /*
9769 * Make a copy of ulp_port_info as fctl allocates
9770 * a temp struct.
9771 */
9772 (void) fcp_cp_pinfo(pptr, pinfo);
9773
9774 /*
9775 * Check for manual_configuration_only property.
9776 * Enable manual configurtion if the property is
9777 * set to 1, otherwise disable manual configuration.
9778 */
9779 if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
9780 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9781 MANUAL_CFG_ONLY,
9782 -1)) != -1) {
9783 if (manual_cfg == 1) {
9784 char *pathname;
9785 pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
9786 (void) ddi_pathname(pptr->port_dip, pathname);
9787 cmn_err(CE_NOTE,
9788 "%s (%s%d) %s is enabled via %s.conf.",
9789 pathname,
9790 ddi_driver_name(pptr->port_dip),
9791 ddi_get_instance(pptr->port_dip),
9792 MANUAL_CFG_ONLY,
9793 ddi_driver_name(pptr->port_dip));
9794 fcp_enable_auto_configuration = 0;
9795 kmem_free(pathname, MAXPATHLEN);
9796 }
9797 }
9798 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9799 pptr->port_link_cnt = 1;
9800 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9801 pptr->port_id = s_id;
9802 pptr->port_instance = instance;
9803 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state));
9804 pptr->port_state = FCP_STATE_INIT;
9805 if (pinfo->port_acc_attr == NULL) {
9806 /*
9807 * The corresponding FCA doesn't support DMA at all
9808 */
9809 pptr->port_state |= FCP_STATE_FCA_IS_NODMA;
9810 }
9811
9812 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state));
9813
9814 if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
9815 /*
9816 * If FCA supports DMA in SCSI data phase, we need preallocate
9817 * dma cookie, so stash the cookie size
9818 */
9819 pptr->port_dmacookie_sz = sizeof (ddi_dma_cookie_t) *
9820 pptr->port_data_dma_attr.dma_attr_sgllen;
9821 }
9822
9823 /*
9824 * The two mutexes of fcp_port are initialized. The variable
9825 * mutex_initted is incremented to remember that fact. That variable
9826 * is checked when the routine fails and the mutexes have to be
9827 * destroyed.
9828 */
9829 mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
9830 mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
9831 mutex_initted++;
9832
9833 /*
9834 * The SCSI tran structure is allocate and initialized now.
9835 */
9836 if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
9837 fcp_log(CE_WARN, pptr->port_dip,
9838 "!fcp%d: scsi_hba_tran_alloc failed", instance);
9839 goto fail;
9840 }
9841
9842 /* link in the transport structure then fill it in */
9843 pptr->port_tran = tran;
9844 tran->tran_hba_private = pptr;
9845 tran->tran_tgt_init = fcp_scsi_tgt_init;
9846 tran->tran_tgt_probe = NULL;
9847 tran->tran_tgt_free = fcp_scsi_tgt_free;
9848 tran->tran_start = fcp_scsi_start;
9849 tran->tran_reset = fcp_scsi_reset;
9850 tran->tran_abort = fcp_scsi_abort;
9851 tran->tran_getcap = fcp_scsi_getcap;
9852 tran->tran_setcap = fcp_scsi_setcap;
9853 tran->tran_init_pkt = NULL;
9854 tran->tran_destroy_pkt = NULL;
9855 tran->tran_dmafree = NULL;
9856 tran->tran_sync_pkt = NULL;
9857 tran->tran_reset_notify = fcp_scsi_reset_notify;
9858 tran->tran_get_bus_addr = fcp_scsi_get_bus_addr;
9859 tran->tran_get_name = fcp_scsi_get_name;
9860 tran->tran_clear_aca = NULL;
9861 tran->tran_clear_task_set = NULL;
9862 tran->tran_terminate_task = NULL;
9863 tran->tran_get_eventcookie = fcp_scsi_bus_get_eventcookie;
9864 tran->tran_add_eventcall = fcp_scsi_bus_add_eventcall;
9865 tran->tran_remove_eventcall = fcp_scsi_bus_remove_eventcall;
9866 tran->tran_post_event = fcp_scsi_bus_post_event;
9867 tran->tran_quiesce = NULL;
9868 tran->tran_unquiesce = NULL;
9869 tran->tran_bus_reset = NULL;
9870 tran->tran_bus_config = fcp_scsi_bus_config;
9871 tran->tran_bus_unconfig = fcp_scsi_bus_unconfig;
9872 tran->tran_bus_power = NULL;
9873 tran->tran_interconnect_type = INTERCONNECT_FABRIC;
9874
9875 tran->tran_pkt_constructor = fcp_kmem_cache_constructor;
9876 tran->tran_pkt_destructor = fcp_kmem_cache_destructor;
9877 tran->tran_setup_pkt = fcp_pkt_setup;
9878 tran->tran_teardown_pkt = fcp_pkt_teardown;
9879 tran->tran_hba_len = pptr->port_priv_pkt_len +
9880 sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
9881 if (pptr->port_state & FCP_STATE_FCA_IS_NODMA) {
9882 /*
9883 * If FCA don't support DMA, then we use different vectors to
9884 * minimize the effects on DMA code flow path
9885 */
9886 tran->tran_start = fcp_pseudo_start;
9887 tran->tran_init_pkt = fcp_pseudo_init_pkt;
9888 tran->tran_destroy_pkt = fcp_pseudo_destroy_pkt;
9889 tran->tran_sync_pkt = fcp_pseudo_sync_pkt;
9890 tran->tran_dmafree = fcp_pseudo_dmafree;
9891 tran->tran_setup_pkt = NULL;
9892 tran->tran_teardown_pkt = NULL;
9893 tran->tran_pkt_constructor = NULL;
9894 tran->tran_pkt_destructor = NULL;
9895 pptr->port_data_dma_attr = pseudo_fca_dma_attr;
9896 }
9897
9898 /*
9899 * Allocate an ndi event handle
9900 */
9901 pptr->port_ndi_event_defs = (ndi_event_definition_t *)
9902 kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);
9903
9904 bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
9905 sizeof (fcp_ndi_event_defs));
9906
9907 (void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
9908 &pptr->port_ndi_event_hdl, NDI_SLEEP);
9909
9910 pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
9911 pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
9912 pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;
9913
9914 if (DEVI_IS_ATTACHING(pptr->port_dip) &&
9915 (ndi_event_bind_set(pptr->port_ndi_event_hdl,
9916 &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
9917 goto fail;
9918 }
9919 event_bind++; /* Checked in fail case */
9920
9921 if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
9922 tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
9923 != DDI_SUCCESS) {
9924 fcp_log(CE_WARN, pptr->port_dip,
9925 "!fcp%d: scsi_hba_attach_setup failed", instance);
9926 goto fail;
9927 }
9928 hba_attached++; /* Checked in fail case */
9929
9930 pptr->port_mpxio = 0;
9931 if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
9932 MDI_SUCCESS) {
9933 pptr->port_mpxio++;
9934 }
9935
9936 /*
9937 * The following code is putting the new port structure in the global
9938 * list of ports and, if it is the first port to attach, it start the
9939 * fcp_watchdog_tick.
9940 *
9941 * Why put this new port in the global before we are done attaching it?
9942 * We are actually making the structure globally known before we are
9943 * done attaching it. The reason for that is: because of the code that
9944 * follows. At this point the resources to handle the port are
9945 * allocated. This function is now going to do the following:
9946 *
9947 * 1) It is going to try to register with the name server advertizing
9948 * the new FCP capability of the port.
9949 * 2) It is going to play the role of the fp/fctl layer by building
	 *	   a list of worldwide names reachable through this port and call
9951 * itself on fcp_statec_callback(). That requires the port to
9952 * be part of the global list.
9953 */
9954 mutex_enter(&fcp_global_mutex);
9955 if (fcp_port_head == NULL) {
9956 fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
9957 }
9958 pptr->port_next = fcp_port_head;
9959 fcp_port_head = pptr;
9960 soft_state_linked++;
9961
9962 if (fcp_watchdog_init++ == 0) {
9963 fcp_watchdog_tick = fcp_watchdog_timeout *
9964 drv_usectohz(1000000);
9965 fcp_watchdog_id = timeout(fcp_watch, NULL,
9966 fcp_watchdog_tick);
9967 }
9968 mutex_exit(&fcp_global_mutex);
9969
9970 /*
9971 * Here an attempt is made to register with the name server, the new
9972 * FCP capability. That is done using an RTF_ID to the name server.
9973 * It is done synchronously. The function fcp_do_ns_registry()
9974 * doesn't return till the name server responded.
9975 * On failures, just ignore it for now and it will get retried during
9976 * state change callbacks. We'll set a flag to show this failure
9977 */
9978 if (fcp_do_ns_registry(pptr, s_id)) {
9979 mutex_enter(&pptr->port_mutex);
9980 pptr->port_state |= FCP_STATE_NS_REG_FAILED;
9981 mutex_exit(&pptr->port_mutex);
9982 } else {
9983 mutex_enter(&pptr->port_mutex);
9984 pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9985 mutex_exit(&pptr->port_mutex);
9986 }
9987
9988 /*
9989 * Lookup for boot WWN property
9990 */
9991 if (modrootloaded != 1) {
9992 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
9993 ddi_get_parent(pinfo->port_dip),
9994 DDI_PROP_DONTPASS, OBP_BOOT_WWN,
9995 &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
9996 (nbytes == FC_WWN_SIZE)) {
9997 bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
9998 }
9999 if (boot_wwn) {
10000 ddi_prop_free(boot_wwn);
10001 }
10002 }
10003
10004 /*
10005 * Handle various topologies and link states.
10006 */
10007 switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
10008 case FC_STATE_OFFLINE:
10009
10010 /*
10011 * we're attaching a port where the link is offline
10012 *
10013 * Wait for ONLINE, at which time a state
10014 * change will cause a statec_callback
10015 *
10016 * in the mean time, do not do anything
10017 */
10018 res = DDI_SUCCESS;
10019 pptr->port_state |= FCP_STATE_OFFLINE;
10020 break;
10021
10022 case FC_STATE_ONLINE: {
10023 if (pptr->port_topology == FC_TOP_UNKNOWN) {
10024 (void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
10025 res = DDI_SUCCESS;
10026 break;
10027 }
10028 /*
10029 * discover devices and create nodes (a private
10030 * loop or point-to-point)
10031 */
10032 ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
10033
10034 /*
10035 * At this point we are going to build a list of all the ports
10036 * that can be reached through this local port. It looks like
10037 * we cannot handle more than FCP_MAX_DEVICES per local port
10038 * (128).
10039 */
10040 if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
10041 sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
10042 KM_NOSLEEP)) == NULL) {
10043 fcp_log(CE_WARN, pptr->port_dip,
10044 "!fcp%d: failed to allocate portmap",
10045 instance);
10046 goto fail;
10047 }
10048
10049 /*
10050 * fc_ulp_getportmap() is going to provide us with the list of
10051 * remote ports in the buffer we just allocated. The way the
10052 * list is going to be retrieved depends on the topology.
10053 * However, if we are connected to a Fabric, a name server
10054 * request may be sent to get the list of FCP capable ports.
		 * It should be noted that in that case the request is
		 * synchronous.  This means we are stuck here till the name
10057 * server replies. A lot of things can change during that time
10058 * and including, may be, being called on
10059 * fcp_statec_callback() for different reasons. I'm not sure
10060 * the code can handle that.
10061 */
10062 max_cnt = FCP_MAX_DEVICES;
10063 alloc_cnt = FCP_MAX_DEVICES;
10064 if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
10065 &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
10066 FC_SUCCESS) {
10067 caddr_t msg;
10068
10069 (void) fc_ulp_error(res, &msg);
10070
10071 /*
10072 * this just means the transport is
10073 * busy perhaps building a portmap so,
10074 * for now, succeed this port attach
10075 * when the transport has a new map,
10076 * it'll send us a state change then
10077 */
10078 fcp_log(CE_WARN, pptr->port_dip,
10079 "!failed to get port map : %s", msg);
10080
10081 res = DDI_SUCCESS;
10082 break; /* go return result */
10083 }
10084 if (max_cnt > alloc_cnt) {
10085 alloc_cnt = max_cnt;
10086 }
10087
10088 /*
10089 * We are now going to call fcp_statec_callback() ourselves.
10090 * By issuing this call we are trying to kick off the enumera-
10091 * tion process.
10092 */
10093 /*
10094 * let the state change callback do the SCSI device
10095 * discovery and create the devinfos
10096 */
10097 fcp_statec_callback(ulph, pptr->port_fp_handle,
10098 pptr->port_phys_state, pptr->port_topology, tmp_list,
10099 max_cnt, pptr->port_id);
10100
10101 res = DDI_SUCCESS;
10102 break;
10103 }
10104
10105 default:
10106 /* unknown port state */
10107 fcp_log(CE_WARN, pptr->port_dip,
10108 "!fcp%d: invalid port state at attach=0x%x",
10109 instance, pptr->port_phys_state);
10110
10111 mutex_enter(&pptr->port_mutex);
10112 pptr->port_phys_state = FCP_STATE_OFFLINE;
10113 mutex_exit(&pptr->port_mutex);
10114
10115 res = DDI_SUCCESS;
10116 break;
10117 }
10118
10119 /* free temp list if used */
10120 if (tmp_list != NULL) {
10121 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10122 }
10123
10124 /* note the attach time */
10125 pptr->port_attach_time = ddi_get_lbolt64();
10126
10127 /* all done */
10128 return (res);
10129
10130 /* a failure we have to clean up after */
10131 fail:
10132 fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");
10133
10134 if (soft_state_linked) {
10135 /* remove this fcp_port from the linked list */
10136 (void) fcp_soft_state_unlink(pptr);
10137 }
10138
10139 /* unbind and free event set */
10140 if (pptr->port_ndi_event_hdl) {
10141 if (event_bind) {
10142 (void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10143 &pptr->port_ndi_events, NDI_SLEEP);
10144 }
10145 (void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10146 }
10147
10148 if (pptr->port_ndi_event_defs) {
10149 (void) kmem_free(pptr->port_ndi_event_defs,
10150 sizeof (fcp_ndi_event_defs));
10151 }
10152
10153 /*
10154 * Clean up mpxio stuff
10155 */
10156 if (pptr->port_mpxio) {
10157 (void) mdi_phci_unregister(pptr->port_dip, 0);
10158 pptr->port_mpxio--;
10159 }
10160
10161 /* undo SCSI HBA setup */
10162 if (hba_attached) {
10163 (void) scsi_hba_detach(pptr->port_dip);
10164 }
10165 if (pptr->port_tran != NULL) {
10166 scsi_hba_tran_free(pptr->port_tran);
10167 }
10168
10169 mutex_enter(&fcp_global_mutex);
10170
10171 /*
10172 * We check soft_state_linked, because it is incremented right before
10173 * we call increment fcp_watchdog_init. Therefore, we know if
10174 * soft_state_linked is still FALSE, we do not want to decrement
10175 * fcp_watchdog_init or possibly call untimeout.
10176 */
10177
10178 if (soft_state_linked) {
10179 if (--fcp_watchdog_init == 0) {
10180 timeout_id_t tid = fcp_watchdog_id;
10181
10182 mutex_exit(&fcp_global_mutex);
10183 (void) untimeout(tid);
10184 } else {
10185 mutex_exit(&fcp_global_mutex);
10186 }
10187 } else {
10188 mutex_exit(&fcp_global_mutex);
10189 }
10190
10191 if (mutex_initted) {
10192 mutex_destroy(&pptr->port_mutex);
10193 mutex_destroy(&pptr->port_pkt_mutex);
10194 }
10195
10196 if (tmp_list != NULL) {
10197 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10198 }
10199
10200 /* this makes pptr invalid */
10201 ddi_soft_state_free(fcp_softstate, instance);
10202
10203 return (DDI_FAILURE);
10204 }
10205
10206
/*
 * Function: fcp_handle_port_detach
 *
 * Description: Handles DDI detach, suspend and power-down of a port.  It
 *		waits for in-flight reconfiguration/internal-packet activity
 *		to drain, marks the port offline, stops the watchdog timeout
 *		if this port was the last user of it and, on a real detach,
 *		unlinks the port from the global list and frees it.
 *
 * Argument:	*pptr		Port being taken down.
 *		flag		FCP_STATE_DETACHING, FCP_STATE_SUSPENDED or
 *				FCP_STATE_POWER_DOWN (ORed into port_state).
 *		instance	Instance number, used to free the soft state
 *				on a real detach.
 *
 * Return Value: FC_SUCCESS
 *		 FC_FAILURE	The port is busy (MDI operation in progress,
 *				or activity did not drain in time).
 */
static int
fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
{
	/* Seconds spent so far waiting for the port to quiesce. */
	int count = 0;

	mutex_enter(&pptr->port_mutex);

	/*
	 * if the port is powered down or suspended, nothing else
	 * to do; just return.
	 */
	if (flag != FCP_STATE_DETACHING) {
		if (pptr->port_state & (FCP_STATE_POWER_DOWN |
		    FCP_STATE_SUSPENDED)) {
			pptr->port_state |= flag;
			mutex_exit(&pptr->port_mutex);
			return (FC_SUCCESS);
		}
	}

	/* An MDI operation is in progress; the port cannot go away now. */
	if (pptr->port_state & FCP_STATE_IN_MDI) {
		mutex_exit(&pptr->port_mutex);
		return (FC_FAILURE);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "fcp_handle_port_detach: port is detaching");

	pptr->port_state |= flag;

	/*
	 * Wait for any ongoing reconfig/ipkt to complete, that
	 * ensures the freeing to targets/luns is safe.
	 * No more ref to this port should happen from statec/ioctl
	 * after that as it was removed from the global port list.
	 */
	while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
		/*
		 * Let's give sufficient time for reconfig/ipkt
		 * to complete.
		 */
		if (count++ >= FCP_ICMD_DEADLINE) {
			break;
		}
		/* Drop the port mutex while sleeping for one second. */
		mutex_exit(&pptr->port_mutex);
		delay(drv_usectohz(1000000));
		mutex_enter(&pptr->port_mutex);
	}

	/*
	 * if the driver is still busy then fail to
	 * suspend/power down.
	 */
	if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
		pptr->port_state &= ~flag;
		mutex_exit(&pptr->port_mutex);
		return (FC_FAILURE);
	}

	/* On a real detach, take the port off the global list first. */
	if (flag == FCP_STATE_DETACHING) {
		pptr = fcp_soft_state_unlink(pptr);
		ASSERT(pptr != NULL);
	}

	/* Mark the port offline and bump the link generation count. */
	pptr->port_link_cnt++;
	pptr->port_state |= FCP_STATE_OFFLINE;
	pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);

	fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
	    FCP_CAUSE_LINK_DOWN);
	mutex_exit(&pptr->port_mutex);

	/* kill watch dog timer if we're the last */
	mutex_enter(&fcp_global_mutex);
	if (--fcp_watchdog_init == 0) {
		timeout_id_t	tid = fcp_watchdog_id;

		/* untimeout() can block; never call it with the lock held. */
		mutex_exit(&fcp_global_mutex);
		(void) untimeout(tid);
	} else {
		mutex_exit(&fcp_global_mutex);
	}

	/* clean up the port structures */
	if (flag == FCP_STATE_DETACHING) {
		fcp_cleanup_port(pptr, instance);
	}

	return (FC_SUCCESS);
}
10299
10300
/*
 * Function: fcp_cleanup_port
 *
 * Description: Releases every resource still held by a port: the NDI event
 *		set and handle, the target/LUN structures and their devinfos,
 *		the MPxIO (pHCI) registration, the SCSA transport state, the
 *		port mutexes and, last of all, the soft state itself.  Called
 *		from fcp_handle_port_detach() on a real detach; pptr is no
 *		longer valid when this function returns.
 *
 * Argument:	*pptr		Port being torn down (freed on return).
 *		instance	Soft state instance number to free.
 *
 * Return Value: None
 */
static void
fcp_cleanup_port(struct fcp_port *pptr, int instance)
{
	ASSERT(pptr != NULL);

	/* unbind and free event set */
	if (pptr->port_ndi_event_hdl) {
		(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
		    &pptr->port_ndi_events, NDI_SLEEP);
		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
	}

	if (pptr->port_ndi_event_defs) {
		(void) kmem_free(pptr->port_ndi_event_defs,
		    sizeof (fcp_ndi_event_defs));
	}

	/* free the lun/target structures and devinfos */
	fcp_free_targets(pptr);

	/*
	 * Clean up mpxio stuff
	 */
	if (pptr->port_mpxio) {
		(void) mdi_phci_unregister(pptr->port_dip, 0);
		pptr->port_mpxio--;
	}

	/* clean up SCSA stuff */
	(void) scsi_hba_detach(pptr->port_dip);
	if (pptr->port_tran != NULL) {
		scsi_hba_tran_free(pptr->port_tran);
	}

#ifdef	KSTATS_CODE
	/* clean up kstats */
	if (pptr->fcp_ksp != NULL) {
		kstat_delete(pptr->fcp_ksp);
	}
#endif

	/* clean up soft state mutexes/condition variables */
	mutex_destroy(&pptr->port_mutex);
	mutex_destroy(&pptr->port_pkt_mutex);

	/* all done with soft state */
	ddi_soft_state_free(fcp_softstate, instance);
}
10349
10350 /*
10351 * Function: fcp_kmem_cache_constructor
10352 *
 * Description: This function allocates and initializes the resources required
 *		to build a scsi_pkt structure for the target driver.  The
 *		result of the allocation and initialization will be cached in
 *		the memory cache.  As DMA resources may be allocated here,
 *		that means DMA resources will be tied up in the cache manager.
 *		This is a tradeoff that has been made for performance reasons.
 *
 * Argument:	*pkt		scsi_pkt to preinitialize; its pkt_ha_private
 *				area holds the fcp_pkt being constructed.
 *		*tran		SCSI transport handle; tran_hba_private points
 *				to the owning fcp_port.
 *		kmflags		Value passed to kmem_cache_alloc() and
 *				propagated to the constructor.
10364 *
10365 * Return Value: 0 Allocation/Initialization was successful.
10366 * -1 Allocation or Initialization failed.
10367 *
10368 *
10369 * If the returned value is 0, the buffer is initialized like this:
10370 *
10371 * +================================+
10372 * +----> | struct scsi_pkt |
10373 * | | |
10374 * | +--- | pkt_ha_private |
10375 * | | | |
10376 * | | +================================+
10377 * | |
10378 * | | +================================+
10379 * | +--> | struct fcp_pkt | <---------+
10380 * | | | |
10381 * +----- | cmd_pkt | |
10382 * | cmd_fp_pkt | ---+ |
10383 * +-------->| cmd_fcp_rsp[] | | |
10384 * | +--->| cmd_fcp_cmd[] | | |
10385 * | | |--------------------------------| | |
10386 * | | | struct fc_packet | <--+ |
10387 * | | | | |
10388 * | | | pkt_ulp_private | ----------+
10389 * | | | pkt_fca_private | -----+
10390 * | | | pkt_data_cookie | ---+ |
10391 * | | | pkt_cmdlen | | |
10392 * | |(a) | pkt_rsplen | | |
10393 * | +----| .......... pkt_cmd ........... | ---|-|---------------+
10394 * | (b) | pkt_cmd_cookie | ---|-|----------+ |
10395 * +---------| .......... pkt_resp .......... | ---|-|------+ | |
10396 * | pkt_resp_cookie | ---|-|--+ | | |
10397 * | pkt_cmd_dma | | | | | | |
10398 * | pkt_cmd_acc | | | | | | |
10399 * +================================+ | | | | | |
10400 * | dma_cookies | <--+ | | | | |
10401 * | | | | | | |
10402 * +================================+ | | | | |
10403 * | fca_private | <----+ | | | |
10404 * | | | | | |
10405 * +================================+ | | | |
10406 * | | | |
10407 * | | | |
10408 * +================================+ (d) | | | |
10409 * | fcp_resp cookies | <-------+ | | |
10410 * | | | | |
10411 * +================================+ | | |
10412 * | | |
10413 * +================================+ (d) | | |
10414 * | fcp_resp | <-----------+ | |
10415 * | (DMA resources associated) | | |
10416 * +================================+ | |
10417 * | |
10418 * | |
10419 * | |
10420 * +================================+ (c) | |
10421 * | fcp_cmd cookies | <---------------+ |
10422 * | | |
10423 * +================================+ |
10424 * |
10425 * +================================+ (c) |
10426 * | fcp_cmd | <--------------------+
10427 * | (DMA resources associated) |
10428 * +================================+
10429 *
10430 * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10431 * (b) Only if DMA is NOT used for the FCP_RESP buffer
10432 * (c) Only if DMA is used for the FCP_CMD buffer.
10433 * (d) Only if DMA is used for the FCP_RESP buffer
10434 */
static int
fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
    int kmflags)
{
	struct fcp_pkt	*cmd;
	struct fcp_port	*pptr;
	fc_packet_t	*fpkt;

	pptr = (struct fcp_port *)tran->tran_hba_private;
	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
	/*
	 * tran_hba_len was sized at attach time as fcp_pkt + preallocated
	 * data DMA cookies + FCA private area, so this clears the whole
	 * per-packet region at once.
	 */
	bzero(cmd, tran->tran_hba_len);

	/* Cross-link the scsi_pkt, fcp_pkt and fc_packet views of the cmd. */
	cmd->cmd_pkt = pkt;
	pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
	fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
	cmd->cmd_fp_pkt = fpkt;

	cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
	cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
	/* FCA private area lives right after the data cookie array. */
	cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);

	/* Data DMA cookies are laid out immediately after the fcp_pkt. */
	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
	    sizeof (struct fcp_pkt));

	/* Fixed sizes of the FCP_CMD and FCP_RESP IUs. */
	fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
	fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;

	if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
		/*
		 * The underlying HBA doesn't want to DMA the fcp_cmd or
		 * fcp_resp. The transfer of information will be done by
		 * bcopy.
		 * The naming of the flags (that is actually a value) is
		 * unfortunate. FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
		 * DMA" but instead "NO DMA".
		 */
		fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
		fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
		fpkt->pkt_resp = cmd->cmd_fcp_rsp;
	} else {
		/*
		 * The underlying HBA will dma the fcp_cmd buffer and fcp_resp
		 * buffer. A buffer is allocated for each one the ddi_dma_*
		 * interfaces.
		 */
		if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
			return (-1);
		}
	}

	return (0);
}
10488
10489 /*
10490 * Function: fcp_kmem_cache_destructor
10491 *
10492 * Description: Called by the destructor of the cache managed by SCSA.
10493 * All the resources pre-allocated in fcp_pkt_constructor
10494 * and the data also pre-initialized in fcp_pkt_constructor
10495 * are freed and uninitialized here.
10496 *
 * Argument:	*pkt		scsi_pkt (cache buffer) to uninitialize.
 *		*tran		SCSI transport handle; tran_hba_private points
 *				to the owning fcp_port.
10499 *
10500 * Return Value: None
10501 *
10502 * Context: kernel
10503 */
10504 static void
10505 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10506 {
10507 struct fcp_pkt *cmd;
10508 struct fcp_port *pptr;
10509
10510 pptr = (struct fcp_port *)(tran->tran_hba_private);
10511 cmd = pkt->pkt_ha_private;
10512
10513 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10514 /*
10515 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10516 * buffer and DMA resources allocated to do so are released.
10517 */
10518 fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10519 }
10520 }
10521
10522 /*
10523 * Function: fcp_alloc_cmd_resp
10524 *
 * Description: This function allocates an FCP_CMD and FCP_RESP buffer that
10526 * will be DMAed by the HBA. The buffer is allocated applying
10527 * the DMA requirements for the HBA. The buffers allocated will
10528 * also be bound. DMA resources are allocated in the process.
10529 * They will be released by fcp_free_cmd_resp().
10530 *
10531 * Argument: *pptr FCP port.
10532 * *fpkt fc packet for which the cmd and resp packet should be
10533 * allocated.
10534 * flags Allocation flags.
10535 *
10536 * Return Value: FC_FAILURE
10537 * FC_SUCCESS
10538 *
10539 * Context: User or Kernel context only if flags == KM_SLEEP.
10540 * Interrupt context if the KM_SLEEP is not specified.
10541 */
10542 static int
10543 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10544 {
10545 int rval;
10546 int cmd_len;
10547 int resp_len;
10548 ulong_t real_len;
10549 int (*cb) (caddr_t);
10550 ddi_dma_cookie_t pkt_cookie;
10551 ddi_dma_cookie_t *cp;
10552 uint32_t cnt;
10553
10554 cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10555
10556 cmd_len = fpkt->pkt_cmdlen;
10557 resp_len = fpkt->pkt_rsplen;
10558
10559 ASSERT(fpkt->pkt_cmd_dma == NULL);
10560
10561 /* Allocation of a DMA handle used in subsequent calls. */
10562 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10563 cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10564 return (FC_FAILURE);
10565 }
10566
10567 /* A buffer is allocated that satisfies the DMA requirements. */
10568 rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10569 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10570 (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10571
10572 if (rval != DDI_SUCCESS) {
10573 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10574 return (FC_FAILURE);
10575 }
10576
10577 if (real_len < cmd_len) {
10578 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10579 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10580 return (FC_FAILURE);
10581 }
10582
10583 /* The buffer allocated is DMA bound. */
10584 rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10585 fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10586 cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10587
10588 if (rval != DDI_DMA_MAPPED) {
10589 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10590 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10591 return (FC_FAILURE);
10592 }
10593
10594 if (fpkt->pkt_cmd_cookie_cnt >
10595 pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10596 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10597 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10598 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10599 return (FC_FAILURE);
10600 }
10601
10602 ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10603
10604 /*
10605 * The buffer where the scatter/gather list is going to be built is
10606 * allocated.
10607 */
10608 cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10609 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10610 KM_NOSLEEP);
10611
10612 if (cp == NULL) {
10613 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10614 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10615 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10616 return (FC_FAILURE);
10617 }
10618
10619 /*
10620 * The scatter/gather list for the buffer we just allocated is built
10621 * here.
10622 */
10623 *cp = pkt_cookie;
10624 cp++;
10625
10626 for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10627 ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10628 &pkt_cookie);
10629 *cp = pkt_cookie;
10630 }
10631
10632 ASSERT(fpkt->pkt_resp_dma == NULL);
10633 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10634 cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10635 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10636 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10637 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10638 return (FC_FAILURE);
10639 }
10640
10641 rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10642 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10643 (caddr_t *)&fpkt->pkt_resp, &real_len,
10644 &fpkt->pkt_resp_acc);
10645
10646 if (rval != DDI_SUCCESS) {
10647 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10648 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10649 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10650 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10651 kmem_free(fpkt->pkt_cmd_cookie,
10652 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10653 return (FC_FAILURE);
10654 }
10655
10656 if (real_len < resp_len) {
10657 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10658 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10659 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10660 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10661 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10662 kmem_free(fpkt->pkt_cmd_cookie,
10663 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10664 return (FC_FAILURE);
10665 }
10666
10667 rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10668 fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10669 cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10670
10671 if (rval != DDI_DMA_MAPPED) {
10672 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10673 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10674 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10675 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10676 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10677 kmem_free(fpkt->pkt_cmd_cookie,
10678 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10679 return (FC_FAILURE);
10680 }
10681
10682 if (fpkt->pkt_resp_cookie_cnt >
10683 pptr->port_resp_dma_attr.dma_attr_sgllen) {
10684 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10685 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10686 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10687 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10688 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10689 kmem_free(fpkt->pkt_cmd_cookie,
10690 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10691 return (FC_FAILURE);
10692 }
10693
10694 ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10695
10696 cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10697 fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10698 KM_NOSLEEP);
10699
10700 if (cp == NULL) {
10701 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10702 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10703 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10704 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10705 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10706 kmem_free(fpkt->pkt_cmd_cookie,
10707 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10708 return (FC_FAILURE);
10709 }
10710
10711 *cp = pkt_cookie;
10712 cp++;
10713
10714 for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10715 ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10716 &pkt_cookie);
10717 *cp = pkt_cookie;
10718 }
10719
10720 return (FC_SUCCESS);
10721 }
10722
10723 /*
10724 * Function: fcp_free_cmd_resp
10725 *
10726 * Description: This function releases the FCP_CMD and FCP_RESP buffer
10727 * allocated by fcp_alloc_cmd_resp() and all the resources
10728 * associated with them. That includes the DMA resources and the
10729 * buffer allocated for the cookies of each one of them.
10730 *
10731 * Argument: *pptr FCP port context.
10732 * *fpkt fc packet containing the cmd and resp packet
10733 * to be released.
10734 *
10735 * Return Value: None
10736 *
10737 * Context: Interrupt, User and Kernel context.
10738 */
10739 /* ARGSUSED */
10740 static void
10741 fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
10742 {
10743 ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
10744
10745 if (fpkt->pkt_resp_dma) {
10746 (void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
10747 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10748 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10749 }
10750
10751 if (fpkt->pkt_resp_cookie) {
10752 kmem_free(fpkt->pkt_resp_cookie,
10753 fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
10754 fpkt->pkt_resp_cookie = NULL;
10755 }
10756
10757 if (fpkt->pkt_cmd_dma) {
10758 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10759 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10760 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10761 }
10762
10763 if (fpkt->pkt_cmd_cookie) {
10764 kmem_free(fpkt->pkt_cmd_cookie,
10765 fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
10766 fpkt->pkt_cmd_cookie = NULL;
10767 }
10768 }
10769
10770
10771 /*
10772 * called by the transport to do our own target initialization
10773 *
10774 * can acquire and release the global mutex
10775 */
/* ARGSUSED */
static int
fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	uchar_t			*bytes;
	uint_t			nbytes;
	uint16_t		lun_num;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	struct fcp_port	*pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;

	ASSERT(pptr != NULL);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
	    FCP_BUF_LEVEL_8, 0,
	    "fcp_phys_tgt_init: called for %s (instance %d)",
	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));

	/* get our port WWN property */
	bytes = NULL;
	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
	    (nbytes != FC_WWN_SIZE)) {
		/* no port WWN property */
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
		    " for %s (instance %d): bytes=%p nbytes=%x",
		    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
		    nbytes);

		/* The lookup may have allocated a short buffer; free it. */
		if (bytes != NULL) {
			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		}

		return (DDI_NOT_WELL_FORMED);
	}
	ASSERT(bytes != NULL);

	/*
	 * LUN number property; 0xFFFF is used as the "property missing"
	 * sentinel since it is also the default returned on lookup failure.
	 */
	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
	    LUN_PROP, 0xFFFF);
	if (lun_num == 0xFFFF) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_NOT_WELL_FORMED);
	}

	/*
	 * Find the fcp_lun matching this port WWN / LUN number pair; it
	 * must already exist (created during discovery).
	 */
	mutex_enter(&pptr->port_mutex);
	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
		mutex_exit(&pptr->port_mutex);
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_FAILURE);
	}

	/* Sanity: the LUN found matches the properties we looked up. */
	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
	    FC_WWN_SIZE) == 0);
	ASSERT(plun->lun_num == lun_num);

	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);

	ptgt = plun->lun_tgt;

	/* Link the LUN to this scsi_device and mark it initialized. */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_tgt_count++;
	scsi_device_hba_private_set(sd, plun);
	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
	plun->lun_sd = sd;
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	return (DDI_SUCCESS);
}
10861
/*
 * fcp_virt_tgt_init: tran_tgt_init(9E) work for an mpxio-managed (virtual)
 * child.  Same job as fcp_phys_tgt_init() — validate port-WWN/LUN
 * properties, find the fcp_lun and bind the scsi_device to it — with the
 * additional requirement that the child carry pathinfo (sd_pathinfo),
 * since a virtual child is addressed through MDI.
 *
 * Returns:
 *	DDI_SUCCESS		child bound to its LUN
 *	DDI_NOT_WELL_FORMED	no pathinfo, bad port-WWN, or missing LUN
 *	DDI_FAILURE		no matching LUN known on this port
 */
/*ARGSUSED*/
static int
fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	uchar_t *bytes;
	uint_t nbytes;
	uint16_t lun_num;
	struct fcp_tgt *ptgt;
	struct fcp_lun *plun;
	struct fcp_port *pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;
	child_info_t *cip;

	ASSERT(pptr != NULL);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
	    " (tgt_dip %p)", ddi_get_name(tgt_dip),
	    ddi_get_instance(tgt_dip), hba_dip, tgt_dip);

	/* a virtual child must have pathinfo attached */
	cip = (child_info_t *)sd->sd_pathinfo;
	if (cip == NULL) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		return (DDI_NOT_WELL_FORMED);
	}

	/* get our port WWN property */
	bytes = NULL;
	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
	    (nbytes != FC_WWN_SIZE)) {
		/* free any array allocated despite the failed size check */
		if (bytes) {
			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		}
		return (DDI_NOT_WELL_FORMED);
	}

	ASSERT(bytes != NULL);

	/* 0xFFFF is the "property not found" sentinel for the LUN number */
	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
	    LUN_PROP, 0xFFFF);
	if (lun_num == 0xFFFF) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_NOT_WELL_FORMED);
	}

	/* find the LUN this child names; port_mutex protects the lookup */
	mutex_enter(&pptr->port_mutex);
	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
		mutex_exit(&pptr->port_mutex);
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_FAILURE);
	}

	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
	    FC_WWN_SIZE) == 0);
	ASSERT(plun->lun_num == lun_num);

	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);

	ptgt = plun->lun_tgt;

	/* bind the scsi_device and the LUN together, under tgt_mutex */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_tgt_count++;
	scsi_device_hba_private_set(sd, plun);
	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
	plun->lun_sd = sd;
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	return (DDI_SUCCESS);
}
10952
10953
10954 /*
10955 * called by the transport to do our own target initialization
10956 *
10957 * can acquire and release the global mutex
10958 */
10959 /* ARGSUSED */
10960 static int
10961 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10962 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10963 {
10964 struct fcp_port *pptr = (struct fcp_port *)
10965 hba_tran->tran_hba_private;
10966 int rval;
10967
10968 ASSERT(pptr != NULL);
10969
10970 /*
10971 * Child node is getting initialized. Look at the mpxio component
10972 * type on the child device to see if this device is mpxio managed
10973 * or not.
10974 */
10975 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
10976 rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10977 } else {
10978 rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10979 }
10980
10981 return (rval);
10982 }
10983
10984
/*
 * fcp_scsi_tgt_free: tran_tgt_free(9E) entry point.  Undoes the binding
 * established by fcp_phys/virt_tgt_init(): drops the LUN's tgt_init
 * reference count and, on the last reference, clears
 * FCP_SCSI_LUN_TGT_INIT; the lun_sd back-pointer is always cleared.
 *
 * NOTE(review): the FCP_DTRACE below uses LUN_PORT, which presumably
 * dereferences plun — but plun is only checked for NULL after the
 * trace.  Confirm LUN_PORT is safe (or compiled out) when plun == NULL.
 */
/* ARGSUSED */
static void
fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	struct fcp_lun *plun = scsi_device_hba_private_get(sd);
	struct fcp_tgt *ptgt;

	FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
	    ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));

	/* never bound by tgt_init: nothing to undo */
	if (plun == NULL) {
		return;
	}
	ptgt = plun->lun_tgt;

	ASSERT(ptgt != NULL);

	mutex_enter(&ptgt->tgt_mutex);
	ASSERT(plun->lun_tgt_count > 0);

	/* last tgt_init reference gone: LUN no longer SCSA-initialized */
	if (--plun->lun_tgt_count == 0) {
		plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
	}
	plun->lun_sd = NULL;
	mutex_exit(&ptgt->tgt_mutex);
}
11015
11016 /*
11017 * Function: fcp_scsi_start
11018 *
11019 * Description: This function is called by the target driver to request a
11020 * command to be sent.
11021 *
11022 * Argument: *ap SCSI address of the device.
11023 * *pkt SCSI packet containing the cmd to send.
11024 *
11025 * Return Value: TRAN_ACCEPT
11026 * TRAN_BUSY
11027 * TRAN_BADPKT
11028 * TRAN_FATAL_ERROR
11029 */
11030 static int
11031 fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
11032 {
11033 struct fcp_port *pptr = ADDR2FCP(ap);
11034 struct fcp_lun *plun = ADDR2LUN(ap);
11035 struct fcp_pkt *cmd = PKT2CMD(pkt);
11036 struct fcp_tgt *ptgt = plun->lun_tgt;
11037 int rval;
11038
11039 /* ensure command isn't already issued */
11040 ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
11041
11042 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11043 fcp_trace, FCP_BUF_LEVEL_9, 0,
11044 "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);
11045
11046 /*
11047 * It is strange that we enter the fcp_port mutex and the target
11048 * mutex to check the lun state (which has a mutex of its own).
11049 */
11050 mutex_enter(&pptr->port_mutex);
11051 mutex_enter(&ptgt->tgt_mutex);
11052
11053 /*
11054 * If the device is offline and is not in the process of coming
11055 * online, fail the request.
11056 */
11057
11058 if ((plun->lun_state & FCP_LUN_OFFLINE) &&
11059 !(plun->lun_state & FCP_LUN_ONLINING)) {
11060 mutex_exit(&ptgt->tgt_mutex);
11061 mutex_exit(&pptr->port_mutex);
11062
11063 if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
11064 pkt->pkt_reason = CMD_DEV_GONE;
11065 }
11066
11067 return (TRAN_FATAL_ERROR);
11068 }
11069 cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;
11070
11071 /*
11072 * If we are suspended, kernel is trying to dump, so don't
11073 * block, fail or defer requests - send them down right away.
11074 * NOTE: If we are in panic (i.e. trying to dump), we can't
11075 * assume we have been suspended. There is hardware such as
11076 * the v880 that doesn't do PM. Thus, the check for
11077 * ddi_in_panic.
11078 *
11079 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
11080 * of changing. So, if we can queue the packet, do it. Eventually,
11081 * either the device will have gone away or changed and we can fail
11082 * the request, or we can proceed if the device didn't change.
11083 *
11084 * If the pd in the target or the packet is NULL it's probably
11085 * because the device has gone away, we allow the request to be
11086 * put on the internal queue here in case the device comes back within
11087 * the offline timeout. fctl will fix up the pd's if the tgt_pd_handle
11088 * has gone NULL, while fcp deals cases where pkt_pd is NULL. pkt_pd
11089 * could be NULL because the device was disappearing during or since
11090 * packet initialization.
11091 */
11092
11093 if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
11094 FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
11095 (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
11096 (ptgt->tgt_pd_handle == NULL) ||
11097 (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
11098 /*
11099 * If ((LUN is busy AND
11100 * LUN not suspended AND
11101 * The system is not in panic state) OR
11102 * (The port is coming up))
11103 *
11104 * We check to see if the any of the flags FLAG_NOINTR or
11105 * FLAG_NOQUEUE is set. If one of them is set the value
11106 * returned will be TRAN_BUSY. If not, the request is queued.
11107 */
11108 mutex_exit(&ptgt->tgt_mutex);
11109 mutex_exit(&pptr->port_mutex);
11110
11111 /* see if using interrupts is allowed (so queueing'll work) */
11112 if (pkt->pkt_flags & FLAG_NOINTR) {
11113 pkt->pkt_resid = 0;
11114 return (TRAN_BUSY);
11115 }
11116 if (pkt->pkt_flags & FLAG_NOQUEUE) {
11117 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11118 fcp_trace, FCP_BUF_LEVEL_9, 0,
11119 "fcp_scsi_start: lun busy for pkt %p", pkt);
11120 return (TRAN_BUSY);
11121 }
11122 #ifdef DEBUG
11123 mutex_enter(&pptr->port_pkt_mutex);
11124 pptr->port_npkts++;
11125 mutex_exit(&pptr->port_pkt_mutex);
11126 #endif /* DEBUG */
11127
11128 /* got queue up the pkt for later */
11129 fcp_queue_pkt(pptr, cmd);
11130 return (TRAN_ACCEPT);
11131 }
11132 cmd->cmd_state = FCP_PKT_ISSUED;
11133
11134 mutex_exit(&ptgt->tgt_mutex);
11135 mutex_exit(&pptr->port_mutex);
11136
11137 /*
11138 * Now that we released the mutexes, what was protected by them can
11139 * change.
11140 */
11141
11142 /*
11143 * If there is a reconfiguration in progress, wait for it to complete.
11144 */
11145 fcp_reconfig_wait(pptr);
11146
11147 cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
11148 pkt->pkt_time : 0;
11149
11150 /* prepare the packet */
11151
11152 fcp_prepare_pkt(pptr, cmd, plun);
11153
11154 if (cmd->cmd_pkt->pkt_time) {
11155 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11156 } else {
11157 cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11158 }
11159
11160 /*
11161 * if interrupts aren't allowed (e.g. at dump time) then we'll
11162 * have to do polled I/O
11163 */
11164 if (pkt->pkt_flags & FLAG_NOINTR) {
11165 cmd->cmd_state &= ~FCP_PKT_ISSUED;
11166 return (fcp_dopoll(pptr, cmd));
11167 }
11168
11169 #ifdef DEBUG
11170 mutex_enter(&pptr->port_pkt_mutex);
11171 pptr->port_npkts++;
11172 mutex_exit(&pptr->port_pkt_mutex);
11173 #endif /* DEBUG */
11174
11175 rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
11176 if (rval == FC_SUCCESS) {
11177 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11178 fcp_trace, FCP_BUF_LEVEL_9, 0,
11179 "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
11180 return (TRAN_ACCEPT);
11181 }
11182
11183 cmd->cmd_state = FCP_PKT_IDLE;
11184
11185 #ifdef DEBUG
11186 mutex_enter(&pptr->port_pkt_mutex);
11187 pptr->port_npkts--;
11188 mutex_exit(&pptr->port_pkt_mutex);
11189 #endif /* DEBUG */
11190
11191 /*
11192 * For lack of clearer definitions, choose
11193 * between TRAN_BUSY and TRAN_FATAL_ERROR.
11194 */
11195
11196 if (rval == FC_TRAN_BUSY) {
11197 pkt->pkt_resid = 0;
11198 rval = TRAN_BUSY;
11199 } else {
11200 mutex_enter(&ptgt->tgt_mutex);
11201 if (plun->lun_state & FCP_LUN_OFFLINE) {
11202 child_info_t *cip;
11203
11204 mutex_enter(&plun->lun_mutex);
11205 cip = plun->lun_cip;
11206 mutex_exit(&plun->lun_mutex);
11207
11208 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11209 fcp_trace, FCP_BUF_LEVEL_6, 0,
11210 "fcp_transport failed 2 for %x: %x; dip=%p",
11211 plun->lun_tgt->tgt_d_id, rval, cip);
11212
11213 rval = TRAN_FATAL_ERROR;
11214 } else {
11215 if (pkt->pkt_flags & FLAG_NOQUEUE) {
11216 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11217 fcp_trace, FCP_BUF_LEVEL_9, 0,
11218 "fcp_scsi_start: FC_BUSY for pkt %p",
11219 pkt);
11220 rval = TRAN_BUSY;
11221 } else {
11222 rval = TRAN_ACCEPT;
11223 fcp_queue_pkt(pptr, cmd);
11224 }
11225 }
11226 mutex_exit(&ptgt->tgt_mutex);
11227 }
11228
11229 return (rval);
11230 }
11231
11232 /*
11233 * called by the transport to abort a packet
11234 */
11235 /*ARGSUSED*/
11236 static int
11237 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11238 {
11239 int tgt_cnt;
11240 struct fcp_port *pptr = ADDR2FCP(ap);
11241 struct fcp_lun *plun = ADDR2LUN(ap);
11242 struct fcp_tgt *ptgt = plun->lun_tgt;
11243
11244 if (pkt == NULL) {
11245 if (ptgt) {
11246 mutex_enter(&ptgt->tgt_mutex);
11247 tgt_cnt = ptgt->tgt_change_cnt;
11248 mutex_exit(&ptgt->tgt_mutex);
11249 fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11250 return (TRUE);
11251 }
11252 }
11253 return (FALSE);
11254 }
11255
11256
11257 /*
11258 * Perform reset
11259 */
11260 int
11261 fcp_scsi_reset(struct scsi_address *ap, int level)
11262 {
11263 int rval = 0;
11264 struct fcp_port *pptr = ADDR2FCP(ap);
11265 struct fcp_lun *plun = ADDR2LUN(ap);
11266 struct fcp_tgt *ptgt = plun->lun_tgt;
11267
11268 if (level == RESET_ALL) {
11269 if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
11270 rval = 1;
11271 }
11272 } else if (level == RESET_TARGET || level == RESET_LUN) {
11273 /*
11274 * If we are in the middle of discovery, return
11275 * SUCCESS as this target will be rediscovered
11276 * anyway
11277 */
11278 mutex_enter(&ptgt->tgt_mutex);
11279 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11280 mutex_exit(&ptgt->tgt_mutex);
11281 return (1);
11282 }
11283 mutex_exit(&ptgt->tgt_mutex);
11284
11285 if (fcp_reset_target(ap, level) == FC_SUCCESS) {
11286 rval = 1;
11287 }
11288 }
11289 return (rval);
11290 }
11291
11292
11293 /*
11294 * called by the framework to get a SCSI capability
11295 */
11296 static int
11297 fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
11298 {
11299 return (fcp_commoncap(ap, cap, 0, whom, 0));
11300 }
11301
11302
11303 /*
11304 * called by the framework to set a SCSI capability
11305 */
11306 static int
11307 fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
11308 {
11309 return (fcp_commoncap(ap, cap, value, whom, 1));
11310 }
11311
11312 /*
11313 * Function: fcp_pkt_setup
11314 *
11315 * Description: This function sets up the scsi_pkt structure passed by the
11316 * caller. This function assumes fcp_pkt_constructor has been
11317 * called previously for the packet passed by the caller. If
11318 * successful this call will have the following results:
11319 *
11320 * - The resources needed that will be constant through out
11321 * the whole transaction are allocated.
11322 * - The fields that will be constant through out the whole
11323 * transaction are initialized.
11324 * - The scsi packet will be linked to the LUN structure
11325 * addressed by the transaction.
11326 *
11327 * Argument:
11328 * *pkt Pointer to a scsi_pkt structure.
11329 * callback
11330 * arg
11331 *
11332 * Return Value: 0 Success
11333 * !0 Failure
11334 *
11335 * Context: Kernel context or interrupt context
11336 */
11337 /* ARGSUSED */
11338 static int
11339 fcp_pkt_setup(struct scsi_pkt *pkt,
11340 int (*callback)(caddr_t arg),
11341 caddr_t arg)
11342 {
11343 struct fcp_pkt *cmd;
11344 struct fcp_port *pptr;
11345 struct fcp_lun *plun;
11346 struct fcp_tgt *ptgt;
11347 int kf;
11348 fc_packet_t *fpkt;
11349 fc_frame_hdr_t *hp;
11350
11351 pptr = ADDR2FCP(&pkt->pkt_address);
11352 plun = ADDR2LUN(&pkt->pkt_address);
11353 ptgt = plun->lun_tgt;
11354
11355 cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11356 fpkt = cmd->cmd_fp_pkt;
11357
11358 /*
11359 * this request is for dma allocation only
11360 */
11361 /*
11362 * First step of fcp_scsi_init_pkt: pkt allocation
11363 * We determine if the caller is willing to wait for the
11364 * resources.
11365 */
11366 kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
11367
11368 /*
11369 * Selective zeroing of the pkt.
11370 */
11371 cmd->cmd_back = NULL;
11372 cmd->cmd_next = NULL;
11373
11374 /*
11375 * Zero out fcp command
11376 */
11377 bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));
11378
11379 cmd->cmd_state = FCP_PKT_IDLE;
11380
11381 fpkt = cmd->cmd_fp_pkt;
11382 fpkt->pkt_data_acc = NULL;
11383
11384 /*
11385 * When port_state is FCP_STATE_OFFLINE, remote_port (tgt_pd_handle)
11386 * could be destroyed. We need fail pkt_setup.
11387 */
11388 if (pptr->port_state & FCP_STATE_OFFLINE) {
11389 return (-1);
11390 }
11391
11392 mutex_enter(&ptgt->tgt_mutex);
11393 fpkt->pkt_pd = ptgt->tgt_pd_handle;
11394
11395 if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
11396 != FC_SUCCESS) {
11397 mutex_exit(&ptgt->tgt_mutex);
11398 return (-1);
11399 }
11400
11401 mutex_exit(&ptgt->tgt_mutex);
11402
11403 /* Fill in the Fabric Channel Header */
11404 hp = &fpkt->pkt_cmd_fhdr;
11405 hp->r_ctl = R_CTL_COMMAND;
11406 hp->rsvd = 0;
11407 hp->type = FC_TYPE_SCSI_FCP;
11408 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
11409 hp->seq_id = 0;
11410 hp->df_ctl = 0;
11411 hp->seq_cnt = 0;
11412 hp->ox_id = 0xffff;
11413 hp->rx_id = 0xffff;
11414 hp->ro = 0;
11415
11416 /*
11417 * A doubly linked list (cmd_forw, cmd_back) is built
11418 * out of every allocated packet on a per-lun basis
11419 *
11420 * The packets are maintained in the list so as to satisfy
11421 * scsi_abort() requests. At present (which is unlikely to
11422 * change in the future) nobody performs a real scsi_abort
11423 * in the SCSI target drivers (as they don't keep the packets
11424 * after doing scsi_transport - so they don't know how to
11425 * abort a packet other than sending a NULL to abort all
11426 * outstanding packets)
11427 */
11428 mutex_enter(&plun->lun_mutex);
11429 if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
11430 plun->lun_pkt_head->cmd_back = cmd;
11431 } else {
11432 plun->lun_pkt_tail = cmd;
11433 }
11434 plun->lun_pkt_head = cmd;
11435 mutex_exit(&plun->lun_mutex);
11436 return (0);
11437 }
11438
11439 /*
11440 * Function: fcp_pkt_teardown
11441 *
11442 * Description: This function releases a scsi_pkt structure and all the
11443 * resources attached to it.
11444 *
11445 * Argument: *pkt Pointer to a scsi_pkt structure.
11446 *
11447 * Return Value: None
11448 *
11449 * Context: User, Kernel or Interrupt context.
11450 */
11451 static void
11452 fcp_pkt_teardown(struct scsi_pkt *pkt)
11453 {
11454 struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
11455 struct fcp_lun *plun = ADDR2LUN(&pkt->pkt_address);
11456 struct fcp_pkt *cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11457
11458 /*
11459 * Remove the packet from the per-lun list
11460 */
11461 mutex_enter(&plun->lun_mutex);
11462 if (cmd->cmd_back) {
11463 ASSERT(cmd != plun->lun_pkt_head);
11464 cmd->cmd_back->cmd_forw = cmd->cmd_forw;
11465 } else {
11466 ASSERT(cmd == plun->lun_pkt_head);
11467 plun->lun_pkt_head = cmd->cmd_forw;
11468 }
11469
11470 if (cmd->cmd_forw) {
11471 cmd->cmd_forw->cmd_back = cmd->cmd_back;
11472 } else {
11473 ASSERT(cmd == plun->lun_pkt_tail);
11474 plun->lun_pkt_tail = cmd->cmd_back;
11475 }
11476
11477 mutex_exit(&plun->lun_mutex);
11478
11479 (void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
11480 }
11481
11482 /*
11483 * Routine for reset notification setup, to register or cancel.
11484 * This function is called by SCSA
11485 */
11486 /*ARGSUSED*/
11487 static int
11488 fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
11489 void (*callback)(caddr_t), caddr_t arg)
11490 {
11491 struct fcp_port *pptr = ADDR2FCP(ap);
11492
11493 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
11494 &pptr->port_mutex, &pptr->port_reset_notify_listf));
11495 }
11496
11497
11498 static int
11499 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11500 ddi_eventcookie_t *event_cookiep)
11501 {
11502 struct fcp_port *pptr = fcp_dip2port(dip);
11503
11504 if (pptr == NULL) {
11505 return (DDI_FAILURE);
11506 }
11507
11508 return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11509 event_cookiep, NDI_EVENT_NOPASS));
11510 }
11511
11512
11513 static int
11514 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11515 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11516 ddi_callback_id_t *cb_id)
11517 {
11518 struct fcp_port *pptr = fcp_dip2port(dip);
11519
11520 if (pptr == NULL) {
11521 return (DDI_FAILURE);
11522 }
11523
11524 return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11525 eventid, callback, arg, NDI_SLEEP, cb_id));
11526 }
11527
11528
11529 static int
11530 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11531 {
11532
11533 struct fcp_port *pptr = fcp_dip2port(dip);
11534
11535 if (pptr == NULL) {
11536 return (DDI_FAILURE);
11537 }
11538 return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11539 }
11540
11541
11542 /*
11543 * called by the transport to post an event
11544 */
11545 static int
11546 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11547 ddi_eventcookie_t eventid, void *impldata)
11548 {
11549 struct fcp_port *pptr = fcp_dip2port(dip);
11550
11551 if (pptr == NULL) {
11552 return (DDI_FAILURE);
11553 }
11554
11555 return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11556 eventid, impldata));
11557 }
11558
11559
11560 /*
 * A target in many cases in Fibre Channel has a one to one relation
11562 * with a port identifier (which is also known as D_ID and also as AL_PA
11563 * in private Loop) On Fibre Channel-to-SCSI bridge boxes a target reset
11564 * will most likely result in resetting all LUNs (which means a reset will
11565 * occur on all the SCSI devices connected at the other end of the bridge)
11566 * That is the latest favorite topic for discussion, for, one can debate as
11567 * hot as one likes and come up with arguably a best solution to one's
11568 * satisfaction
11569 *
11570 * To stay on track and not digress much, here are the problems stated
11571 * briefly:
11572 *
11573 * SCSA doesn't define RESET_LUN, It defines RESET_TARGET, but the
11574 * target drivers use RESET_TARGET even if their instance is on a
11575 * LUN. Doesn't that sound a bit broken ?
11576 *
11577 * FCP SCSI (the current spec) only defines RESET TARGET in the
11578 * control fields of an FCP_CMND structure. It should have been
11579 * fixed right there, giving flexibility to the initiators to
11580 * minimize havoc that could be caused by resetting a target.
11581 */
11582 static int
11583 fcp_reset_target(struct scsi_address *ap, int level)
11584 {
11585 int rval = FC_FAILURE;
11586 char lun_id[25];
11587 struct fcp_port *pptr = ADDR2FCP(ap);
11588 struct fcp_lun *plun = ADDR2LUN(ap);
11589 struct fcp_tgt *ptgt = plun->lun_tgt;
11590 struct scsi_pkt *pkt;
11591 struct fcp_pkt *cmd;
11592 struct fcp_rsp *rsp;
11593 uint32_t tgt_cnt;
11594 struct fcp_rsp_info *rsp_info;
11595 struct fcp_reset_elem *p;
11596 int bval;
11597
11598 if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
11599 KM_NOSLEEP)) == NULL) {
11600 return (rval);
11601 }
11602
11603 mutex_enter(&ptgt->tgt_mutex);
11604 if (level == RESET_TARGET) {
11605 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11606 mutex_exit(&ptgt->tgt_mutex);
11607 kmem_free(p, sizeof (struct fcp_reset_elem));
11608 return (rval);
11609 }
11610 fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
11611 (void) strcpy(lun_id, " ");
11612 } else {
11613 if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
11614 mutex_exit(&ptgt->tgt_mutex);
11615 kmem_free(p, sizeof (struct fcp_reset_elem));
11616 return (rval);
11617 }
11618 fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
11619
11620 (void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
11621 }
11622 tgt_cnt = ptgt->tgt_change_cnt;
11623
11624 mutex_exit(&ptgt->tgt_mutex);
11625
11626 if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
11627 0, 0, NULL, 0)) == NULL) {
11628 kmem_free(p, sizeof (struct fcp_reset_elem));
11629 mutex_enter(&ptgt->tgt_mutex);
11630 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11631 mutex_exit(&ptgt->tgt_mutex);
11632 return (rval);
11633 }
11634 pkt->pkt_time = FCP_POLL_TIMEOUT;
11635
11636 /* fill in cmd part of packet */
11637 cmd = PKT2CMD(pkt);
11638 if (level == RESET_TARGET) {
11639 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
11640 } else {
11641 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
11642 }
11643 cmd->cmd_fp_pkt->pkt_comp = NULL;
11644 cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
11645
11646 /* prepare a packet for transport */
11647 fcp_prepare_pkt(pptr, cmd, plun);
11648
11649 if (cmd->cmd_pkt->pkt_time) {
11650 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11651 } else {
11652 cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11653 }
11654
11655 (void) fc_ulp_busy_port(pptr->port_fp_handle);
11656 bval = fcp_dopoll(pptr, cmd);
11657 fc_ulp_idle_port(pptr->port_fp_handle);
11658
11659 /* submit the packet */
11660 if (bval == TRAN_ACCEPT) {
11661 int error = 3;
11662
11663 rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
11664 rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
11665 sizeof (struct fcp_rsp));
11666
11667 if (rsp->fcp_u.fcp_status.rsp_len_set) {
11668 if (fcp_validate_fcp_response(rsp, pptr) ==
11669 FC_SUCCESS) {
11670 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
11671 FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
11672 sizeof (struct fcp_rsp), rsp_info,
11673 cmd->cmd_fp_pkt->pkt_resp_acc,
11674 sizeof (struct fcp_rsp_info));
11675 }
11676 if (rsp_info->rsp_code == FCP_NO_FAILURE) {
11677 rval = FC_SUCCESS;
11678 error = 0;
11679 } else {
11680 error = 1;
11681 }
11682 } else {
11683 error = 2;
11684 }
11685 }
11686
11687 switch (error) {
11688 case 0:
11689 fcp_log(CE_WARN, pptr->port_dip,
11690 "!FCP: WWN 0x%08x%08x %s reset successfully",
11691 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11692 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11693 break;
11694
11695 case 1:
11696 fcp_log(CE_WARN, pptr->port_dip,
11697 "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11698 " response code=%x",
11699 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11700 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11701 rsp_info->rsp_code);
11702 break;
11703
11704 case 2:
11705 fcp_log(CE_WARN, pptr->port_dip,
11706 "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11707 " Bad FCP response values: rsvd1=%x,"
11708 " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
11709 " rsplen=%x, senselen=%x",
11710 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11711 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11712 rsp->reserved_0, rsp->reserved_1,
11713 rsp->fcp_u.fcp_status.reserved_0,
11714 rsp->fcp_u.fcp_status.reserved_1,
11715 rsp->fcp_response_len, rsp->fcp_sense_len);
11716 break;
11717
11718 default:
11719 fcp_log(CE_WARN, pptr->port_dip,
11720 "!FCP: Reset to WWN 0x%08x%08x %s failed",
11721 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11722 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11723 break;
11724 }
11725 }
11726 scsi_destroy_pkt(pkt);
11727
11728 if (rval == FC_FAILURE) {
11729 mutex_enter(&ptgt->tgt_mutex);
11730 if (level == RESET_TARGET) {
11731 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11732 } else {
11733 fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
11734 }
11735 mutex_exit(&ptgt->tgt_mutex);
11736 kmem_free(p, sizeof (struct fcp_reset_elem));
11737 return (rval);
11738 }
11739
11740 mutex_enter(&pptr->port_mutex);
11741 if (level == RESET_TARGET) {
11742 p->tgt = ptgt;
11743 p->lun = NULL;
11744 } else {
11745 p->tgt = NULL;
11746 p->lun = plun;
11747 }
11748 p->tgt = ptgt;
11749 p->tgt_cnt = tgt_cnt;
11750 p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
11751 p->next = pptr->port_reset_list;
11752 pptr->port_reset_list = p;
11753
11754 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11755 fcp_trace, FCP_BUF_LEVEL_3, 0,
11756 "Notify ssd of the reset to reinstate the reservations");
11757
11758 scsi_hba_reset_notify_callback(&pptr->port_mutex,
11759 &pptr->port_reset_notify_listf);
11760
11761 mutex_exit(&pptr->port_mutex);
11762
11763 return (rval);
11764 }
11765
11766
11767 /*
11768 * called by fcp_getcap and fcp_setcap to get and set (respectively)
11769 * SCSI capabilities
11770 */
11771 /* ARGSUSED */
11772 static int
11773 fcp_commoncap(struct scsi_address *ap, char *cap,
11774 int val, int tgtonly, int doset)
11775 {
11776 struct fcp_port *pptr = ADDR2FCP(ap);
11777 struct fcp_lun *plun = ADDR2LUN(ap);
11778 struct fcp_tgt *ptgt = plun->lun_tgt;
11779 int cidx;
11780 int rval = FALSE;
11781
11782 if (cap == (char *)0) {
11783 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11784 fcp_trace, FCP_BUF_LEVEL_3, 0,
11785 "fcp_commoncap: invalid arg");
11786 return (rval);
11787 }
11788
11789 if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
11790 return (UNDEFINED);
11791 }
11792
11793 /*
11794 * Process setcap request.
11795 */
11796 if (doset) {
11797 /*
11798 * At present, we can only set binary (0/1) values
11799 */
11800 switch (cidx) {
11801 case SCSI_CAP_ARQ:
11802 if (val == 0) {
11803 rval = FALSE;
11804 } else {
11805 rval = TRUE;
11806 }
11807 break;
11808
11809 case SCSI_CAP_LUN_RESET:
11810 if (val) {
11811 plun->lun_cap |= FCP_LUN_CAP_RESET;
11812 } else {
11813 plun->lun_cap &= ~FCP_LUN_CAP_RESET;
11814 }
11815 rval = TRUE;
11816 break;
11817
11818 case SCSI_CAP_SECTOR_SIZE:
11819 rval = TRUE;
11820 break;
11821 default:
11822 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11823 fcp_trace, FCP_BUF_LEVEL_4, 0,
11824 "fcp_setcap: unsupported %d", cidx);
11825 rval = UNDEFINED;
11826 break;
11827 }
11828
11829 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11830 fcp_trace, FCP_BUF_LEVEL_5, 0,
11831 "set cap: cap=%s, val/tgtonly/doset/rval = "
11832 "0x%x/0x%x/0x%x/%d",
11833 cap, val, tgtonly, doset, rval);
11834
11835 } else {
11836 /*
11837 * Process getcap request.
11838 */
11839 switch (cidx) {
11840 case SCSI_CAP_DMA_MAX:
11841 rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;
11842
11843 /*
11844 * Need to make an adjustment qlc is uint_t 64
11845 * st is int, so we will make the adjustment here
11846 * being as nobody wants to touch this.
11847 * It still leaves the max single block length
11848 * of 2 gig. This should last .
11849 */
11850
11851 if (rval == -1) {
11852 rval = MAX_INT_DMA;
11853 }
11854
11855 break;
11856
11857 case SCSI_CAP_INITIATOR_ID:
11858 rval = pptr->port_id;
11859 break;
11860
11861 case SCSI_CAP_ARQ:
11862 case SCSI_CAP_RESET_NOTIFICATION:
11863 case SCSI_CAP_TAGGED_QING:
11864 rval = TRUE;
11865 break;
11866
11867 case SCSI_CAP_SCSI_VERSION:
11868 rval = 3;
11869 break;
11870
11871 case SCSI_CAP_INTERCONNECT_TYPE:
11872 if (FC_TOP_EXTERNAL(pptr->port_topology) ||
11873 (ptgt->tgt_hard_addr == 0)) {
11874 rval = INTERCONNECT_FABRIC;
11875 } else {
11876 rval = INTERCONNECT_FIBRE;
11877 }
11878 break;
11879
11880 case SCSI_CAP_LUN_RESET:
11881 rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
11882 TRUE : FALSE;
11883 break;
11884
11885 default:
11886 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11887 fcp_trace, FCP_BUF_LEVEL_4, 0,
11888 "fcp_getcap: unsupported %d", cidx);
11889 rval = UNDEFINED;
11890 break;
11891 }
11892
11893 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11894 fcp_trace, FCP_BUF_LEVEL_8, 0,
11895 "get cap: cap=%s, val/tgtonly/doset/rval = "
11896 "0x%x/0x%x/0x%x/%d",
11897 cap, val, tgtonly, doset, rval);
11898 }
11899
11900 return (rval);
11901 }
11902
11903 /*
11904 * called by the transport to get the port-wwn and lun
11905 * properties of this device, and to create a "name" based on them
11906 *
11907 * these properties don't exist on sun4m
11908 *
11909 * return 1 for success else return 0
11910 */
11911 /* ARGSUSED */
11912 static int
11913 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11914 {
11915 int i;
11916 int *lun;
11917 int numChars;
11918 uint_t nlun;
11919 uint_t count;
11920 uint_t nbytes;
11921 uchar_t *bytes;
11922 uint16_t lun_num;
11923 uint32_t tgt_id;
11924 char **conf_wwn;
11925 char tbuf[(FC_WWN_SIZE << 1) + 1];
11926 uchar_t barray[FC_WWN_SIZE];
11927 dev_info_t *tgt_dip;
11928 struct fcp_tgt *ptgt;
11929 struct fcp_port *pptr;
11930 struct fcp_lun *plun;
11931
11932 ASSERT(sd != NULL);
11933 ASSERT(name != NULL);
11934
11935 tgt_dip = sd->sd_dev;
11936 pptr = ddi_get_soft_state(fcp_softstate,
11937 ddi_get_instance(ddi_get_parent(tgt_dip)));
11938 if (pptr == NULL) {
11939 return (0);
11940 }
11941
11942 ASSERT(tgt_dip != NULL);
11943
11944 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11945 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11946 LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11947 name[0] = '\0';
11948 return (0);
11949 }
11950
11951 if (nlun == 0) {
11952 ddi_prop_free(lun);
11953 return (0);
11954 }
11955
11956 lun_num = lun[0];
11957 ddi_prop_free(lun);
11958
11959 /*
11960 * Lookup for .conf WWN property
11961 */
11962 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
11963 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
11964 &conf_wwn, &count) == DDI_PROP_SUCCESS) {
11965 ASSERT(count >= 1);
11966
11967 fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
11968 ddi_prop_free(conf_wwn);
11969 mutex_enter(&pptr->port_mutex);
11970 if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
11971 mutex_exit(&pptr->port_mutex);
11972 return (0);
11973 }
11974 ptgt = plun->lun_tgt;
11975 mutex_exit(&pptr->port_mutex);
11976
11977 (void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
11978 tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
11979
11980 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
11981 ptgt->tgt_hard_addr != 0) {
11982 tgt_id = (uint32_t)fcp_alpa_to_switch[
11983 ptgt->tgt_hard_addr];
11984 } else {
11985 tgt_id = ptgt->tgt_d_id;
11986 }
11987
11988 (void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
11989 TARGET_PROP, tgt_id);
11990 }
11991
11992 /* get the our port-wwn property */
11993 bytes = NULL;
11994 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
11995 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
11996 &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
11997 if (bytes != NULL) {
11998 ddi_prop_free(bytes);
11999 }
12000 return (0);
12001 }
12002
12003 for (i = 0; i < FC_WWN_SIZE; i++) {
12004 (void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
12005 }
12006
12007 /* Stick in the address of the form "wWWN,LUN" */
12008 numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
12009
12010 ASSERT(numChars < len);
12011 if (numChars >= len) {
12012 fcp_log(CE_WARN, pptr->port_dip,
12013 "!fcp_scsi_get_name: "
12014 "name parameter length too small, it needs to be %d",
12015 numChars+1);
12016 }
12017
12018 ddi_prop_free(bytes);
12019
12020 return (1);
12021 }
12022
12023
12024 /*
12025 * called by the transport to get the SCSI target id value, returning
12026 * it in "name"
12027 *
12028 * this isn't needed/used on sun4m
12029 *
12030 * return 1 for success else return 0
12031 */
12032 /* ARGSUSED */
12033 static int
12034 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
12035 {
12036 struct fcp_lun *plun = ADDR2LUN(&sd->sd_address);
12037 struct fcp_tgt *ptgt;
12038 int numChars;
12039
12040 if (plun == NULL) {
12041 return (0);
12042 }
12043
12044 if ((ptgt = plun->lun_tgt) == NULL) {
12045 return (0);
12046 }
12047
12048 numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
12049
12050 ASSERT(numChars < len);
12051 if (numChars >= len) {
12052 fcp_log(CE_WARN, NULL,
12053 "!fcp_scsi_get_bus_addr: "
12054 "name parameter length too small, it needs to be %d",
12055 numChars+1);
12056 }
12057
12058 return (1);
12059 }
12060
12061
12062 /*
12063 * called internally to reset the link where the specified port lives
12064 */
12065 static int
12066 fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
12067 {
12068 la_wwn_t wwn;
12069 struct fcp_lun *plun;
12070 struct fcp_tgt *ptgt;
12071
12072 /* disable restart of lip if we're suspended */
12073 mutex_enter(&pptr->port_mutex);
12074
12075 if (pptr->port_state & (FCP_STATE_SUSPENDED |
12076 FCP_STATE_POWER_DOWN)) {
12077 mutex_exit(&pptr->port_mutex);
12078 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12079 fcp_trace, FCP_BUF_LEVEL_2, 0,
12080 "fcp_linkreset, fcp%d: link reset "
12081 "disabled due to DDI_SUSPEND",
12082 ddi_get_instance(pptr->port_dip));
12083 return (FC_FAILURE);
12084 }
12085
12086 if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
12087 mutex_exit(&pptr->port_mutex);
12088 return (FC_SUCCESS);
12089 }
12090
12091 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
12092 fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");
12093
12094 /*
12095 * If ap == NULL assume local link reset.
12096 */
12097 if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
12098 plun = ADDR2LUN(ap);
12099 ptgt = plun->lun_tgt;
12100 bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
12101 } else {
12102 bzero((caddr_t)&wwn, sizeof (wwn));
12103 }
12104 mutex_exit(&pptr->port_mutex);
12105
12106 return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
12107 }
12108
12109
12110 /*
12111 * called from fcp_port_attach() to resume a port
12112 * return DDI_* success/failure status
12113 * acquires and releases the global mutex
12114 * acquires and releases the port mutex
12115 */
12116 /*ARGSUSED*/
12117
12118 static int
12119 fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
12120 uint32_t s_id, fc_attach_cmd_t cmd, int instance)
12121 {
12122 int res = DDI_FAILURE; /* default result */
12123 struct fcp_port *pptr; /* port state ptr */
12124 uint32_t alloc_cnt;
12125 uint32_t max_cnt;
12126 fc_portmap_t *tmp_list = NULL;
12127
12128 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
12129 FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
12130 instance);
12131
12132 if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
12133 cmn_err(CE_WARN, "fcp: bad soft state");
12134 return (res);
12135 }
12136
12137 mutex_enter(&pptr->port_mutex);
12138 switch (cmd) {
12139 case FC_CMD_RESUME:
12140 ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
12141 pptr->port_state &= ~FCP_STATE_SUSPENDED;
12142 break;
12143
12144 case FC_CMD_POWER_UP:
12145 /*
12146 * If the port is DDI_SUSPENded, defer rediscovery
12147 * until DDI_RESUME occurs
12148 */
12149 if (pptr->port_state & FCP_STATE_SUSPENDED) {
12150 pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12151 mutex_exit(&pptr->port_mutex);
12152 return (DDI_SUCCESS);
12153 }
12154 pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12155 }
12156 pptr->port_id = s_id;
12157 pptr->port_state = FCP_STATE_INIT;
12158 mutex_exit(&pptr->port_mutex);
12159
12160 /*
12161 * Make a copy of ulp_port_info as fctl allocates
12162 * a temp struct.
12163 */
12164 (void) fcp_cp_pinfo(pptr, pinfo);
12165
12166 mutex_enter(&fcp_global_mutex);
12167 if (fcp_watchdog_init++ == 0) {
12168 fcp_watchdog_tick = fcp_watchdog_timeout *
12169 drv_usectohz(1000000);
12170 fcp_watchdog_id = timeout(fcp_watch,
12171 NULL, fcp_watchdog_tick);
12172 }
12173 mutex_exit(&fcp_global_mutex);
12174
12175 /*
12176 * Handle various topologies and link states.
12177 */
12178 switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
12179 case FC_STATE_OFFLINE:
12180 /*
12181 * Wait for ONLINE, at which time a state
12182 * change will cause a statec_callback
12183 */
12184 res = DDI_SUCCESS;
12185 break;
12186
12187 case FC_STATE_ONLINE:
12188
12189 if (pptr->port_topology == FC_TOP_UNKNOWN) {
12190 (void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
12191 res = DDI_SUCCESS;
12192 break;
12193 }
12194
12195 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
12196 !fcp_enable_auto_configuration) {
12197 tmp_list = fcp_construct_map(pptr, &alloc_cnt);
12198 if (tmp_list == NULL) {
12199 if (!alloc_cnt) {
12200 res = DDI_SUCCESS;
12201 }
12202 break;
12203 }
12204 max_cnt = alloc_cnt;
12205 } else {
12206 ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
12207
12208 alloc_cnt = FCP_MAX_DEVICES;
12209
12210 if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
12211 (sizeof (fc_portmap_t)) * alloc_cnt,
12212 KM_NOSLEEP)) == NULL) {
12213 fcp_log(CE_WARN, pptr->port_dip,
12214 "!fcp%d: failed to allocate portmap",
12215 instance);
12216 break;
12217 }
12218
12219 max_cnt = alloc_cnt;
12220 if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
12221 &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
12222 FC_SUCCESS) {
12223 caddr_t msg;
12224
12225 (void) fc_ulp_error(res, &msg);
12226
12227 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12228 fcp_trace, FCP_BUF_LEVEL_2, 0,
12229 "resume failed getportmap: reason=0x%x",
12230 res);
12231
12232 fcp_log(CE_WARN, pptr->port_dip,
12233 "!failed to get port map : %s", msg);
12234 break;
12235 }
12236 if (max_cnt > alloc_cnt) {
12237 alloc_cnt = max_cnt;
12238 }
12239 }
12240
12241 /*
12242 * do the SCSI device discovery and create
12243 * the devinfos
12244 */
12245 fcp_statec_callback(ulph, pptr->port_fp_handle,
12246 pptr->port_phys_state, pptr->port_topology, tmp_list,
12247 max_cnt, pptr->port_id);
12248
12249 res = DDI_SUCCESS;
12250 break;
12251
12252 default:
12253 fcp_log(CE_WARN, pptr->port_dip,
12254 "!fcp%d: invalid port state at attach=0x%x",
12255 instance, pptr->port_phys_state);
12256
12257 mutex_enter(&pptr->port_mutex);
12258 pptr->port_phys_state = FCP_STATE_OFFLINE;
12259 mutex_exit(&pptr->port_mutex);
12260 res = DDI_SUCCESS;
12261
12262 break;
12263 }
12264
12265 if (tmp_list != NULL) {
12266 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
12267 }
12268
12269 return (res);
12270 }
12271
12272
12273 static void
12274 fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
12275 {
12276 pptr->port_fp_modlinkage = *pinfo->port_linkage;
12277 pptr->port_dip = pinfo->port_dip;
12278 pptr->port_fp_handle = pinfo->port_handle;
12279 if (pinfo->port_acc_attr != NULL) {
12280 /*
12281 * FCA supports DMA
12282 */
12283 pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
12284 pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
12285 pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
12286 pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
12287 }
12288 pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
12289 pptr->port_max_exch = pinfo->port_fca_max_exch;
12290 pptr->port_phys_state = pinfo->port_state;
12291 pptr->port_topology = pinfo->port_flags;
12292 pptr->port_reset_action = pinfo->port_reset_action;
12293 pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
12294 pptr->port_fcp_dma = pinfo->port_fcp_dma;
12295 bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
12296 bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
12297
12298 /* Clear FMA caps to avoid fm-capability ereport */
12299 if (pptr->port_cmd_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12300 pptr->port_cmd_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12301 if (pptr->port_data_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12302 pptr->port_data_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12303 if (pptr->port_resp_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12304 pptr->port_resp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12305 }
12306
12307 /*
12308 * If the elements wait field is set to 1 then
12309 * another thread is waiting for the operation to complete. Once
12310 * it is complete, the waiting thread is signaled and the element is
12311 * freed by the waiting thread. If the elements wait field is set to 0
12312 * the element is freed.
12313 */
12314 static void
12315 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12316 {
12317 ASSERT(elem != NULL);
12318 mutex_enter(&elem->mutex);
12319 elem->result = result;
12320 if (elem->wait) {
12321 elem->wait = 0;
12322 cv_signal(&elem->cv);
12323 mutex_exit(&elem->mutex);
12324 } else {
12325 mutex_exit(&elem->mutex);
12326 cv_destroy(&elem->cv);
12327 mutex_destroy(&elem->mutex);
12328 kmem_free(elem, sizeof (struct fcp_hp_elem));
12329 }
12330 }
12331
12332 /*
12333 * This function is invoked from the taskq thread to allocate
12334 * devinfo nodes and to online/offline them.
12335 */
12336 static void
12337 fcp_hp_task(void *arg)
12338 {
12339 struct fcp_hp_elem *elem = (struct fcp_hp_elem *)arg;
12340 struct fcp_lun *plun = elem->lun;
12341 struct fcp_port *pptr = elem->port;
12342 int result;
12343
12344 ASSERT(elem->what == FCP_ONLINE ||
12345 elem->what == FCP_OFFLINE ||
12346 elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
12347 elem->what == FCP_MPXIO_PATH_SET_BUSY);
12348
12349 mutex_enter(&pptr->port_mutex);
12350 mutex_enter(&plun->lun_mutex);
12351 if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
12352 plun->lun_event_count != elem->event_cnt) ||
12353 pptr->port_state & (FCP_STATE_SUSPENDED |
12354 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
12355 mutex_exit(&plun->lun_mutex);
12356 mutex_exit(&pptr->port_mutex);
12357 fcp_process_elem(elem, NDI_FAILURE);
12358 return;
12359 }
12360 mutex_exit(&plun->lun_mutex);
12361 mutex_exit(&pptr->port_mutex);
12362
12363 result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
12364 elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
12365 fcp_process_elem(elem, result);
12366 }
12367
12368
12369 static child_info_t *
12370 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12371 int tcount)
12372 {
12373 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12374
12375 if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12376 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12377
12378 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12379 /*
12380 * Child has not been created yet. Create the child device
12381 * based on the per-Lun flags.
12382 */
12383 if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12384 plun->lun_cip =
12385 CIP(fcp_create_dip(plun, lcount, tcount));
12386 plun->lun_mpxio = 0;
12387 } else {
12388 plun->lun_cip =
12389 CIP(fcp_create_pip(plun, lcount, tcount));
12390 plun->lun_mpxio = 1;
12391 }
12392 } else {
12393 plun->lun_cip = cip;
12394 }
12395
12396 return (plun->lun_cip);
12397 }
12398
12399
12400 static int
12401 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12402 {
12403 int rval = FC_FAILURE;
12404 dev_info_t *pdip;
12405 struct dev_info *dip;
12406 int circular;
12407
12408 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12409
12410 pdip = plun->lun_tgt->tgt_port->port_dip;
12411
12412 if (plun->lun_cip == NULL) {
12413 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12414 fcp_trace, FCP_BUF_LEVEL_3, 0,
12415 "fcp_is_dip_present: plun->lun_cip is NULL: "
12416 "plun: %p lun state: %x num: %d target state: %x",
12417 plun, plun->lun_state, plun->lun_num,
12418 plun->lun_tgt->tgt_port->port_state);
12419 return (rval);
12420 }
12421 ndi_devi_enter(pdip, &circular);
12422 dip = DEVI(pdip)->devi_child;
12423 while (dip) {
12424 if (dip == DEVI(cdip)) {
12425 rval = FC_SUCCESS;
12426 break;
12427 }
12428 dip = dip->devi_sibling;
12429 }
12430 ndi_devi_exit(pdip, circular);
12431 return (rval);
12432 }
12433
12434 static int
12435 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12436 {
12437 int rval = FC_FAILURE;
12438
12439 ASSERT(plun != NULL);
12440 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12441
12442 if (plun->lun_mpxio == 0) {
12443 rval = fcp_is_dip_present(plun, DIP(cip));
12444 } else {
12445 rval = fcp_is_pip_present(plun, PIP(cip));
12446 }
12447
12448 return (rval);
12449 }
12450
12451 /*
12452 * Function: fcp_create_dip
12453 *
12454 * Description: Creates a dev_info_t structure for the LUN specified by the
12455 * caller.
12456 *
12457 * Argument: plun Lun structure
12458 * link_cnt Link state count.
12459 * tgt_cnt Target state change count.
12460 *
12461 * Return Value: NULL if it failed
12462 * dev_info_t structure address if it succeeded
12463 *
12464 * Context: Kernel context
12465 */
12466 static dev_info_t *
12467 fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
12468 {
12469 int failure = 0;
12470 uint32_t tgt_id;
12471 uint64_t sam_lun;
12472 struct fcp_tgt *ptgt = plun->lun_tgt;
12473 struct fcp_port *pptr = ptgt->tgt_port;
12474 dev_info_t *pdip = pptr->port_dip;
12475 dev_info_t *cdip = NULL;
12476 dev_info_t *old_dip = DIP(plun->lun_cip);
12477 char *nname = NULL;
12478 char **compatible = NULL;
12479 int ncompatible;
12480 char *scsi_binding_set;
12481 char t_pwwn[17];
12482
12483 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12484 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12485
12486 /* get the 'scsi-binding-set' property */
12487 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
12488 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
12489 &scsi_binding_set) != DDI_PROP_SUCCESS) {
12490 scsi_binding_set = NULL;
12491 }
12492
12493 /* determine the node name and compatible */
12494 scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12495 plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12496 if (scsi_binding_set) {
12497 ddi_prop_free(scsi_binding_set);
12498 }
12499
12500 if (nname == NULL) {
12501 #ifdef DEBUG
12502 cmn_err(CE_WARN, "%s%d: no driver for "
12503 "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12504 " compatible: %s",
12505 ddi_driver_name(pdip), ddi_get_instance(pdip),
12506 ptgt->tgt_port_wwn.raw_wwn[0],
12507 ptgt->tgt_port_wwn.raw_wwn[1],
12508 ptgt->tgt_port_wwn.raw_wwn[2],
12509 ptgt->tgt_port_wwn.raw_wwn[3],
12510 ptgt->tgt_port_wwn.raw_wwn[4],
12511 ptgt->tgt_port_wwn.raw_wwn[5],
12512 ptgt->tgt_port_wwn.raw_wwn[6],
12513 ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12514 *compatible);
12515 #endif /* DEBUG */
12516 failure++;
12517 goto end_of_fcp_create_dip;
12518 }
12519
12520 cdip = fcp_find_existing_dip(plun, pdip, nname);
12521
12522 /*
12523 * if the old_dip does not match the cdip, that means there is
12524 * some property change. since we'll be using the cdip, we need
12525 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12526 * then the dtype for the device has been updated. Offline the
12527 * the old device and create a new device with the new device type
12528 * Refer to bug: 4764752
12529 */
12530 if (old_dip && (cdip != old_dip ||
12531 plun->lun_state & FCP_LUN_CHANGED)) {
12532 plun->lun_state &= ~(FCP_LUN_INIT);
12533 mutex_exit(&plun->lun_mutex);
12534 mutex_exit(&pptr->port_mutex);
12535
12536 mutex_enter(&ptgt->tgt_mutex);
12537 (void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
12538 link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
12539 mutex_exit(&ptgt->tgt_mutex);
12540
12541 #ifdef DEBUG
12542 if (cdip != NULL) {
12543 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12544 fcp_trace, FCP_BUF_LEVEL_2, 0,
12545 "Old dip=%p; New dip=%p don't match", old_dip,
12546 cdip);
12547 } else {
12548 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12549 fcp_trace, FCP_BUF_LEVEL_2, 0,
12550 "Old dip=%p; New dip=NULL don't match", old_dip);
12551 }
12552 #endif
12553
12554 mutex_enter(&pptr->port_mutex);
12555 mutex_enter(&plun->lun_mutex);
12556 }
12557
12558 if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12559 plun->lun_state &= ~(FCP_LUN_CHANGED);
12560 if (ndi_devi_alloc(pptr->port_dip, nname,
12561 DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
12562 failure++;
12563 goto end_of_fcp_create_dip;
12564 }
12565 }
12566
12567 /*
12568 * Previously all the properties for the devinfo were destroyed here
12569 * with a call to ndi_prop_remove_all(). Since this may cause loss of
12570 * the devid property (and other properties established by the target
12571 * driver or framework) which the code does not always recreate, this
12572 * call was removed.
12573 * This opens a theoretical possibility that we may return with a
12574 * stale devid on the node if the scsi entity behind the fibre channel
12575 * lun has changed.
12576 */
12577
12578 /* decorate the node with compatible */
12579 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
12580 "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
12581 failure++;
12582 goto end_of_fcp_create_dip;
12583 }
12584
12585 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
12586 ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12587 failure++;
12588 goto end_of_fcp_create_dip;
12589 }
12590
12591 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
12592 ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12593 failure++;
12594 goto end_of_fcp_create_dip;
12595 }
12596
12597 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12598 t_pwwn[16] = '\0';
12599 if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
12600 != DDI_PROP_SUCCESS) {
12601 failure++;
12602 goto end_of_fcp_create_dip;
12603 }
12604
12605 /*
12606 * If there is no hard address - We might have to deal with
12607 * that by using WWN - Having said that it is important to
12608 * recognize this problem early so ssd can be informed of
12609 * the right interconnect type.
12610 */
12611 if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
12612 tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12613 } else {
12614 tgt_id = ptgt->tgt_d_id;
12615 }
12616
12617 if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
12618 tgt_id) != DDI_PROP_SUCCESS) {
12619 failure++;
12620 goto end_of_fcp_create_dip;
12621 }
12622
12623 if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
12624 (int)plun->lun_num) != DDI_PROP_SUCCESS) {
12625 failure++;
12626 goto end_of_fcp_create_dip;
12627 }
12628 bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12629 if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
12630 sam_lun) != DDI_PROP_SUCCESS) {
12631 failure++;
12632 goto end_of_fcp_create_dip;
12633 }
12634
12635 end_of_fcp_create_dip:
12636 scsi_hba_nodename_compatible_free(nname, compatible);
12637
12638 if (cdip != NULL && failure) {
12639 (void) ndi_prop_remove_all(cdip);
12640 (void) ndi_devi_free(cdip);
12641 cdip = NULL;
12642 }
12643
12644 return (cdip);
12645 }
12646
12647 /*
12648 * Function: fcp_create_pip
12649 *
12650 * Description: Creates a Path Id for the LUN specified by the caller.
12651 *
12652 * Argument: plun Lun structure
12653 * link_cnt Link state count.
12654 * tgt_cnt Target state count.
12655 *
12656 * Return Value: NULL if it failed
12657 * mdi_pathinfo_t structure address if it succeeded
12658 *
12659 * Context: Kernel context
12660 */
12661 static mdi_pathinfo_t *
12662 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12663 {
12664 int i;
12665 char buf[MAXNAMELEN];
12666 char uaddr[MAXNAMELEN];
12667 int failure = 0;
12668 uint32_t tgt_id;
12669 uint64_t sam_lun;
12670 struct fcp_tgt *ptgt = plun->lun_tgt;
12671 struct fcp_port *pptr = ptgt->tgt_port;
12672 dev_info_t *pdip = pptr->port_dip;
12673 mdi_pathinfo_t *pip = NULL;
12674 mdi_pathinfo_t *old_pip = PIP(plun->lun_cip);
12675 char *nname = NULL;
12676 char **compatible = NULL;
12677 int ncompatible;
12678 char *scsi_binding_set;
12679 char t_pwwn[17];
12680
12681 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12682 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12683
12684 scsi_binding_set = "vhci";
12685
12686 /* determine the node name and compatible */
12687 scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12688 plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12689
12690 if (nname == NULL) {
12691 #ifdef DEBUG
12692 cmn_err(CE_WARN, "fcp_create_dip: %s%d: no driver for "
12693 "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12694 " compatible: %s",
12695 ddi_driver_name(pdip), ddi_get_instance(pdip),
12696 ptgt->tgt_port_wwn.raw_wwn[0],
12697 ptgt->tgt_port_wwn.raw_wwn[1],
12698 ptgt->tgt_port_wwn.raw_wwn[2],
12699 ptgt->tgt_port_wwn.raw_wwn[3],
12700 ptgt->tgt_port_wwn.raw_wwn[4],
12701 ptgt->tgt_port_wwn.raw_wwn[5],
12702 ptgt->tgt_port_wwn.raw_wwn[6],
12703 ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12704 *compatible);
12705 #endif /* DEBUG */
12706 failure++;
12707 goto end_of_fcp_create_pip;
12708 }
12709
12710 pip = fcp_find_existing_pip(plun, pdip);
12711
12712 /*
12713 * if the old_dip does not match the cdip, that means there is
12714 * some property change. since we'll be using the cdip, we need
12715 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12716 * then the dtype for the device has been updated. Offline the
12717 * the old device and create a new device with the new device type
12718 * Refer to bug: 4764752
12719 */
12720 if (old_pip && (pip != old_pip ||
12721 plun->lun_state & FCP_LUN_CHANGED)) {
12722 plun->lun_state &= ~(FCP_LUN_INIT);
12723 mutex_exit(&plun->lun_mutex);
12724 mutex_exit(&pptr->port_mutex);
12725
12726 mutex_enter(&ptgt->tgt_mutex);
12727 (void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12728 FCP_OFFLINE, lcount, tcount,
12729 NDI_DEVI_REMOVE, 0);
12730 mutex_exit(&ptgt->tgt_mutex);
12731
12732 if (pip != NULL) {
12733 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12734 fcp_trace, FCP_BUF_LEVEL_2, 0,
12735 "Old pip=%p; New pip=%p don't match",
12736 old_pip, pip);
12737 } else {
12738 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12739 fcp_trace, FCP_BUF_LEVEL_2, 0,
12740 "Old pip=%p; New pip=NULL don't match",
12741 old_pip);
12742 }
12743
12744 mutex_enter(&pptr->port_mutex);
12745 mutex_enter(&plun->lun_mutex);
12746 }
12747
12748 /*
12749 * Since FC_WWN_SIZE is 8 bytes and its not like the
12750 * lun_guid_size which is dependent on the target, I don't
12751 * believe the same trancation happens here UNLESS the standards
12752 * change the FC_WWN_SIZE value to something larger than
12753 * MAXNAMELEN(currently 255 bytes).
12754 */
12755
12756 for (i = 0; i < FC_WWN_SIZE; i++) {
12757 (void) sprintf(&buf[i << 1], "%02x",
12758 ptgt->tgt_port_wwn.raw_wwn[i]);
12759 }
12760
12761 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12762 buf, plun->lun_num);
12763
12764 if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12765 /*
12766 * Release the locks before calling into
12767 * mdi_pi_alloc_compatible() since this can result in a
12768 * callback into fcp which can result in a deadlock
12769 * (see bug # 4870272).
12770 *
12771 * Basically, what we are trying to avoid is the scenario where
12772 * one thread does ndi_devi_enter() and tries to grab
12773 * fcp_mutex and another does it the other way round.
12774 *
12775 * But before we do that, make sure that nobody releases the
12776 * port in the meantime. We can do this by setting a flag.
12777 */
12778 plun->lun_state &= ~(FCP_LUN_CHANGED);
12779 pptr->port_state |= FCP_STATE_IN_MDI;
12780 mutex_exit(&plun->lun_mutex);
12781 mutex_exit(&pptr->port_mutex);
12782 if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12783 uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12784 fcp_log(CE_WARN, pptr->port_dip,
12785 "!path alloc failed:0x%x", plun);
12786 mutex_enter(&pptr->port_mutex);
12787 mutex_enter(&plun->lun_mutex);
12788 pptr->port_state &= ~FCP_STATE_IN_MDI;
12789 failure++;
12790 goto end_of_fcp_create_pip;
12791 }
12792 mutex_enter(&pptr->port_mutex);
12793 mutex_enter(&plun->lun_mutex);
12794 pptr->port_state &= ~FCP_STATE_IN_MDI;
12795 } else {
12796 (void) mdi_prop_remove(pip, NULL);
12797 }
12798
12799 mdi_pi_set_phci_private(pip, (caddr_t)plun);
12800
12801 if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12802 ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12803 != DDI_PROP_SUCCESS) {
12804 failure++;
12805 goto end_of_fcp_create_pip;
12806 }
12807
12808 if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12809 ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12810 != DDI_PROP_SUCCESS) {
12811 failure++;
12812 goto end_of_fcp_create_pip;
12813 }
12814
12815 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12816 t_pwwn[16] = '\0';
12817 if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12818 != DDI_PROP_SUCCESS) {
12819 failure++;
12820 goto end_of_fcp_create_pip;
12821 }
12822
12823 /*
12824 * If there is no hard address - We might have to deal with
12825 * that by using WWN - Having said that it is important to
12826 * recognize this problem early so ssd can be informed of
12827 * the right interconnect type.
12828 */
12829 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12830 ptgt->tgt_hard_addr != 0) {
12831 tgt_id = (uint32_t)
12832 fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12833 } else {
12834 tgt_id = ptgt->tgt_d_id;
12835 }
12836
12837 if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12838 != DDI_PROP_SUCCESS) {
12839 failure++;
12840 goto end_of_fcp_create_pip;
12841 }
12842
12843 if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12844 != DDI_PROP_SUCCESS) {
12845 failure++;
12846 goto end_of_fcp_create_pip;
12847 }
12848 bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12849 if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12850 != DDI_PROP_SUCCESS) {
12851 failure++;
12852 goto end_of_fcp_create_pip;
12853 }
12854
12855 end_of_fcp_create_pip:
12856 scsi_hba_nodename_compatible_free(nname, compatible);
12857
12858 if (pip != NULL && failure) {
12859 (void) mdi_prop_remove(pip, NULL);
12860 mutex_exit(&plun->lun_mutex);
12861 mutex_exit(&pptr->port_mutex);
12862 (void) mdi_pi_free(pip, 0);
12863 mutex_enter(&pptr->port_mutex);
12864 mutex_enter(&plun->lun_mutex);
12865 pip = NULL;
12866 }
12867
12868 return (pip);
12869 }
12870
/*
 * Search pdip's existing children for a devinfo node that matches this
 * LUN: same node name, same node/port WWN properties, same target id
 * and same LUN number.  Returns the matching dev_info_t, or NULL when
 * the walk finds none.
 */
static dev_info_t *
fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
{
	uint_t			nbytes;
	uchar_t			*bytes;
	uint_t			nwords;
	uint32_t		tgt_id;
	int			*words;
	dev_info_t		*cdip;
	dev_info_t		*ndip;
	struct fcp_tgt		*ptgt = plun->lun_tgt;
	struct fcp_port		*pptr = ptgt->tgt_port;
	int			circular;

	/* hold the parent busy while walking its child list */
	ndi_devi_enter(pdip, &circular);

	ndip = (dev_info_t *)DEVI(pdip)->devi_child;
	while ((cdip = ndip) != NULL) {
		/* grab the sibling first; cdip may be the match we keep */
		ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;

		/* node name must match */
		if (strcmp(DEVI(cdip)->devi_node_name, name)) {
			continue;
		}

		/* node WWN property must exist, be well-formed, and match */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
		    &nbytes) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
			if (bytes != NULL) {
				ddi_prop_free(bytes);
			}
			continue;
		}
		ASSERT(bytes != NULL);

		if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
			ddi_prop_free(bytes);
			continue;
		}

		ddi_prop_free(bytes);

		/* port WWN property must exist, be well-formed, and match */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
		    &nbytes) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
			if (bytes != NULL) {
				ddi_prop_free(bytes);
			}
			continue;
		}
		ASSERT(bytes != NULL);

		if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
			ddi_prop_free(bytes);
			continue;
		}

		ddi_prop_free(bytes);

		/* target id property must be a single int and match */
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
		    &nwords) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nwords != 1 || words == NULL) {
			if (words != NULL) {
				ddi_prop_free(words);
			}
			continue;
		}
		ASSERT(words != NULL);

		/*
		 * If there is no hard address - We might have to deal with
		 * that by using WWN - Having said that it is important to
		 * recognize this problem early so ssd can be informed of
		 * the right interconnect type.
		 */
		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
		    ptgt->tgt_hard_addr != 0) {
			tgt_id =
			    (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
		} else {
			tgt_id = ptgt->tgt_d_id;
		}

		if (tgt_id != (uint32_t)*words) {
			ddi_prop_free(words);
			continue;
		}
		ddi_prop_free(words);

		/* LUN number property must be a single int and match */
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
		    &nwords) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nwords != 1 || words == NULL) {
			if (words != NULL) {
				ddi_prop_free(words);
			}
			continue;
		}
		ASSERT(words != NULL);

		/* all criteria matched: cdip is our node */
		if (plun->lun_num == (uint16_t)*words) {
			ddi_prop_free(words);
			break;
		}
		ddi_prop_free(words);
	}
	ndi_devi_exit(pdip, circular);

	return (cdip);
}
12995
12996
12997 static int
12998 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
12999 {
13000 dev_info_t *pdip;
13001 char buf[MAXNAMELEN];
13002 char uaddr[MAXNAMELEN];
13003 int rval = FC_FAILURE;
13004
13005 ASSERT(MUTEX_HELD(&plun->lun_mutex));
13006
13007 pdip = plun->lun_tgt->tgt_port->port_dip;
13008
13009 /*
13010 * Check if pip (and not plun->lun_cip) is NULL. plun->lun_cip can be
13011 * non-NULL even when the LUN is not there as in the case when a LUN is
13012 * configured and then deleted on the device end (for T3/T4 case). In
13013 * such cases, pip will be NULL.
13014 *
13015 * If the device generates an RSCN, it will end up getting offlined when
13016 * it disappeared and a new LUN will get created when it is rediscovered
13017 * on the device. If we check for lun_cip here, the LUN will not end
13018 * up getting onlined since this function will end up returning a
13019 * FC_SUCCESS.
13020 *
13021 * The behavior is different on other devices. For instance, on a HDS,
13022 * there was no RSCN generated by the device but the next I/O generated
13023 * a check condition and rediscovery got triggered that way. So, in
13024 * such cases, this path will not be exercised
13025 */
13026 if (pip == NULL) {
13027 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
13028 fcp_trace, FCP_BUF_LEVEL_4, 0,
13029 "fcp_is_pip_present: plun->lun_cip is NULL: "
13030 "plun: %p lun state: %x num: %d target state: %x",
13031 plun, plun->lun_state, plun->lun_num,
13032 plun->lun_tgt->tgt_port->port_state);
13033 return (rval);
13034 }
13035
13036 fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
13037
13038 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13039
13040 if (mdi_pi_find(pdip, NULL, uaddr) == pip) {
13041 rval = FC_SUCCESS;
13042 }
13043
13044 return (rval);
13045 }
13046
13047 static mdi_pathinfo_t *
13048 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
13049 {
13050 char buf[MAXNAMELEN];
13051 char uaddr[MAXNAMELEN];
13052 mdi_pathinfo_t *pip;
13053 struct fcp_tgt *ptgt = plun->lun_tgt;
13054 struct fcp_port *pptr = ptgt->tgt_port;
13055
13056 ASSERT(MUTEX_HELD(&pptr->port_mutex));
13057
13058 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
13059 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13060
13061 pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
13062
13063 return (pip);
13064 }
13065
13066
/*
 * Function: fcp_online_child
 *
 * Description: Onlines the child node attached to a LUN. For a non-MPxIO
 *		LUN (lun_mpxio == 0) this means ndi_devi_online() (or
 *		ndi_devi_bind_driver() if the parent is not yet attached) on
 *		the devinfo node; for an MPxIO LUN it means mdi_pi_online()
 *		on the pathinfo node. If MDI reports MDI_NOT_SUPPORTED, the
 *		LUN is converted to legacy enumeration: a devinfo node is
 *		created via fcp_create_dip(), the pathinfo node is freed, and
 *		the routine retries from the "again" label in legacy mode.
 *		On success the FCP_LUN_INIT flag is set and the FCAL insert
 *		event callbacks are run.
 *
 * Argument:	*plun		LUN whose child is to be onlined.
 *		*cip		Child node (devinfo or pathinfo, per lun_mpxio).
 *		lcount		Link count (passed to fcp_create_dip on the
 *				MDI_NOT_SUPPORTED fallback path).
 *		tcount		Target change count (same fallback path).
 *		flags		NDI/MDI online flags.
 *		*circ		MDI pHCI circular-busy cookie, needed to exit
 *				and re-enter the pHCI around mdi_pi_online.
 *
 * Return Value: NDI_SUCCESS or NDI_FAILURE (MDI results are mapped).
 *
 * Context:	Called with pptr->port_mutex and plun->lun_mutex held; both
 *		are dropped across the online calls and re-acquired before
 *		returning.
 */
static int
fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags, int *circ)
{
	int rval;
	struct fcp_port *pptr = plun->lun_tgt->tgt_port;
	struct fcp_tgt *ptgt = plun->lun_tgt;
	dev_info_t *cdip = NULL;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));
	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	if (plun->lun_cip == NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_online_child: plun->lun_cip is NULL: "
		    "plun: %p state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (NDI_FAILURE);
	}
again:
	if (plun->lun_mpxio == 0) {
		/* Legacy (non-MPxIO) path: online the devinfo node. */
		cdip = DIP(cip);
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "!Invoking ndi_devi_online for %s: target=%x lun=%x",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);

		/*
		 * We could check for FCP_LUN_INIT here but chances
		 * of getting here when it's already in FCP_LUN_INIT
		 * is rare and a duplicate ndi_devi_online wouldn't
		 * hurt either (as the node would already have been
		 * in CF2)
		 */
		if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
			/* Parent not attached yet: only bind the driver. */
			rval = ndi_devi_bind_driver(cdip, flags);
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "!Invoking ndi_devi_bind_driver: rval=%d", rval);
		} else {
			rval = ndi_devi_online(cdip, flags);
		}

		/*
		 * We log the message into trace buffer if the device
		 * is "ses" and into syslog for any other device
		 * type. This is to prevent the ndi_devi_online failure
		 * message that appears for V880/A5K ses devices.
		 */
		if (rval == NDI_SUCCESS) {
			mutex_enter(&ptgt->tgt_mutex);
			plun->lun_state |= FCP_LUN_INIT;
			mutex_exit(&ptgt->tgt_mutex);
		} else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
			fcp_log(CE_NOTE, pptr->port_dip,
			    "!ndi_devi_online:"
			    " failed for %s: target=%x lun=%x %x",
			    ddi_get_name(cdip), ptgt->tgt_d_id,
			    plun->lun_num, rval);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    " !ndi_devi_online:"
			    " failed for %s: target=%x lun=%x %x",
			    ddi_get_name(cdip), ptgt->tgt_d_id,
			    plun->lun_num, rval);
		}
	} else {
		/* MPxIO path: online the pathinfo node. */
		cdip = mdi_pi_get_client(PIP(cip));
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "!Invoking mdi_pi_online for %s: target=%x lun=%x",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);

		/*
		 * Hold path and exit phci to avoid deadlock with power
		 * management code during mdi_pi_online.
		 */
		mdi_hold_path(PIP(cip));
		mdi_devi_exit_phci(pptr->port_dip, *circ);

		rval = mdi_pi_online(PIP(cip), flags);

		mdi_devi_enter_phci(pptr->port_dip, circ);
		mdi_rele_path(PIP(cip));

		if (rval == MDI_SUCCESS) {
			mutex_enter(&ptgt->tgt_mutex);
			plun->lun_state |= FCP_LUN_INIT;
			mutex_exit(&ptgt->tgt_mutex);

			/*
			 * Clear MPxIO path permanent disable in case
			 * fcp hotplug dropped the offline event.
			 */
			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);

		} else if (rval == MDI_NOT_SUPPORTED) {
			child_info_t *old_cip = cip;

			/*
			 * MPxIO does not support this device yet.
			 * Enumerate in legacy mode.
			 */
			mutex_enter(&pptr->port_mutex);
			mutex_enter(&plun->lun_mutex);
			plun->lun_mpxio = 0;
			plun->lun_cip = NULL;
			cdip = fcp_create_dip(plun, lcount, tcount);
			plun->lun_cip = cip = CIP(cdip);
			if (cip == NULL) {
				fcp_log(CE_WARN, pptr->port_dip,
				    "!fcp_online_child: "
				    "Create devinfo failed for LU=%p", plun);
				mutex_exit(&plun->lun_mutex);

				mutex_enter(&ptgt->tgt_mutex);
				plun->lun_state |= FCP_LUN_OFFLINE;
				mutex_exit(&ptgt->tgt_mutex);

				mutex_exit(&pptr->port_mutex);

				/*
				 * free the mdi_pathinfo node
				 */
				(void) mdi_pi_free(PIP(old_cip), 0);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_3, 0,
				    "fcp_online_child: creating devinfo "
				    "node 0x%p for plun 0x%p",
				    cip, plun);
				mutex_exit(&plun->lun_mutex);
				mutex_exit(&pptr->port_mutex);
				/*
				 * free the mdi_pathinfo node
				 */
				(void) mdi_pi_free(PIP(old_cip), 0);
				mutex_enter(&pptr->port_mutex);
				mutex_enter(&plun->lun_mutex);
				/* Retry the online in legacy mode. */
				goto again;
			}
		} else {
			if (cdip) {
				fcp_log(CE_NOTE, pptr->port_dip,
				    "!fcp_online_child: mdi_pi_online:"
				    " failed for %s: target=%x lun=%x %x",
				    ddi_get_name(cdip), ptgt->tgt_d_id,
				    plun->lun_num, rval);
			}
		}
		/* Map the MDI result to the NDI convention for our caller. */
		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
	}

	if (rval == NDI_SUCCESS) {
		if (cdip) {
			/* Notify interested parties of the insertion. */
			(void) ndi_event_retrieve_cookie(
			    pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
			    &fcp_insert_eid, NDI_EVENT_NOPASS);
			(void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
			    cdip, fcp_insert_eid, NULL);
		}
	}
	/* Re-acquire the mutexes the caller expects to still hold. */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);
	return (rval);
}
13242
13243 /* ARGSUSED */
/*
 * Function: fcp_offline_child
 *
 * Description: Offlines the child node attached to a LUN: ndi_devi_offline()
 *		for a legacy devinfo node, mdi_pi_offline() for an MPxIO
 *		pathinfo node (NDI_DEVI_REMOVE is masked off for MDI; the
 *		pathinfo node is instead freed explicitly below). On success,
 *		FCP_LUN_INIT is cleared, and if NDI_DEVI_REMOVE was requested
 *		the LUN's reference to the child (lun_cip) and any saved old
 *		GUID are released.
 *
 * Argument:	*plun		LUN whose child is to be offlined.
 *		*cip		Child node (devinfo or pathinfo, per lun_mpxio).
 *		lcount		Unused here (ARGSUSED).
 *		tcount		Unused here (ARGSUSED).
 *		flags		NDI/MDI offline flags (NDI_DEVI_REMOVE
 *				triggers node teardown).
 *		*circ		MDI pHCI circular-busy cookie for the
 *				exit/enter around mdi_pi_offline.
 *
 * Return Value: NDI_SUCCESS or NDI_FAILURE (MDI results are mapped).
 *
 * Context:	Called with plun->lun_mutex and pptr->port_mutex held; both
 *		are dropped across the offline calls and re-acquired before
 *		returning.
 */
static int
fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags, int *circ)
{
	int rval;
	int lun_mpxio;
	struct fcp_port *pptr = plun->lun_tgt->tgt_port;
	struct fcp_tgt *ptgt = plun->lun_tgt;
	dev_info_t *cdip;

	ASSERT(MUTEX_HELD(&plun->lun_mutex));
	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	if (plun->lun_cip == NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_offline_child: plun->lun_cip is NULL: "
		    "plun: %p lun state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (NDI_FAILURE);
	}

	/*
	 * We will use this value twice. Make a copy to be sure we use
	 * the same value in both places.
	 */
	lun_mpxio = plun->lun_mpxio;

	if (lun_mpxio == 0) {
		/* Legacy path: offline the devinfo node. */
		cdip = DIP(cip);
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);
		rval = ndi_devi_offline(DIP(cip), NDI_DEVFS_CLEAN | flags);
		if (rval != NDI_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_offline_child: ndi_devi_offline failed "
			    "rval=%x cip=%p", rval, cip);
		}
	} else {
		/* MPxIO path: offline the pathinfo node. */
		cdip = mdi_pi_get_client(PIP(cip));
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		/*
		 * Exit phci to avoid deadlock with power management code
		 * during mdi_pi_offline
		 */
		mdi_hold_path(PIP(cip));
		mdi_devi_exit_phci(pptr->port_dip, *circ);

		/* NDI_DEVI_REMOVE is handled below via mdi_pi_free. */
		rval = mdi_pi_offline(PIP(cip), flags & ~NDI_DEVI_REMOVE);

		mdi_devi_enter_phci(pptr->port_dip, circ);
		mdi_rele_path(PIP(cip));

		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
	}

	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_state &= ~FCP_LUN_INIT;
	mutex_exit(&ptgt->tgt_mutex);

	if (rval == NDI_SUCCESS) {
		/* Success: suppress the failure trace at the bottom. */
		cdip = NULL;
		if (flags & NDI_DEVI_REMOVE) {
			mutex_enter(&plun->lun_mutex);
			/*
			 * If the guid of the LUN changes, lun_cip will not
			 * equal to cip, and after offlining the LUN with the
			 * old guid, we should keep lun_cip since it's the cip
			 * of the LUN with the new guid.
			 * Otherwise remove our reference to child node.
			 *
			 * This must be done before the child node is freed,
			 * otherwise other threads could see a stale lun_cip
			 * pointer.
			 */
			if (plun->lun_cip == cip) {
				plun->lun_cip = NULL;
			}
			if (plun->lun_old_guid) {
				kmem_free(plun->lun_old_guid,
				    plun->lun_old_guid_size);
				plun->lun_old_guid = NULL;
				plun->lun_old_guid_size = 0;
			}
			mutex_exit(&plun->lun_mutex);
		}
	}

	if (lun_mpxio != 0) {
		if (rval == NDI_SUCCESS) {
			/*
			 * Clear MPxIO path permanent disable as the path is
			 * already offlined.
			 */
			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);

			if (flags & NDI_DEVI_REMOVE) {
				(void) mdi_pi_free(PIP(cip), 0);
			}
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_offline_child: mdi_pi_offline failed "
			    "rval=%x cip=%p", rval, cip);
		}
	}

	/* Re-acquire the mutexes the caller expects to still hold. */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);

	/* cdip is non-NULL only on the failure paths (see above). */
	if (cdip) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
		    " target=%x lun=%x", "ndi_offline",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
	}

	return (rval);
}
13367
/*
 * Function: fcp_remove_child
 *
 * Description: Tears down the child node of a LUN and clears lun_cip. For a
 *		legacy devinfo node, removes its properties and frees it. For
 *		an MPxIO pathinfo node, clears lun_cip first (so no other
 *		thread can see a stale pointer), then drops all three mutexes
 *		to offline and free the pathinfo node under the MDI pHCI
 *		busy/exit-phci protocol. If the child is no longer present
 *		(per fcp_is_child_present), only the stale lun_cip reference
 *		is cleared.
 *
 * Argument:	*plun		LUN whose child node is removed.
 *
 * Context:	Called with plun->lun_mutex held; the MPxIO branch also
 *		temporarily drops (and re-acquires) the target and port
 *		mutexes, which the caller must therefore hold as well.
 */
static void
fcp_remove_child(struct fcp_lun *plun)
{
	child_info_t *cip;
	int circ;

	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
		if (plun->lun_mpxio == 0) {
			/* Legacy devinfo node: strip properties and free. */
			(void) ndi_prop_remove_all(DIP(plun->lun_cip));
			(void) ndi_devi_free(DIP(plun->lun_cip));
			plun->lun_cip = NULL;
		} else {
			/*
			 * Clear reference to the child node in the lun.
			 * This must be done before freeing it with mdi_pi_free
			 * and with lun_mutex held so that other threads always
			 * see either valid lun_cip or NULL when holding
			 * lun_mutex. We keep a copy in cip.
			 */
			cip = plun->lun_cip;
			plun->lun_cip = NULL;

			/* Drop locks (innermost first) for the MDI calls. */
			mutex_exit(&plun->lun_mutex);
			mutex_exit(&plun->lun_tgt->tgt_mutex);
			mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);

			mdi_devi_enter(plun->lun_tgt->tgt_port->port_dip,
			    &circ);

			/*
			 * Exit phci to avoid deadlock with power management
			 * code during mdi_pi_offline
			 */
			mdi_hold_path(PIP(cip));
			mdi_devi_exit_phci(plun->lun_tgt->tgt_port->port_dip,
			    circ);
			(void) mdi_pi_offline(PIP(cip), 0);
			mdi_devi_enter_phci(plun->lun_tgt->tgt_port->port_dip,
			    &circ);
			mdi_rele_path(PIP(cip));

			mdi_devi_exit(plun->lun_tgt->tgt_port->port_dip, circ);

			FCP_TRACE(fcp_logq,
			    plun->lun_tgt->tgt_port->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "lun=%p pip freed %p", plun, cip);

			(void) mdi_prop_remove(PIP(cip), NULL);
			(void) mdi_pi_free(PIP(cip), 0);

			/* Re-acquire locks in the original order. */
			mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
			mutex_enter(&plun->lun_tgt->tgt_mutex);
			mutex_enter(&plun->lun_mutex);
		}
	} else {
		/* Child already gone; just drop the stale reference. */
		plun->lun_cip = NULL;
	}
}
13429
13430 /*
13431 * called when a timeout occurs
13432 *
13433 * can be scheduled during an attach or resume (if not already running)
13434 *
13435 * one timeout is set up for all ports
13436 *
13437 * acquires and releases the global mutex
13438 */
13439 /*ARGSUSED*/
/*
 * Function: fcp_watch
 *
 * Description: Watchdog timeout handler for all FCP ports. For each port it:
 *		(1) runs the offline-target, offline-LUN and reset-delay
 *		scans; (2) walks the overflow packet queue, failing commands
 *		marked FCP_INVALID_TIMEOUT and retransporting the rest once
 *		their cmd_timeout deadline has passed; (3) walks the internal
 *		packet (ipkt) list and retries expired PLOGI/PRLI/other
 *		internal commands, or fails them if not retryable. Finally it
 *		reschedules itself while fcp_watchdog_init > 0.
 *
 * Argument:	arg		Unused (ARGSUSED).
 *
 * Context:	timeout(9F) callback. Acquires and releases fcp_global_mutex,
 *		each port's port_mutex and port_pkt_mutex, and target mutexes;
 *		never holds any of them across the retry/fail calls.
 */
static void
fcp_watch(void *arg)
{
	struct fcp_port *pptr;
	struct fcp_ipkt *icmd;
	struct fcp_ipkt *nicmd;
	struct fcp_pkt *cmd;
	struct fcp_pkt *ncmd;
	struct fcp_pkt *tail;
	struct fcp_pkt *pcmd;
	struct fcp_pkt *save_head;
	struct fcp_port *save_port;

	/* increment global watchdog time */
	fcp_watchdog_time += fcp_watchdog_timeout;

	mutex_enter(&fcp_global_mutex);

	/* scan each port in our list */
	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
		/*
		 * Remember the list head so we can detect the port list
		 * changing underneath us while the global mutex is dropped.
		 */
		save_port = fcp_port_head;
		pptr->port_state |= FCP_STATE_IN_WATCHDOG;
		mutex_exit(&fcp_global_mutex);

		mutex_enter(&pptr->port_mutex);
		if (pptr->port_ipkt_list == NULL &&
		    (pptr->port_state & (FCP_STATE_SUSPENDED |
		    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
			/* Port is going away and has no pending ipkts. */
			pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
			mutex_exit(&pptr->port_mutex);
			mutex_enter(&fcp_global_mutex);
			goto end_of_watchdog;
		}

		/*
		 * We check if a list of targets need to be offlined.
		 */
		if (pptr->port_offline_tgts) {
			fcp_scan_offline_tgts(pptr);
		}

		/*
		 * We check if a list of luns need to be offlined.
		 */
		if (pptr->port_offline_luns) {
			fcp_scan_offline_luns(pptr);
		}

		/*
		 * We check if a list of targets or luns need to be reset.
		 */
		if (pptr->port_reset_list) {
			fcp_check_reset_delay(pptr);
		}

		mutex_exit(&pptr->port_mutex);

		/*
		 * This is where the pending commands (pkt) are checked for
		 * timeout.
		 */
		mutex_enter(&pptr->port_pkt_mutex);
		tail = pptr->port_pkt_tail;

		for (pcmd = NULL, cmd = pptr->port_pkt_head;
		    cmd != NULL; cmd = ncmd) {
			ncmd = cmd->cmd_next;
			/*
			 * If a command is in this queue the bit CFLAG_IN_QUEUE
			 * must be set.
			 */
			ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
			/*
			 * FCP_INVALID_TIMEOUT will be set for those
			 * command that need to be failed. Mostly those
			 * cmds that could not be queued down for the
			 * "timeout" value. cmd->cmd_timeout is used
			 * to try and requeue the command regularly.
			 */
			if (cmd->cmd_timeout >= fcp_watchdog_time) {
				/*
				 * This command hasn't timed out yet. Let's
				 * go to the next one.
				 */
				pcmd = cmd;
				goto end_of_loop;
			}

			/* Unlink the expired command from the queue. */
			if (cmd == pptr->port_pkt_head) {
				ASSERT(pcmd == NULL);
				pptr->port_pkt_head = cmd->cmd_next;
			} else {
				ASSERT(pcmd != NULL);
				pcmd->cmd_next = cmd->cmd_next;
			}

			if (cmd == pptr->port_pkt_tail) {
				ASSERT(cmd->cmd_next == NULL);
				pptr->port_pkt_tail = pcmd;
				if (pcmd) {
					pcmd->cmd_next = NULL;
				}
			}
			cmd->cmd_next = NULL;

			/*
			 * save the current head before dropping the
			 * mutex - If the head doesn't remain the
			 * same after re acquiring the mutex, just
			 * bail out and revisit on next tick.
			 *
			 * PS: The tail pointer can change as the commands
			 * get requeued after failure to retransport
			 */
			save_head = pptr->port_pkt_head;
			mutex_exit(&pptr->port_pkt_mutex);

			if (cmd->cmd_fp_pkt->pkt_timeout ==
			    FCP_INVALID_TIMEOUT) {
				struct scsi_pkt *pkt = cmd->cmd_pkt;
				struct fcp_lun *plun;
				struct fcp_tgt *ptgt;

				plun = ADDR2LUN(&pkt->pkt_address);
				ptgt = plun->lun_tgt;

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "SCSI cmd 0x%x to D_ID=%x timed out",
				    pkt->pkt_cdbp[0], ptgt->tgt_d_id);

				/*
				 * ABORTING commands fail with CMD_RESET;
				 * everything else fails with CMD_TIMEOUT.
				 */
				cmd->cmd_state == FCP_PKT_ABORTING ?
				    fcp_fail_cmd(cmd, CMD_RESET,
				    STAT_DEV_RESET) : fcp_fail_cmd(cmd,
				    CMD_TIMEOUT, STAT_ABORTED);
			} else {
				fcp_retransport_cmd(pptr, cmd);
			}
			mutex_enter(&pptr->port_pkt_mutex);
			if (save_head && save_head != pptr->port_pkt_head) {
				/*
				 * Looks like linked list got changed (mostly
				 * happens when an OFFLINE LUN code starts
				 * returning overflow queue commands in
				 * parallel. So bail out and revisit during
				 * next tick
				 */
				break;
			}
		end_of_loop:
			/*
			 * Scan only upto the previously known tail pointer
			 * to avoid excessive processing - lots of new packets
			 * could have been added to the tail or the old ones
			 * re-queued.
			 */
			if (cmd == tail) {
				break;
			}
		}
		mutex_exit(&pptr->port_pkt_mutex);

		/* Now handle the internal (ELS/transport) packet list. */
		mutex_enter(&pptr->port_mutex);
		for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
			struct fcp_tgt *ptgt = icmd->ipkt_tgt;

			nicmd = icmd->ipkt_next;
			if ((icmd->ipkt_restart != 0) &&
			    (icmd->ipkt_restart >= fcp_watchdog_time)) {
				/* packet has not timed out */
				continue;
			}

			/* time for packet re-transport */
			if (icmd == pptr->port_ipkt_list) {
				pptr->port_ipkt_list = icmd->ipkt_next;
				if (pptr->port_ipkt_list) {
					pptr->port_ipkt_list->ipkt_prev =
					    NULL;
				}
			} else {
				icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
				if (icmd->ipkt_next) {
					icmd->ipkt_next->ipkt_prev =
					    icmd->ipkt_prev;
				}
			}
			icmd->ipkt_next = NULL;
			icmd->ipkt_prev = NULL;
			mutex_exit(&pptr->port_mutex);

			if (fcp_is_retryable(icmd)) {
				fc_ulp_rscn_info_t *rscnp =
				    (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
				    pkt_ulp_rscn_infop;

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "%x to D_ID=%x Retrying..",
				    icmd->ipkt_opcode,
				    icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);

				/*
				 * Update the RSCN count in the packet
				 * before resending.
				 */

				if (rscnp != NULL) {
					rscnp->ulp_rscn_count =
					    fc_ulp_get_rscn_count(pptr->
					    port_fp_handle);
				}

				mutex_enter(&pptr->port_mutex);
				mutex_enter(&ptgt->tgt_mutex);
				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
					mutex_exit(&ptgt->tgt_mutex);
					mutex_exit(&pptr->port_mutex);
					switch (icmd->ipkt_opcode) {
					int rval;
					case LA_ELS_PLOGI:
						if ((rval = fc_ulp_login(
						    pptr->port_fp_handle,
						    &icmd->ipkt_fpkt, 1)) ==
						    FC_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						if (fcp_handle_ipkt_errors(
						    pptr, ptgt, icmd, rval,
						    "PLOGI") == DDI_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						break;

					case LA_ELS_PRLI:
						if ((rval = fc_ulp_issue_els(
						    pptr->port_fp_handle,
						    icmd->ipkt_fpkt)) ==
						    FC_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						if (fcp_handle_ipkt_errors(
						    pptr, ptgt, icmd, rval,
						    "PRLI") == DDI_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						break;

					default:
						if ((rval = fcp_transport(
						    pptr->port_fp_handle,
						    icmd->ipkt_fpkt, 1)) ==
						    FC_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						/*
						 * NOTE(review): label "PRLI"
						 * here covers all non-ELS
						 * opcodes; appears to be a
						 * historic copy of the PRLI
						 * arm - confirm before
						 * changing the message.
						 */
						if (fcp_handle_ipkt_errors(
						    pptr, ptgt, icmd, rval,
						    "PRLI") == DDI_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						break;
					}
				} else {
					mutex_exit(&ptgt->tgt_mutex);
					mutex_exit(&pptr->port_mutex);
				}
			} else {
				fcp_print_error(icmd->ipkt_fpkt);
			}

			/* Retry failed or not retryable: finish and free. */
			(void) fcp_call_finish_init(pptr, ptgt,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause);
			fcp_icmd_free(pptr, icmd);
			mutex_enter(&pptr->port_mutex);
		}

		pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
		mutex_exit(&pptr->port_mutex);
		mutex_enter(&fcp_global_mutex);

	end_of_watchdog:
		/*
		 * Bail out early before getting into trouble
		 */
		if (save_port != fcp_port_head) {
			break;
		}
	}

	if (fcp_watchdog_init > 0) {
		/* reschedule timeout to go again */
		fcp_watchdog_id =
		    timeout(fcp_watch, NULL, fcp_watchdog_tick);
	}
	mutex_exit(&fcp_global_mutex);
}
13749
13750
/*
 * Function: fcp_check_reset_delay
 *
 * Description: Walks the port's reset-element list (populated when a target
 *		or LUN reset is issued), unlinks the elements selected by the
 *		timeout comparison below, clears the FCP_RESET/FCP_LUN_BUSY
 *		state on the associated target or LUN (only if the target's
 *		change count still matches the one recorded at reset time),
 *		and aborts all outstanding commands via fcp_abort_all().
 *
 * Argument:	*pptr		Port whose port_reset_list is scanned.
 *
 * Context:	Called from fcp_watch() with pptr->port_mutex held; the mutex
 *		is dropped around fcp_abort_all(), which is why the list head
 *		must be re-derived when unlinking (see comment below).
 */
static void
fcp_check_reset_delay(struct fcp_port *pptr)
{
	uint32_t tgt_cnt;
	int level;
	struct fcp_tgt *ptgt;
	struct fcp_lun *plun;
	struct fcp_reset_elem *cur = NULL;
	struct fcp_reset_elem *next = NULL;
	struct fcp_reset_elem *prev = NULL;

	ASSERT(mutex_owned(&pptr->port_mutex));

	next = pptr->port_reset_list;
	while ((cur = next) != NULL) {
		next = cur->next;

		/*
		 * NOTE(review): this skips elements whose timeout already
		 * lies in the past and processes those whose timeout is
		 * still >= fcp_watchdog_time - the opposite of the
		 * deadline convention used for cmd_timeout in fcp_watch().
		 * Long-shipped behavior; confirm intent before changing.
		 */
		if (cur->timeout < fcp_watchdog_time) {
			prev = cur;
			continue;
		}

		ptgt = cur->tgt;
		plun = cur->lun;
		tgt_cnt = cur->tgt_cnt;

		/* Element carries either a target or a LUN, never both. */
		if (ptgt) {
			level = RESET_TARGET;
		} else {
			ASSERT(plun != NULL);
			level = RESET_LUN;
			ptgt = plun->lun_tgt;
		}
		if (prev) {
			prev->next = next;
		} else {
			/*
			 * Because we drop port mutex while doing aborts for
			 * packets, we can't rely on reset_list pointing to
			 * our head
			 */
			if (cur == pptr->port_reset_list) {
				pptr->port_reset_list = next;
			} else {
				struct fcp_reset_elem *which;

				which = pptr->port_reset_list;
				while (which && which->next != cur) {
					which = which->next;
				}
				ASSERT(which != NULL);

				which->next = next;
				prev = which;
			}
		}

		kmem_free(cur, sizeof (*cur));

		/* Only act if the target hasn't changed since the reset. */
		if (tgt_cnt == ptgt->tgt_change_cnt) {
			mutex_enter(&ptgt->tgt_mutex);
			if (level == RESET_TARGET) {
				fcp_update_tgt_state(ptgt,
				    FCP_RESET, FCP_LUN_BUSY);
			} else {
				fcp_update_lun_state(plun,
				    FCP_RESET, FCP_LUN_BUSY);
			}
			mutex_exit(&ptgt->tgt_mutex);

			/* fcp_abort_all requires the port mutex dropped. */
			mutex_exit(&pptr->port_mutex);
			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
			mutex_enter(&pptr->port_mutex);
		}
	}
}
13827
13828
/*
 * Function: fcp_abort_all
 *
 * Description: Completes or aborts all commands affected by a target or LUN
 *		reset. First, queued (not yet transported) commands for the
 *		target/LUN are unlinked from the port overflow queue and
 *		completed with CMD_RESET/STAT_DEV_RESET (if the target change
 *		count still matches). Then, unless the FCA returns all queued
 *		commands itself (FC_RESET_RETURN_ALL), every ISSUED command on
 *		each affected LUN is aborted via fc_ulp_abort(); commands
 *		whose abort fails are marked FCP_PKT_ABORTING and parked on
 *		the overflow queue with FCP_INVALID_TIMEOUT so fcp_watch()
 *		times them out later.
 *
 * Argument:	*pptr		Port the commands belong to.
 *		*ttgt		Target being reset, or NULL for a LUN reset.
 *		*rlun		LUN being reset, or NULL for a target reset.
 *		tgt_cnt		Target change count recorded when the reset
 *				was issued; a mismatch means the target has
 *				changed and processing stops.
 *
 * Context:	Called without the port mutex held (fcp_check_reset_delay
 *		drops it first); acquires/releases port_pkt_mutex, port_mutex,
 *		tgt_mutex and lun_mutex internally.
 */
static void
fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
    struct fcp_lun *rlun, int tgt_cnt)
{
	int rval;
	struct fcp_lun *tlun, *nlun;
	struct fcp_pkt *pcmd = NULL, *ncmd = NULL,
	    *cmd = NULL, *head = NULL,
	    *tail = NULL;

	/* Pull every command for this target/LUN off the overflow queue. */
	mutex_enter(&pptr->port_pkt_mutex);
	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
		struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
		struct fcp_tgt *ptgt = plun->lun_tgt;

		ncmd = cmd->cmd_next;

		if (ptgt != ttgt && plun != rlun) {
			pcmd = cmd;
			continue;
		}

		if (pcmd != NULL) {
			ASSERT(pptr->port_pkt_head != cmd);
			pcmd->cmd_next = ncmd;
		} else {
			ASSERT(cmd == pptr->port_pkt_head);
			pptr->port_pkt_head = ncmd;
		}
		if (pptr->port_pkt_tail == cmd) {
			ASSERT(cmd->cmd_next == NULL);
			pptr->port_pkt_tail = pcmd;
			if (pcmd != NULL) {
				pcmd->cmd_next = NULL;
			}
		}

		/* Collect the matches on a private head/tail list. */
		if (head == NULL) {
			head = tail = cmd;
		} else {
			ASSERT(tail != NULL);
			tail->cmd_next = cmd;
			tail = cmd;
		}
		cmd->cmd_next = NULL;
	}
	mutex_exit(&pptr->port_pkt_mutex);

	/* Complete the collected commands with a reset indication. */
	for (cmd = head; cmd != NULL; cmd = ncmd) {
		struct scsi_pkt *pkt = cmd->cmd_pkt;

		ncmd = cmd->cmd_next;
		ASSERT(pkt != NULL);

		mutex_enter(&pptr->port_mutex);
		if (ttgt->tgt_change_cnt == tgt_cnt) {
			mutex_exit(&pptr->port_mutex);
			cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
			pkt->pkt_reason = CMD_RESET;
			pkt->pkt_statistics |= STAT_DEV_RESET;
			cmd->cmd_state = FCP_PKT_IDLE;
			fcp_post_callback(cmd);
		} else {
			mutex_exit(&pptr->port_mutex);
		}
	}

	/*
	 * If the FCA will return all the commands in its queue then our
	 * work is easy, just return.
	 */

	if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
		return;
	}

	/*
	 * For RESET_LUN get hold of target pointer
	 */
	if (ttgt == NULL) {
		ASSERT(rlun != NULL);

		ttgt = rlun->lun_tgt;

		ASSERT(ttgt != NULL);
	}

	/*
	 * There are some severe race conditions here.
	 * While we are trying to abort the pkt, it might be completing
	 * so mark it aborted and if the abort does not succeed then
	 * handle it in the watch thread.
	 */
	mutex_enter(&ttgt->tgt_mutex);
	nlun = ttgt->tgt_lun;
	mutex_exit(&ttgt->tgt_mutex);
	while ((tlun = nlun) != NULL) {
		int restart = 0;
		/* For a LUN reset, only the named LUN is processed. */
		if (rlun && rlun != tlun) {
			mutex_enter(&ttgt->tgt_mutex);
			nlun = tlun->lun_next;
			mutex_exit(&ttgt->tgt_mutex);
			continue;
		}
		mutex_enter(&tlun->lun_mutex);
		cmd = tlun->lun_pkt_head;
		while (cmd != NULL) {
			if (cmd->cmd_state == FCP_PKT_ISSUED) {
				struct scsi_pkt *pkt;

				restart = 1;
				cmd->cmd_state = FCP_PKT_ABORTING;
				mutex_exit(&tlun->lun_mutex);
				rval = fc_ulp_abort(pptr->port_fp_handle,
				    cmd->cmd_fp_pkt, KM_SLEEP);
				if (rval == FC_SUCCESS) {
					pkt = cmd->cmd_pkt;
					pkt->pkt_reason = CMD_RESET;
					pkt->pkt_statistics |= STAT_DEV_RESET;
					cmd->cmd_state = FCP_PKT_IDLE;
					fcp_post_callback(cmd);
				} else {
					caddr_t msg;

					(void) fc_ulp_error(rval, &msg);

					/*
					 * This part is tricky. The abort
					 * failed and now the command could
					 * be completing. The cmd_state ==
					 * FCP_PKT_ABORTING should save
					 * us in fcp_cmd_callback. If we
					 * are already aborting ignore the
					 * command in fcp_cmd_callback.
					 * Here we leave this packet for 20
					 * sec to be aborted in the
					 * fcp_watch thread.
					 */
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Abort failed after reset %s",
					    msg);

					cmd->cmd_timeout =
					    fcp_watchdog_time +
					    cmd->cmd_pkt->pkt_time +
					    FCP_FAILED_DELAY;

					cmd->cmd_fp_pkt->pkt_timeout =
					    FCP_INVALID_TIMEOUT;
					/*
					 * This is a hack, cmd is put in the
					 * overflow queue so that it can be
					 * timed out finally
					 */
					cmd->cmd_flags |= CFLAG_IN_QUEUE;

					mutex_enter(&pptr->port_pkt_mutex);
					if (pptr->port_pkt_head) {
						ASSERT(pptr->port_pkt_tail
						    != NULL);
						pptr->port_pkt_tail->cmd_next
						    = cmd;
						pptr->port_pkt_tail = cmd;
					} else {
						ASSERT(pptr->port_pkt_tail
						    == NULL);
						pptr->port_pkt_head =
						    pptr->port_pkt_tail
						    = cmd;
					}
					cmd->cmd_next = NULL;
					mutex_exit(&pptr->port_pkt_mutex);
				}
				/*
				 * The LUN mutex was dropped for the abort;
				 * rescan from the head of the LUN's list.
				 */
				mutex_enter(&tlun->lun_mutex);
				cmd = tlun->lun_pkt_head;
			} else {
				cmd = cmd->cmd_forw;
			}
		}
		mutex_exit(&tlun->lun_mutex);

		/* If we dropped locks mid-LUN, restart from the first LUN. */
		mutex_enter(&ttgt->tgt_mutex);
		restart == 1 ? (nlun = ttgt->tgt_lun) : (nlun = tlun->lun_next);
		mutex_exit(&ttgt->tgt_mutex);

		mutex_enter(&pptr->port_mutex);
		if (tgt_cnt != ttgt->tgt_change_cnt) {
			/* Target changed underneath us; stop processing. */
			mutex_exit(&pptr->port_mutex);
			return;
		} else {
			mutex_exit(&pptr->port_mutex);
		}
	}
}
14023
14024
14025 /*
14026 * unlink the soft state, returning the soft state found (if any)
14027 *
14028 * acquires and releases the global mutex
14029 */
14030 struct fcp_port *
14031 fcp_soft_state_unlink(struct fcp_port *pptr)
14032 {
14033 struct fcp_port *hptr; /* ptr index */
14034 struct fcp_port *tptr; /* prev hptr */
14035
14036 mutex_enter(&fcp_global_mutex);
14037 for (hptr = fcp_port_head, tptr = NULL;
14038 hptr != NULL;
14039 tptr = hptr, hptr = hptr->port_next) {
14040 if (hptr == pptr) {
14041 /* we found a match -- remove this item */
14042 if (tptr == NULL) {
14043 /* we're at the head of the list */
14044 fcp_port_head = hptr->port_next;
14045 } else {
14046 tptr->port_next = hptr->port_next;
14047 }
14048 break; /* success */
14049 }
14050 }
14051 if (fcp_port_head == NULL) {
14052 fcp_cleanup_blacklist(&fcp_lun_blacklist);
14053 }
14054 mutex_exit(&fcp_global_mutex);
14055 return (hptr);
14056 }
14057
14058
14059 /*
14060 * called by fcp_scsi_hba_tgt_init to find a LUN given a
14061 * WWN and a LUN number
14062 */
14063 /* ARGSUSED */
14064 static struct fcp_lun *
14065 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
14066 {
14067 int hash;
14068 struct fcp_tgt *ptgt;
14069 struct fcp_lun *plun;
14070
14071 ASSERT(mutex_owned(&pptr->port_mutex));
14072
14073 hash = FCP_HASH(wwn);
14074 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
14075 ptgt = ptgt->tgt_next) {
14076 if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
14077 sizeof (ptgt->tgt_port_wwn)) == 0) {
14078 mutex_enter(&ptgt->tgt_mutex);
14079 for (plun = ptgt->tgt_lun;
14080 plun != NULL;
14081 plun = plun->lun_next) {
14082 if (plun->lun_num == lun) {
14083 mutex_exit(&ptgt->tgt_mutex);
14084 return (plun);
14085 }
14086 }
14087 mutex_exit(&ptgt->tgt_mutex);
14088 return (NULL);
14089 }
14090 }
14091 return (NULL);
14092 }
14093
14094 /*
14095 * Function: fcp_prepare_pkt
14096 *
14097 * Description: This function prepares the SCSI cmd pkt, passed by the caller,
14098 * for fcp_start(). It binds the data or partially maps it.
14099 * Builds the FCP header and starts the initialization of the
14100 * Fibre Channel header.
14101 *
14102 * Argument: *pptr FCP port.
14103 * *cmd FCP packet.
14104 * *plun LUN the command will be sent to.
14105 *
14106 * Context: User, Kernel and Interrupt context.
14107 */
static void
fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
    struct fcp_lun *plun)
{
	fc_packet_t	*fpkt = cmd->cmd_fp_pkt;
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	struct fcp_cmd	*fcmd = &cmd->cmd_fcp_cmd;

	/* Either a completion routine or polled (NOINTR) mode is required. */
	ASSERT(cmd->cmd_pkt->pkt_comp ||
	    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));

	if (cmd->cmd_pkt->pkt_numcookies) {
		/*
		 * Data phase present: derive the transfer direction from
		 * the DMA binding flags and mirror it into both the FCP
		 * control block and the FC packet transaction type.
		 */
		if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
			fcmd->fcp_cntl.cntl_read_data = 1;
			fcmd->fcp_cntl.cntl_write_data = 0;
			fpkt->pkt_tran_type = FC_PKT_FCP_READ;
		} else {
			fcmd->fcp_cntl.cntl_read_data = 0;
			fcmd->fcp_cntl.cntl_write_data = 1;
			fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
		}

		/* Hand the SCSA-provided DMA cookies straight to the FCA. */
		fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;

		fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
		ASSERT(fpkt->pkt_data_cookie_cnt <=
		    pptr->port_data_dma_attr.dma_attr_sgllen);

		cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;

		/* FCA needs pkt_datalen to be set */
		fpkt->pkt_datalen = cmd->cmd_dmacount;
		fcmd->fcp_data_len = cmd->cmd_dmacount;
	} else {
		/* No data phase: plain command/response exchange. */
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 0;
		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
		fpkt->pkt_datalen = 0;
		fcmd->fcp_data_len = 0;
	}

	/* set up the Tagged Queuing type */
	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
	} else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
	} else {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	}

	/* FCP entity address identifies the LUN within the target. */
	fcmd->fcp_ent_addr = plun->lun_addr;

	/*
	 * When the FCA uses DVMA space for the command, copy the FCP_CMD
	 * payload out through the access handle; otherwise the FCA reads
	 * it in place and no DMA handles may exist.
	 */
	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
	} else {
		ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
	}

	/* Reset the SCSI packet's completion bookkeeping for (re)issue. */
	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
	cmd->cmd_pkt->pkt_state = 0;
	cmd->cmd_pkt->pkt_statistics = 0;
	cmd->cmd_pkt->pkt_resid = 0;

	cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;

	if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
		/* Polled mode: no completion callback is allowed. */
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
		fpkt->pkt_comp = NULL;
	} else {
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
		if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
		}
		fpkt->pkt_comp = fcp_cmd_callback;
	}

	/* Flag dump-time (crash dump) traffic while the port is suspended. */
	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & FCP_STATE_SUSPENDED) {
		fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
	}
	mutex_exit(&pptr->port_mutex);

	/* Begin the FC frame header: destination and source port IDs. */
	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;

	/*
	 * Save a few kernel cycles here
	 */
#ifndef	__lock_lint
	fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
#endif /* __lock_lint */
}
14203
/*
 * Complete a SCSI packet back to the target driver by handing it to the
 * SCSA framework completion routine (which invokes pkt_comp as needed).
 */
static void
fcp_post_callback(struct fcp_pkt *cmd)
{
	scsi_hba_pkt_comp(cmd->cmd_pkt);
}
14209
14210
14211 /*
14212 * called to do polled I/O by fcp_start()
14213 *
14214 * return a transport status value, i.e. TRAN_ACCECPT for success
14215 */
static int
fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
{
	int rval;

#ifdef	DEBUG
	/* Track the number of outstanding packets for debug builds. */
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts++;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	/*
	 * NOTE(review): when pkt_timeout is already non-zero it is
	 * overwritten with the SCSI packet's pkt_time (which may itself be
	 * zero); only a zero pkt_timeout gets the FCP_POLL_TIMEOUT default.
	 * Presumably intentional, but worth confirming against fcp_transport
	 * callers.
	 */
	if (cmd->cmd_fp_pkt->pkt_timeout) {
		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
	} else {
		cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
	}

	/* Polled I/O must not have a completion callback installed. */
	ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);

	cmd->cmd_state = FCP_PKT_ISSUED;

	/* Synchronous transport: fc_ulp_transport polls for completion. */
	rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts--;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	cmd->cmd_state = FCP_PKT_IDLE;

	/* Map the FC transport result onto a SCSA TRAN_* return code. */
	switch (rval) {
	case FC_SUCCESS:
		if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
			/* Fill in the SCSI pkt status/resid from the rsp. */
			fcp_complete_pkt(cmd->cmd_fp_pkt);
			rval = TRAN_ACCEPT;
		} else {
			rval = TRAN_FATAL_ERROR;
		}
		break;

	case FC_TRAN_BUSY:
		rval = TRAN_BUSY;
		cmd->cmd_pkt->pkt_resid = 0;
		break;

	case FC_BADPACKET:
		rval = TRAN_BADPKT;
		break;

	default:
		rval = TRAN_FATAL_ERROR;
		break;
	}

	return (rval);
}
14273
14274
14275 /*
14276 * called by some of the following transport-called routines to convert
14277 * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14278 */
14279 static struct fcp_port *
14280 fcp_dip2port(dev_info_t *dip)
14281 {
14282 int instance;
14283
14284 instance = ddi_get_instance(dip);
14285 return (ddi_get_soft_state(fcp_softstate, instance));
14286 }
14287
14288
14289 /*
14290 * called internally to return a LUN given a dip
14291 */
14292 struct fcp_lun *
14293 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14294 {
14295 struct fcp_tgt *ptgt;
14296 struct fcp_lun *plun;
14297 int i;
14298
14299
14300 ASSERT(mutex_owned(&pptr->port_mutex));
14301
14302 for (i = 0; i < FCP_NUM_HASH; i++) {
14303 for (ptgt = pptr->port_tgt_hash_table[i];
14304 ptgt != NULL;
14305 ptgt = ptgt->tgt_next) {
14306 mutex_enter(&ptgt->tgt_mutex);
14307 for (plun = ptgt->tgt_lun; plun != NULL;
14308 plun = plun->lun_next) {
14309 mutex_enter(&plun->lun_mutex);
14310 if (plun->lun_cip == cip) {
14311 mutex_exit(&plun->lun_mutex);
14312 mutex_exit(&ptgt->tgt_mutex);
14313 return (plun); /* match found */
14314 }
14315 mutex_exit(&plun->lun_mutex);
14316 }
14317 mutex_exit(&ptgt->tgt_mutex);
14318 }
14319 }
14320 return (NULL); /* no LUN found */
14321 }
14322
14323 /*
14324 * pass an element to the hotplug list, kick the hotplug thread
14325 * and wait for the element to get processed by the hotplug thread.
14326 * on return the element is freed.
14327 *
14328 * return zero success and non-zero on failure
14329 *
14330 * acquires/releases the target mutex
14331 *
14332 */
14333 static int
14334 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14335 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14336 {
14337 struct fcp_hp_elem *elem;
14338 int rval;
14339
14340 mutex_enter(&plun->lun_tgt->tgt_mutex);
14341 if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14342 what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14343 mutex_exit(&plun->lun_tgt->tgt_mutex);
14344 fcp_log(CE_CONT, pptr->port_dip,
14345 "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14346 what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14347 return (NDI_FAILURE);
14348 }
14349 mutex_exit(&plun->lun_tgt->tgt_mutex);
14350 mutex_enter(&elem->mutex);
14351 if (elem->wait) {
14352 while (elem->wait) {
14353 cv_wait(&elem->cv, &elem->mutex);
14354 }
14355 }
14356 rval = (elem->result);
14357 mutex_exit(&elem->mutex);
14358 mutex_destroy(&elem->mutex);
14359 cv_destroy(&elem->cv);
14360 kmem_free(elem, sizeof (struct fcp_hp_elem));
14361 return (rval);
14362 }
14363
14364 /*
14365 * pass an element to the hotplug list, and then
14366 * kick the hotplug thread
14367 *
14368 * return Boolean success, i.e. non-zero if all goes well, else zero on error
14369 *
14370 * acquires/releases the hotplug mutex
14371 *
14372 * called with the target mutex owned
14373 *
14374 * memory acquired in NOSLEEP mode
14375 * NOTE: if wait is set to 1 then the caller is responsible for waiting on
14376 * for the hp daemon to process the request and is responsible for
14377 * freeing the element
14378 */
14379 static struct fcp_hp_elem *
14380 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14381 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14382 {
14383 struct fcp_hp_elem *elem;
14384 dev_info_t *pdip;
14385
14386 ASSERT(pptr != NULL);
14387 ASSERT(plun != NULL);
14388 ASSERT(plun->lun_tgt != NULL);
14389 ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14390
14391 /* create space for a hotplug element */
14392 if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14393 == NULL) {
14394 fcp_log(CE_WARN, NULL,
14395 "!can't allocate memory for hotplug element");
14396 return (NULL);
14397 }
14398
14399 /* fill in hotplug element */
14400 elem->port = pptr;
14401 elem->lun = plun;
14402 elem->cip = cip;
14403 elem->old_lun_mpxio = plun->lun_mpxio;
14404 elem->what = what;
14405 elem->flags = flags;
14406 elem->link_cnt = link_cnt;
14407 elem->tgt_cnt = tgt_cnt;
14408 elem->wait = wait;
14409 mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14410 cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14411
14412 /* schedule the hotplug task */
14413 pdip = pptr->port_dip;
14414 mutex_enter(&plun->lun_mutex);
14415 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14416 plun->lun_event_count++;
14417 elem->event_cnt = plun->lun_event_count;
14418 }
14419 mutex_exit(&plun->lun_mutex);
14420 if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14421 (void *)elem, KM_NOSLEEP) == NULL) {
14422 mutex_enter(&plun->lun_mutex);
14423 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14424 plun->lun_event_count--;
14425 }
14426 mutex_exit(&plun->lun_mutex);
14427 kmem_free(elem, sizeof (*elem));
14428 return (0);
14429 }
14430
14431 return (elem);
14432 }
14433
14434
/*
 * Re-issue a queued command to the transport.  If the LUN is usable and
 * the port is not in the middle of onlining, the packet is re-prepared
 * and handed to fcp_transport(); on any failure (or if the LUN/port is
 * not ready) the command is put back on the port's packet queue.
 */
static void
fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
{
	int			rval;
	struct scsi_address	*ap;
	struct fcp_lun		*plun;
	struct fcp_tgt		*ptgt;
	fc_packet_t		*fpkt;

	ap = &cmd->cmd_pkt->pkt_address;
	plun = ADDR2LUN(ap);
	ptgt = plun->lun_tgt;

	/* Only commands currently sitting on the queue may be retried. */
	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);

	cmd->cmd_state = FCP_PKT_IDLE;

	/* Lock order: port mutex, then target mutex. */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
	    (!(pptr->port_state & FCP_STATE_ONLINING))) {
		fc_ulp_rscn_info_t *rscnp;

		cmd->cmd_state = FCP_PKT_ISSUED;

		/*
		 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
		 * originally NULL, hence we try to set it to the pd pointed
		 * to by the SCSI device we're trying to get to.
		 */

		fpkt = cmd->cmd_fp_pkt;
		if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
			fpkt->pkt_pd = ptgt->tgt_pd_handle;
			/*
			 * We need to notify the transport that we now have a
			 * reference to the remote port handle.
			 */
			fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
		}

		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		/* Polled (NOINTR) commands never travel the retry queue. */
		ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);

		/* prepare the packet */

		fcp_prepare_pkt(pptr, cmd, plun);

		rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
		    pkt_ulp_rscn_infop;

		/* Zero pkt_time means no watchdog deadline. */
		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
		    fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;

		/* Stamp the current RSCN count so stale completions show. */
		if (rscnp != NULL) {
			rscnp->ulp_rscn_count =
			    fc_ulp_get_rscn_count(pptr->
			    port_fp_handle);
		}

		rval = fcp_transport(pptr->port_fp_handle,
		    cmd->cmd_fp_pkt, 0);

		if (rval == FC_SUCCESS) {
			return;
		}
		/*
		 * NOTE(review): cmd_state is used as a value elsewhere
		 * (FCP_PKT_ISSUED/FCP_PKT_IDLE assignments), but here it is
		 * treated as a bitmask; confirm FCP_PKT_* encoding makes
		 * this equivalent to setting FCP_PKT_IDLE.
		 */
		cmd->cmd_state &= ~FCP_PKT_ISSUED;
	} else {
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
	}

	/* Not transported: leave it on the port queue for a later retry. */
	fcp_queue_pkt(pptr, cmd);
}
14511
14512
14513 static void
14514 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14515 {
14516 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14517
14518 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14519 cmd->cmd_state = FCP_PKT_IDLE;
14520
14521 cmd->cmd_pkt->pkt_reason = reason;
14522 cmd->cmd_pkt->pkt_state = 0;
14523 cmd->cmd_pkt->pkt_statistics = statistics;
14524
14525 fcp_post_callback(cmd);
14526 }
14527
14528 /*
14529 * Function: fcp_queue_pkt
14530 *
14531 * Description: This function queues the packet passed by the caller into
14532 * the list of packets of the FCP port.
14533 *
14534 * Argument: *pptr FCP port.
14535 * *cmd FCP packet to queue.
14536 *
14537 * Return Value: None
14538 *
14539 * Context: User, Kernel and Interrupt context.
14540 */
14541 static void
14542 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14543 {
14544 ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == NULL);
14545
14546 mutex_enter(&pptr->port_pkt_mutex);
14547 cmd->cmd_flags |= CFLAG_IN_QUEUE;
14548 ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14549 cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14550
14551 /*
14552 * zero pkt_time means hang around for ever
14553 */
14554 if (cmd->cmd_pkt->pkt_time) {
14555 if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14556 cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14557 } else {
14558 /*
14559 * Indicate the watch thread to fail the
14560 * command by setting it to highest value
14561 */
14562 cmd->cmd_timeout = fcp_watchdog_time;
14563 cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14564 }
14565 }
14566
14567 if (pptr->port_pkt_head) {
14568 ASSERT(pptr->port_pkt_tail != NULL);
14569
14570 pptr->port_pkt_tail->cmd_next = cmd;
14571 pptr->port_pkt_tail = cmd;
14572 } else {
14573 ASSERT(pptr->port_pkt_tail == NULL);
14574
14575 pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14576 }
14577 cmd->cmd_next = NULL;
14578 mutex_exit(&pptr->port_pkt_mutex);
14579 }
14580
14581 /*
14582 * Function: fcp_update_targets
14583 *
14584 * Description: This function applies the specified change of state to all
14585 * the targets listed. The operation applied is 'set'.
14586 *
14587 * Argument: *pptr FCP port.
14588 * *dev_list Array of fc_portmap_t structures.
14589 * count Length of dev_list.
14590 * state State bits to update.
14591 * cause Reason for the update.
14592 *
14593 * Return Value: None
14594 *
14595 * Context: User, Kernel and Interrupt context.
14596 * The mutex pptr->port_mutex must be held.
14597 */
14598 static void
14599 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14600 uint32_t count, uint32_t state, int cause)
14601 {
14602 fc_portmap_t *map_entry;
14603 struct fcp_tgt *ptgt;
14604
14605 ASSERT(MUTEX_HELD(&pptr->port_mutex));
14606
14607 while (count--) {
14608 map_entry = &(dev_list[count]);
14609 ptgt = fcp_lookup_target(pptr,
14610 (uchar_t *)&(map_entry->map_pwwn));
14611 if (ptgt == NULL) {
14612 continue;
14613 }
14614
14615 mutex_enter(&ptgt->tgt_mutex);
14616 ptgt->tgt_trace = 0;
14617 ptgt->tgt_change_cnt++;
14618 ptgt->tgt_statec_cause = cause;
14619 ptgt->tgt_tmp_cnt = 1;
14620 fcp_update_tgt_state(ptgt, FCP_SET, state);
14621 mutex_exit(&ptgt->tgt_mutex);
14622 }
14623 }
14624
/*
 * Convenience wrapper: acquire the port mutex around
 * fcp_call_finish_init_held() and return its result.
 */
static int
fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int lcount, int tcount, int cause)
{
	int rval;

	mutex_enter(&pptr->port_mutex);
	rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
	mutex_exit(&pptr->port_mutex);

	return (rval);
}
14637
14638
/*
 * Account for the completion of one unit of discovery work against a
 * target (or against no target) and, when the port-wide temporary count
 * drains to zero on the current link generation, finish port
 * initialization.  Must be called with the port mutex held.
 *
 * Returns FCP_NO_CHANGE, or FCP_DEV_CHANGE if the target's change count
 * moved while the work was in flight.
 */
static int
fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int lcount, int tcount, int cause)
{
	int	finish_init = 0;
	int	finish_tgt = 0;
	int	do_finish_init = 0;
	int	rval = FCP_NO_CHANGE;

	/* Only link-level events participate in finishing the port init. */
	if (cause == FCP_CAUSE_LINK_CHANGE ||
	    cause == FCP_CAUSE_LINK_DOWN) {
		do_finish_init = 1;
	}

	if (ptgt != NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0,
		    "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
		    " cause = %d, d_id = 0x%x, tgt_done = %d",
		    pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
		    pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
		    ptgt->tgt_d_id, ptgt->tgt_done);

		mutex_enter(&ptgt->tgt_mutex);

		if (tcount && (ptgt->tgt_change_cnt != tcount)) {
			/*
			 * A newer state change superseded this work; the
			 * target only counts toward finish_init once
			 * (tgt_done guards against double counting).
			 */
			rval = FCP_DEV_CHANGE;
			if (do_finish_init && ptgt->tgt_done == 0) {
				ptgt->tgt_done++;
				finish_init = 1;
			}
		} else {
			/* Drain the target's outstanding-work counter. */
			if (--ptgt->tgt_tmp_cnt <= 0) {
				ptgt->tgt_tmp_cnt = 0;
				finish_tgt = 1;

				if (do_finish_init) {
					finish_init = 1;
				}
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0,
		    "Call Finish Init for NO target");

		if (do_finish_init) {
			finish_init = 1;
		}
	}

	if (finish_tgt) {
		ASSERT(ptgt != NULL);

		mutex_enter(&ptgt->tgt_mutex);
#ifdef	DEBUG
		/* Record where the counter hit zero, for post-mortem. */
		bzero(ptgt->tgt_tmp_cnt_stack,
		    sizeof (ptgt->tgt_tmp_cnt_stack));

		ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
		    FCP_STACK_DEPTH);
#endif /* DEBUG */
		mutex_exit(&ptgt->tgt_mutex);

		(void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
	}

	/*
	 * Finish the port only if the caller's link generation is still
	 * current; otherwise a newer link event owns completion.
	 */
	if (finish_init && lcount == pptr->port_link_cnt) {
		ASSERT(pptr->port_tmp_cnt > 0);
		if (--pptr->port_tmp_cnt == 0) {
			fcp_finish_init(pptr);
		}
	} else if (lcount != pptr->port_link_cnt) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_call_finish_init_held,1: state change occured"
		    " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
	}

	return (rval);
}
14721
/*
 * Timeout callback: re-drive LUN discovery for one target by synthesizing
 * a PORT_DEVICE_REPORTLUN_CHANGED device-change event and feeding it back
 * through fcp_statec_callback().
 */
static void
fcp_reconfigure_luns(void * tgt_handle)
{
	uint32_t	dev_cnt;
	fc_portmap_t	*devlist;
	struct fcp_tgt	*ptgt = (struct fcp_tgt *)tgt_handle;
	struct fcp_port	*pptr = ptgt->tgt_port;

	/*
	 * If the timer that fires this off got canceled too late, the
	 * target could have been destroyed.
	 */

	/*
	 * NOTE(review): tgt_tid is read here without tgt_mutex; the clearing
	 * below does take the mutex.  Presumably the untimeout() protocol in
	 * fcp_free_target makes this safe — confirm before relying on it.
	 */
	if (ptgt->tgt_tid == NULL) {
		return;
	}

	devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
	if (devlist == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: failed to allocate for portmap",
		    pptr->port_instance);
		return;
	}

	/* Build a one-entry portmap describing this target as logged in. */
	dev_cnt = 1;
	devlist->map_pd = ptgt->tgt_pd_handle;
	devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
	devlist->map_did.port_id = ptgt->tgt_d_id;

	bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
	bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);

	devlist->map_state = PORT_DEVICE_LOGGED_IN;
	devlist->map_type = PORT_DEVICE_REPORTLUN_CHANGED;
	devlist->map_flags = 0;

	/* Inject the synthetic state change into the normal discovery path. */
	fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
	    pptr->port_topology, devlist, dev_cnt, pptr->port_id);

	/*
	 * Clear the tgt_tid after no more references to
	 * the fcp_tgt
	 */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_tid = NULL;
	mutex_exit(&ptgt->tgt_mutex);

	kmem_free(devlist, sizeof (*devlist));
}
14772
14773
14774 static void
14775 fcp_free_targets(struct fcp_port *pptr)
14776 {
14777 int i;
14778 struct fcp_tgt *ptgt;
14779
14780 mutex_enter(&pptr->port_mutex);
14781 for (i = 0; i < FCP_NUM_HASH; i++) {
14782 ptgt = pptr->port_tgt_hash_table[i];
14783 while (ptgt != NULL) {
14784 struct fcp_tgt *next_tgt = ptgt->tgt_next;
14785
14786 fcp_free_target(ptgt);
14787 ptgt = next_tgt;
14788 }
14789 }
14790 mutex_exit(&pptr->port_mutex);
14791 }
14792
14793
/*
 * Free one target: cancel any pending reconfigure timeout, deallocate all
 * of its LUNs, then deallocate the target itself.
 */
static void
fcp_free_target(struct fcp_tgt *ptgt)
{
	struct fcp_lun	*plun;
	timeout_id_t	tid;

	mutex_enter(&ptgt->tgt_mutex);
	tid = ptgt->tgt_tid;

	/*
	 * Cancel any pending timeouts for this target.
	 */

	if (tid != NULL) {
		/*
		 * Set tgt_tid to NULL first to avoid a race in the callback.
		 * If tgt_tid is NULL, the callback will simply return.
		 */
		ptgt->tgt_tid = NULL;
		/*
		 * untimeout() may block waiting for a running callback, so
		 * the mutex must be dropped around the call.
		 */
		mutex_exit(&ptgt->tgt_mutex);
		(void) untimeout(tid);
		mutex_enter(&ptgt->tgt_mutex);
	}

	/* Free all LUNs; lun_next is captured before each dealloc. */
	plun = ptgt->tgt_lun;
	while (plun != NULL) {
		struct fcp_lun *next_lun = plun->lun_next;

		fcp_dealloc_lun(plun);
		plun = next_lun;
	}

	mutex_exit(&ptgt->tgt_mutex);
	fcp_dealloc_tgt(ptgt);
}
14829
14830 /*
14831 * Function: fcp_is_retryable
14832 *
14833 * Description: Indicates if the internal packet is retryable.
14834 *
14835 * Argument: *icmd FCP internal packet.
14836 *
14837 * Return Value: 0 Not retryable
14838 * 1 Retryable
14839 *
14840 * Context: User, Kernel and Interrupt context
14841 */
14842 static int
14843 fcp_is_retryable(struct fcp_ipkt *icmd)
14844 {
14845 if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14846 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
14847 return (0);
14848 }
14849
14850 return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14851 icmd->ipkt_port->port_deadline) ? 1 : 0);
14852 }
14853
14854 /*
14855 * Function: fcp_create_on_demand
14856 *
14857 * Argument: *pptr FCP port.
14858 * *pwwn Port WWN.
14859 *
14860 * Return Value: 0 Success
14861 * EIO
14862 * ENOMEM
14863 * EBUSY
14864 * EINVAL
14865 *
14866 * Context: User and Kernel context
14867 */
14868 static int
14869 fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
14870 {
14871 int wait_ms;
14872 int tcount;
14873 int lcount;
14874 int ret;
14875 int error;
14876 int rval = EIO;
14877 int ntries;
14878 fc_portmap_t *devlist;
14879 opaque_t pd;
14880 struct fcp_lun *plun;
14881 struct fcp_tgt *ptgt;
14882 int old_manual = 0;
14883
14884 /* Allocates the fc_portmap_t structure. */
14885 devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);
14886
14887 /*
14888 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown
14889 * in the commented statement below:
14890 *
14891 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
14892 *
14893 * Below, the deadline for the discovery process is set.
14894 */
14895 mutex_enter(&pptr->port_mutex);
14896 pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
14897 mutex_exit(&pptr->port_mutex);
14898
14899 /*
14900 * We try to find the remote port based on the WWN provided by the
14901 * caller. We actually ask fp/fctl if it has it.
14902 */
14903 pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
14904 (la_wwn_t *)pwwn, &error, 1);
14905
14906 if (pd == NULL) {
14907 kmem_free(devlist, sizeof (*devlist));
14908 return (rval);
14909 }
14910
14911 /*
14912 * The remote port was found. We ask fp/fctl to update our
14913 * fc_portmap_t structure.
14914 */
14915 ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
14916 (la_wwn_t *)pwwn, devlist);
14917 if (ret != FC_SUCCESS) {
14918 kmem_free(devlist, sizeof (*devlist));
14919 return (rval);
14920 }
14921
14922 /*
14923 * The map flag field is set to indicates that the creation is being
14924 * done at the user request (Ioclt probably luxadm or cfgadm).
14925 */
14926 devlist->map_type = PORT_DEVICE_USER_CREATE;
14927
14928 mutex_enter(&pptr->port_mutex);
14929
14930 /*
14931 * We check to see if fcp already has a target that describes the
14932 * device being created. If not it is created.
14933 */
14934 ptgt = fcp_lookup_target(pptr, pwwn);
14935 if (ptgt == NULL) {
14936 lcount = pptr->port_link_cnt;
14937 mutex_exit(&pptr->port_mutex);
14938
14939 ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
14940 if (ptgt == NULL) {
14941 fcp_log(CE_WARN, pptr->port_dip,
14942 "!FC target allocation failed");
14943 return (ENOMEM);
14944 }
14945
14946 mutex_enter(&pptr->port_mutex);
14947 }
14948
14949 mutex_enter(&ptgt->tgt_mutex);
14950 ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
14951 ptgt->tgt_tmp_cnt = 1;
14952 ptgt->tgt_device_created = 0;
14953 /*
14954 * If fabric and auto config is set but the target was
14955 * manually unconfigured then reset to the manual_config_only to
14956 * 0 so the device will get configured.
14957 */
14958 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14959 fcp_enable_auto_configuration &&
14960 ptgt->tgt_manual_config_only == 1) {
14961 old_manual = 1;
14962 ptgt->tgt_manual_config_only = 0;
14963 }
14964 mutex_exit(&ptgt->tgt_mutex);
14965
14966 fcp_update_targets(pptr, devlist, 1,
14967 FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);
14968
14969 lcount = pptr->port_link_cnt;
14970 tcount = ptgt->tgt_change_cnt;
14971
14972 if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
14973 tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
14974 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14975 fcp_enable_auto_configuration && old_manual) {
14976 mutex_enter(&ptgt->tgt_mutex);
14977 ptgt->tgt_manual_config_only = 1;
14978 mutex_exit(&ptgt->tgt_mutex);
14979 }
14980
14981 if (pptr->port_link_cnt != lcount ||
14982 ptgt->tgt_change_cnt != tcount) {
14983 rval = EBUSY;
14984 }
14985 mutex_exit(&pptr->port_mutex);
14986
14987 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14988 FCP_BUF_LEVEL_3, 0,
14989 "fcp_create_on_demand: mapflags ptgt=%x, "
14990 "lcount=%x::port_link_cnt=%x, "
14991 "tcount=%x: tgt_change_cnt=%x, rval=%x",
14992 ptgt, lcount, pptr->port_link_cnt,
14993 tcount, ptgt->tgt_change_cnt, rval);
14994 return (rval);
14995 }
14996
14997 /*
14998 * Due to lack of synchronization mechanisms, we perform
14999 * periodic monitoring of our request; Because requests
15000 * get dropped when another one supercedes (either because
15001 * of a link change or a target change), it is difficult to
15002 * provide a clean synchronization mechanism (such as a
15003 * semaphore or a conditional variable) without exhaustively
15004 * rewriting the mainline discovery code of this driver.
15005 */
15006 wait_ms = 500;
15007
15008 ntries = fcp_max_target_retries;
15009
15010 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15011 FCP_BUF_LEVEL_3, 0,
15012 "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
15013 "lcount=%x::port_link_cnt=%x, "
15014 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15015 "tgt_tmp_cnt =%x",
15016 ntries, ptgt, lcount, pptr->port_link_cnt,
15017 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15018 ptgt->tgt_tmp_cnt);
15019
15020 mutex_enter(&ptgt->tgt_mutex);
15021 while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
15022 ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
15023 mutex_exit(&ptgt->tgt_mutex);
15024 mutex_exit(&pptr->port_mutex);
15025
15026 delay(drv_usectohz(wait_ms * 1000));
15027
15028 mutex_enter(&pptr->port_mutex);
15029 mutex_enter(&ptgt->tgt_mutex);
15030 }
15031
15032
15033 if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
15034 rval = EBUSY;
15035 } else {
15036 if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
15037 FCP_TGT_NODE_PRESENT) {
15038 rval = 0;
15039 }
15040 }
15041
15042 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15043 FCP_BUF_LEVEL_3, 0,
15044 "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
15045 "lcount=%x::port_link_cnt=%x, "
15046 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15047 "tgt_tmp_cnt =%x",
15048 ntries, ptgt, lcount, pptr->port_link_cnt,
15049 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15050 ptgt->tgt_tmp_cnt);
15051
15052 if (rval) {
15053 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15054 fcp_enable_auto_configuration && old_manual) {
15055 ptgt->tgt_manual_config_only = 1;
15056 }
15057 mutex_exit(&ptgt->tgt_mutex);
15058 mutex_exit(&pptr->port_mutex);
15059 kmem_free(devlist, sizeof (*devlist));
15060
15061 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15062 FCP_BUF_LEVEL_3, 0,
15063 "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
15064 "lcount=%x::port_link_cnt=%x, "
15065 "tcount=%x::tgt_change_cnt=%x, rval=%x, "
15066 "tgt_device_created=%x, tgt D_ID=%x",
15067 ntries, ptgt, lcount, pptr->port_link_cnt,
15068 tcount, ptgt->tgt_change_cnt, rval,
15069 ptgt->tgt_device_created, ptgt->tgt_d_id);
15070 return (rval);
15071 }
15072
15073 if ((plun = ptgt->tgt_lun) != NULL) {
15074 tcount = plun->lun_tgt->tgt_change_cnt;
15075 } else {
15076 rval = EINVAL;
15077 }
15078 lcount = pptr->port_link_cnt;
15079
15080 /*
15081 * Configuring the target with no LUNs will fail. We
15082 * should reset the node state so that it is not
15083 * automatically configured when the LUNs are added
15084 * to this target.
15085 */
15086 if (ptgt->tgt_lun_cnt == 0) {
15087 ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
15088 }
15089 mutex_exit(&ptgt->tgt_mutex);
15090 mutex_exit(&pptr->port_mutex);
15091
15092 while (plun) {
15093 child_info_t *cip;
15094
15095 mutex_enter(&plun->lun_mutex);
15096 cip = plun->lun_cip;
15097 mutex_exit(&plun->lun_mutex);
15098
15099 mutex_enter(&ptgt->tgt_mutex);
15100 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
15101 mutex_exit(&ptgt->tgt_mutex);
15102
15103 rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
15104 FCP_ONLINE, lcount, tcount,
15105 NDI_ONLINE_ATTACH);
15106 if (rval != NDI_SUCCESS) {
15107 FCP_TRACE(fcp_logq,
15108 pptr->port_instbuf, fcp_trace,
15109 FCP_BUF_LEVEL_3, 0,
15110 "fcp_create_on_demand: "
15111 "pass_to_hp_and_wait failed "
15112 "rval=%x", rval);
15113 rval = EIO;
15114 } else {
15115 mutex_enter(&LUN_TGT->tgt_mutex);
15116 plun->lun_state &= ~(FCP_LUN_OFFLINE |
15117 FCP_LUN_BUSY);
15118 mutex_exit(&LUN_TGT->tgt_mutex);
15119 }
15120 mutex_enter(&ptgt->tgt_mutex);
15121 }
15122
15123 plun = plun->lun_next;
15124 mutex_exit(&ptgt->tgt_mutex);
15125 }
15126
15127 kmem_free(devlist, sizeof (*devlist));
15128
15129 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15130 fcp_enable_auto_configuration && old_manual) {
15131 mutex_enter(&ptgt->tgt_mutex);
15132 /* if successful then set manual to 0 */
15133 if (rval == 0) {
15134 ptgt->tgt_manual_config_only = 0;
15135 } else {
15136 /* reset to 1 so the user has to do the config */
15137 ptgt->tgt_manual_config_only = 1;
15138 }
15139 mutex_exit(&ptgt->tgt_mutex);
15140 }
15141
15142 return (rval);
15143 }
15144
15145
15146 static void
15147 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
15148 {
15149 int count;
15150 uchar_t byte;
15151
15152 count = 0;
15153 while (*string) {
15154 byte = FCP_ATOB(*string); string++;
15155 byte = byte << 4 | FCP_ATOB(*string); string++;
15156 bytes[count++] = byte;
15157
15158 if (count >= byte_len) {
15159 break;
15160 }
15161 }
15162 }
15163
15164 static void
15165 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
15166 {
15167 int i;
15168
15169 for (i = 0; i < FC_WWN_SIZE; i++) {
15170 (void) sprintf(string + (i * 2),
15171 "%02x", wwn[i]);
15172 }
15173
15174 }
15175
/*
 * Log a human-readable warning describing why an internal packet (an
 * ELS or a discovery-time SCSI command) failed.
 *
 * The message is built in two stages: a format string is first
 * assembled in 'buf' -- the doubled "%%x"/"%%s" conversions survive
 * the sprintf() as literal "%x"/"%s" -- and that string is then handed
 * to fcp_log() together with the values that fill the conversions in.
 */
static void
fcp_print_error(fc_packet_t *fpkt)
{
	struct fcp_ipkt *icmd = (struct fcp_ipkt *)
	    fpkt->pkt_ulp_private;
	struct fcp_port *pptr;
	struct fcp_tgt *ptgt;
	struct fcp_lun *plun;
	caddr_t buf;
	int scsi_cmd = 0;	/* nonzero for SCSI (vs. ELS) opcodes */

	ptgt = icmd->ipkt_tgt;
	plun = icmd->ipkt_lun;
	pptr = ptgt->tgt_port;

	/* Logging is best-effort: silently skip it if no memory. */
	buf = kmem_zalloc(256, KM_NOSLEEP);
	if (buf == NULL) {
		return;
	}

	/*
	 * Seed the message with the opcode that failed.  scsi_cmd is
	 * set for the SCSI commands so the FCP response is decoded
	 * below; ELS failures go straight to fc_ulp_pkt_error().
	 */
	switch (icmd->ipkt_opcode) {
	case SCMD_REPORT_LUN:
		(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY_PAGE83:
		(void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY:
		(void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case LA_ELS_PLOGI:
		(void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
		break;

	case LA_ELS_PRLI:
		(void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
		break;
	}

	if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
		/*
		 * The transport delivered the command, so the failure
		 * details live in the FCP response payload.
		 */
		struct fcp_rsp response, *rsp;
		uchar_t asc, ascq;
		caddr_t sense_key = NULL;
		struct fcp_rsp_info fcp_rsp_err, *bep;

		if (icmd->ipkt_nodma) {
			/* Response buffer is directly addressable. */
			rsp = (struct fcp_rsp *)fpkt->pkt_resp;
			bep = (struct fcp_rsp_info *)((caddr_t)rsp +
			    sizeof (struct fcp_rsp));
		} else {
			/* Copy the response out of the DMA buffer. */
			rsp = &response;
			bep = &fcp_rsp_err;

			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));

			FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
			    bep, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp_info));
		}


		/* Malformed response: report the raw fields and bail. */
		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
			(void) sprintf(buf + strlen(buf),
			    " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
			    " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
			    " senselen=%%x. Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
			    rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
			    rsp->fcp_u.fcp_status.reserved_1,
			    rsp->fcp_response_len, rsp->fcp_sense_len);

			kmem_free(buf, 256);
			return;
		}

		/* Append the FCP response code if one was supplied. */
		if (rsp->fcp_u.fcp_status.rsp_len_set &&
		    bep->rsp_code != FCP_NO_FAILURE) {
			(void) sprintf(buf + strlen(buf),
			    " FCP Response code = 0x%x", bep->rsp_code);
		}

		if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
			/* CHECK CONDITION: decode the sense data. */
			struct scsi_extended_sense sense_info, *sense_ptr;

			if (icmd->ipkt_nodma) {
				/* Sense data follows the response data. */
				sense_ptr = (struct scsi_extended_sense *)
				    ((caddr_t)fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len);
			} else {
				sense_ptr = &sense_info;

				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len, &sense_info,
				    fpkt->pkt_resp_acc,
				    sizeof (struct scsi_extended_sense));
			}

			/* Translate the sense key number into its name. */
			if (sense_ptr->es_key < NUM_SENSE_KEYS +
			    NUM_IMPL_SENSE_KEYS) {
				sense_key = sense_keys[sense_ptr->es_key];
			} else {
				sense_key = "Undefined";
			}

			asc = sense_ptr->es_add_code;
			ascq = sense_ptr->es_qual_code;

			(void) sprintf(buf + strlen(buf),
			    ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
			    " Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, sense_key,
			    asc, ascq);
		} else {
			/* Non-CHECK status: just report the status byte. */
			(void) sprintf(buf + strlen(buf),
			    " : SCSI status=%%x. Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num,
			    rsp->fcp_u.fcp_status.scsi_status);
		}
	} else {
		/*
		 * Transport-level failure (or an ELS): let the transport
		 * decode the packet state and reason into strings.
		 */
		caddr_t state, reason, action, expln;

		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
		    &action, &expln);

		(void) sprintf(buf + strlen(buf), ": State:%%s,"
		    " Reason:%%s. Giving up");

		if (scsi_cmd) {
			/* SCSI command messages also carry the LUN. */
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, state, reason);
		} else {
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, state, reason);
		}
	}

	kmem_free(buf, 256);
}
15332
15333
/*
 * Handle a transport error for an internal packet (icmd) that failed
 * to be sent (rval is the fc_ulp_transport/fc_ulp_issue_els result and
 * 'op' names the operation for logging).
 *
 * Returns DDI_SUCCESS when the packet has been requeued or resent (the
 * caller must not free it), DDI_FAILURE when it is being given up on.
 */
static int
fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, int rval, caddr_t op)
{
	int ret = DDI_FAILURE;
	char *error;

	switch (rval) {
	case FC_DEVICE_BUSY_NEW_RSCN:
		/*
		 * This means that there was a new RSCN that the transport
		 * knows about (which the ULP *may* know about too) but the
		 * pkt that was sent down was related to an older RSCN. So, we
		 * are just going to reset the retry count and deadline and
		 * continue to retry. The idea is that transport is currently
		 * working on the new RSCN and will soon let the ULPs know
		 * about it and when it does the existing logic will kick in
		 * where it will change the tcount to indicate that something
		 * changed on the target. So, rediscovery will start and there
		 * will not be an infinite retry.
		 *
		 * For a full flow of how the RSCN info is transferred back and
		 * forth, see fp.c
		 */
		icmd->ipkt_retries = 0;
		icmd->ipkt_port->port_deadline = fcp_watchdog_time +
		    FCP_ICMD_DEADLINE;

		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		/* FALLTHROUGH */

	case FC_STATEC_BUSY:
	case FC_DEVICE_BUSY:
	case FC_PBUSY:
	case FC_FBUSY:
	case FC_TRAN_BUSY:
	case FC_OFFLINE:
		/*
		 * Transient/busy conditions: requeue the packet for a
		 * later retry as long as the retry budget and the state
		 * counters still allow it.
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
		    fcp_is_retryable(icmd)) {
			fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
			ret = DDI_SUCCESS;
		}
		break;

	case FC_LOGINREQ:
		/*
		 * FC_LOGINREQ used to be handled just like all the cases
		 * above. It has been changed to handled a PRLI that fails
		 * with FC_LOGINREQ different than other ipkts that fail
		 * with FC_LOGINREQ. If a PRLI fails with FC_LOGINREQ it is
		 * a simple matter to turn it into a PLOGI instead, so that's
		 * exactly what we do here.
		 */
		if (icmd->ipkt_opcode == LA_ELS_PRLI) {
			/* Re-login first: send a PLOGI in its place. */
			ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
			    icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
			    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		} else {
			/* Otherwise treat it like the busy cases above. */
			FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
			    FCP_BUF_LEVEL_3, 0,
			    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
			    rval, ptgt->tgt_d_id);
			if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
			    fcp_is_retryable(icmd)) {
				fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
				ret = DDI_SUCCESS;
			}
		}
		break;

	default:
		/*
		 * Hard failure: log it unless the port/target state has
		 * changed since the packet was built (in which case the
		 * rediscovery logic will deal with it anyway).
		 */
		mutex_enter(&pptr->port_mutex);
		mutex_enter(&ptgt->tgt_mutex);
		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);

			(void) fc_ulp_error(rval, &error);
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to send %s to D_ID=%x error=%s",
			    op, ptgt->tgt_d_id, error);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_handle_ipkt_errors,1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
		}
		break;
	}

	return (ret);
}
15435
15436
15437 /*
15438 * Check of outstanding commands on any LUN for this target
15439 */
15440 static int
15441 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15442 {
15443 struct fcp_lun *plun;
15444 struct fcp_pkt *cmd;
15445
15446 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15447 mutex_enter(&plun->lun_mutex);
15448 for (cmd = plun->lun_pkt_head; cmd != NULL;
15449 cmd = cmd->cmd_forw) {
15450 if (cmd->cmd_state == FCP_PKT_ISSUED) {
15451 mutex_exit(&plun->lun_mutex);
15452 return (FC_SUCCESS);
15453 }
15454 }
15455 mutex_exit(&plun->lun_mutex);
15456 }
15457
15458 return (FC_FAILURE);
15459 }
15460
/*
 * Build an fc_portmap_t array describing every non-orphan target known
 * to this port.  On return *dev_cnt holds the number of entries.
 *
 * Returns the array (caller frees: *dev_cnt * sizeof (fc_portmap_t))
 * or NULL if the allocation failed.
 *
 * NOTE(review): if no non-orphan target exists, *dev_cnt is 0 and this
 * calls kmem_zalloc(0) -- confirm callers guarantee at least one
 * target, or that a zero-size allocation is acceptable here.
 */
static fc_portmap_t *
fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
{
	int i;
	fc_portmap_t *devlist;
	fc_portmap_t *devptr = NULL;
	struct fcp_tgt *ptgt;

	mutex_enter(&pptr->port_mutex);
	/* First pass: count the non-orphan targets. */
	for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
				++*dev_cnt;
			}
		}
	}

	/* KM_NOSLEEP because port_mutex is held across the allocation. */
	devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
	    KM_NOSLEEP);
	if (devlist == NULL) {
		mutex_exit(&pptr->port_mutex);
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: failed to allocate for portmap for construct map",
		    pptr->port_instance);
		return (devptr);
	}

	/* Second pass: fill in one portmap entry per target. */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
				int ret;

				/* Ask the transport for the entry first. */
				ret = fc_ulp_pwwn_to_portmap(
				    pptr->port_fp_handle,
				    (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
				    devlist);

				if (ret == FC_SUCCESS) {
					devlist++;
					continue;
				}

				/*
				 * Transport doesn't know this WWN: build
				 * an INVALID/OLD entry from our own copy
				 * of the target's addressing info.
				 */
				devlist->map_pd = NULL;
				devlist->map_did.port_id = ptgt->tgt_d_id;
				devlist->map_hard_addr.hard_addr =
				    ptgt->tgt_hard_addr;

				devlist->map_state = PORT_DEVICE_INVALID;
				devlist->map_type = PORT_DEVICE_OLD;

				bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
				    &devlist->map_nwwn, FC_WWN_SIZE);

				bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
				    &devlist->map_pwwn, FC_WWN_SIZE);

				devlist++;
			}
		}
	}

	mutex_exit(&pptr->port_mutex);

	return (devptr);
}
15528 /*
15529 * Inimate MPxIO that the lun is busy and cannot accept regular IO
15530 */
15531 static void
15532 fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
15533 {
15534 int i;
15535 struct fcp_tgt *ptgt;
15536 struct fcp_lun *plun;
15537
15538 for (i = 0; i < FCP_NUM_HASH; i++) {
15539 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15540 ptgt = ptgt->tgt_next) {
15541 mutex_enter(&ptgt->tgt_mutex);
15542 for (plun = ptgt->tgt_lun; plun != NULL;
15543 plun = plun->lun_next) {
15544 if (plun->lun_mpxio &&
15545 plun->lun_state & FCP_LUN_BUSY) {
15546 if (!fcp_pass_to_hp(pptr, plun,
15547 plun->lun_cip,
15548 FCP_MPXIO_PATH_SET_BUSY,
15549 pptr->port_link_cnt,
15550 ptgt->tgt_change_cnt, 0, 0)) {
15551 FCP_TRACE(fcp_logq,
15552 pptr->port_instbuf,
15553 fcp_trace,
15554 FCP_BUF_LEVEL_2, 0,
15555 "path_verifybusy: "
15556 "disable lun %p failed!",
15557 plun);
15558 }
15559 }
15560 }
15561 mutex_exit(&ptgt->tgt_mutex);
15562 }
15563 }
15564 }
15565
15566 static int
15567 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
15568 {
15569 dev_info_t *cdip = NULL;
15570 dev_info_t *pdip = NULL;
15571
15572 ASSERT(plun);
15573
15574 mutex_enter(&plun->lun_mutex);
15575 if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
15576 mutex_exit(&plun->lun_mutex);
15577 return (NDI_FAILURE);
15578 }
15579 mutex_exit(&plun->lun_mutex);
15580 cdip = mdi_pi_get_client(PIP(cip));
15581 pdip = mdi_pi_get_phci(PIP(cip));
15582
15583 ASSERT(cdip != NULL);
15584 ASSERT(pdip != NULL);
15585
15586 if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
15587 /* LUN ready for IO */
15588 (void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15589 } else {
15590 /* LUN busy to accept IO */
15591 (void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15592 }
15593 return (NDI_SUCCESS);
15594 }
15595
15596 /*
15597 * Caller must free the returned string of MAXPATHLEN len
15598 * If the device is offline (-1 instance number) NULL
15599 * will be returned.
15600 */
static char *
fcp_get_lun_path(struct fcp_lun *plun)
{
	dev_info_t *dip = NULL;
	char *path = NULL;
	mdi_pathinfo_t *pip = NULL;

	if (plun == NULL) {
		return (NULL);
	}

	mutex_enter(&plun->lun_mutex);
	if (plun->lun_mpxio == 0) {
		/* Non-MPxIO: lun_cip is the devinfo node itself. */
		dip = DIP(plun->lun_cip);
		mutex_exit(&plun->lun_mutex);
	} else {
		/*
		 * lun_cip must be accessed with lun_mutex held. Here
		 * plun->lun_cip either points to a valid node or it is NULL.
		 * Make a copy so that we can release lun_mutex.
		 */
		pip = PIP(plun->lun_cip);

		/*
		 * Increase ref count on the path so that we can release
		 * lun_mutex and still be sure that the pathinfo node (and thus
		 * also the client) is not deallocated. If pip is NULL, this
		 * has no effect.
		 */
		mdi_hold_path(pip);

		mutex_exit(&plun->lun_mutex);

		/* Get the client. If pip is NULL, we get NULL. */
		dip = mdi_pi_get_client(pip);
	}

	/* Offline devices (negative instance) yield NULL. */
	if (dip == NULL)
		goto out;
	if (ddi_get_instance(dip) < 0)
		goto out;

	/*
	 * NOTE(review): KM_SLEEP allocations do not return NULL, so the
	 * check below appears to be dead code -- harmless, kept as-is.
	 */
	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (path == NULL)
		goto out;

	(void) ddi_pathname(dip, path);

	/* Clean up. */
out:
	/* Drop the path hold taken above (no-op when pip is NULL). */
	if (pip != NULL)
		mdi_rele_path(pip);

	/*
	 * In reality, the user wants a fully valid path (one they can open)
	 * but this string is lacking the mount point, and the minor node.
	 * It would be nice if we could "figure these out" somehow
	 * and fill them in. Otherwise, the userland code has to understand
	 * driver specific details of which minor node is the "best" or
	 * "right" one to expose. (Ex: which slice is the whole disk, or
	 * which tape doesn't rewind)
	 */
	return (path);
}
15665
/*
 * tran_bus_config(9E) entry point.  Holds off enumeration until either
 * all devices have reported in or FCP_INIT_WAIT_TIMEOUT has elapsed
 * since the port attached, so fabric devices are available for early
 * (e.g. root) configuration.
 */
static int
fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int64_t reset_delay;
	int rval, retry = 0;
	struct fcp_port *pptr = fcp_dip2port(parent);

	/* Remaining portion of the post-attach settle window, in ticks. */
	reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
	    (ddi_get_lbolt64() - pptr->port_attach_time);
	if (reset_delay < 0) {
		reset_delay = 0;
	}

	if (fcp_bus_config_debug) {
		flag |= NDI_DEVI_DEBUG;
	}

	switch (op) {
	case BUS_CONFIG_ONE:
		/*
		 * Retry the command since we need to ensure
		 * the fabric devices are available for root
		 */
		while (retry++ < fcp_max_bus_config_retries) {
			rval = (ndi_busop_bus_config(parent,
			    flag | NDI_MDI_FALLBACK, op,
			    arg, childp, (clock_t)reset_delay));
			if (rval == 0) {
				/* NDI_SUCCESS */
				return (rval);
			}
		}

		/*
		 * drain taskq to make sure nodes are created and then
		 * try again.
		 */
		taskq_wait(DEVI(parent)->devi_taskq);
		return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
		    op, arg, childp, 0));

	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL: {
		/*
		 * delay till all devices report in (port_tmp_cnt == 0)
		 * or FCP_INIT_WAIT_TIMEOUT
		 */
		mutex_enter(&pptr->port_mutex);
		while ((reset_delay > 0) && pptr->port_tmp_cnt) {
			/* Woken by port_config_cv or by the timeout. */
			(void) cv_timedwait(&pptr->port_config_cv,
			    &pptr->port_mutex,
			    ddi_get_lbolt() + (clock_t)reset_delay);
			/* Recompute how much of the window remains. */
			reset_delay =
			    (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
			    (ddi_get_lbolt64() - pptr->port_attach_time);
		}
		mutex_exit(&pptr->port_mutex);
		/* drain taskq to make sure nodes are created */
		taskq_wait(DEVI(parent)->devi_taskq);
		return (ndi_busop_bus_config(parent, flag, op,
		    arg, childp, 0));
	}

	default:
		return (NDI_FAILURE);
	}
	/*NOTREACHED*/
}
15734
15735 static int
15736 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15737 ddi_bus_config_op_t op, void *arg)
15738 {
15739 if (fcp_bus_config_debug) {
15740 flag |= NDI_DEVI_DEBUG;
15741 }
15742
15743 return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15744 }
15745
15746
15747 /*
15748 * Routine to copy GUID into the lun structure.
15749 * returns 0 if copy was successful and 1 if encountered a
15750 * failure and did not copy the guid.
15751 */
15752 static int
15753 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
15754 {
15755
15756 int retval = 0;
15757
15758 /* add one for the null terminator */
15759 const unsigned int len = strlen(guidp) + 1;
15760
15761 if ((guidp == NULL) || (plun == NULL)) {
15762 return (1);
15763 }
15764
15765 /*
15766 * if the plun->lun_guid already has been allocated,
15767 * then check the size. if the size is exact, reuse
15768 * it....if not free it an allocate the required size.
15769 * The reallocation should NOT typically happen
15770 * unless the GUIDs reported changes between passes.
15771 * We free up and alloc again even if the
15772 * size was more than required. This is due to the
15773 * fact that the field lun_guid_size - serves
15774 * dual role of indicating the size of the wwn
15775 * size and ALSO the allocation size.
15776 */
15777 if (plun->lun_guid) {
15778 if (plun->lun_guid_size != len) {
15779 /*
15780 * free the allocated memory and
15781 * initialize the field
15782 * lun_guid_size to 0.
15783 */
15784 kmem_free(plun->lun_guid, plun->lun_guid_size);
15785 plun->lun_guid = NULL;
15786 plun->lun_guid_size = 0;
15787 }
15788 }
15789 /*
15790 * alloc only if not already done.
15791 */
15792 if (plun->lun_guid == NULL) {
15793 plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15794 if (plun->lun_guid == NULL) {
15795 cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block:"
15796 "Unable to allocate"
15797 "Memory for GUID!!! size %d", len);
15798 retval = 1;
15799 } else {
15800 plun->lun_guid_size = len;
15801 }
15802 }
15803 if (plun->lun_guid) {
15804 /*
15805 * now copy the GUID
15806 */
15807 bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15808 }
15809 return (retval);
15810 }
15811
15812 /*
15813 * fcp_reconfig_wait
15814 *
15815 * Wait for a rediscovery/reconfiguration to complete before continuing.
15816 */
15817
15818 static void
15819 fcp_reconfig_wait(struct fcp_port *pptr)
15820 {
15821 clock_t reconfig_start, wait_timeout;
15822
15823 /*
15824 * Quick check. If pptr->port_tmp_cnt is 0, there is no
15825 * reconfiguration in progress.
15826 */
15827
15828 mutex_enter(&pptr->port_mutex);
15829 if (pptr->port_tmp_cnt == 0) {
15830 mutex_exit(&pptr->port_mutex);
15831 return;
15832 }
15833 mutex_exit(&pptr->port_mutex);
15834
15835 /*
15836 * If we cause a reconfig by raising power, delay until all devices
15837 * report in (port_tmp_cnt returns to 0)
15838 */
15839
15840 reconfig_start = ddi_get_lbolt();
15841 wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);
15842
15843 mutex_enter(&pptr->port_mutex);
15844
15845 while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
15846 pptr->port_tmp_cnt) {
15847
15848 (void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
15849 reconfig_start + wait_timeout);
15850 }
15851
15852 mutex_exit(&pptr->port_mutex);
15853
15854 /*
15855 * Even if fcp_tmp_count isn't 0, continue without error. The port
15856 * we want may still be ok. If not, it will error out later
15857 */
15858 }
15859
15860 /*
15861 * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
15862 * We rely on the fcp_global_mutex to provide protection against changes to
15863 * the fcp_lun_blacklist.
15864 *
15865 * You can describe a list of target port WWNs and LUN numbers which will
15866 * not be configured. LUN numbers will be interpreted as decimal. White
15867 * spaces and ',' can be used in the list of LUN numbers.
15868 *
15869 * To prevent LUNs 1 and 2 from being configured for target
15870 * port 510000f010fd92a1 and target port 510000e012079df1, set:
15871 *
15872 * pwwn-lun-blacklist=
15873 * "510000f010fd92a1,1,2",
15874 * "510000e012079df1,1,2";
15875 */
static void
fcp_read_blacklist(dev_info_t *dip,
    struct fcp_black_list_entry **pplun_blacklist)
{
	char **prop_array = NULL;
	char *curr_pwwn = NULL;
	char *curr_lun = NULL;
	uint32_t prop_item = 0;
	int idx = 0;
	int len = 0;

	ASSERT(mutex_owned(&fcp_global_mutex));
	/* No property means no blacklist: nothing to do. */
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
		return;
	}

	/*
	 * Each entry is "<16 hex digit WWN><separator><lun list>"; the
	 * separator after the WWN must be ' ' or ','.  Malformed
	 * entries are logged and skipped.
	 */
	for (idx = 0; idx < prop_item; idx++) {

		curr_pwwn = prop_array[idx];
		/* Skip leading spaces before the WWN. */
		while (*curr_pwwn == ' ') {
			curr_pwwn++;
		}
		/* Must be longer than WWN (16 chars) plus separator. */
		if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
			    ", please check.", curr_pwwn);
			continue;
		}
		/* Character after the WWN must separate the LUN list. */
		if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
		    (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
			    ", please check.", curr_pwwn);
			continue;
		}
		/* All 16 WWN characters must be hex digits. */
		for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
			if (isxdigit(curr_pwwn[len]) != TRUE) {
				fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
				    "blacklist, please check.", curr_pwwn);
				break;
			}
		}
		/* Loop above broke early on a non-hex character. */
		if (len != sizeof (la_wwn_t) * 2) {
			continue;
		}

		/* Split the entry in place: WWN and LUN-list halves. */
		curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
		*(curr_lun - 1) = '\0';
		fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
	}

	ddi_prop_free(prop_array);
}
15929
15930 /*
15931 * Get the masking info about one remote target port designated by wwn.
15932 * Lun ids could be separated by ',' or white spaces.
15933 */
/*
 * Parse the LUN-number list 'curr_lun' (decimal LUN ids separated by
 * ',' or spaces) for the target port named by 'curr_pwwn' and add one
 * blacklist record per valid LUN.  Invalid tokens are logged and
 * skipped; an over-long token aborts the rest of the list.
 */
static void
fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
    struct fcp_black_list_entry **pplun_blacklist)
{
	int idx = 0;			/* length of the current token */
	uint32_t offset = 0;		/* start of the current token */
	unsigned long lun_id = 0;
	char lunid_buf[16];
	char *pend = NULL;
	int illegal_digit = 0;		/* non-digit chars in the token */

	while (offset < strlen(curr_lun)) {
		/* Scan one token, counting any non-decimal characters. */
		while ((curr_lun[offset + idx] != ',') &&
		    (curr_lun[offset + idx] != '\0') &&
		    (curr_lun[offset + idx] != ' ')) {
			if (isdigit(curr_lun[offset + idx]) == 0) {
				illegal_digit++;
			}
			idx++;
		}
		if (illegal_digit > 0) {
			offset += (idx+1);	/* To the start of next lun */
			idx = 0;
			illegal_digit = 0;
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check digits.",
			    curr_lun, curr_pwwn);
			continue;
		}
		/* Token too long to fit lunid_buf: give up on the list. */
		if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check the length of LUN#.",
			    curr_lun, curr_pwwn);
			break;
		}
		if (idx == 0) {		/* ignore ' ' or ',' or '\0' */
			offset++;
			continue;
		}

		/* NUL-terminate a copy of the token and convert it. */
		bcopy(curr_lun + offset, lunid_buf, idx);
		lunid_buf[idx] = '\0';
		if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
			fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
		} else {
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check %s.",
			    curr_lun, curr_pwwn, lunid_buf);
		}
		offset += (idx+1);	/* To the start of next lun */
		idx = 0;
	}
}
15987
15988 /*
15989 * Add one masking record
15990 */
15991 static void
15992 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
15993 struct fcp_black_list_entry **pplun_blacklist)
15994 {
15995 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
15996 struct fcp_black_list_entry *new_entry = NULL;
15997 la_wwn_t wwn;
15998
15999 fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
16000 while (tmp_entry) {
16001 if ((bcmp(&tmp_entry->wwn, &wwn,
16002 sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
16003 return;
16004 }
16005
16006 tmp_entry = tmp_entry->next;
16007 }
16008
16009 /* add to black list */
16010 new_entry = (struct fcp_black_list_entry *)kmem_zalloc
16011 (sizeof (struct fcp_black_list_entry), KM_SLEEP);
16012 bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
16013 new_entry->lun = lun_id;
16014 new_entry->masked = 0;
16015 new_entry->next = *pplun_blacklist;
16016 *pplun_blacklist = new_entry;
16017 }
16018
16019 /*
16020 * Check if we should mask the specified lun of this fcp_tgt
16021 */
16022 static int
16023 fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id)
16024 {
16025 struct fcp_black_list_entry *remote_port;
16026
16027 remote_port = fcp_lun_blacklist;
16028 while (remote_port != NULL) {
16029 if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
16030 if (remote_port->lun == lun_id) {
16031 remote_port->masked++;
16032 if (remote_port->masked == 1) {
16033 fcp_log(CE_NOTE, NULL, "LUN %d of port "
16034 "%02x%02x%02x%02x%02x%02x%02x%02x "
16035 "is masked due to black listing.\n",
16036 lun_id, wwn->raw_wwn[0],
16037 wwn->raw_wwn[1], wwn->raw_wwn[2],
16038 wwn->raw_wwn[3], wwn->raw_wwn[4],
16039 wwn->raw_wwn[5], wwn->raw_wwn[6],
16040 wwn->raw_wwn[7]);
16041 }
16042 return (TRUE);
16043 }
16044 }
16045 remote_port = remote_port->next;
16046 }
16047 return (FALSE);
16048 }
16049
16050 /*
16051 * Release all allocated resources
16052 */
16053 static void
16054 fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist)
16055 {
16056 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
16057 struct fcp_black_list_entry *current_entry = NULL;
16058
16059 ASSERT(mutex_owned(&fcp_global_mutex));
16060 /*
16061 * Traverse all luns
16062 */
16063 while (tmp_entry) {
16064 current_entry = tmp_entry;
16065 tmp_entry = tmp_entry->next;
16066 kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
16067 }
16068 *pplun_blacklist = NULL;
16069 }
16070
16071 /*
16072 * In fcp module,
16073 * pkt@scsi_pkt, cmd@fcp_pkt, icmd@fcp_ipkt, fpkt@fc_packet, pptr@fcp_port
16074 */
/*
 * tran_init_pkt(9E) entry point for pseudo FC HBAs: allocate (or reuse)
 * a scsi_pkt and wire together the embedded fcp_pkt/fc_packet pieces,
 * then map in the data buffer if one is supplied.  Returns the packet,
 * or NULL if allocation failed.
 */
static struct scsi_pkt *
fcp_pseudo_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
	fcp_port_t *pptr = ADDR2FCP(ap);
	fcp_pkt_t *cmd = NULL;
	fc_frame_hdr_t *hp;

	/*
	 * First step: get the packet
	 */
	if (pkt == NULL) {
		/* fcp_pkt_t + FCA private space ride in the hba_private. */
		pkt = scsi_hba_pkt_alloc(pptr->port_dip, ap, cmdlen, statuslen,
		    tgtlen, sizeof (fcp_pkt_t) + pptr->port_priv_pkt_len,
		    callback, arg);
		if (pkt == NULL) {
			return (NULL);
		}

		/*
		 * All fields in scsi_pkt will be initialized properly or
		 * set to zero. We need do nothing for scsi_pkt.
		 */
		/*
		 * But it's our responsibility to link other related data
		 * structures. Their initialization will be done, just
		 * before the scsi_pkt will be sent to FCA.
		 */
		cmd = PKT2CMD(pkt);
		cmd->cmd_pkt = pkt;
		cmd->cmd_fp_pkt = &cmd->cmd_fc_packet;
		/*
		 * fc_packet_t
		 */
		cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
		/* FCA private area sits right after the fcp_pkt. */
		cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
		    sizeof (struct fcp_pkt));
		cmd->cmd_fp_pkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
		cmd->cmd_fp_pkt->pkt_cmdlen = sizeof (struct fcp_cmd);
		cmd->cmd_fp_pkt->pkt_resp = cmd->cmd_fcp_rsp;
		cmd->cmd_fp_pkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
		/*
		 * Fill in the Fabric Channel Header
		 */
		hp = &cmd->cmd_fp_pkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_COMMAND;
		hp->rsvd = 0;
		hp->type = FC_TYPE_SCSI_FCP;
		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->seq_id = 0;
		hp->df_ctl = 0;
		hp->seq_cnt = 0;
		hp->ox_id = 0xffff;
		hp->rx_id = 0xffff;
		hp->ro = 0;
	} else {
		/*
		 * We need think if we should reset any elements in
		 * related data structures.
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_6, 0,
		    "reusing pkt, flags %d", flags);
		cmd = PKT2CMD(pkt);
		/* Drop any stale port-device handle on reuse. */
		if (cmd->cmd_fp_pkt->pkt_pd) {
			cmd->cmd_fp_pkt->pkt_pd = NULL;
		}
	}

	/*
	 * Second step: dma allocation/move
	 */
	if (bp && bp->b_bcount != 0) {
		/*
		 * Mark if it's read or write
		 */
		if (bp->b_flags & B_READ) {
			cmd->cmd_flags |= CFLAG_IS_READ;
		} else {
			cmd->cmd_flags &= ~CFLAG_IS_READ;
		}

		/* Map the buffer into kernel VA and record it. */
		bp_mapin(bp);
		cmd->cmd_fp_pkt->pkt_data = bp->b_un.b_addr;
		cmd->cmd_fp_pkt->pkt_datalen = bp->b_bcount;
		cmd->cmd_fp_pkt->pkt_data_resid = 0;
	} else {
		/*
		 * It seldom happens, except when CLUSTER or SCSI_VHCI wants
		 * to send zero-length read/write.
		 */
		cmd->cmd_fp_pkt->pkt_data = NULL;
		cmd->cmd_fp_pkt->pkt_datalen = 0;
	}

	return (pkt);
}
16173
16174 static void
16175 fcp_pseudo_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16176 {
16177 fcp_port_t *pptr = ADDR2FCP(ap);
16178
16179 /*
16180 * First we let FCA to uninitilize private part.
16181 */
16182 (void) fc_ulp_uninit_packet(pptr->port_fp_handle,
16183 PKT2CMD(pkt)->cmd_fp_pkt);
16184
16185 /*
16186 * Then we uninitialize fc_packet.
16187 */
16188
16189 /*
16190 * Thirdly, we uninitializae fcp_pkt.
16191 */
16192
16193 /*
16194 * In the end, we free scsi_pkt.
16195 */
16196 scsi_hba_pkt_free(ap, pkt);
16197 }
16198
/*
 * tran_start(9E) entry point for pseudo FC HBAs: finish initializing
 * the fcp_cmd/fc_packet from the scsi_pkt and hand the packet to the
 * transport.  Returns TRAN_ACCEPT, TRAN_BUSY, or TRAN_FATAL_ERROR;
 * FLAG_NOINTR commands are executed via polling instead.
 */
static int
fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	fcp_port_t *pptr = ADDR2FCP(ap);
	fcp_lun_t *plun = ADDR2LUN(ap);
	fcp_tgt_t *ptgt = plun->lun_tgt;
	fcp_pkt_t *cmd = PKT2CMD(pkt);
	fcp_cmd_t *fcmd = &cmd->cmd_fcp_cmd;
	fc_packet_t *fpkt = cmd->cmd_fp_pkt;
	int rval;

	/* (Re)bind the packet to the target's port device handle. */
	fpkt->pkt_pd = ptgt->tgt_pd_handle;
	(void) fc_ulp_init_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt, 1);

	/*
	 * Firstly, we need initialize fcp_pkt_t
	 * Secondly, we need initialize fcp_cmd_t.
	 */
	bcopy(pkt->pkt_cdbp, fcmd->fcp_cdb, pkt->pkt_cdblen);
	fcmd->fcp_data_len = fpkt->pkt_datalen;
	fcmd->fcp_ent_addr = plun->lun_addr;
	/* Map the SCSA tag flags onto the FCP queue type. */
	if (pkt->pkt_flags & FLAG_HTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
	} else if (pkt->pkt_flags & FLAG_OTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
	} else if (pkt->pkt_flags & FLAG_STAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
	} else {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	}

	/* Direction bits, derived from the buf mapping in init_pkt. */
	if (cmd->cmd_flags & CFLAG_IS_READ) {
		fcmd->fcp_cntl.cntl_read_data = 1;
		fcmd->fcp_cntl.cntl_write_data = 0;
	} else {
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 1;
	}

	/*
	 * Then we need initialize fc_packet_t too.
	 */
	/* Pad the transport timeout a little beyond the SCSA timeout. */
	fpkt->pkt_timeout = pkt->pkt_time + 2;
	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
	if (cmd->cmd_flags & CFLAG_IS_READ) {
		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
	} else {
		fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
	}

	if (pkt->pkt_flags & FLAG_NOINTR) {
		/* Polled I/O: no completion callback. */
		fpkt->pkt_comp = NULL;
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
	} else {
		fpkt->pkt_comp = fcp_cmd_callback;
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
		if (pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
		}
	}

	/*
	 * Lastly, we need initialize scsi_pkt
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;
	pkt->pkt_resid = 0;

	/*
	 * if interrupts aren't allowed (e.g. at dump time) then we'll
	 * have to do polled I/O
	 */
	if (pkt->pkt_flags & FLAG_NOINTR) {
		return (fcp_dopoll(pptr, cmd));
	}

	cmd->cmd_state = FCP_PKT_ISSUED;
	rval = fcp_transport(pptr->port_fp_handle, fpkt, 0);
	if (rval == FC_SUCCESS) {
		return (TRAN_ACCEPT);
	}

	/*
	 * Need more consideration
	 *
	 * pkt->pkt_flags & FLAG_NOQUEUE could abort other pkt
	 */
	cmd->cmd_state = FCP_PKT_IDLE;
	if (rval == FC_TRAN_BUSY) {
		return (TRAN_BUSY);
	} else {
		return (TRAN_FATAL_ERROR);
	}
}
16295
16296 /*
16297 * scsi_poll will always call tran_sync_pkt for pseudo FC-HBAs
16298 * SCSA will initialize it to scsi_sync_cache_pkt for physical FC-HBAs
16299 */
/* No-op tran_sync_pkt for pseudo FC HBAs: nothing to sync, just trace. */
static void
fcp_pseudo_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	FCP_TRACE(fcp_logq, "fcp_pseudo_sync_pkt", fcp_trace,
	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
}
16306
16307 /*
16308 * scsi_dmafree will always call tran_dmafree, when STATE_ARQ_DONE
16309 */
/* No-op tran_dmafree for pseudo FC HBAs: no DMA resources to free. */
static void
fcp_pseudo_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	FCP_TRACE(fcp_logq, "fcp_pseudo_dmafree", fcp_trace,
	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
}