1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * Copyright 2018 Nexenta Systems, Inc.
28 */
29
30 /*
31 * Fibre Channel SCSI ULP Mapping driver
32 */
33
34 #include <sys/scsi/scsi.h>
35 #include <sys/types.h>
36 #include <sys/varargs.h>
37 #include <sys/devctl.h>
38 #include <sys/thread.h>
39 #include <sys/thread.h>
40 #include <sys/open.h>
41 #include <sys/file.h>
42 #include <sys/sunndi.h>
43 #include <sys/console.h>
44 #include <sys/proc.h>
45 #include <sys/time.h>
46 #include <sys/utsname.h>
47 #include <sys/scsi/impl/scsi_reset_notify.h>
48 #include <sys/ndi_impldefs.h>
49 #include <sys/byteorder.h>
50 #include <sys/ctype.h>
51 #include <sys/sunmdi.h>
52
53 #include <sys/fibre-channel/fc.h>
54 #include <sys/fibre-channel/impl/fc_ulpif.h>
55 #include <sys/fibre-channel/ulp/fcpvar.h>
56
57 /*
58 * Discovery Process
59 * =================
60 *
61 * The discovery process is a major function of FCP. In order to help
62 * understand that function a flow diagram is given here. This diagram
63 * doesn't claim to cover all the cases and the events that can occur during
64 * the discovery process nor the subtleties of the code. The code paths shown
65 * are simplified. Its purpose is to help the reader (and potentially bug
66 * fixer) have an overall view of the logic of the code. For that reason the
 * diagram covers the simple case of the line coming up cleanly or of a new
 * port attaching to FCP while the link is up. The reader must keep in mind
69 * that:
70 *
71 * - There are special cases where bringing devices online and offline
72 * is driven by Ioctl.
73 *
74 * - The behavior of the discovery process can be modified through the
75 * .conf file.
76 *
77 * - The line can go down and come back up at any time during the
78 * discovery process which explains some of the complexity of the code.
79 *
80 * ............................................................................
81 *
82 * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
83 *
84 *
85 * +-------------------------+
86 * fp/fctl module --->| fcp_port_attach |
87 * +-------------------------+
88 * | |
89 * | |
90 * | v
91 * | +-------------------------+
92 * | | fcp_handle_port_attach |
93 * | +-------------------------+
94 * | |
95 * | |
96 * +--------------------+ |
97 * | |
98 * v v
99 * +-------------------------+
100 * | fcp_statec_callback |
101 * +-------------------------+
102 * |
103 * |
104 * v
105 * +-------------------------+
106 * | fcp_handle_devices |
107 * +-------------------------+
108 * |
109 * |
110 * v
111 * +-------------------------+
112 * | fcp_handle_mapflags |
113 * +-------------------------+
114 * |
115 * |
116 * v
117 * +-------------------------+
118 * | fcp_send_els |
119 * | |
120 * | PLOGI or PRLI To all the|
121 * | reachable devices. |
122 * +-------------------------+
123 *
124 *
125 * ............................................................................
126 *
127 * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
128 * STEP 1 are called (it is actually the same function).
129 *
130 *
131 * +-------------------------+
132 * | fcp_icmd_callback |
133 * fp/fctl module --->| |
134 * | callback for PLOGI and |
135 * | PRLI. |
136 * +-------------------------+
137 * |
138 * |
139 * Received PLOGI Accept /-\ Received PRLI Accept
140 * _ _ _ _ _ _ / \_ _ _ _ _ _
141 * | \ / |
142 * | \-/ |
143 * | |
144 * v v
145 * +-------------------------+ +-------------------------+
146 * | fcp_send_els | | fcp_send_scsi |
147 * | | | |
148 * | PRLI | | REPORT_LUN |
149 * +-------------------------+ +-------------------------+
150 *
151 * ............................................................................
152 *
153 * STEP 3: The callback functions of the SCSI commands issued by FCP are called
154 * (It is actually the same function).
155 *
156 *
157 * +-------------------------+
158 * fp/fctl module ------->| fcp_scsi_callback |
159 * +-------------------------+
160 * |
161 * |
162 * |
163 * Receive REPORT_LUN reply /-\ Receive INQUIRY PAGE83 reply
164 * _ _ _ _ _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ _ _ _ _
165 * | \ / |
166 * | \-/ |
167 * | | |
168 * | Receive INQUIRY reply| |
169 * | | |
170 * v v v
171 * +------------------------+ +----------------------+ +----------------------+
172 * | fcp_handle_reportlun | | fcp_handle_inquiry | | fcp_handle_page83 |
173 * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
174 * +------------------------+ +----------------------+ +----------------------+
175 * | | |
176 * | | |
177 * | | |
178 * v v |
179 * +-----------------+ +-----------------+ |
180 * | fcp_send_scsi | | fcp_send_scsi | |
181 * | | | | |
182 * | INQUIRY | | INQUIRY PAGE83 | |
183 * | (To each LUN) | +-----------------+ |
184 * +-----------------+ |
185 * |
186 * v
187 * +------------------------+
188 * | fcp_call_finish_init |
189 * +------------------------+
190 * |
191 * v
192 * +-----------------------------+
193 * | fcp_call_finish_init_held |
194 * +-----------------------------+
195 * |
196 * |
197 * All LUNs scanned /-\
198 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ / \
199 * | \ /
200 * | \-/
201 * v |
202 * +------------------+ |
203 * | fcp_finish_tgt | |
204 * +------------------+ |
205 * | Target Not Offline and |
206 * Target Not Offline and | not marked and tgt_node_state |
207 * marked /-\ not FCP_TGT_NODE_ON_DEMAND |
208 * _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ |
209 * | \ / | |
210 * | \-/ | |
211 * v v |
212 * +----------------------------+ +-------------------+ |
213 * | fcp_offline_target | | fcp_create_luns | |
214 * | | +-------------------+ |
215 * | A structure fcp_tgt_elem | | |
216 * | is created and queued in | v |
217 * | the FCP port list | +-------------------+ |
218 * | port_offline_tgts. It | | fcp_pass_to_hp | |
219 * | will be unqueued by the | | | |
220 * | watchdog timer. | | Called for each | |
221 * +----------------------------+ | LUN. Dispatches | |
222 * | | fcp_hp_task | |
223 * | +-------------------+ |
224 * | | |
225 * | | |
226 * | | |
227 * | +---------------->|
228 * | |
229 * +---------------------------------------------->|
230 * |
231 * |
232 * All the targets (devices) have been scanned /-\
233 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ / \
234 * | \ /
235 * | \-/
236 * +-------------------------------------+ |
237 * | fcp_finish_init | |
238 * | | |
239 * | Signal broadcasts the condition | |
240 * | variable port_config_cv of the FCP | |
241 * | port. One potential code sequence | |
242 * | waiting on the condition variable | |
243 * | the code sequence handling | |
244 * | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER| |
245 * | The other is in the function | |
246 * | fcp_reconfig_wait which is called | |
247 * | in the transmit path preventing IOs | |
248 * | from going through till the disco- | |
249 * | very process is over. | |
250 * +-------------------------------------+ |
251 * | |
252 * | |
253 * +--------------------------------->|
254 * |
255 * v
256 * Return
257 *
258 * ............................................................................
259 *
260 * STEP 4: The hot plug task is called (for each fcp_hp_elem).
261 *
262 *
263 * +-------------------------+
264 * | fcp_hp_task |
265 * +-------------------------+
266 * |
267 * |
268 * v
269 * +-------------------------+
270 * | fcp_trigger_lun |
271 * +-------------------------+
272 * |
273 * |
274 * v
275 * Bring offline /-\ Bring online
276 * _ _ _ _ _ _ _ _ _/ \_ _ _ _ _ _ _ _ _ _
277 * | \ / |
278 * | \-/ |
279 * v v
280 * +---------------------+ +-----------------------+
281 * | fcp_offline_child | | fcp_get_cip |
282 * +---------------------+ | |
283 * | Creates a dev_info_t |
284 * | or a mdi_pathinfo_t |
285 * | depending on whether |
286 * | mpxio is on or off. |
287 * +-----------------------+
288 * |
289 * |
290 * v
291 * +-----------------------+
292 * | fcp_online_child |
293 * | |
294 * | Set device online |
295 * | using NDI or MDI. |
296 * +-----------------------+
297 *
298 * ............................................................................
299 *
 * STEP 5: The watchdog timer expires. The watchdog timer does much more than
 * what is described here. We only show the target offline path.
302 *
303 *
304 * +--------------------------+
305 * | fcp_watch |
306 * +--------------------------+
307 * |
308 * |
309 * v
310 * +--------------------------+
311 * | fcp_scan_offline_tgts |
312 * +--------------------------+
313 * |
314 * |
315 * v
316 * +--------------------------+
317 * | fcp_offline_target_now |
318 * +--------------------------+
319 * |
320 * |
321 * v
322 * +--------------------------+
323 * | fcp_offline_tgt_luns |
324 * +--------------------------+
325 * |
326 * |
327 * v
328 * +--------------------------+
329 * | fcp_offline_lun |
330 * +--------------------------+
331 * |
332 * |
333 * v
334 * +----------------------------------+
335 * | fcp_offline_lun_now |
336 * | |
337 * | A request (or two if mpxio) is |
338 * | sent to the hot plug task using |
339 * | a fcp_hp_elem structure. |
340 * +----------------------------------+
341 */
342
343 /*
344 * Functions registered with DDI framework
345 */
346 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
347 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
348 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
349 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
350 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
351 cred_t *credp, int *rval);
352
353 /*
354 * Functions registered with FC Transport framework
355 */
356 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
357 fc_attach_cmd_t cmd, uint32_t s_id);
358 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
359 fc_detach_cmd_t cmd);
360 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
361 int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
362 uint32_t claimed);
363 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
364 fc_unsol_buf_t *buf, uint32_t claimed);
365 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
366 fc_unsol_buf_t *buf, uint32_t claimed);
367 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
368 uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
369 uint32_t dev_cnt, uint32_t port_sid);
370
371 /*
372 * Functions registered with SCSA framework
373 */
374 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
375 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
376 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
377 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
378 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
379 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
380 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
381 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
382 static int fcp_scsi_reset(struct scsi_address *ap, int level);
383 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
384 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
385 int whom);
386 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
387 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
388 void (*callback)(caddr_t), caddr_t arg);
389 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
390 char *name, ddi_eventcookie_t *event_cookiep);
391 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
392 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
393 ddi_callback_id_t *cb_id);
394 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
395 ddi_callback_id_t cb_id);
396 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
397 ddi_eventcookie_t eventid, void *impldata);
398 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
399 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
400 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
401 ddi_bus_config_op_t op, void *arg);
402
403 /*
404 * Internal functions
405 */
406 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
407 int mode, int *rval);
408
409 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
410 int mode, int *rval);
411 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
412 struct fcp_scsi_cmd *fscsi, int mode);
413 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
414 caddr_t base_addr, int mode);
415 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
416
417 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
418 la_wwn_t *pwwn, int *ret_val, int *fc_status, int *fc_pkt_state,
419 int *fc_pkt_reason, int *fc_pkt_action);
420 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
421 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
422 static int fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status,
423 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
424 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
425 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
426 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
427 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
428
429 static void fcp_handle_devices(struct fcp_port *pptr,
430 fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
431 fcp_map_tag_t *map_tag, int cause);
432 static int fcp_handle_mapflags(struct fcp_port *pptr,
433 struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
434 int tgt_cnt, int cause);
435 static int fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause);
436 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
437 struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
438 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
439 int cause);
440 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
441 uint32_t state);
442 static struct fcp_port *fcp_get_port(opaque_t port_handle);
443 static void fcp_unsol_callback(fc_packet_t *fpkt);
444 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
445 uchar_t r_ctl, uchar_t type);
446 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
447 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
448 struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
449 int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
450 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
451 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
452 int nodma, int flags);
453 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
454 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
455 uchar_t *wwn);
456 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
457 uint32_t d_id);
458 static void fcp_icmd_callback(fc_packet_t *fpkt);
459 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
460 int len, int lcount, int tcount, int cause, uint32_t rscn_count);
461 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
462 static void fcp_scsi_callback(fc_packet_t *fpkt);
463 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
464 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
465 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
466 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
467 uint16_t lun_num);
468 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
469 int link_cnt, int tgt_cnt, int cause);
470 static void fcp_finish_init(struct fcp_port *pptr);
471 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
472 int tgt_cnt, int cause);
473 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
474 int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags);
475 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
476 int link_cnt, int tgt_cnt, int nowait, int flags);
477 static void fcp_offline_target_now(struct fcp_port *pptr,
478 struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
479 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
480 int tgt_cnt, int flags);
481 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
482 int nowait, int flags);
483 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
484 int tgt_cnt);
485 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
486 int tgt_cnt, int flags);
487 static void fcp_scan_offline_luns(struct fcp_port *pptr);
488 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
489 static void fcp_update_offline_flags(struct fcp_lun *plun);
490 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
491 static void fcp_abort_commands(struct fcp_pkt *head, struct
492 fcp_port *pptr);
493 static void fcp_cmd_callback(fc_packet_t *fpkt);
494 static void fcp_complete_pkt(fc_packet_t *fpkt);
495 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
496 struct fcp_port *pptr);
497 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
498 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
499 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
500 static void fcp_dealloc_lun(struct fcp_lun *plun);
501 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
502 fc_portmap_t *map_entry, int link_cnt);
503 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
504 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
505 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
506 int internal);
507 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
508 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
509 uint32_t s_id, int instance);
510 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
511 int instance);
512 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
513 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
514 int);
515 static void fcp_kmem_cache_destructor(struct scsi_pkt *, scsi_hba_tran_t *);
516 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
517 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
518 int flags);
519 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
520 static int fcp_reset_target(struct scsi_address *ap, int level);
521 static int fcp_commoncap(struct scsi_address *ap, char *cap,
522 int val, int tgtonly, int doset);
523 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
524 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
525 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
526 int sleep);
527 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
528 uint32_t s_id, fc_attach_cmd_t cmd, int instance);
529 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
530 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
531 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
532 int lcount, int tcount);
533 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
534 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
535 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
536 int tgt_cnt);
537 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
538 dev_info_t *pdip, caddr_t name);
539 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
540 int lcount, int tcount, int flags, int *circ);
541 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
542 int lcount, int tcount, int flags, int *circ);
543 static void fcp_remove_child(struct fcp_lun *plun);
544 static void fcp_watch(void *arg);
545 static void fcp_check_reset_delay(struct fcp_port *pptr);
546 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
547 struct fcp_lun *rlun, int tgt_cnt);
548 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
549 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
550 uchar_t *wwn, uint16_t lun);
551 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
552 struct fcp_lun *plun);
553 static void fcp_post_callback(struct fcp_pkt *cmd);
554 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
555 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
556 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
557 child_info_t *cip);
558 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
559 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
560 int tgt_cnt, int flags);
561 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
562 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
563 int tgt_cnt, int flags, int wait);
564 static void fcp_retransport_cmd(struct fcp_port *pptr,
565 struct fcp_pkt *cmd);
566 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
567 uint_t statistics);
568 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
569 static void fcp_update_targets(struct fcp_port *pptr,
570 fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
571 static int fcp_call_finish_init(struct fcp_port *pptr,
572 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
573 static int fcp_call_finish_init_held(struct fcp_port *pptr,
574 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
575 static void fcp_reconfigure_luns(void * tgt_handle);
576 static void fcp_free_targets(struct fcp_port *pptr);
577 static void fcp_free_target(struct fcp_tgt *ptgt);
578 static int fcp_is_retryable(struct fcp_ipkt *icmd);
579 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
580 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
581 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
582 static void fcp_print_error(fc_packet_t *fpkt);
583 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
584 struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
585 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
586 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
587 uint32_t *dev_cnt);
588 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
589 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
590 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
591 struct fcp_ioctl *, struct fcp_port **);
592 static char *fcp_get_lun_path(struct fcp_lun *plun);
593 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
594 int *rval);
static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
600 static void fcp_reconfig_wait(struct fcp_port *pptr);
601
602 /*
603 * New functions added for mpxio support
604 */
605 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
606 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
607 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
608 int tcount);
609 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
610 dev_info_t *pdip);
611 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
612 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
613 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
614 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
615 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
616 int what);
617 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
618 fc_packet_t *fpkt);
619 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
620
621 /*
622 * New functions added for lun masking support
623 */
624 static void fcp_read_blacklist(dev_info_t *dip,
625 struct fcp_black_list_entry **pplun_blacklist);
626 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
627 struct fcp_black_list_entry **pplun_blacklist);
628 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
629 struct fcp_black_list_entry **pplun_blacklist);
630 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
631 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
632
633 /*
634 * New functions to support software FCA (like fcoei)
635 */
636 static struct scsi_pkt *fcp_pseudo_init_pkt(
637 struct scsi_address *ap, struct scsi_pkt *pkt,
638 struct buf *bp, int cmdlen, int statuslen,
639 int tgtlen, int flags, int (*callback)(), caddr_t arg);
640 static void fcp_pseudo_destroy_pkt(
641 struct scsi_address *ap, struct scsi_pkt *pkt);
642 static void fcp_pseudo_sync_pkt(
643 struct scsi_address *ap, struct scsi_pkt *pkt);
644 static int fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt);
645 static void fcp_pseudo_dmafree(
646 struct scsi_address *ap, struct scsi_pkt *pkt);
647
648 extern struct mod_ops mod_driverops;
649 /*
650 * This variable is defined in modctl.c and set to '1' after the root driver
651 * and fs are loaded. It serves as an indication that the root filesystem can
652 * be used.
653 */
654 extern int modrootloaded;
655 /*
656 * This table contains strings associated with the SCSI sense key codes. It
657 * is used by FCP to print a clear explanation of the code returned in the
658 * sense information by a device.
659 */
660 extern char *sense_keys[];
661 /*
662 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
663 * under this device that the paths to a physical device are created when
664 * MPxIO is used.
665 */
666 extern dev_info_t *scsi_vhci_dip;
667
668 /*
669 * Report lun processing
670 */
671 #define FCP_LUN_ADDRESSING 0x80
672 #define FCP_PD_ADDRESSING 0x00
673 #define FCP_VOLUME_ADDRESSING 0x40
674
675 #define FCP_SVE_THROTTLE 0x28 /* Vicom */
676 #define MAX_INT_DMA 0x7fffffff
677 /*
678 * Property definitions
679 */
680 #define NODE_WWN_PROP (char *)fcp_node_wwn_prop
681 #define PORT_WWN_PROP (char *)fcp_port_wwn_prop
682 #define TARGET_PROP (char *)fcp_target_prop
683 #define LUN_PROP (char *)fcp_lun_prop
684 #define SAM_LUN_PROP (char *)fcp_sam_lun_prop
685 #define CONF_WWN_PROP (char *)fcp_conf_wwn_prop
686 #define OBP_BOOT_WWN (char *)fcp_obp_boot_wwn
687 #define MANUAL_CFG_ONLY (char *)fcp_manual_config_only
688 #define INIT_PORT_PROP (char *)fcp_init_port_prop
689 #define TGT_PORT_PROP (char *)fcp_tgt_port_prop
690 #define LUN_BLACKLIST_PROP (char *)fcp_lun_blacklist_prop
691 /*
692 * Short hand macros.
693 */
694 #define LUN_PORT (plun->lun_tgt->tgt_port)
695 #define LUN_TGT (plun->lun_tgt)
696
697 /*
698 * Driver private macros
699 */
700 #define FCP_ATOB(x) (((x) >= '0' && (x) <= '9') ? ((x) - '0') : \
701 ((x) >= 'a' && (x) <= 'f') ? \
702 ((x) - 'a' + 10) : ((x) - 'A' + 10))
703
704 #define FCP_MAX(a, b) ((a) > (b) ? (a) : (b))
705
706 #define FCP_N_NDI_EVENTS \
707 (sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))
708
709 #define FCP_LINK_STATE_CHANGED(p, c) \
710 ((p)->port_link_cnt != (c)->ipkt_link_cnt)
711
712 #define FCP_TGT_STATE_CHANGED(t, c) \
713 ((t)->tgt_change_cnt != (c)->ipkt_change_cnt)
714
715 #define FCP_STATE_CHANGED(p, t, c) \
716 (FCP_TGT_STATE_CHANGED(t, c))
717
718 #define FCP_MUST_RETRY(fpkt) \
719 ((fpkt)->pkt_state == FC_PKT_LOCAL_BSY || \
720 (fpkt)->pkt_state == FC_PKT_LOCAL_RJT || \
721 (fpkt)->pkt_state == FC_PKT_TRAN_BSY || \
722 (fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS || \
723 (fpkt)->pkt_state == FC_PKT_NPORT_BSY || \
724 (fpkt)->pkt_state == FC_PKT_FABRIC_BSY || \
725 (fpkt)->pkt_state == FC_PKT_PORT_OFFLINE || \
726 (fpkt)->pkt_reason == FC_REASON_OFFLINE)
727
728 #define FCP_SENSE_REPORTLUN_CHANGED(es) \
729 ((es)->es_key == KEY_UNIT_ATTENTION && \
730 (es)->es_add_code == 0x3f && \
731 (es)->es_qual_code == 0x0e)
732
733 #define FCP_SENSE_NO_LUN(es) \
734 ((es)->es_key == KEY_ILLEGAL_REQUEST && \
735 (es)->es_add_code == 0x25 && \
736 (es)->es_qual_code == 0x0)
737
738 #define FCP_VERSION "20091208-1.192"
739 #define FCP_NAME_VERSION "SunFC FCP v" FCP_VERSION
740
741 #define FCP_NUM_ELEMENTS(array) \
742 (sizeof (array) / sizeof ((array)[0]))
743
744 /*
745 * Debugging, Error reporting, and tracing
746 */
747 #define FCP_LOG_SIZE 1024 * 1024
748
749 #define FCP_LEVEL_1 0x00001 /* attach/detach PM CPR */
750 #define FCP_LEVEL_2 0x00002 /* failures/Invalid data */
751 #define FCP_LEVEL_3 0x00004 /* state change, discovery */
752 #define FCP_LEVEL_4 0x00008 /* ULP messages */
753 #define FCP_LEVEL_5 0x00010 /* ELS/SCSI cmds */
754 #define FCP_LEVEL_6 0x00020 /* Transport failures */
755 #define FCP_LEVEL_7 0x00040
756 #define FCP_LEVEL_8 0x00080 /* I/O tracing */
757 #define FCP_LEVEL_9 0x00100 /* I/O tracing */
758
759
760
761 /*
762 * Log contents to system messages file
763 */
764 #define FCP_MSG_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
765 #define FCP_MSG_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
766 #define FCP_MSG_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
767 #define FCP_MSG_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
768 #define FCP_MSG_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
769 #define FCP_MSG_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
770 #define FCP_MSG_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
771 #define FCP_MSG_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
772 #define FCP_MSG_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_MSG)
773
774
775 /*
776 * Log contents to trace buffer
777 */
778 #define FCP_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
779 #define FCP_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
780 #define FCP_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
781 #define FCP_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
782 #define FCP_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
783 #define FCP_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
784 #define FCP_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
785 #define FCP_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
786 #define FCP_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF)
787
788
789 /*
790 * Log contents to both system messages file and trace buffer
791 */
792 #define FCP_MSG_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF | \
793 FC_TRACE_LOG_MSG)
794 #define FCP_MSG_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF | \
795 FC_TRACE_LOG_MSG)
796 #define FCP_MSG_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF | \
797 FC_TRACE_LOG_MSG)
798 #define FCP_MSG_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF | \
799 FC_TRACE_LOG_MSG)
800 #define FCP_MSG_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF | \
801 FC_TRACE_LOG_MSG)
802 #define FCP_MSG_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF | \
803 FC_TRACE_LOG_MSG)
804 #define FCP_MSG_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF | \
805 FC_TRACE_LOG_MSG)
806 #define FCP_MSG_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF | \
807 FC_TRACE_LOG_MSG)
808 #define FCP_MSG_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF | \
809 FC_TRACE_LOG_MSG)
810 #ifdef DEBUG
811 #define FCP_DTRACE fc_trace_debug
812 #else
813 #define FCP_DTRACE
814 #endif
815
816 #define FCP_TRACE fc_trace_debug
817
/*
 * Character-device entry points for the fcp pseudo device.  Only open,
 * close and ioctl are implemented; all other entry points are nodev.
 */
static struct cb_ops fcp_cb_ops = {
	fcp_open,			/* open */
	fcp_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	fcp_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};
838
839
/*
 * Device operations vector for the fcp driver.  attach/detach manage
 * the pseudo device node; per-port work is done through the FC ULP
 * callbacks in fcp_modinfo instead.
 */
static struct dev_ops fcp_ops = {
	DEVO_REV,
	0,
	ddi_getinfo_1to1,
	nulldev,		/* identify */
	nulldev,		/* probe */
	fcp_attach,		/* attach and detach are mandatory */
	fcp_detach,
	nodev,			/* reset */
	&fcp_cb_ops,		/* cb_ops */
	NULL,			/* bus_ops */
	NULL,			/* power */
};
853
854
/* Version string, exported for observability tools. */
char *fcp_version = FCP_NAME_VERSION;

/* Loadable-module linkage: this is a device driver (mod_driverops). */
static struct modldrv modldrv = {
	&mod_driverops,
	FCP_NAME_VERSION,
	&fcp_ops
};


static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
869
870
/*
 * ULP registration data handed to the FC transport via fc_ulp_add() in
 * _init().  The transport calls back through these entry points for
 * port attach/detach, ELS/data completions and state changes.
 */
static fc_ulp_modinfo_t fcp_modinfo = {
	&fcp_modinfo,		/* ulp_handle */
	FCTL_ULP_MODREV_4,	/* ulp_rev */
	FC4_SCSI_FCP,		/* ulp_type */
	"fcp",			/* ulp_name */
	FCP_STATEC_MASK,	/* ulp_statec_mask */
	fcp_port_attach,	/* ulp_port_attach */
	fcp_port_detach,	/* ulp_port_detach */
	fcp_port_ioctl,		/* ulp_port_ioctl */
	fcp_els_callback,	/* ulp_els_callback */
	fcp_data_callback,	/* ulp_data_callback */
	fcp_statec_callback	/* ulp_statec_callback */
};

/*
 * Default trace mask.  NOTE(review): the DEBUG and non-DEBUG
 * definitions below are currently identical; the #ifdef only preserves
 * the ability to diverge them later.
 */
#ifdef DEBUG
#define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
				FCP_LEVEL_6 | FCP_LEVEL_7)
#else
#define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
				FCP_LEVEL_6 | FCP_LEVEL_7)
#endif
896
/* FCP global variables */
int fcp_bus_config_debug = 0;
static int fcp_log_size = FCP_LOG_SIZE;
static int fcp_trace = FCP_TRACE_DEFAULT;	/* active trace mask */
static fc_trace_logq_t *fcp_logq = NULL;	/* trace buffer, _init() */
static struct fcp_black_list_entry *fcp_lun_blacklist = NULL;
/*
 * The auto-configuration is set by default. The only way of disabling it is
 * through the property MANUAL_CFG_ONLY in the fcp.conf file.
 */
static int fcp_enable_auto_configuration = 1;
static int fcp_max_bus_config_retries = 4;
/* NOTE(review): presumably retries waiting for LUN readiness - confirm */
static int fcp_lun_ready_retry = 300;
/*
 * The value assigned to the following variable has changed several times due
 * to a problem with the data underruns reporting of some firmware(s). The
 * current value of 50 gives a timeout value of 25 seconds for a max number
 * of 256 LUNs.
 */
static int fcp_max_target_retries = 50;
/*
 * Watchdog variables
 * ------------------
 *
 * fcp_watchdog_init
 *
 *	Indicates if the watchdog timer is running or not.  This is actually
 *	a counter of the number of Fibre Channel ports that attached.  When
 *	the first port attaches the watchdog is started.  When the last port
 *	detaches the watchdog timer is stopped.
 *
 * fcp_watchdog_time
 *
 *	This is the watchdog clock counter.  It is incremented by
 *	fcp_watchdog_time each time the watchdog timer expires.
 *
 * fcp_watchdog_timeout
 *
 *	Increment value of the variable fcp_watchdog_time as well as the
 *	the timeout value of the watchdog timer.  The unit is 1 second.  It
 *	is strange that this is not a #define but a variable since the code
 *	never changes this value.  The reason why it can be said that the
 *	unit is 1 second is because the number of ticks for the watchdog
 *	timer is determined like this:
 *
 *	    fcp_watchdog_tick = fcp_watchdog_timeout *
 *				drv_usectohz(1000000);
 *
 *	The value 1000000 is hard coded in the code.
 *
 * fcp_watchdog_tick
 *
 *	Watchdog timer value in ticks.
 */
static int fcp_watchdog_init = 0;
static int fcp_watchdog_time = 0;
static int fcp_watchdog_timeout = 1;
static int fcp_watchdog_tick;

/*
 * fcp_offline_delay is a global variable to enable customisation of
 * the timeout on link offlines or RSCNs.  The default value is set
 * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
 * specified in FCP4 Chapter 11 (see www.t10.org).
 *
 * The variable fcp_offline_delay is specified in SECONDS.
 *
 * If we made this a static var then the user would not be able to
 * change it.  This variable is set in fcp_attach().
 */
unsigned int fcp_offline_delay = FCP_OFFLINE_DELAY;

static void *fcp_softstate = NULL; /* for soft state */
static uchar_t fcp_oflag = FCP_IDLE; /* open flag */
static kmutex_t fcp_global_mutex;	/* protects fcp_port list, fcp_oflag */
static kmutex_t fcp_ioctl_mutex;	/* serializes FCP_TGT_SEND_SCSI */
static dev_info_t *fcp_global_dip = NULL;
static timeout_id_t fcp_watchdog_id;
const char *fcp_lun_prop = "lun";
const char *fcp_sam_lun_prop = "sam-lun";
const char *fcp_target_prop = "target";
/*
 * NOTE: consumers of "node-wwn" property include stmsboot in ON
 * consolidation.
 */
const char *fcp_node_wwn_prop = "node-wwn";
const char *fcp_port_wwn_prop = "port-wwn";
const char *fcp_conf_wwn_prop = "fc-port-wwn";
const char *fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
const char *fcp_manual_config_only = "manual_configuration_only";
const char *fcp_init_port_prop = "initiator-port";
const char *fcp_tgt_port_prop = "target-port";
const char *fcp_lun_blacklist_prop = "pwwn-lun-blacklist";

/* Singly-linked list of attached ports, protected by fcp_global_mutex. */
static struct fcp_port *fcp_port_head = NULL;
static ddi_eventcookie_t fcp_insert_eid;
static ddi_eventcookie_t fcp_remove_eid;

static ndi_event_definition_t fcp_ndi_event_defs[] = {
	{ FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
	{ FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
};

/*
 * List of valid commands for the scsi_ioctl call
 */
static uint8_t scsi_ioctl_list[] = {
	SCMD_INQUIRY,
	SCMD_REPORT_LUN,
	SCMD_READ_CAPACITY
};

/*
 * this is used to dummy up a report lun response for cases
 * where the target doesn't support it
 */
static uchar_t fcp_dummy_lun[] = {
	0x00,		/* MSB length (length = no of luns * 8) */
	0x00,
	0x00,
	0x08,		/* LSB length */
	0x00,		/* MSB reserved */
	0x00,
	0x00,
	0x00,		/* LSB reserved */
	FCP_PD_ADDRESSING,
	0x00,		/* LUN is ZERO at the first level */
	0x00,
	0x00,		/* second level is zero */
	0x00,
	0x00,		/* third level is zero */
	0x00,
	0x00		/* fourth level is zero */
};
1031
/*
 * Translation table indexed by arbitrated-loop physical address
 * (AL_PA); yields a compact index, with 0x00 marking values that are
 * not valid AL_PAs (apart from index 0 itself).
 */
static uchar_t fcp_alpa_to_switch[] = {
	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * NOTE(review): product-id string; presumably matched against INQUIRY
 * data for the VICOM SVE box listed in fcp_symmetric_disk_table --
 * confirm against users elsewhere in the file.
 */
static caddr_t pid = "SESS01          ";
1060
/* Static-analysis (lint) annotations describing the locking scheme. */
#if	!defined(lint)

_NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
    fcp_port::fcp_next fcp_watchdog_id))

_NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))

_NOTE(SCHEME_PROTECTS_DATA("Unshared",
    fcp_insert_eid
    fcp_remove_eid
    fcp_watchdog_time))

_NOTE(SCHEME_PROTECTS_DATA("Unshared",
    fcp_cb_ops
    fcp_ops
    callb_cpr))

#endif /* lint */

/*
 * This table is used to determine whether or not it's safe to copy in
 * the target node name for a lun.  Since all luns behind the same target
 * have the same wwnn, only tagets that do not support multiple luns are
 * eligible to be enumerated under mpxio if they aren't page83 compliant.
 */

char *fcp_symmetric_disk_table[] = {
	"SEAGATE ST",
	"IBM     DDYFT",
	"SUNW    SUNWGS",	/* Daktari enclosure */
	"SUN     SENA",		/* SES device */
	"SUN     SESS01"	/* VICOM SVE box */
};

int fcp_symmetric_disk_table_size =
	sizeof (fcp_symmetric_disk_table)/sizeof (char *);
1097
1098 /*
1099 * This structure is bogus. scsi_hba_attach_setup() requires, as in the kernel
1100 * will panic if you don't pass this in to the routine, this information.
1101 * Need to determine what the actual impact to the system is by providing
1102 * this information if any. Since dma allocation is done in pkt_init it may
1103 * not have any impact. These values are straight from the Writing Device
1104 * Driver manual.
1105 */
1106 static ddi_dma_attr_t pseudo_fca_dma_attr = {
1107 DMA_ATTR_V0, /* ddi_dma_attr version */
1108 0, /* low address */
1109 0xffffffff, /* high address */
1110 0x00ffffff, /* counter upper bound */
1111 1, /* alignment requirements */
1112 0x3f, /* burst sizes */
1113 1, /* minimum DMA access */
1114 0xffffffff, /* maximum DMA access */
1115 (1 << 24) - 1, /* segment boundary restrictions */
1116 1, /* scater/gather list length */
1117 512, /* device granularity */
1118 0 /* DMA flags */
1119 };
1120
1121 /*
1122 * The _init(9e) return value should be that of mod_install(9f). Under
1123 * some circumstances, a failure may not be related mod_install(9f) and
1124 * one would then require a return value to indicate the failure. Looking
1125 * at mod_install(9f), it is expected to return 0 for success and non-zero
1126 * for failure. mod_install(9f) for device drivers, further goes down the
1127 * calling chain and ends up in ddi_installdrv(), whose return values are
1128 * DDI_SUCCESS and DDI_FAILURE - There are also other functions in the
1129 * calling chain of mod_install(9f) which return values like EINVAL and
1130 * in some even return -1.
1131 *
1132 * To work around the vagaries of the mod_install() calling chain, return
1133 * either 0 or ENODEV depending on the success or failure of mod_install()
1134 */
1135 int
1136 _init(void)
1137 {
1138 int rval;
1139
1140 /*
1141 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
1142 * before registering with the transport first.
1143 */
1144 if (ddi_soft_state_init(&fcp_softstate,
1145 sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1146 return (EINVAL);
1147 }
1148
1149 mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1150 mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1151
1152 if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1153 cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1154 mutex_destroy(&fcp_global_mutex);
1155 mutex_destroy(&fcp_ioctl_mutex);
1156 ddi_soft_state_fini(&fcp_softstate);
1157 return (ENODEV);
1158 }
1159
1160 fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1161
1162 if ((rval = mod_install(&modlinkage)) != 0) {
1163 fc_trace_free_logq(fcp_logq);
1164 (void) fc_ulp_remove(&fcp_modinfo);
1165 mutex_destroy(&fcp_global_mutex);
1166 mutex_destroy(&fcp_ioctl_mutex);
1167 ddi_soft_state_fini(&fcp_softstate);
1168 rval = ENODEV;
1169 }
1170
1171 return (rval);
1172 }
1173
1174
1175 /*
1176 * the system is done with us as a driver, so clean up
1177 */
1178 int
1179 _fini(void)
1180 {
1181 int rval;
1182
1183 /*
1184 * don't start cleaning up until we know that the module remove
1185 * has worked -- if this works, then we know that each instance
1186 * has successfully been DDI_DETACHed
1187 */
1188 if ((rval = mod_remove(&modlinkage)) != 0) {
1189 return (rval);
1190 }
1191
1192 (void) fc_ulp_remove(&fcp_modinfo);
1193
1194 ddi_soft_state_fini(&fcp_softstate);
1195 mutex_destroy(&fcp_global_mutex);
1196 mutex_destroy(&fcp_ioctl_mutex);
1197 fc_trace_free_logq(fcp_logq);
1198
1199 return (rval);
1200 }
1201
1202
/*
 * _info(9E): return module information via mod_info(9F).
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
1208
1209
1210 /*
1211 * attach the module
1212 */
1213 static int
1214 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1215 {
1216 int rval = DDI_SUCCESS;
1217
1218 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1219 FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1220
1221 if (cmd == DDI_ATTACH) {
1222 /* The FCP pseudo device is created here. */
1223 mutex_enter(&fcp_global_mutex);
1224 fcp_global_dip = devi;
1225 mutex_exit(&fcp_global_mutex);
1226
1227 if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1228 0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1229 ddi_report_dev(fcp_global_dip);
1230 } else {
1231 cmn_err(CE_WARN, "FCP: Cannot create minor node");
1232 mutex_enter(&fcp_global_mutex);
1233 fcp_global_dip = NULL;
1234 mutex_exit(&fcp_global_mutex);
1235
1236 rval = DDI_FAILURE;
1237 }
1238 /*
1239 * We check the fcp_offline_delay property at this
1240 * point. This variable is global for the driver,
1241 * not specific to an instance.
1242 *
1243 * We do not recommend setting the value to less
1244 * than 10 seconds (RA_TOV_els), or greater than
1245 * 60 seconds.
1246 */
1247 fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1248 devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1249 "fcp_offline_delay", FCP_OFFLINE_DELAY);
1250 if ((fcp_offline_delay < 10) ||
1251 (fcp_offline_delay > 60)) {
1252 cmn_err(CE_WARN, "Setting fcp_offline_delay "
1253 "to %d second(s). This is outside the "
1254 "recommended range of 10..60 seconds.",
1255 fcp_offline_delay);
1256 }
1257 }
1258
1259 return (rval);
1260 }
1261
1262
/*
 * detach(9E) entry point.  Succeeds only when no FC ports are still
 * attached (fcp_port_head empty); otherwise returns DDI_FAILURE so the
 * module stays loaded.  Commands other than DDI_DETACH return
 * DDI_SUCCESS without doing anything.
 */
/*ARGSUSED*/
static int
fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	int res = DDI_SUCCESS;

	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_8, 0, "module detach: cmd=0x%x", cmd);

	if (cmd == DDI_DETACH) {
		/*
		 * Check if there are active ports/threads. If there
		 * are any, we will fail, else we will succeed (there
		 * should not be much to clean up)
		 */
		mutex_enter(&fcp_global_mutex);
		FCP_DTRACE(fcp_logq, "fcp",
		    fcp_trace, FCP_BUF_LEVEL_8, 0,  "port_head=%p",
		    (void *) fcp_port_head);

		if (fcp_port_head == NULL) {
			ddi_remove_minor_node(fcp_global_dip, NULL);
			fcp_global_dip = NULL;
			mutex_exit(&fcp_global_mutex);
		} else {
			mutex_exit(&fcp_global_mutex);
			res = DDI_FAILURE;
		}
	}
	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_8, 0, "module detach returning %d", res);

	return (res);
}
1297
1298
1299 /* ARGSUSED */
1300 static int
1301 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1302 {
1303 if (otype != OTYP_CHR) {
1304 return (EINVAL);
1305 }
1306
1307 /*
1308 * Allow only root to talk;
1309 */
1310 if (drv_priv(credp)) {
1311 return (EPERM);
1312 }
1313
1314 mutex_enter(&fcp_global_mutex);
1315 if (fcp_oflag & FCP_EXCL) {
1316 mutex_exit(&fcp_global_mutex);
1317 return (EBUSY);
1318 }
1319
1320 if (flag & FEXCL) {
1321 if (fcp_oflag & FCP_OPEN) {
1322 mutex_exit(&fcp_global_mutex);
1323 return (EBUSY);
1324 }
1325 fcp_oflag |= FCP_EXCL;
1326 }
1327 fcp_oflag |= FCP_OPEN;
1328 mutex_exit(&fcp_global_mutex);
1329
1330 return (0);
1331 }
1332
1333
1334 /* ARGSUSED */
1335 static int
1336 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1337 {
1338 if (otype != OTYP_CHR) {
1339 return (EINVAL);
1340 }
1341
1342 mutex_enter(&fcp_global_mutex);
1343 if (!(fcp_oflag & FCP_OPEN)) {
1344 mutex_exit(&fcp_global_mutex);
1345 return (ENODEV);
1346 }
1347 fcp_oflag = FCP_IDLE;
1348 mutex_exit(&fcp_global_mutex);
1349
1350 return (0);
1351 }
1352
1353
1354 /*
1355 * fcp_ioctl
1356 * Entry point for the FCP ioctls
1357 *
1358 * Input:
1359 * See ioctl(9E)
1360 *
1361 * Output:
1362 * See ioctl(9E)
1363 *
1364 * Returns:
1365 * See ioctl(9E)
1366 *
1367 * Context:
1368 * Kernel context.
1369 */
1370 /* ARGSUSED */
1371 static int
1372 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1373 int *rval)
1374 {
1375 int ret = 0;
1376
1377 mutex_enter(&fcp_global_mutex);
1378 if (!(fcp_oflag & FCP_OPEN)) {
1379 mutex_exit(&fcp_global_mutex);
1380 return (ENXIO);
1381 }
1382 mutex_exit(&fcp_global_mutex);
1383
1384 switch (cmd) {
1385 case FCP_TGT_INQUIRY:
1386 case FCP_TGT_CREATE:
1387 case FCP_TGT_DELETE:
1388 ret = fcp_setup_device_data_ioctl(cmd,
1389 (struct fcp_ioctl *)data, mode, rval);
1390 break;
1391
1392 case FCP_TGT_SEND_SCSI:
1393 mutex_enter(&fcp_ioctl_mutex);
1394 ret = fcp_setup_scsi_ioctl(
1395 (struct fcp_scsi_cmd *)data, mode, rval);
1396 mutex_exit(&fcp_ioctl_mutex);
1397 break;
1398
1399 case FCP_STATE_COUNT:
1400 ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1401 mode, rval);
1402 break;
1403 case FCP_GET_TARGET_MAPPINGS:
1404 ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1405 mode, rval);
1406 break;
1407 default:
1408 fcp_log(CE_WARN, NULL,
1409 "!Invalid ioctl opcode = 0x%x", cmd);
1410 ret = EINVAL;
1411 }
1412
1413 return (ret);
1414 }
1415
1416
1417 /*
1418 * fcp_setup_device_data_ioctl
1419 * Setup handler for the "device data" style of
1420 * ioctl for FCP. See "fcp_util.h" for data structure
1421 * definition.
1422 *
1423 * Input:
1424 * cmd = FCP ioctl command
1425 * data = ioctl data
1426 * mode = See ioctl(9E)
1427 *
1428 * Output:
1429 * data = ioctl data
1430 * rval = return value - see ioctl(9E)
1431 *
1432 * Returns:
1433 * See ioctl(9E)
1434 *
1435 * Context:
1436 * Kernel context.
1437 */
1438 /* ARGSUSED */
1439 static int
1440 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1441 int *rval)
1442 {
1443 struct fcp_port *pptr;
1444 struct device_data *dev_data;
1445 uint32_t link_cnt;
1446 la_wwn_t *wwn_ptr = NULL;
1447 struct fcp_tgt *ptgt = NULL;
1448 struct fcp_lun *plun = NULL;
1449 int i, error;
1450 struct fcp_ioctl fioctl;
1451
1452 #ifdef _MULTI_DATAMODEL
1453 switch (ddi_model_convert_from(mode & FMODELS)) {
1454 case DDI_MODEL_ILP32: {
1455 struct fcp32_ioctl f32_ioctl;
1456
1457 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1458 sizeof (struct fcp32_ioctl), mode)) {
1459 return (EFAULT);
1460 }
1461 fioctl.fp_minor = f32_ioctl.fp_minor;
1462 fioctl.listlen = f32_ioctl.listlen;
1463 fioctl.list = (caddr_t)(long)f32_ioctl.list;
1464 break;
1465 }
1466 case DDI_MODEL_NONE:
1467 if (ddi_copyin((void *)data, (void *)&fioctl,
1468 sizeof (struct fcp_ioctl), mode)) {
1469 return (EFAULT);
1470 }
1471 break;
1472 }
1473
1474 #else /* _MULTI_DATAMODEL */
1475 if (ddi_copyin((void *)data, (void *)&fioctl,
1476 sizeof (struct fcp_ioctl), mode)) {
1477 return (EFAULT);
1478 }
1479 #endif /* _MULTI_DATAMODEL */
1480
1481 /*
1482 * Right now we can assume that the minor number matches with
1483 * this instance of fp. If this changes we will need to
1484 * revisit this logic.
1485 */
1486 mutex_enter(&fcp_global_mutex);
1487 pptr = fcp_port_head;
1488 while (pptr) {
1489 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1490 break;
1491 } else {
1492 pptr = pptr->port_next;
1493 }
1494 }
1495 mutex_exit(&fcp_global_mutex);
1496 if (pptr == NULL) {
1497 return (ENXIO);
1498 }
1499 mutex_enter(&pptr->port_mutex);
1500
1501
1502 if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1503 fioctl.listlen, KM_NOSLEEP)) == NULL) {
1504 mutex_exit(&pptr->port_mutex);
1505 return (ENOMEM);
1506 }
1507
1508 if (ddi_copyin(fioctl.list, dev_data,
1509 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1510 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1511 mutex_exit(&pptr->port_mutex);
1512 return (EFAULT);
1513 }
1514 link_cnt = pptr->port_link_cnt;
1515
1516 if (cmd == FCP_TGT_INQUIRY) {
1517 wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1518 if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1519 sizeof (wwn_ptr->raw_wwn)) == 0) {
1520 /* This ioctl is requesting INQ info of local HBA */
1521 mutex_exit(&pptr->port_mutex);
1522 dev_data[0].dev0_type = DTYPE_UNKNOWN;
1523 dev_data[0].dev_status = 0;
1524 if (ddi_copyout(dev_data, fioctl.list,
1525 (sizeof (struct device_data)) * fioctl.listlen,
1526 mode)) {
1527 kmem_free(dev_data,
1528 sizeof (*dev_data) * fioctl.listlen);
1529 return (EFAULT);
1530 }
1531 kmem_free(dev_data,
1532 sizeof (*dev_data) * fioctl.listlen);
1533 #ifdef _MULTI_DATAMODEL
1534 switch (ddi_model_convert_from(mode & FMODELS)) {
1535 case DDI_MODEL_ILP32: {
1536 struct fcp32_ioctl f32_ioctl;
1537 f32_ioctl.fp_minor = fioctl.fp_minor;
1538 f32_ioctl.listlen = fioctl.listlen;
1539 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1540 if (ddi_copyout((void *)&f32_ioctl,
1541 (void *)data,
1542 sizeof (struct fcp32_ioctl), mode)) {
1543 return (EFAULT);
1544 }
1545 break;
1546 }
1547 case DDI_MODEL_NONE:
1548 if (ddi_copyout((void *)&fioctl, (void *)data,
1549 sizeof (struct fcp_ioctl), mode)) {
1550 return (EFAULT);
1551 }
1552 break;
1553 }
1554 #else /* _MULTI_DATAMODEL */
1555 if (ddi_copyout((void *)&fioctl, (void *)data,
1556 sizeof (struct fcp_ioctl), mode)) {
1557 return (EFAULT);
1558 }
1559 #endif /* _MULTI_DATAMODEL */
1560 return (0);
1561 }
1562 }
1563
1564 if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1565 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1566 mutex_exit(&pptr->port_mutex);
1567 return (ENXIO);
1568 }
1569
1570 for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1571 i++) {
1572 wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1573
1574 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1575
1576
1577 dev_data[i].dev_status = ENXIO;
1578
1579 if ((ptgt = fcp_lookup_target(pptr,
1580 (uchar_t *)wwn_ptr)) == NULL) {
1581 mutex_exit(&pptr->port_mutex);
1582 if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1583 wwn_ptr, &error, 0) == NULL) {
1584 dev_data[i].dev_status = ENODEV;
1585 mutex_enter(&pptr->port_mutex);
1586 continue;
1587 } else {
1588
1589 dev_data[i].dev_status = EAGAIN;
1590
1591 mutex_enter(&pptr->port_mutex);
1592 continue;
1593 }
1594 } else {
1595 mutex_enter(&ptgt->tgt_mutex);
1596 if (ptgt->tgt_state & (FCP_TGT_MARK |
1597 FCP_TGT_BUSY)) {
1598 dev_data[i].dev_status = EAGAIN;
1599 mutex_exit(&ptgt->tgt_mutex);
1600 continue;
1601 }
1602
1603 if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1604 if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1605 dev_data[i].dev_status = ENOTSUP;
1606 } else {
1607 dev_data[i].dev_status = ENXIO;
1608 }
1609 mutex_exit(&ptgt->tgt_mutex);
1610 continue;
1611 }
1612
1613 switch (cmd) {
1614 case FCP_TGT_INQUIRY:
1615 /*
1616 * The reason we give device type of
1617 * lun 0 only even though in some
1618 * cases(like maxstrat) lun 0 device
1619 * type may be 0x3f(invalid) is that
1620 * for bridge boxes target will appear
1621 * as luns and the first lun could be
1622 * a device that utility may not care
1623 * about (like a tape device).
1624 */
1625 dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1626 dev_data[i].dev_status = 0;
1627 mutex_exit(&ptgt->tgt_mutex);
1628
1629 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1630 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1631 } else {
1632 dev_data[i].dev0_type = plun->lun_type;
1633 }
1634 mutex_enter(&ptgt->tgt_mutex);
1635 break;
1636
1637 case FCP_TGT_CREATE:
1638 mutex_exit(&ptgt->tgt_mutex);
1639 mutex_exit(&pptr->port_mutex);
1640
1641 /*
1642 * serialize state change call backs.
1643 * only one call back will be handled
1644 * at a time.
1645 */
1646 mutex_enter(&fcp_global_mutex);
1647 if (fcp_oflag & FCP_BUSY) {
1648 mutex_exit(&fcp_global_mutex);
1649 if (dev_data) {
1650 kmem_free(dev_data,
1651 sizeof (*dev_data) *
1652 fioctl.listlen);
1653 }
1654 return (EBUSY);
1655 }
1656 fcp_oflag |= FCP_BUSY;
1657 mutex_exit(&fcp_global_mutex);
1658
1659 dev_data[i].dev_status =
1660 fcp_create_on_demand(pptr,
1661 wwn_ptr->raw_wwn);
1662
1663 if (dev_data[i].dev_status != 0) {
1664 char buf[25];
1665
1666 for (i = 0; i < FC_WWN_SIZE; i++) {
1667 (void) sprintf(&buf[i << 1],
1668 "%02x",
1669 wwn_ptr->raw_wwn[i]);
1670 }
1671
1672 fcp_log(CE_WARN, pptr->port_dip,
1673 "!Failed to create nodes for"
1674 " pwwn=%s; error=%x", buf,
1675 dev_data[i].dev_status);
1676 }
1677
1678 /* allow state change call backs again */
1679 mutex_enter(&fcp_global_mutex);
1680 fcp_oflag &= ~FCP_BUSY;
1681 mutex_exit(&fcp_global_mutex);
1682
1683 mutex_enter(&pptr->port_mutex);
1684 mutex_enter(&ptgt->tgt_mutex);
1685
1686 break;
1687
1688 case FCP_TGT_DELETE:
1689 break;
1690
1691 default:
1692 fcp_log(CE_WARN, pptr->port_dip,
1693 "!Invalid device data ioctl "
1694 "opcode = 0x%x", cmd);
1695 }
1696 mutex_exit(&ptgt->tgt_mutex);
1697 }
1698 }
1699 mutex_exit(&pptr->port_mutex);
1700
1701 if (ddi_copyout(dev_data, fioctl.list,
1702 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1703 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1704 return (EFAULT);
1705 }
1706 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1707
1708 #ifdef _MULTI_DATAMODEL
1709 switch (ddi_model_convert_from(mode & FMODELS)) {
1710 case DDI_MODEL_ILP32: {
1711 struct fcp32_ioctl f32_ioctl;
1712
1713 f32_ioctl.fp_minor = fioctl.fp_minor;
1714 f32_ioctl.listlen = fioctl.listlen;
1715 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1716 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1717 sizeof (struct fcp32_ioctl), mode)) {
1718 return (EFAULT);
1719 }
1720 break;
1721 }
1722 case DDI_MODEL_NONE:
1723 if (ddi_copyout((void *)&fioctl, (void *)data,
1724 sizeof (struct fcp_ioctl), mode)) {
1725 return (EFAULT);
1726 }
1727 break;
1728 }
1729 #else /* _MULTI_DATAMODEL */
1730
1731 if (ddi_copyout((void *)&fioctl, (void *)data,
1732 sizeof (struct fcp_ioctl), mode)) {
1733 return (EFAULT);
1734 }
1735 #endif /* _MULTI_DATAMODEL */
1736
1737 return (0);
1738 }
1739
1740 /*
1741 * Fetch the target mappings (path, etc.) for all LUNs
1742 * on this port.
1743 */
1744 /* ARGSUSED */
1745 static int
1746 fcp_get_target_mappings(struct fcp_ioctl *data,
1747 int mode, int *rval)
1748 {
1749 struct fcp_port *pptr;
1750 fc_hba_target_mappings_t *mappings;
1751 fc_hba_mapping_entry_t *map;
1752 struct fcp_tgt *ptgt = NULL;
1753 struct fcp_lun *plun = NULL;
1754 int i, mapIndex, mappingSize;
1755 int listlen;
1756 struct fcp_ioctl fioctl;
1757 char *path;
1758 fcp_ent_addr_t sam_lun_addr;
1759
1760 #ifdef _MULTI_DATAMODEL
1761 switch (ddi_model_convert_from(mode & FMODELS)) {
1762 case DDI_MODEL_ILP32: {
1763 struct fcp32_ioctl f32_ioctl;
1764
1765 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1766 sizeof (struct fcp32_ioctl), mode)) {
1767 return (EFAULT);
1768 }
1769 fioctl.fp_minor = f32_ioctl.fp_minor;
1770 fioctl.listlen = f32_ioctl.listlen;
1771 fioctl.list = (caddr_t)(long)f32_ioctl.list;
1772 break;
1773 }
1774 case DDI_MODEL_NONE:
1775 if (ddi_copyin((void *)data, (void *)&fioctl,
1776 sizeof (struct fcp_ioctl), mode)) {
1777 return (EFAULT);
1778 }
1779 break;
1780 }
1781
1782 #else /* _MULTI_DATAMODEL */
1783 if (ddi_copyin((void *)data, (void *)&fioctl,
1784 sizeof (struct fcp_ioctl), mode)) {
1785 return (EFAULT);
1786 }
1787 #endif /* _MULTI_DATAMODEL */
1788
1789 /*
1790 * Right now we can assume that the minor number matches with
1791 * this instance of fp. If this changes we will need to
1792 * revisit this logic.
1793 */
1794 mutex_enter(&fcp_global_mutex);
1795 pptr = fcp_port_head;
1796 while (pptr) {
1797 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1798 break;
1799 } else {
1800 pptr = pptr->port_next;
1801 }
1802 }
1803 mutex_exit(&fcp_global_mutex);
1804 if (pptr == NULL) {
1805 cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
1806 fioctl.fp_minor);
1807 return (ENXIO);
1808 }
1809
1810
1811 /* We use listlen to show the total buffer size */
1812 mappingSize = fioctl.listlen;
1813
1814 /* Now calculate how many mapping entries will fit */
1815 listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
1816 - sizeof (fc_hba_target_mappings_t);
1817 if (listlen <= 0) {
1818 cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
1819 return (ENXIO);
1820 }
1821 listlen = listlen / sizeof (fc_hba_mapping_entry_t);
1822
1823 if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
1824 return (ENOMEM);
1825 }
1826 mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;
1827
1828 /* Now get to work */
1829 mapIndex = 0;
1830
1831 mutex_enter(&pptr->port_mutex);
1832 /* Loop through all targets on this port */
1833 for (i = 0; i < FCP_NUM_HASH; i++) {
1834 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
1835 ptgt = ptgt->tgt_next) {
1836
1837 mutex_enter(&ptgt->tgt_mutex);
1838
1839 /* Loop through all LUNs on this target */
1840 for (plun = ptgt->tgt_lun; plun != NULL;
1841 plun = plun->lun_next) {
1842 if (plun->lun_state & FCP_LUN_OFFLINE) {
1843 continue;
1844 }
1845
1846 path = fcp_get_lun_path(plun);
1847 if (path == NULL) {
1848 continue;
1849 }
1850
1851 if (mapIndex >= listlen) {
1852 mapIndex ++;
1853 kmem_free(path, MAXPATHLEN);
1854 continue;
1855 }
1856 map = &mappings->entries[mapIndex++];
1857 bcopy(path, map->targetDriver,
1858 sizeof (map->targetDriver));
1859 map->d_id = ptgt->tgt_d_id;
1860 map->busNumber = 0;
1861 map->targetNumber = ptgt->tgt_d_id;
1862 map->osLUN = plun->lun_num;
1863
1864 /*
1865 * We had swapped lun when we stored it in
1866 * lun_addr. We need to swap it back before
1867 * returning it to user land
1868 */
1869
1870 sam_lun_addr.ent_addr_0 =
1871 BE_16(plun->lun_addr.ent_addr_0);
1872 sam_lun_addr.ent_addr_1 =
1873 BE_16(plun->lun_addr.ent_addr_1);
1874 sam_lun_addr.ent_addr_2 =
1875 BE_16(plun->lun_addr.ent_addr_2);
1876 sam_lun_addr.ent_addr_3 =
1877 BE_16(plun->lun_addr.ent_addr_3);
1878
1879 bcopy(&sam_lun_addr, &map->samLUN,
1880 FCP_LUN_SIZE);
1881 bcopy(ptgt->tgt_node_wwn.raw_wwn,
1882 map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
1883 bcopy(ptgt->tgt_port_wwn.raw_wwn,
1884 map->PortWWN.raw_wwn, sizeof (la_wwn_t));
1885
1886 if (plun->lun_guid) {
1887
1888 /* convert ascii wwn to bytes */
1889 fcp_ascii_to_wwn(plun->lun_guid,
1890 map->guid, sizeof (map->guid));
1891
1892 if ((sizeof (map->guid)) <
1893 plun->lun_guid_size / 2) {
1894 cmn_err(CE_WARN,
1895 "fcp_get_target_mappings:"
1896 "guid copy space "
1897 "insufficient."
1898 "Copy Truncation - "
1899 "available %d; need %d",
1900 (int)sizeof (map->guid),
1901 (int)
1902 plun->lun_guid_size / 2);
1903 }
1904 }
1905 kmem_free(path, MAXPATHLEN);
1906 }
1907 mutex_exit(&ptgt->tgt_mutex);
1908 }
1909 }
1910 mutex_exit(&pptr->port_mutex);
1911 mappings->numLuns = mapIndex;
1912
1913 if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
1914 kmem_free(mappings, mappingSize);
1915 return (EFAULT);
1916 }
1917 kmem_free(mappings, mappingSize);
1918
1919 #ifdef _MULTI_DATAMODEL
1920 switch (ddi_model_convert_from(mode & FMODELS)) {
1921 case DDI_MODEL_ILP32: {
1922 struct fcp32_ioctl f32_ioctl;
1923
1924 f32_ioctl.fp_minor = fioctl.fp_minor;
1925 f32_ioctl.listlen = fioctl.listlen;
1926 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1927 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1928 sizeof (struct fcp32_ioctl), mode)) {
1929 return (EFAULT);
1930 }
1931 break;
1932 }
1933 case DDI_MODEL_NONE:
1934 if (ddi_copyout((void *)&fioctl, (void *)data,
1935 sizeof (struct fcp_ioctl), mode)) {
1936 return (EFAULT);
1937 }
1938 break;
1939 }
1940 #else /* _MULTI_DATAMODEL */
1941
1942 if (ddi_copyout((void *)&fioctl, (void *)data,
1943 sizeof (struct fcp_ioctl), mode)) {
1944 return (EFAULT);
1945 }
1946 #endif /* _MULTI_DATAMODEL */
1947
1948 return (0);
1949 }
1950
1951 /*
1952 * fcp_setup_scsi_ioctl
1953 * Setup handler for the "scsi passthru" style of
1954 * ioctl for FCP. See "fcp_util.h" for data structure
1955 * definition.
1956 *
1957 * Input:
1958 * u_fscsi = ioctl data (user address space)
1959 * mode = See ioctl(9E)
1960 *
1961 * Output:
1962 * u_fscsi = ioctl data (user address space)
1963 * rval = return value - see ioctl(9E)
1964 *
1965 * Returns:
1966 * 0 = OK
1967 * EAGAIN = See errno.h
1968 * EBUSY = See errno.h
1969 * EFAULT = See errno.h
1970 * EINTR = See errno.h
1971 * EINVAL = See errno.h
1972 * EIO = See errno.h
1973 * ENOMEM = See errno.h
1974 * ENXIO = See errno.h
1975 *
1976 * Context:
1977 * Kernel context.
1978 */
1979 /* ARGSUSED */
static int
fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
    int mode, int *rval)
{
	int			ret		= 0;
	int			temp_ret;
	caddr_t			k_cdbbufaddr	= NULL;
	caddr_t			k_bufaddr	= NULL;
	caddr_t			k_rqbufaddr	= NULL;
	caddr_t			u_cdbbufaddr;
	caddr_t			u_bufaddr;
	caddr_t			u_rqbufaddr;
	struct fcp_scsi_cmd	k_fscsi;

	/*
	 * Get fcp_scsi_cmd array element from user address space
	 */
	if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
	    != 0) {
		return (ret);
	}


	/*
	 * Even though kmem_alloc() checks the validity of the
	 * buffer length, this check is needed when the
	 * kmem_flags set and the zero buffer length is passed.
	 */
	if ((k_fscsi.scsi_cdblen <= 0) ||
	    (k_fscsi.scsi_buflen <= 0) ||
	    (k_fscsi.scsi_rqlen <= 0)) {
		return (EINVAL);
	}

	/*
	 * Allocate data for fcp_scsi_cmd pointer fields
	 *
	 * Note: the three lengths are user-supplied; KM_NOSLEEP is used
	 * so an unreasonable request fails with ENOMEM instead of
	 * blocking the caller.  The `ret == 0' guard is defensive --
	 * ret is always 0 here after the checks above.
	 */
	if (ret == 0) {
		k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
		k_bufaddr = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
		k_rqbufaddr = kmem_alloc(k_fscsi.scsi_rqlen, KM_NOSLEEP);

		if (k_cdbbufaddr == NULL ||
		    k_bufaddr == NULL ||
		    k_rqbufaddr == NULL) {
			ret = ENOMEM;
		}
	}

	/*
	 * Get fcp_scsi_cmd pointer fields from user
	 * address space
	 */
	if (ret == 0) {
		u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
		u_bufaddr = k_fscsi.scsi_bufaddr;
		u_rqbufaddr = k_fscsi.scsi_rqbufaddr;

		if (ddi_copyin(u_cdbbufaddr,
		    k_cdbbufaddr,
		    k_fscsi.scsi_cdblen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyin(u_bufaddr,
		    k_bufaddr,
		    k_fscsi.scsi_buflen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyin(u_rqbufaddr,
		    k_rqbufaddr,
		    k_fscsi.scsi_rqlen,
		    mode)) {
			ret = EFAULT;
		}
	}

	/*
	 * Send scsi command (blocking)
	 */
	if (ret == 0) {
		/*
		 * Prior to sending the scsi command, the
		 * fcp_scsi_cmd data structure must contain kernel,
		 * not user, addresses.
		 */
		k_fscsi.scsi_cdbbufaddr = k_cdbbufaddr;
		k_fscsi.scsi_bufaddr = k_bufaddr;
		k_fscsi.scsi_rqbufaddr = k_rqbufaddr;

		ret = fcp_send_scsi_ioctl(&k_fscsi);

		/*
		 * After sending the scsi command, the
		 * fcp_scsi_cmd data structure must contain user,
		 * not kernel, addresses.  This restore happens even
		 * when fcp_send_scsi_ioctl() fails, so the structure
		 * copied back to the user below never leaks kernel
		 * pointers.
		 */
		k_fscsi.scsi_cdbbufaddr = u_cdbbufaddr;
		k_fscsi.scsi_bufaddr = u_bufaddr;
		k_fscsi.scsi_rqbufaddr = u_rqbufaddr;
	}

	/*
	 * Put fcp_scsi_cmd pointer fields to user address space
	 */
	if (ret == 0) {
		if (ddi_copyout(k_cdbbufaddr,
		    u_cdbbufaddr,
		    k_fscsi.scsi_cdblen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyout(k_bufaddr,
		    u_bufaddr,
		    k_fscsi.scsi_buflen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyout(k_rqbufaddr,
		    u_rqbufaddr,
		    k_fscsi.scsi_rqlen,
		    mode)) {
			ret = EFAULT;
		}
	}

	/*
	 * Free data for fcp_scsi_cmd pointer fields.  Any of the three
	 * may be NULL if its kmem_alloc() above failed.
	 */
	if (k_cdbbufaddr != NULL) {
		kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
	}
	if (k_bufaddr != NULL) {
		kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
	}
	if (k_rqbufaddr != NULL) {
		kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
	}

	/*
	 * Put fcp_scsi_cmd array element to user address space.
	 * This is done unconditionally so status fields (e.g.
	 * scsi_fc_status set by fcp_send_scsi_ioctl) reach the caller
	 * even on error; a copyout failure overrides the earlier
	 * return code.
	 */
	temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
	if (temp_ret != 0) {
		ret = temp_ret;
	}

	/*
	 * Return status
	 */
	return (ret);
}
2129
2130
2131 /*
2132 * fcp_copyin_scsi_cmd
2133 * Copy in fcp_scsi_cmd data structure from user address space.
2134 * The data may be in 32 bit or 64 bit modes.
2135 *
2136 * Input:
2137 * base_addr = from address (user address space)
2138 * mode = See ioctl(9E) and ddi_copyin(9F)
2139 *
2140 * Output:
2141 * fscsi = to address (kernel address space)
2142 *
2143 * Returns:
2144 * 0 = OK
2145 * EFAULT = Error
2146 *
2147 * Context:
2148 * Kernel context.
2149 */
2150 static int
2151 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2152 {
2153 #ifdef _MULTI_DATAMODEL
2154 struct fcp32_scsi_cmd f32scsi;
2155
2156 switch (ddi_model_convert_from(mode & FMODELS)) {
2157 case DDI_MODEL_ILP32:
2158 /*
2159 * Copy data from user address space
2160 */
2161 if (ddi_copyin((void *)base_addr,
2162 &f32scsi,
2163 sizeof (struct fcp32_scsi_cmd),
2164 mode)) {
2165 return (EFAULT);
2166 }
2167 /*
2168 * Convert from 32 bit to 64 bit
2169 */
2170 FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2171 break;
2172 case DDI_MODEL_NONE:
2173 /*
2174 * Copy data from user address space
2175 */
2176 if (ddi_copyin((void *)base_addr,
2177 fscsi,
2178 sizeof (struct fcp_scsi_cmd),
2179 mode)) {
2180 return (EFAULT);
2181 }
2182 break;
2183 }
2184 #else /* _MULTI_DATAMODEL */
2185 /*
2186 * Copy data from user address space
2187 */
2188 if (ddi_copyin((void *)base_addr,
2189 fscsi,
2190 sizeof (struct fcp_scsi_cmd),
2191 mode)) {
2192 return (EFAULT);
2193 }
2194 #endif /* _MULTI_DATAMODEL */
2195
2196 return (0);
2197 }
2198
2199
2200 /*
2201 * fcp_copyout_scsi_cmd
2202 * Copy out fcp_scsi_cmd data structure to user address space.
2203 * The data may be in 32 bit or 64 bit modes.
2204 *
 * Input:
 *	fscsi		= from address (kernel address space)
 *	mode		= See ioctl(9E) and ddi_copyout(9F)
 *
 * Output:
 *	base_addr	= to address (user address space)
2211 *
2212 * Returns:
2213 * 0 = OK
2214 * EFAULT = Error
2215 *
2216 * Context:
2217 * Kernel context.
2218 */
2219 static int
2220 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2221 {
2222 #ifdef _MULTI_DATAMODEL
2223 struct fcp32_scsi_cmd f32scsi;
2224
2225 switch (ddi_model_convert_from(mode & FMODELS)) {
2226 case DDI_MODEL_ILP32:
2227 /*
2228 * Convert from 64 bit to 32 bit
2229 */
2230 FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2231 /*
2232 * Copy data to user address space
2233 */
2234 if (ddi_copyout(&f32scsi,
2235 (void *)base_addr,
2236 sizeof (struct fcp32_scsi_cmd),
2237 mode)) {
2238 return (EFAULT);
2239 }
2240 break;
2241 case DDI_MODEL_NONE:
2242 /*
2243 * Copy data to user address space
2244 */
2245 if (ddi_copyout(fscsi,
2246 (void *)base_addr,
2247 sizeof (struct fcp_scsi_cmd),
2248 mode)) {
2249 return (EFAULT);
2250 }
2251 break;
2252 }
2253 #else /* _MULTI_DATAMODEL */
2254 /*
2255 * Copy data to user address space
2256 */
2257 if (ddi_copyout(fscsi,
2258 (void *)base_addr,
2259 sizeof (struct fcp_scsi_cmd),
2260 mode)) {
2261 return (EFAULT);
2262 }
2263 #endif /* _MULTI_DATAMODEL */
2264
2265 return (0);
2266 }
2267
2268
2269 /*
2270 * fcp_send_scsi_ioctl
2271 * Sends the SCSI command in blocking mode.
2272 *
2273 * Input:
2274 * fscsi = SCSI command data structure
2275 *
2276 * Output:
2277 * fscsi = SCSI command data structure
2278 *
2279 * Returns:
2280 * 0 = OK
2281 * EAGAIN = See errno.h
2282 * EBUSY = See errno.h
2283 * EINTR = See errno.h
2284 * EINVAL = See errno.h
2285 * EIO = See errno.h
2286 * ENOMEM = See errno.h
2287 * ENXIO = See errno.h
2288 *
2289 * Context:
2290 * Kernel context.
2291 */
static int
fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
{
	struct fcp_lun	*plun		= NULL;
	struct fcp_port	*pptr		= NULL;
	struct fcp_tgt	*ptgt		= NULL;
	fc_packet_t	*fpkt		= NULL;
	struct fcp_ipkt	*icmd		= NULL;
	int		target_created	= FALSE;
	fc_frame_hdr_t	*hp;
	struct fcp_cmd	fcp_cmd;
	struct fcp_cmd	*fcmd;
	union scsi_cdb	*scsi_cdb;
	la_wwn_t	*wwn_ptr;
	int		nodma;
	struct fcp_rsp	*rsp;
	struct fcp_rsp_info	*rsp_info;
	caddr_t		rsp_sense;
	int		buf_len;
	int		info_len;
	int		sense_len;
	struct scsi_extended_sense	*sense_to = NULL;
	timeout_id_t	tid;
	uint8_t		reconfig_lun = FALSE;
	uint8_t		reconfig_pending = FALSE;
	uint8_t		scsi_cmd;
	int		rsp_len;
	int		cmd_index;
	int		fc_status;
	int		pkt_state;
	int		pkt_action;
	int		pkt_reason;
	int		ret, xport_retval = ~FC_SUCCESS; /* != FC_SUCCESS */
	int		lcount;
	int		tcount;
	int		reconfig_status;
	int		port_busy = FALSE;
	uchar_t		*lun_string;

	/*
	 * Check valid SCSI command.  Only opcodes whitelisted in
	 * scsi_ioctl_list[] may be issued through this interface.
	 */
	scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
	ret = EINVAL;
	for (cmd_index = 0;
	    cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
	    ret != 0;
	    cmd_index++) {
		/*
		 * First byte of CDB is the SCSI command
		 */
		if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
			ret = 0;
		}
	}

	/*
	 * Check inputs.  Only read-type pass-through is supported, and
	 * the CDB must fit the FCP command block.
	 */
	if (fscsi->scsi_flags != FCP_SCSI_READ) {
		ret = EINVAL;
	} else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
		/* no larger than */
		ret = EINVAL;
	}


	/*
	 * Find FC port
	 */
	if (ret == 0) {
		/*
		 * Acquire global mutex
		 */
		mutex_enter(&fcp_global_mutex);

		pptr = fcp_port_head;
		while (pptr) {
			if (pptr->port_instance ==
			    (uint32_t)fscsi->scsi_fc_port_num) {
				break;
			} else {
				pptr = pptr->port_next;
			}
		}

		if (pptr == NULL) {
			ret = ENXIO;
		} else {
			/*
			 * fc_ulp_busy_port can raise power
			 * so, we must not hold any mutexes involved in PM
			 */
			mutex_exit(&fcp_global_mutex);
			ret = fc_ulp_busy_port(pptr->port_fp_handle);
		}

		if (ret == 0) {

			/* remember port is busy, so we will release later */
			port_busy = TRUE;

			/*
			 * If there is a reconfiguration in progress, wait
			 * for it to complete.
			 */

			fcp_reconfig_wait(pptr);

			/* reacquire mutexes in order */
			mutex_enter(&fcp_global_mutex);
			mutex_enter(&pptr->port_mutex);

			/*
			 * Will port accept DMA?
			 */
			nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
			    ? 1 : 0;

			/*
			 * If init or offline, device not known
			 *
			 * If we are discovering (onlining), we can
			 * NOT obviously provide reliable data about
			 * devices until it is complete
			 */
			if (pptr->port_state & (FCP_STATE_INIT |
			    FCP_STATE_OFFLINE)) {
				ret = ENXIO;
			} else if (pptr->port_state & FCP_STATE_ONLINING) {
				ret = EBUSY;
			} else {
				/*
				 * Find target from pwwn
				 *
				 * The wwn must be put into a local
				 * variable to ensure alignment.
				 */
				wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
				ptgt = fcp_lookup_target(pptr,
				    (uchar_t *)wwn_ptr);

				/*
				 * If target not found,
				 * create it on the fly (user-initiated
				 * discovery via PLOGI/PRLI).
				 */
				if (ptgt == NULL) {
					/*
					 * Note: Still have global &
					 * port mutexes
					 */
					mutex_exit(&pptr->port_mutex);
					ptgt = fcp_port_create_tgt(pptr,
					    wwn_ptr, &ret, &fc_status,
					    &pkt_state, &pkt_action,
					    &pkt_reason);
					mutex_enter(&pptr->port_mutex);

					/* report completion detail to user */
					fscsi->scsi_fc_status = fc_status;
					fscsi->scsi_pkt_state =
					    (uchar_t)pkt_state;
					fscsi->scsi_pkt_reason = pkt_reason;
					fscsi->scsi_pkt_action =
					    (uchar_t)pkt_action;

					if (ptgt != NULL) {
						target_created = TRUE;
					} else if (ret == 0) {
						ret = ENOMEM;
					}
				}

				if (ret == 0) {
					/*
					 * Acquire target
					 */
					mutex_enter(&ptgt->tgt_mutex);

					/*
					 * If target is mark or busy,
					 * then target can not be used
					 */
					if (ptgt->tgt_state &
					    (FCP_TGT_MARK |
					    FCP_TGT_BUSY)) {
						ret = EBUSY;
					} else {
						/*
						 * Mark target as busy
						 */
						ptgt->tgt_state |=
						    FCP_TGT_BUSY;
					}

					/*
					 * Release target.  Snapshot the
					 * link/change counts so a later
					 * state change can be detected.
					 */
					lcount = pptr->port_link_cnt;
					tcount = ptgt->tgt_change_cnt;
					mutex_exit(&ptgt->tgt_mutex);
				}
			}

			/*
			 * Release port
			 */
			mutex_exit(&pptr->port_mutex);
		}

		/*
		 * Release global mutex
		 */
		mutex_exit(&fcp_global_mutex);
	}

	if (ret == 0) {
		uint64_t belun = BE_64(fscsi->scsi_lun);

		/*
		 * If it's a target device, find lun from pwwn
		 * The wwn must be put into a local
		 * variable to ensure alignment.
		 */
		mutex_enter(&pptr->port_mutex);
		wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
		if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
			/* this is not a target */
			fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
			ret = ENXIO;
		} else if ((belun << 16) != 0) {
			/*
			 * Since fcp only support PD and LU addressing method
			 * so far, the last 6 bytes of a valid LUN are expected
			 * to be filled with 00h.
			 */
			fscsi->scsi_fc_status = FC_INVALID_LUN;
			cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
			    " method 0x%02x with LUN number 0x%016" PRIx64,
			    (uint8_t)(belun >> 62), belun);
			ret = ENXIO;
		} else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
		    (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
			/*
			 * This is a SCSI target, but no LUN at this
			 * address.
			 *
			 * In the future, we may want to send this to
			 * the target, and let it respond
			 * appropriately
			 */
			ret = ENXIO;
		}
		mutex_exit(&pptr->port_mutex);
	}

	/*
	 * Finished grabbing external resources
	 * Allocate internal packet (icmd)
	 */
	if (ret == 0) {
		/*
		 * Calc rsp len assuming rsp info included
		 */
		rsp_len = sizeof (struct fcp_rsp) +
		    sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;

		icmd = fcp_icmd_alloc(pptr, ptgt,
		    sizeof (struct fcp_cmd),
		    rsp_len,
		    fscsi->scsi_buflen,
		    nodma,
		    lcount,			/* ipkt_link_cnt */
		    tcount,			/* ipkt_change_cnt */
		    0,				/* cause */
		    FC_INVALID_RSCN_COUNT);	/* invalidate the count */

		if (icmd == NULL) {
			ret = ENOMEM;
		} else {
			/*
			 * Setup internal packet as sema sync
			 */
			fcp_ipkt_sema_init(icmd);
		}
	}

	if (ret == 0) {
		/*
		 * Init fpkt pointer for use.
		 */

		fpkt = icmd->ipkt_fpkt;

		fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
		fpkt->pkt_tran_type = FC_PKT_FCP_READ; /* only rd for now */
		fpkt->pkt_timeout = fscsi->scsi_timeout;

		/*
		 * Init fcmd pointer for use by SCSI command.
		 * With DMA, build directly in the mapped buffer;
		 * otherwise build locally and copy out below.
		 */

		if (nodma) {
			fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
		} else {
			fcmd = &fcp_cmd;
		}
		bzero(fcmd, sizeof (struct fcp_cmd));
		ptgt = plun->lun_tgt;

		lun_string = (uchar_t *)&fscsi->scsi_lun;

		fcmd->fcp_ent_addr.ent_addr_0 =
		    BE_16(*(uint16_t *)&(lun_string[0]));
		fcmd->fcp_ent_addr.ent_addr_1 =
		    BE_16(*(uint16_t *)&(lun_string[2]));
		fcmd->fcp_ent_addr.ent_addr_2 =
		    BE_16(*(uint16_t *)&(lun_string[4]));
		fcmd->fcp_ent_addr.ent_addr_3 =
		    BE_16(*(uint16_t *)&(lun_string[6]));

		/*
		 * Setup internal packet(icmd)
		 */
		icmd->ipkt_lun = plun;
		icmd->ipkt_restart = 0;
		icmd->ipkt_retries = 0;
		icmd->ipkt_opcode = 0;

		/*
		 * Init the frame HEADER Pointer for use
		 */
		hp = &fpkt->pkt_cmd_fhdr;

		hp->s_id = pptr->port_id;
		hp->d_id = ptgt->tgt_d_id;
		hp->r_ctl = R_CTL_COMMAND;
		hp->type = FC_TYPE_SCSI_FCP;
		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->rsvd = 0;
		hp->seq_id = 0;
		hp->seq_cnt = 0;
		hp->ox_id = 0xffff;
		hp->rx_id = 0xffff;
		hp->ro = 0;

		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
		fcmd->fcp_cntl.cntl_read_data = 1;	/* only rd for now */
		fcmd->fcp_cntl.cntl_write_data = 0;
		fcmd->fcp_data_len = fscsi->scsi_buflen;

		scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
		bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
		    fscsi->scsi_cdblen);

		if (!nodma) {
			FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
			    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
		}

		/*
		 * Send SCSI command to FC transport, unless the target
		 * changed state since we snapshotted lcount/tcount.
		 */

		if (ret == 0) {
			mutex_enter(&ptgt->tgt_mutex);

			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				mutex_exit(&ptgt->tgt_mutex);
				fscsi->scsi_fc_status = xport_retval =
				    fc_ulp_transport(pptr->port_fp_handle,
				    fpkt);
				if (fscsi->scsi_fc_status != FC_SUCCESS) {
					ret = EIO;
				}
			} else {
				mutex_exit(&ptgt->tgt_mutex);
				ret = EBUSY;
			}
		}
	}

	/*
	 * Wait for completion only if fc_ulp_transport was called and it
	 * returned a success. This is the only time callback will happen.
	 * Otherwise, there is no point in waiting
	 */
	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
		ret = fcp_ipkt_sema_wait(icmd);
	}

	/*
	 * Copy data to IOCTL data structures
	 */
	rsp = NULL;
	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
		rsp = (struct fcp_rsp *)fpkt->pkt_resp;

		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!SCSI command to d_id=0x%x lun=0x%x"
			    " failed, Bad FCP response values:"
			    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
			    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
			    ptgt->tgt_d_id, plun->lun_num,
			    rsp->reserved_0, rsp->reserved_1,
			    rsp->fcp_u.fcp_status.reserved_0,
			    rsp->fcp_u.fcp_status.reserved_1,
			    rsp->fcp_response_len, rsp->fcp_sense_len);

			ret = EIO;
		}
	}

	if ((ret == 0) && (rsp != NULL)) {
		/*
		 * Calc response lengths
		 */
		sense_len = 0;
		info_len = 0;

		if (rsp->fcp_u.fcp_status.rsp_len_set) {
			info_len = rsp->fcp_response_len;
		}

		/* rsp_info immediately follows the fixed fcp_rsp header */
		rsp_info = (struct fcp_rsp_info *)
		    ((uint8_t *)rsp + sizeof (struct fcp_rsp));

		/*
		 * Get SCSI status
		 */
		fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
		/*
		 * If a lun was just added or removed and the next command
		 * comes through this interface, we need to capture the check
		 * condition so we can discover the new topology.
		 */
		if (fscsi->scsi_bufstatus != STATUS_GOOD &&
		    rsp->fcp_u.fcp_status.sense_len_set) {
			sense_len = rsp->fcp_sense_len;
			rsp_sense = (caddr_t)((uint8_t *)rsp_info + info_len);
			sense_to = (struct scsi_extended_sense *)rsp_sense;
			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
			    (FCP_SENSE_NO_LUN(sense_to))) {
				reconfig_lun = TRUE;
			}
		}

		/*
		 * On a good REPORT_LUN (or a sense-indicated LUN change),
		 * decide whether target rediscovery is needed and, if so,
		 * schedule it via timeout().  Note: reconfig_status is
		 * only read when reconfig_lun is FALSE, in which case it
		 * was set by fcp_is_reconfig_needed() just above.
		 */
		if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
		    (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
			if (reconfig_lun == FALSE) {
				reconfig_status =
				    fcp_is_reconfig_needed(ptgt, fpkt);
			}

			if ((reconfig_lun == TRUE) ||
			    (reconfig_status == TRUE)) {
				mutex_enter(&ptgt->tgt_mutex);
				if (ptgt->tgt_tid == NULL) {
					/*
					 * Either we've been notified the
					 * REPORT_LUN data has changed, or
					 * we've determined on our own that
					 * we're out of date.  Kick off
					 * rediscovery.
					 */
					tid = timeout(fcp_reconfigure_luns,
					    (caddr_t)ptgt, drv_usectohz(1));

					ptgt->tgt_tid = tid;
					ptgt->tgt_state |= FCP_TGT_BUSY;
					ret = EBUSY;
					reconfig_pending = TRUE;
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
		}

		/*
		 * Calc residuals and buffer lengths
		 */

		if (ret == 0) {
			buf_len = fscsi->scsi_buflen;
			fscsi->scsi_bufresid = 0;
			if (rsp->fcp_u.fcp_status.resid_under) {
				if (rsp->fcp_resid <= fscsi->scsi_buflen) {
					fscsi->scsi_bufresid = rsp->fcp_resid;
				} else {
					cmn_err(CE_WARN, "fcp: bad residue %x "
					    "for txfer len %x", rsp->fcp_resid,
					    fscsi->scsi_buflen);
					fscsi->scsi_bufresid =
					    fscsi->scsi_buflen;
				}
				buf_len -= fscsi->scsi_bufresid;
			}
			if (rsp->fcp_u.fcp_status.resid_over) {
				fscsi->scsi_bufresid = -rsp->fcp_resid;
			}

			/* clamp sense copy to the caller's buffer size */
			fscsi->scsi_rqresid = fscsi->scsi_rqlen - sense_len;
			if (fscsi->scsi_rqlen < sense_len) {
				sense_len = fscsi->scsi_rqlen;
			}

			fscsi->scsi_fc_rspcode = 0;
			if (rsp->fcp_u.fcp_status.rsp_len_set) {
				fscsi->scsi_fc_rspcode = rsp_info->rsp_code;
			}
			fscsi->scsi_pkt_state = fpkt->pkt_state;
			fscsi->scsi_pkt_action = fpkt->pkt_action;
			fscsi->scsi_pkt_reason = fpkt->pkt_reason;

			/*
			 * Copy data and request sense
			 *
			 * Data must be copied by using the FCP_CP_IN macro.
			 * This will ensure the proper byte order since the data
			 * is being copied directly from the memory mapped
			 * device register.
			 *
			 * The response (and request sense) will be in the
			 * correct byte order. No special copy is necessary.
			 */

			if (buf_len) {
				FCP_CP_IN(fpkt->pkt_data,
				    fscsi->scsi_bufaddr,
				    fpkt->pkt_data_acc,
				    buf_len);
			}
			bcopy((void *)rsp_sense,
			    (void *)fscsi->scsi_rqbufaddr,
			    sense_len);
		}
	}

	/*
	 * Cleanup transport data structures if icmd was alloc-ed
	 * So, cleanup happens in the same thread that icmd was alloc-ed
	 */
	if (icmd != NULL) {
		fcp_ipkt_sema_cleanup(icmd);
	}

	/* restore pm busy/idle status */
	if (port_busy) {
		fc_ulp_idle_port(pptr->port_fp_handle);
	}

	/*
	 * Cleanup target. if a reconfig is pending, don't clear the BUSY
	 * flag, it'll be cleared when the reconfig is complete.
	 */
	if ((ptgt != NULL) && !reconfig_pending) {
		/*
		 * If target was created,
		 *
		 * NOTE(review): both branches below are identical; the
		 * target_created distinction appears vestigial -- confirm
		 * whether the created-target path was meant to do more
		 * (e.g. tear the target down again).
		 */
		if (target_created) {
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_state &= ~FCP_TGT_BUSY;
			mutex_exit(&ptgt->tgt_mutex);
		} else {
			/*
			 * De-mark target as busy
			 */
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_state &= ~FCP_TGT_BUSY;
			mutex_exit(&ptgt->tgt_mutex);
		}
	}
	return (ret);
}
2864
2865
2866 static int
2867 fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
2868 fc_packet_t *fpkt)
2869 {
2870 uchar_t *lun_string;
2871 uint16_t lun_num, i;
2872 int num_luns;
2873 int actual_luns;
2874 int num_masked_luns;
2875 int lun_buflen;
2876 struct fcp_lun *plun = NULL;
2877 struct fcp_reportlun_resp *report_lun;
2878 uint8_t reconfig_needed = FALSE;
2879 uint8_t lun_exists = FALSE;
2880 fcp_port_t *pptr = ptgt->tgt_port;
2881
2882 report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);
2883
2884 FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
2885 fpkt->pkt_datalen);
2886
2887 /* get number of luns (which is supplied as LUNS * 8) */
2888 num_luns = BE_32(report_lun->num_lun) >> 3;
2889
2890 /*
2891 * Figure out exactly how many lun strings our response buffer
2892 * can hold.
2893 */
2894 lun_buflen = (fpkt->pkt_datalen -
2895 2 * sizeof (uint32_t)) / sizeof (longlong_t);
2896
2897 /*
2898 * Is our response buffer full or not? We don't want to
2899 * potentially walk beyond the number of luns we have.
2900 */
2901 if (num_luns <= lun_buflen) {
2902 actual_luns = num_luns;
2903 } else {
2904 actual_luns = lun_buflen;
2905 }
2906
2907 mutex_enter(&ptgt->tgt_mutex);
2908
2909 /* Scan each lun to see if we have masked it. */
2910 num_masked_luns = 0;
2911 if (fcp_lun_blacklist != NULL) {
2912 for (i = 0; i < actual_luns; i++) {
2913 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2914 switch (lun_string[0] & 0xC0) {
2915 case FCP_LUN_ADDRESSING:
2916 case FCP_PD_ADDRESSING:
2917 case FCP_VOLUME_ADDRESSING:
2918 lun_num = ((lun_string[0] & 0x3F) << 8)
2919 | lun_string[1];
2920 if (fcp_should_mask(&ptgt->tgt_port_wwn,
2921 lun_num) == TRUE) {
2922 num_masked_luns++;
2923 }
2924 break;
2925 default:
2926 break;
2927 }
2928 }
2929 }
2930
2931 /*
2932 * The quick and easy check. If the number of LUNs reported
2933 * doesn't match the number we currently know about, we need
2934 * to reconfigure.
2935 */
2936 if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
2937 mutex_exit(&ptgt->tgt_mutex);
2938 kmem_free(report_lun, fpkt->pkt_datalen);
2939 return (TRUE);
2940 }
2941
2942 /*
2943 * If the quick and easy check doesn't turn up anything, we walk
2944 * the list of luns from the REPORT_LUN response and look for
2945 * any luns we don't know about. If we find one, we know we need
2946 * to reconfigure. We will skip LUNs that are masked because of the
2947 * blacklist.
2948 */
2949 for (i = 0; i < actual_luns; i++) {
2950 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2951 lun_exists = FALSE;
2952 switch (lun_string[0] & 0xC0) {
2953 case FCP_LUN_ADDRESSING:
2954 case FCP_PD_ADDRESSING:
2955 case FCP_VOLUME_ADDRESSING:
2956 lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
2957
2958 if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
2959 &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
2960 lun_exists = TRUE;
2961 break;
2962 }
2963
2964 for (plun = ptgt->tgt_lun; plun;
2965 plun = plun->lun_next) {
2966 if (plun->lun_num == lun_num) {
2967 lun_exists = TRUE;
2968 break;
2969 }
2970 }
2971 break;
2972 default:
2973 break;
2974 }
2975
2976 if (lun_exists == FALSE) {
2977 reconfig_needed = TRUE;
2978 break;
2979 }
2980 }
2981
2982 mutex_exit(&ptgt->tgt_mutex);
2983 kmem_free(report_lun, fpkt->pkt_datalen);
2984
2985 return (reconfig_needed);
2986 }
2987
2988 /*
2989 * This function is called by fcp_handle_page83 and uses inquiry response data
2990 * stored in plun->lun_inq to determine whether or not a device is a member of
2991 * the table fcp_symmetric_disk_table_size. We return 0 if it is in the table,
2992 * otherwise 1.
2993 */
2994 static int
2995 fcp_symmetric_device_probe(struct fcp_lun *plun)
2996 {
2997 struct scsi_inquiry *stdinq = &plun->lun_inq;
2998 char *devidptr;
2999 int i, len;
3000
3001 for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
3002 devidptr = fcp_symmetric_disk_table[i];
3003 len = (int)strlen(devidptr);
3004
3005 if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
3006 return (0);
3007 }
3008 }
3009 return (1);
3010 }
3011
3012
3013 /*
3014 * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl
3015 * It basically returns the current count of # of state change callbacks
3016 * i.e the value of tgt_change_cnt.
3017 *
3018 * INPUT:
3019 * fcp_ioctl.fp_minor -> The minor # of the fp port
3020 * fcp_ioctl.listlen -> 1
3021 * fcp_ioctl.list -> Pointer to a 32 bit integer
3022 */
3023 /*ARGSUSED2*/
3024 static int
3025 fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
3026 {
3027 int ret;
3028 uint32_t link_cnt;
3029 struct fcp_ioctl fioctl;
3030 struct fcp_port *pptr = NULL;
3031
3032 if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
3033 &pptr)) != 0) {
3034 return (ret);
3035 }
3036
3037 ASSERT(pptr != NULL);
3038
3039 if (fioctl.listlen != 1) {
3040 return (EINVAL);
3041 }
3042
3043 mutex_enter(&pptr->port_mutex);
3044 if (pptr->port_state & FCP_STATE_OFFLINE) {
3045 mutex_exit(&pptr->port_mutex);
3046 return (ENXIO);
3047 }
3048
3049 /*
3050 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
3051 * When the fcp initially attaches to the port and there are nothing
3052 * hanging out of the port or if there was a repeat offline state change
3053 * callback (refer fcp_statec_callback() FC_STATE_OFFLINE case).
3054 * In the latter case, port_tmp_cnt will be non-zero and that is how we
3055 * will differentiate the 2 cases.
3056 */
3057 if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
3058 mutex_exit(&pptr->port_mutex);
3059 return (ENXIO);
3060 }
3061
3062 link_cnt = pptr->port_link_cnt;
3063 mutex_exit(&pptr->port_mutex);
3064
3065 if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
3066 return (EFAULT);
3067 }
3068
3069 #ifdef _MULTI_DATAMODEL
3070 switch (ddi_model_convert_from(mode & FMODELS)) {
3071 case DDI_MODEL_ILP32: {
3072 struct fcp32_ioctl f32_ioctl;
3073
3074 f32_ioctl.fp_minor = fioctl.fp_minor;
3075 f32_ioctl.listlen = fioctl.listlen;
3076 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
3077 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
3078 sizeof (struct fcp32_ioctl), mode)) {
3079 return (EFAULT);
3080 }
3081 break;
3082 }
3083 case DDI_MODEL_NONE:
3084 if (ddi_copyout((void *)&fioctl, (void *)data,
3085 sizeof (struct fcp_ioctl), mode)) {
3086 return (EFAULT);
3087 }
3088 break;
3089 }
3090 #else /* _MULTI_DATAMODEL */
3091
3092 if (ddi_copyout((void *)&fioctl, (void *)data,
3093 sizeof (struct fcp_ioctl), mode)) {
3094 return (EFAULT);
3095 }
3096 #endif /* _MULTI_DATAMODEL */
3097
3098 return (0);
3099 }
3100
3101 /*
3102 * This function copies the fcp_ioctl structure passed in from user land
3103 * into kernel land. Handles 32 bit applications.
3104 */
3105 /*ARGSUSED*/
3106 static int
3107 fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
3108 struct fcp_ioctl *fioctl, struct fcp_port **pptr)
3109 {
3110 struct fcp_port *t_pptr;
3111
3112 #ifdef _MULTI_DATAMODEL
3113 switch (ddi_model_convert_from(mode & FMODELS)) {
3114 case DDI_MODEL_ILP32: {
3115 struct fcp32_ioctl f32_ioctl;
3116
3117 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
3118 sizeof (struct fcp32_ioctl), mode)) {
3119 return (EFAULT);
3120 }
3121 fioctl->fp_minor = f32_ioctl.fp_minor;
3122 fioctl->listlen = f32_ioctl.listlen;
3123 fioctl->list = (caddr_t)(long)f32_ioctl.list;
3124 break;
3125 }
3126 case DDI_MODEL_NONE:
3127 if (ddi_copyin((void *)data, (void *)fioctl,
3128 sizeof (struct fcp_ioctl), mode)) {
3129 return (EFAULT);
3130 }
3131 break;
3132 }
3133
3134 #else /* _MULTI_DATAMODEL */
3135 if (ddi_copyin((void *)data, (void *)fioctl,
3136 sizeof (struct fcp_ioctl), mode)) {
3137 return (EFAULT);
3138 }
3139 #endif /* _MULTI_DATAMODEL */
3140
3141 /*
3142 * Right now we can assume that the minor number matches with
3143 * this instance of fp. If this changes we will need to
3144 * revisit this logic.
3145 */
3146 mutex_enter(&fcp_global_mutex);
3147 t_pptr = fcp_port_head;
3148 while (t_pptr) {
3149 if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
3150 break;
3151 } else {
3152 t_pptr = t_pptr->port_next;
3153 }
3154 }
3155 *pptr = t_pptr;
3156 mutex_exit(&fcp_global_mutex);
3157 if (t_pptr == NULL) {
3158 return (ENXIO);
3159 }
3160
3161 return (0);
3162 }
3163
/*
 * Function:	fcp_port_create_tgt
 *
 * Description: As the name suggest this function creates the target context
 *		specified by the WWN provided by the caller. If the
 *		creation goes well and the target is known by fp/fctl a PLOGI
 *		followed by a PRLI are issued.
 *
 * Argument:	pptr		fcp port structure
 *		pwwn		WWN of the target
 *		ret_val		Address of the return code. It could be:
 *				EIO, ENOMEM or 0.
 *		fc_status	PLOGI or PRLI status completion
 *		fc_pkt_state	PLOGI or PRLI state completion
 *		fc_pkt_reason	PLOGI or PRLI reason completion
 *		fc_pkt_action	PLOGI or PRLI action completion
 *
 * Return Value: NULL if it failed
 *		Target structure address if it succeeds
 *
 * NOTE(review): the unbalanced mutex_exit/mutex_enter pair on
 * fcp_global_mutex below implies the caller holds that mutex on entry;
 * it is dropped across the (blocking) PLOGI/PRLI waits and re-acquired
 * before returning -- confirm at the call sites.
 */
static struct fcp_tgt *
fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
    int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
{
	struct fcp_tgt	*ptgt = NULL;
	fc_portmap_t	devlist;
	int		lcount;
	int		error;

	*ret_val = 0;

	/*
	 * Check FC port device & get port map
	 */
	if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
	    &error, 1) == NULL) {
		*ret_val = EIO;
	} else {
		if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
		    &devlist) != FC_SUCCESS) {
			*ret_val = EIO;
		}
	}

	/*
	 * Set port map flags. Harmless even on the failure paths above
	 * since devlist is only consumed below when *ret_val == 0.
	 */
	devlist.map_type = PORT_DEVICE_USER_CREATE;

	/* Allocate target */
	if (*ret_val == 0) {
		lcount = pptr->port_link_cnt;
		ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			*ret_val = ENOMEM;
		} else {
			/* Setup target from the port map entry */
			mutex_enter(&ptgt->tgt_mutex);

			ptgt->tgt_statec_cause = FCP_CAUSE_TGT_CHANGE;
			ptgt->tgt_tmp_cnt = 1;
			ptgt->tgt_d_id = devlist.map_did.port_id;
			ptgt->tgt_hard_addr =
			    devlist.map_hard_addr.hard_addr;
			ptgt->tgt_pd_handle = devlist.map_pd;
			ptgt->tgt_fca_dev = NULL;

			bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
			    FC_WWN_SIZE);
			bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
			    FC_WWN_SIZE);

			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	/* Release global mutex for PLOGI and PRLI */
	mutex_exit(&fcp_global_mutex);

	/* Send PLOGI (If necessary) */
	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}

	/* Send PRLI (If necessary) */
	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_prli(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}

	mutex_enter(&fcp_global_mutex);

	return (ptgt);
}
3259
/*
 * Function:	fcp_tgt_send_plogi
 *
 * Description: This function sends a PLOGI to the target specified by the
 *		caller and waits till it completes.
 *
 * Argument:	ptgt		Target to send the plogi to.
 *		fc_status	Status returned by fp/fctl in the PLOGI request.
 *		fc_pkt_state	State returned by fp/fctl in the PLOGI request.
 *		fc_pkt_reason	Reason returned by fp/fctl in the PLOGI request.
 *		fc_pkt_action	Action returned by fp/fctl in the PLOGI request.
 *
 * Return Value: 0
 *		ENOMEM
 *		EIO
 *
 * Context:	User context.
 */
static int
fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
    int *fc_pkt_reason, int *fc_pkt_action)
{
	struct fcp_port	*pptr;
	struct fcp_ipkt	*icmd;
	struct fc_packet	*fpkt;
	fc_frame_hdr_t	*hp;
	struct la_els_logi logi;
	int		tcount;
	int		lcount;
	/*
	 * login_retval starts as "not FC_SUCCESS" so the wait below is
	 * skipped unless fc_ulp_login() actually accepted the packet.
	 */
	int		ret, login_retval = ~FC_SUCCESS;

	ret = 0;

	pptr = ptgt->tgt_port;

	lcount = pptr->port_link_cnt;
	tcount = ptgt->tgt_change_cnt;

	/* Alloc internal packet */
	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
	    sizeof (la_els_logi_t), 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT);

	if (icmd == NULL) {
		ret = ENOMEM;
	} else {
		/*
		 * Setup internal packet as sema sync
		 */
		fcp_ipkt_sema_init(icmd);

		/*
		 * Setup internal packet (icmd)
		 */
		icmd->ipkt_lun = NULL;
		icmd->ipkt_restart = 0;
		icmd->ipkt_retries = 0;
		icmd->ipkt_opcode = LA_ELS_PLOGI;

		/*
		 * Setup fc_packet
		 */
		fpkt = icmd->ipkt_fpkt;

		fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
		fpkt->pkt_timeout = FCP_ELS_TIMEOUT;

		/*
		 * Setup FC frame header
		 */
		hp = &fpkt->pkt_cmd_fhdr;

		hp->s_id = pptr->port_id;	/* source ID */
		hp->d_id = ptgt->tgt_d_id;	/* dest ID */
		hp->r_ctl = R_CTL_ELS_REQ;
		hp->type = FC_TYPE_EXTENDED_LS;
		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->seq_id = 0;
		hp->rsvd = 0;
		hp->df_ctl = 0;
		hp->seq_cnt = 0;
		hp->ox_id = 0xffff;		/* i.e. none */
		hp->rx_id = 0xffff;		/* i.e. none */
		hp->ro = 0;

		/*
		 * Setup PLOGI payload and copy it into the packet's
		 * command buffer (DMA or non-DMA, FCP_CP_OUT handles both).
		 */
		bzero(&logi, sizeof (struct la_els_logi));
		logi.ls_code.ls_code = LA_ELS_PLOGI;

		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));

		/*
		 * Send PLOGI
		 */
		*fc_status = login_retval =
		    fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
		if (*fc_status != FC_SUCCESS) {
			ret = EIO;
		}
	}

	/*
	 * Wait for completion. Only wait if the login was actually
	 * submitted; otherwise no callback will ever post the semaphore.
	 */
	if ((ret == 0) && (login_retval == FC_SUCCESS)) {
		ret = fcp_ipkt_sema_wait(icmd);

		*fc_pkt_state = fpkt->pkt_state;
		*fc_pkt_reason = fpkt->pkt_reason;
		*fc_pkt_action = fpkt->pkt_action;
	}

	/*
	 * Cleanup the internal packet whenever it was allocated. The
	 * completion callback only posts the semaphore (it does not free
	 * anything), so by this point the exchange has either completed
	 * (sema_wait returned) or was never started (fc_ulp_login failed),
	 * and it is safe to destroy the semaphore and free icmd here.
	 */
	if (icmd != NULL) {
		fcp_ipkt_sema_cleanup(icmd);
	}

	return (ret);
}
3388
/*
 * Function:	fcp_tgt_send_prli
 *
 * Description: Does nothing as of today: a placeholder kept so that
 *		fcp_port_create_tgt() can follow the PLOGI with a PRLI
 *		step; the output parameters are never written.
 *
 * Argument:	ptgt		Target to send the prli to.
 *		fc_status	Status returned by fp/fctl in the PRLI request.
 *		fc_pkt_state	State returned by fp/fctl in the PRLI request.
 *		fc_pkt_reason	Reason returned by fp/fctl in the PRLI request.
 *		fc_pkt_action	Action returned by fp/fctl in the PRLI request.
 *
 * Return Value: 0 (always; unconditional success)
 */
/*ARGSUSED*/
static int
fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
    int *fc_pkt_reason, int *fc_pkt_action)
{
	return (0);
}
3409
3410 /*
3411 * Function: fcp_ipkt_sema_init
3412 *
3413 * Description: Initializes the semaphore contained in the internal packet.
3414 *
3415 * Argument: icmd Internal packet the semaphore of which must be
3416 * initialized.
3417 *
3418 * Return Value: None
3419 *
3420 * Context: User context only.
3421 */
3422 static void
3423 fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
3424 {
3425 struct fc_packet *fpkt;
3426
3427 fpkt = icmd->ipkt_fpkt;
3428
3429 /* Create semaphore for sync */
3430 sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);
3431
3432 /* Setup the completion callback */
3433 fpkt->pkt_comp = fcp_ipkt_sema_callback;
3434 }
3435
3436 /*
3437 * Function: fcp_ipkt_sema_wait
3438 *
3439 * Description: Wait on the semaphore embedded in the internal packet. The
3440 * semaphore is released in the callback.
3441 *
3442 * Argument: icmd Internal packet to wait on for completion.
3443 *
3444 * Return Value: 0
3445 * EIO
3446 * EBUSY
3447 * EAGAIN
3448 *
3449 * Context: User context only.
3450 *
3451 * This function does a conversion between the field pkt_state of the fc_packet
3452 * embedded in the internal packet (icmd) and the code it returns.
3453 */
3454 static int
3455 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3456 {
3457 struct fc_packet *fpkt;
3458 int ret;
3459
3460 ret = EIO;
3461 fpkt = icmd->ipkt_fpkt;
3462
3463 /*
3464 * Wait on semaphore
3465 */
3466 sema_p(&(icmd->ipkt_sema));
3467
3468 /*
3469 * Check the status of the FC packet
3470 */
3471 switch (fpkt->pkt_state) {
3472 case FC_PKT_SUCCESS:
3473 ret = 0;
3474 break;
3475 case FC_PKT_LOCAL_RJT:
3476 switch (fpkt->pkt_reason) {
3477 case FC_REASON_SEQ_TIMEOUT:
3478 case FC_REASON_RX_BUF_TIMEOUT:
3479 ret = EAGAIN;
3480 break;
3481 case FC_REASON_PKT_BUSY:
3482 ret = EBUSY;
3483 break;
3484 }
3485 break;
3486 case FC_PKT_TIMEOUT:
3487 ret = EAGAIN;
3488 break;
3489 case FC_PKT_LOCAL_BSY:
3490 case FC_PKT_TRAN_BSY:
3491 case FC_PKT_NPORT_BSY:
3492 case FC_PKT_FABRIC_BSY:
3493 ret = EBUSY;
3494 break;
3495 case FC_PKT_LS_RJT:
3496 case FC_PKT_BA_RJT:
3497 switch (fpkt->pkt_reason) {
3498 case FC_REASON_LOGICAL_BSY:
3499 ret = EBUSY;
3500 break;
3501 }
3502 break;
3503 case FC_PKT_FS_RJT:
3504 switch (fpkt->pkt_reason) {
3505 case FC_REASON_FS_LOGICAL_BUSY:
3506 ret = EBUSY;
3507 break;
3508 }
3509 break;
3510 }
3511
3512 return (ret);
3513 }
3514
3515 /*
3516 * Function: fcp_ipkt_sema_callback
3517 *
3518 * Description: Registered as the completion callback function for the FC
3519 * transport when the ipkt semaphore is used for sync. This will
3520 * cleanup the used data structures, if necessary and wake up
3521 * the user thread to complete the transaction.
3522 *
3523 * Argument: fpkt FC packet (points to the icmd)
3524 *
3525 * Return Value: None
3526 *
3527 * Context: User context only
3528 */
3529 static void
3530 fcp_ipkt_sema_callback(struct fc_packet *fpkt)
3531 {
3532 struct fcp_ipkt *icmd;
3533
3534 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
3535
3536 /*
3537 * Wake up user thread
3538 */
3539 sema_v(&(icmd->ipkt_sema));
3540 }
3541
/*
 * Function:	fcp_ipkt_sema_cleanup
 *
 * Description: Called to cleanup (if necessary) the data structures used
 *		when ipkt sema is used for sync. This function will detect
 *		whether the caller is the last thread (via counter) and
 *		cleanup only if necessary.
 *
 * Argument:	icmd	Internal command packet
 *
 * Return Value: None
 *
 * Context:	User context only
 */
static void
fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
{
	struct fcp_tgt	*ptgt;
	struct fcp_port	*pptr;

	ptgt = icmd->ipkt_tgt;
	pptr = icmd->ipkt_port;

	/*
	 * Acquire data structure
	 */
	mutex_enter(&ptgt->tgt_mutex);

	/*
	 * Destroy semaphore.
	 * NOTE(review): presumably tgt_mutex is held here to serialize
	 * against another thread referencing this icmd -- confirm; the
	 * semaphore itself needs no lock to be destroyed.
	 */
	sema_destroy(&(icmd->ipkt_sema));

	/*
	 * Cleanup internal packet (freed outside the target mutex since
	 * fcp_icmd_free() takes its own locks)
	 */
	mutex_exit(&ptgt->tgt_mutex);
	fcp_icmd_free(pptr, icmd);
}
3581
3582 /*
3583 * Function: fcp_port_attach
3584 *
3585 * Description: Called by the transport framework to resume, suspend or
3586 * attach a new port.
3587 *
3588 * Argument: ulph Port handle
3589 * *pinfo Port information
3590 * cmd Command
3591 * s_id Port ID
3592 *
3593 * Return Value: FC_FAILURE or FC_SUCCESS
3594 */
3595 /*ARGSUSED*/
3596 static int
3597 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3598 fc_attach_cmd_t cmd, uint32_t s_id)
3599 {
3600 int instance;
3601 int res = FC_FAILURE; /* default result */
3602
3603 ASSERT(pinfo != NULL);
3604
3605 instance = ddi_get_instance(pinfo->port_dip);
3606
3607 switch (cmd) {
3608 case FC_CMD_ATTACH:
3609 /*
3610 * this port instance attaching for the first time (or after
3611 * being detached before)
3612 */
3613 if (fcp_handle_port_attach(ulph, pinfo, s_id,
3614 instance) == DDI_SUCCESS) {
3615 res = FC_SUCCESS;
3616 } else {
3617 ASSERT(ddi_get_soft_state(fcp_softstate,
3618 instance) == NULL);
3619 }
3620 break;
3621
3622 case FC_CMD_RESUME:
3623 case FC_CMD_POWER_UP:
3624 /*
3625 * this port instance was attached and the suspended and
3626 * will now be resumed
3627 */
3628 if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3629 instance) == DDI_SUCCESS) {
3630 res = FC_SUCCESS;
3631 }
3632 break;
3633
3634 default:
3635 /* shouldn't happen */
3636 FCP_TRACE(fcp_logq, "fcp",
3637 fcp_trace, FCP_BUF_LEVEL_2, 0,
3638 "port_attach: unknown cmdcommand: %d", cmd);
3639 break;
3640 }
3641
3642 /* return result */
3643 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3644 FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3645
3646 return (res);
3647 }
3648
3649
3650 /*
3651 * detach or suspend this port instance
3652 *
3653 * acquires and releases the global mutex
3654 *
3655 * acquires and releases the mutex for this port
3656 *
3657 * acquires and releases the hotplug mutex for this port
3658 */
3659 /*ARGSUSED*/
3660 static int
3661 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
3662 fc_detach_cmd_t cmd)
3663 {
3664 int flag;
3665 int instance;
3666 struct fcp_port *pptr;
3667
3668 instance = ddi_get_instance(info->port_dip);
3669 pptr = ddi_get_soft_state(fcp_softstate, instance);
3670
3671 switch (cmd) {
3672 case FC_CMD_SUSPEND:
3673 FCP_DTRACE(fcp_logq, "fcp",
3674 fcp_trace, FCP_BUF_LEVEL_8, 0,
3675 "port suspend called for port %d", instance);
3676 flag = FCP_STATE_SUSPENDED;
3677 break;
3678
3679 case FC_CMD_POWER_DOWN:
3680 FCP_DTRACE(fcp_logq, "fcp",
3681 fcp_trace, FCP_BUF_LEVEL_8, 0,
3682 "port power down called for port %d", instance);
3683 flag = FCP_STATE_POWER_DOWN;
3684 break;
3685
3686 case FC_CMD_DETACH:
3687 FCP_DTRACE(fcp_logq, "fcp",
3688 fcp_trace, FCP_BUF_LEVEL_8, 0,
3689 "port detach called for port %d", instance);
3690 flag = FCP_STATE_DETACHING;
3691 break;
3692
3693 default:
3694 /* shouldn't happen */
3695 return (FC_FAILURE);
3696 }
3697 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3698 FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");
3699
3700 return (fcp_handle_port_detach(pptr, flag, instance));
3701 }
3702
3703
/*
 * called for ioctls on the transport's devctl interface, and the transport
 * has passed it to us
 *
 * this will only be called for device control ioctls (i.e. hotplugging stuff)
 *
 * return FC_SUCCESS if we decide to claim the ioctl,
 * else return FC_UNCLAIMED
 *
 * *rval is set iff we decide to claim the ioctl
 *
 * Structure: a first switch validates the command, allocates the devctl
 * handle (dcp) and, for per-device commands, enters the devinfo/MDI tree
 * and resolves the child (cdip/pip); a second switch does the actual work.
 * devi_entered tracks whether the "out:" path still has to exit the tree.
 */
/*ARGSUSED*/
static int
fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
    intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
{
	int			retval = FC_UNCLAIMED;	/* return value */
	struct fcp_port		*pptr = NULL;		/* our soft state */
	struct devctl_iocdata	*dcp = NULL;		/* for devctl */
	dev_info_t		*cdip;
	mdi_pathinfo_t		*pip = NULL;
	char			*ndi_nm;		/* NDI name */
	char			*ndi_addr;		/* NDI addr */
	int			is_mpxio, circ;
	int			devi_entered = 0;
	clock_t			end_time;

	ASSERT(rval != NULL);

	FCP_DTRACE(fcp_logq, "fcp",
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);

	/* if already claimed then forget it */
	if (claimed) {
		/*
		 * for now, if this ioctl has already been claimed, then
		 * we just ignore it
		 */
		return (retval);
	}

	/* get our port info */
	if ((pptr = fcp_get_port(port_handle)) == NULL) {
		fcp_log(CE_WARN, NULL,
		    "!fcp:Invalid port handle handle in ioctl");
		*rval = ENXIO;
		return (retval);
	}
	is_mpxio = pptr->port_mpxio;

	/*
	 * First pass: recognize the command and acquire whatever the
	 * second pass needs (devctl handle, child devinfo/path-info).
	 */
	switch (cmd) {
	case DEVCTL_BUS_GETSTATE:
	case DEVCTL_BUS_QUIESCE:
	case DEVCTL_BUS_UNQUIESCE:
	case DEVCTL_BUS_RESET:
	case DEVCTL_BUS_RESETALL:

	case DEVCTL_BUS_DEV_CREATE:
		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
			return (retval);
		}
		break;

	case DEVCTL_DEVICE_GETSTATE:
	case DEVCTL_DEVICE_OFFLINE:
	case DEVCTL_DEVICE_ONLINE:
	case DEVCTL_DEVICE_REMOVE:
	case DEVCTL_DEVICE_RESET:
		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
			return (retval);
		}

		ASSERT(dcp != NULL);

		/* ensure we have a name and address */
		if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
		    ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "ioctl: can't get name (%s) or addr (%s)",
			    ndi_nm ? ndi_nm : "<null ptr>",
			    ndi_addr ? ndi_addr : "<null ptr>");
			ndi_dc_freehdl(dcp);
			return (retval);
		}


		/* get our child's DIP */
		ASSERT(pptr != NULL);
		if (is_mpxio) {
			mdi_devi_enter(pptr->port_dip, &circ);
		} else {
			ndi_devi_enter(pptr->port_dip, &circ);
		}
		devi_entered = 1;

		if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
		    ndi_addr)) == NULL) {
			/* Look for virtually enumerated devices. */
			pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
			if (pip == NULL ||
			    ((cdip = mdi_pi_get_client(pip)) == NULL)) {
				*rval = ENXIO;
				goto out;
			}
		}
		break;

	default:
		*rval = ENOTTY;
		return (retval);
	}

	/* this ioctl is ours -- process it */

	retval = FC_SUCCESS;		/* just means we claim the ioctl */

	/* we assume it will be a success; else we'll set error value */
	*rval = 0;


	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "ioctl: claiming this one");

	/* handle ioctls now */
	switch (cmd) {
	case DEVCTL_DEVICE_GETSTATE:
		ASSERT(cdip != NULL);
		ASSERT(dcp != NULL);
		if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
			*rval = EFAULT;
		}
		break;

	case DEVCTL_DEVICE_REMOVE:
	case DEVCTL_DEVICE_OFFLINE: {
		int			flag = 0;
		int			lcount;
		int			tcount;
		struct fcp_pkt	*head = NULL;
		struct fcp_lun	*plun;
		child_info_t	*cip = CIP(cdip);
		int			all = 1;
		struct fcp_lun	*tplun;
		struct fcp_tgt	*ptgt;

		ASSERT(pptr != NULL);
		ASSERT(cdip != NULL);

		mutex_enter(&pptr->port_mutex);
		if (pip != NULL) {
			cip = CIP(pip);
		}
		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
			mutex_exit(&pptr->port_mutex);
			*rval = ENXIO;
			break;
		}

		/* abort any commands still queued against this LUN */
		head = fcp_scan_commands(plun);
		if (head != NULL) {
			fcp_abort_commands(head, LUN_PORT);
		}
		lcount = pptr->port_link_cnt;
		tcount = plun->lun_tgt->tgt_change_cnt;
		mutex_exit(&pptr->port_mutex);

		if (cmd == DEVCTL_DEVICE_REMOVE) {
			flag = NDI_DEVI_REMOVE;
			if (is_mpxio)
				flag |= NDI_USER_REQ;
		}

		/* drop the tree lock before handing off to hotplug */
		if (is_mpxio) {
			mdi_devi_exit(pptr->port_dip, circ);
		} else {
			ndi_devi_exit(pptr->port_dip, circ);
		}
		devi_entered = 0;

		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
		    FCP_OFFLINE, lcount, tcount, flag);

		if (*rval != NDI_SUCCESS) {
			*rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
			break;
		}

		fcp_update_offline_flags(plun);

		/* check whether every LUN of this target is now offline */
		ptgt = plun->lun_tgt;
		mutex_enter(&ptgt->tgt_mutex);
		for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
		    tplun->lun_next) {
			mutex_enter(&tplun->lun_mutex);
			if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
				all = 0;
			}
			mutex_exit(&tplun->lun_mutex);
		}

		if (all) {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
			/*
			 * The user is unconfiguring/offlining the device.
			 * If fabric and the auto configuration is set
			 * then make sure the user is the only one who
			 * can reconfigure the device.
			 */
			if (FC_TOP_EXTERNAL(pptr->port_topology) &&
			    fcp_enable_auto_configuration) {
				ptgt->tgt_manual_config_only = 1;
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
		break;
	}

	case DEVCTL_DEVICE_ONLINE: {
		int			lcount;
		int			tcount;
		struct fcp_lun	*plun;
		child_info_t	*cip = CIP(cdip);

		ASSERT(cdip != NULL);
		ASSERT(pptr != NULL);

		mutex_enter(&pptr->port_mutex);
		if (pip != NULL) {
			cip = CIP(pip);
		}
		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
			mutex_exit(&pptr->port_mutex);
			*rval = ENXIO;
			break;
		}
		lcount = pptr->port_link_cnt;
		tcount = plun->lun_tgt->tgt_change_cnt;
		mutex_exit(&pptr->port_mutex);

		/*
		 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
		 * to allow the device attach to occur when the device is
		 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
		 * from the scsi_probe()).
		 */
		mutex_enter(&LUN_TGT->tgt_mutex);
		plun->lun_state |= FCP_LUN_ONLINING;
		mutex_exit(&LUN_TGT->tgt_mutex);

		/* drop the tree lock before handing off to hotplug */
		if (is_mpxio) {
			mdi_devi_exit(pptr->port_dip, circ);
		} else {
			ndi_devi_exit(pptr->port_dip, circ);
		}
		devi_entered = 0;

		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
		    FCP_ONLINE, lcount, tcount, 0);

		if (*rval != NDI_SUCCESS) {
			/* Reset the FCP_LUN_ONLINING bit */
			mutex_enter(&LUN_TGT->tgt_mutex);
			plun->lun_state &= ~FCP_LUN_ONLINING;
			mutex_exit(&LUN_TGT->tgt_mutex);
			*rval = EIO;
			break;
		}
		/* success: clear the transitional and offline state bits */
		mutex_enter(&LUN_TGT->tgt_mutex);
		plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
		    FCP_LUN_ONLINING);
		mutex_exit(&LUN_TGT->tgt_mutex);
		break;
	}

	case DEVCTL_BUS_DEV_CREATE: {
		uchar_t			*bytes = NULL;
		uint_t			nbytes;
		struct fcp_tgt		*ptgt = NULL;
		struct fcp_lun		*plun = NULL;
		dev_info_t		*useless_dip = NULL;

		/*
		 * The devctl framework builds a prototype devinfo node
		 * that carries the properties; we only want its
		 * PORT_WWN_PROP and free the node afterwards.
		 */
		*rval = ndi_dc_devi_create(dcp, pptr->port_dip,
		    DEVCTL_CONSTRUCT, &useless_dip);
		if (*rval != 0 || useless_dip == NULL) {
			break;
		}

		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
		    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
			*rval = EINVAL;
			(void) ndi_devi_free(useless_dip);
			if (bytes != NULL) {
				ddi_prop_free(bytes);
			}
			break;
		}

		*rval = fcp_create_on_demand(pptr, bytes);
		if (*rval == 0) {
			mutex_enter(&pptr->port_mutex);
			ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
			if (ptgt) {
				/*
				 * We now have a pointer to the target that
				 * was created. Lets point to the first LUN on
				 * this new target.
				 */
				mutex_enter(&ptgt->tgt_mutex);

				plun = ptgt->tgt_lun;
				/*
				 * There may be stale/offline LUN entries on
				 * this list (this is by design) and so we have
				 * to make sure we point to the first online
				 * LUN
				 */
				while (plun &&
				    plun->lun_state & FCP_LUN_OFFLINE) {
					plun = plun->lun_next;
				}

				mutex_exit(&ptgt->tgt_mutex);
			}
			mutex_exit(&pptr->port_mutex);
		}

		if (*rval == 0 && ptgt && plun) {
			mutex_enter(&plun->lun_mutex);
			/*
			 * Allow up to fcp_lun_ready_retry seconds to
			 * configure all the luns behind the target.
			 *
			 * The intent here is to allow targets with long
			 * reboot/reset-recovery times to become available
			 * while limiting the maximum wait time for an
			 * unresponsive target.
			 */
			end_time = ddi_get_lbolt() +
			    SEC_TO_TICK(fcp_lun_ready_retry);

			while (ddi_get_lbolt() < end_time) {
				retval = FC_SUCCESS;

				/*
				 * The new ndi interfaces for on-demand creation
				 * are inflexible, Do some more work to pass on
				 * a path name of some LUN (design is broken !)
				 */
				if (plun->lun_cip) {
					if (plun->lun_mpxio == 0) {
						cdip = DIP(plun->lun_cip);
					} else {
						cdip = mdi_pi_get_client(
						    PIP(plun->lun_cip));
					}
					if (cdip == NULL) {
						*rval = ENXIO;
						break;
					}

					if (!i_ddi_devi_attached(cdip)) {
						/* not ready: poll again in 1s */
						mutex_exit(&plun->lun_mutex);
						delay(drv_usectohz(1000000));
						mutex_enter(&plun->lun_mutex);
					} else {
						/*
						 * This Lun is ready, lets
						 * check the next one.
						 */
						mutex_exit(&plun->lun_mutex);
						plun = plun->lun_next;
						while (plun && (plun->lun_state
						    & FCP_LUN_OFFLINE)) {
							plun = plun->lun_next;
						}
						if (!plun) {
							break;
						}
						mutex_enter(&plun->lun_mutex);
					}
				} else {
					/*
					 * lun_cip field for a valid lun
					 * should never be NULL. Fail the
					 * command.
					 */
					*rval = ENXIO;
					break;
				}
			}
			if (plun) {
				mutex_exit(&plun->lun_mutex);
			} else {
				/*
				 * All online LUNs attached: hand the last
				 * child's path back to the caller.
				 */
				char devnm[MAXNAMELEN];
				int nmlen;

				nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
				    ddi_node_name(cdip),
				    ddi_get_name_addr(cdip));

				if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
				    0) {
					*rval = EFAULT;
				}
			}
		} else {
			int	i;
			char	buf[25];

			/* format the WWN as hex for the failure message */
			for (i = 0; i < FC_WWN_SIZE; i++) {
				(void) sprintf(&buf[i << 1], "%02x", bytes[i]);
			}

			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to create nodes for pwwn=%s; error=%x",
			    buf, *rval);
		}

		(void) ndi_devi_free(useless_dip);
		ddi_prop_free(bytes);
		break;
	}

	case DEVCTL_DEVICE_RESET: {
		struct fcp_lun	*plun;
		child_info_t	*cip = CIP(cdip);

		ASSERT(cdip != NULL);
		ASSERT(pptr != NULL);
		mutex_enter(&pptr->port_mutex);
		if (pip != NULL) {
			cip = CIP(pip);
		}
		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
			mutex_exit(&pptr->port_mutex);
			*rval = ENXIO;
			break;
		}
		mutex_exit(&pptr->port_mutex);

		mutex_enter(&plun->lun_tgt->tgt_mutex);
		if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
			mutex_exit(&plun->lun_tgt->tgt_mutex);

			*rval = ENXIO;
			break;
		}

		/* without a scsi_device there is no address to reset */
		if (plun->lun_sd == NULL) {
			mutex_exit(&plun->lun_tgt->tgt_mutex);

			*rval = ENXIO;
			break;
		}
		mutex_exit(&plun->lun_tgt->tgt_mutex);

		/*
		 * set up ap so that fcp_reset can figure out
		 * which target to reset
		 */
		if (fcp_scsi_reset(&plun->lun_sd->sd_address,
		    RESET_TARGET) == FALSE) {
			*rval = EIO;
		}
		break;
	}

	case DEVCTL_BUS_GETSTATE:
		ASSERT(dcp != NULL);
		ASSERT(pptr != NULL);
		ASSERT(pptr->port_dip != NULL);
		if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
		    NDI_SUCCESS) {
			*rval = EFAULT;
		}
		break;

	case DEVCTL_BUS_QUIESCE:
	case DEVCTL_BUS_UNQUIESCE:
		*rval = ENOTSUP;
		break;

	case DEVCTL_BUS_RESET:
	case DEVCTL_BUS_RESETALL:
		ASSERT(pptr != NULL);
		(void) fcp_linkreset(pptr, NULL,  KM_SLEEP);
		break;

	default:
		ASSERT(dcp != NULL);
		*rval = ENOTTY;
		break;
	}

	/* all done -- clean up and return */
out:	if (devi_entered) {
		if (is_mpxio) {
			mdi_devi_exit(pptr->port_dip, circ);
		} else {
			ndi_devi_exit(pptr->port_dip, circ);
		}
	}

	if (dcp != NULL) {
		ndi_dc_freehdl(dcp);
	}

	return (retval);
}
4217
4218
4219 /*ARGSUSED*/
4220 static int
4221 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4222 uint32_t claimed)
4223 {
4224 uchar_t r_ctl;
4225 uchar_t ls_code;
4226 struct fcp_port *pptr;
4227
4228 if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4229 return (FC_UNCLAIMED);
4230 }
4231
4232 mutex_enter(&pptr->port_mutex);
4233 if (pptr->port_state & (FCP_STATE_DETACHING |
4234 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4235 mutex_exit(&pptr->port_mutex);
4236 return (FC_UNCLAIMED);
4237 }
4238 mutex_exit(&pptr->port_mutex);
4239
4240 r_ctl = buf->ub_frame.r_ctl;
4241
4242 switch (r_ctl & R_CTL_ROUTING) {
4243 case R_CTL_EXTENDED_SVC:
4244 if (r_ctl == R_CTL_ELS_REQ) {
4245 ls_code = buf->ub_buffer[0];
4246
4247 switch (ls_code) {
4248 case LA_ELS_PRLI:
4249 /*
4250 * We really don't care if something fails.
4251 * If the PRLI was not sent out, then the
4252 * other end will time it out.
4253 */
4254 if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4255 return (FC_SUCCESS);
4256 }
4257 return (FC_UNCLAIMED);
4258 /* NOTREACHED */
4259
4260 default:
4261 break;
4262 }
4263 }
4264 /* FALLTHROUGH */
4265
4266 default:
4267 return (FC_UNCLAIMED);
4268 }
4269 }
4270
4271
/*
 * Unsolicited data callback from fp/fctl. FCP never claims unsolicited
 * data buffers, so this always declines.
 */
/*ARGSUSED*/
static int
fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
    uint32_t claimed)
{
	return (FC_UNCLAIMED);
}
4279
4280 /*
4281 * Function: fcp_statec_callback
4282 *
4283 * Description: The purpose of this function is to handle a port state change.
4284 * It is called from fp/fctl and, in a few instances, internally.
4285 *
4286 * Argument: ulph fp/fctl port handle
4287 * port_handle fcp_port structure
4288 * port_state Physical state of the port
4289 * port_top Topology
4290 * *devlist Pointer to the first entry of a table
4291 * containing the remote ports that can be
4292 * reached.
4293 * dev_cnt Number of entries pointed by devlist.
4294 * port_sid Port ID of the local port.
4295 *
4296 * Return Value: None
4297 */
4298 /*ARGSUSED*/
4299 static void
4300 fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
4301 uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
4302 uint32_t dev_cnt, uint32_t port_sid)
4303 {
4304 uint32_t link_count;
4305 int map_len = 0;
4306 struct fcp_port *pptr;
4307 fcp_map_tag_t *map_tag = NULL;
4308
4309 if ((pptr = fcp_get_port(port_handle)) == NULL) {
4310 fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
4311 return; /* nothing to work with! */
4312 }
4313
4314 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4315 fcp_trace, FCP_BUF_LEVEL_2, 0,
4316 "fcp_statec_callback: port state/dev_cnt/top ="
4317 "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
4318 dev_cnt, port_top);
4319
4320 mutex_enter(&pptr->port_mutex);
4321
4322 /*
4323 * If a thread is in detach, don't do anything.
4324 */
4325 if (pptr->port_state & (FCP_STATE_DETACHING |
4326 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4327 mutex_exit(&pptr->port_mutex);
4328 return;
4329 }
4330
4331 /*
4332 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
4333 * init_pkt is called, it knows whether or not the target's status
4334 * (or pd) might be changing.
4335 */
4336
4337 if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
4338 pptr->port_state |= FCP_STATE_IN_CB_DEVC;
4339 }
4340
4341 /*
4342 * the transport doesn't allocate or probe unless being
4343 * asked to by either the applications or ULPs
4344 *
4345 * in cases where the port is OFFLINE at the time of port
4346 * attach callback and the link comes ONLINE later, for
4347 * easier automatic node creation (i.e. without you having to
4348 * go out and run the utility to perform LOGINs) the
4349 * following conditional is helpful
4350 */
4351 pptr->port_phys_state = port_state;
4352
4353 if (dev_cnt) {
4354 mutex_exit(&pptr->port_mutex);
4355
4356 map_len = sizeof (*map_tag) * dev_cnt;
4357 map_tag = kmem_alloc(map_len, KM_NOSLEEP);
4358 if (map_tag == NULL) {
4359 fcp_log(CE_WARN, pptr->port_dip,
4360 "!fcp%d: failed to allocate for map tags; "
4361 " state change will not be processed",
4362 pptr->port_instance);
4363
4364 mutex_enter(&pptr->port_mutex);
4365 pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4366 mutex_exit(&pptr->port_mutex);
4367
4368 return;
4369 }
4370
4371 mutex_enter(&pptr->port_mutex);
4372 }
4373
4374 if (pptr->port_id != port_sid) {
4375 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4376 fcp_trace, FCP_BUF_LEVEL_3, 0,
4377 "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
4378 port_sid);
4379 /*
4380 * The local port changed ID. It is the first time a port ID
4381 * is assigned or something drastic happened. We might have
4382 * been unplugged and replugged on another loop or fabric port
4383 * or somebody grabbed the AL_PA we had or somebody rezoned
4384 * the fabric we were plugged into.
4385 */
4386 pptr->port_id = port_sid;
4387 }
4388
4389 switch (FC_PORT_STATE_MASK(port_state)) {
4390 case FC_STATE_OFFLINE:
4391 case FC_STATE_RESET_REQUESTED:
4392 /*
4393 * link has gone from online to offline -- just update the
4394 * state of this port to BUSY and MARKed to go offline
4395 */
4396 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4397 fcp_trace, FCP_BUF_LEVEL_3, 0,
4398 "link went offline");
4399 if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
4400 /*
4401 * We were offline a while ago and this one
4402 * seems to indicate that the loop has gone
4403 * dead forever.
4404 */
4405 pptr->port_tmp_cnt += dev_cnt;
4406 pptr->port_state &= ~FCP_STATE_OFFLINE;
4407 pptr->port_state |= FCP_STATE_INIT;
4408 link_count = pptr->port_link_cnt;
4409 fcp_handle_devices(pptr, devlist, dev_cnt,
4410 link_count, map_tag, FCP_CAUSE_LINK_DOWN);
4411 } else {
4412 pptr->port_link_cnt++;
4413 ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
4414 fcp_update_state(pptr, (FCP_LUN_BUSY |
4415 FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
4416 if (pptr->port_mpxio) {
4417 fcp_update_mpxio_path_verifybusy(pptr);
4418 }
4419 pptr->port_state |= FCP_STATE_OFFLINE;
4420 pptr->port_state &=
4421 ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
4422 pptr->port_tmp_cnt = 0;
4423 }
4424 mutex_exit(&pptr->port_mutex);
4425 break;
4426
4427 case FC_STATE_ONLINE:
4428 case FC_STATE_LIP:
4429 case FC_STATE_LIP_LBIT_SET:
4430 /*
4431 * link has gone from offline to online
4432 */
4433 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4434 fcp_trace, FCP_BUF_LEVEL_3, 0,
4435 "link went online");
4436
4437 pptr->port_link_cnt++;
4438
4439 while (pptr->port_ipkt_cnt) {
4440 mutex_exit(&pptr->port_mutex);
4441 delay(drv_usectohz(1000000));
4442 mutex_enter(&pptr->port_mutex);
4443 }
4444
4445 pptr->port_topology = port_top;
4446
4447 /*
4448 * The state of the targets and luns accessible through this
4449 * port is updated.
4450 */
4451 fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
4452 FCP_CAUSE_LINK_CHANGE);
4453
4454 pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
4455 pptr->port_state |= FCP_STATE_ONLINING;
4456 pptr->port_tmp_cnt = dev_cnt;
4457 link_count = pptr->port_link_cnt;
4458
4459 pptr->port_deadline = fcp_watchdog_time +
4460 FCP_ICMD_DEADLINE;
4461
4462 if (!dev_cnt) {
4463 /*
4464 * We go directly to the online state if no remote
4465 * ports were discovered.
4466 */
4467 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4468 fcp_trace, FCP_BUF_LEVEL_3, 0,
4469 "No remote ports discovered");
4470
4471 pptr->port_state &= ~FCP_STATE_ONLINING;
4472 pptr->port_state |= FCP_STATE_ONLINE;
4473 }
4474
4475 switch (port_top) {
4476 case FC_TOP_FABRIC:
4477 case FC_TOP_PUBLIC_LOOP:
4478 case FC_TOP_PRIVATE_LOOP:
4479 case FC_TOP_PT_PT:
4480
4481 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4482 fcp_retry_ns_registry(pptr, port_sid);
4483 }
4484
4485 fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
4486 map_tag, FCP_CAUSE_LINK_CHANGE);
4487 break;
4488
4489 default:
4490 /*
4491 * We got here because we were provided with an unknown
4492 * topology.
4493 */
4494 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4495 pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
4496 }
4497
4498 pptr->port_tmp_cnt -= dev_cnt;
4499 fcp_log(CE_WARN, pptr->port_dip,
4500 "!unknown/unsupported topology (0x%x)", port_top);
4501 break;
4502 }
4503 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4504 fcp_trace, FCP_BUF_LEVEL_3, 0,
4505 "Notify ssd of the reset to reinstate the reservations");
4506
4507 scsi_hba_reset_notify_callback(&pptr->port_mutex,
4508 &pptr->port_reset_notify_listf);
4509
4510 mutex_exit(&pptr->port_mutex);
4511
4512 break;
4513
4514 case FC_STATE_RESET:
4515 ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
4516 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4517 fcp_trace, FCP_BUF_LEVEL_3, 0,
4518 "RESET state, waiting for Offline/Online state_cb");
4519 mutex_exit(&pptr->port_mutex);
4520 break;
4521
4522 case FC_STATE_DEVICE_CHANGE:
4523 /*
4524 * We come here when an application has requested
4525 * Dynamic node creation/deletion in Fabric connectivity.
4526 */
4527 if (pptr->port_state & (FCP_STATE_OFFLINE |
4528 FCP_STATE_INIT)) {
4529 /*
4530 * This case can happen when the FCTL is in the
4531 * process of giving us on online and the host on
4532 * the other side issues a PLOGI/PLOGO. Ideally
4533 * the state changes should be serialized unless
4534 * they are opposite (online-offline).
4535 * The transport will give us a final state change
4536 * so we can ignore this for the time being.
4537 */
4538 pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4539 mutex_exit(&pptr->port_mutex);
4540 break;
4541 }
4542
4543 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4544 fcp_retry_ns_registry(pptr, port_sid);
4545 }
4546
4547 /*
4548 * Extend the deadline under steady state conditions
4549 * to provide more time for the device-change-commands
4550 */
4551 if (!pptr->port_ipkt_cnt) {
4552 pptr->port_deadline = fcp_watchdog_time +
4553 FCP_ICMD_DEADLINE;
4554 }
4555
4556 /*
4557 * There is another race condition here, where if we were
4558 * in ONLINEING state and a devices in the map logs out,
4559 * fp will give another state change as DEVICE_CHANGE
4560 * and OLD. This will result in that target being offlined.
4561 * The pd_handle is freed. If from the first statec callback
4562 * we were going to fire a PLOGI/PRLI, the system will
4563 * panic in fc_ulp_transport with invalid pd_handle.
4564 * The fix is to check for the link_cnt before issuing
4565 * any command down.
4566 */
4567 fcp_update_targets(pptr, devlist, dev_cnt,
4568 FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);
4569
4570 link_count = pptr->port_link_cnt;
4571
4572 fcp_handle_devices(pptr, devlist, dev_cnt,
4573 link_count, map_tag, FCP_CAUSE_TGT_CHANGE);
4574
4575 pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4576
4577 mutex_exit(&pptr->port_mutex);
4578 break;
4579
4580 case FC_STATE_TARGET_PORT_RESET:
4581 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4582 fcp_retry_ns_registry(pptr, port_sid);
4583 }
4584
4585 /* Do nothing else */
4586 mutex_exit(&pptr->port_mutex);
4587 break;
4588
4589 default:
4590 fcp_log(CE_WARN, pptr->port_dip,
4591 "!Invalid state change=0x%x", port_state);
4592 mutex_exit(&pptr->port_mutex);
4593 break;
4594 }
4595
4596 if (map_tag) {
4597 kmem_free(map_tag, map_len);
4598 }
4599 }
4600
4601 /*
4602 * Function: fcp_handle_devices
4603 *
4604 * Description: This function updates the devices currently known by
4605 * walking the list provided by the caller. The list passed
4606 * by the caller is supposed to be the list of reachable
4607 * devices.
4608 *
4609 * Argument: *pptr Fcp port structure.
4610 * *devlist Pointer to the first entry of a table
4611 * containing the remote ports that can be
4612 * reached.
4613 * dev_cnt Number of entries pointed by devlist.
4614 * link_cnt Link state count.
4615 * *map_tag Array of fcp_map_tag_t structures.
4616 * cause What caused this function to be called.
4617 *
4618 * Return Value: None
4619 *
4620 * Notes: The pptr->port_mutex must be held.
4621 */
static void
fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
    uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
{
	int			i;
	int			check_finish_init = 0;
	fc_portmap_t		*map_entry;
	struct fcp_tgt		*ptgt = NULL;

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "fcp_handle_devices: called for %d dev(s)", dev_cnt);

	/* The caller must supply one map_tag slot per devlist entry. */
	if (dev_cnt) {
		ASSERT(map_tag != NULL);
	}

	/*
	 * The following code goes through the list of remote ports that are
	 * accessible through this (pptr) local port (The list walked is the
	 * one provided by the caller which is the list of the remote ports
	 * currently reachable). It checks if any of them was already
	 * known by looking for the corresponding target structure based on
	 * the world wide name. If a target is part of the list it is tagged
	 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
	 *
	 * Old comment
	 * -----------
	 * Before we drop port mutex; we MUST get the tags updated; This
	 * two step process is somewhat slow, but more reliable.
	 *
	 * Each pass below also bails out early if port_link_cnt moves,
	 * i.e. if another link state change supersedes this one.
	 */
	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
		map_entry = &(devlist[i]);

		/*
		 * get ptr to this map entry in our port's
		 * list (if any)
		 */
		ptgt = fcp_lookup_target(pptr,
		    (uchar_t *)&(map_entry->map_pwwn));

		if (ptgt) {
			/* Snapshot the change counter for later passes. */
			map_tag[i] = ptgt->tgt_change_cnt;
			if (cause == FCP_CAUSE_LINK_CHANGE) {
				ptgt->tgt_aux_state = FCP_TGT_TAGGED;
			}
		}
	}

	/*
	 * At this point we know which devices of the new list were already
	 * known (The field tgt_aux_state of the target structure has been
	 * set to FCP_TGT_TAGGED).
	 *
	 * The following code goes through the list of targets currently known
	 * by the local port (the list is actually a hashing table). If a
	 * target is found and is not tagged, it means the target cannot
	 * be reached anymore through the local port (pptr). It is offlined.
	 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
	 */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			mutex_enter(&ptgt->tgt_mutex);
			if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
			    (cause == FCP_CAUSE_LINK_CHANGE) &&
			    !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
				fcp_offline_target_now(pptr, ptgt,
				    link_cnt, ptgt->tgt_change_cnt, 0);
			}
			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	/*
	 * At this point, the devices that were known but cannot be reached
	 * anymore, have most likely been offlined.
	 *
	 * The following section of code seems to go through the list of
	 * remote ports that can now be reached. For every single one it
	 * checks if it is already known or if it is a new port.
	 */
	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {

		/*
		 * Finish the init accounting for the PREVIOUS entry
		 * before processing this one (deferred from the bottom
		 * of the prior iteration).
		 */
		if (check_finish_init) {
			ASSERT(i > 0);
			(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
			    map_tag[i - 1], cause);
			check_finish_init = 0;
		}

		/* get a pointer to this map entry */
		map_entry = &(devlist[i]);

		/*
		 * Check for the duplicate map entry flag. If we have marked
		 * this entry as a duplicate we skip it since the correct
		 * (perhaps even same) state change will be encountered
		 * later in the list.
		 */
		if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
			continue;
		}

		/* get ptr to this map entry in our port's list (if any) */
		ptgt = fcp_lookup_target(pptr,
		    (uchar_t *)&(map_entry->map_pwwn));

		if (ptgt) {
			/*
			 * This device was already known. The field
			 * tgt_aux_state is reset (was probably set to
			 * FCP_TGT_TAGGED previously in this routine).
			 */
			ptgt->tgt_aux_state = 0;
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "handle_devices: map did/state/type/flags = "
			    "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
			    "tgt_state=%d",
			    map_entry->map_did.port_id, map_entry->map_state,
			    map_entry->map_type, map_entry->map_flags,
			    ptgt->tgt_d_id, ptgt->tgt_state);
		}

		if (map_entry->map_type == PORT_DEVICE_OLD ||
		    map_entry->map_type == PORT_DEVICE_NEW ||
		    map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED ||
		    map_entry->map_type == PORT_DEVICE_CHANGED) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "map_type=%x, did = %x",
			    map_entry->map_type,
			    map_entry->map_did.port_id);
		}

		switch (map_entry->map_type) {
		case PORT_DEVICE_NOCHANGE:
		case PORT_DEVICE_USER_CREATE:
		case PORT_DEVICE_USER_LOGIN:
		case PORT_DEVICE_NEW:
		case PORT_DEVICE_REPORTLUN_CHANGED:
			FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);

			/* ptgt may be NULL here; mapflags allocates one. */
			if (fcp_handle_mapflags(pptr, ptgt, map_entry,
			    link_cnt, (ptgt) ? map_tag[i] : 0,
			    cause) == TRUE) {

				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_2);
				check_finish_init++;
			}
			break;

		case PORT_DEVICE_OLD:
			if (ptgt != NULL) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_3);

				mutex_enter(&ptgt->tgt_mutex);
				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
					/*
					 * Must do an in-line wait for I/Os
					 * to get drained
					 *
					 * NOTE(review): port_mutex is
					 * dropped while polling below, so
					 * port state can move underneath;
					 * behavior kept as-is.
					 */
					mutex_exit(&ptgt->tgt_mutex);
					mutex_exit(&pptr->port_mutex);

					mutex_enter(&ptgt->tgt_mutex);
					while (ptgt->tgt_ipkt_cnt ||
					    fcp_outstanding_lun_cmds(ptgt)
					    == FC_SUCCESS) {
						mutex_exit(&ptgt->tgt_mutex);
						delay(drv_usectohz(1000000));
						mutex_enter(&ptgt->tgt_mutex);
					}
					mutex_exit(&ptgt->tgt_mutex);

					mutex_enter(&pptr->port_mutex);
					mutex_enter(&ptgt->tgt_mutex);

					(void) fcp_offline_target(pptr, ptgt,
					    link_cnt, map_tag[i], 0, 0);
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
			check_finish_init++;
			break;

		case PORT_DEVICE_USER_DELETE:
		case PORT_DEVICE_USER_LOGOUT:
			if (ptgt != NULL) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_4);

				mutex_enter(&ptgt->tgt_mutex);
				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
					(void) fcp_offline_target(pptr, ptgt,
					    link_cnt, map_tag[i], 1, 0);
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
			check_finish_init++;
			break;

		case PORT_DEVICE_CHANGED:
			if (ptgt != NULL) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_5);

				if (fcp_device_changed(pptr, ptgt,
				    map_entry, link_cnt, map_tag[i],
				    cause) == TRUE) {
					check_finish_init++;
				}
			} else {
				/* Unknown target: treat as a new device. */
				if (fcp_handle_mapflags(pptr, ptgt,
				    map_entry, link_cnt, 0, cause) == TRUE) {
					check_finish_init++;
				}
			}
			break;

		default:
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Invalid map_type=0x%x", map_entry->map_type);
			check_finish_init++;
			break;
		}
	}

	/*
	 * Settle the finish-init accounting left over from the last
	 * devlist entry; with no devices at all, offline everything.
	 */
	if (check_finish_init && pptr->port_link_cnt == link_cnt) {
		ASSERT(i > 0);
		(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
		    map_tag[i-1], cause);
	} else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
		fcp_offline_all(pptr, link_cnt, cause);
	}
}
4861
/*
 * Function:	fcp_handle_reportlun_changed
 *
 * Description:	Handles a REPORTLUN_CHANGED indication for a target. LUN 0
 *		is (re)created if it does not exist yet, marked busy, and a
 *		SCMD_REPORT_LUN command is sent to it so the target's LUN
 *		inventory gets rediscovered. The rest of the discovery
 *		continues in the REPORT LUN completion path.
 *
 * Argument:	*ptgt		Target structure.
 *		cause		What caused this function to be called.
 *
 * Return Value: TRUE		Failed (LUN 0 allocation or command send).
 *		 FALSE		Succeeded (REPORT LUN is in flight).
 */
static int
fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause)
{
	struct fcp_lun *plun;
	struct fcp_port *pptr;
	int rscn_count;
	int lun0_newalloc;
	int ret = TRUE;

	ASSERT(ptgt);
	pptr = ptgt->tgt_port;
	lun0_newalloc = 0;
	if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
		/*
		 * no LUN struct for LUN 0 yet exists,
		 * so create one
		 */
		plun = fcp_alloc_lun(ptgt);
		if (plun == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to allocate lun 0 for"
			    " D_ID=%x", ptgt->tgt_d_id);
			return (ret);
		}
		lun0_newalloc = 1;
	}

	mutex_enter(&ptgt->tgt_mutex);
	/*
	 * consider lun 0 as device not connected if it is
	 * offlined or newly allocated
	 */
	if ((plun->lun_state & FCP_LUN_OFFLINE) || lun0_newalloc) {
		plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
	}
	/* LUN 0 is forced online/busy so the REPORT LUN can be issued. */
	plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
	plun->lun_state &= ~FCP_LUN_OFFLINE;
	/* Reset the LUN accounting; REPORT LUN will rebuild it. */
	ptgt->tgt_lun_cnt = 1;
	ptgt->tgt_report_lun_cnt = 0;
	mutex_exit(&ptgt->tgt_mutex);

	rscn_count = fc_ulp_get_rscn_count(pptr->port_fp_handle);
	if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
	    sizeof (struct fcp_reportlun_resp), pptr->port_link_cnt,
	    ptgt->tgt_change_cnt, cause, rscn_count) != DDI_SUCCESS) {
		/* Send failure is only traced; ret stays TRUE (failed). */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!Failed to send REPORTLUN "
		    "to D_ID=%x", ptgt->tgt_d_id);
	} else {
		ret = FALSE;
	}

	return (ret);
}
4916
4917 /*
4918 * Function: fcp_handle_mapflags
4919 *
4920 * Description: This function creates a target structure if the ptgt passed
4921 * is NULL. It also kicks off the PLOGI if we are not logged
4922 * into the target yet or the PRLI if we are logged into the
4923 * target already. The rest of the treatment is done in the
4924 * callbacks of the PLOGI or PRLI.
4925 *
4926 * Argument: *pptr FCP Port structure.
4927 * *ptgt Target structure.
4928 * *map_entry Array of fc_portmap_t structures.
4929 * link_cnt Link state count.
4930 * tgt_cnt Target state count.
4931 * cause What caused this function to be called.
4932 *
4933 * Return Value: TRUE Failed
4934 * FALSE Succeeded
4935 *
4936 * Notes: pptr->port_mutex must be owned.
4937 */
static int
fcp_handle_mapflags(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
{
	int			lcount;
	int			tcount;
	int			ret = TRUE;
	int			alloc;
	struct fcp_ipkt	*icmd;
	struct fcp_lun	*pseq_lun = NULL;
	uchar_t			opcode;
	int			valid_ptgt_was_passed = FALSE;

	ASSERT(mutex_owned(&pptr->port_mutex));

	/*
	 * This case is possible where the FCTL has come up and done discovery
	 * before FCP was loaded and attached. FCTL would have discovered the
	 * devices and later the ULP came online. In this case ULP's would get
	 * PORT_DEVICE_NOCHANGE but target would be NULL.
	 */
	if (ptgt == NULL) {
		/* don't already have a target */
		mutex_exit(&pptr->port_mutex);
		ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
		mutex_enter(&pptr->port_mutex);

		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			return (ret);
		}
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_statec_cause = cause;
		ptgt->tgt_tmp_cnt = 1;
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		valid_ptgt_was_passed = TRUE;
	}

	/*
	 * Copy in the target parameters
	 */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_d_id = map_entry->map_did.port_id;
	ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
	ptgt->tgt_pd_handle = map_entry->map_pd;
	ptgt->tgt_fca_dev = NULL;

	/* Copy port and node WWNs */
	bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
	    FC_WWN_SIZE);
	bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
	    FC_WWN_SIZE);

	/*
	 * For a pre-existing, logged-in target with NOCHANGE (and the
	 * skip-discovery flag clear), bail out early if it carries any
	 * online tape LUN: mark the target instead of re-probing it.
	 */
	if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
	    (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
	    (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
	    valid_ptgt_was_passed) {
		/*
		 * determine if there are any tape LUNs on this target
		 */
		for (pseq_lun = ptgt->tgt_lun;
		    pseq_lun != NULL;
		    pseq_lun = pseq_lun->lun_next) {
			if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
			    !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
				fcp_update_tgt_state(ptgt, FCP_RESET,
				    FCP_LUN_MARK);
				mutex_exit(&ptgt->tgt_mutex);
				return (ret);
			}
		}
	}

	/*
	 * if UA'REPORT_LUN_CHANGED received,
	 * send out REPORT LUN promptly, skip PLOGI/PRLI process
	 */
	if (map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED) {
		ptgt->tgt_state &= ~(FCP_TGT_OFFLINE | FCP_TGT_MARK);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		ret = fcp_handle_reportlun_changed(ptgt, cause);

		mutex_enter(&pptr->port_mutex);
		return (ret);
	}

	/*
	 * If ptgt was NULL when this function was entered, then tgt_node_state
	 * was never specifically initialized but zeroed out which means
	 * FCP_TGT_NODE_NONE.
	 */
	switch (ptgt->tgt_node_state) {
	case FCP_TGT_NODE_NONE:
	case FCP_TGT_NODE_ON_DEMAND:
		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    !fcp_enable_auto_configuration &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    fcp_enable_auto_configuration &&
		    (ptgt->tgt_manual_config_only == 1) &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			/*
			 * If auto configuration is set and
			 * the tgt_manual_config_only flag is set then
			 * we only want the user to be able to change
			 * the state through create_on_demand.
			 */
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
		break;

	case FCP_TGT_NODE_PRESENT:
		break;
	}
	/*
	 * If we are booting from a fabric device, make sure we
	 * mark the node state appropriately for this target to be
	 * enumerated
	 */
	if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
		if (bcmp((caddr_t)pptr->port_boot_wwn,
		    (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
		    sizeof (ptgt->tgt_port_wwn)) == 0) {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
	}
	mutex_exit(&ptgt->tgt_mutex);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
	    map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
	    map_entry->map_rscn_info.ulp_rscn_count);

	mutex_enter(&ptgt->tgt_mutex);

	/*
	 * Reset target OFFLINE state and mark the target BUSY
	 */
	ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
	ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);

	tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
	lcount = link_cnt;

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/*
	 * if we are already logged in, then we do a PRLI, else
	 * we do a PLOGI first (to get logged in)
	 *
	 * We will not check if we are the PLOGI initiator
	 */
	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;

	/* One buffer size fits both the PLOGI and PRLI payloads. */
	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));

	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA, lcount, tcount,
	    cause, map_entry->map_rscn_info.ulp_rscn_count);

	if (icmd == NULL) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
		/*
		 * We've exited port_mutex before calling fcp_icmd_alloc,
		 * we need to make sure we reacquire it before returning.
		 *
		 * NOTE(review): FALSE ("Succeeded") is returned here even
		 * though the allocation failed -- presumably so the caller
		 * does not run finish-init accounting; confirm intent.
		 */
		mutex_enter(&pptr->port_mutex);
		return (FALSE);
	}

	/* TRUE is only returned while target is intended skipped */
	ret = FALSE;
	/* discover info about this target */
	if ((fcp_send_els(pptr, ptgt, icmd, opcode,
	    lcount, tcount, cause)) == DDI_SUCCESS) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
	} else {
		/* Send failed: release the internal packet we allocated. */
		fcp_icmd_free(pptr, icmd);
		ret = TRUE;
	}
	mutex_enter(&pptr->port_mutex);

	return (ret);
}
5132
5133 /*
5134 * Function: fcp_send_els
5135 *
5136 * Description: Sends an ELS to the target specified by the caller. Supports
5137 * PLOGI and PRLI.
5138 *
5139 * Argument: *pptr Fcp port.
5140 * *ptgt Target to send the ELS to.
5141 * *icmd Internal packet
5142 * opcode ELS opcode
5143 * lcount Link state change counter
5144 * tcount Target state change counter
5145 * cause What caused the call
5146 *
5147 * Return Value: DDI_SUCCESS
5148 * Others
5149 */
static int
fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
{
	fc_packet_t	*fpkt;
	fc_frame_hdr_t	*hp;
	int		internal = 0;	/* nonzero: icmd allocated locally */
	int		alloc;
	int		cmd_len;
	int		resp_len;
	int		res = DDI_FAILURE; /* default result */
	int		rval = DDI_FAILURE;

	ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
	ASSERT(ptgt->tgt_port == pptr);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
	    (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");

	/* Payload sizes depend on the ELS being issued. */
	if (opcode == LA_ELS_PLOGI) {
		cmd_len = sizeof (la_els_logi_t);
		resp_len = sizeof (la_els_logi_t);
	} else {
		ASSERT(opcode == LA_ELS_PRLI);
		cmd_len = sizeof (la_els_prli_t);
		resp_len = sizeof (la_els_prli_t);
	}

	/*
	 * If the caller did not hand us a packet, allocate one here and
	 * remember it via 'internal' so we free it on failure below.
	 * Caller-supplied packets are the caller's to free.
	 */
	if (icmd == NULL) {
		alloc = FCP_MAX(sizeof (la_els_logi_t),
		    sizeof (la_els_prli_t));
		icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
		    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
		    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
		if (icmd == NULL) {
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
			return (res);
		}
		internal++;
	}
	fpkt = icmd->ipkt_fpkt;

	fpkt->pkt_cmdlen = cmd_len;
	fpkt->pkt_rsplen = resp_len;
	fpkt->pkt_datalen = 0;
	icmd->ipkt_retries = 0;

	/* fill in fpkt info */
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;

	/* get ptr to frame hdr in fpkt */
	hp = &fpkt->pkt_cmd_fhdr;

	/*
	 * fill in frame hdr
	 */
	hp->r_ctl = R_CTL_ELS_REQ;
	hp->s_id = pptr->port_id;	/* source ID */
	hp->d_id = ptgt->tgt_d_id;	/* dest ID */
	hp->type = FC_TYPE_EXTENDED_LS;
	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
	hp->seq_id = 0;
	hp->rsvd = 0;
	hp->df_ctl = 0;
	hp->seq_cnt = 0;
	hp->ox_id = 0xffff;		/* i.e. none */
	hp->rx_id = 0xffff;		/* i.e. none */
	hp->ro = 0;

	/*
	 * at this point we have a filled in cmd pkt
	 *
	 * fill in the respective info, then use the transport to send
	 * the packet
	 *
	 * for a PLOGI call fc_ulp_login(), and
	 * for a PRLI call fc_ulp_issue_els()
	 */
	switch (opcode) {
	case LA_ELS_PLOGI: {
		struct la_els_logi logi;

		bzero(&logi, sizeof (struct la_els_logi));

		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;
		logi.ls_code.ls_code = LA_ELS_PLOGI;
		logi.ls_code.mbz = 0;

		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));

		icmd->ipkt_opcode = LA_ELS_PLOGI;

		/*
		 * Only send if no link/target state change raced us
		 * since the packet was built.
		 */
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

			mutex_exit(&pptr->port_mutex);

			rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}

			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);

			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PLOGI");
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "fcp_send_els1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
		}
		break;
	}

	case LA_ELS_PRLI: {
		struct la_els_prli prli;
		struct fcp_prli *fprli;

		bzero(&prli, sizeof (struct la_els_prli));

		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;

		/* fill in PRLI cmd ELS fields */
		prli.ls_code = LA_ELS_PRLI;
		prli.page_length = 0x10;	/* huh? */
		prli.payload_length = sizeof (struct la_els_prli);

		icmd->ipkt_opcode = LA_ELS_PRLI;

		/* get ptr to PRLI service params */
		fprli = (struct fcp_prli *)prli.service_params;

		/* fill in service params */
		fprli->type = 0x08;	/* FCP-SCSI process type */
		fprli->resvd1 = 0;
		fprli->orig_process_assoc_valid = 0;
		fprli->resp_process_assoc_valid = 0;
		fprli->establish_image_pair = 1;
		fprli->resvd2 = 0;
		fprli->resvd3 = 0;
		fprli->obsolete_1 = 0;
		fprli->obsolete_2 = 0;
		fprli->data_overlay_allowed = 0;
		fprli->initiator_fn = 1;
		fprli->confirmed_compl_allowed = 1;

		/* Advertise target function only if the ltct ULP exists. */
		if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
			fprli->target_fn = 1;
		} else {
			fprli->target_fn = 0;
		}

		fprli->retry = 1;
		fprli->read_xfer_rdy_disabled = 1;
		fprli->write_xfer_rdy_disabled = 0;

		FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));

		/* issue the PRLI request */

		/*
		 * Only send if no link/target state change raced us
		 * since the packet was built.
		 */
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

			mutex_exit(&pptr->port_mutex);

			rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}

			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);

			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PRLI");
		} else {
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
		}
		break;
	}

	default:
		fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
		break;
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: returning %d", res);

	/* On failure, free the packet only if we allocated it here. */
	if (res != DDI_SUCCESS) {
		if (internal) {
			fcp_icmd_free(pptr, icmd);
		}
	}

	return (res);
}
5361
5362
5363 /*
5364 * called internally update the state of all of the tgts and each LUN
5365 * for this port (i.e. each target known to be attached to this port)
5366 * if they are not already offline
5367 *
5368 * must be called with the port mutex owned
5369 *
5370 * acquires and releases the target mutexes for each target attached
5371 * to this port
5372 */
5373 void
5374 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5375 {
5376 int i;
5377 struct fcp_tgt *ptgt;
5378
5379 ASSERT(mutex_owned(&pptr->port_mutex));
5380
5381 for (i = 0; i < FCP_NUM_HASH; i++) {
5382 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5383 ptgt = ptgt->tgt_next) {
5384 mutex_enter(&ptgt->tgt_mutex);
5385 fcp_update_tgt_state(ptgt, FCP_SET, state);
5386 ptgt->tgt_change_cnt++;
5387 ptgt->tgt_statec_cause = cause;
5388 ptgt->tgt_tmp_cnt = 1;
5389 ptgt->tgt_done = 0;
5390 mutex_exit(&ptgt->tgt_mutex);
5391 }
5392 }
5393 }
5394
5395
5396 static void
5397 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5398 {
5399 int i;
5400 int ndevs;
5401 struct fcp_tgt *ptgt;
5402
5403 ASSERT(mutex_owned(&pptr->port_mutex));
5404
5405 for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5406 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5407 ptgt = ptgt->tgt_next) {
5408 ndevs++;
5409 }
5410 }
5411
5412 if (ndevs == 0) {
5413 return;
5414 }
5415 pptr->port_tmp_cnt = ndevs;
5416
5417 for (i = 0; i < FCP_NUM_HASH; i++) {
5418 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5419 ptgt = ptgt->tgt_next) {
5420 (void) fcp_call_finish_init_held(pptr, ptgt,
5421 lcount, ptgt->tgt_change_cnt, cause);
5422 }
5423 }
5424 }
5425
5426 /*
5427 * Function: fcp_update_tgt_state
5428 *
5429 * Description: This function updates the field tgt_state of a target. That
5430 * field is a bitmap and which bit can be set or reset
5431 * individually. The action applied to the target state is also
5432 * applied to all the LUNs belonging to the target (provided the
5433 * LUN is not offline). A side effect of applying the state
5434 * modification to the target and the LUNs is the field tgt_trace
5435 * of the target and lun_trace of the LUNs is set to zero.
5436 *
5437 *
5438 * Argument: *ptgt Target structure.
5439 * flag Flag indication what action to apply (set/reset).
5440 * state State bits to update.
5441 *
5442 * Return Value: None
5443 *
5444 * Context: Interrupt, Kernel or User context.
5445 * The mutex of the target (ptgt->tgt_mutex) must be owned when
5446 * calling this function.
5447 */
5448 void
5449 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5450 {
5451 struct fcp_lun *plun;
5452
5453 ASSERT(mutex_owned(&ptgt->tgt_mutex));
5454
5455 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5456 /* The target is not offline. */
5457 if (flag == FCP_SET) {
5458 ptgt->tgt_state |= state;
5459 ptgt->tgt_trace = 0;
5460 } else {
5461 ptgt->tgt_state &= ~state;
5462 }
5463
5464 for (plun = ptgt->tgt_lun; plun != NULL;
5465 plun = plun->lun_next) {
5466 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5467 /* The LUN is not offline. */
5468 if (flag == FCP_SET) {
5469 plun->lun_state |= state;
5470 plun->lun_trace = 0;
5471 } else {
5472 plun->lun_state &= ~state;
5473 }
5474 }
5475 }
5476 }
5477 }
5478
5479 /*
 * Function: fcp_update_lun_state
 *
 * Description: This function updates the field lun_state of a LUN. That
 *		field is a bitmap whose bits can be set or reset
 *		individually.
5485 *
5486 * Argument: *plun LUN structure.
5487 * flag Flag indication what action to apply (set/reset).
5488 * state State bits to update.
5489 *
5490 * Return Value: None
5491 *
5492 * Context: Interrupt, Kernel or User context.
5493 * The mutex of the target (ptgt->tgt_mutex) must be owned when
5494 * calling this function.
5495 */
void
fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
{
	struct fcp_tgt *ptgt = plun->lun_tgt;

	/* The owning target's mutex protects lun_state. */
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	/*
	 * NOTE(review): this tests lun_state against FCP_TGT_OFFLINE rather
	 * than FCP_LUN_OFFLINE.  That is only correct if the two flags share
	 * the same bit value -- verify against fcpvar.h.
	 */
	if (!(plun->lun_state & FCP_TGT_OFFLINE)) {
		if (flag == FCP_SET) {
			plun->lun_state |= state;
		} else {
			plun->lun_state &= ~state;
		}
	}
}
5511
5512 /*
5513 * Function: fcp_get_port
5514 *
5515 * Description: This function returns the fcp_port structure from the opaque
5516 * handle passed by the caller. That opaque handle is the handle
5517 * used by fp/fctl to identify a particular local port. That
5518 * handle has been stored in the corresponding fcp_port
5519 * structure. This function is going to walk the global list of
5520 * fcp_port structures till one has a port_fp_handle that matches
5521 * the handle passed by the caller. This function enters the
5522 * mutex fcp_global_mutex while walking the global list and then
5523 * releases it.
5524 *
5525 * Argument: port_handle Opaque handle that fp/fctl uses to identify a
5526 * particular port.
5527 *
5528 * Return Value: NULL Not found.
5529 * Not NULL Pointer to the fcp_port structure.
5530 *
5531 * Context: Interrupt, Kernel or User context.
5532 */
5533 static struct fcp_port *
5534 fcp_get_port(opaque_t port_handle)
5535 {
5536 struct fcp_port *pptr;
5537
5538 ASSERT(port_handle != NULL);
5539
5540 mutex_enter(&fcp_global_mutex);
5541 for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5542 if (pptr->port_fp_handle == port_handle) {
5543 break;
5544 }
5545 }
5546 mutex_exit(&fcp_global_mutex);
5547
5548 return (pptr);
5549 }
5550
5551
5552 static void
5553 fcp_unsol_callback(fc_packet_t *fpkt)
5554 {
5555 struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5556 struct fcp_port *pptr = icmd->ipkt_port;
5557
5558 if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5559 caddr_t state, reason, action, expln;
5560
5561 (void) fc_ulp_pkt_error(fpkt, &state, &reason,
5562 &action, &expln);
5563
5564 fcp_log(CE_WARN, pptr->port_dip,
5565 "!couldn't post response to unsolicited request: "
5566 " state=%s reason=%s rx_id=%x ox_id=%x",
5567 state, reason, fpkt->pkt_cmd_fhdr.ox_id,
5568 fpkt->pkt_cmd_fhdr.rx_id);
5569 }
5570 fcp_icmd_free(pptr, icmd);
5571 }
5572
5573
5574 /*
5575 * Perform general purpose preparation of a response to an unsolicited request
5576 */
5577 static void
5578 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
5579 uchar_t r_ctl, uchar_t type)
5580 {
5581 pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
5582 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
5583 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
5584 pkt->pkt_cmd_fhdr.type = type;
5585 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
5586 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
5587 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl;
5588 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
5589 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
5590 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
5591 pkt->pkt_cmd_fhdr.ro = 0;
5592 pkt->pkt_cmd_fhdr.rsvd = 0;
5593 pkt->pkt_comp = fcp_unsol_callback;
5594 pkt->pkt_pd = NULL;
5595 pkt->pkt_ub_resp_token = (opaque_t)buf;
5596 }
5597
5598
/*
 * Handle an unsolicited PRLI: build an LS_ACC response advertising this
 * port's FCP service parameters and send it back to the originator.
 * Returns FC_SUCCESS when the response was issued (or queued for retry),
 * FC_FAILURE otherwise.
 */
/*ARGSUSED*/
static int
fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
{
	fc_packet_t		*fpkt;
	struct la_els_prli	prli;
	struct fcp_prli		*fprli;
	struct fcp_ipkt		*icmd;
	struct la_els_prli	*from;
	struct fcp_prli		*orig;
	struct fcp_tgt		*ptgt;
	int			tcount = 0;
	int			lcount;

	from = (struct la_els_prli *)buf->ub_buffer;
	orig = (struct fcp_prli *)from->service_params;

	/* Snapshot the target's change count if the sender is known. */
	if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
	    NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		tcount = ptgt->tgt_change_cnt;
		mutex_exit(&ptgt->tgt_mutex);
	}

	/* Snapshot the link generation so stale responses can be dropped. */
	mutex_enter(&pptr->port_mutex);
	lcount = pptr->port_link_cnt;
	mutex_exit(&pptr->port_mutex);

	if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
	    sizeof (la_els_prli_t), 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT)) == NULL) {
		return (FC_FAILURE);
	}

	fpkt = icmd->ipkt_fpkt;
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
	fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
	fpkt->pkt_rsplen = 0;
	fpkt->pkt_datalen = 0;

	icmd->ipkt_opcode = LA_ELS_PRLI;

	/* Build the LS_ACC PRLI payload. */
	bzero(&prli, sizeof (struct la_els_prli));
	fprli = (struct fcp_prli *)prli.service_params;
	prli.ls_code = LA_ELS_ACC;
	prli.page_length = 0x10;
	prli.payload_length = sizeof (struct la_els_prli);

	/* fill in service params */
	fprli->type = 0x08;	/* FCP type code */
	fprli->resvd1 = 0;
	/* Echo the originator's process-associator settings. */
	fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
	fprli->orig_process_associator = orig->orig_process_associator;
	fprli->resp_process_assoc_valid = 0;
	fprli->establish_image_pair = 1;
	fprli->resvd2 = 0;
	fprli->resvd3 = 0;
	fprli->obsolete_1 = 0;
	fprli->obsolete_2 = 0;
	fprli->data_overlay_allowed = 0;
	fprli->initiator_fn = 1;
	fprli->confirmed_compl_allowed = 1;

	/* Advertise target mode only if the target-mode ULP is present. */
	if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
		fprli->target_fn = 1;
	} else {
		fprli->target_fn = 0;
	}

	fprli->retry = 1;
	fprli->read_xfer_rdy_disabled = 1;
	fprli->write_xfer_rdy_disabled = 0;

	/* save the unsol prli payload first */
	FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
	    fpkt->pkt_resp_acc, sizeof (struct la_els_prli));

	FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
	    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));

	fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);

	mutex_enter(&pptr->port_mutex);
	if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
		int rval;
		mutex_exit(&pptr->port_mutex);

		if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
		    FC_SUCCESS) {
			/* Transient link state: queue for a later retry. */
			if ((rval == FC_STATEC_BUSY || rval == FC_OFFLINE) &&
			    ptgt != NULL) {
				fcp_queue_ipkt(pptr, fpkt);
				return (FC_SUCCESS);
			}
			/* Let it timeout */
			fcp_icmd_free(pptr, icmd);
			return (FC_FAILURE);
		}
	} else {
		/* Link changed since the PRLI arrived; drop the response. */
		mutex_exit(&pptr->port_mutex);
		fcp_icmd_free(pptr, icmd);
		return (FC_FAILURE);
	}

	/* The response is on the wire; give the unsolicited buffer back. */
	(void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);

	return (FC_SUCCESS);
}
5709
5710 /*
5711 * Function: fcp_icmd_alloc
5712 *
 * Description: This function allocates a fcp_ipkt structure. The pkt_comp
5714 * field is initialized to fcp_icmd_callback. Sometimes it is
5715 * modified by the caller (such as fcp_send_scsi). The
5716 * structure is also tied to the state of the line and of the
5717 * target at a particular time. That link is established by
5718 * setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
5719 * and tcount which came respectively from pptr->link_cnt and
5720 * ptgt->tgt_change_cnt.
5721 *
5722 * Argument: *pptr Fcp port.
5723 * *ptgt Target (destination of the command).
5724 * cmd_len Length of the command.
5725 * resp_len Length of the expected response.
5726 * data_len Length of the data.
 *		nodma		Indicates whether the command and response
 *				will be transferred through DMA or not.
5729 * lcount Link state change counter.
5730 * tcount Target state change counter.
5731 * cause Reason that lead to this call.
5732 *
5733 * Return Value: NULL Failed.
5734 * Not NULL Internal packet address.
5735 */
static struct fcp_ipkt *
fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
    int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
    uint32_t rscn_count)
{
	int		dma_setup = 0;
	fc_packet_t	*fpkt;
	struct fcp_ipkt	*icmd = NULL;

	/*
	 * One allocation covers the ipkt, the data DMA cookie array and the
	 * FCA private area; the pieces are carved out of it below.
	 */
	icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
	    pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
	    KM_NOSLEEP);
	if (icmd == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!internal packet allocation failed");
		return (NULL);
	}

	/*
	 * initialize the allocated packet
	 */
	icmd->ipkt_nodma = nodma;
	icmd->ipkt_next = icmd->ipkt_prev = NULL;
	icmd->ipkt_lun = NULL;

	/* Tie the packet to the current link/target generation. */
	icmd->ipkt_link_cnt = lcount;
	icmd->ipkt_change_cnt = tcount;
	icmd->ipkt_cause = cause;

	mutex_enter(&pptr->port_mutex);
	icmd->ipkt_port = pptr;
	mutex_exit(&pptr->port_mutex);

	/* keep track of amt of data to be sent in pkt */
	icmd->ipkt_cmdlen = cmd_len;
	icmd->ipkt_resplen = resp_len;
	icmd->ipkt_datalen = data_len;

	/* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
	icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);

	/* set pkt's private ptr to point to cmd pkt */
	icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;

	/* set FCA private ptr to memory just beyond */
	icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
	    ((char *)icmd + sizeof (struct fcp_ipkt) +
	    pptr->port_dmacookie_sz);

	/* get ptr to fpkt substruct and fill it in */
	fpkt = icmd->ipkt_fpkt;
	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
	    sizeof (struct fcp_ipkt));

	if (ptgt != NULL) {
		icmd->ipkt_tgt = ptgt;
		fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
	}

	/* default completion routine; some callers override it afterwards */
	fpkt->pkt_comp = fcp_icmd_callback;
	fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
	fpkt->pkt_cmdlen = cmd_len;
	fpkt->pkt_rsplen = resp_len;
	fpkt->pkt_datalen = data_len;

	/*
	 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
	 * rscn_count as fcp knows down to the transport. If a valid count was
	 * passed into this function, we allocate memory to actually pass down
	 * this info.
	 *
	 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
	 * basically mean that fcp will not be able to help transport
	 * distinguish if a new RSCN has come after fcp was last informed about
	 * it. In such cases, it might lead to the problem mentioned in CR/bug #
	 * 5068068 where the device might end up going offline in case of RSCN
	 * storms.
	 */
	fpkt->pkt_ulp_rscn_infop = NULL;
	if (rscn_count != FC_INVALID_RSCN_COUNT) {
		fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
		    sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
		if (fpkt->pkt_ulp_rscn_infop == NULL) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "Failed to alloc memory to pass rscn info");
		}
	}

	if (fpkt->pkt_ulp_rscn_infop != NULL) {
		fc_ulp_rscn_info_t *rscnp;

		rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
		rscnp->ulp_rscn_count = rscn_count;
	}

	if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
		goto fail;
	}
	dma_setup++;

	/*
	 * Must hold target mutex across setting of pkt_pd and call to
	 * fc_ulp_init_packet to ensure the handle to the target doesn't go
	 * away while we're not looking.
	 */
	if (ptgt != NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		fpkt->pkt_pd = ptgt->tgt_pd_handle;

		/* ask transport to do its initialization on this pkt */
		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
		    != FC_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fc_ulp_init_packet failed");
			mutex_exit(&ptgt->tgt_mutex);
			goto fail;
		}
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
		    != FC_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fc_ulp_init_packet failed");
			goto fail;
		}
	}

	/*
	 * Refuse to hand out new packets while the port is going away;
	 * undo the transport initialization and unwind.
	 */
	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & (FCP_STATE_DETACHING |
	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
		int rval;

		mutex_exit(&pptr->port_mutex);

		rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
		ASSERT(rval == FC_SUCCESS);

		goto fail;
	}

	/* Success: account for the outstanding packet. */
	if (ptgt != NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_ipkt_cnt++;
		mutex_exit(&ptgt->tgt_mutex);
	}

	pptr->port_ipkt_cnt++;

	mutex_exit(&pptr->port_mutex);

	return (icmd);

fail:
	/* Unwind in reverse order of allocation. */
	if (fpkt->pkt_ulp_rscn_infop != NULL) {
		kmem_free(fpkt->pkt_ulp_rscn_infop,
		    sizeof (fc_ulp_rscn_info_t));
		fpkt->pkt_ulp_rscn_infop = NULL;
	}

	if (dma_setup) {
		fcp_free_dma(pptr, icmd);
	}
	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
	    (size_t)pptr->port_dmacookie_sz);

	return (NULL);
}
5906
5907 /*
5908 * Function: fcp_icmd_free
5909 *
5910 * Description: Frees the internal command passed by the caller.
5911 *
5912 * Argument: *pptr Fcp port.
5913 * *icmd Internal packet to free.
5914 *
5915 * Return Value: None
5916 */
5917 static void
5918 fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5919 {
5920 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
5921
5922 /* Let the underlying layers do their cleanup. */
5923 (void) fc_ulp_uninit_packet(pptr->port_fp_handle,
5924 icmd->ipkt_fpkt);
5925
5926 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
5927 kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
5928 sizeof (fc_ulp_rscn_info_t));
5929 }
5930
5931 fcp_free_dma(pptr, icmd);
5932
5933 kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5934 (size_t)pptr->port_dmacookie_sz);
5935
5936 mutex_enter(&pptr->port_mutex);
5937
5938 if (ptgt) {
5939 mutex_enter(&ptgt->tgt_mutex);
5940 ptgt->tgt_ipkt_cnt--;
5941 mutex_exit(&ptgt->tgt_mutex);
5942 }
5943
5944 pptr->port_ipkt_cnt--;
5945 mutex_exit(&pptr->port_mutex);
5946 }
5947
5948 /*
5949 * Function: fcp_alloc_dma
5950 *
 * Description: Allocates the DMA resources required for the internal
5952 * packet.
5953 *
5954 * Argument: *pptr FCP port.
5955 * *icmd Internal FCP packet.
5956 * nodma Indicates if the Cmd and Resp will be DMAed.
5957 * flags Allocation flags (Sleep or NoSleep).
5958 *
5959 * Return Value: FC_SUCCESS
5960 * FC_NOMEM
5961 */
static int
fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
    int nodma, int flags)
{
	int		rval;
	size_t		real_size;
	uint_t		ccount;
	int		bound = 0;
	int		cmd_resp = 0;
	fc_packet_t	*fpkt;
	ddi_dma_cookie_t	pkt_data_cookie;
	ddi_dma_cookie_t	*cp;
	uint32_t	cnt;

	fpkt = &icmd->ipkt_fc_packet;

	/* No resources may already be attached to this packet. */
	ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
	    fpkt->pkt_resp_dma == NULL);

	icmd->ipkt_nodma = nodma;

	if (nodma) {
		/* Copy mode: cmd/resp buffers are plain kernel memory. */
		fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
		if (fpkt->pkt_cmd == NULL) {
			goto fail;
		}

		fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
		if (fpkt->pkt_resp == NULL) {
			goto fail;
		}
	} else {
		ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);

		/* DMA mode: cmd/resp buffers come from the shared pool. */
		rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
		if (rval == FC_FAILURE) {
			ASSERT(fpkt->pkt_cmd_dma == NULL &&
			    fpkt->pkt_resp_dma == NULL);
			goto fail;
		}
		cmd_resp++;
	}

	if ((fpkt->pkt_datalen != 0) &&
	    !(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
		/*
		 * set up DMA handle and memory for the data in this packet
		 */
		if (ddi_dma_alloc_handle(pptr->port_dip,
		    &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
		    NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
			goto fail;
		}

		if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
		    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
		    &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
			goto fail;
		}

		/* was DMA mem size gotten < size asked for/needed ?? */
		if (real_size < fpkt->pkt_datalen) {
			goto fail;
		}

		/* bind DMA address and handle together */
		if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
		    NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
		    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
		    &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
			goto fail;
		}
		bound++;

		/* More cookies than the FCA can handle is a failure. */
		if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
			goto fail;
		}

		fpkt->pkt_data_cookie_cnt = ccount;

		/*
		 * Copy the cookies into the array that fcp_icmd_alloc()
		 * reserved just past the ipkt structure.
		 */
		cp = fpkt->pkt_data_cookie;
		*cp = pkt_data_cookie;
		cp++;

		for (cnt = 1; cnt < ccount; cnt++, cp++) {
			ddi_dma_nextcookie(fpkt->pkt_data_dma,
			    &pkt_data_cookie);
			*cp = pkt_data_cookie;
		}

	} else if (fpkt->pkt_datalen != 0) {
		/*
		 * If it's a pseudo FCA, then it can't support DMA even in
		 * SCSI data phase.
		 */
		fpkt->pkt_data = kmem_alloc(fpkt->pkt_datalen, flags);
		if (fpkt->pkt_data == NULL) {
			goto fail;
		}

	}

	return (FC_SUCCESS);

fail:
	/* Tear down whatever was set up before the failure. */
	if (bound) {
		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
	}

	if (fpkt->pkt_data_dma) {
		if (fpkt->pkt_data) {
			ddi_dma_mem_free(&fpkt->pkt_data_acc);
		}
		ddi_dma_free_handle(&fpkt->pkt_data_dma);
	} else {
		if (fpkt->pkt_data) {
			kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
		}
	}

	if (nodma) {
		if (fpkt->pkt_cmd) {
			kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
		}
		if (fpkt->pkt_resp) {
			kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
		}
	} else {
		if (cmd_resp) {
			fcp_free_cmd_resp(pptr, fpkt);
		}
	}

	return (FC_NOMEM);
}
6098
6099
6100 static void
6101 fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
6102 {
6103 fc_packet_t *fpkt = icmd->ipkt_fpkt;
6104
6105 if (fpkt->pkt_data_dma) {
6106 (void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6107 if (fpkt->pkt_data) {
6108 ddi_dma_mem_free(&fpkt->pkt_data_acc);
6109 }
6110 ddi_dma_free_handle(&fpkt->pkt_data_dma);
6111 } else {
6112 if (fpkt->pkt_data) {
6113 kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6114 }
6115 /*
6116 * Need we reset pkt_* to zero???
6117 */
6118 }
6119
6120 if (icmd->ipkt_nodma) {
6121 if (fpkt->pkt_cmd) {
6122 kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
6123 }
6124 if (fpkt->pkt_resp) {
6125 kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
6126 }
6127 } else {
6128 ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
6129
6130 fcp_free_cmd_resp(pptr, fpkt);
6131 }
6132 }
6133
6134 /*
6135 * Function: fcp_lookup_target
6136 *
6137 * Description: Finds a target given a WWN.
6138 *
6139 * Argument: *pptr FCP port.
6140 * *wwn World Wide Name of the device to look for.
6141 *
6142 * Return Value: NULL No target found
6143 * Not NULL Target structure
6144 *
6145 * Context: Interrupt context.
6146 * The mutex pptr->port_mutex must be owned.
6147 */
6148 /* ARGSUSED */
6149 static struct fcp_tgt *
6150 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
6151 {
6152 int hash;
6153 struct fcp_tgt *ptgt;
6154
6155 ASSERT(mutex_owned(&pptr->port_mutex));
6156
6157 hash = FCP_HASH(wwn);
6158
6159 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
6160 ptgt = ptgt->tgt_next) {
6161 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
6162 bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
6163 sizeof (ptgt->tgt_port_wwn)) == 0) {
6164 break;
6165 }
6166 }
6167
6168 return (ptgt);
6169 }
6170
6171
6172 /*
6173 * Find target structure given a port identifier
6174 */
6175 static struct fcp_tgt *
6176 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6177 {
6178 fc_portid_t port_id;
6179 la_wwn_t pwwn;
6180 struct fcp_tgt *ptgt = NULL;
6181
6182 port_id.priv_lilp_posit = 0;
6183 port_id.port_id = d_id;
6184 if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6185 &pwwn) == FC_SUCCESS) {
6186 mutex_enter(&pptr->port_mutex);
6187 ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6188 mutex_exit(&pptr->port_mutex);
6189 }
6190
6191 return (ptgt);
6192 }
6193
6194
6195 /*
6196 * the packet completion callback routine for info cmd pkts
6197 *
6198 * this means fpkt pts to a response to either a PLOGI or a PRLI
6199 *
6200 * if there is an error an attempt is made to call a routine to resend
6201 * the command that failed
6202 */
static void
fcp_icmd_callback(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct la_els_prli	*prli;
	struct la_els_prli	prli_s;
	struct fcp_prli		*fprli;
	struct fcp_lun	*plun;
	int	free_pkt = 1;
	int	rval;
	ls_code_t	resp;
	uchar_t	prli_acc = 0;
	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
	int	lun0_newalloc;

	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;

	/* get ptrs to the port and target structs for the cmd */
	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;

	FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));

	if (icmd->ipkt_opcode == LA_ELS_PRLI) {
		/*
		 * For PRLI, also check the ls_code read back from the
		 * command buffer; the success test below accepts either.
		 * NOTE(review): presumably some FCAs report the accept
		 * there -- confirm against the transport.
		 */
		FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
		    sizeof (prli_s));
		prli_acc = (prli_s.ls_code == LA_ELS_ACC);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "ELS (%x) callback state=0x%x reason=0x%x for %x",
	    icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
	    ptgt->tgt_d_id);

	if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
	    ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {

		/* The ELS was accepted. */
		mutex_enter(&ptgt->tgt_mutex);
		if (ptgt->tgt_pd_handle == NULL) {
			/*
			 * in a fabric environment the port device handles
			 * get created only after successful LOGIN into the
			 * transport, so the transport makes this port
			 * device (pd) handle available in this packet, so
			 * save it now
			 */
			ASSERT(fpkt->pkt_pd != NULL);
			ptgt->tgt_pd_handle = fpkt->pkt_pd;
		}
		mutex_exit(&ptgt->tgt_mutex);

		/* which ELS cmd is this response for ?? */
		switch (icmd->ipkt_opcode) {
		case LA_ELS_PLOGI:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
			    ptgt->tgt_d_id,
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));

			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
			    FCP_TGT_TRACE_15);

			/* Note that we are not allocating a new icmd */
			if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause) != DDI_SUCCESS) {
				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_16);
				goto fail;
			}
			break;

		case LA_ELS_PRLI:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);

			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
			    FCP_TGT_TRACE_17);

			prli = &prli_s;

			FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
			    sizeof (prli_s));

			fprli = (struct fcp_prli *)prli->service_params;

			/* Record the capabilities the device advertised. */
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_icap = fprli->initiator_fn;
			ptgt->tgt_tcap = fprli->target_fn;
			mutex_exit(&ptgt->tgt_mutex);

			if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
				/*
				 * this FCP device does not support target mode
				 */
				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_18);
				goto fail;
			}
			if (fprli->retry == 1) {
				fc_ulp_disable_relogin(pptr->port_fp_handle,
				    &ptgt->tgt_port_wwn);
			}

			/* target is no longer offline */
			mutex_enter(&pptr->port_mutex);
			mutex_enter(&ptgt->tgt_mutex);
			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
				    FCP_TGT_MARK);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,1: state change "
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				goto fail;
			}
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);

			/*
			 * lun 0 should always respond to inquiry, so
			 * get the LUN struct for LUN 0
			 *
			 * Currently we deal with first level of addressing.
			 * If / when we start supporting 0x device types
			 * (DTYPE_ARRAY_CTRL, i.e. array controllers)
			 * this logic will need revisiting.
			 */
			lun0_newalloc = 0;
			if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
				/*
				 * no LUN struct for LUN 0 yet exists,
				 * so create one
				 */
				plun = fcp_alloc_lun(ptgt);
				if (plun == NULL) {
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Failed to allocate lun 0 for"
					    " D_ID=%x", ptgt->tgt_d_id);
					goto fail;
				}
				lun0_newalloc = 1;
			}

			/* fill in LUN info */
			mutex_enter(&ptgt->tgt_mutex);
			/*
			 * consider lun 0 as device not connected if it is
			 * offlined or newly allocated
			 */
			if ((plun->lun_state & FCP_LUN_OFFLINE) ||
			    lun0_newalloc) {
				plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
			}
			plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
			plun->lun_state &= ~FCP_LUN_OFFLINE;
			ptgt->tgt_lun_cnt = 1;
			ptgt->tgt_report_lun_cnt = 0;
			mutex_exit(&ptgt->tgt_mutex);

			/* Retrieve the rscn count (if a valid one exists) */
			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
				rscn_count = ((fc_ulp_rscn_info_t *)
				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
				    ->ulp_rscn_count;
			} else {
				rscn_count = FC_INVALID_RSCN_COUNT;
			}

			/* send Report Lun request to target */
			if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
			    sizeof (struct fcp_reportlun_resp),
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
				mutex_enter(&pptr->port_mutex);
				if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Failed to send REPORT LUN to"
					    " D_ID=%x", ptgt->tgt_d_id);
				} else {
					FCP_TRACE(fcp_logq,
					    pptr->port_instbuf, fcp_trace,
					    FCP_BUF_LEVEL_5, 0,
					    "fcp_icmd_callback,2:state change"
					    " occured for D_ID=0x%x",
					    ptgt->tgt_d_id);
				}
				mutex_exit(&pptr->port_mutex);

				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_19);

				goto fail;
			} else {
				/*
				 * REPORT LUN owns discovery now; free this
				 * icmd without the finish-init accounting.
				 */
				free_pkt = 0;
				fcp_icmd_free(pptr, icmd);
			}
			break;

		default:
			fcp_log(CE_WARN, pptr->port_dip,
			    "!fcp_icmd_callback Invalid opcode");
			goto fail;
		}

		return;
	}


	/*
	 * Other PLOGI failures are not retried as the
	 * transport does it already
	 */
	if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
		if (fcp_is_retryable(icmd) &&
		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {

			if (FCP_MUST_RETRY(fpkt)) {
				fcp_queue_ipkt(pptr, fpkt);
				return;
			}

			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "ELS PRLI is retried for d_id=0x%x, state=%x,"
			    " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
			    fpkt->pkt_reason);

			/*
			 * Retry by recalling the routine that
			 * originally queued this packet
			 */
			mutex_enter(&pptr->port_mutex);
			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				caddr_t msg;

				mutex_exit(&pptr->port_mutex);

				ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);

				/* Give a timed-out command more headroom. */
				if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
					fpkt->pkt_timeout +=
					    FCP_TIMEOUT_DELTA;
				}

				rval = fc_ulp_issue_els(pptr->port_fp_handle,
				    fpkt);
				if (rval == FC_SUCCESS) {
					return;
				}

				if (rval == FC_STATEC_BUSY ||
				    rval == FC_OFFLINE) {
					fcp_queue_ipkt(pptr, fpkt);
					return;
				}
				(void) fc_ulp_error(rval, &msg);

				fcp_log(CE_NOTE, pptr->port_dip,
				    "!ELS 0x%x failed to d_id=0x%x;"
				    " %s", icmd->ipkt_opcode,
				    ptgt->tgt_d_id, msg);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,3: state change "
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&pptr->port_mutex);
			}
		}
	} else {
		/* Failed PLOGI: only requeue if the transport demands it. */
		if (fcp_is_retryable(icmd) &&
		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
			if (FCP_MUST_RETRY(fpkt)) {
				fcp_queue_ipkt(pptr, fpkt);
				return;
			}
		}
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
		    fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
			mutex_exit(&pptr->port_mutex);
			fcp_print_error(fpkt);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_icmd_callback,4: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
		}
	}

fail:
	/* Common exit: account for this leg of discovery and free the pkt. */
	if (free_pkt) {
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
	}
}
6511
6512
6513 /*
6514 * called internally to send an info cmd using the transport
6515 *
6516 * sends either an INQ or a REPORT_LUN
6517 *
6518 * when the packet is completed fcp_scsi_callback is called
6519 */
6520 static int
6521 fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
6522 int lcount, int tcount, int cause, uint32_t rscn_count)
6523 {
6524 int nodma;
6525 struct fcp_ipkt *icmd;
6526 struct fcp_tgt *ptgt;
6527 struct fcp_port *pptr;
6528 fc_frame_hdr_t *hp;
6529 fc_packet_t *fpkt;
6530 struct fcp_cmd fcp_cmd;
6531 struct fcp_cmd *fcmd;
6532 union scsi_cdb *scsi_cdb;
6533
6534 ASSERT(plun != NULL);
6535
6536 ptgt = plun->lun_tgt;
6537 ASSERT(ptgt != NULL);
6538
6539 pptr = ptgt->tgt_port;
6540 ASSERT(pptr != NULL);
6541
6542 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6543 fcp_trace, FCP_BUF_LEVEL_5, 0,
6544 "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);
6545
6546 nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
6547 icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
6548 FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
6549 rscn_count);
6550
6551 if (icmd == NULL) {
6552 return (DDI_FAILURE);
6553 }
6554
6555 fpkt = icmd->ipkt_fpkt;
6556 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
6557 icmd->ipkt_retries = 0;
6558 icmd->ipkt_opcode = opcode;
6559 icmd->ipkt_lun = plun;
6560
6561 if (nodma) {
6562 fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
6563 } else {
6564 fcmd = &fcp_cmd;
6565 }
6566 bzero(fcmd, sizeof (struct fcp_cmd));
6567
6568 fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;
6569
6570 hp = &fpkt->pkt_cmd_fhdr;
6571
6572 hp->s_id = pptr->port_id;
6573 hp->d_id = ptgt->tgt_d_id;
6574 hp->r_ctl = R_CTL_COMMAND;
6575 hp->type = FC_TYPE_SCSI_FCP;
6576 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
6577 hp->rsvd = 0;
6578 hp->seq_id = 0;
6579 hp->seq_cnt = 0;
6580 hp->ox_id = 0xffff;
6581 hp->rx_id = 0xffff;
6582 hp->ro = 0;
6583
6584 bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);
6585
6586 /*
6587 * Request SCSI target for expedited processing
6588 */
6589
6590 /*
6591 * Set up for untagged queuing because we do not
6592 * know if the fibre device supports queuing.
6593 */
6594 fcmd->fcp_cntl.cntl_reserved_0 = 0;
6595 fcmd->fcp_cntl.cntl_reserved_1 = 0;
6596 fcmd->fcp_cntl.cntl_reserved_2 = 0;
6597 fcmd->fcp_cntl.cntl_reserved_3 = 0;
6598 fcmd->fcp_cntl.cntl_reserved_4 = 0;
6599 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
6600 scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
6601
6602 switch (opcode) {
6603 case SCMD_INQUIRY_PAGE83:
6604 /*
6605 * Prepare to get the Inquiry VPD page 83 information
6606 */
6607 fcmd->fcp_cntl.cntl_read_data = 1;
6608 fcmd->fcp_cntl.cntl_write_data = 0;
6609 fcmd->fcp_data_len = alloc_len;
6610
6611 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6612 fpkt->pkt_comp = fcp_scsi_callback;
6613
6614 scsi_cdb->scc_cmd = SCMD_INQUIRY;
6615 scsi_cdb->g0_addr2 = 0x01;
6616 scsi_cdb->g0_addr1 = 0x83;
6617 scsi_cdb->g0_count0 = (uchar_t)alloc_len;
6618 break;
6619
6620 case SCMD_INQUIRY:
6621 fcmd->fcp_cntl.cntl_read_data = 1;
6622 fcmd->fcp_cntl.cntl_write_data = 0;
6623 fcmd->fcp_data_len = alloc_len;
6624
6625 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6626 fpkt->pkt_comp = fcp_scsi_callback;
6627
6628 scsi_cdb->scc_cmd = SCMD_INQUIRY;
6629 scsi_cdb->g0_count0 = SUN_INQSIZE;
6630 break;
6631
6632 case SCMD_REPORT_LUN: {
6633 fc_portid_t d_id;
6634 opaque_t fca_dev;
6635
6636 ASSERT(alloc_len >= 16);
6637
6638 d_id.priv_lilp_posit = 0;
6639 d_id.port_id = ptgt->tgt_d_id;
6640
6641 fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);
6642
6643 mutex_enter(&ptgt->tgt_mutex);
6644 ptgt->tgt_fca_dev = fca_dev;
6645 mutex_exit(&ptgt->tgt_mutex);
6646
6647 fcmd->fcp_cntl.cntl_read_data = 1;
6648 fcmd->fcp_cntl.cntl_write_data = 0;
6649 fcmd->fcp_data_len = alloc_len;
6650
6651 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6652 fpkt->pkt_comp = fcp_scsi_callback;
6653
6654 scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
6655 scsi_cdb->scc5_count0 = alloc_len & 0xff;
6656 scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
6657 scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
6658 scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
6659 break;
6660 }
6661
6662 default:
6663 fcp_log(CE_WARN, pptr->port_dip,
6664 "!fcp_send_scsi Invalid opcode");
6665 break;
6666 }
6667
6668 if (!nodma) {
6669 FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
6670 fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
6671 }
6672
6673 mutex_enter(&pptr->port_mutex);
6674 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6675
6676 mutex_exit(&pptr->port_mutex);
6677 if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
6678 FC_SUCCESS) {
6679 fcp_icmd_free(pptr, icmd);
6680 return (DDI_FAILURE);
6681 }
6682 return (DDI_SUCCESS);
6683 } else {
6684 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6685 fcp_trace, FCP_BUF_LEVEL_2, 0,
6686 "fcp_send_scsi,1: state change occured"
6687 " for D_ID=0x%x", ptgt->tgt_d_id);
6688 mutex_exit(&pptr->port_mutex);
6689 fcp_icmd_free(pptr, icmd);
6690 return (DDI_FAILURE);
6691 }
6692 }
6693
6694
6695 /*
6696 * called by fcp_scsi_callback to check to handle the case where
6697 * REPORT_LUN returns ILLEGAL REQUEST or a UNIT ATTENTION
6698 */
6699 static int
6700 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
6701 {
6702 uchar_t rqlen;
6703 int rval = DDI_FAILURE;
6704 struct scsi_extended_sense sense_info, *sense;
6705 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
6706 fpkt->pkt_ulp_private;
6707 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
6708 struct fcp_port *pptr = ptgt->tgt_port;
6709
6710 ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6711
6712 if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6713 /*
6714 * SCSI-II Reserve Release support. Some older FC drives return
6715 * Reservation conflict for Report Luns command.
6716 */
6717 if (icmd->ipkt_nodma) {
6718 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6719 rsp->fcp_u.fcp_status.sense_len_set = 0;
6720 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6721 } else {
6722 fcp_rsp_t new_resp;
6723
6724 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6725 fpkt->pkt_resp_acc, sizeof (new_resp));
6726
6727 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6728 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6729 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6730
6731 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6732 fpkt->pkt_resp_acc, sizeof (new_resp));
6733 }
6734
6735 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6736 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6737
6738 return (DDI_SUCCESS);
6739 }
6740
6741 sense = &sense_info;
6742 if (!rsp->fcp_u.fcp_status.sense_len_set) {
6743 /* no need to continue if sense length is not set */
6744 return (rval);
6745 }
6746
6747 /* casting 64-bit integer to 8-bit */
6748 rqlen = (uchar_t)min(rsp->fcp_sense_len,
6749 sizeof (struct scsi_extended_sense));
6750
6751 if (rqlen < 14) {
6752 /* no need to continue if request length isn't long enough */
6753 return (rval);
6754 }
6755
6756 if (icmd->ipkt_nodma) {
6757 /*
6758 * We can safely use fcp_response_len here since the
6759 * only path that calls fcp_check_reportlun,
6760 * fcp_scsi_callback, has already called
6761 * fcp_validate_fcp_response.
6762 */
6763 sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6764 sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6765 } else {
6766 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6767 rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6768 sizeof (struct scsi_extended_sense));
6769 }
6770
6771 if (!FCP_SENSE_NO_LUN(sense)) {
6772 mutex_enter(&ptgt->tgt_mutex);
6773 /* clear the flag if any */
6774 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6775 mutex_exit(&ptgt->tgt_mutex);
6776 }
6777
6778 if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6779 (sense->es_add_code == 0x20)) {
6780 if (icmd->ipkt_nodma) {
6781 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6782 rsp->fcp_u.fcp_status.sense_len_set = 0;
6783 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6784 } else {
6785 fcp_rsp_t new_resp;
6786
6787 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6788 fpkt->pkt_resp_acc, sizeof (new_resp));
6789
6790 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6791 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6792 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6793
6794 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6795 fpkt->pkt_resp_acc, sizeof (new_resp));
6796 }
6797
6798 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6799 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6800
6801 return (DDI_SUCCESS);
6802 }
6803
6804 /*
6805 * This is for the STK library which returns a check condition,
6806 * to indicate device is not ready, manual assistance needed.
6807 * This is to a report lun command when the door is open.
6808 */
6809 if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6810 if (icmd->ipkt_nodma) {
6811 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6812 rsp->fcp_u.fcp_status.sense_len_set = 0;
6813 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6814 } else {
6815 fcp_rsp_t new_resp;
6816
6817 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6818 fpkt->pkt_resp_acc, sizeof (new_resp));
6819
6820 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6821 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6822 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6823
6824 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6825 fpkt->pkt_resp_acc, sizeof (new_resp));
6826 }
6827
6828 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6829 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6830
6831 return (DDI_SUCCESS);
6832 }
6833
6834 if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6835 (FCP_SENSE_NO_LUN(sense))) {
6836 mutex_enter(&ptgt->tgt_mutex);
6837 if ((FCP_SENSE_NO_LUN(sense)) &&
6838 (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6839 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6840 mutex_exit(&ptgt->tgt_mutex);
6841 /*
6842 * reconfig was triggred by ILLEGAL REQUEST but
6843 * got ILLEGAL REQUEST again
6844 */
6845 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6846 fcp_trace, FCP_BUF_LEVEL_3, 0,
6847 "!FCP: Unable to obtain Report Lun data"
6848 " target=%x", ptgt->tgt_d_id);
6849 } else {
6850 if (ptgt->tgt_tid == NULL) {
6851 timeout_id_t tid;
6852 /*
6853 * REPORT LUN data has changed. Kick off
6854 * rediscovery
6855 */
6856 tid = timeout(fcp_reconfigure_luns,
6857 (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6858
6859 ptgt->tgt_tid = tid;
6860 ptgt->tgt_state |= FCP_TGT_BUSY;
6861 }
6862 if (FCP_SENSE_NO_LUN(sense)) {
6863 ptgt->tgt_state |= FCP_TGT_ILLREQ;
6864 }
6865 mutex_exit(&ptgt->tgt_mutex);
6866 if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6867 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6868 fcp_trace, FCP_BUF_LEVEL_3, 0,
6869 "!FCP:Report Lun Has Changed"
6870 " target=%x", ptgt->tgt_d_id);
6871 } else if (FCP_SENSE_NO_LUN(sense)) {
6872 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6873 fcp_trace, FCP_BUF_LEVEL_3, 0,
6874 "!FCP:LU Not Supported"
6875 " target=%x", ptgt->tgt_d_id);
6876 }
6877 }
6878 rval = DDI_SUCCESS;
6879 }
6880
6881 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6882 fcp_trace, FCP_BUF_LEVEL_5, 0,
6883 "D_ID=%x, sense=%x, status=%x",
6884 fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6885 rsp->fcp_u.fcp_status.scsi_status);
6886
6887 return (rval);
6888 }
6889
6890 /*
6891 * Function: fcp_scsi_callback
6892 *
6893 * Description: This is the callback routine set by fcp_send_scsi() after
6894 * it calls fcp_icmd_alloc(). The SCSI command completed here
6895 * and autogenerated by FCP are: REPORT_LUN, INQUIRY and
6896 * INQUIRY_PAGE83.
6897 *
6898 * Argument: *fpkt FC packet used to convey the command
6899 *
6900 * Return Value: None
6901 */
6902 static void
6903 fcp_scsi_callback(fc_packet_t *fpkt)
6904 {
6905 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
6906 fpkt->pkt_ulp_private;
6907 struct fcp_rsp_info fcp_rsp_err, *bep;
6908 struct fcp_port *pptr;
6909 struct fcp_tgt *ptgt;
6910 struct fcp_lun *plun;
6911 struct fcp_rsp response, *rsp;
6912
6913 ptgt = icmd->ipkt_tgt;
6914 pptr = ptgt->tgt_port;
6915 plun = icmd->ipkt_lun;
6916
6917 if (icmd->ipkt_nodma) {
6918 rsp = (struct fcp_rsp *)fpkt->pkt_resp;
6919 } else {
6920 rsp = &response;
6921 FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
6922 sizeof (struct fcp_rsp));
6923 }
6924
6925 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6926 fcp_trace, FCP_BUF_LEVEL_2, 0,
6927 "SCSI callback state=0x%x for %x, op_code=0x%x, "
6928 "status=%x, lun num=%x",
6929 fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
6930 rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);
6931
6932 /*
6933 * Pre-init LUN GUID with NWWN if it is not a device that
6934 * supports multiple luns and we know it's not page83
6935 * compliant. Although using a NWWN is not lun unique,
6936 * we will be fine since there is only one lun behind the taget
6937 * in this case.
6938 */
6939 if ((plun->lun_guid_size == 0) &&
6940 (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6941 (fcp_symmetric_device_probe(plun) == 0)) {
6942
6943 char ascii_wwn[FC_WWN_SIZE*2+1];
6944 fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
6945 (void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
6946 }
6947
6948 /*
6949 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
6950 * when thay have more data than what is asked in CDB. An overrun
6951 * is really when FCP_DL is smaller than the data length in CDB.
6952 * In the case here we know that REPORT LUN command we formed within
6953 * this binary has correct FCP_DL. So this OVERRUN is due to bad device
6954 * behavior. In reality this is FC_SUCCESS.
6955 */
6956 if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
6957 (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
6958 (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
6959 fpkt->pkt_state = FC_PKT_SUCCESS;
6960 }
6961
6962 if (fpkt->pkt_state != FC_PKT_SUCCESS) {
6963 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6964 fcp_trace, FCP_BUF_LEVEL_2, 0,
6965 "icmd failed with state=0x%x for %x", fpkt->pkt_state,
6966 ptgt->tgt_d_id);
6967
6968 if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
6969 /*
6970 * Inquiry VPD page command on A5K SES devices would
6971 * result in data CRC errors.
6972 */
6973 if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
6974 (void) fcp_handle_page83(fpkt, icmd, 1);
6975 return;
6976 }
6977 }
6978 if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
6979 FCP_MUST_RETRY(fpkt)) {
6980 fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
6981 fcp_retry_scsi_cmd(fpkt);
6982 return;
6983 }
6984
6985 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6986 FCP_TGT_TRACE_20);
6987
6988 mutex_enter(&pptr->port_mutex);
6989 mutex_enter(&ptgt->tgt_mutex);
6990 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6991 mutex_exit(&ptgt->tgt_mutex);
6992 mutex_exit(&pptr->port_mutex);
6993 fcp_print_error(fpkt);
6994 } else {
6995 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6996 fcp_trace, FCP_BUF_LEVEL_2, 0,
6997 "fcp_scsi_callback,1: state change occured"
6998 " for D_ID=0x%x", ptgt->tgt_d_id);
6999 mutex_exit(&ptgt->tgt_mutex);
7000 mutex_exit(&pptr->port_mutex);
7001 }
7002 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7003 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7004 fcp_icmd_free(pptr, icmd);
7005 return;
7006 }
7007
7008 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);
7009
7010 mutex_enter(&pptr->port_mutex);
7011 mutex_enter(&ptgt->tgt_mutex);
7012 if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7013 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7014 fcp_trace, FCP_BUF_LEVEL_2, 0,
7015 "fcp_scsi_callback,2: state change occured"
7016 " for D_ID=0x%x", ptgt->tgt_d_id);
7017 mutex_exit(&ptgt->tgt_mutex);
7018 mutex_exit(&pptr->port_mutex);
7019 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7020 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7021 fcp_icmd_free(pptr, icmd);
7022 return;
7023 }
7024 ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7025
7026 mutex_exit(&ptgt->tgt_mutex);
7027 mutex_exit(&pptr->port_mutex);
7028
7029 if (icmd->ipkt_nodma) {
7030 bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
7031 sizeof (struct fcp_rsp));
7032 } else {
7033 bep = &fcp_rsp_err;
7034 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
7035 fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
7036 }
7037
7038 if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
7039 fcp_retry_scsi_cmd(fpkt);
7040 return;
7041 }
7042
7043 if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
7044 FCP_NO_FAILURE) {
7045 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7046 fcp_trace, FCP_BUF_LEVEL_2, 0,
7047 "rsp_code=0x%x, rsp_len_set=0x%x",
7048 bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
7049 fcp_retry_scsi_cmd(fpkt);
7050 return;
7051 }
7052
7053 if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
7054 rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
7055 fcp_queue_ipkt(pptr, fpkt);
7056 return;
7057 }
7058
7059 /*
7060 * Devices that do not support INQUIRY_PAGE83, return check condition
7061 * with illegal request as per SCSI spec.
7062 * Crossbridge is one such device and Daktari's SES node is another.
7063 * We want to ideally enumerate these devices as a non-mpxio devices.
7064 * SES nodes (Daktari only currently) are an exception to this.
7065 */
7066 if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
7067 (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {
7068
7069 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7070 fcp_trace, FCP_BUF_LEVEL_3, 0,
7071 "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
7072 "check condition. May enumerate as non-mpxio device",
7073 ptgt->tgt_d_id, plun->lun_type);
7074
7075 /*
7076 * If we let Daktari's SES be enumerated as a non-mpxio
7077 * device, there will be a discrepency in that the other
7078 * internal FC disks will get enumerated as mpxio devices.
7079 * Applications like luxadm expect this to be consistent.
7080 *
7081 * So, we put in a hack here to check if this is an SES device
7082 * and handle it here.
7083 */
7084 if (plun->lun_type == DTYPE_ESI) {
7085 /*
7086 * Since, pkt_state is actually FC_PKT_SUCCESS
7087 * at this stage, we fake a failure here so that
7088 * fcp_handle_page83 will create a device path using
7089 * the WWN instead of the GUID which is not there anyway
7090 */
7091 fpkt->pkt_state = FC_PKT_LOCAL_RJT;
7092 (void) fcp_handle_page83(fpkt, icmd, 1);
7093 return;
7094 }
7095
7096 mutex_enter(&ptgt->tgt_mutex);
7097 plun->lun_state &= ~(FCP_LUN_OFFLINE |
7098 FCP_LUN_MARK | FCP_LUN_BUSY);
7099 mutex_exit(&ptgt->tgt_mutex);
7100
7101 (void) fcp_call_finish_init(pptr, ptgt,
7102 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7103 icmd->ipkt_cause);
7104 fcp_icmd_free(pptr, icmd);
7105 return;
7106 }
7107
7108 if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7109 int rval = DDI_FAILURE;
7110
7111 /*
7112 * handle cases where report lun isn't supported
7113 * by faking up our own REPORT_LUN response or
7114 * UNIT ATTENTION
7115 */
7116 if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7117 rval = fcp_check_reportlun(rsp, fpkt);
7118
7119 /*
7120 * fcp_check_reportlun might have modified the
7121 * FCP response. Copy it in again to get an updated
7122 * FCP response
7123 */
7124 if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
7125 rsp = &response;
7126
7127 FCP_CP_IN(fpkt->pkt_resp, rsp,
7128 fpkt->pkt_resp_acc,
7129 sizeof (struct fcp_rsp));
7130 }
7131 }
7132
7133 if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7134 if (rval == DDI_SUCCESS) {
7135 (void) fcp_call_finish_init(pptr, ptgt,
7136 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7137 icmd->ipkt_cause);
7138 fcp_icmd_free(pptr, icmd);
7139 } else {
7140 fcp_retry_scsi_cmd(fpkt);
7141 }
7142
7143 return;
7144 }
7145 } else {
7146 if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7147 mutex_enter(&ptgt->tgt_mutex);
7148 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
7149 mutex_exit(&ptgt->tgt_mutex);
7150 }
7151 }
7152
7153 ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
7154 if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
7155 (void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0,
7156 DDI_DMA_SYNC_FORCPU);
7157 }
7158
7159 switch (icmd->ipkt_opcode) {
7160 case SCMD_INQUIRY:
7161 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
7162 fcp_handle_inquiry(fpkt, icmd);
7163 break;
7164
7165 case SCMD_REPORT_LUN:
7166 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
7167 FCP_TGT_TRACE_22);
7168 fcp_handle_reportlun(fpkt, icmd);
7169 break;
7170
7171 case SCMD_INQUIRY_PAGE83:
7172 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
7173 (void) fcp_handle_page83(fpkt, icmd, 0);
7174 break;
7175
7176 default:
7177 fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
7178 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7179 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7180 fcp_icmd_free(pptr, icmd);
7181 break;
7182 }
7183 }
7184
7185
7186 static void
7187 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7188 {
7189 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
7190 fpkt->pkt_ulp_private;
7191 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
7192 struct fcp_port *pptr = ptgt->tgt_port;
7193
7194 if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7195 fcp_is_retryable(icmd)) {
7196 mutex_enter(&pptr->port_mutex);
7197 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7198 mutex_exit(&pptr->port_mutex);
7199 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7200 fcp_trace, FCP_BUF_LEVEL_3, 0,
7201 "Retrying %s to %x; state=%x, reason=%x",
7202 (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7203 "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7204 fpkt->pkt_state, fpkt->pkt_reason);
7205
7206 fcp_queue_ipkt(pptr, fpkt);
7207 } else {
7208 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7209 fcp_trace, FCP_BUF_LEVEL_3, 0,
7210 "fcp_retry_scsi_cmd,1: state change occured"
7211 " for D_ID=0x%x", ptgt->tgt_d_id);
7212 mutex_exit(&pptr->port_mutex);
7213 (void) fcp_call_finish_init(pptr, ptgt,
7214 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7215 icmd->ipkt_cause);
7216 fcp_icmd_free(pptr, icmd);
7217 }
7218 } else {
7219 fcp_print_error(fpkt);
7220 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7221 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7222 fcp_icmd_free(pptr, icmd);
7223 }
7224 }
7225
7226 /*
7227 * Function: fcp_handle_page83
7228 *
7229 * Description: Treats the response to INQUIRY_PAGE83.
7230 *
7231 * Argument: *fpkt FC packet used to convey the command.
7232 * *icmd Original fcp_ipkt structure.
7233 * ignore_page83_data
7234 * if it's 1, that means it's a special devices's
7235 * page83 response, it should be enumerated under mpxio
7236 *
7237 * Return Value: None
7238 */
7239 static void
7240 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7241 int ignore_page83_data)
7242 {
7243 struct fcp_port *pptr;
7244 struct fcp_lun *plun;
7245 struct fcp_tgt *ptgt;
7246 uchar_t dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7247 int fail = 0;
7248 ddi_devid_t devid;
7249 char *guid = NULL;
7250 int ret;
7251
7252 ASSERT(icmd != NULL && fpkt != NULL);
7253
7254 pptr = icmd->ipkt_port;
7255 ptgt = icmd->ipkt_tgt;
7256 plun = icmd->ipkt_lun;
7257
7258 if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7259 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7260
7261 FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7262 SCMD_MAX_INQUIRY_PAGE83_SIZE);
7263
7264 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7265 fcp_trace, FCP_BUF_LEVEL_5, 0,
7266 "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7267 "dtype=0x%x, lun num=%x",
7268 pptr->port_instance, ptgt->tgt_d_id,
7269 dev_id_page[0], plun->lun_num);
7270
7271 ret = ddi_devid_scsi_encode(
7272 DEVID_SCSI_ENCODE_VERSION_LATEST,
7273 NULL, /* driver name */
7274 (unsigned char *) &plun->lun_inq, /* standard inquiry */
7275 sizeof (plun->lun_inq), /* size of standard inquiry */
7276 NULL, /* page 80 data */
7277 0, /* page 80 len */
7278 dev_id_page, /* page 83 data */
7279 SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7280 &devid);
7281
7282 if (ret == DDI_SUCCESS) {
7283
7284 guid = ddi_devid_to_guid(devid);
7285
7286 if (guid) {
7287 /*
7288 * Check our current guid. If it's non null
7289 * and it has changed, we need to copy it into
7290 * lun_old_guid since we might still need it.
7291 */
7292 if (plun->lun_guid &&
7293 strcmp(guid, plun->lun_guid)) {
7294 unsigned int len;
7295
7296 /*
7297 * If the guid of the LUN changes,
7298 * reconfiguration should be triggered
7299 * to reflect the changes.
7300 * i.e. we should offline the LUN with
7301 * the old guid, and online the LUN with
7302 * the new guid.
7303 */
7304 plun->lun_state |= FCP_LUN_CHANGED;
7305
7306 if (plun->lun_old_guid) {
7307 kmem_free(plun->lun_old_guid,
7308 plun->lun_old_guid_size);
7309 }
7310
7311 len = plun->lun_guid_size;
7312 plun->lun_old_guid_size = len;
7313
7314 plun->lun_old_guid = kmem_zalloc(len,
7315 KM_NOSLEEP);
7316
7317 if (plun->lun_old_guid) {
7318 /*
7319 * The alloc was successful then
7320 * let's do the copy.
7321 */
7322 bcopy(plun->lun_guid,
7323 plun->lun_old_guid, len);
7324 } else {
7325 fail = 1;
7326 plun->lun_old_guid_size = 0;
7327 }
7328 }
7329 if (!fail) {
7330 if (fcp_copy_guid_2_lun_block(
7331 plun, guid)) {
7332 fail = 1;
7333 }
7334 }
7335 ddi_devid_free_guid(guid);
7336
7337 } else {
7338 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7339 fcp_trace, FCP_BUF_LEVEL_2, 0,
7340 "fcp_handle_page83: unable to create "
7341 "GUID");
7342
7343 /* couldn't create good guid from devid */
7344 fail = 1;
7345 }
7346 ddi_devid_free(devid);
7347
7348 } else if (ret == DDI_NOT_WELL_FORMED) {
7349 /* NULL filled data for page 83 */
7350 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7351 fcp_trace, FCP_BUF_LEVEL_2, 0,
7352 "fcp_handle_page83: retry GUID");
7353
7354 icmd->ipkt_retries = 0;
7355 fcp_retry_scsi_cmd(fpkt);
7356 return;
7357 } else {
7358 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7359 fcp_trace, FCP_BUF_LEVEL_2, 0,
7360 "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7361 ret);
7362 /*
7363 * Since the page83 validation
7364 * introduced late, we are being
7365 * tolerant to the existing devices
7366 * that already found to be working
7367 * under mpxio, like A5200's SES device,
7368 * its page83 response will not be standard-compliant,
7369 * but we still want it to be enumerated under mpxio.
7370 */
7371 if (fcp_symmetric_device_probe(plun) != 0) {
7372 fail = 1;
7373 }
7374 }
7375
7376 } else {
7377 /* bad packet state */
7378 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7379
7380 /*
7381 * For some special devices (A5K SES and Daktari's SES devices),
7382 * they should be enumerated under mpxio
7383 * or "luxadm dis" will fail
7384 */
7385 if (ignore_page83_data) {
7386 fail = 0;
7387 } else {
7388 fail = 1;
7389 }
7390 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7391 fcp_trace, FCP_BUF_LEVEL_2, 0,
7392 "!Devid page cmd failed. "
7393 "fpkt_state: %x fpkt_reason: %x",
7394 "ignore_page83: %d",
7395 fpkt->pkt_state, fpkt->pkt_reason,
7396 ignore_page83_data);
7397 }
7398
7399 mutex_enter(&pptr->port_mutex);
7400 mutex_enter(&plun->lun_mutex);
7401 /*
7402 * If lun_cip is not NULL, then we needn't update lun_mpxio to avoid
7403 * mismatch between lun_cip and lun_mpxio.
7404 */
7405 if (plun->lun_cip == NULL) {
7406 /*
7407 * If we don't have a guid for this lun it's because we were
7408 * unable to glean one from the page 83 response. Set the
7409 * control flag to 0 here to make sure that we don't attempt to
7410 * enumerate it under mpxio.
7411 */
7412 if (fail || pptr->port_mpxio == 0) {
7413 plun->lun_mpxio = 0;
7414 } else {
7415 plun->lun_mpxio = 1;
7416 }
7417 }
7418 mutex_exit(&plun->lun_mutex);
7419 mutex_exit(&pptr->port_mutex);
7420
7421 mutex_enter(&ptgt->tgt_mutex);
7422 plun->lun_state &=
7423 ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7424 mutex_exit(&ptgt->tgt_mutex);
7425
7426 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7427 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7428
7429 fcp_icmd_free(pptr, icmd);
7430 }
7431
7432 /*
7433 * Function: fcp_handle_inquiry
7434 *
7435 * Description: Called by fcp_scsi_callback to handle the response to an
7436 * INQUIRY request.
7437 *
7438 * Argument: *fpkt FC packet used to convey the command.
7439 * *icmd Original fcp_ipkt structure.
7440 *
7441 * Return Value: None
7442 */
7443 static void
7444 fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7445 {
7446 struct fcp_port *pptr;
7447 struct fcp_lun *plun;
7448 struct fcp_tgt *ptgt;
7449 uchar_t dtype;
7450 uchar_t pqual;
7451 uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
7452
7453 ASSERT(icmd != NULL && fpkt != NULL);
7454
7455 pptr = icmd->ipkt_port;
7456 ptgt = icmd->ipkt_tgt;
7457 plun = icmd->ipkt_lun;
7458
7459 FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
7460 sizeof (struct scsi_inquiry));
7461
7462 dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
7463 pqual = plun->lun_inq.inq_dtype >> 5;
7464
7465 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7466 fcp_trace, FCP_BUF_LEVEL_5, 0,
7467 "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
7468 "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
7469 plun->lun_num, dtype, pqual);
7470
7471 if (pqual != 0) {
7472 /*
7473 * Non-zero peripheral qualifier
7474 */
7475 fcp_log(CE_CONT, pptr->port_dip,
7476 "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7477 "Device type=0x%x Peripheral qual=0x%x\n",
7478 ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7479
7480 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7481 fcp_trace, FCP_BUF_LEVEL_5, 0,
7482 "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7483 "Device type=0x%x Peripheral qual=0x%x\n",
7484 ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7485
7486 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);
7487
7488 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7489 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7490 fcp_icmd_free(pptr, icmd);
7491 return;
7492 }
7493
7494 /*
7495 * If the device is already initialized, check the dtype
7496 * for a change. If it has changed then update the flags
7497 * so the create_luns will offline the old device and
7498 * create the new device. Refer to bug: 4764752
7499 */
7500 if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
7501 plun->lun_state |= FCP_LUN_CHANGED;
7502 }
7503 plun->lun_type = plun->lun_inq.inq_dtype;
7504
7505 /*
7506 * This code is setting/initializing the throttling in the FCA
7507 * driver.
7508 */
7509 mutex_enter(&pptr->port_mutex);
7510 if (!pptr->port_notify) {
7511 if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
7512 uint32_t cmd = 0;
7513 cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
7514 ((cmd & 0xFFFFFF00 >> 8) |
7515 FCP_SVE_THROTTLE << 8));
7516 pptr->port_notify = 1;
7517 mutex_exit(&pptr->port_mutex);
7518 (void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
7519 mutex_enter(&pptr->port_mutex);
7520 }
7521 }
7522
7523 if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7524 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7525 fcp_trace, FCP_BUF_LEVEL_2, 0,
7526 "fcp_handle_inquiry,1:state change occured"
7527 " for D_ID=0x%x", ptgt->tgt_d_id);
7528 mutex_exit(&pptr->port_mutex);
7529
7530 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
7531 (void) fcp_call_finish_init(pptr, ptgt,
7532 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7533 icmd->ipkt_cause);
7534 fcp_icmd_free(pptr, icmd);
7535 return;
7536 }
7537 ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7538 mutex_exit(&pptr->port_mutex);
7539
7540 /* Retrieve the rscn count (if a valid one exists) */
7541 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7542 rscn_count = ((fc_ulp_rscn_info_t *)
7543 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
7544 } else {
7545 rscn_count = FC_INVALID_RSCN_COUNT;
7546 }
7547
7548 if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
7549 SCMD_MAX_INQUIRY_PAGE83_SIZE,
7550 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7551 icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7552 fcp_log(CE_WARN, NULL, "!failed to send page 83");
7553 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
7554 (void) fcp_call_finish_init(pptr, ptgt,
7555 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7556 icmd->ipkt_cause);
7557 }
7558
7559 /*
7560 * Read Inquiry VPD Page 0x83 to uniquely
7561 * identify this logical unit.
7562 */
7563 fcp_icmd_free(pptr, icmd);
7564 }
7565
7566 /*
7567 * Function: fcp_handle_reportlun
7568 *
7569 * Description: Called by fcp_scsi_callback to handle the response to a
7570 * REPORT_LUN request.
7571 *
7572 * Argument: *fpkt FC packet used to convey the command.
7573 * *icmd Original fcp_ipkt structure.
7574 *
7575 * Return Value: None
7576 */
static void
fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
{
	int			i;
	int			nluns_claimed;	/* LUN count per the target */
	int			nluns_bufmax;	/* LUNs our buffer can hold */
	int			len;
	uint16_t		lun_num;
	uint32_t		rscn_count = FC_INVALID_RSCN_COUNT;
	struct fcp_port		*pptr;
	struct fcp_tgt		*ptgt;
	struct fcp_lun		*plun;
	struct fcp_reportlun_resp	*report_lun;

	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;
	len = fpkt->pkt_datalen;

	/*
	 * Give up if the payload cannot even hold the REPORT_LUN header,
	 * or if a local copy of the response cannot be allocated (we are
	 * possibly in interrupt context, hence KM_NOSLEEP).
	 */
	if ((len < FCP_LUN_HEADER) ||
	    ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/* Copy the REPORT_LUN response out of the DMA buffer. */
	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
	    fpkt->pkt_datalen);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
	    pptr->port_instance, ptgt->tgt_d_id);

	/*
	 * Get the number of luns (which is supplied as LUNS * 8) the
	 * device claims it has.
	 */
	nluns_claimed = BE_32(report_lun->num_lun) >> 3;

	/*
	 * Get the maximum number of luns the buffer submitted can hold.
	 */
	nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;

	/*
	 * Due to limitations of certain hardware, we support only 16 bit LUNs
	 */
	if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
		kmem_free(report_lun, len);

		fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
		    " 0x%x number of LUNs for target=%x", nluns_claimed,
		    ptgt->tgt_d_id);

		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/*
	 * If there are more LUNs than we have allocated memory for,
	 * allocate more space and send down yet another report lun if
	 * the maximum number of attempts hasn't been reached.
	 */
	mutex_enter(&ptgt->tgt_mutex);

	if ((nluns_claimed > nluns_bufmax) &&
	    (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {

		/* NOTE: this declaration shadows the outer plun. */
		struct fcp_lun *plun;

		ptgt->tgt_report_lun_cnt++;
		plun = ptgt->tgt_lun;
		ASSERT(plun != NULL);
		mutex_exit(&ptgt->tgt_mutex);

		kmem_free(report_lun, len);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "!Dynamically discovered %d LUNs for D_ID=%x",
		    nluns_claimed, ptgt->tgt_d_id);

		/* Retrieve the rscn count (if a valid one exists) */
		if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
			rscn_count = ((fc_ulp_rscn_info_t *)
			    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
			    ulp_rscn_count;
		} else {
			rscn_count = FC_INVALID_RSCN_COUNT;
		}

		/* Reissue REPORT_LUN with a buffer sized for all LUNs. */
		if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
		    FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
			(void) fcp_call_finish_init(pptr, ptgt,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause);
		}

		fcp_icmd_free(pptr, icmd);
		return;
	}

	/*
	 * Retries exhausted (or first pass overflowed): clamp to what the
	 * buffer actually holds and log the LUNs we are losing.
	 */
	if (nluns_claimed > nluns_bufmax) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "Target=%x:%x:%x:%x:%x:%x:%x:%x"
		    "  Number of LUNs lost=%x",
		    ptgt->tgt_port_wwn.raw_wwn[0],
		    ptgt->tgt_port_wwn.raw_wwn[1],
		    ptgt->tgt_port_wwn.raw_wwn[2],
		    ptgt->tgt_port_wwn.raw_wwn[3],
		    ptgt->tgt_port_wwn.raw_wwn[4],
		    ptgt->tgt_port_wwn.raw_wwn[5],
		    ptgt->tgt_port_wwn.raw_wwn[6],
		    ptgt->tgt_port_wwn.raw_wwn[7],
		    nluns_claimed - nluns_bufmax);

		nluns_claimed = nluns_bufmax;
	}
	ptgt->tgt_lun_cnt = nluns_claimed;

	/*
	 * Identify missing LUNs and print warning messages
	 */
	for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
		int offline;
		int exists = 0;

		offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;

		/* Look for this known LUN in the new REPORT_LUN data. */
		for (i = 0; i < nluns_claimed && exists == 0; i++) {
			uchar_t *lun_string;

			lun_string = (uchar_t *)&(report_lun->lun_string[i]);

			switch (lun_string[0] & 0xC0) {
			case FCP_LUN_ADDRESSING:
			case FCP_PD_ADDRESSING:
			case FCP_VOLUME_ADDRESSING:
				lun_num = ((lun_string[0] & 0x3F) << 8) |
				    lun_string[1];
				if (plun->lun_num == lun_num) {
					exists++;
					break;
				}
				break;

			default:
				break;
			}
		}

		if (!exists && !offline) {
			/*
			 * LUN disappeared. Re-take the locks in the
			 * canonical port-then-target order before checking
			 * for a concurrent state change.
			 */
			mutex_exit(&ptgt->tgt_mutex);

			mutex_enter(&pptr->port_mutex);
			mutex_enter(&ptgt->tgt_mutex);
			if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
				/*
				 * set disappear flag when device was connected
				 */
				if (!(plun->lun_state &
				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
					plun->lun_state |= FCP_LUN_DISAPPEARED;
				}
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				if (!(plun->lun_state &
				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun=%x for target=%x disappeared",
					    plun->lun_num, ptgt->tgt_d_id);
				}
				mutex_enter(&ptgt->tgt_mutex);
			} else {
				/* State changed under us; abandon this pass. */
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_5, 0,
				    "fcp_handle_reportlun,1: state change"
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				kmem_free(report_lun, len);
				(void) fcp_call_finish_init(pptr, ptgt,
				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
				    icmd->ipkt_cause);
				fcp_icmd_free(pptr, icmd);
				return;
			}
		} else if (exists) {
			/*
			 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
			 * actually exists in REPORT_LUN response
			 */
			if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
				plun->lun_state &=
				    ~FCP_LUN_DEVICE_NOT_CONNECTED;
			}
			if (offline || plun->lun_num == 0) {
				if (plun->lun_state & FCP_LUN_DISAPPEARED)  {
					plun->lun_state &= ~FCP_LUN_DISAPPEARED;
					mutex_exit(&ptgt->tgt_mutex);
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun=%x for target=%x reappeared",
					    plun->lun_num, ptgt->tgt_d_id);
					mutex_enter(&ptgt->tgt_mutex);
				}
			}
		}
	}

	/* tmp_cnt gates fcp_call_finish_init; never let it start at zero. */
	ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
	mutex_exit(&ptgt->tgt_mutex);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
	    pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);

	/* scan each lun */
	for (i = 0; i < nluns_claimed; i++) {
		uchar_t	*lun_string;

		lun_string = (uchar_t *)&(report_lun->lun_string[i]);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
		    " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
		    lun_string[0]);

		switch (lun_string[0] & 0xC0) {
		case FCP_LUN_ADDRESSING:
		case FCP_PD_ADDRESSING:
		case FCP_VOLUME_ADDRESSING:
			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];

			/* We will skip masked LUNs because of the blacklist. */
			if (fcp_lun_blacklist != NULL) {
				mutex_enter(&ptgt->tgt_mutex);
				if (fcp_should_mask(&ptgt->tgt_port_wwn,
				    lun_num) == TRUE) {
					ptgt->tgt_lun_cnt--;
					mutex_exit(&ptgt->tgt_mutex);
					break;
				}
				mutex_exit(&ptgt->tgt_mutex);
			}

			/* see if this LUN is already allocated */
			if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
				plun = fcp_alloc_lun(ptgt);
				if (plun == NULL) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun allocation failed"
					    " target=%x lun=%x",
					    ptgt->tgt_d_id, lun_num);
					break;
				}
			}

			mutex_enter(&plun->lun_tgt->tgt_mutex);
			/* convert to LUN */
			plun->lun_addr.ent_addr_0 =
			    BE_16(*(uint16_t *)&(lun_string[0]));
			plun->lun_addr.ent_addr_1 =
			    BE_16(*(uint16_t *)&(lun_string[2]));
			plun->lun_addr.ent_addr_2 =
			    BE_16(*(uint16_t *)&(lun_string[4]));
			plun->lun_addr.ent_addr_3 =
			    BE_16(*(uint16_t *)&(lun_string[6]));

			plun->lun_num = lun_num;
			plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
			plun->lun_state &= ~FCP_LUN_OFFLINE;
			mutex_exit(&plun->lun_tgt->tgt_mutex);

			/* Retrieve the rscn count (if a valid one exists) */
			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
				rscn_count = ((fc_ulp_rscn_info_t *)
				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
				    ulp_rscn_count;
			} else {
				rscn_count = FC_INVALID_RSCN_COUNT;
			}

			/* Probe the LUN; on success it completes async. */
			if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
				mutex_enter(&pptr->port_mutex);
				mutex_enter(&plun->lun_tgt->tgt_mutex);
				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!failed to send INQUIRY"
					    " target=%x lun=%x",
					    ptgt->tgt_d_id, plun->lun_num);
				} else {
					FCP_TRACE(fcp_logq,
					    pptr->port_instbuf, fcp_trace,
					    FCP_BUF_LEVEL_5, 0,
					    "fcp_handle_reportlun,2: state"
					    " change occured for D_ID=0x%x",
					    ptgt->tgt_d_id);
				}
				mutex_exit(&plun->lun_tgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
			} else {
				/* INQUIRY in flight; don't decrement tmp_cnt */
				continue;
			}
			break;

		default:
			fcp_log(CE_WARN, NULL,
			    "!Unsupported LUN Addressing method %x "
			    "in response to REPORT_LUN", lun_string[0]);
			break;
		}

		/*
		 * each time through this loop we should decrement
		 * the tmp_cnt by one -- since we go through this loop
		 * one time for each LUN, the tmp_cnt should never be <=0
		 */
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
	}

	/* Zero LUNs reported: still burn the single tmp_cnt reference. */
	if (i == 0) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
	}

	kmem_free(report_lun, len);
	fcp_icmd_free(pptr, icmd);
}
7918
7919
7920 /*
7921 * called internally to return a LUN given a target and a LUN number
7922 */
7923 static struct fcp_lun *
7924 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7925 {
7926 struct fcp_lun *plun;
7927
7928 mutex_enter(&ptgt->tgt_mutex);
7929 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7930 if (plun->lun_num == lun_num) {
7931 mutex_exit(&ptgt->tgt_mutex);
7932 return (plun);
7933 }
7934 }
7935 mutex_exit(&ptgt->tgt_mutex);
7936
7937 return (NULL);
7938 }
7939
7940
7941 /*
7942 * handle finishing one target for fcp_finish_init
7943 *
7944 * return true (non-zero) if we want finish_init to continue with the
7945 * next target
7946 *
7947 * called with the port mutex held
7948 */
/*ARGSUSED*/
static int
fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int cause)
{
	int	rval = 1;	/* default: tell finish_init to continue */
	ASSERT(pptr != NULL);
	ASSERT(ptgt != NULL);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
	    ptgt->tgt_state);

	ASSERT(mutex_owned(&pptr->port_mutex));

	/*
	 * Stale-work check: a link reset or target change since this work
	 * was queued invalidates it; tell the caller to stop (return 0).
	 */
	if ((pptr->port_link_cnt != link_cnt) ||
	    (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
		/*
		 * oh oh -- another link reset or target change
		 * must have occurred while we are in here
		 */
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);

		return (0);
	} else {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
	}

	mutex_enter(&ptgt->tgt_mutex);

	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
		/*
		 * tgt is not offline -- is it marked (i.e. needs
		 * to be offlined) ??
		 */
		if (ptgt->tgt_state & FCP_TGT_MARK) {
			/*
			 * this target not offline *and*
			 * marked
			 */
			ptgt->tgt_state &= ~FCP_TGT_MARK;
			rval = fcp_offline_target(pptr, ptgt, link_cnt,
			    tgt_cnt, 0, 0);
		} else {
			ptgt->tgt_state &= ~FCP_TGT_BUSY;

			/* create the LUNs */
			if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
				ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
				fcp_create_luns(ptgt, link_cnt, tgt_cnt,
				    cause);
				ptgt->tgt_device_created = 1;
			} else {
				/* on-demand node: just clear LUN busy state */
				fcp_update_tgt_state(ptgt, FCP_RESET,
				    FCP_LUN_BUSY);
			}
		}
	}

	mutex_exit(&ptgt->tgt_mutex);

	return (rval);
}
8013
8014
8015 /*
8016 * this routine is called to finish port initialization
8017 *
8018 * Each port has a "temp" counter -- when a state change happens (e.g.
8019 * port online), the temp count is set to the number of devices in the map.
8020 * Then, as each device gets "discovered", the temp counter is decremented
8021 * by one. When this count reaches zero we know that all of the devices
8022 * in the map have been discovered (or an error has occurred), so we can
8023 * then finish initialization -- which is done by this routine (well, this
8024 * and fcp-finish_tgt())
8025 *
8026 * acquires and releases the global mutex
8027 *
8028 * called with the port mutex owned
8029 */
8030 static void
8031 fcp_finish_init(struct fcp_port *pptr)
8032 {
8033 #ifdef DEBUG
8034 bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
8035 pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
8036 FCP_STACK_DEPTH);
8037 #endif /* DEBUG */
8038
8039 ASSERT(mutex_owned(&pptr->port_mutex));
8040
8041 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8042 fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
8043 " entering; ipkt count=%d", pptr->port_ipkt_cnt);
8044
8045 if ((pptr->port_state & FCP_STATE_ONLINING) &&
8046 !(pptr->port_state & (FCP_STATE_SUSPENDED |
8047 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
8048 pptr->port_state &= ~FCP_STATE_ONLINING;
8049 pptr->port_state |= FCP_STATE_ONLINE;
8050 }
8051
8052 /* Wake up threads waiting on config done */
8053 cv_broadcast(&pptr->port_config_cv);
8054 }
8055
8056
8057 /*
8058 * called from fcp_finish_init to create the LUNs for a target
8059 *
8060 * called with the port mutex owned
8061 */
static void
fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
{
	struct fcp_lun	*plun;
	struct fcp_port	*pptr;
	child_info_t	*cip = NULL;	/* no existing child: HP creates one */

	ASSERT(ptgt != NULL);
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	pptr = ptgt->tgt_port;

	ASSERT(pptr != NULL);

	/* scan all LUNs for this target */
	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
		if (plun->lun_state & FCP_LUN_OFFLINE) {
			continue;
		}

		/* MARKed means the LUN was not seen in the latest probe. */
		if (plun->lun_state & FCP_LUN_MARK) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_create_luns: offlining marked LUN!");
			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
			continue;
		}

		plun->lun_state &= ~FCP_LUN_BUSY;

		/*
		 * There are conditions in which FCP_LUN_INIT flag is cleared
		 * but we have a valid plun->lun_cip. To cover this case also
		 * CLEAR_BUSY whenever we have a valid lun_cip.
		 */
		if (plun->lun_mpxio && plun->lun_cip &&
		    (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
		    0, 0))) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_create_luns: enable lun %p failed!",
			    plun);
		}

		/* Already initialized and unchanged: nothing to do. */
		if (plun->lun_state & FCP_LUN_INIT &&
		    !(plun->lun_state & FCP_LUN_CHANGED)) {
			continue;
		}

		if (cause == FCP_CAUSE_USER_CREATE) {
			continue;
		}

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_6, 0,
		    "create_luns: passing ONLINE elem to HP thread");

		/*
		 * If lun has changed, prepare for offlining the old path.
		 * Do not offline the old path right now, since it may be
		 * still opened.
		 */
		if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
			fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
		}

		/* pass an ONLINE element to the hotplug thread */
		if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
		    link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {

			/*
			 * We can not synchronous attach (i.e pass
			 * NDI_ONLINE_ATTACH) here as we might be
			 * coming from an interrupt or callback
			 * thread.
			 */
			if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
			    link_cnt, tgt_cnt, 0, 0)) {
				fcp_log(CE_CONT, pptr->port_dip,
				    "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
				    plun->lun_tgt->tgt_d_id, plun->lun_num);
			}
		}
	}
}
8148
8149
8150 /*
8151 * function to online/offline devices
8152 */
static int
fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
    int online, int lcount, int tcount, int flags)
{
	int	rval = NDI_FAILURE;
	int	circ;		/* ndi/mdi_devi_enter circular-dep cookie */
	child_info_t	*ccip;
	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
	int	is_mpxio = pptr->port_mpxio;

	/*
	 * Stale event: lun_mpxio flipped since the event was queued, so
	 * cip no longer describes the current child. Drop the request.
	 */
	if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
		/*
		 * When this event gets serviced, lun_cip and lun_mpxio
		 * has changed, so it should be invalidated now.
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
		    "plun: %p, cip: %p, what:%d", plun, cip, online);
		return (rval);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
	    "flags=%x mpxio=%x\n",
	    plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
	    plun->lun_mpxio);

	/*
	 * lun_mpxio needs checking here because we can end up in a race
	 * condition where this task has been dispatched while lun_mpxio is
	 * set, but an earlier FCP_ONLINE task for the same LUN tried to
	 * enable MPXIO for the LUN, but was unable to, and hence cleared
	 * the flag. We rely on the serialization of the tasks here. We return
	 * NDI_SUCCESS so any callers continue without reporting spurious
	 * errors, and still think we're an MPXIO LUN.
	 */

	if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
	    online == FCP_MPXIO_PATH_SET_BUSY) {
		if (plun->lun_mpxio) {
			rval = fcp_update_mpxio_path(plun, cip, online);
		} else {
			rval = NDI_SUCCESS;
		}
		return (rval);
	}

	/* Hold the port busy for the duration of the online/offline. */
	if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
		return (NDI_FAILURE);
	}

	/* Serialize devinfo tree changes (MDI vs NDI flavor). */
	if (is_mpxio) {
		mdi_devi_enter(pptr->port_dip, &circ);
	} else {
		ndi_devi_enter(pptr->port_dip, &circ);
	}

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);

	if (online == FCP_ONLINE) {
		ccip = fcp_get_cip(plun, cip, lcount, tcount);
		if (ccip == NULL) {
			goto fail;
		}
	} else {
		if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
			goto fail;
		}
		ccip = cip;
	}

	if (online == FCP_ONLINE) {
		rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
		    &circ);
	} else {
		rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
		    &circ);
	}

fail:	mutex_exit(&plun->lun_mutex);
	mutex_exit(&pptr->port_mutex);

	if (rval == NDI_SUCCESS) {
		fc_ulp_log_device_event(pptr->port_fp_handle,
		    online == FCP_ONLINE ?
		    FC_ULP_DEVICE_ONLINE : FC_ULP_DEVICE_OFFLINE);
	}

	if (is_mpxio) {
		mdi_devi_exit(pptr->port_dip, circ);
	} else {
		ndi_devi_exit(pptr->port_dip, circ);
	}

	/* Drop the busy hold taken above. */
	fc_ulp_idle_port(pptr->port_fp_handle);

	return (rval);
}
8252
8253
8254 /*
8255 * take a target offline by taking all of its LUNs offline
8256 */
/*ARGSUSED*/
static int
fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int nowait, int flags)
{
	struct fcp_tgt_elem *elem;

	ASSERT(mutex_owned(&pptr->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));

	/* Stale request: link or target generation moved on; do nothing. */
	if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
	    ptgt->tgt_change_cnt)) {
		/* tgt_mutex is dropped around the trace macro only */
		mutex_exit(&ptgt->tgt_mutex);
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
		mutex_enter(&ptgt->tgt_mutex);

		return (0);
	}

	ptgt->tgt_pd_handle = NULL;
	mutex_exit(&ptgt->tgt_mutex);
	FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
	mutex_enter(&ptgt->tgt_mutex);

	tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;

	/*
	 * If the target supports tagged queuing and we can allocate a
	 * deferral element, queue the offline for the watchdog (delayed
	 * unless nowait); otherwise offline it synchronously right now.
	 */
	if (ptgt->tgt_tcap &&
	    (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
		elem->flags = flags;
		elem->time = fcp_watchdog_time;
		if (nowait == 0) {
			elem->time += fcp_offline_delay;
		}
		elem->ptgt = ptgt;
		elem->link_cnt = link_cnt;
		elem->tgt_cnt = tgt_cnt;
		elem->next = pptr->port_offline_tgts;
		pptr->port_offline_tgts = elem;
	} else {
		fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
	}

	return (1);
}
8303
8304
/*
 * Offline a target immediately: re-enable fp relogin for its WWN, mark
 * the target offline, drop its port-device handle and offline every LUN.
 * Called with both the port mutex and the target mutex held.
 */
static void
fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int flags)
{
	ASSERT(mutex_owned(&pptr->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
	ptgt->tgt_state = FCP_TGT_OFFLINE;
	ptgt->tgt_pd_handle = NULL;
	fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
}
8317
8318
8319 static void
8320 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
8321 int flags)
8322 {
8323 struct fcp_lun *plun;
8324
8325 ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
8326 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8327
8328 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8329 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
8330 fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
8331 }
8332 }
8333 }
8334
8335
8336 /*
8337 * take a LUN offline
8338 *
8339 * enters and leaves with the target mutex held, releasing it in the process
8340 *
8341 * allocates memory in non-sleep mode
8342 */
8343 static void
8344 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8345 int nowait, int flags)
8346 {
8347 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
8348 struct fcp_lun_elem *elem;
8349
8350 ASSERT(plun != NULL);
8351 ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8352
8353 if (nowait) {
8354 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8355 return;
8356 }
8357
8358 if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8359 elem->flags = flags;
8360 elem->time = fcp_watchdog_time;
8361 if (nowait == 0) {
8362 elem->time += fcp_offline_delay;
8363 }
8364 elem->plun = plun;
8365 elem->link_cnt = link_cnt;
8366 elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
8367 elem->next = pptr->port_offline_luns;
8368 pptr->port_offline_luns = elem;
8369 } else {
8370 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8371 }
8372 }
8373
8374
/*
 * Prepare a LUN for offlining: abort all queued commands for it and, if
 * it is an MPxIO path, clear path busy and disable the path in MDI.
 * Enters and leaves with the target mutex held; drops it internally
 * around the command scan/abort and the MDI path-disable call.
 */
static void
fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
{
	struct fcp_pkt	*head = NULL;

	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	mutex_exit(&LUN_TGT->tgt_mutex);

	/* Pull this LUN's queued commands off the port queue and fail them. */
	head = fcp_scan_commands(plun);
	if (head != NULL) {
		fcp_abort_commands(head, LUN_PORT);
	}

	mutex_enter(&LUN_TGT->tgt_mutex);

	if (plun->lun_cip && plun->lun_mpxio) {
		/*
		 * Intimate MPxIO lun busy is cleared
		 */
		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
		    0, 0)) {
			fcp_log(CE_NOTE, LUN_PORT->port_dip,
			    "Can not ENABLE LUN; D_ID=%x, LUN=%x",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
		/*
		 * Intimate MPxIO that the lun is now marked for offline
		 */
		mutex_exit(&LUN_TGT->tgt_mutex);
		(void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
		mutex_enter(&LUN_TGT->tgt_mutex);
	}
}
8410
/*
 * Offline a LUN immediately: update its offline state flags, abort its
 * outstanding commands, and hand an OFFLINE element to the hotplug
 * thread if the LUN has a child node. Called (and returns) with the
 * target mutex held; drops it around fcp_update_offline_flags().
 */
static void
fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
    int flags)
{
	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	mutex_exit(&LUN_TGT->tgt_mutex);
	fcp_update_offline_flags(plun);
	mutex_enter(&LUN_TGT->tgt_mutex);

	fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);

	FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_4, 0,
	    "offline_lun: passing OFFLINE elem to HP thread");

	if (plun->lun_cip) {
		fcp_log(CE_NOTE, LUN_PORT->port_dip,
		    "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
		    plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
		    LUN_TGT->tgt_trace);

		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
		    link_cnt, tgt_cnt, flags, 0)) {
			fcp_log(CE_CONT, LUN_PORT->port_dip,
			    "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
	}
}
8441
/*
 * Watchdog-driven sweep of the deferred LUN-offline list: offline each
 * element whose deadline has passed and whose link/target generation is
 * unchanged, then unlink and free it. Called with the port mutex held.
 */
static void
fcp_scan_offline_luns(struct fcp_port *pptr)
{
	struct fcp_lun_elem	*elem;
	struct fcp_lun_elem	*prev;
	struct fcp_lun_elem	*next;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	prev = NULL;
	elem = pptr->port_offline_luns;
	while (elem) {
		next = elem->next;
		if (elem->time <= fcp_watchdog_time) {
			int changed = 1;
			struct fcp_tgt *ptgt = elem->plun->lun_tgt;

			mutex_enter(&ptgt->tgt_mutex);
			if (pptr->port_link_cnt == elem->link_cnt &&
			    ptgt->tgt_change_cnt == elem->tgt_cnt) {
				changed = 0;
			}

			/*
			 * NOTE(review): lun_state is tested against
			 * FCP_TGT_OFFLINE (a target flag), not
			 * FCP_LUN_OFFLINE — verify the flag values
			 * coincide or this check is wrong.
			 */
			if (!changed &&
			    !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
				fcp_offline_lun_now(elem->plun,
				    elem->link_cnt, elem->tgt_cnt, elem->flags);
			}
			mutex_exit(&ptgt->tgt_mutex);

			kmem_free(elem, sizeof (*elem));

			/* Unlink the consumed element from the list. */
			if (prev) {
				prev->next = next;
			} else {
				pptr->port_offline_luns = next;
			}
		} else {
			prev = elem;
		}
		elem = next;
	}
}
8485
8486
/*
 * Watchdog-driven sweep of the deferred target-offline list: offline
 * each expired element whose generation counts show it is still
 * applicable, then unlink and free it. Called with the port mutex held.
 */
static void
fcp_scan_offline_tgts(struct fcp_port *pptr)
{
	struct fcp_tgt_elem	*elem;
	struct fcp_tgt_elem	*prev;
	struct fcp_tgt_elem	*next;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	prev = NULL;
	elem = pptr->port_offline_tgts;
	while (elem) {
		next = elem->next;
		if (elem->time <= fcp_watchdog_time) {
			int		outdated = 1;
			struct fcp_tgt	*ptgt = elem->ptgt;

			mutex_enter(&ptgt->tgt_mutex);

			if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
				/* No change on tgt since elem was created. */
				outdated = 0;
			} else if (ptgt->tgt_change_cnt == elem->tgt_cnt + 1 &&
			    pptr->port_link_cnt == elem->link_cnt + 1 &&
			    ptgt->tgt_statec_cause == FCP_CAUSE_LINK_DOWN) {
				/*
				 * Exactly one thing happened to the target
				 * inbetween: the local port went offline.
				 * For fp the remote port is already gone so
				 * it will not tell us again to offline the
				 * target. We must offline it now.
				 */
				outdated = 0;
			}

			if (!outdated && !(ptgt->tgt_state &
			    FCP_TGT_OFFLINE)) {
				fcp_offline_target_now(pptr,
				    ptgt, elem->link_cnt, elem->tgt_cnt,
				    elem->flags);
			}

			mutex_exit(&ptgt->tgt_mutex);

			kmem_free(elem, sizeof (*elem));

			/* Unlink the consumed element from the list. */
			if (prev) {
				prev->next = next;
			} else {
				pptr->port_offline_tgts = next;
			}
		} else {
			prev = elem;
		}
		elem = next;
	}
}
8544
8545
/*
 * Mark a LUN offline in its state flags and, if a target-driver child
 * was attached, deliver the FCAL_REMOVE_EVENT NDI callback to it.
 * Called with no locks held; takes and releases tgt_mutex/lun_mutex
 * internally (note the asymmetric exit paths of the two branches).
 */
static void
fcp_update_offline_flags(struct fcp_lun *plun)
{
	struct fcp_port	*pptr = LUN_PORT;
	ASSERT(plun != NULL);

	mutex_enter(&LUN_TGT->tgt_mutex);
	plun->lun_state |= FCP_LUN_OFFLINE;
	plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);

	mutex_enter(&plun->lun_mutex);
	if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
		dev_info_t *cdip = NULL;

		mutex_exit(&LUN_TGT->tgt_mutex);

		/* Resolve the child devinfo node (MPxIO client or direct). */
		if (plun->lun_mpxio == 0) {
			cdip = DIP(plun->lun_cip);
		} else if (plun->lun_cip) {
			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
		}

		mutex_exit(&plun->lun_mutex);
		if (cdip) {
			/* Fire the removal event at the child driver. */
			(void) ndi_event_retrieve_cookie(
			    pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
			    &fcp_remove_eid, NDI_EVENT_NOPASS);
			(void) ndi_event_run_callbacks(
			    pptr->port_ndi_event_hdl, cdip,
			    fcp_remove_eid, NULL);
		}
	} else {
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&LUN_TGT->tgt_mutex);
	}
}
8582
8583
8584 /*
8585 * Scan all of the command pkts for this port, moving pkts that
8586 * match our LUN onto our own list (headed by "head")
8587 */
static struct fcp_pkt *
fcp_scan_commands(struct fcp_lun *plun)
{
	struct fcp_port	*pptr = LUN_PORT;

	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
	struct fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
	struct fcp_pkt	*pcmd = NULL;	/* the previous command */

	struct fcp_pkt	*head = NULL;	/* head of our list */
	struct fcp_pkt	*tail = NULL;	/* tail of our list */

	int	cmds_found = 0;

	mutex_enter(&pptr->port_pkt_mutex);
	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
		struct fcp_lun *tlun =
		    ADDR2LUN(&cmd->cmd_pkt->pkt_address);

		ncmd = cmd->cmd_next;	/* set next command */

		/*
		 * if this pkt is for a different LUN or the
		 * command is sent down, skip it.
		 */
		if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
		    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
			pcmd = cmd;	/* cmd stays on the port queue */
			continue;
		}
		cmds_found++;
		/* Unlink cmd from the port queue (head or interior). */
		if (pcmd != NULL) {
			ASSERT(pptr->port_pkt_head != cmd);
			pcmd->cmd_next = cmd->cmd_next;
		} else {
			ASSERT(cmd == pptr->port_pkt_head);
			pptr->port_pkt_head = cmd->cmd_next;
		}

		/* Fix up the queue tail if we removed the last element. */
		if (cmd == pptr->port_pkt_tail) {
			pptr->port_pkt_tail = pcmd;
			if (pcmd) {
				pcmd->cmd_next = NULL;
			}
		}

		/* Append cmd to our private list. */
		if (head == NULL) {
			head = tail = cmd;
		} else {
			ASSERT(tail != NULL);

			tail->cmd_next = cmd;
			tail = cmd;
		}
		cmd->cmd_next = NULL;
	}
	mutex_exit(&pptr->port_pkt_mutex);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "scan commands: %d cmd(s) found", cmds_found);

	return (head);
}
8652
8653
8654 /*
8655 * Abort all the commands in the command queue
8656 */
static void
fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
{
	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
	struct fcp_pkt	*ncmd = NULL;	/* next pkt ptr */

	ASSERT(mutex_owned(&pptr->port_mutex));

	/* scan through the pkts and invalidate them */
	for (cmd = head; cmd != NULL; cmd = ncmd) {
		struct scsi_pkt *pkt = cmd->cmd_pkt;

		ncmd = cmd->cmd_next;
		ASSERT(pkt != NULL);

		/*
		 * The lun is going to be marked offline. Indicate
		 * the target driver not to requeue or retry this command
		 * as the device is going to be offlined pretty soon.
		 */
		pkt->pkt_reason = CMD_DEV_GONE;
		pkt->pkt_statistics = 0;
		pkt->pkt_state = 0;

		/* reset cmd flags/state */
		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
		cmd->cmd_state = FCP_PKT_IDLE;

		/*
		 * ensure we have a packet completion routine,
		 * then call it.
		 */
		ASSERT(pkt->pkt_comp != NULL);

		/* port_mutex is dropped around the completion callback */
		mutex_exit(&pptr->port_mutex);
		fcp_post_callback(cmd);
		mutex_enter(&pptr->port_mutex);
	}
}
8696
8697
8698 /*
8699 * the pkt_comp callback for command packets
8700 */
static void
fcp_cmd_callback(fc_packet_t *fpkt)
{
	struct fcp_pkt	*cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
	struct scsi_pkt	*pkt = cmd->cmd_pkt;
	struct fcp_port	*pptr = ADDR2FCP(&pkt->pkt_address);

	ASSERT(cmd->cmd_state != FCP_PKT_IDLE);

	/* Double completion is fatal (hard check even without DEBUG). */
	if (cmd->cmd_state == FCP_PKT_IDLE) {
		cmn_err(CE_PANIC, "Packet already completed %p",
		    (void *)cmd);
	}

	/*
	 * Watch thread should be freeing the packet, ignore the pkt.
	 */
	if (cmd->cmd_state == FCP_PKT_ABORTING) {
		fcp_log(CE_CONT, pptr->port_dip,
		    "!FCP: Pkt completed while aborting\n");
		return;
	}
	cmd->cmd_state = FCP_PKT_IDLE;

	/* Translate the FC transport result into the scsi_pkt. */
	fcp_complete_pkt(fpkt);

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts--;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	/* Hand the completed pkt back to the target driver. */
	fcp_post_callback(cmd);
}
8735
8736
/*
 * Function: fcp_complete_pkt
 *
 * Description: Translates the completion state of an FC packet into the
 *		SCSI pkt_state/pkt_reason/pkt_statistics values that target
 *		drivers understand.  On FC_PKT_SUCCESS the FCP response IU
 *		is decoded (SCSI status, residuals, FCP response info and
 *		sense data, including kicking off LUN rediscovery on
 *		REPORT_LUNS-changed / LU-not-supported sense).  On any other
 *		FC transport state the FC error is mapped onto an
 *		equivalent SCSI transport error.
 *
 * Argument:	*fpkt	Completed FC packet; pkt_ulp_private points to the
 *			associated struct fcp_pkt.
 *
 * Return Value: None
 */
static void
fcp_complete_pkt(fc_packet_t *fpkt)
{
	int error = 0;
	struct fcp_pkt *cmd = (struct fcp_pkt *)
	    fpkt->pkt_ulp_private;
	struct scsi_pkt *pkt = cmd->cmd_pkt;
	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
	struct fcp_lun *plun;
	struct fcp_tgt *ptgt;
	struct fcp_rsp *rsp;
	struct scsi_address save;

#ifdef	DEBUG
	/* Snapshot the address so the ASSERT at the end can spot corruption. */
	save = pkt->pkt_address;
#endif /* DEBUG */

	rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;

	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
		/*
		 * Transport-level success: copy in and decode the FCP
		 * response IU (only FCAs with DVMA space need the copy).
		 */
		if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));
		}

		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS;

		pkt->pkt_resid = 0;

		if (fpkt->pkt_datalen) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
			/* A data residual is flagged as an error below. */
			if (fpkt->pkt_data_resid) {
				error++;
			}
		}

		if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
		    rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
			/*
			 * The next two checks make sure that if there
			 * is no sense data or a valid response and
			 * the command came back with check condition,
			 * the command should be retried.
			 */
			if (!rsp->fcp_u.fcp_status.rsp_len_set &&
			    !rsp->fcp_u.fcp_status.sense_len_set) {
				pkt->pkt_state &= ~STATE_XFERRED_DATA;
				pkt->pkt_resid = cmd->cmd_dmacount;
			}
		}

		/* Fast path: good status, no residuals -- nothing to decode. */
		if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
			return;
		}

		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		/*
		 * Update the transfer resid, if appropriate
		 */
		if (rsp->fcp_u.fcp_status.resid_over ||
		    rsp->fcp_u.fcp_status.resid_under) {
			pkt->pkt_resid = rsp->fcp_resid;
		}

		/*
		 * First see if we got a FCP protocol error.
		 */
		if (rsp->fcp_u.fcp_status.rsp_len_set) {
			struct fcp_rsp_info *bep;
			bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
			    sizeof (struct fcp_rsp));

			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len, rsp->fcp_sense_len);

				return;
			}

			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp), bep,
				    fpkt->pkt_resp_acc,
				    sizeof (struct fcp_rsp_info));
			}

			/* Non-zero rsp_code means an FCP-level failure. */
			if (bep->rsp_code != FCP_NO_FAILURE) {
				child_info_t *cip;

				pkt->pkt_reason = CMD_TRAN_ERR;

				mutex_enter(&plun->lun_mutex);
				cip = plun->lun_cip;
				mutex_exit(&plun->lun_mutex);

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "FCP response error on cmd=%p"
				    " target=0x%x, cip=%p", cmd,
				    ptgt->tgt_d_id, cip);
			}
		}

		/*
		 * See if we got a SCSI error with sense data
		 */
		if (rsp->fcp_u.fcp_status.sense_len_set) {
			uchar_t rqlen;
			caddr_t sense_from;
			child_info_t *cip;
			timeout_id_t tid;
			struct scsi_arq_status *arq;
			struct scsi_extended_sense *sense_to;

			arq = (struct scsi_arq_status *)pkt->pkt_scbp;
			sense_to = &arq->sts_sensedata;

			/* Never copy more sense than the ARQ area can hold. */
			rqlen = (uchar_t)min(rsp->fcp_sense_len,
			    sizeof (struct scsi_extended_sense));

			/* Sense data follows the rsp IU and any rsp info. */
			sense_from = (caddr_t)fpkt->pkt_resp +
			    sizeof (struct fcp_rsp) + rsp->fcp_response_len;

			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len, rsp->fcp_sense_len);

				return;
			}

			/*
			 * copy in sense information
			 */
			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(sense_from, sense_to,
				    fpkt->pkt_resp_acc, rqlen);
			} else {
				bcopy(sense_from, sense_to, rqlen);
			}

			/*
			 * Sense says the LUN inventory changed (or this LU
			 * disappeared): schedule a reconfiguration unless one
			 * is already pending (tgt_tid non-NULL).
			 */
			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
			    (FCP_SENSE_NO_LUN(sense_to))) {
				mutex_enter(&ptgt->tgt_mutex);
				if (ptgt->tgt_tid == NULL) {
					/*
					 * Kick off rediscovery
					 */
					tid = timeout(fcp_reconfigure_luns,
					    (caddr_t)ptgt, drv_usectohz(1));

					ptgt->tgt_tid = tid;
					ptgt->tgt_state |= FCP_TGT_BUSY;
				}
				mutex_exit(&ptgt->tgt_mutex);
				if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
					FCP_TRACE(fcp_logq, pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: Report Lun Has Changed"
					    " target=%x", ptgt->tgt_d_id);
				} else if (FCP_SENSE_NO_LUN(sense_to)) {
					FCP_TRACE(fcp_logq, pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: LU Not Supported"
					    " target=%x", ptgt->tgt_d_id);
				}
			}
			ASSERT(pkt->pkt_scbp != NULL);

			pkt->pkt_state |= STATE_ARQ_DONE;

			arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;

			/* Synthesize a successful request-sense sub-packet. */
			*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
			arq->sts_rqpkt_reason = 0;
			arq->sts_rqpkt_statistics = 0;

			arq->sts_rqpkt_state = STATE_GOT_BUS |
			    STATE_GOT_TARGET | STATE_SENT_CMD |
			    STATE_GOT_STATUS | STATE_ARQ_DONE |
			    STATE_XFERRED_DATA;

			mutex_enter(&plun->lun_mutex);
			cip = plun->lun_cip;
			mutex_exit(&plun->lun_mutex);

			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_8, 0,
			    "SCSI Check condition on cmd=%p target=0x%x"
			    " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
			    " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
			    cmd->cmd_fcp_cmd.fcp_cdb[0],
			    rsp->fcp_u.fcp_status.scsi_status,
			    sense_to->es_key, sense_to->es_add_code,
			    sense_to->es_qual_code);
		}
	} else {
		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		/*
		 * Work harder to translate errors into target driver
		 * understandable ones. Note with despair that the target
		 * drivers don't decode pkt_state and pkt_reason exhaustively
		 * They resort to using the big hammer most often, which
		 * may not get fixed in the life time of this driver.
		 */
		pkt->pkt_state = 0;
		pkt->pkt_statistics = 0;

		switch (fpkt->pkt_state) {
		case FC_PKT_TRAN_ERROR:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OVERRUN:
				pkt->pkt_reason = CMD_CMD_OVR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_XCHG_BSY: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT;	/* Lie */

				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_ABORTED:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_ABORT_FAILED:
				pkt->pkt_reason = CMD_ABORT_FAIL;
				break;

			case FC_REASON_NO_SEQ_INIT:
			case FC_REASON_CRC_ERROR:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;
			default:
				pkt->pkt_reason = CMD_TRAN_ERR;
				break;
			}
			break;

		case FC_PKT_PORT_OFFLINE: {
			dev_info_t *cdip = NULL;
			caddr_t ptr;

			if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_8, 0,
				    "SCSI cmd; LOGIN REQUIRED from FCA for %x",
				    ptgt->tgt_d_id);
			}

			/* Resolve the child dip (mpxio vs. plain ndi). */
			mutex_enter(&plun->lun_mutex);
			if (plun->lun_mpxio == 0) {
				cdip = DIP(plun->lun_cip);
			} else if (plun->lun_cip) {
				cdip = mdi_pi_get_client(PIP(plun->lun_cip));
			}

			mutex_exit(&plun->lun_mutex);

			if (cdip) {
				(void) ndi_event_retrieve_cookie(
				    pptr->port_ndi_event_hdl, cdip,
				    FCAL_REMOVE_EVENT, &fcp_remove_eid,
				    NDI_EVENT_NOPASS);
				(void) ndi_event_run_callbacks(
				    pptr->port_ndi_event_hdl, cdip,
				    fcp_remove_eid, NULL);
			}

			/*
			 * If the link goes off-line for a lip,
			 * this will cause a error to the ST SG
			 * SGEN drivers. By setting BUSY we will
			 * give the drivers the chance to retry
			 * before it blows of the job. ST will
			 * remember how many times it has retried.
			 */

			if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
			    (plun->lun_type == DTYPE_CHANGER)) {
				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
			} else {
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;
			}
			break;
		}

		case FC_PKT_TRAN_BSY:
			/*
			 * Use the ssd Qfull handling here.
			 */
			*pkt->pkt_scbp = STATUS_INTERMEDIATE;
			pkt->pkt_state = STATE_GOT_BUS;
			break;

		case FC_PKT_TIMEOUT:
			pkt->pkt_reason = CMD_TIMEOUT;
			if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
				pkt->pkt_statistics |= STAT_TIMEOUT;
			} else {
				pkt->pkt_statistics |= STAT_ABORTED;
			}
			break;

		case FC_PKT_LOCAL_RJT:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OFFLINE: {
				dev_info_t *cdip = NULL;

				mutex_enter(&plun->lun_mutex);
				if (plun->lun_mpxio == 0) {
					cdip = DIP(plun->lun_cip);
				} else if (plun->lun_cip) {
					cdip = mdi_pi_get_client(
					    PIP(plun->lun_cip));
				}
				mutex_exit(&plun->lun_mutex);

				if (cdip) {
					(void) ndi_event_retrieve_cookie(
					    pptr->port_ndi_event_hdl, cdip,
					    FCAL_REMOVE_EVENT,
					    &fcp_remove_eid,
					    NDI_EVENT_NOPASS);
					(void) ndi_event_run_callbacks(
					    pptr->port_ndi_event_hdl,
					    cdip, fcp_remove_eid, NULL);
				}

				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;

				break;
			}

			case FC_REASON_NOMEM:
			case FC_REASON_QFULL: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_DMA_ERROR:
				pkt->pkt_reason = CMD_DMA_DERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_CRC_ERROR:
			case FC_REASON_UNDERRUN: {
				uchar_t status;
				/*
				 * Work around for Bugid: 4240945.
				 * IB on A5k doesn't set the Underrun bit
				 * in the fcp status, when it is transferring
				 * less than requested amount of data. Work
				 * around the ses problem to keep luxadm
				 * happy till ibfirmware is fixed.
				 */
				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
					FCP_CP_IN(fpkt->pkt_resp, rsp,
					    fpkt->pkt_resp_acc,
					    sizeof (struct fcp_rsp));
				}
				status = rsp->fcp_u.fcp_status.scsi_status;
				if (((plun->lun_type & DTYPE_MASK) ==
				    DTYPE_ESI) && (status == STATUS_GOOD)) {
					pkt->pkt_reason = CMD_CMPLT;
					*pkt->pkt_scbp = status;
					pkt->pkt_resid = 0;
				} else {
					pkt->pkt_reason = CMD_TRAN_ERR;
					pkt->pkt_statistics |= STAT_ABORTED;
				}
				break;
			}

			case FC_REASON_NO_CONNECTION:
			case FC_REASON_UNSUPPORTED:
			case FC_REASON_ILLEGAL_REQ:
			case FC_REASON_BAD_SID:
			case FC_REASON_DIAG_BUSY:
			case FC_REASON_FCAL_OPN_FAIL:
			case FC_REASON_BAD_XID:
			default:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			}
			break;

		case FC_PKT_NPORT_RJT:
		case FC_PKT_FABRIC_RJT:
		case FC_PKT_NPORT_BSY:
		case FC_PKT_FABRIC_BSY:
		default:
			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_8, 0,
			    "FC Status 0x%x, reason 0x%x",
			    fpkt->pkt_state, fpkt->pkt_reason);
			pkt->pkt_reason = CMD_TRAN_ERR;
			pkt->pkt_statistics |= STAT_ABORTED;
			break;
		}

		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_9, 0,
		    "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
		    " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
		    fpkt->pkt_reason);
	}

	/* DEBUG only: pkt_address must not have been clobbered above. */
	ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
}
9200
9201
9202 static int
9203 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9204 {
9205 if (rsp->reserved_0 || rsp->reserved_1 ||
9206 rsp->fcp_u.fcp_status.reserved_0 ||
9207 rsp->fcp_u.fcp_status.reserved_1) {
9208 /*
9209 * These reserved fields should ideally be zero. FCP-2 does say
9210 * that the recipient need not check for reserved fields to be
9211 * zero. If they are not zero, we will not make a fuss about it
9212 * - just log it (in debug to both trace buffer and messages
9213 * file and to trace buffer only in non-debug) and move on.
9214 *
9215 * Non-zero reserved fields were seen with minnows.
9216 *
9217 * qlc takes care of some of this but we cannot assume that all
9218 * FCAs will do so.
9219 */
9220 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9221 FCP_BUF_LEVEL_5, 0,
9222 "Got fcp response packet with non-zero reserved fields "
9223 "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9224 "status.reserved_0:0x%x, status.reserved_1:0x%x",
9225 rsp->reserved_0, rsp->reserved_1,
9226 rsp->fcp_u.fcp_status.reserved_0,
9227 rsp->fcp_u.fcp_status.reserved_1);
9228 }
9229
9230 if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9231 (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9232 return (FC_FAILURE);
9233 }
9234
9235 if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9236 (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9237 sizeof (struct fcp_rsp))) {
9238 return (FC_FAILURE);
9239 }
9240
9241 return (FC_SUCCESS);
9242 }
9243
9244
9245 /*
9246 * This is called when there is a change the in device state. The case we're
9247 * handling here is, if the d_id s does not match, offline this tgt and online
9248 * a new tgt with the new d_id. called from fcp_handle_devices with
9249 * port_mutex held.
9250 */
9251 static int
9252 fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
9253 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
9254 {
9255 ASSERT(mutex_owned(&pptr->port_mutex));
9256
9257 FCP_TRACE(fcp_logq, pptr->port_instbuf,
9258 fcp_trace, FCP_BUF_LEVEL_3, 0,
9259 "Starting fcp_device_changed...");
9260
9261 /*
9262 * The two cases where the port_device_changed is called is
9263 * either it changes it's d_id or it's hard address.
9264 */
9265 if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
9266 (FC_TOP_EXTERNAL(pptr->port_topology) &&
9267 (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {
9268
9269 /* offline this target */
9270 mutex_enter(&ptgt->tgt_mutex);
9271 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
9272 (void) fcp_offline_target(pptr, ptgt, link_cnt,
9273 0, 1, NDI_DEVI_REMOVE);
9274 }
9275 mutex_exit(&ptgt->tgt_mutex);
9276
9277 fcp_log(CE_NOTE, pptr->port_dip,
9278 "Change in target properties: Old D_ID=%x New D_ID=%x"
9279 " Old HA=%x New HA=%x", ptgt->tgt_d_id,
9280 map_entry->map_did.port_id, ptgt->tgt_hard_addr,
9281 map_entry->map_hard_addr.hard_addr);
9282 }
9283
9284 return (fcp_handle_mapflags(pptr, ptgt, map_entry,
9285 link_cnt, tgt_cnt, cause));
9286 }
9287
9288 /*
9289 * Function: fcp_alloc_lun
9290 *
9291 * Description: Creates a new lun structure and adds it to the list
9292 * of luns of the target.
9293 *
9294 * Argument: ptgt Target the lun will belong to.
9295 *
9296 * Return Value: NULL Failed
9297 * Not NULL Succeeded
9298 *
9299 * Context: Kernel context
9300 */
9301 static struct fcp_lun *
9302 fcp_alloc_lun(struct fcp_tgt *ptgt)
9303 {
9304 struct fcp_lun *plun;
9305
9306 plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9307 if (plun != NULL) {
9308 /*
9309 * Initialize the mutex before putting in the target list
9310 * especially before releasing the target mutex.
9311 */
9312 mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9313 plun->lun_tgt = ptgt;
9314
9315 mutex_enter(&ptgt->tgt_mutex);
9316 plun->lun_next = ptgt->tgt_lun;
9317 ptgt->tgt_lun = plun;
9318 plun->lun_old_guid = NULL;
9319 plun->lun_old_guid_size = 0;
9320 mutex_exit(&ptgt->tgt_mutex);
9321 }
9322
9323 return (plun);
9324 }
9325
9326 /*
9327 * Function: fcp_dealloc_lun
9328 *
9329 * Description: Frees the LUN structure passed by the caller.
9330 *
9331 * Argument: plun LUN structure to free.
9332 *
9333 * Return Value: None
9334 *
9335 * Context: Kernel context.
9336 */
9337 static void
9338 fcp_dealloc_lun(struct fcp_lun *plun)
9339 {
9340 mutex_enter(&plun->lun_mutex);
9341 if (plun->lun_cip) {
9342 fcp_remove_child(plun);
9343 }
9344 mutex_exit(&plun->lun_mutex);
9345
9346 mutex_destroy(&plun->lun_mutex);
9347 if (plun->lun_guid) {
9348 kmem_free(plun->lun_guid, plun->lun_guid_size);
9349 }
9350 if (plun->lun_old_guid) {
9351 kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
9352 }
9353 kmem_free(plun, sizeof (*plun));
9354 }
9355
9356 /*
9357 * Function: fcp_alloc_tgt
9358 *
9359 * Description: Creates a new target structure and adds it to the port
9360 * hash list.
9361 *
9362 * Argument: pptr fcp port structure
9363 * *map_entry entry describing the target to create
9364 * link_cnt Link state change counter
9365 *
9366 * Return Value: NULL Failed
9367 * Not NULL Succeeded
9368 *
9369 * Context: Kernel context.
9370 */
9371 static struct fcp_tgt *
9372 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
9373 {
9374 int hash;
9375 uchar_t *wwn;
9376 struct fcp_tgt *ptgt;
9377
9378 ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
9379 if (ptgt != NULL) {
9380 mutex_enter(&pptr->port_mutex);
9381 if (link_cnt != pptr->port_link_cnt) {
9382 /*
9383 * oh oh -- another link reset
9384 * in progress -- give up
9385 */
9386 mutex_exit(&pptr->port_mutex);
9387 kmem_free(ptgt, sizeof (*ptgt));
9388 ptgt = NULL;
9389 } else {
9390 /*
9391 * initialize the mutex before putting in the port
9392 * wwn list, especially before releasing the port
9393 * mutex.
9394 */
9395 mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);
9396
9397 /* add new target entry to the port's hash list */
9398 wwn = (uchar_t *)&map_entry->map_pwwn;
9399 hash = FCP_HASH(wwn);
9400
9401 ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
9402 pptr->port_tgt_hash_table[hash] = ptgt;
9403
9404 /* save cross-ptr */
9405 ptgt->tgt_port = pptr;
9406
9407 ptgt->tgt_change_cnt = 1;
9408
9409 /* initialize the target manual_config_only flag */
9410 if (fcp_enable_auto_configuration) {
9411 ptgt->tgt_manual_config_only = 0;
9412 } else {
9413 ptgt->tgt_manual_config_only = 1;
9414 }
9415
9416 mutex_exit(&pptr->port_mutex);
9417 }
9418 }
9419
9420 return (ptgt);
9421 }
9422
9423 /*
9424 * Function: fcp_dealloc_tgt
9425 *
9426 * Description: Frees the target structure passed by the caller.
9427 *
9428 * Argument: ptgt Target structure to free.
9429 *
9430 * Return Value: None
9431 *
9432 * Context: Kernel context.
9433 */
static void
fcp_dealloc_tgt(struct fcp_tgt *ptgt)
{
	/*
	 * NOTE(review): assumes ptgt has already been unlinked from the
	 * port's tgt hash list and that no luns remain -- confirm at the
	 * call sites.
	 */
	mutex_destroy(&ptgt->tgt_mutex);
	kmem_free(ptgt, sizeof (*ptgt));
}
9440
9441
9442 /*
9443 * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry
9444 *
9445 * Device discovery commands will not be retried for-ever as
9446 * this will have repercussions on other devices that need to
9447 * be submitted to the hotplug thread. After a quick glance
9448 * at the SCSI-3 spec, it was found that the spec doesn't
9449 * mandate a forever retry, rather recommends a delayed retry.
9450 *
9451 * Since Photon IB is single threaded, STATUS_BUSY is common
9452 * in a 4+initiator environment. Make sure the total time
 * spent on retries (including command timeout) does not
 * exceed 60 seconds
 */
9456 static void
9457 fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
9458 {
9459 struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9460 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
9461
9462 mutex_enter(&pptr->port_mutex);
9463 mutex_enter(&ptgt->tgt_mutex);
9464 if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
9465 FCP_TRACE(fcp_logq, pptr->port_instbuf,
9466 fcp_trace, FCP_BUF_LEVEL_2, 0,
9467 "fcp_queue_ipkt,1:state change occured"
9468 " for D_ID=0x%x", ptgt->tgt_d_id);
9469 mutex_exit(&ptgt->tgt_mutex);
9470 mutex_exit(&pptr->port_mutex);
9471 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
9472 icmd->ipkt_change_cnt, icmd->ipkt_cause);
9473 fcp_icmd_free(pptr, icmd);
9474 return;
9475 }
9476 mutex_exit(&ptgt->tgt_mutex);
9477
9478 icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;
9479
9480 if (pptr->port_ipkt_list != NULL) {
9481 /* add pkt to front of doubly-linked list */
9482 pptr->port_ipkt_list->ipkt_prev = icmd;
9483 icmd->ipkt_next = pptr->port_ipkt_list;
9484 pptr->port_ipkt_list = icmd;
9485 icmd->ipkt_prev = NULL;
9486 } else {
9487 /* this is the first/only pkt on the list */
9488 pptr->port_ipkt_list = icmd;
9489 icmd->ipkt_next = NULL;
9490 icmd->ipkt_prev = NULL;
9491 }
9492 mutex_exit(&pptr->port_mutex);
9493 }
9494
9495 /*
9496 * Function: fcp_transport
9497 *
 * Description: This function submits the Fibre Channel packet to the transport
9499 * layer by calling fc_ulp_transport(). If fc_ulp_transport()
9500 * fails the submission, the treatment depends on the value of
9501 * the variable internal.
9502 *
9503 * Argument: port_handle fp/fctl port handle.
9504 * *fpkt Packet to submit to the transport layer.
9505 * internal Not zero when it's an internal packet.
9506 *
9507 * Return Value: FC_TRAN_BUSY
9508 * FC_STATEC_BUSY
9509 * FC_OFFLINE
9510 * FC_LOGINREQ
9511 * FC_DEVICE_BUSY
9512 * FC_SUCCESS
9513 */
static int
fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
{
	int rval;

	rval = fc_ulp_transport(port_handle, fpkt);
	if (rval == FC_SUCCESS) {
		return (rval);
	}

	/*
	 * LUN isn't marked BUSY or OFFLINE, so we got here to transport
	 * a command, if the underlying modules see that there is a state
	 * change, or if a port is OFFLINE, that means, that state change
	 * hasn't reached FCP yet, so re-queue the command for deferred
	 * submission.
	 */
	if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
	    (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
	    (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
		/*
		 * Defer packet re-submission. Life hang is possible on
		 * internal commands if the port driver sends FC_STATEC_BUSY
		 * for ever, but that shouldn't happen in a good environment.
		 * Limiting re-transport for internal commands is probably a
		 * good idea..
		 * A race condition can happen when a port sees barrage of
		 * link transitions offline to online. If the FCTL has
		 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
		 * internal commands should be queued to do the discovery.
		 * The race condition is when an online comes and FCP starts
		 * its internal discovery and the link goes offline. It is
		 * possible that the statec_callback has not reached FCP
		 * and FCP is carrying on with its internal discovery.
		 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
		 * that the link has gone offline. At this point FCP should
		 * drop all the internal commands and wait for the
		 * statec_callback. It will be facilitated by incrementing
		 * port_link_cnt.
		 *
		 * For external commands, the (FC)pkt_timeout is decremented
		 * by the QUEUE Delay added by our driver, Care is taken to
		 * ensure that it doesn't become zero (zero means no timeout)
		 * If the time expires right inside driver queue itself,
		 * the watch thread will return it to the original caller
		 * indicating that the command has timed-out.
		 */
		if (internal) {
			/*
			 * Internal (discovery) packet: pick a human-readable
			 * name for the opcode, then let
			 * fcp_handle_ipkt_errors() apply the retry policy.
			 */
			char *op;
			struct fcp_ipkt *icmd;

			icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
			switch (icmd->ipkt_opcode) {
			case SCMD_REPORT_LUN:
				op = "REPORT LUN";
				break;

			case SCMD_INQUIRY:
				op = "INQUIRY";
				break;

			case SCMD_INQUIRY_PAGE83:
				op = "INQUIRY-83";
				break;

			default:
				op = "Internal SCSI COMMAND";
				break;
			}

			if (fcp_handle_ipkt_errors(icmd->ipkt_port,
			    icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
				/* Retry was scheduled; report success. */
				rval = FC_SUCCESS;
			}
		} else {
			/*
			 * External command: either fail immediately
			 * (FLAG_NOQUEUE) or park it on the port queue for
			 * deferred re-submission by the watch thread.
			 */
			struct fcp_pkt *cmd;
			struct fcp_port *pptr;

			cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
			cmd->cmd_state = FCP_PKT_IDLE;
			pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);

			if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_9, 0,
				    "fcp_transport: xport busy for pkt %p",
				    cmd->cmd_pkt);
				rval = FC_TRAN_BUSY;
			} else {
				fcp_queue_pkt(pptr, cmd);
				rval = FC_SUCCESS;
			}
		}
	}

	return (rval);
}
9611
9612 /*VARARGS3*/
9613 static void
9614 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9615 {
9616 char buf[256];
9617 va_list ap;
9618
9619 if (dip == NULL) {
9620 dip = fcp_global_dip;
9621 }
9622
9623 va_start(ap, fmt);
9624 (void) vsprintf(buf, fmt, ap);
9625 va_end(ap);
9626
9627 scsi_log(dip, "fcp", level, buf);
9628 }
9629
9630 /*
9631 * This function retries NS registry of FC4 type.
9632 * It assumes that fcp_mutex is held.
9633 * The function does nothing if topology is not fabric
9634 * So, the topology has to be set before this function can be called
9635 */
9636 static void
9637 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9638 {
9639 int rval;
9640
9641 ASSERT(MUTEX_HELD(&pptr->port_mutex));
9642
9643 if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9644 ((pptr->port_topology != FC_TOP_FABRIC) &&
9645 (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9646 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9647 pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9648 }
9649 return;
9650 }
9651 mutex_exit(&pptr->port_mutex);
9652 rval = fcp_do_ns_registry(pptr, s_id);
9653 mutex_enter(&pptr->port_mutex);
9654
9655 if (rval == 0) {
9656 /* Registry successful. Reset flag */
9657 pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9658 }
9659 }
9660
9661 /*
9662 * This function registers the ULP with the switch by calling transport i/f
9663 */
9664 static int
9665 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9666 {
9667 fc_ns_cmd_t ns_cmd;
9668 ns_rfc_type_t rfc;
9669 uint32_t types[8];
9670
9671 /*
9672 * Prepare the Name server structure to
9673 * register with the transport in case of
9674 * Fabric configuration.
9675 */
9676 bzero(&rfc, sizeof (rfc));
9677 bzero(types, sizeof (types));
9678
9679 types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9680 (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
9681
9682 rfc.rfc_port_id.port_id = s_id;
9683 bcopy(types, rfc.rfc_types, sizeof (types));
9684
9685 ns_cmd.ns_flags = 0;
9686 ns_cmd.ns_cmd = NS_RFT_ID;
9687 ns_cmd.ns_req_len = sizeof (rfc);
9688 ns_cmd.ns_req_payload = (caddr_t)&rfc;
9689 ns_cmd.ns_resp_len = 0;
9690 ns_cmd.ns_resp_payload = NULL;
9691
9692 /*
9693 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9694 */
9695 if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9696 fcp_log(CE_WARN, pptr->port_dip,
9697 "!ns_registry: failed name server registration");
9698 return (1);
9699 }
9700
9701 return (0);
9702 }
9703
9704 /*
9705 * Function: fcp_handle_port_attach
9706 *
9707 * Description: This function is called from fcp_port_attach() to attach a
9708 * new port. This routine does the following:
9709 *
9710 * 1) Allocates an fcp_port structure and initializes it.
 *		2) Tries to register the new FC-4 (FCP) capability with the name
9712 * server.
9713 * 3) Kicks off the enumeration of the targets/luns visible
9714 * through this new port. That is done by calling
9715 * fcp_statec_callback() if the port is online.
9716 *
9717 * Argument: ulph fp/fctl port handle.
9718 * *pinfo Port information.
9719 * s_id Port ID.
9720 * instance Device instance number for the local port
9721 * (returned by ddi_get_instance()).
9722 *
9723 * Return Value: DDI_SUCCESS
9724 * DDI_FAILURE
9725 *
9726 * Context: User and Kernel context.
9727 */
9728 /*ARGSUSED*/
9729 int
9730 fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
9731 uint32_t s_id, int instance)
9732 {
9733 int res = DDI_FAILURE;
9734 scsi_hba_tran_t *tran;
9735 int mutex_initted = FALSE;
9736 int hba_attached = FALSE;
9737 int soft_state_linked = FALSE;
9738 int event_bind = FALSE;
9739 struct fcp_port *pptr;
9740 fc_portmap_t *tmp_list = NULL;
9741 uint32_t max_cnt, alloc_cnt;
9742 uchar_t *boot_wwn = NULL;
9743 uint_t nbytes;
9744 int manual_cfg;
9745
9746 /*
9747 * this port instance attaching for the first time (or after
9748 * being detached before)
9749 */
9750 FCP_TRACE(fcp_logq, "fcp", fcp_trace,
9751 FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);
9752
9753 if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
9754 cmn_err(CE_WARN, "fcp: Softstate struct alloc failed"
9755 "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
9756 instance);
9757 return (res);
9758 }
9759
9760 if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
9761 /* this shouldn't happen */
9762 ddi_soft_state_free(fcp_softstate, instance);
9763 cmn_err(CE_WARN, "fcp: bad soft state");
9764 return (res);
9765 }
9766
9767 (void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);
9768
9769 /*
9770 * Make a copy of ulp_port_info as fctl allocates
9771 * a temp struct.
9772 */
9773 (void) fcp_cp_pinfo(pptr, pinfo);
9774
9775 /*
9776 * Check for manual_configuration_only property.
	 * Enable manual configuration if the property is
9778 * set to 1, otherwise disable manual configuration.
9779 */
9780 if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
9781 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9782 MANUAL_CFG_ONLY,
9783 -1)) != -1) {
9784 if (manual_cfg == 1) {
9785 char *pathname;
9786 pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
9787 (void) ddi_pathname(pptr->port_dip, pathname);
9788 cmn_err(CE_NOTE,
9789 "%s (%s%d) %s is enabled via %s.conf.",
9790 pathname,
9791 ddi_driver_name(pptr->port_dip),
9792 ddi_get_instance(pptr->port_dip),
9793 MANUAL_CFG_ONLY,
9794 ddi_driver_name(pptr->port_dip));
9795 fcp_enable_auto_configuration = 0;
9796 kmem_free(pathname, MAXPATHLEN);
9797 }
9798 }
9799 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9800 pptr->port_link_cnt = 1;
9801 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9802 pptr->port_id = s_id;
9803 pptr->port_instance = instance;
9804 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state));
9805 pptr->port_state = FCP_STATE_INIT;
9806 if (pinfo->port_acc_attr == NULL) {
9807 /*
9808 * The corresponding FCA doesn't support DMA at all
9809 */
9810 pptr->port_state |= FCP_STATE_FCA_IS_NODMA;
9811 }
9812
9813 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state));
9814
9815 if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
9816 /*
9817 * If FCA supports DMA in SCSI data phase, we need preallocate
9818 * dma cookie, so stash the cookie size
9819 */
9820 pptr->port_dmacookie_sz = sizeof (ddi_dma_cookie_t) *
9821 pptr->port_data_dma_attr.dma_attr_sgllen;
9822 }
9823
9824 /*
9825 * The two mutexes of fcp_port are initialized. The variable
9826 * mutex_initted is incremented to remember that fact. That variable
9827 * is checked when the routine fails and the mutexes have to be
9828 * destroyed.
9829 */
9830 mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
9831 mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
9832 mutex_initted++;
9833
9834 /*
	 * The SCSI tran structure is allocated and initialized now.
	 */
9836 */
9837 if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
9838 fcp_log(CE_WARN, pptr->port_dip,
9839 "!fcp%d: scsi_hba_tran_alloc failed", instance);
9840 goto fail;
9841 }
9842
9843 /* link in the transport structure then fill it in */
9844 pptr->port_tran = tran;
9845 tran->tran_hba_private = pptr;
9846 tran->tran_tgt_init = fcp_scsi_tgt_init;
9847 tran->tran_tgt_probe = NULL;
9848 tran->tran_tgt_free = fcp_scsi_tgt_free;
9849 tran->tran_start = fcp_scsi_start;
9850 tran->tran_reset = fcp_scsi_reset;
9851 tran->tran_abort = fcp_scsi_abort;
9852 tran->tran_getcap = fcp_scsi_getcap;
9853 tran->tran_setcap = fcp_scsi_setcap;
9854 tran->tran_init_pkt = NULL;
9855 tran->tran_destroy_pkt = NULL;
9856 tran->tran_dmafree = NULL;
9857 tran->tran_sync_pkt = NULL;
9858 tran->tran_reset_notify = fcp_scsi_reset_notify;
9859 tran->tran_get_bus_addr = fcp_scsi_get_bus_addr;
9860 tran->tran_get_name = fcp_scsi_get_name;
9861 tran->tran_clear_aca = NULL;
9862 tran->tran_clear_task_set = NULL;
9863 tran->tran_terminate_task = NULL;
9864 tran->tran_get_eventcookie = fcp_scsi_bus_get_eventcookie;
9865 tran->tran_add_eventcall = fcp_scsi_bus_add_eventcall;
9866 tran->tran_remove_eventcall = fcp_scsi_bus_remove_eventcall;
9867 tran->tran_post_event = fcp_scsi_bus_post_event;
9868 tran->tran_quiesce = NULL;
9869 tran->tran_unquiesce = NULL;
9870 tran->tran_bus_reset = NULL;
9871 tran->tran_bus_config = fcp_scsi_bus_config;
9872 tran->tran_bus_unconfig = fcp_scsi_bus_unconfig;
9873 tran->tran_bus_power = NULL;
9874 tran->tran_interconnect_type = INTERCONNECT_FABRIC;
9875
9876 tran->tran_pkt_constructor = fcp_kmem_cache_constructor;
9877 tran->tran_pkt_destructor = fcp_kmem_cache_destructor;
9878 tran->tran_setup_pkt = fcp_pkt_setup;
9879 tran->tran_teardown_pkt = fcp_pkt_teardown;
9880 tran->tran_hba_len = pptr->port_priv_pkt_len +
9881 sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
9882 if (pptr->port_state & FCP_STATE_FCA_IS_NODMA) {
9883 /*
9884 * If FCA don't support DMA, then we use different vectors to
9885 * minimize the effects on DMA code flow path
9886 */
9887 tran->tran_start = fcp_pseudo_start;
9888 tran->tran_init_pkt = fcp_pseudo_init_pkt;
9889 tran->tran_destroy_pkt = fcp_pseudo_destroy_pkt;
9890 tran->tran_sync_pkt = fcp_pseudo_sync_pkt;
9891 tran->tran_dmafree = fcp_pseudo_dmafree;
9892 tran->tran_setup_pkt = NULL;
9893 tran->tran_teardown_pkt = NULL;
9894 tran->tran_pkt_constructor = NULL;
9895 tran->tran_pkt_destructor = NULL;
9896 pptr->port_data_dma_attr = pseudo_fca_dma_attr;
9897 }
9898
9899 /*
9900 * Allocate an ndi event handle
9901 */
9902 pptr->port_ndi_event_defs = (ndi_event_definition_t *)
9903 kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);
9904
9905 bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
9906 sizeof (fcp_ndi_event_defs));
9907
9908 (void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
9909 &pptr->port_ndi_event_hdl, NDI_SLEEP);
9910
9911 pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
9912 pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
9913 pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;
9914
9915 if (DEVI_IS_ATTACHING(pptr->port_dip) &&
9916 (ndi_event_bind_set(pptr->port_ndi_event_hdl,
9917 &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
9918 goto fail;
9919 }
9920 event_bind++; /* Checked in fail case */
9921
9922 if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
9923 tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
9924 != DDI_SUCCESS) {
9925 fcp_log(CE_WARN, pptr->port_dip,
9926 "!fcp%d: scsi_hba_attach_setup failed", instance);
9927 goto fail;
9928 }
9929 hba_attached++; /* Checked in fail case */
9930
9931 pptr->port_mpxio = 0;
9932 if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
9933 MDI_SUCCESS) {
9934 pptr->port_mpxio++;
9935 }
9936
9937 /*
9938 * The following code is putting the new port structure in the global
9939 * list of ports and, if it is the first port to attach, it start the
9940 * fcp_watchdog_tick.
9941 *
9942 * Why put this new port in the global before we are done attaching it?
9943 * We are actually making the structure globally known before we are
9944 * done attaching it. The reason for that is: because of the code that
9945 * follows. At this point the resources to handle the port are
9946 * allocated. This function is now going to do the following:
9947 *
9948 * 1) It is going to try to register with the name server advertizing
9949 * the new FCP capability of the port.
9950 * 2) It is going to play the role of the fp/fctl layer by building
	 * a list of worldwide names reachable through this port and call
9952 * itself on fcp_statec_callback(). That requires the port to
9953 * be part of the global list.
9954 */
9955 mutex_enter(&fcp_global_mutex);
9956 if (fcp_port_head == NULL) {
9957 fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
9958 }
9959 pptr->port_next = fcp_port_head;
9960 fcp_port_head = pptr;
9961 soft_state_linked++;
9962
9963 if (fcp_watchdog_init++ == 0) {
9964 fcp_watchdog_tick = fcp_watchdog_timeout *
9965 drv_usectohz(1000000);
9966 fcp_watchdog_id = timeout(fcp_watch, NULL,
9967 fcp_watchdog_tick);
9968 }
9969 mutex_exit(&fcp_global_mutex);
9970
9971 /*
9972 * Here an attempt is made to register with the name server, the new
9973 * FCP capability. That is done using an RTF_ID to the name server.
9974 * It is done synchronously. The function fcp_do_ns_registry()
9975 * doesn't return till the name server responded.
9976 * On failures, just ignore it for now and it will get retried during
9977 * state change callbacks. We'll set a flag to show this failure
9978 */
9979 if (fcp_do_ns_registry(pptr, s_id)) {
9980 mutex_enter(&pptr->port_mutex);
9981 pptr->port_state |= FCP_STATE_NS_REG_FAILED;
9982 mutex_exit(&pptr->port_mutex);
9983 } else {
9984 mutex_enter(&pptr->port_mutex);
9985 pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9986 mutex_exit(&pptr->port_mutex);
9987 }
9988
9989 /*
9990 * Lookup for boot WWN property
9991 */
9992 if (modrootloaded != 1) {
9993 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
9994 ddi_get_parent(pinfo->port_dip),
9995 DDI_PROP_DONTPASS, OBP_BOOT_WWN,
9996 &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
9997 (nbytes == FC_WWN_SIZE)) {
9998 bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
9999 }
10000 if (boot_wwn) {
10001 ddi_prop_free(boot_wwn);
10002 }
10003 }
10004
10005 /*
10006 * Handle various topologies and link states.
10007 */
10008 switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
10009 case FC_STATE_OFFLINE:
10010
10011 /*
10012 * we're attaching a port where the link is offline
10013 *
10014 * Wait for ONLINE, at which time a state
10015 * change will cause a statec_callback
10016 *
10017 * in the mean time, do not do anything
10018 */
10019 res = DDI_SUCCESS;
10020 pptr->port_state |= FCP_STATE_OFFLINE;
10021 break;
10022
10023 case FC_STATE_ONLINE: {
10024 if (pptr->port_topology == FC_TOP_UNKNOWN) {
10025 (void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
10026 res = DDI_SUCCESS;
10027 break;
10028 }
10029 /*
10030 * discover devices and create nodes (a private
10031 * loop or point-to-point)
10032 */
10033 ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
10034
10035 /*
10036 * At this point we are going to build a list of all the ports
10037 * that can be reached through this local port. It looks like
10038 * we cannot handle more than FCP_MAX_DEVICES per local port
10039 * (128).
10040 */
10041 if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
10042 sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
10043 KM_NOSLEEP)) == NULL) {
10044 fcp_log(CE_WARN, pptr->port_dip,
10045 "!fcp%d: failed to allocate portmap",
10046 instance);
10047 goto fail;
10048 }
10049
10050 /*
10051 * fc_ulp_getportmap() is going to provide us with the list of
10052 * remote ports in the buffer we just allocated. The way the
10053 * list is going to be retrieved depends on the topology.
10054 * However, if we are connected to a Fabric, a name server
10055 * request may be sent to get the list of FCP capable ports.
10056 * It should be noted that is the case the request is
10057 * synchronous. This means we are stuck here till the name
10058 * server replies. A lot of things can change during that time
10059 * and including, may be, being called on
10060 * fcp_statec_callback() for different reasons. I'm not sure
10061 * the code can handle that.
10062 */
10063 max_cnt = FCP_MAX_DEVICES;
10064 alloc_cnt = FCP_MAX_DEVICES;
10065 if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
10066 &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
10067 FC_SUCCESS) {
10068 caddr_t msg;
10069
10070 (void) fc_ulp_error(res, &msg);
10071
10072 /*
10073 * this just means the transport is
10074 * busy perhaps building a portmap so,
10075 * for now, succeed this port attach
10076 * when the transport has a new map,
10077 * it'll send us a state change then
10078 */
10079 fcp_log(CE_WARN, pptr->port_dip,
10080 "!failed to get port map : %s", msg);
10081
10082 res = DDI_SUCCESS;
10083 break; /* go return result */
10084 }
10085 if (max_cnt > alloc_cnt) {
10086 alloc_cnt = max_cnt;
10087 }
10088
10089 /*
10090 * We are now going to call fcp_statec_callback() ourselves.
10091 * By issuing this call we are trying to kick off the enumera-
10092 * tion process.
10093 */
10094 /*
10095 * let the state change callback do the SCSI device
10096 * discovery and create the devinfos
10097 */
10098 fcp_statec_callback(ulph, pptr->port_fp_handle,
10099 pptr->port_phys_state, pptr->port_topology, tmp_list,
10100 max_cnt, pptr->port_id);
10101
10102 res = DDI_SUCCESS;
10103 break;
10104 }
10105
10106 default:
10107 /* unknown port state */
10108 fcp_log(CE_WARN, pptr->port_dip,
10109 "!fcp%d: invalid port state at attach=0x%x",
10110 instance, pptr->port_phys_state);
10111
10112 mutex_enter(&pptr->port_mutex);
10113 pptr->port_phys_state = FCP_STATE_OFFLINE;
10114 mutex_exit(&pptr->port_mutex);
10115
10116 res = DDI_SUCCESS;
10117 break;
10118 }
10119
10120 /* free temp list if used */
10121 if (tmp_list != NULL) {
10122 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10123 }
10124
10125 /* note the attach time */
10126 pptr->port_attach_time = ddi_get_lbolt64();
10127
10128 /* all done */
10129 return (res);
10130
10131 /* a failure we have to clean up after */
10132 fail:
10133 fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");
10134
10135 if (soft_state_linked) {
10136 /* remove this fcp_port from the linked list */
10137 (void) fcp_soft_state_unlink(pptr);
10138 }
10139
10140 /* unbind and free event set */
10141 if (pptr->port_ndi_event_hdl) {
10142 if (event_bind) {
10143 (void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10144 &pptr->port_ndi_events, NDI_SLEEP);
10145 }
10146 (void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10147 }
10148
10149 if (pptr->port_ndi_event_defs) {
10150 (void) kmem_free(pptr->port_ndi_event_defs,
10151 sizeof (fcp_ndi_event_defs));
10152 }
10153
10154 /*
10155 * Clean up mpxio stuff
10156 */
10157 if (pptr->port_mpxio) {
10158 (void) mdi_phci_unregister(pptr->port_dip, 0);
10159 pptr->port_mpxio--;
10160 }
10161
10162 /* undo SCSI HBA setup */
10163 if (hba_attached) {
10164 (void) scsi_hba_detach(pptr->port_dip);
10165 }
10166 if (pptr->port_tran != NULL) {
10167 scsi_hba_tran_free(pptr->port_tran);
10168 }
10169
10170 mutex_enter(&fcp_global_mutex);
10171
10172 /*
10173 * We check soft_state_linked, because it is incremented right before
10174 * we call increment fcp_watchdog_init. Therefore, we know if
10175 * soft_state_linked is still FALSE, we do not want to decrement
10176 * fcp_watchdog_init or possibly call untimeout.
10177 */
10178
10179 if (soft_state_linked) {
10180 if (--fcp_watchdog_init == 0) {
10181 timeout_id_t tid = fcp_watchdog_id;
10182
10183 mutex_exit(&fcp_global_mutex);
10184 (void) untimeout(tid);
10185 } else {
10186 mutex_exit(&fcp_global_mutex);
10187 }
10188 } else {
10189 mutex_exit(&fcp_global_mutex);
10190 }
10191
10192 if (mutex_initted) {
10193 mutex_destroy(&pptr->port_mutex);
10194 mutex_destroy(&pptr->port_pkt_mutex);
10195 }
10196
10197 if (tmp_list != NULL) {
10198 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10199 }
10200
10201 /* this makes pptr invalid */
10202 ddi_soft_state_free(fcp_softstate, instance);
10203
10204 return (DDI_FAILURE);
10205 }
10206
10207
/*
 * Function: fcp_handle_port_detach
 *
 * Description: Takes an FCP port down for detach, suspend or power-down.
 *		The routine waits for in-flight reconfiguration/internal
 *		packet activity to drain, marks the port offline and, when
 *		actually detaching, unlinks the port from the global list
 *		and releases all of its resources.
 *
 * Argument:	*pptr		FCP port to take down.
 *		flag		FCP_STATE_DETACHING for a real detach;
 *				otherwise the state bit to set.  NOTE(review):
 *				from the checks below this looks like
 *				FCP_STATE_SUSPENDED or FCP_STATE_POWER_DOWN --
 *				confirm against the callers.
 *		instance	Soft state instance number, passed through to
 *				fcp_cleanup_port() on detach.
 *
 * Return Value: FC_SUCCESS
 *		 FC_FAILURE
 */
static int
fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
{
	int count = 0;

	mutex_enter(&pptr->port_mutex);

	/*
	 * if the port is powered down or suspended, nothing else
	 * to do; just return.
	 */
	if (flag != FCP_STATE_DETACHING) {
		if (pptr->port_state & (FCP_STATE_POWER_DOWN |
		    FCP_STATE_SUSPENDED)) {
			pptr->port_state |= flag;
			mutex_exit(&pptr->port_mutex);
			return (FC_SUCCESS);
		}
	}

	/* An MDI operation is in progress; the port cannot go away now. */
	if (pptr->port_state & FCP_STATE_IN_MDI) {
		mutex_exit(&pptr->port_mutex);
		return (FC_FAILURE);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "fcp_handle_port_detach: port is detaching");

	pptr->port_state |= flag;

	/*
	 * Wait for any ongoing reconfig/ipkt to complete, that
	 * ensures the freeing to targets/luns is safe.
	 * No more ref to this port should happen from statec/ioctl
	 * after that as it was removed from the global port list.
	 */
	while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
		/*
		 * Let's give sufficient time for reconfig/ipkt
		 * to complete.
		 */
		if (count++ >= FCP_ICMD_DEADLINE) {
			break;
		}
		/* Drop the port lock while sleeping so activity can drain. */
		mutex_exit(&pptr->port_mutex);
		delay(drv_usectohz(1000000));
		mutex_enter(&pptr->port_mutex);
	}

	/*
	 * if the driver is still busy then fail to
	 * suspend/power down.
	 */
	if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
		pptr->port_state &= ~flag;
		mutex_exit(&pptr->port_mutex);
		return (FC_FAILURE);
	}

	if (flag == FCP_STATE_DETACHING) {
		/* Remove the port from the global port list. */
		pptr = fcp_soft_state_unlink(pptr);
		ASSERT(pptr != NULL);
	}

	/* Mark the port offline so no new activity is started on it. */
	pptr->port_link_cnt++;
	pptr->port_state |= FCP_STATE_OFFLINE;
	pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);

	fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
	    FCP_CAUSE_LINK_DOWN);
	mutex_exit(&pptr->port_mutex);

	/* kill watch dog timer if we're the last */
	mutex_enter(&fcp_global_mutex);
	if (--fcp_watchdog_init == 0) {
		timeout_id_t tid = fcp_watchdog_id;
		/* untimeout() may block; call it without the global lock. */
		mutex_exit(&fcp_global_mutex);
		(void) untimeout(tid);
	} else {
		mutex_exit(&fcp_global_mutex);
	}

	/* clean up the port structures */
	if (flag == FCP_STATE_DETACHING) {
		fcp_cleanup_port(pptr, instance);
	}

	return (FC_SUCCESS);
}
10300
10301
/*
 * Function: fcp_cleanup_port
 *
 * Description: Releases all the resources held by an FCP port: the NDI
 *		event set/handle, the target and LUN structures, the MPxIO
 *		(pHCI) registration, the SCSA transport and finally the soft
 *		state itself.  Called from fcp_handle_port_detach() once the
 *		port is offline and unlinked; the teardown order below is
 *		deliberate and must be preserved.
 *
 * Argument:	*pptr		FCP port to clean up.
 *		instance	Soft state instance number to free.
 *
 * Return Value: None
 */
static void
fcp_cleanup_port(struct fcp_port *pptr, int instance)
{
	ASSERT(pptr != NULL);

	/* unbind and free event set */
	if (pptr->port_ndi_event_hdl) {
		(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
		    &pptr->port_ndi_events, NDI_SLEEP);
		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
	}

	if (pptr->port_ndi_event_defs) {
		(void) kmem_free(pptr->port_ndi_event_defs,
		    sizeof (fcp_ndi_event_defs));
	}

	/* free the lun/target structures and devinfos */
	fcp_free_targets(pptr);

	/*
	 * Clean up mpxio stuff
	 */
	if (pptr->port_mpxio) {
		(void) mdi_phci_unregister(pptr->port_dip, 0);
		pptr->port_mpxio--;
	}

	/* clean up SCSA stuff */
	(void) scsi_hba_detach(pptr->port_dip);
	if (pptr->port_tran != NULL) {
		scsi_hba_tran_free(pptr->port_tran);
	}

#ifdef KSTATS_CODE
	/* clean up kstats */
	if (pptr->fcp_ksp != NULL) {
		kstat_delete(pptr->fcp_ksp);
	}
#endif

	/* clean up soft state mutexes/condition variables */
	mutex_destroy(&pptr->port_mutex);
	mutex_destroy(&pptr->port_pkt_mutex);

	/* all done with soft state */
	ddi_soft_state_free(fcp_softstate, instance);
}
10350
10351 /*
10352 * Function: fcp_kmem_cache_constructor
10353 *
10354 * Description: This function allocates and initializes the resources required
 * to build a scsi_pkt structure for the target driver. The result
10356 * of the allocation and initialization will be cached in the
10357 * memory cache. As DMA resources may be allocated here, that
10358 * means DMA resources will be tied up in the cache manager.
10359 * This is a tradeoff that has been made for performance reasons.
10360 *
10361 * Argument: *buf Memory to preinitialize.
10362 * *arg FCP port structure (fcp_port).
10363 * kmflags Value passed to kmem_cache_alloc() and
10364 * propagated to the constructor.
10365 *
10366 * Return Value: 0 Allocation/Initialization was successful.
10367 * -1 Allocation or Initialization failed.
10368 *
10369 *
10370 * If the returned value is 0, the buffer is initialized like this:
10371 *
10372 * +================================+
10373 * +----> | struct scsi_pkt |
10374 * | | |
10375 * | +--- | pkt_ha_private |
10376 * | | | |
10377 * | | +================================+
10378 * | |
10379 * | | +================================+
10380 * | +--> | struct fcp_pkt | <---------+
10381 * | | | |
10382 * +----- | cmd_pkt | |
10383 * | cmd_fp_pkt | ---+ |
10384 * +-------->| cmd_fcp_rsp[] | | |
10385 * | +--->| cmd_fcp_cmd[] | | |
10386 * | | |--------------------------------| | |
10387 * | | | struct fc_packet | <--+ |
10388 * | | | | |
10389 * | | | pkt_ulp_private | ----------+
10390 * | | | pkt_fca_private | -----+
10391 * | | | pkt_data_cookie | ---+ |
10392 * | | | pkt_cmdlen | | |
10393 * | |(a) | pkt_rsplen | | |
10394 * | +----| .......... pkt_cmd ........... | ---|-|---------------+
10395 * | (b) | pkt_cmd_cookie | ---|-|----------+ |
10396 * +---------| .......... pkt_resp .......... | ---|-|------+ | |
10397 * | pkt_resp_cookie | ---|-|--+ | | |
10398 * | pkt_cmd_dma | | | | | | |
10399 * | pkt_cmd_acc | | | | | | |
10400 * +================================+ | | | | | |
10401 * | dma_cookies | <--+ | | | | |
10402 * | | | | | | |
10403 * +================================+ | | | | |
10404 * | fca_private | <----+ | | | |
10405 * | | | | | |
10406 * +================================+ | | | |
10407 * | | | |
10408 * | | | |
10409 * +================================+ (d) | | | |
10410 * | fcp_resp cookies | <-------+ | | |
10411 * | | | | |
10412 * +================================+ | | |
10413 * | | |
10414 * +================================+ (d) | | |
10415 * | fcp_resp | <-----------+ | |
10416 * | (DMA resources associated) | | |
10417 * +================================+ | |
10418 * | |
10419 * | |
10420 * | |
10421 * +================================+ (c) | |
10422 * | fcp_cmd cookies | <---------------+ |
10423 * | | |
10424 * +================================+ |
10425 * |
10426 * +================================+ (c) |
10427 * | fcp_cmd | <--------------------+
10428 * | (DMA resources associated) |
10429 * +================================+
10430 *
10431 * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10432 * (b) Only if DMA is NOT used for the FCP_RESP buffer
10433 * (c) Only if DMA is used for the FCP_CMD buffer.
10434 * (d) Only if DMA is used for the FCP_RESP buffer
10435 */
10436 static int
10437 fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
10438 int kmflags)
10439 {
10440 struct fcp_pkt *cmd;
10441 struct fcp_port *pptr;
10442 fc_packet_t *fpkt;
10443
10444 pptr = (struct fcp_port *)tran->tran_hba_private;
10445 cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
10446 bzero(cmd, tran->tran_hba_len);
10447
10448 cmd->cmd_pkt = pkt;
10449 pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
10450 fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
10451 cmd->cmd_fp_pkt = fpkt;
10452
10453 cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
10454 cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
10455 cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
10456 sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);
10457
10458 fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
10459 sizeof (struct fcp_pkt));
10460
10461 fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
10462 fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
10463
10464 if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
10465 /*
10466 * The underlying HBA doesn't want to DMA the fcp_cmd or
10467 * fcp_resp. The transfer of information will be done by
10468 * bcopy.
10469 * The naming of the flags (that is actually a value) is
10470 * unfortunate. FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
10471 * DMA" but instead "NO DMA".
10472 */
10473 fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
10474 fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
10475 fpkt->pkt_resp = cmd->cmd_fcp_rsp;
10476 } else {
10477 /*
10478 * The underlying HBA will dma the fcp_cmd buffer and fcp_resp
10479 * buffer. A buffer is allocated for each one the ddi_dma_*
10480 * interfaces.
10481 */
10482 if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
10483 return (-1);
10484 }
10485 }
10486
10487 return (0);
10488 }
10489
10490 /*
10491 * Function: fcp_kmem_cache_destructor
10492 *
10493 * Description: Called by the destructor of the cache managed by SCSA.
10494 * All the resources pre-allocated in fcp_pkt_constructor
10495 * and the data also pre-initialized in fcp_pkt_constructor
10496 * are freed and uninitialized here.
10497 *
10498 * Argument: *buf Memory to uninitialize.
10499 * *arg FCP port structure (fcp_port).
10500 *
10501 * Return Value: None
10502 *
10503 * Context: kernel
10504 */
10505 static void
10506 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10507 {
10508 struct fcp_pkt *cmd;
10509 struct fcp_port *pptr;
10510
10511 pptr = (struct fcp_port *)(tran->tran_hba_private);
10512 cmd = pkt->pkt_ha_private;
10513
10514 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10515 /*
10516 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10517 * buffer and DMA resources allocated to do so are released.
10518 */
10519 fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10520 }
10521 }
10522
10523 /*
10524 * Function: fcp_alloc_cmd_resp
10525 *
 * Description: This function allocates an FCP_CMD and FCP_RESP buffer that
10527 * will be DMAed by the HBA. The buffer is allocated applying
10528 * the DMA requirements for the HBA. The buffers allocated will
10529 * also be bound. DMA resources are allocated in the process.
10530 * They will be released by fcp_free_cmd_resp().
10531 *
10532 * Argument: *pptr FCP port.
10533 * *fpkt fc packet for which the cmd and resp packet should be
10534 * allocated.
10535 * flags Allocation flags.
10536 *
10537 * Return Value: FC_FAILURE
10538 * FC_SUCCESS
10539 *
10540 * Context: User or Kernel context only if flags == KM_SLEEP.
10541 * Interrupt context if the KM_SLEEP is not specified.
10542 */
10543 static int
10544 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10545 {
10546 int rval;
10547 int cmd_len;
10548 int resp_len;
10549 ulong_t real_len;
10550 int (*cb) (caddr_t);
10551 ddi_dma_cookie_t pkt_cookie;
10552 ddi_dma_cookie_t *cp;
10553 uint32_t cnt;
10554
10555 cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10556
10557 cmd_len = fpkt->pkt_cmdlen;
10558 resp_len = fpkt->pkt_rsplen;
10559
10560 ASSERT(fpkt->pkt_cmd_dma == NULL);
10561
10562 /* Allocation of a DMA handle used in subsequent calls. */
10563 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10564 cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10565 return (FC_FAILURE);
10566 }
10567
10568 /* A buffer is allocated that satisfies the DMA requirements. */
10569 rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10570 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10571 (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10572
10573 if (rval != DDI_SUCCESS) {
10574 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10575 return (FC_FAILURE);
10576 }
10577
10578 if (real_len < cmd_len) {
10579 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10580 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10581 return (FC_FAILURE);
10582 }
10583
10584 /* The buffer allocated is DMA bound. */
10585 rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10586 fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10587 cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10588
10589 if (rval != DDI_DMA_MAPPED) {
10590 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10591 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10592 return (FC_FAILURE);
10593 }
10594
10595 if (fpkt->pkt_cmd_cookie_cnt >
10596 pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10597 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10598 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10599 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10600 return (FC_FAILURE);
10601 }
10602
10603 ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10604
10605 /*
10606 * The buffer where the scatter/gather list is going to be built is
10607 * allocated.
10608 */
10609 cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10610 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10611 KM_NOSLEEP);
10612
10613 if (cp == NULL) {
10614 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10615 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10616 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10617 return (FC_FAILURE);
10618 }
10619
10620 /*
10621 * The scatter/gather list for the buffer we just allocated is built
10622 * here.
10623 */
10624 *cp = pkt_cookie;
10625 cp++;
10626
10627 for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10628 ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10629 &pkt_cookie);
10630 *cp = pkt_cookie;
10631 }
10632
10633 ASSERT(fpkt->pkt_resp_dma == NULL);
10634 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10635 cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10636 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10637 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10638 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10639 return (FC_FAILURE);
10640 }
10641
10642 rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10643 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10644 (caddr_t *)&fpkt->pkt_resp, &real_len,
10645 &fpkt->pkt_resp_acc);
10646
10647 if (rval != DDI_SUCCESS) {
10648 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10649 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10650 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10651 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10652 kmem_free(fpkt->pkt_cmd_cookie,
10653 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10654 return (FC_FAILURE);
10655 }
10656
10657 if (real_len < resp_len) {
10658 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10659 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10660 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10661 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10662 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10663 kmem_free(fpkt->pkt_cmd_cookie,
10664 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10665 return (FC_FAILURE);
10666 }
10667
10668 rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10669 fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10670 cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10671
10672 if (rval != DDI_DMA_MAPPED) {
10673 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10674 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10675 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10676 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10677 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10678 kmem_free(fpkt->pkt_cmd_cookie,
10679 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10680 return (FC_FAILURE);
10681 }
10682
10683 if (fpkt->pkt_resp_cookie_cnt >
10684 pptr->port_resp_dma_attr.dma_attr_sgllen) {
10685 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10686 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10687 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10688 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10689 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10690 kmem_free(fpkt->pkt_cmd_cookie,
10691 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10692 return (FC_FAILURE);
10693 }
10694
10695 ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10696
10697 cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10698 fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10699 KM_NOSLEEP);
10700
10701 if (cp == NULL) {
10702 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10703 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10704 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10705 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10706 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10707 kmem_free(fpkt->pkt_cmd_cookie,
10708 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10709 return (FC_FAILURE);
10710 }
10711
10712 *cp = pkt_cookie;
10713 cp++;
10714
10715 for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10716 ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10717 &pkt_cookie);
10718 *cp = pkt_cookie;
10719 }
10720
10721 return (FC_SUCCESS);
10722 }
10723
10724 /*
10725 * Function: fcp_free_cmd_resp
10726 *
10727 * Description: This function releases the FCP_CMD and FCP_RESP buffer
10728 * allocated by fcp_alloc_cmd_resp() and all the resources
10729 * associated with them. That includes the DMA resources and the
10730 * buffer allocated for the cookies of each one of them.
10731 *
10732 * Argument: *pptr FCP port context.
10733 * *fpkt fc packet containing the cmd and resp packet
10734 * to be released.
10735 *
10736 * Return Value: None
10737 *
10738 * Context: Interrupt, User and Kernel context.
10739 */
10740 /* ARGSUSED */
10741 static void
10742 fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
10743 {
10744 ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
10745
10746 if (fpkt->pkt_resp_dma) {
10747 (void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
10748 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10749 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10750 }
10751
10752 if (fpkt->pkt_resp_cookie) {
10753 kmem_free(fpkt->pkt_resp_cookie,
10754 fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
10755 fpkt->pkt_resp_cookie = NULL;
10756 }
10757
10758 if (fpkt->pkt_cmd_dma) {
10759 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10760 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10761 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10762 }
10763
10764 if (fpkt->pkt_cmd_cookie) {
10765 kmem_free(fpkt->pkt_cmd_cookie,
10766 fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
10767 fpkt->pkt_cmd_cookie = NULL;
10768 }
10769 }
10770
10771
10772 /*
10773 * called by the transport to do our own target initialization
10774 *
10775 * can acquire and release the global mutex
10776 */
/*
 * Function: fcp_phys_tgt_init
 *
 * Description: tran_tgt_init(9E) handler for a physical (non-MPxIO) child
 *		node.  Reads the child's "port-wwn" and "lun" properties,
 *		finds the matching fcp_lun on this port and binds the
 *		scsi_device to that LUN.
 *
 * Argument:	hba_dip		HBA dev_info node (unused).
 *		tgt_dip		child target dev_info node.
 *		hba_tran	transport handle; tran_hba_private carries the
 *				fcp_port this child belongs to.
 *		sd		scsi_device being initialized.
 *
 * Return Value: DDI_SUCCESS		child bound to its LUN
 *		 DDI_NOT_WELL_FORMED	WWN/LUN property missing or malformed
 *		 DDI_FAILURE		no matching LUN on this port
 */
/* ARGSUSED */
static int
fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	uchar_t			*bytes;
	uint_t			nbytes;
	uint16_t		lun_num;
	struct fcp_tgt		*ptgt;
	struct fcp_lun		*plun;
	struct fcp_port		*pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;

	ASSERT(pptr != NULL);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
	    FCP_BUF_LEVEL_8, 0,
	    "fcp_phys_tgt_init: called for %s (instance %d)",
	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));

	/* get our port WWN property */
	bytes = NULL;
	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
	    (nbytes != FC_WWN_SIZE)) {
		/* no (or short) port WWN property: malformed child node */
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
		    " for %s (instance %d): bytes=%p nbytes=%x",
		    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
		    nbytes);

		/* lookup may have allocated even though nbytes was wrong */
		if (bytes != NULL) {
			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		}

		return (DDI_NOT_WELL_FORMED);
	}
	ASSERT(bytes != NULL);

	/* 0xFFFF doubles as the "property missing" sentinel */
	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
	    LUN_PROP, 0xFFFF);
	if (lun_num == 0xFFFF) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_NOT_WELL_FORMED);
	}

	/* match (WWN, LUN) against the LUNs known on this port */
	mutex_enter(&pptr->port_mutex);
	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
		mutex_exit(&pptr->port_mutex);
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_FAILURE);
	}

	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
	    FC_WWN_SIZE) == 0);
	ASSERT(plun->lun_num == lun_num);

	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);

	ptgt = plun->lun_tgt;

	/*
	 * Bind the scsi_device to the LUN.  port_mutex is still held;
	 * tgt_mutex nests inside it.
	 */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_tgt_count++;
	scsi_device_hba_private_set(sd, plun);
	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
	plun->lun_sd = sd;
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	return (DDI_SUCCESS);
}
10862
/*
 * Function: fcp_virt_tgt_init
 *
 * Description: tran_tgt_init(9E) handler for an MPxIO-managed (virtual)
 *		child node, i.e. one backed by an mdi pathinfo.  Mirrors
 *		fcp_phys_tgt_init but first requires sd_pathinfo to be set.
 *
 * Argument:	hba_dip		HBA dev_info node.
 *		tgt_dip		child target dev_info node.
 *		hba_tran	transport handle; tran_hba_private carries the
 *				fcp_port this child belongs to.
 *		sd		scsi_device being initialized.
 *
 * Return Value: DDI_SUCCESS		child bound to its LUN
 *		 DDI_NOT_WELL_FORMED	no pathinfo, or WWN/LUN property
 *					missing or malformed
 *		 DDI_FAILURE		no matching LUN on this port
 */
/*ARGSUSED*/
static int
fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	uchar_t			*bytes;
	uint_t			nbytes;
	uint16_t		lun_num;
	struct fcp_tgt		*ptgt;
	struct fcp_lun		*plun;
	struct fcp_port		*pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;
	child_info_t		*cip;

	ASSERT(pptr != NULL);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
	    " (tgt_dip %p)", ddi_get_name(tgt_dip),
	    ddi_get_instance(tgt_dip), hba_dip, tgt_dip);

	/* a virtual child must have an mdi pathinfo attached */
	cip = (child_info_t *)sd->sd_pathinfo;
	if (cip == NULL) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		return (DDI_NOT_WELL_FORMED);
	}

	/* get our port WWN property */
	bytes = NULL;
	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
	    (nbytes != FC_WWN_SIZE)) {
		/* lookup may have allocated even though nbytes was wrong */
		if (bytes) {
			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		}
		return (DDI_NOT_WELL_FORMED);
	}

	ASSERT(bytes != NULL);

	/* 0xFFFF doubles as the "property missing" sentinel */
	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
	    LUN_PROP, 0xFFFF);
	if (lun_num == 0xFFFF) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_NOT_WELL_FORMED);
	}

	/* match (WWN, LUN) against the LUNs known on this port */
	mutex_enter(&pptr->port_mutex);
	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
		mutex_exit(&pptr->port_mutex);
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_FAILURE);
	}

	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
	    FC_WWN_SIZE) == 0);
	ASSERT(plun->lun_num == lun_num);

	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);

	ptgt = plun->lun_tgt;

	/*
	 * Bind the scsi_device to the LUN.  port_mutex is still held;
	 * tgt_mutex nests inside it.
	 */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_tgt_count++;
	scsi_device_hba_private_set(sd, plun);
	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
	plun->lun_sd = sd;
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	return (DDI_SUCCESS);
}
10953
10954
10955 /*
10956 * called by the transport to do our own target initialization
10957 *
10958 * can acquire and release the global mutex
10959 */
10960 /* ARGSUSED */
10961 static int
10962 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10963 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10964 {
10965 struct fcp_port *pptr = (struct fcp_port *)
10966 hba_tran->tran_hba_private;
10967 int rval;
10968
10969 ASSERT(pptr != NULL);
10970
10971 /*
10972 * Child node is getting initialized. Look at the mpxio component
10973 * type on the child device to see if this device is mpxio managed
10974 * or not.
10975 */
10976 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
10977 rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10978 } else {
10979 rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10980 }
10981
10982 return (rval);
10983 }
10984
10985
10986 /* ARGSUSED */
10987 static void
10988 fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10989 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10990 {
10991 struct fcp_lun *plun = scsi_device_hba_private_get(sd);
10992 struct fcp_tgt *ptgt;
10993
10994 FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
10995 fcp_trace, FCP_BUF_LEVEL_8, 0,
10996 "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
10997 ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
10998 ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10999
11000 if (plun == NULL) {
11001 return;
11002 }
11003 ptgt = plun->lun_tgt;
11004
11005 ASSERT(ptgt != NULL);
11006
11007 mutex_enter(&ptgt->tgt_mutex);
11008 ASSERT(plun->lun_tgt_count > 0);
11009
11010 if (--plun->lun_tgt_count == 0) {
11011 plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
11012 }
11013 plun->lun_sd = NULL;
11014 mutex_exit(&ptgt->tgt_mutex);
11015 }
11016
/*
 * Function: fcp_scsi_start
 *
 * Description: This function is called by the target driver to request a
 *		command to be sent (tran_start(9E)).  Depending on port,
 *		target and LUN state the packet is either transported
 *		immediately, queued on the port's internal queue for a
 *		later retry, polled (FLAG_NOINTR), or failed.
 *
 * Argument:	*ap		SCSI address of the device.
 *		*pkt		SCSI packet containing the cmd to send.
 *
 * Return Value: TRAN_ACCEPT
 *		 TRAN_BUSY
 *		 TRAN_BADPKT
 *		 TRAN_FATAL_ERROR
 */
static int
fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct fcp_port	*pptr = ADDR2FCP(ap);
	struct fcp_lun	*plun = ADDR2LUN(ap);
	struct fcp_pkt	*cmd = PKT2CMD(pkt);
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	int		rval;

	/* ensure command isn't already issued */
	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_9, 0,
	    "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);

	/*
	 * If the device is offline and is not in the process of coming
	 * online, fail the request.
	 */
	mutex_enter(&plun->lun_mutex);
	if ((plun->lun_state & FCP_LUN_OFFLINE) &&
	    !(plun->lun_state & FCP_LUN_ONLINING)) {
		mutex_exit(&plun->lun_mutex);
		/* no remote port left: tell the target driver it's gone */
		if (cmd->cmd_fp_pkt->pkt_pd == NULL)
			pkt->pkt_reason = CMD_DEV_GONE;
		return (TRAN_FATAL_ERROR);
	}
	mutex_exit(&plun->lun_mutex);

	cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;

	/*
	 * If we are suspended, kernel is trying to dump, so don't
	 * block, fail or defer requests - send them down right away.
	 * NOTE: If we are in panic (i.e. trying to dump), we can't
	 * assume we have been suspended.  There is hardware such as
	 * the v880 that doesn't do PM.  Thus, the check for
	 * ddi_in_panic.
	 *
	 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
	 * of changing.  So, if we can queue the packet, do it.  Eventually,
	 * either the device will have gone away or changed and we can fail
	 * the request, or we can proceed if the device didn't change.
	 *
	 * If the pd in the target or the packet is NULL it's probably
	 * because the device has gone away, we allow the request to be
	 * put on the internal queue here in case the device comes back within
	 * the offline timeout.  fctl will fix up the pd's if the tgt_pd_handle
	 * has gone NULL, while fcp deals cases where pkt_pd is NULL.  pkt_pd
	 * could be NULL because the device was disappearing during or since
	 * packet initialization.
	 */

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);

	if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
	    FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
	    (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
	    (ptgt->tgt_pd_handle == NULL) ||
	    (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
		/*
		 * If ((LUN is busy AND
		 *	LUN not suspended AND
		 *	The system is not in panic state) OR
		 *	(The port is coming up))
		 *
		 * We check to see if the any of the flags FLAG_NOINTR or
		 * FLAG_NOQUEUE is set.  If one of them is set the value
		 * returned will be TRAN_BUSY.  If not, the request is queued.
		 */
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		/* see if using interrupts is allowed (so queueing'll work) */
		if (pkt->pkt_flags & FLAG_NOINTR) {
			pkt->pkt_resid = 0;
			return (TRAN_BUSY);
		}
		if (pkt->pkt_flags & FLAG_NOQUEUE) {
			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_9, 0,
			    "fcp_scsi_start: lun busy for pkt %p", pkt);
			return (TRAN_BUSY);
		}
#ifdef	DEBUG
		mutex_enter(&pptr->port_pkt_mutex);
		pptr->port_npkts++;
		mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

		/* got queue up the pkt for later */
		fcp_queue_pkt(pptr, cmd);
		return (TRAN_ACCEPT);
	}
	cmd->cmd_state = FCP_PKT_ISSUED;

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/*
	 * Now that we released the mutexes, what was protected by them can
	 * change.
	 */

	/*
	 * If there is a reconfiguration in progress, wait for it to complete.
	 */
	fcp_reconfig_wait(pptr);

	/* absolute watchdog deadline; 0 means "no timeout" */
	cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
	    pkt->pkt_time : 0;

	/* prepare the packet */

	fcp_prepare_pkt(pptr, cmd, plun);

	/* no caller timeout: fall back to a 5 hour FC-level timeout */
	if (cmd->cmd_pkt->pkt_time) {
		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
	} else {
		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
	}

	/*
	 * if interrupts aren't allowed (e.g. at dump time) then we'll
	 * have to do polled I/O
	 */
	if (pkt->pkt_flags & FLAG_NOINTR) {
		cmd->cmd_state &= ~FCP_PKT_ISSUED;
		return (fcp_dopoll(pptr, cmd));
	}

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts++;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
	if (rval == FC_SUCCESS) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_9, 0,
		    "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
		return (TRAN_ACCEPT);
	}

	/* transport refused the packet: back out the issued state */
	cmd->cmd_state = FCP_PKT_IDLE;

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts--;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	/*
	 * For lack of clearer definitions, choose
	 * between TRAN_BUSY and TRAN_FATAL_ERROR.
	 */

	if (rval == FC_TRAN_BUSY) {
		pkt->pkt_resid = 0;
		rval = TRAN_BUSY;
	} else {
		mutex_enter(&ptgt->tgt_mutex);
		if (plun->lun_state & FCP_LUN_OFFLINE) {
			/* LUN went offline underneath us: fatal */
			child_info_t	*cip;

			mutex_enter(&plun->lun_mutex);
			cip = plun->lun_cip;
			mutex_exit(&plun->lun_mutex);

			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fcp_transport failed 2 for %x: %x; dip=%p",
			    plun->lun_tgt->tgt_d_id, rval, cip);

			rval = TRAN_FATAL_ERROR;
		} else {
			if (pkt->pkt_flags & FLAG_NOQUEUE) {
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_9, 0,
				    "fcp_scsi_start: FC_BUSY for pkt %p",
				    pkt);
				rval = TRAN_BUSY;
			} else {
				/* queue it for retry by the watchdog */
				rval = TRAN_ACCEPT;
				fcp_queue_pkt(pptr, cmd);
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
	}

	return (rval);
}
11226
11227 /*
11228 * called by the transport to abort a packet
11229 */
11230 /*ARGSUSED*/
11231 static int
11232 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11233 {
11234 int tgt_cnt;
11235 struct fcp_port *pptr = ADDR2FCP(ap);
11236 struct fcp_lun *plun = ADDR2LUN(ap);
11237 struct fcp_tgt *ptgt = plun->lun_tgt;
11238
11239 if (pkt == NULL) {
11240 if (ptgt) {
11241 mutex_enter(&ptgt->tgt_mutex);
11242 tgt_cnt = ptgt->tgt_change_cnt;
11243 mutex_exit(&ptgt->tgt_mutex);
11244 fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11245 return (TRUE);
11246 }
11247 }
11248 return (FALSE);
11249 }
11250
11251
11252 /*
11253 * Perform reset
11254 */
11255 int
11256 fcp_scsi_reset(struct scsi_address *ap, int level)
11257 {
11258 int rval = 0;
11259 struct fcp_port *pptr = ADDR2FCP(ap);
11260 struct fcp_lun *plun = ADDR2LUN(ap);
11261 struct fcp_tgt *ptgt = plun->lun_tgt;
11262
11263 if (level == RESET_ALL) {
11264 if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
11265 rval = 1;
11266 }
11267 } else if (level == RESET_TARGET || level == RESET_LUN) {
11268 /*
11269 * If we are in the middle of discovery, return
11270 * SUCCESS as this target will be rediscovered
11271 * anyway
11272 */
11273 mutex_enter(&ptgt->tgt_mutex);
11274 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11275 mutex_exit(&ptgt->tgt_mutex);
11276 return (1);
11277 }
11278 mutex_exit(&ptgt->tgt_mutex);
11279
11280 if (fcp_reset_target(ap, level) == FC_SUCCESS) {
11281 rval = 1;
11282 }
11283 }
11284 return (rval);
11285 }
11286
11287
/*
 * tran_getcap(9E): read a SCSI capability through the common
 * get/set path (doset == 0).
 */
static int
fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
{
	int	res;

	res = fcp_commoncap(ap, cap, 0, whom, 0);
	return (res);
}
11296
11297
/*
 * tran_setcap(9E): set a SCSI capability through the common
 * get/set path (doset == 1).
 */
static int
fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	int	res;

	res = fcp_commoncap(ap, cap, value, whom, 1);
	return (res);
}
11306
/*
 * Function: fcp_pkt_setup
 *
 * Description: This function sets up the scsi_pkt structure passed by the
 *		caller.  This function assumes fcp_pkt_constructor has been
 *		called previously for the packet passed by the caller.  If
 *		successful this call will have the following results:
 *
 *		- The resources needed that will be constant through out
 *		  the whole transaction are allocated.
 *		- The fields that will be constant through out the whole
 *		  transaction are initialized.
 *		- The scsi packet will be linked to the LUN structure
 *		  addressed by the transaction.
 *
 * Argument:
 *		*pkt		Pointer to a scsi_pkt structure.
 *		callback	SLEEP_FUNC if the caller may block for
 *				resources, NULL_FUNC otherwise.
 *		arg		Callback argument (unused).
 *
 * Return Value: 0		Success
 *		 !0		Failure
 *
 * Context:	Kernel context or interrupt context
 */
/* ARGSUSED */
static int
fcp_pkt_setup(struct scsi_pkt *pkt,
    int (*callback)(caddr_t arg),
    caddr_t arg)
{
	struct fcp_pkt	*cmd;
	struct fcp_port	*pptr;
	struct fcp_lun	*plun;
	struct fcp_tgt	*ptgt;
	int		kf;
	fc_packet_t	*fpkt;
	fc_frame_hdr_t	*hp;

	pptr = ADDR2FCP(&pkt->pkt_address);
	plun = ADDR2LUN(&pkt->pkt_address);
	ptgt = plun->lun_tgt;

	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
	fpkt = cmd->cmd_fp_pkt;

	/*
	 * this request is for dma allocation only
	 */
	/*
	 * First step of fcp_scsi_init_pkt: pkt allocation
	 * We determine if the caller is willing to wait for the
	 * resources.
	 */
	kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;

	/*
	 * Selective zeroing of the pkt.
	 */
	cmd->cmd_back = NULL;
	cmd->cmd_next = NULL;

	/*
	 * Zero out fcp command
	 */
	bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));

	cmd->cmd_state = FCP_PKT_IDLE;

	/* NOTE(review): redundant, fpkt was already assigned above */
	fpkt = cmd->cmd_fp_pkt;
	fpkt->pkt_data_acc = NULL;

	/*
	 * When port_state is FCP_STATE_OFFLINE, remote_port (tgt_pd_handle)
	 * could be destroyed.  We need fail pkt_setup.
	 */
	if (pptr->port_state & FCP_STATE_OFFLINE) {
		return (-1);
	}

	/* bind the fc packet to the remote port under the target lock */
	mutex_enter(&ptgt->tgt_mutex);
	fpkt->pkt_pd = ptgt->tgt_pd_handle;

	if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
	    != FC_SUCCESS) {
		mutex_exit(&ptgt->tgt_mutex);
		return (-1);
	}

	mutex_exit(&ptgt->tgt_mutex);

	/* Fill in the Fabric Channel Header */
	hp = &fpkt->pkt_cmd_fhdr;
	hp->r_ctl = R_CTL_COMMAND;
	hp->rsvd = 0;
	hp->type = FC_TYPE_SCSI_FCP;
	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
	hp->seq_id = 0;
	hp->df_ctl  = 0;
	hp->seq_cnt = 0;
	hp->ox_id = 0xffff;
	hp->rx_id = 0xffff;
	hp->ro = 0;

	/*
	 * A doubly linked list (cmd_forw, cmd_back) is built
	 * out of every allocated packet on a per-lun basis
	 *
	 * The packets are maintained in the list so as to satisfy
	 * scsi_abort() requests. At present (which is unlikely to
	 * change in the future) nobody performs a real scsi_abort
	 * in the SCSI target drivers (as they don't keep the packets
	 * after doing scsi_transport - so they don't know how to
	 * abort a packet other than sending a NULL to abort all
	 * outstanding packets)
	 */
	mutex_enter(&plun->lun_mutex);
	if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
		plun->lun_pkt_head->cmd_back = cmd;
	} else {
		plun->lun_pkt_tail = cmd;
	}
	plun->lun_pkt_head = cmd;
	mutex_exit(&plun->lun_mutex);
	return (0);
}
11433
11434 /*
11435 * Function: fcp_pkt_teardown
11436 *
11437 * Description: This function releases a scsi_pkt structure and all the
11438 * resources attached to it.
11439 *
11440 * Argument: *pkt Pointer to a scsi_pkt structure.
11441 *
11442 * Return Value: None
11443 *
11444 * Context: User, Kernel or Interrupt context.
11445 */
11446 static void
11447 fcp_pkt_teardown(struct scsi_pkt *pkt)
11448 {
11449 struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
11450 struct fcp_lun *plun = ADDR2LUN(&pkt->pkt_address);
11451 struct fcp_pkt *cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11452
11453 /*
11454 * Remove the packet from the per-lun list
11455 */
11456 mutex_enter(&plun->lun_mutex);
11457 if (cmd->cmd_back) {
11458 ASSERT(cmd != plun->lun_pkt_head);
11459 cmd->cmd_back->cmd_forw = cmd->cmd_forw;
11460 } else {
11461 ASSERT(cmd == plun->lun_pkt_head);
11462 plun->lun_pkt_head = cmd->cmd_forw;
11463 }
11464
11465 if (cmd->cmd_forw) {
11466 cmd->cmd_forw->cmd_back = cmd->cmd_back;
11467 } else {
11468 ASSERT(cmd == plun->lun_pkt_tail);
11469 plun->lun_pkt_tail = cmd->cmd_back;
11470 }
11471
11472 mutex_exit(&plun->lun_mutex);
11473
11474 (void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
11475 }
11476
11477 /*
11478 * Routine for reset notification setup, to register or cancel.
11479 * This function is called by SCSA
11480 */
11481 /*ARGSUSED*/
11482 static int
11483 fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
11484 void (*callback)(caddr_t), caddr_t arg)
11485 {
11486 struct fcp_port *pptr = ADDR2FCP(ap);
11487
11488 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
11489 &pptr->port_mutex, &pptr->port_reset_notify_listf));
11490 }
11491
11492
11493 static int
11494 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11495 ddi_eventcookie_t *event_cookiep)
11496 {
11497 struct fcp_port *pptr = fcp_dip2port(dip);
11498
11499 if (pptr == NULL) {
11500 return (DDI_FAILURE);
11501 }
11502
11503 return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11504 event_cookiep, NDI_EVENT_NOPASS));
11505 }
11506
11507
11508 static int
11509 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11510 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11511 ddi_callback_id_t *cb_id)
11512 {
11513 struct fcp_port *pptr = fcp_dip2port(dip);
11514
11515 if (pptr == NULL) {
11516 return (DDI_FAILURE);
11517 }
11518
11519 return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11520 eventid, callback, arg, NDI_SLEEP, cb_id));
11521 }
11522
11523
11524 static int
11525 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11526 {
11527
11528 struct fcp_port *pptr = fcp_dip2port(dip);
11529
11530 if (pptr == NULL) {
11531 return (DDI_FAILURE);
11532 }
11533 return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11534 }
11535
11536
11537 /*
11538 * called by the transport to post an event
11539 */
11540 static int
11541 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11542 ddi_eventcookie_t eventid, void *impldata)
11543 {
11544 struct fcp_port *pptr = fcp_dip2port(dip);
11545
11546 if (pptr == NULL) {
11547 return (DDI_FAILURE);
11548 }
11549
11550 return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11551 eventid, impldata));
11552 }
11553
11554
/*
 * A target in Fibre Channel in many cases has a one-to-one relation
 * with a port identifier (which is also known as D_ID, and as AL_PA
 * in private loop).  On Fibre Channel-to-SCSI bridge boxes a target reset
 * will most likely result in resetting all LUNs (which means a reset will
 * occur on all the SCSI devices connected at the other end of the bridge).
 * Which behavior is correct is a matter of long-running debate, with
 * arguably reasonable solutions on every side.
11564 *
11565 * To stay on track and not digress much, here are the problems stated
11566 * briefly:
11567 *
11568 * SCSA doesn't define RESET_LUN, It defines RESET_TARGET, but the
11569 * target drivers use RESET_TARGET even if their instance is on a
11570 * LUN. Doesn't that sound a bit broken ?
11571 *
11572 * FCP SCSI (the current spec) only defines RESET TARGET in the
11573 * control fields of an FCP_CMND structure. It should have been
11574 * fixed right there, giving flexibility to the initiators to
11575 * minimize havoc that could be caused by resetting a target.
11576 */
11577 static int
11578 fcp_reset_target(struct scsi_address *ap, int level)
11579 {
11580 int rval = FC_FAILURE;
11581 char lun_id[25];
11582 struct fcp_port *pptr = ADDR2FCP(ap);
11583 struct fcp_lun *plun = ADDR2LUN(ap);
11584 struct fcp_tgt *ptgt = plun->lun_tgt;
11585 struct scsi_pkt *pkt;
11586 struct fcp_pkt *cmd;
11587 struct fcp_rsp *rsp;
11588 uint32_t tgt_cnt;
11589 struct fcp_rsp_info *rsp_info;
11590 struct fcp_reset_elem *p;
11591 int bval;
11592
11593 if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
11594 KM_NOSLEEP)) == NULL) {
11595 return (rval);
11596 }
11597
11598 mutex_enter(&ptgt->tgt_mutex);
11599 if (level == RESET_TARGET) {
11600 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11601 mutex_exit(&ptgt->tgt_mutex);
11602 kmem_free(p, sizeof (struct fcp_reset_elem));
11603 return (rval);
11604 }
11605 fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
11606 (void) strcpy(lun_id, " ");
11607 } else {
11608 if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
11609 mutex_exit(&ptgt->tgt_mutex);
11610 kmem_free(p, sizeof (struct fcp_reset_elem));
11611 return (rval);
11612 }
11613 fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
11614
11615 (void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
11616 }
11617 tgt_cnt = ptgt->tgt_change_cnt;
11618
11619 mutex_exit(&ptgt->tgt_mutex);
11620
11621 if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
11622 0, 0, NULL, 0)) == NULL) {
11623 kmem_free(p, sizeof (struct fcp_reset_elem));
11624 mutex_enter(&ptgt->tgt_mutex);
11625 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11626 mutex_exit(&ptgt->tgt_mutex);
11627 return (rval);
11628 }
11629 pkt->pkt_time = FCP_POLL_TIMEOUT;
11630
11631 /* fill in cmd part of packet */
11632 cmd = PKT2CMD(pkt);
11633 if (level == RESET_TARGET) {
11634 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
11635 } else {
11636 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
11637 }
11638 cmd->cmd_fp_pkt->pkt_comp = NULL;
11639 cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
11640
11641 /* prepare a packet for transport */
11642 fcp_prepare_pkt(pptr, cmd, plun);
11643
11644 if (cmd->cmd_pkt->pkt_time) {
11645 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11646 } else {
11647 cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11648 }
11649
11650 (void) fc_ulp_busy_port(pptr->port_fp_handle);
11651 bval = fcp_dopoll(pptr, cmd);
11652 fc_ulp_idle_port(pptr->port_fp_handle);
11653
11654 /* submit the packet */
11655 if (bval == TRAN_ACCEPT) {
11656 int error = 3;
11657
11658 rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
11659 rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
11660 sizeof (struct fcp_rsp));
11661
11662 if (rsp->fcp_u.fcp_status.rsp_len_set) {
11663 if (fcp_validate_fcp_response(rsp, pptr) ==
11664 FC_SUCCESS) {
11665 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
11666 FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
11667 sizeof (struct fcp_rsp), rsp_info,
11668 cmd->cmd_fp_pkt->pkt_resp_acc,
11669 sizeof (struct fcp_rsp_info));
11670 }
11671 if (rsp_info->rsp_code == FCP_NO_FAILURE) {
11672 rval = FC_SUCCESS;
11673 error = 0;
11674 } else {
11675 error = 1;
11676 }
11677 } else {
11678 error = 2;
11679 }
11680 }
11681
11682 switch (error) {
11683 case 0:
11684 fcp_log(CE_WARN, pptr->port_dip,
11685 "!FCP: WWN 0x%08x%08x %s reset successfully",
11686 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11687 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11688 break;
11689
11690 case 1:
11691 fcp_log(CE_WARN, pptr->port_dip,
11692 "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11693 " response code=%x",
11694 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11695 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11696 rsp_info->rsp_code);
11697 break;
11698
11699 case 2:
11700 fcp_log(CE_WARN, pptr->port_dip,
11701 "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11702 " Bad FCP response values: rsvd1=%x,"
11703 " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
11704 " rsplen=%x, senselen=%x",
11705 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11706 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11707 rsp->reserved_0, rsp->reserved_1,
11708 rsp->fcp_u.fcp_status.reserved_0,
11709 rsp->fcp_u.fcp_status.reserved_1,
11710 rsp->fcp_response_len, rsp->fcp_sense_len);
11711 break;
11712
11713 default:
11714 fcp_log(CE_WARN, pptr->port_dip,
11715 "!FCP: Reset to WWN 0x%08x%08x %s failed",
11716 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11717 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11718 break;
11719 }
11720 }
11721 scsi_destroy_pkt(pkt);
11722
11723 if (rval == FC_FAILURE) {
11724 mutex_enter(&ptgt->tgt_mutex);
11725 if (level == RESET_TARGET) {
11726 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11727 } else {
11728 fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
11729 }
11730 mutex_exit(&ptgt->tgt_mutex);
11731 kmem_free(p, sizeof (struct fcp_reset_elem));
11732 return (rval);
11733 }
11734
11735 mutex_enter(&pptr->port_mutex);
11736 if (level == RESET_TARGET) {
11737 p->tgt = ptgt;
11738 p->lun = NULL;
11739 } else {
11740 p->tgt = NULL;
11741 p->lun = plun;
11742 }
11743 p->tgt = ptgt;
11744 p->tgt_cnt = tgt_cnt;
11745 p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
11746 p->next = pptr->port_reset_list;
11747 pptr->port_reset_list = p;
11748
11749 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11750 fcp_trace, FCP_BUF_LEVEL_3, 0,
11751 "Notify ssd of the reset to reinstate the reservations");
11752
11753 scsi_hba_reset_notify_callback(&pptr->port_mutex,
11754 &pptr->port_reset_notify_listf);
11755
11756 mutex_exit(&pptr->port_mutex);
11757
11758 return (rval);
11759 }
11760
11761
/*
 * Function: fcp_commoncap
 *
 * Description: Called by fcp_getcap and fcp_setcap to get and set
 *		(respectively) SCSI capabilities.
 *
 * Argument:	ap	SCSI address of the device.
 *		cap	capability name string.
 *		val	value to set (setcap only).
 *		tgtonly	per-target flag from the caller (unused here).
 *		doset	nonzero for setcap, zero for getcap.
 *
 * Return Value: capability value / TRUE / FALSE, or UNDEFINED for an
 *		 unknown or unsupported capability.
 */
/* ARGSUSED */
static int
fcp_commoncap(struct scsi_address *ap, char *cap,
    int val, int tgtonly, int doset)
{
	struct fcp_port	*pptr = ADDR2FCP(ap);
	struct fcp_lun	*plun = ADDR2LUN(ap);
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	int		cidx;
	int		rval = FALSE;

	if (cap == (char *)0) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_commoncap: invalid arg");
		return (rval);
	}

	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
		return (UNDEFINED);
	}

	/*
	 * Process setcap request.
	 */
	if (doset) {
		/*
		 * At present, we can only set binary (0/1) values
		 */
		switch (cidx) {
		case SCSI_CAP_ARQ:
			/* ARQ is always on; report whether val is truthy */
			if (val == 0) {
				rval = FALSE;
			} else {
				rval = TRUE;
			}
			break;

		case SCSI_CAP_LUN_RESET:
			/* remember per-LUN whether LUN reset is allowed */
			if (val) {
				plun->lun_cap |= FCP_LUN_CAP_RESET;
			} else {
				plun->lun_cap &= ~FCP_LUN_CAP_RESET;
			}
			rval = TRUE;
			break;

		case SCSI_CAP_SECTOR_SIZE:
			rval = TRUE;
			break;
		default:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_4, 0,
			    "fcp_setcap: unsupported %d", cidx);
			rval = UNDEFINED;
			break;
		}

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "set cap: cap=%s, val/tgtonly/doset/rval = "
		    "0x%x/0x%x/0x%x/%d",
		    cap, val, tgtonly, doset, rval);

	} else {
		/*
		 * Process getcap request.
		 */
		switch (cidx) {
		case SCSI_CAP_DMA_MAX:
			rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;

			/*
			 * Need to make an adjustment qlc is uint_t 64
			 * st is int, so we will make the adjustment here
			 * being as nobody wants to touch this.
			 * It still leaves the max single block length
			 * of 2 gig. This should last .
			 */

			if (rval == -1) {
				rval = MAX_INT_DMA;
			}

			break;

		case SCSI_CAP_INITIATOR_ID:
			rval = pptr->port_id;
			break;

		case SCSI_CAP_ARQ:
		case SCSI_CAP_RESET_NOTIFICATION:
		case SCSI_CAP_TAGGED_QING:
			rval = TRUE;
			break;

		case SCSI_CAP_SCSI_VERSION:
			rval = 3;
			break;

		case SCSI_CAP_INTERCONNECT_TYPE:
			/* no hard address outside a private loop => fabric */
			if (FC_TOP_EXTERNAL(pptr->port_topology) ||
			    (ptgt->tgt_hard_addr == 0)) {
				rval = INTERCONNECT_FABRIC;
			} else {
				rval = INTERCONNECT_FIBRE;
			}
			break;

		case SCSI_CAP_LUN_RESET:
			rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
			    TRUE : FALSE;
			break;

		default:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_4, 0,
			    "fcp_getcap: unsupported %d", cidx);
			rval = UNDEFINED;
			break;
		}

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "get cap: cap=%s, val/tgtonly/doset/rval = "
		    "0x%x/0x%x/0x%x/%d",
		    cap, val, tgtonly, doset, rval);
	}

	return (rval);
}
11897
11898 /*
11899 * called by the transport to get the port-wwn and lun
11900 * properties of this device, and to create a "name" based on them
11901 *
11902 * these properties don't exist on sun4m
11903 *
11904 * return 1 for success else return 0
11905 */
11906 /* ARGSUSED */
11907 static int
11908 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11909 {
11910 int i;
11911 int *lun;
11912 int numChars;
11913 uint_t nlun;
11914 uint_t count;
11915 uint_t nbytes;
11916 uchar_t *bytes;
11917 uint16_t lun_num;
11918 uint32_t tgt_id;
11919 char **conf_wwn;
11920 char tbuf[(FC_WWN_SIZE << 1) + 1];
11921 uchar_t barray[FC_WWN_SIZE];
11922 dev_info_t *tgt_dip;
11923 struct fcp_tgt *ptgt;
11924 struct fcp_port *pptr;
11925 struct fcp_lun *plun;
11926
11927 ASSERT(sd != NULL);
11928 ASSERT(name != NULL);
11929
11930 tgt_dip = sd->sd_dev;
11931 pptr = ddi_get_soft_state(fcp_softstate,
11932 ddi_get_instance(ddi_get_parent(tgt_dip)));
11933 if (pptr == NULL) {
11934 return (0);
11935 }
11936
11937 ASSERT(tgt_dip != NULL);
11938
11939 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11940 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11941 LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11942 name[0] = '\0';
11943 return (0);
11944 }
11945
11946 if (nlun == 0) {
11947 ddi_prop_free(lun);
11948 return (0);
11949 }
11950
11951 lun_num = lun[0];
11952 ddi_prop_free(lun);
11953
11954 /*
11955 * Lookup for .conf WWN property
11956 */
11957 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
11958 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
11959 &conf_wwn, &count) == DDI_PROP_SUCCESS) {
11960 ASSERT(count >= 1);
11961
11962 fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
11963 ddi_prop_free(conf_wwn);
11964 mutex_enter(&pptr->port_mutex);
11965 if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
11966 mutex_exit(&pptr->port_mutex);
11967 return (0);
11968 }
11969 ptgt = plun->lun_tgt;
11970 mutex_exit(&pptr->port_mutex);
11971
11972 (void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
11973 tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
11974
11975 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
11976 ptgt->tgt_hard_addr != 0) {
11977 tgt_id = (uint32_t)fcp_alpa_to_switch[
11978 ptgt->tgt_hard_addr];
11979 } else {
11980 tgt_id = ptgt->tgt_d_id;
11981 }
11982
11983 (void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
11984 TARGET_PROP, tgt_id);
11985 }
11986
11987 /* get the our port-wwn property */
11988 bytes = NULL;
11989 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
11990 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
11991 &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
11992 if (bytes != NULL) {
11993 ddi_prop_free(bytes);
11994 }
11995 return (0);
11996 }
11997
11998 for (i = 0; i < FC_WWN_SIZE; i++) {
11999 (void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
12000 }
12001
12002 /* Stick in the address of the form "wWWN,LUN" */
12003 numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
12004
12005 ASSERT(numChars < len);
12006 if (numChars >= len) {
12007 fcp_log(CE_WARN, pptr->port_dip,
12008 "!fcp_scsi_get_name: "
12009 "name parameter length too small, it needs to be %d",
12010 numChars+1);
12011 }
12012
12013 ddi_prop_free(bytes);
12014
12015 return (1);
12016 }
12017
12018
12019 /*
12020 * called by the transport to get the SCSI target id value, returning
12021 * it in "name"
12022 *
12023 * this isn't needed/used on sun4m
12024 *
12025 * return 1 for success else return 0
12026 */
12027 /* ARGSUSED */
12028 static int
12029 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
12030 {
12031 struct fcp_lun *plun = ADDR2LUN(&sd->sd_address);
12032 struct fcp_tgt *ptgt;
12033 int numChars;
12034
12035 if (plun == NULL) {
12036 return (0);
12037 }
12038
12039 if ((ptgt = plun->lun_tgt) == NULL) {
12040 return (0);
12041 }
12042
12043 numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
12044
12045 ASSERT(numChars < len);
12046 if (numChars >= len) {
12047 fcp_log(CE_WARN, NULL,
12048 "!fcp_scsi_get_bus_addr: "
12049 "name parameter length too small, it needs to be %d",
12050 numChars+1);
12051 }
12052
12053 return (1);
12054 }
12055
12056
12057 /*
12058 * called internally to reset the link where the specified port lives
12059 */
static int
fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
{
	la_wwn_t		wwn;
	struct fcp_lun	*plun;
	struct fcp_tgt	*ptgt;

	/* disable restart of lip if we're suspended */
	mutex_enter(&pptr->port_mutex);

	if (pptr->port_state & (FCP_STATE_SUSPENDED |
	    FCP_STATE_POWER_DOWN)) {
		mutex_exit(&pptr->port_mutex);
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_linkreset, fcp%d: link reset "
		    "disabled due to DDI_SUSPEND",
		    ddi_get_instance(pptr->port_dip));
		return (FC_FAILURE);
	}

	/*
	 * Nothing to do while the port is offline or still coming online;
	 * report success since a state-change callback will follow anyway.
	 */
	if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
		mutex_exit(&pptr->port_mutex);
		return (FC_SUCCESS);
	}

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");

	/*
	 * If ap == NULL assume local link reset.
	 */
	if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
		/* remote reset: target the port WWN of the given device */
		plun = ADDR2LUN(ap);
		ptgt = plun->lun_tgt;
		bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
	} else {
		/* a zeroed WWN asks the transport for a local link reset */
		bzero((caddr_t)&wwn, sizeof (wwn));
	}
	mutex_exit(&pptr->port_mutex);

	return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
}
12103
12104
12105 /*
12106 * called from fcp_port_attach() to resume a port
12107 * return DDI_* success/failure status
12108 * acquires and releases the global mutex
12109 * acquires and releases the port mutex
12110 */
12111 /*ARGSUSED*/
12112
static int
fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
    uint32_t s_id, fc_attach_cmd_t cmd, int instance)
{
	int			res = DDI_FAILURE; /* default result */
	struct fcp_port		*pptr;	/* port state ptr */
	uint32_t		alloc_cnt;
	uint32_t		max_cnt;
	fc_portmap_t		*tmp_list = NULL;

	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
	    instance);

	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
		cmn_err(CE_WARN, "fcp: bad soft state");
		return (res);
	}

	/* clear the suspend/power-down bit that triggered this resume */
	mutex_enter(&pptr->port_mutex);
	switch (cmd) {
	case FC_CMD_RESUME:
		ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
		pptr->port_state &= ~FCP_STATE_SUSPENDED;
		break;

	case FC_CMD_POWER_UP:
		/*
		 * If the port is DDI_SUSPENded, defer rediscovery
		 * until DDI_RESUME occurs
		 */
		if (pptr->port_state & FCP_STATE_SUSPENDED) {
			pptr->port_state &= ~FCP_STATE_POWER_DOWN;
			mutex_exit(&pptr->port_mutex);
			return (DDI_SUCCESS);
		}
		pptr->port_state &= ~FCP_STATE_POWER_DOWN;
		/* last case: rediscovery proceeds below */
	}
	pptr->port_id = s_id;
	pptr->port_state = FCP_STATE_INIT;
	mutex_exit(&pptr->port_mutex);

	/*
	 * Make a copy of ulp_port_info as fctl allocates
	 * a temp struct.
	 */
	(void) fcp_cp_pinfo(pptr, pinfo);

	/* start the watchdog timer if this is the first active port */
	mutex_enter(&fcp_global_mutex);
	if (fcp_watchdog_init++ == 0) {
		fcp_watchdog_tick = fcp_watchdog_timeout *
		    drv_usectohz(1000000);
		fcp_watchdog_id = timeout(fcp_watch,
		    NULL, fcp_watchdog_tick);
	}
	mutex_exit(&fcp_global_mutex);

	/*
	 * Handle various topologies and link states.
	 */
	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
	case FC_STATE_OFFLINE:
		/*
		 * Wait for ONLINE, at which time a state
		 * change will cause a statec_callback
		 */
		res = DDI_SUCCESS;
		break;

	case FC_STATE_ONLINE:

		if (pptr->port_topology == FC_TOP_UNKNOWN) {
			/* force a link reset so the topology gets learned */
			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
			res = DDI_SUCCESS;
			break;
		}

		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    !fcp_enable_auto_configuration) {
			/* fabric: rebuild the map from known devices */
			tmp_list = fcp_construct_map(pptr, &alloc_cnt);
			if (tmp_list == NULL) {
				if (!alloc_cnt) {
					res = DDI_SUCCESS;
				}
				break;
			}
			max_cnt = alloc_cnt;
		} else {
			ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);

			alloc_cnt = FCP_MAX_DEVICES;

			if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
			    (sizeof (fc_portmap_t)) * alloc_cnt,
			    KM_NOSLEEP)) == NULL) {
				fcp_log(CE_WARN, pptr->port_dip,
				    "!fcp%d: failed to allocate portmap",
				    instance);
				break;
			}

			max_cnt = alloc_cnt;
			if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
			    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
			    FC_SUCCESS) {
				caddr_t msg;

				(void) fc_ulp_error(res, &msg);

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "resume failed getportmap: reason=0x%x",
				    res);

				fcp_log(CE_WARN, pptr->port_dip,
				    "!failed to get port map : %s", msg);
				break;
			}
			/* remember the larger size for the kmem_free below */
			if (max_cnt > alloc_cnt) {
				alloc_cnt = max_cnt;
			}
		}

		/*
		 * do the SCSI device discovery and create
		 * the devinfos
		 */
		fcp_statec_callback(ulph, pptr->port_fp_handle,
		    pptr->port_phys_state, pptr->port_topology, tmp_list,
		    max_cnt, pptr->port_id);

		res = DDI_SUCCESS;
		break;

	default:
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: invalid port state at attach=0x%x",
		    instance, pptr->port_phys_state);

		/*
		 * NOTE(review): an FCP_STATE_* value is stored into
		 * port_phys_state, which otherwise carries FC_STATE_*
		 * values -- confirm this mixing is intentional.
		 */
		mutex_enter(&pptr->port_mutex);
		pptr->port_phys_state = FCP_STATE_OFFLINE;
		mutex_exit(&pptr->port_mutex);
		res = DDI_SUCCESS;

		break;
	}

	if (tmp_list != NULL) {
		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
	}

	return (res);
}
12266
12267
12268 static void
12269 fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
12270 {
12271 pptr->port_fp_modlinkage = *pinfo->port_linkage;
12272 pptr->port_dip = pinfo->port_dip;
12273 pptr->port_fp_handle = pinfo->port_handle;
12274 if (pinfo->port_acc_attr != NULL) {
12275 /*
12276 * FCA supports DMA
12277 */
12278 pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
12279 pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
12280 pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
12281 pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
12282 }
12283 pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
12284 pptr->port_max_exch = pinfo->port_fca_max_exch;
12285 pptr->port_phys_state = pinfo->port_state;
12286 pptr->port_topology = pinfo->port_flags;
12287 pptr->port_reset_action = pinfo->port_reset_action;
12288 pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
12289 pptr->port_fcp_dma = pinfo->port_fcp_dma;
12290 bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
12291 bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
12292
12293 /* Clear FMA caps to avoid fm-capability ereport */
12294 if (pptr->port_cmd_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12295 pptr->port_cmd_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12296 if (pptr->port_data_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12297 pptr->port_data_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12298 if (pptr->port_resp_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12299 pptr->port_resp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12300 }
12301
12302 /*
12303 * If the elements wait field is set to 1 then
12304 * another thread is waiting for the operation to complete. Once
12305 * it is complete, the waiting thread is signaled and the element is
12306 * freed by the waiting thread. If the elements wait field is set to 0
12307 * the element is freed.
12308 */
12309 static void
12310 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12311 {
12312 ASSERT(elem != NULL);
12313 mutex_enter(&elem->mutex);
12314 elem->result = result;
12315 if (elem->wait) {
12316 elem->wait = 0;
12317 cv_signal(&elem->cv);
12318 mutex_exit(&elem->mutex);
12319 } else {
12320 mutex_exit(&elem->mutex);
12321 cv_destroy(&elem->cv);
12322 mutex_destroy(&elem->mutex);
12323 kmem_free(elem, sizeof (struct fcp_hp_elem));
12324 }
12325 }
12326
12327 /*
12328 * This function is invoked from the taskq thread to allocate
12329 * devinfo nodes and to online/offline them.
12330 */
static void
fcp_hp_task(void *arg)
{
	struct fcp_hp_elem	*elem = (struct fcp_hp_elem *)arg;
	struct fcp_lun	*plun = elem->lun;
	struct fcp_port	*pptr = elem->port;
	int	result;

	ASSERT(elem->what == FCP_ONLINE ||
	    elem->what == FCP_OFFLINE ||
	    elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
	    elem->what == FCP_MPXIO_PATH_SET_BUSY);

	/*
	 * Bail out if the LUN has seen another online/offline event since
	 * this request was queued (the request is stale), or if the port
	 * is being suspended, detached or powered down.
	 */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);
	if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
	    plun->lun_event_count != elem->event_cnt) ||
	    pptr->port_state & (FCP_STATE_SUSPENDED |
	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);
		fcp_process_elem(elem, NDI_FAILURE);
		return;
	}
	mutex_exit(&plun->lun_mutex);
	mutex_exit(&pptr->port_mutex);

	/* perform the actual online/offline/path-busy operation */
	result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
	    elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
	/* signal any waiter and/or free the element */
	fcp_process_elem(elem, result);
}
12362
12363
12364 static child_info_t *
12365 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12366 int tcount)
12367 {
12368 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12369
12370 if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12371 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12372
12373 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12374 /*
12375 * Child has not been created yet. Create the child device
12376 * based on the per-Lun flags.
12377 */
12378 if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12379 plun->lun_cip =
12380 CIP(fcp_create_dip(plun, lcount, tcount));
12381 plun->lun_mpxio = 0;
12382 } else {
12383 plun->lun_cip =
12384 CIP(fcp_create_pip(plun, lcount, tcount));
12385 plun->lun_mpxio = 1;
12386 }
12387 } else {
12388 plun->lun_cip = cip;
12389 }
12390
12391 return (plun->lun_cip);
12392 }
12393
12394
12395 static int
12396 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12397 {
12398 int rval = FC_FAILURE;
12399 dev_info_t *pdip;
12400 struct dev_info *dip;
12401 int circular;
12402
12403 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12404
12405 pdip = plun->lun_tgt->tgt_port->port_dip;
12406
12407 if (plun->lun_cip == NULL) {
12408 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12409 fcp_trace, FCP_BUF_LEVEL_3, 0,
12410 "fcp_is_dip_present: plun->lun_cip is NULL: "
12411 "plun: %p lun state: %x num: %d target state: %x",
12412 plun, plun->lun_state, plun->lun_num,
12413 plun->lun_tgt->tgt_port->port_state);
12414 return (rval);
12415 }
12416 ndi_devi_enter(pdip, &circular);
12417 dip = DEVI(pdip)->devi_child;
12418 while (dip) {
12419 if (dip == DEVI(cdip)) {
12420 rval = FC_SUCCESS;
12421 break;
12422 }
12423 dip = dip->devi_sibling;
12424 }
12425 ndi_devi_exit(pdip, circular);
12426 return (rval);
12427 }
12428
12429 static int
12430 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12431 {
12432 int rval = FC_FAILURE;
12433
12434 ASSERT(plun != NULL);
12435 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12436
12437 if (plun->lun_mpxio == 0) {
12438 rval = fcp_is_dip_present(plun, DIP(cip));
12439 } else {
12440 rval = fcp_is_pip_present(plun, PIP(cip));
12441 }
12442
12443 return (rval);
12444 }
12445
12446 /*
12447 * Function: fcp_create_dip
12448 *
12449 * Description: Creates a dev_info_t structure for the LUN specified by the
12450 * caller.
12451 *
12452 * Argument: plun Lun structure
12453 * link_cnt Link state count.
12454 * tgt_cnt Target state change count.
12455 *
12456 * Return Value: NULL if it failed
12457 * dev_info_t structure address if it succeeded
12458 *
12459 * Context: Kernel context
12460 */
static dev_info_t *
fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
{
	int			failure = 0;
	uint32_t		tgt_id;
	uint64_t		sam_lun;
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	struct fcp_port	*pptr = ptgt->tgt_port;
	dev_info_t		*pdip = pptr->port_dip;
	dev_info_t		*cdip = NULL;
	dev_info_t		*old_dip = DIP(plun->lun_cip);
	char			*nname = NULL;
	char			**compatible = NULL;
	int			ncompatible;
	char			*scsi_binding_set;
	char			t_pwwn[17];

	ASSERT(MUTEX_HELD(&plun->lun_mutex));
	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	/* get the 'scsi-binding-set' property */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
	    &scsi_binding_set) != DDI_PROP_SUCCESS) {
		scsi_binding_set = NULL;
	}

	/* determine the node name and compatible */
	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
	if (scsi_binding_set) {
		ddi_prop_free(scsi_binding_set);
	}

	/* a NULL node name means no driver binding could be determined */
	if (nname == NULL) {
#ifdef DEBUG
		cmn_err(CE_WARN, "%s%d: no driver for "
		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
		    " compatible: %s",
		    ddi_driver_name(pdip), ddi_get_instance(pdip),
		    ptgt->tgt_port_wwn.raw_wwn[0],
		    ptgt->tgt_port_wwn.raw_wwn[1],
		    ptgt->tgt_port_wwn.raw_wwn[2],
		    ptgt->tgt_port_wwn.raw_wwn[3],
		    ptgt->tgt_port_wwn.raw_wwn[4],
		    ptgt->tgt_port_wwn.raw_wwn[5],
		    ptgt->tgt_port_wwn.raw_wwn[6],
		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
		    *compatible);
#endif	/* DEBUG */
		failure++;
		goto end_of_fcp_create_dip;
	}

	cdip = fcp_find_existing_dip(plun, pdip, nname);

	/*
	 * if the old_dip does not match the cdip, that means there is
	 * some property change. since we'll be using the cdip, we need
	 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
	 * then the dtype for the device has been updated. Offline the
	 * the old device and create a new device with the new device type
	 * Refer to bug: 4764752
	 */
	if (old_dip && (cdip != old_dip ||
	    plun->lun_state & FCP_LUN_CHANGED)) {
		plun->lun_state &= ~(FCP_LUN_INIT);
		/*
		 * Drop both locks across fcp_pass_to_hp(), which takes
		 * the target mutex and may block.
		 */
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		mutex_enter(&ptgt->tgt_mutex);
		(void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
		    link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
		mutex_exit(&ptgt->tgt_mutex);

#ifdef DEBUG
		if (cdip != NULL) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "Old dip=%p; New dip=%p don't match", old_dip,
			    cdip);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "Old dip=%p; New dip=NULL don't match", old_dip);
		}
#endif

		mutex_enter(&pptr->port_mutex);
		mutex_enter(&plun->lun_mutex);
	}

	/* allocate a fresh node if none was found or the dtype changed */
	if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
		plun->lun_state &= ~(FCP_LUN_CHANGED);
		if (ndi_devi_alloc(pptr->port_dip, nname,
		    DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
			failure++;
			goto end_of_fcp_create_dip;
		}
	}

	/*
	 * Previously all the properties for the devinfo were destroyed here
	 * with a call to ndi_prop_remove_all(). Since this may cause loss of
	 * the devid property (and other properties established by the target
	 * driver or framework) which the code does not always recreate, this
	 * call was removed.
	 * This opens a theoretical possibility that we may return with a
	 * stale devid on the node if the scsi entity behind the fibre channel
	 * lun has changed.
	 */

	/* decorate the node with compatible */
	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	/* port WWN again, as a hex ASCII string property */
	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
	t_pwwn[16] = '\0';
	if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
	    != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	/*
	 * If there is no hard address - We might have to deal with
	 * that by using WWN - Having said that it is important to
	 * recognize this problem early so ssd can be informed of
	 * the right interconnect type.
	 */
	if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
		tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
	} else {
		tgt_id = ptgt->tgt_d_id;
	}

	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
	    tgt_id) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
	    (int)plun->lun_num) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}
	/* 64-bit SAM LUN property: the raw 8-byte FCP LUN address */
	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
	if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
	    sam_lun) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

end_of_fcp_create_dip:
	scsi_hba_nodename_compatible_free(nname, compatible);

	/* on any failure, tear down the node we may have allocated */
	if (cdip != NULL && failure) {
		(void) ndi_prop_remove_all(cdip);
		(void) ndi_devi_free(cdip);
		cdip = NULL;
	}

	return (cdip);
}
12641
12642 /*
12643 * Function: fcp_create_pip
12644 *
12645 * Description: Creates a Path Id for the LUN specified by the caller.
12646 *
12647 * Argument: plun Lun structure
12648 * link_cnt Link state count.
12649 * tgt_cnt Target state count.
12650 *
12651 * Return Value: NULL if it failed
12652 * mdi_pathinfo_t structure address if it succeeded
12653 *
12654 * Context: Kernel context
12655 */
12656 static mdi_pathinfo_t *
12657 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12658 {
12659 int i;
12660 char buf[MAXNAMELEN];
12661 char uaddr[MAXNAMELEN];
12662 int failure = 0;
12663 uint32_t tgt_id;
12664 uint64_t sam_lun;
12665 struct fcp_tgt *ptgt = plun->lun_tgt;
12666 struct fcp_port *pptr = ptgt->tgt_port;
12667 dev_info_t *pdip = pptr->port_dip;
12668 mdi_pathinfo_t *pip = NULL;
12669 mdi_pathinfo_t *old_pip = PIP(plun->lun_cip);
12670 char *nname = NULL;
12671 char **compatible = NULL;
12672 int ncompatible;
12673 char *scsi_binding_set;
12674 char t_pwwn[17];
12675
12676 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12677 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12678
12679 scsi_binding_set = "vhci";
12680
12681 /* determine the node name and compatible */
12682 scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12683 plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12684
12685 if (nname == NULL) {
12686 #ifdef DEBUG
12687 cmn_err(CE_WARN, "fcp_create_dip: %s%d: no driver for "
12688 "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12689 " compatible: %s",
12690 ddi_driver_name(pdip), ddi_get_instance(pdip),
12691 ptgt->tgt_port_wwn.raw_wwn[0],
12692 ptgt->tgt_port_wwn.raw_wwn[1],
12693 ptgt->tgt_port_wwn.raw_wwn[2],
12694 ptgt->tgt_port_wwn.raw_wwn[3],
12695 ptgt->tgt_port_wwn.raw_wwn[4],
12696 ptgt->tgt_port_wwn.raw_wwn[5],
12697 ptgt->tgt_port_wwn.raw_wwn[6],
12698 ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12699 *compatible);
12700 #endif /* DEBUG */
12701 failure++;
12702 goto end_of_fcp_create_pip;
12703 }
12704
12705 pip = fcp_find_existing_pip(plun, pdip);
12706
12707 /*
12708 * if the old_dip does not match the cdip, that means there is
12709 * some property change. since we'll be using the cdip, we need
12710 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12711 * then the dtype for the device has been updated. Offline the
12712 * the old device and create a new device with the new device type
12713 * Refer to bug: 4764752
12714 */
12715 if (old_pip && (pip != old_pip ||
12716 plun->lun_state & FCP_LUN_CHANGED)) {
12717 plun->lun_state &= ~(FCP_LUN_INIT);
12718 mutex_exit(&plun->lun_mutex);
12719 mutex_exit(&pptr->port_mutex);
12720
12721 mutex_enter(&ptgt->tgt_mutex);
12722 (void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12723 FCP_OFFLINE, lcount, tcount,
12724 NDI_DEVI_REMOVE, 0);
12725 mutex_exit(&ptgt->tgt_mutex);
12726
12727 if (pip != NULL) {
12728 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12729 fcp_trace, FCP_BUF_LEVEL_2, 0,
12730 "Old pip=%p; New pip=%p don't match",
12731 old_pip, pip);
12732 } else {
12733 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12734 fcp_trace, FCP_BUF_LEVEL_2, 0,
12735 "Old pip=%p; New pip=NULL don't match",
12736 old_pip);
12737 }
12738
12739 mutex_enter(&pptr->port_mutex);
12740 mutex_enter(&plun->lun_mutex);
12741 }
12742
12743 /*
12744 * Since FC_WWN_SIZE is 8 bytes and its not like the
12745 * lun_guid_size which is dependent on the target, I don't
12746 * believe the same trancation happens here UNLESS the standards
12747 * change the FC_WWN_SIZE value to something larger than
12748 * MAXNAMELEN(currently 255 bytes).
12749 */
12750
12751 for (i = 0; i < FC_WWN_SIZE; i++) {
12752 (void) sprintf(&buf[i << 1], "%02x",
12753 ptgt->tgt_port_wwn.raw_wwn[i]);
12754 }
12755
12756 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12757 buf, plun->lun_num);
12758
12759 if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12760 /*
12761 * Release the locks before calling into
12762 * mdi_pi_alloc_compatible() since this can result in a
12763 * callback into fcp which can result in a deadlock
12764 * (see bug # 4870272).
12765 *
12766 * Basically, what we are trying to avoid is the scenario where
12767 * one thread does ndi_devi_enter() and tries to grab
12768 * fcp_mutex and another does it the other way round.
12769 *
12770 * But before we do that, make sure that nobody releases the
12771 * port in the meantime. We can do this by setting a flag.
12772 */
12773 plun->lun_state &= ~(FCP_LUN_CHANGED);
12774 pptr->port_state |= FCP_STATE_IN_MDI;
12775 mutex_exit(&plun->lun_mutex);
12776 mutex_exit(&pptr->port_mutex);
12777 if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12778 uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12779 fcp_log(CE_WARN, pptr->port_dip,
12780 "!path alloc failed:0x%x", plun);
12781 mutex_enter(&pptr->port_mutex);
12782 mutex_enter(&plun->lun_mutex);
12783 pptr->port_state &= ~FCP_STATE_IN_MDI;
12784 failure++;
12785 goto end_of_fcp_create_pip;
12786 }
12787 mutex_enter(&pptr->port_mutex);
12788 mutex_enter(&plun->lun_mutex);
12789 pptr->port_state &= ~FCP_STATE_IN_MDI;
12790 } else {
12791 (void) mdi_prop_remove(pip, NULL);
12792 }
12793
12794 mdi_pi_set_phci_private(pip, (caddr_t)plun);
12795
12796 if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12797 ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12798 != DDI_PROP_SUCCESS) {
12799 failure++;
12800 goto end_of_fcp_create_pip;
12801 }
12802
12803 if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12804 ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12805 != DDI_PROP_SUCCESS) {
12806 failure++;
12807 goto end_of_fcp_create_pip;
12808 }
12809
12810 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12811 t_pwwn[16] = '\0';
12812 if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12813 != DDI_PROP_SUCCESS) {
12814 failure++;
12815 goto end_of_fcp_create_pip;
12816 }
12817
12818 /*
12819 * If there is no hard address - We might have to deal with
12820 * that by using WWN - Having said that it is important to
12821 * recognize this problem early so ssd can be informed of
12822 * the right interconnect type.
12823 */
12824 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12825 ptgt->tgt_hard_addr != 0) {
12826 tgt_id = (uint32_t)
12827 fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12828 } else {
12829 tgt_id = ptgt->tgt_d_id;
12830 }
12831
12832 if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12833 != DDI_PROP_SUCCESS) {
12834 failure++;
12835 goto end_of_fcp_create_pip;
12836 }
12837
12838 if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12839 != DDI_PROP_SUCCESS) {
12840 failure++;
12841 goto end_of_fcp_create_pip;
12842 }
12843 bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12844 if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12845 != DDI_PROP_SUCCESS) {
12846 failure++;
12847 goto end_of_fcp_create_pip;
12848 }
12849
12850 end_of_fcp_create_pip:
12851 scsi_hba_nodename_compatible_free(nname, compatible);
12852
12853 if (pip != NULL && failure) {
12854 (void) mdi_prop_remove(pip, NULL);
12855 mutex_exit(&plun->lun_mutex);
12856 mutex_exit(&pptr->port_mutex);
12857 (void) mdi_pi_free(pip, 0);
12858 mutex_enter(&pptr->port_mutex);
12859 mutex_enter(&plun->lun_mutex);
12860 pip = NULL;
12861 }
12862
12863 return (pip);
12864 }
12865
/*
 * Walk the immediate children of pdip looking for an existing devinfo
 * node whose node name, node WWN, port WWN, target id and LUN number all
 * match the LUN described by plun.  Returns the matching child or NULL.
 * The walk is performed under ndi_devi_enter() to keep the list stable.
 */
static dev_info_t *
fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
{
	uint_t			nbytes;
	uchar_t			*bytes;
	uint_t			nwords;
	uint32_t		tgt_id;
	int			*words;
	dev_info_t		*cdip;
	dev_info_t		*ndip;
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	struct fcp_port	*pptr = ptgt->tgt_port;
	int			circular;

	ndi_devi_enter(pdip, &circular);

	/* fetch the next sibling first in case the loop body continues */
	ndip = (dev_info_t *)DEVI(pdip)->devi_child;
	while ((cdip = ndip) != NULL) {
		ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;

		/* node name must match the name we would bind with */
		if (strcmp(DEVI(cdip)->devi_node_name, name)) {
			continue;
		}

		/* compare the node WWN property */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
		    &nbytes) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
			if (bytes != NULL) {
				ddi_prop_free(bytes);
			}
			continue;
		}
		ASSERT(bytes != NULL);

		if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
			ddi_prop_free(bytes);
			continue;
		}

		ddi_prop_free(bytes);

		/* compare the port WWN property */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
		    &nbytes) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
			if (bytes != NULL) {
				ddi_prop_free(bytes);
			}
			continue;
		}
		ASSERT(bytes != NULL);

		if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
			ddi_prop_free(bytes);
			continue;
		}

		ddi_prop_free(bytes);

		/* compare the target id property */
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
		    &nwords) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nwords != 1 || words == NULL) {
			if (words != NULL) {
				ddi_prop_free(words);
			}
			continue;
		}
		ASSERT(words != NULL);

		/*
		 * If there is no hard address - We might have to deal with
		 * that by using WWN - Having said that it is important to
		 * recognize this problem early so ssd can be informed of
		 * the right interconnect type.
		 */
		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
		    ptgt->tgt_hard_addr != 0) {
			tgt_id =
			    (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
		} else {
			tgt_id = ptgt->tgt_d_id;
		}

		if (tgt_id != (uint32_t)*words) {
			ddi_prop_free(words);
			continue;
		}
		ddi_prop_free(words);

		/* finally compare the LUN number property */
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
		    &nwords) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nwords != 1 || words == NULL) {
			if (words != NULL) {
				ddi_prop_free(words);
			}
			continue;
		}
		ASSERT(words != NULL);

		if (plun->lun_num == (uint16_t)*words) {
			/* full match found; cdip is the node we want */
			ddi_prop_free(words);
			break;
		}
		ddi_prop_free(words);
	}
	ndi_devi_exit(pdip, circular);

	return (cdip);
}
12990
12991
12992 static int
12993 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
12994 {
12995 dev_info_t *pdip;
12996 char buf[MAXNAMELEN];
12997 char uaddr[MAXNAMELEN];
12998 int rval = FC_FAILURE;
12999
13000 ASSERT(MUTEX_HELD(&plun->lun_mutex));
13001
13002 pdip = plun->lun_tgt->tgt_port->port_dip;
13003
13004 /*
13005 * Check if pip (and not plun->lun_cip) is NULL. plun->lun_cip can be
13006 * non-NULL even when the LUN is not there as in the case when a LUN is
13007 * configured and then deleted on the device end (for T3/T4 case). In
13008 * such cases, pip will be NULL.
13009 *
13010 * If the device generates an RSCN, it will end up getting offlined when
13011 * it disappeared and a new LUN will get created when it is rediscovered
13012 * on the device. If we check for lun_cip here, the LUN will not end
13013 * up getting onlined since this function will end up returning a
13014 * FC_SUCCESS.
13015 *
13016 * The behavior is different on other devices. For instance, on a HDS,
13017 * there was no RSCN generated by the device but the next I/O generated
13018 * a check condition and rediscovery got triggered that way. So, in
13019 * such cases, this path will not be exercised
13020 */
13021 if (pip == NULL) {
13022 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
13023 fcp_trace, FCP_BUF_LEVEL_4, 0,
13024 "fcp_is_pip_present: plun->lun_cip is NULL: "
13025 "plun: %p lun state: %x num: %d target state: %x",
13026 plun, plun->lun_state, plun->lun_num,
13027 plun->lun_tgt->tgt_port->port_state);
13028 return (rval);
13029 }
13030
13031 fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
13032
13033 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13034
13035 if (mdi_pi_find(pdip, NULL, uaddr) == pip) {
13036 rval = FC_SUCCESS;
13037 }
13038
13039 return (rval);
13040 }
13041
13042 static mdi_pathinfo_t *
13043 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
13044 {
13045 char buf[MAXNAMELEN];
13046 char uaddr[MAXNAMELEN];
13047 mdi_pathinfo_t *pip;
13048 struct fcp_tgt *ptgt = plun->lun_tgt;
13049 struct fcp_port *pptr = ptgt->tgt_port;
13050
13051 ASSERT(MUTEX_HELD(&pptr->port_mutex));
13052
13053 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
13054 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13055
13056 pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
13057
13058 return (pip);
13059 }
13060
13061
/*
 * Function: fcp_online_child
 *
 * Description: Brings the child node of a LUN online.  Depending on
 *	plun->lun_mpxio the child is either a plain devinfo node (onlined
 *	through ndi_devi_online/ndi_devi_bind_driver) or an MPxIO pathinfo
 *	node (onlined through mdi_pi_online).  If MPxIO reports the device
 *	as not supported, the LUN falls back to legacy enumeration: a
 *	devinfo node is created, the pathinfo node is freed and the online
 *	is retried from the top (goto again).
 *
 * Argument: *plun	LUN whose child is to be onlined.
 *	*cip	child node (devinfo or pathinfo, per lun_mpxio).
 *	lcount	link count at the time of the request.
 *	tcount	target change count at the time of the request.
 *	flags	NDI/MDI online flags.
 *	*circ	phci circular-dependency cookie (for mdi_devi_*_phci).
 *
 * Return Value: NDI_SUCCESS or NDI_FAILURE.
 *
 * Context: Called — and returns — with pptr->port_mutex and
 *	plun->lun_mutex held; both are dropped around the online calls,
 *	so LUN/port state may change while the online is in progress.
 */
static int
fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags, int *circ)
{
	int rval;
	struct fcp_port *pptr = plun->lun_tgt->tgt_port;
	struct fcp_tgt *ptgt = plun->lun_tgt;
	dev_info_t *cdip = NULL;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));
	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	/* No child node to online (e.g. LUN deleted on the device end). */
	if (plun->lun_cip == NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_online_child: plun->lun_cip is NULL: "
		    "plun: %p state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (NDI_FAILURE);
	}
again:
	if (plun->lun_mpxio == 0) {
		/* Legacy (non-MPxIO) devinfo child. */
		cdip = DIP(cip);
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "!Invoking ndi_devi_online for %s: target=%x lun=%x",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);

		/*
		 * We could check for FCP_LUN_INIT here but chances
		 * of getting here when it's already in FCP_LUN_INIT
		 * is rare and a duplicate ndi_devi_online wouldn't
		 * hurt either (as the node would already have been
		 * in CF2)
		 */
		if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
			/* Parent not attached yet: bind only, don't online. */
			rval = ndi_devi_bind_driver(cdip, flags);
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "!Invoking ndi_devi_bind_driver: rval=%d", rval);
		} else {
			rval = ndi_devi_online(cdip, flags);
		}

		/*
		 * We log the message into trace buffer if the device
		 * is "ses" and into syslog for any other device
		 * type. This is to prevent the ndi_devi_online failure
		 * message that appears for V880/A5K ses devices.
		 */
		if (rval == NDI_SUCCESS) {
			mutex_enter(&ptgt->tgt_mutex);
			plun->lun_state |= FCP_LUN_INIT;
			mutex_exit(&ptgt->tgt_mutex);
		} else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
			fcp_log(CE_NOTE, pptr->port_dip,
			    "!ndi_devi_online:"
			    " failed for %s: target=%x lun=%x %x",
			    ddi_get_name(cdip), ptgt->tgt_d_id,
			    plun->lun_num, rval);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    " !ndi_devi_online:"
			    " failed for %s: target=%x lun=%x %x",
			    ddi_get_name(cdip), ptgt->tgt_d_id,
			    plun->lun_num, rval);
		}
	} else {
		/* MPxIO pathinfo child. */
		cdip = mdi_pi_get_client(PIP(cip));
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "!Invoking mdi_pi_online for %s: target=%x lun=%x",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);

		/*
		 * Hold path and exit phci to avoid deadlock with power
		 * management code during mdi_pi_online.
		 */
		mdi_hold_path(PIP(cip));
		mdi_devi_exit_phci(pptr->port_dip, *circ);

		rval = mdi_pi_online(PIP(cip), flags);

		mdi_devi_enter_phci(pptr->port_dip, circ);
		mdi_rele_path(PIP(cip));

		if (rval == MDI_SUCCESS) {
			mutex_enter(&ptgt->tgt_mutex);
			plun->lun_state |= FCP_LUN_INIT;
			mutex_exit(&ptgt->tgt_mutex);

			/*
			 * Clear MPxIO path permanent disable in case
			 * fcp hotplug dropped the offline event.
			 */
			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);

		} else if (rval == MDI_NOT_SUPPORTED) {
			child_info_t *old_cip = cip;

			/*
			 * MPxIO does not support this device yet.
			 * Enumerate in legacy mode.
			 */
			mutex_enter(&pptr->port_mutex);
			mutex_enter(&plun->lun_mutex);
			plun->lun_mpxio = 0;
			plun->lun_cip = NULL;
			cdip = fcp_create_dip(plun, lcount, tcount);
			plun->lun_cip = cip = CIP(cdip);
			if (cip == NULL) {
				fcp_log(CE_WARN, pptr->port_dip,
				    "!fcp_online_child: "
				    "Create devinfo failed for LU=%p", plun);
				mutex_exit(&plun->lun_mutex);

				mutex_enter(&ptgt->tgt_mutex);
				plun->lun_state |= FCP_LUN_OFFLINE;
				mutex_exit(&ptgt->tgt_mutex);

				mutex_exit(&pptr->port_mutex);

				/*
				 * free the mdi_pathinfo node
				 */
				(void) mdi_pi_free(PIP(old_cip), 0);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_3, 0,
				    "fcp_online_child: creating devinfo "
				    "node 0x%p for plun 0x%p",
				    cip, plun);
				mutex_exit(&plun->lun_mutex);
				mutex_exit(&pptr->port_mutex);
				/*
				 * free the mdi_pathinfo node
				 */
				(void) mdi_pi_free(PIP(old_cip), 0);
				mutex_enter(&pptr->port_mutex);
				mutex_enter(&plun->lun_mutex);
				/* Retry the online path with the new dip. */
				goto again;
			}
		} else {
			if (cdip) {
				fcp_log(CE_NOTE, pptr->port_dip,
				    "!fcp_online_child: mdi_pi_online:"
				    " failed for %s: target=%x lun=%x %x",
				    ddi_get_name(cdip), ptgt->tgt_d_id,
				    plun->lun_num, rval);
			}
		}
		/* Normalize the MDI result to the NDI convention. */
		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
	}

	if (rval == NDI_SUCCESS) {
		if (cdip) {
			/* Fire the FCAL insert event for the new child. */
			(void) ndi_event_retrieve_cookie(
			    pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
			    &fcp_insert_eid, NDI_EVENT_NOPASS);
			(void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
			    cdip, fcp_insert_eid, NULL);
		}
	}
	/* Reacquire the mutexes the caller expects to still hold. */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);
	return (rval);
}
13237
/* ARGSUSED */
/*
 * Function: fcp_offline_child
 *
 * Description: Takes the child node of a LUN offline.  Non-MPxIO
 *	children go through ndi_devi_offline(); MPxIO children go
 *	through mdi_pi_offline() (NDI_DEVI_REMOVE is masked off for the
 *	MDI call; the pathinfo node is instead freed explicitly below
 *	when removal was requested and the offline succeeded).
 *
 * Return Value: NDI_SUCCESS or NDI_FAILURE.
 *
 * Context: Called — and returns — with pptr->port_mutex and
 *	plun->lun_mutex held; both are dropped around the offline calls.
 */
static int
fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags, int *circ)
{
	int rval;
	int lun_mpxio;
	struct fcp_port *pptr = plun->lun_tgt->tgt_port;
	struct fcp_tgt *ptgt = plun->lun_tgt;
	dev_info_t *cdip;

	ASSERT(MUTEX_HELD(&plun->lun_mutex));
	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	if (plun->lun_cip == NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_offline_child: plun->lun_cip is NULL: "
		    "plun: %p lun state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (NDI_FAILURE);
	}

	/*
	 * We will use this value twice. Make a copy to be sure we use
	 * the same value in both places.
	 */
	lun_mpxio = plun->lun_mpxio;

	if (lun_mpxio == 0) {
		cdip = DIP(cip);
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);
		rval = ndi_devi_offline(DIP(cip), NDI_DEVFS_CLEAN | flags);
		if (rval != NDI_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_offline_child: ndi_devi_offline failed "
			    "rval=%x cip=%p", rval, cip);
		}
	} else {
		cdip = mdi_pi_get_client(PIP(cip));
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		/*
		 * Exit phci to avoid deadlock with power management code
		 * during mdi_pi_offline
		 */
		mdi_hold_path(PIP(cip));
		mdi_devi_exit_phci(pptr->port_dip, *circ);

		/* NDI_DEVI_REMOVE is an NDI-only flag; mask it for MDI. */
		rval = mdi_pi_offline(PIP(cip), flags & ~NDI_DEVI_REMOVE);

		mdi_devi_enter_phci(pptr->port_dip, circ);
		mdi_rele_path(PIP(cip));

		/* Normalize the MDI result to the NDI convention. */
		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
	}

	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_state &= ~FCP_LUN_INIT;
	mutex_exit(&ptgt->tgt_mutex);

	if (rval == NDI_SUCCESS) {
		/*
		 * cdip is reset here so the failure-logging block at the
		 * bottom only fires when the offline did NOT succeed.
		 */
		cdip = NULL;
		if (flags & NDI_DEVI_REMOVE) {
			mutex_enter(&plun->lun_mutex);
			/*
			 * If the guid of the LUN changes, lun_cip will not
			 * equal to cip, and after offlining the LUN with the
			 * old guid, we should keep lun_cip since it's the cip
			 * of the LUN with the new guid.
			 * Otherwise remove our reference to child node.
			 *
			 * This must be done before the child node is freed,
			 * otherwise other threads could see a stale lun_cip
			 * pointer.
			 */
			if (plun->lun_cip == cip) {
				plun->lun_cip = NULL;
			}
			if (plun->lun_old_guid) {
				kmem_free(plun->lun_old_guid,
				    plun->lun_old_guid_size);
				plun->lun_old_guid = NULL;
				plun->lun_old_guid_size = 0;
			}
			mutex_exit(&plun->lun_mutex);
		}
	}

	if (lun_mpxio != 0) {
		if (rval == NDI_SUCCESS) {
			/*
			 * Clear MPxIO path permanent disable as the path is
			 * already offlined.
			 */
			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);

			if (flags & NDI_DEVI_REMOVE) {
				/* Removal requested: free the pathinfo node */
				(void) mdi_pi_free(PIP(cip), 0);
			}
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_offline_child: mdi_pi_offline failed "
			    "rval=%x cip=%p", rval, cip);
		}
	}

	/* Reacquire the mutexes the caller expects to still hold. */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);

	/* cdip is non-NULL here only when the offline attempt failed. */
	if (cdip) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
		    " target=%x lun=%x", "ndi_offline",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
	}

	return (rval);
}
13362
/*
 * Function: fcp_remove_child
 *
 * Description: Removes the child node of a LUN and clears
 *	plun->lun_cip.  For non-MPxIO children the devinfo node is freed
 *	directly; for MPxIO children the pathinfo node is first offlined
 *	(with the phci exited to avoid a deadlock with power management)
 *	and then freed.
 *
 * Context: Called with lun_mutex held (asserted).  The MPxIO branch
 *	also drops and reacquires tgt_mutex and port_mutex, so the
 *	caller must hold those two as well — NOTE(review): inferred from
 *	the mutex_exit/mutex_enter pairs below; confirm against callers.
 */
static void
fcp_remove_child(struct fcp_lun *plun)
{
	child_info_t *cip;
	int circ;

	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
		if (plun->lun_mpxio == 0) {
			/* Plain devinfo child: strip props and free it. */
			(void) ndi_prop_remove_all(DIP(plun->lun_cip));
			(void) ndi_devi_free(DIP(plun->lun_cip));
			plun->lun_cip = NULL;
		} else {
			/*
			 * Clear reference to the child node in the lun.
			 * This must be done before freeing it with mdi_pi_free
			 * and with lun_mutex held so that other threads always
			 * see either valid lun_cip or NULL when holding
			 * lun_mutex. We keep a copy in cip.
			 */
			cip = plun->lun_cip;
			plun->lun_cip = NULL;

			mutex_exit(&plun->lun_mutex);
			mutex_exit(&plun->lun_tgt->tgt_mutex);
			mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);

			mdi_devi_enter(plun->lun_tgt->tgt_port->port_dip,
			    &circ);

			/*
			 * Exit phci to avoid deadlock with power management
			 * code during mdi_pi_offline
			 */
			mdi_hold_path(PIP(cip));
			mdi_devi_exit_phci(plun->lun_tgt->tgt_port->port_dip,
			    circ);
			(void) mdi_pi_offline(PIP(cip), 0);
			mdi_devi_enter_phci(plun->lun_tgt->tgt_port->port_dip,
			    &circ);
			mdi_rele_path(PIP(cip));

			mdi_devi_exit(plun->lun_tgt->tgt_port->port_dip, circ);

			FCP_TRACE(fcp_logq,
			    plun->lun_tgt->tgt_port->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "lun=%p pip freed %p", plun, cip);

			(void) mdi_prop_remove(PIP(cip), NULL);
			(void) mdi_pi_free(PIP(cip), 0);

			/* Reacquire in the original port -> tgt -> lun order */
			mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
			mutex_enter(&plun->lun_tgt->tgt_mutex);
			mutex_enter(&plun->lun_mutex);
		}
	} else {
		/* Child already gone; just drop the stale reference. */
		plun->lun_cip = NULL;
	}
}
13424
13425 /*
13426 * called when a timeout occurs
13427 *
13428 * can be scheduled during an attach or resume (if not already running)
13429 *
13430 * one timeout is set up for all ports
13431 *
13432 * acquires and releases the global mutex
13433 */
/*ARGSUSED*/
static void
fcp_watch(void *arg)
{
	struct fcp_port *pptr;
	struct fcp_ipkt *icmd;
	struct fcp_ipkt *nicmd;
	struct fcp_pkt *cmd;
	struct fcp_pkt *ncmd;
	struct fcp_pkt *tail;
	struct fcp_pkt *pcmd;
	struct fcp_pkt *save_head;
	struct fcp_port *save_port;

	/* increment global watchdog time */
	fcp_watchdog_time += fcp_watchdog_timeout;

	mutex_enter(&fcp_global_mutex);

	/* scan each port in our list */
	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
		/*
		 * Remember the list head; the global mutex is dropped
		 * below, so if the head changes we bail out of the scan
		 * at end_of_watchdog rather than walk a stale list.
		 */
		save_port = fcp_port_head;
		pptr->port_state |= FCP_STATE_IN_WATCHDOG;
		mutex_exit(&fcp_global_mutex);

		mutex_enter(&pptr->port_mutex);
		/*
		 * Skip ports that are going away (suspend/detach/power
		 * down) and have no internal packets left to service.
		 */
		if (pptr->port_ipkt_list == NULL &&
		    (pptr->port_state & (FCP_STATE_SUSPENDED |
		    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
			pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
			mutex_exit(&pptr->port_mutex);
			mutex_enter(&fcp_global_mutex);
			goto end_of_watchdog;
		}

		/*
		 * We check if a list of targets need to be offlined.
		 */
		if (pptr->port_offline_tgts) {
			fcp_scan_offline_tgts(pptr);
		}

		/*
		 * We check if a list of luns need to be offlined.
		 */
		if (pptr->port_offline_luns) {
			fcp_scan_offline_luns(pptr);
		}

		/*
		 * We check if a list of targets or luns need to be reset.
		 */
		if (pptr->port_reset_list) {
			fcp_check_reset_delay(pptr);
		}

		mutex_exit(&pptr->port_mutex);

		/*
		 * This is where the pending commands (pkt) are checked for
		 * timeout.
		 */
		mutex_enter(&pptr->port_pkt_mutex);
		tail = pptr->port_pkt_tail;

		for (pcmd = NULL, cmd = pptr->port_pkt_head;
		    cmd != NULL; cmd = ncmd) {
			ncmd = cmd->cmd_next;
			/*
			 * If a command is in this queue the bit CFLAG_IN_QUEUE
			 * must be set.
			 */
			ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
			/*
			 * FCP_INVALID_TIMEOUT will be set for those
			 * command that need to be failed. Mostly those
			 * cmds that could not be queued down for the
			 * "timeout" value. cmd->cmd_timeout is used
			 * to try and requeue the command regularly.
			 */
			if (cmd->cmd_timeout >= fcp_watchdog_time) {
				/*
				 * This command hasn't timed out yet. Let's
				 * go to the next one.
				 */
				pcmd = cmd;
				goto end_of_loop;
			}

			/* Unlink the timed-out command from the queue. */
			if (cmd == pptr->port_pkt_head) {
				ASSERT(pcmd == NULL);
				pptr->port_pkt_head = cmd->cmd_next;
			} else {
				ASSERT(pcmd != NULL);
				pcmd->cmd_next = cmd->cmd_next;
			}

			if (cmd == pptr->port_pkt_tail) {
				ASSERT(cmd->cmd_next == NULL);
				pptr->port_pkt_tail = pcmd;
				if (pcmd) {
					pcmd->cmd_next = NULL;
				}
			}
			cmd->cmd_next = NULL;

			/*
			 * save the current head before dropping the
			 * mutex - If the head doesn't remain the
			 * same after re acquiring the mutex, just
			 * bail out and revisit on next tick.
			 *
			 * PS: The tail pointer can change as the commands
			 * get requeued after failure to retransport
			 */
			save_head = pptr->port_pkt_head;
			mutex_exit(&pptr->port_pkt_mutex);

			if (cmd->cmd_fp_pkt->pkt_timeout ==
			    FCP_INVALID_TIMEOUT) {
				struct scsi_pkt *pkt = cmd->cmd_pkt;
				struct fcp_lun *plun;
				struct fcp_tgt *ptgt;

				plun = ADDR2LUN(&pkt->pkt_address);
				ptgt = plun->lun_tgt;

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "SCSI cmd 0x%x to D_ID=%x timed out",
				    pkt->pkt_cdbp[0], ptgt->tgt_d_id);

				/*
				 * Fail with CMD_RESET/STAT_DEV_RESET if we
				 * were aborting, otherwise as a timeout.
				 */
				cmd->cmd_state == FCP_PKT_ABORTING ?
				    fcp_fail_cmd(cmd, CMD_RESET,
				    STAT_DEV_RESET) : fcp_fail_cmd(cmd,
				    CMD_TIMEOUT, STAT_ABORTED);
			} else {
				fcp_retransport_cmd(pptr, cmd);
			}
			mutex_enter(&pptr->port_pkt_mutex);
			if (save_head && save_head != pptr->port_pkt_head) {
				/*
				 * Looks like linked list got changed (mostly
				 * happens when an OFFLINE LUN code starts
				 * returning overflow queue commands in
				 * parallel. So bail out and revisit during
				 * next tick
				 */
				break;
			}
		end_of_loop:
			/*
			 * Scan only up to the previously known tail pointer
			 * to avoid excessive processing - lots of new packets
			 * could have been added to the tail or the old ones
			 * re-queued.
			 */
			if (cmd == tail) {
				break;
			}
		}
		mutex_exit(&pptr->port_pkt_mutex);

		/*
		 * Now service the internal (ELS/transport) packet list:
		 * retry packets whose restart time has arrived.
		 */
		mutex_enter(&pptr->port_mutex);
		for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
			struct fcp_tgt *ptgt = icmd->ipkt_tgt;

			nicmd = icmd->ipkt_next;
			if ((icmd->ipkt_restart != 0) &&
			    (icmd->ipkt_restart >= fcp_watchdog_time)) {
				/* packet has not timed out */
				continue;
			}

			/* time for packet re-transport */
			if (icmd == pptr->port_ipkt_list) {
				pptr->port_ipkt_list = icmd->ipkt_next;
				if (pptr->port_ipkt_list) {
					pptr->port_ipkt_list->ipkt_prev =
					    NULL;
				}
			} else {
				icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
				if (icmd->ipkt_next) {
					icmd->ipkt_next->ipkt_prev =
					    icmd->ipkt_prev;
				}
			}
			icmd->ipkt_next = NULL;
			icmd->ipkt_prev = NULL;
			mutex_exit(&pptr->port_mutex);

			if (fcp_is_retryable(icmd)) {
				fc_ulp_rscn_info_t *rscnp =
				    (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
				    pkt_ulp_rscn_infop;

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "%x to D_ID=%x Retrying..",
				    icmd->ipkt_opcode,
				    icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);

				/*
				 * Update the RSCN count in the packet
				 * before resending.
				 */

				if (rscnp != NULL) {
					rscnp->ulp_rscn_count =
					    fc_ulp_get_rscn_count(pptr->
					    port_fp_handle);
				}

				mutex_enter(&pptr->port_mutex);
				mutex_enter(&ptgt->tgt_mutex);
				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
					mutex_exit(&ptgt->tgt_mutex);
					mutex_exit(&pptr->port_mutex);
					/*
					 * Reissue by opcode.  On success (or
					 * on a recoverable error) the packet
					 * stays alive: continue to the next
					 * one without freeing it.
					 */
					switch (icmd->ipkt_opcode) {
						int rval;
					case LA_ELS_PLOGI:
						if ((rval = fc_ulp_login(
						    pptr->port_fp_handle,
						    &icmd->ipkt_fpkt, 1)) ==
						    FC_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						if (fcp_handle_ipkt_errors(
						    pptr, ptgt, icmd, rval,
						    "PLOGI") == DDI_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						break;

					case LA_ELS_PRLI:
						if ((rval = fc_ulp_issue_els(
						    pptr->port_fp_handle,
						    icmd->ipkt_fpkt)) ==
						    FC_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						if (fcp_handle_ipkt_errors(
						    pptr, ptgt, icmd, rval,
						    "PRLI") == DDI_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						break;

					default:
						if ((rval = fcp_transport(
						    pptr->port_fp_handle,
						    icmd->ipkt_fpkt, 1)) ==
						    FC_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						/*
						 * NOTE(review): the "PRLI"
						 * label below looks copied
						 * from the PRLI case even
						 * though this is the generic
						 * branch — confirm intent.
						 */
						if (fcp_handle_ipkt_errors(
						    pptr, ptgt, icmd, rval,
						    "PRLI") == DDI_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						break;
					}
				} else {
					mutex_exit(&ptgt->tgt_mutex);
					mutex_exit(&pptr->port_mutex);
				}
			} else {
				fcp_print_error(icmd->ipkt_fpkt);
			}

			/* Retry exhausted or state changed: finish and free */
			(void) fcp_call_finish_init(pptr, ptgt,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause);
			fcp_icmd_free(pptr, icmd);
			mutex_enter(&pptr->port_mutex);
		}

		pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
		mutex_exit(&pptr->port_mutex);
		mutex_enter(&fcp_global_mutex);

	end_of_watchdog:
		/*
		 * Bail out early before getting into trouble
		 */
		if (save_port != fcp_port_head) {
			break;
		}
	}

	if (fcp_watchdog_init > 0) {
		/* reschedule timeout to go again */
		fcp_watchdog_id =
		    timeout(fcp_watch, NULL, fcp_watchdog_tick);
	}
	mutex_exit(&fcp_global_mutex);
}
13744
13745
13746 static void
13747 fcp_check_reset_delay(struct fcp_port *pptr)
13748 {
13749 uint32_t tgt_cnt;
13750 int level;
13751 struct fcp_tgt *ptgt;
13752 struct fcp_lun *plun;
13753 struct fcp_reset_elem *cur = NULL;
13754 struct fcp_reset_elem *next = NULL;
13755 struct fcp_reset_elem *prev = NULL;
13756
13757 ASSERT(mutex_owned(&pptr->port_mutex));
13758
13759 next = pptr->port_reset_list;
13760 while ((cur = next) != NULL) {
13761 next = cur->next;
13762
13763 if (cur->timeout < fcp_watchdog_time) {
13764 prev = cur;
13765 continue;
13766 }
13767
13768 ptgt = cur->tgt;
13769 plun = cur->lun;
13770 tgt_cnt = cur->tgt_cnt;
13771
13772 if (ptgt) {
13773 level = RESET_TARGET;
13774 } else {
13775 ASSERT(plun != NULL);
13776 level = RESET_LUN;
13777 ptgt = plun->lun_tgt;
13778 }
13779 if (prev) {
13780 prev->next = next;
13781 } else {
13782 /*
13783 * Because we drop port mutex while doing aborts for
13784 * packets, we can't rely on reset_list pointing to
13785 * our head
13786 */
13787 if (cur == pptr->port_reset_list) {
13788 pptr->port_reset_list = next;
13789 } else {
13790 struct fcp_reset_elem *which;
13791
13792 which = pptr->port_reset_list;
13793 while (which && which->next != cur) {
13794 which = which->next;
13795 }
13796 ASSERT(which != NULL);
13797
13798 which->next = next;
13799 prev = which;
13800 }
13801 }
13802
13803 kmem_free(cur, sizeof (*cur));
13804
13805 if (tgt_cnt == ptgt->tgt_change_cnt) {
13806 mutex_enter(&ptgt->tgt_mutex);
13807 if (level == RESET_TARGET) {
13808 fcp_update_tgt_state(ptgt,
13809 FCP_RESET, FCP_LUN_BUSY);
13810 } else {
13811 fcp_update_lun_state(plun,
13812 FCP_RESET, FCP_LUN_BUSY);
13813 }
13814 mutex_exit(&ptgt->tgt_mutex);
13815
13816 mutex_exit(&pptr->port_mutex);
13817 fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
13818 mutex_enter(&pptr->port_mutex);
13819 }
13820 }
13821 }
13822
13823
/*
 * Function: fcp_abort_all
 *
 * Description: Completes or aborts every command pending for a target
 *	(ttgt) or a single LUN (rlun) after a reset.  Commands still
 *	sitting on the port overflow queue are simply unlinked and
 *	completed with CMD_RESET/STAT_DEV_RESET.  Commands already issued
 *	to the FCA are aborted with fc_ulp_abort(); if the abort fails,
 *	the command is marked FCP_PKT_ABORTING and parked on the overflow
 *	queue with FCP_INVALID_TIMEOUT so fcp_watch() fails it later.
 *
 * Argument: *pptr	FCP port.
 *	*ttgt	target being reset (NULL for a LUN-level reset).
 *	*rlun	LUN being reset (NULL for a target-level reset).
 *	tgt_cnt	target change count captured when the reset was queued;
 *		work is skipped if the target has changed since.
 *
 * Context: Called without port_mutex held (fcp_check_reset_delay drops
 *	it before calling us).
 */
static void
fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
    struct fcp_lun *rlun, int tgt_cnt)
{
	int rval;
	struct fcp_lun *tlun, *nlun;
	struct fcp_pkt *pcmd = NULL, *ncmd = NULL,
	    *cmd = NULL, *head = NULL,
	    *tail = NULL;

	/*
	 * Pass 1: pull every matching command off the overflow queue
	 * onto a private list (head/tail) while holding port_pkt_mutex.
	 */
	mutex_enter(&pptr->port_pkt_mutex);
	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
		struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
		struct fcp_tgt *ptgt = plun->lun_tgt;

		ncmd = cmd->cmd_next;

		/* Keep commands that belong to neither the target nor LUN. */
		if (ptgt != ttgt && plun != rlun) {
			pcmd = cmd;
			continue;
		}

		if (pcmd != NULL) {
			ASSERT(pptr->port_pkt_head != cmd);
			pcmd->cmd_next = ncmd;
		} else {
			ASSERT(cmd == pptr->port_pkt_head);
			pptr->port_pkt_head = ncmd;
		}
		if (pptr->port_pkt_tail == cmd) {
			ASSERT(cmd->cmd_next == NULL);
			pptr->port_pkt_tail = pcmd;
			if (pcmd != NULL) {
				pcmd->cmd_next = NULL;
			}
		}

		if (head == NULL) {
			head = tail = cmd;
		} else {
			ASSERT(tail != NULL);
			tail->cmd_next = cmd;
			tail = cmd;
		}
		cmd->cmd_next = NULL;
	}
	mutex_exit(&pptr->port_pkt_mutex);

	/*
	 * Pass 2: complete the collected commands with a reset status,
	 * but only if the target generation hasn't changed meanwhile.
	 */
	for (cmd = head; cmd != NULL; cmd = ncmd) {
		struct scsi_pkt *pkt = cmd->cmd_pkt;

		ncmd = cmd->cmd_next;
		ASSERT(pkt != NULL);

		mutex_enter(&pptr->port_mutex);
		if (ttgt->tgt_change_cnt == tgt_cnt) {
			mutex_exit(&pptr->port_mutex);
			cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
			pkt->pkt_reason = CMD_RESET;
			pkt->pkt_statistics |= STAT_DEV_RESET;
			cmd->cmd_state = FCP_PKT_IDLE;
			fcp_post_callback(cmd);
		} else {
			mutex_exit(&pptr->port_mutex);
		}
	}

	/*
	 * If the FCA will return all the commands in its queue then our
	 * work is easy, just return.
	 */

	if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
		return;
	}

	/*
	 * For RESET_LUN get hold of target pointer
	 */
	if (ttgt == NULL) {
		ASSERT(rlun != NULL);

		ttgt = rlun->lun_tgt;

		ASSERT(ttgt != NULL);
	}

	/*
	 * There are some severe race conditions here.
	 * While we are trying to abort the pkt, it might be completing
	 * so mark it aborted and if the abort does not succeed then
	 * handle it in the watch thread.
	 */
	mutex_enter(&ttgt->tgt_mutex);
	nlun = ttgt->tgt_lun;
	mutex_exit(&ttgt->tgt_mutex);
	while ((tlun = nlun) != NULL) {
		int restart = 0;
		/* For a LUN-level reset, only touch the requested LUN. */
		if (rlun && rlun != tlun) {
			mutex_enter(&ttgt->tgt_mutex);
			nlun = tlun->lun_next;
			mutex_exit(&ttgt->tgt_mutex);
			continue;
		}
		mutex_enter(&tlun->lun_mutex);
		cmd = tlun->lun_pkt_head;
		while (cmd != NULL) {
			if (cmd->cmd_state == FCP_PKT_ISSUED) {
				struct scsi_pkt *pkt;

				restart = 1;
				cmd->cmd_state = FCP_PKT_ABORTING;
				mutex_exit(&tlun->lun_mutex);
				rval = fc_ulp_abort(pptr->port_fp_handle,
				    cmd->cmd_fp_pkt, KM_SLEEP);
				if (rval == FC_SUCCESS) {
					pkt = cmd->cmd_pkt;
					pkt->pkt_reason = CMD_RESET;
					pkt->pkt_statistics |= STAT_DEV_RESET;
					cmd->cmd_state = FCP_PKT_IDLE;
					fcp_post_callback(cmd);
				} else {
					caddr_t msg;

					(void) fc_ulp_error(rval, &msg);

					/*
					 * This part is tricky. The abort
					 * failed and now the command could
					 * be completing. The cmd_state ==
					 * FCP_PKT_ABORTING should save
					 * us in fcp_cmd_callback. If we
					 * are already aborting ignore the
					 * command in fcp_cmd_callback.
					 * Here we leave this packet for 20
					 * sec to be aborted in the
					 * fcp_watch thread.
					 */
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Abort failed after reset %s",
					    msg);

					cmd->cmd_timeout =
					    fcp_watchdog_time +
					    cmd->cmd_pkt->pkt_time +
					    FCP_FAILED_DELAY;

					cmd->cmd_fp_pkt->pkt_timeout =
					    FCP_INVALID_TIMEOUT;
					/*
					 * This is a hack, cmd is put in the
					 * overflow queue so that it can be
					 * timed out finally
					 */
					cmd->cmd_flags |= CFLAG_IN_QUEUE;

					mutex_enter(&pptr->port_pkt_mutex);
					if (pptr->port_pkt_head) {
						ASSERT(pptr->port_pkt_tail
						    != NULL);
						pptr->port_pkt_tail->cmd_next
						    = cmd;
						pptr->port_pkt_tail = cmd;
					} else {
						ASSERT(pptr->port_pkt_tail
						    == NULL);
						pptr->port_pkt_head =
						    pptr->port_pkt_tail
						    = cmd;
					}
					cmd->cmd_next = NULL;
					mutex_exit(&pptr->port_pkt_mutex);
				}
				/*
				 * lun_mutex was dropped for the abort, so
				 * rescan the LUN's packet list from the top.
				 */
				mutex_enter(&tlun->lun_mutex);
				cmd = tlun->lun_pkt_head;
			} else {
				cmd = cmd->cmd_forw;
			}
		}
		mutex_exit(&tlun->lun_mutex);

		/*
		 * If any command was aborted above (restart), rescan the
		 * target's LUN list from the start, since it may have
		 * changed while the mutexes were dropped.
		 */
		mutex_enter(&ttgt->tgt_mutex);
		restart == 1 ? (nlun = ttgt->tgt_lun) : (nlun = tlun->lun_next);
		mutex_exit(&ttgt->tgt_mutex);

		mutex_enter(&pptr->port_mutex);
		if (tgt_cnt != ttgt->tgt_change_cnt) {
			/* Target changed underneath us; stop here. */
			mutex_exit(&pptr->port_mutex);
			return;
		} else {
			mutex_exit(&pptr->port_mutex);
		}
	}
}
14018
14019
14020 /*
14021 * unlink the soft state, returning the soft state found (if any)
14022 *
14023 * acquires and releases the global mutex
14024 */
14025 struct fcp_port *
14026 fcp_soft_state_unlink(struct fcp_port *pptr)
14027 {
14028 struct fcp_port *hptr; /* ptr index */
14029 struct fcp_port *tptr; /* prev hptr */
14030
14031 mutex_enter(&fcp_global_mutex);
14032 for (hptr = fcp_port_head, tptr = NULL;
14033 hptr != NULL;
14034 tptr = hptr, hptr = hptr->port_next) {
14035 if (hptr == pptr) {
14036 /* we found a match -- remove this item */
14037 if (tptr == NULL) {
14038 /* we're at the head of the list */
14039 fcp_port_head = hptr->port_next;
14040 } else {
14041 tptr->port_next = hptr->port_next;
14042 }
14043 break; /* success */
14044 }
14045 }
14046 if (fcp_port_head == NULL) {
14047 fcp_cleanup_blacklist(&fcp_lun_blacklist);
14048 }
14049 mutex_exit(&fcp_global_mutex);
14050 return (hptr);
14051 }
14052
14053
14054 /*
14055 * called by fcp_scsi_hba_tgt_init to find a LUN given a
14056 * WWN and a LUN number
14057 */
14058 /* ARGSUSED */
14059 static struct fcp_lun *
14060 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
14061 {
14062 int hash;
14063 struct fcp_tgt *ptgt;
14064 struct fcp_lun *plun;
14065
14066 ASSERT(mutex_owned(&pptr->port_mutex));
14067
14068 hash = FCP_HASH(wwn);
14069 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
14070 ptgt = ptgt->tgt_next) {
14071 if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
14072 sizeof (ptgt->tgt_port_wwn)) == 0) {
14073 mutex_enter(&ptgt->tgt_mutex);
14074 for (plun = ptgt->tgt_lun;
14075 plun != NULL;
14076 plun = plun->lun_next) {
14077 if (plun->lun_num == lun) {
14078 mutex_exit(&ptgt->tgt_mutex);
14079 return (plun);
14080 }
14081 }
14082 mutex_exit(&ptgt->tgt_mutex);
14083 return (NULL);
14084 }
14085 }
14086 return (NULL);
14087 }
14088
14089 /*
14090 * Function: fcp_prepare_pkt
14091 *
14092 * Description: This function prepares the SCSI cmd pkt, passed by the caller,
14093 * for fcp_start(). It binds the data or partially maps it.
14094 * Builds the FCP header and starts the initialization of the
14095 * Fibre Channel header.
14096 *
14097 * Argument: *pptr FCP port.
14098 * *cmd FCP packet.
14099 * *plun LUN the command will be sent to.
14100 *
14101 * Context: User, Kernel and Interrupt context.
14102 */
static void
fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
    struct fcp_lun *plun)
{
	fc_packet_t	*fpkt = cmd->cmd_fp_pkt;
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	struct fcp_cmd	*fcmd = &cmd->cmd_fcp_cmd;

	/* A completion routine is mandatory unless this is a polled command. */
	ASSERT(cmd->cmd_pkt->pkt_comp ||
	    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));

	if (cmd->cmd_pkt->pkt_numcookies) {
		/*
		 * Data phase present: derive the FCP_CMND transfer direction
		 * from the DMA binding direction of the SCSI packet.
		 */
		if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
			fcmd->fcp_cntl.cntl_read_data = 1;
			fcmd->fcp_cntl.cntl_write_data = 0;
			fpkt->pkt_tran_type = FC_PKT_FCP_READ;
		} else {
			fcmd->fcp_cntl.cntl_read_data = 0;
			fcmd->fcp_cntl.cntl_write_data = 1;
			fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
		}

		/* Hand the SCSI packet's DMA cookies to the FC packet. */
		fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;

		fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
		ASSERT(fpkt->pkt_data_cookie_cnt <=
		    pptr->port_data_dma_attr.dma_attr_sgllen);

		cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;

		/* FCA needs pkt_datalen to be set */
		fpkt->pkt_datalen = cmd->cmd_dmacount;
		fcmd->fcp_data_len = cmd->cmd_dmacount;
	} else {
		/* No data phase: plain command/response exchange. */
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 0;
		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
		fpkt->pkt_datalen = 0;
		fcmd->fcp_data_len = 0;
	}

	/* set up the Tagged Queuing type */
	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
	} else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
	} else {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	}

	fcmd->fcp_ent_addr = plun->lun_addr;

	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
		/* Copy the FCP_CMND into the FCA's DMA-able command buffer. */
		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
	} else {
		ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
	}

	/* Reset the SCSI packet's completion/accounting fields. */
	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
	cmd->cmd_pkt->pkt_state = 0;
	cmd->cmd_pkt->pkt_statistics = 0;
	cmd->cmd_pkt->pkt_resid = 0;

	cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;

	if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
		/* Polled I/O: no completion callback, no interrupt. */
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
		fpkt->pkt_comp = NULL;
	} else {
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
		if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
		}
		fpkt->pkt_comp = fcp_cmd_callback;
	}

	/* Tell the FCA when the port is dumping (e.g. crash dump I/O). */
	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & FCP_STATE_SUSPENDED) {
		fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
	}
	mutex_exit(&pptr->port_mutex);

	/* Start the FC frame header: route from our port to the target. */
	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;

	/*
	 * Save a few kernel cycles here
	 */
#ifndef	__lock_lint
	fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
#endif /* __lock_lint */
}
14198
/*
 * Complete a SCSI packet back to its consumer; scsi_hba_pkt_comp() takes
 * care of invoking the target driver's completion routine appropriately.
 */
static void
fcp_post_callback(struct fcp_pkt *cmd)
{
	scsi_hba_pkt_comp(cmd->cmd_pkt);
}
14204
14205
14206 /*
14207 * called to do polled I/O by fcp_start()
14208 *
14209 * return a transport status value, i.e. TRAN_ACCECPT for success
14210 */
14211 static int
14212 fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
14213 {
14214 int rval;
14215
14216 #ifdef DEBUG
14217 mutex_enter(&pptr->port_pkt_mutex);
14218 pptr->port_npkts++;
14219 mutex_exit(&pptr->port_pkt_mutex);
14220 #endif /* DEBUG */
14221
14222 if (cmd->cmd_fp_pkt->pkt_timeout) {
14223 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
14224 } else {
14225 cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
14226 }
14227
14228 ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);
14229
14230 cmd->cmd_state = FCP_PKT_ISSUED;
14231
14232 rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);
14233
14234 #ifdef DEBUG
14235 mutex_enter(&pptr->port_pkt_mutex);
14236 pptr->port_npkts--;
14237 mutex_exit(&pptr->port_pkt_mutex);
14238 #endif /* DEBUG */
14239
14240 cmd->cmd_state = FCP_PKT_IDLE;
14241
14242 switch (rval) {
14243 case FC_SUCCESS:
14244 if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
14245 fcp_complete_pkt(cmd->cmd_fp_pkt);
14246 rval = TRAN_ACCEPT;
14247 } else {
14248 rval = TRAN_FATAL_ERROR;
14249 }
14250 break;
14251
14252 case FC_TRAN_BUSY:
14253 rval = TRAN_BUSY;
14254 cmd->cmd_pkt->pkt_resid = 0;
14255 break;
14256
14257 case FC_BADPACKET:
14258 rval = TRAN_BADPKT;
14259 break;
14260
14261 default:
14262 rval = TRAN_FATAL_ERROR;
14263 break;
14264 }
14265
14266 return (rval);
14267 }
14268
14269
14270 /*
14271 * called by some of the following transport-called routines to convert
14272 * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14273 */
14274 static struct fcp_port *
14275 fcp_dip2port(dev_info_t *dip)
14276 {
14277 int instance;
14278
14279 instance = ddi_get_instance(dip);
14280 return (ddi_get_soft_state(fcp_softstate, instance));
14281 }
14282
14283
14284 /*
14285 * called internally to return a LUN given a dip
14286 */
14287 struct fcp_lun *
14288 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14289 {
14290 struct fcp_tgt *ptgt;
14291 struct fcp_lun *plun;
14292 int i;
14293
14294
14295 ASSERT(mutex_owned(&pptr->port_mutex));
14296
14297 for (i = 0; i < FCP_NUM_HASH; i++) {
14298 for (ptgt = pptr->port_tgt_hash_table[i];
14299 ptgt != NULL;
14300 ptgt = ptgt->tgt_next) {
14301 mutex_enter(&ptgt->tgt_mutex);
14302 for (plun = ptgt->tgt_lun; plun != NULL;
14303 plun = plun->lun_next) {
14304 mutex_enter(&plun->lun_mutex);
14305 if (plun->lun_cip == cip) {
14306 mutex_exit(&plun->lun_mutex);
14307 mutex_exit(&ptgt->tgt_mutex);
14308 return (plun); /* match found */
14309 }
14310 mutex_exit(&plun->lun_mutex);
14311 }
14312 mutex_exit(&ptgt->tgt_mutex);
14313 }
14314 }
14315 return (NULL); /* no LUN found */
14316 }
14317
14318 /*
14319 * pass an element to the hotplug list, kick the hotplug thread
14320 * and wait for the element to get processed by the hotplug thread.
14321 * on return the element is freed.
14322 *
14323 * return zero success and non-zero on failure
14324 *
14325 * acquires/releases the target mutex
14326 *
14327 */
14328 static int
14329 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14330 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14331 {
14332 struct fcp_hp_elem *elem;
14333 int rval;
14334
14335 mutex_enter(&plun->lun_tgt->tgt_mutex);
14336 if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14337 what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14338 mutex_exit(&plun->lun_tgt->tgt_mutex);
14339 fcp_log(CE_CONT, pptr->port_dip,
14340 "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14341 what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14342 return (NDI_FAILURE);
14343 }
14344 mutex_exit(&plun->lun_tgt->tgt_mutex);
14345 mutex_enter(&elem->mutex);
14346 if (elem->wait) {
14347 while (elem->wait) {
14348 cv_wait(&elem->cv, &elem->mutex);
14349 }
14350 }
14351 rval = (elem->result);
14352 mutex_exit(&elem->mutex);
14353 mutex_destroy(&elem->mutex);
14354 cv_destroy(&elem->cv);
14355 kmem_free(elem, sizeof (struct fcp_hp_elem));
14356 return (rval);
14357 }
14358
14359 /*
14360 * pass an element to the hotplug list, and then
14361 * kick the hotplug thread
14362 *
14363 * return Boolean success, i.e. non-zero if all goes well, else zero on error
14364 *
14365 * acquires/releases the hotplug mutex
14366 *
14367 * called with the target mutex owned
14368 *
14369 * memory acquired in NOSLEEP mode
14370 * NOTE: if wait is set to 1 then the caller is responsible for waiting on
14371 * for the hp daemon to process the request and is responsible for
14372 * freeing the element
14373 */
14374 static struct fcp_hp_elem *
14375 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14376 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14377 {
14378 struct fcp_hp_elem *elem;
14379 dev_info_t *pdip;
14380
14381 ASSERT(pptr != NULL);
14382 ASSERT(plun != NULL);
14383 ASSERT(plun->lun_tgt != NULL);
14384 ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14385
14386 /* create space for a hotplug element */
14387 if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14388 == NULL) {
14389 fcp_log(CE_WARN, NULL,
14390 "!can't allocate memory for hotplug element");
14391 return (NULL);
14392 }
14393
14394 /* fill in hotplug element */
14395 elem->port = pptr;
14396 elem->lun = plun;
14397 elem->cip = cip;
14398 elem->old_lun_mpxio = plun->lun_mpxio;
14399 elem->what = what;
14400 elem->flags = flags;
14401 elem->link_cnt = link_cnt;
14402 elem->tgt_cnt = tgt_cnt;
14403 elem->wait = wait;
14404 mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14405 cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14406
14407 /* schedule the hotplug task */
14408 pdip = pptr->port_dip;
14409 mutex_enter(&plun->lun_mutex);
14410 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14411 plun->lun_event_count++;
14412 elem->event_cnt = plun->lun_event_count;
14413 }
14414 mutex_exit(&plun->lun_mutex);
14415 if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14416 (void *)elem, KM_NOSLEEP) == NULL) {
14417 mutex_enter(&plun->lun_mutex);
14418 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14419 plun->lun_event_count--;
14420 }
14421 mutex_exit(&plun->lun_mutex);
14422 kmem_free(elem, sizeof (*elem));
14423 return (0);
14424 }
14425
14426 return (elem);
14427 }
14428
14429
/*
 * Retry a previously queued command: if the LUN is usable and the port is
 * not onlining, re-prepare the packet and hand it back to the transport;
 * otherwise (or on transport failure) put it back on the port's packet
 * queue via fcp_queue_pkt().
 */
static void
fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
{
	int			rval;
	struct scsi_address	*ap;
	struct fcp_lun		*plun;
	struct fcp_tgt		*ptgt;
	fc_packet_t		*fpkt;

	ap = &cmd->cmd_pkt->pkt_address;
	plun = ADDR2LUN(ap);
	ptgt = plun->lun_tgt;

	/* Only packets sitting on the port queue may be retransported. */
	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);

	cmd->cmd_state = FCP_PKT_IDLE;

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
	    (!(pptr->port_state & FCP_STATE_ONLINING))) {
		fc_ulp_rscn_info_t *rscnp;

		cmd->cmd_state = FCP_PKT_ISSUED;

		/*
		 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
		 * originally NULL, hence we try to set it to the pd pointed
		 * to by the SCSI device we're trying to get to.
		 */

		fpkt = cmd->cmd_fp_pkt;
		if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
			fpkt->pkt_pd = ptgt->tgt_pd_handle;
			/*
			 * We need to notify the transport that we now have a
			 * reference to the remote port handle.
			 */
			fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
		}

		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		/* Retries are never issued in polled mode. */
		ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);

		/* prepare the packet */

		fcp_prepare_pkt(pptr, cmd, plun);

		rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
		    pkt_ulp_rscn_infop;

		/* Zero pkt_time means no watchdog deadline for this command. */
		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
		    fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;

		if (rscnp != NULL) {
			/* Stamp the packet with the current RSCN count. */
			rscnp->ulp_rscn_count =
			    fc_ulp_get_rscn_count(pptr->
			    port_fp_handle);
		}

		rval = fcp_transport(pptr->port_fp_handle,
		    cmd->cmd_fp_pkt, 0);

		if (rval == FC_SUCCESS) {
			return;
		}
		/* Transport refused the packet: drop back to IDLE state. */
		cmd->cmd_state &= ~FCP_PKT_ISSUED;
	} else {
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
	}

	/* LUN busy/offline, port onlining, or transport failed: re-queue. */
	fcp_queue_pkt(pptr, cmd);
}
14506
14507
14508 static void
14509 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14510 {
14511 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14512
14513 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14514 cmd->cmd_state = FCP_PKT_IDLE;
14515
14516 cmd->cmd_pkt->pkt_reason = reason;
14517 cmd->cmd_pkt->pkt_state = 0;
14518 cmd->cmd_pkt->pkt_statistics = statistics;
14519
14520 fcp_post_callback(cmd);
14521 }
14522
14523 /*
14524 * Function: fcp_queue_pkt
14525 *
14526 * Description: This function queues the packet passed by the caller into
14527 * the list of packets of the FCP port.
14528 *
14529 * Argument: *pptr FCP port.
14530 * *cmd FCP packet to queue.
14531 *
14532 * Return Value: None
14533 *
14534 * Context: User, Kernel and Interrupt context.
14535 */
14536 static void
14537 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14538 {
14539 ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == NULL);
14540
14541 mutex_enter(&pptr->port_pkt_mutex);
14542 cmd->cmd_flags |= CFLAG_IN_QUEUE;
14543 ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14544 cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14545
14546 /*
14547 * zero pkt_time means hang around for ever
14548 */
14549 if (cmd->cmd_pkt->pkt_time) {
14550 if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14551 cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14552 } else {
14553 /*
14554 * Indicate the watch thread to fail the
14555 * command by setting it to highest value
14556 */
14557 cmd->cmd_timeout = fcp_watchdog_time;
14558 cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14559 }
14560 }
14561
14562 if (pptr->port_pkt_head) {
14563 ASSERT(pptr->port_pkt_tail != NULL);
14564
14565 pptr->port_pkt_tail->cmd_next = cmd;
14566 pptr->port_pkt_tail = cmd;
14567 } else {
14568 ASSERT(pptr->port_pkt_tail == NULL);
14569
14570 pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14571 }
14572 cmd->cmd_next = NULL;
14573 mutex_exit(&pptr->port_pkt_mutex);
14574 }
14575
14576 /*
14577 * Function: fcp_update_targets
14578 *
14579 * Description: This function applies the specified change of state to all
14580 * the targets listed. The operation applied is 'set'.
14581 *
14582 * Argument: *pptr FCP port.
14583 * *dev_list Array of fc_portmap_t structures.
14584 * count Length of dev_list.
14585 * state State bits to update.
14586 * cause Reason for the update.
14587 *
14588 * Return Value: None
14589 *
14590 * Context: User, Kernel and Interrupt context.
14591 * The mutex pptr->port_mutex must be held.
14592 */
14593 static void
14594 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14595 uint32_t count, uint32_t state, int cause)
14596 {
14597 fc_portmap_t *map_entry;
14598 struct fcp_tgt *ptgt;
14599
14600 ASSERT(MUTEX_HELD(&pptr->port_mutex));
14601
14602 while (count--) {
14603 map_entry = &(dev_list[count]);
14604 ptgt = fcp_lookup_target(pptr,
14605 (uchar_t *)&(map_entry->map_pwwn));
14606 if (ptgt == NULL) {
14607 continue;
14608 }
14609
14610 mutex_enter(&ptgt->tgt_mutex);
14611 ptgt->tgt_trace = 0;
14612 ptgt->tgt_change_cnt++;
14613 ptgt->tgt_statec_cause = cause;
14614 ptgt->tgt_tmp_cnt = 1;
14615 fcp_update_tgt_state(ptgt, FCP_SET, state);
14616 mutex_exit(&ptgt->tgt_mutex);
14617 }
14618 }
14619
/*
 * Convenience wrapper around fcp_call_finish_init_held() that acquires
 * and releases pptr->port_mutex around the call.
 */
static int
fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int lcount, int tcount, int cause)
{
	int rval;

	mutex_enter(&pptr->port_mutex);
	rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
	mutex_exit(&pptr->port_mutex);

	return (rval);
}
14632
14633
/*
 * Account for one completed unit of target discovery work and decide
 * whether target-level (fcp_finish_tgt) and/or port-level (fcp_finish_init)
 * finalization should run.  Returns FCP_DEV_CHANGE if the target's change
 * count no longer matches tcount, otherwise FCP_NO_CHANGE.
 * Caller must hold pptr->port_mutex.
 */
static int
fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int lcount, int tcount, int cause)
{
	int	finish_init = 0;
	int	finish_tgt = 0;
	int	do_finish_init = 0;
	int	rval = FCP_NO_CHANGE;

	/* Only link-level events may trigger port-level finalization. */
	if (cause == FCP_CAUSE_LINK_CHANGE ||
	    cause == FCP_CAUSE_LINK_DOWN) {
		do_finish_init = 1;
	}

	if (ptgt != NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0,
		    "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
		    " cause = %d, d_id = 0x%x, tgt_done = %d",
		    pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
		    pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
		    ptgt->tgt_d_id, ptgt->tgt_done);

		mutex_enter(&ptgt->tgt_mutex);

		if (tcount && (ptgt->tgt_change_cnt != tcount)) {
			/*
			 * The target changed under us (superseding event);
			 * report it, and finalize at most once per target.
			 */
			rval = FCP_DEV_CHANGE;
			if (do_finish_init && ptgt->tgt_done == 0) {
				ptgt->tgt_done++;
				finish_init = 1;
			}
		} else {
			/* Last outstanding piece of work for this target? */
			if (--ptgt->tgt_tmp_cnt <= 0) {
				ptgt->tgt_tmp_cnt = 0;
				finish_tgt = 1;

				if (do_finish_init) {
					finish_init = 1;
				}
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0,
		    "Call Finish Init for NO target");

		if (do_finish_init) {
			finish_init = 1;
		}
	}

	if (finish_tgt) {
		ASSERT(ptgt != NULL);

		mutex_enter(&ptgt->tgt_mutex);
#ifdef	DEBUG
		/* Record who zeroed tgt_tmp_cnt, for post-mortem debugging. */
		bzero(ptgt->tgt_tmp_cnt_stack,
		    sizeof (ptgt->tgt_tmp_cnt_stack));

		ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
		    FCP_STACK_DEPTH);
#endif /* DEBUG */
		mutex_exit(&ptgt->tgt_mutex);

		(void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
	}

	if (finish_init && lcount == pptr->port_link_cnt) {
		/* Last target of this link event: finish port discovery. */
		ASSERT(pptr->port_tmp_cnt > 0);
		if (--pptr->port_tmp_cnt == 0) {
			fcp_finish_init(pptr);
		}
	} else if (lcount != pptr->port_link_cnt) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_call_finish_init_held,1: state change occured"
		    " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
	}

	return (rval);
}
14716
14717 static void
14718 fcp_reconfigure_luns(void * tgt_handle)
14719 {
14720 uint32_t dev_cnt;
14721 fc_portmap_t *devlist;
14722 struct fcp_tgt *ptgt = (struct fcp_tgt *)tgt_handle;
14723 struct fcp_port *pptr = ptgt->tgt_port;
14724
14725 /*
14726 * If the timer that fires this off got canceled too late, the
14727 * target could have been destroyed.
14728 */
14729
14730 if (ptgt->tgt_tid == NULL) {
14731 return;
14732 }
14733
14734 devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
14735 if (devlist == NULL) {
14736 fcp_log(CE_WARN, pptr->port_dip,
14737 "!fcp%d: failed to allocate for portmap",
14738 pptr->port_instance);
14739 return;
14740 }
14741
14742 dev_cnt = 1;
14743 devlist->map_pd = ptgt->tgt_pd_handle;
14744 devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
14745 devlist->map_did.port_id = ptgt->tgt_d_id;
14746
14747 bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
14748 bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);
14749
14750 devlist->map_state = PORT_DEVICE_LOGGED_IN;
14751 devlist->map_type = PORT_DEVICE_REPORTLUN_CHANGED;
14752 devlist->map_flags = 0;
14753
14754 fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
14755 pptr->port_topology, devlist, dev_cnt, pptr->port_id);
14756
14757 /*
14758 * Clear the tgt_tid after no more references to
14759 * the fcp_tgt
14760 */
14761 mutex_enter(&ptgt->tgt_mutex);
14762 ptgt->tgt_tid = NULL;
14763 mutex_exit(&ptgt->tgt_mutex);
14764
14765 kmem_free(devlist, sizeof (*devlist));
14766 }
14767
14768
14769 static void
14770 fcp_free_targets(struct fcp_port *pptr)
14771 {
14772 int i;
14773 struct fcp_tgt *ptgt;
14774
14775 mutex_enter(&pptr->port_mutex);
14776 for (i = 0; i < FCP_NUM_HASH; i++) {
14777 ptgt = pptr->port_tgt_hash_table[i];
14778 while (ptgt != NULL) {
14779 struct fcp_tgt *next_tgt = ptgt->tgt_next;
14780
14781 fcp_free_target(ptgt);
14782 ptgt = next_tgt;
14783 }
14784 }
14785 mutex_exit(&pptr->port_mutex);
14786 }
14787
14788
14789 static void
14790 fcp_free_target(struct fcp_tgt *ptgt)
14791 {
14792 struct fcp_lun *plun;
14793 timeout_id_t tid;
14794
14795 mutex_enter(&ptgt->tgt_mutex);
14796 tid = ptgt->tgt_tid;
14797
14798 /*
14799 * Cancel any pending timeouts for this target.
14800 */
14801
14802 if (tid != NULL) {
14803 /*
14804 * Set tgt_tid to NULL first to avoid a race in the callback.
14805 * If tgt_tid is NULL, the callback will simply return.
14806 */
14807 ptgt->tgt_tid = NULL;
14808 mutex_exit(&ptgt->tgt_mutex);
14809 (void) untimeout(tid);
14810 mutex_enter(&ptgt->tgt_mutex);
14811 }
14812
14813 plun = ptgt->tgt_lun;
14814 while (plun != NULL) {
14815 struct fcp_lun *next_lun = plun->lun_next;
14816
14817 fcp_dealloc_lun(plun);
14818 plun = next_lun;
14819 }
14820
14821 mutex_exit(&ptgt->tgt_mutex);
14822 fcp_dealloc_tgt(ptgt);
14823 }
14824
14825 /*
14826 * Function: fcp_is_retryable
14827 *
14828 * Description: Indicates if the internal packet is retryable.
14829 *
14830 * Argument: *icmd FCP internal packet.
14831 *
14832 * Return Value: 0 Not retryable
14833 * 1 Retryable
14834 *
14835 * Context: User, Kernel and Interrupt context
14836 */
14837 static int
14838 fcp_is_retryable(struct fcp_ipkt *icmd)
14839 {
14840 if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14841 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
14842 return (0);
14843 }
14844
14845 return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14846 icmd->ipkt_port->port_deadline) ? 1 : 0);
14847 }
14848
14849 /*
14850 * Function: fcp_create_on_demand
14851 *
14852 * Argument: *pptr FCP port.
14853 * *pwwn Port WWN.
14854 *
14855 * Return Value: 0 Success
14856 * EIO
14857 * ENOMEM
14858 * EBUSY
14859 * EINVAL
14860 *
14861 * Context: User and Kernel context
14862 */
14863 static int
14864 fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
14865 {
14866 int wait_ms;
14867 int tcount;
14868 int lcount;
14869 int ret;
14870 int error;
14871 int rval = EIO;
14872 int ntries;
14873 fc_portmap_t *devlist;
14874 opaque_t pd;
14875 struct fcp_lun *plun;
14876 struct fcp_tgt *ptgt;
14877 int old_manual = 0;
14878
14879 /* Allocates the fc_portmap_t structure. */
14880 devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);
14881
14882 /*
14883 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown
14884 * in the commented statement below:
14885 *
14886 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
14887 *
14888 * Below, the deadline for the discovery process is set.
14889 */
14890 mutex_enter(&pptr->port_mutex);
14891 pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
14892 mutex_exit(&pptr->port_mutex);
14893
14894 /*
14895 * We try to find the remote port based on the WWN provided by the
14896 * caller. We actually ask fp/fctl if it has it.
14897 */
14898 pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
14899 (la_wwn_t *)pwwn, &error, 1);
14900
14901 if (pd == NULL) {
14902 kmem_free(devlist, sizeof (*devlist));
14903 return (rval);
14904 }
14905
14906 /*
14907 * The remote port was found. We ask fp/fctl to update our
14908 * fc_portmap_t structure.
14909 */
14910 ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
14911 (la_wwn_t *)pwwn, devlist);
14912 if (ret != FC_SUCCESS) {
14913 kmem_free(devlist, sizeof (*devlist));
14914 return (rval);
14915 }
14916
14917 /*
14918 * The map flag field is set to indicates that the creation is being
14919 * done at the user request (Ioclt probably luxadm or cfgadm).
14920 */
14921 devlist->map_type = PORT_DEVICE_USER_CREATE;
14922
14923 mutex_enter(&pptr->port_mutex);
14924
14925 /*
14926 * We check to see if fcp already has a target that describes the
14927 * device being created. If not it is created.
14928 */
14929 ptgt = fcp_lookup_target(pptr, pwwn);
14930 if (ptgt == NULL) {
14931 lcount = pptr->port_link_cnt;
14932 mutex_exit(&pptr->port_mutex);
14933
14934 ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
14935 if (ptgt == NULL) {
14936 fcp_log(CE_WARN, pptr->port_dip,
14937 "!FC target allocation failed");
14938 return (ENOMEM);
14939 }
14940
14941 mutex_enter(&pptr->port_mutex);
14942 }
14943
14944 mutex_enter(&ptgt->tgt_mutex);
14945 ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
14946 ptgt->tgt_tmp_cnt = 1;
14947 ptgt->tgt_device_created = 0;
14948 /*
14949 * If fabric and auto config is set but the target was
14950 * manually unconfigured then reset to the manual_config_only to
14951 * 0 so the device will get configured.
14952 */
14953 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14954 fcp_enable_auto_configuration &&
14955 ptgt->tgt_manual_config_only == 1) {
14956 old_manual = 1;
14957 ptgt->tgt_manual_config_only = 0;
14958 }
14959 mutex_exit(&ptgt->tgt_mutex);
14960
14961 fcp_update_targets(pptr, devlist, 1,
14962 FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);
14963
14964 lcount = pptr->port_link_cnt;
14965 tcount = ptgt->tgt_change_cnt;
14966
14967 if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
14968 tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
14969 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14970 fcp_enable_auto_configuration && old_manual) {
14971 mutex_enter(&ptgt->tgt_mutex);
14972 ptgt->tgt_manual_config_only = 1;
14973 mutex_exit(&ptgt->tgt_mutex);
14974 }
14975
14976 if (pptr->port_link_cnt != lcount ||
14977 ptgt->tgt_change_cnt != tcount) {
14978 rval = EBUSY;
14979 }
14980 mutex_exit(&pptr->port_mutex);
14981
14982 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14983 FCP_BUF_LEVEL_3, 0,
14984 "fcp_create_on_demand: mapflags ptgt=%x, "
14985 "lcount=%x::port_link_cnt=%x, "
14986 "tcount=%x: tgt_change_cnt=%x, rval=%x",
14987 ptgt, lcount, pptr->port_link_cnt,
14988 tcount, ptgt->tgt_change_cnt, rval);
14989 return (rval);
14990 }
14991
14992 /*
14993 * Due to lack of synchronization mechanisms, we perform
14994 * periodic monitoring of our request; Because requests
14995 * get dropped when another one supercedes (either because
14996 * of a link change or a target change), it is difficult to
14997 * provide a clean synchronization mechanism (such as a
14998 * semaphore or a conditional variable) without exhaustively
14999 * rewriting the mainline discovery code of this driver.
15000 */
15001 wait_ms = 500;
15002
15003 ntries = fcp_max_target_retries;
15004
15005 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15006 FCP_BUF_LEVEL_3, 0,
15007 "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
15008 "lcount=%x::port_link_cnt=%x, "
15009 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15010 "tgt_tmp_cnt =%x",
15011 ntries, ptgt, lcount, pptr->port_link_cnt,
15012 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15013 ptgt->tgt_tmp_cnt);
15014
15015 mutex_enter(&ptgt->tgt_mutex);
15016 while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
15017 ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
15018 mutex_exit(&ptgt->tgt_mutex);
15019 mutex_exit(&pptr->port_mutex);
15020
15021 delay(drv_usectohz(wait_ms * 1000));
15022
15023 mutex_enter(&pptr->port_mutex);
15024 mutex_enter(&ptgt->tgt_mutex);
15025 }
15026
15027
15028 if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
15029 rval = EBUSY;
15030 } else {
15031 if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
15032 FCP_TGT_NODE_PRESENT) {
15033 rval = 0;
15034 }
15035 }
15036
15037 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15038 FCP_BUF_LEVEL_3, 0,
15039 "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
15040 "lcount=%x::port_link_cnt=%x, "
15041 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15042 "tgt_tmp_cnt =%x",
15043 ntries, ptgt, lcount, pptr->port_link_cnt,
15044 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15045 ptgt->tgt_tmp_cnt);
15046
15047 if (rval) {
15048 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15049 fcp_enable_auto_configuration && old_manual) {
15050 ptgt->tgt_manual_config_only = 1;
15051 }
15052 mutex_exit(&ptgt->tgt_mutex);
15053 mutex_exit(&pptr->port_mutex);
15054 kmem_free(devlist, sizeof (*devlist));
15055
15056 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15057 FCP_BUF_LEVEL_3, 0,
15058 "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
15059 "lcount=%x::port_link_cnt=%x, "
15060 "tcount=%x::tgt_change_cnt=%x, rval=%x, "
15061 "tgt_device_created=%x, tgt D_ID=%x",
15062 ntries, ptgt, lcount, pptr->port_link_cnt,
15063 tcount, ptgt->tgt_change_cnt, rval,
15064 ptgt->tgt_device_created, ptgt->tgt_d_id);
15065 return (rval);
15066 }
15067
15068 if ((plun = ptgt->tgt_lun) != NULL) {
15069 tcount = plun->lun_tgt->tgt_change_cnt;
15070 } else {
15071 rval = EINVAL;
15072 }
15073 lcount = pptr->port_link_cnt;
15074
15075 /*
15076 * Configuring the target with no LUNs will fail. We
15077 * should reset the node state so that it is not
15078 * automatically configured when the LUNs are added
15079 * to this target.
15080 */
15081 if (ptgt->tgt_lun_cnt == 0) {
15082 ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
15083 }
15084 mutex_exit(&ptgt->tgt_mutex);
15085 mutex_exit(&pptr->port_mutex);
15086
15087 while (plun) {
15088 child_info_t *cip;
15089
15090 mutex_enter(&plun->lun_mutex);
15091 cip = plun->lun_cip;
15092 mutex_exit(&plun->lun_mutex);
15093
15094 mutex_enter(&ptgt->tgt_mutex);
15095 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
15096 mutex_exit(&ptgt->tgt_mutex);
15097
15098 rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
15099 FCP_ONLINE, lcount, tcount,
15100 NDI_ONLINE_ATTACH);
15101 if (rval != NDI_SUCCESS) {
15102 FCP_TRACE(fcp_logq,
15103 pptr->port_instbuf, fcp_trace,
15104 FCP_BUF_LEVEL_3, 0,
15105 "fcp_create_on_demand: "
15106 "pass_to_hp_and_wait failed "
15107 "rval=%x", rval);
15108 rval = EIO;
15109 } else {
15110 mutex_enter(&LUN_TGT->tgt_mutex);
15111 plun->lun_state &= ~(FCP_LUN_OFFLINE |
15112 FCP_LUN_BUSY);
15113 mutex_exit(&LUN_TGT->tgt_mutex);
15114 }
15115 mutex_enter(&ptgt->tgt_mutex);
15116 }
15117
15118 plun = plun->lun_next;
15119 mutex_exit(&ptgt->tgt_mutex);
15120 }
15121
15122 kmem_free(devlist, sizeof (*devlist));
15123
15124 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15125 fcp_enable_auto_configuration && old_manual) {
15126 mutex_enter(&ptgt->tgt_mutex);
15127 /* if successful then set manual to 0 */
15128 if (rval == 0) {
15129 ptgt->tgt_manual_config_only = 0;
15130 } else {
15131 /* reset to 1 so the user has to do the config */
15132 ptgt->tgt_manual_config_only = 1;
15133 }
15134 mutex_exit(&ptgt->tgt_mutex);
15135 }
15136
15137 return (rval);
15138 }
15139
15140
15141 static void
15142 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
15143 {
15144 int count;
15145 uchar_t byte;
15146
15147 count = 0;
15148 while (*string) {
15149 byte = FCP_ATOB(*string); string++;
15150 byte = byte << 4 | FCP_ATOB(*string); string++;
15151 bytes[count++] = byte;
15152
15153 if (count >= byte_len) {
15154 break;
15155 }
15156 }
15157 }
15158
15159 static void
15160 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
15161 {
15162 int i;
15163
15164 for (i = 0; i < FC_WWN_SIZE; i++) {
15165 (void) sprintf(string + (i * 2),
15166 "%02x", wwn[i]);
15167 }
15168
15169 }
15170
/*
 * fcp_print_error
 *	Build and log a warning describing a failed internal packet
 *	(REPORT LUN, INQUIRY, INQUIRY-83, PLOGI or PRLI).
 *
 *	The message is assembled in a 256-byte scratch buffer.  The
 *	"%%x"-style sequences below intentionally leave literal "%x"
 *	conversions in the buffer: 'buf' itself is later passed to
 *	fcp_log() as the format string, together with the values to be
 *	formatted.
 */
static void
fcp_print_error(fc_packet_t *fpkt)
{
	struct fcp_ipkt *icmd = (struct fcp_ipkt *)
	    fpkt->pkt_ulp_private;
	struct fcp_port *pptr;
	struct fcp_tgt *ptgt;
	struct fcp_lun *plun;
	caddr_t buf;
	int scsi_cmd = 0;

	ptgt = icmd->ipkt_tgt;
	plun = icmd->ipkt_lun;
	pptr = ptgt->tgt_port;

	/* Logging is best-effort: silently drop the message if no memory. */
	buf = kmem_zalloc(256, KM_NOSLEEP);
	if (buf == NULL) {
		return;
	}

	/* scsi_cmd is set for FCP commands, left 0 for ELS (PLOGI/PRLI). */
	switch (icmd->ipkt_opcode) {
	case SCMD_REPORT_LUN:
		(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY_PAGE83:
		(void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY:
		(void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case LA_ELS_PLOGI:
		(void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
		break;

	case LA_ELS_PRLI:
		(void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
		break;
	}

	if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
		/*
		 * The transport delivered the command, so the failure is in
		 * the FCP response payload; decode it.
		 */
		struct fcp_rsp response, *rsp;
		uchar_t asc, ascq;
		caddr_t sense_key = NULL;
		struct fcp_rsp_info fcp_rsp_err, *bep;

		if (icmd->ipkt_nodma) {
			/* Response buffer is directly addressable. */
			rsp = (struct fcp_rsp *)fpkt->pkt_resp;
			bep = (struct fcp_rsp_info *)((caddr_t)rsp +
			    sizeof (struct fcp_rsp));
		} else {
			/* Copy the response out of the DMA buffer. */
			rsp = &response;
			bep = &fcp_rsp_err;

			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));

			FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
			    bep, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp_info));
		}


		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
			/* Malformed response header: report and bail out. */
			(void) sprintf(buf + strlen(buf),
			    " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
			    " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
			    " senselen=%%x. Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
			    rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
			    rsp->fcp_u.fcp_status.reserved_1,
			    rsp->fcp_response_len, rsp->fcp_sense_len);

			kmem_free(buf, 256);
			return;
		}

		/* Append the FCP response code if the target supplied one. */
		if (rsp->fcp_u.fcp_status.rsp_len_set &&
		    bep->rsp_code != FCP_NO_FAILURE) {
			(void) sprintf(buf + strlen(buf),
			    " FCP Response code = 0x%x", bep->rsp_code);
		}

		if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
			/* CHECK CONDITION: decode sense key/ASC/ASCQ. */
			struct scsi_extended_sense sense_info, *sense_ptr;

			if (icmd->ipkt_nodma) {
				sense_ptr = (struct scsi_extended_sense *)
				    ((caddr_t)fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len);
			} else {
				sense_ptr = &sense_info;

				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len, &sense_info,
				    fpkt->pkt_resp_acc,
				    sizeof (struct scsi_extended_sense));
			}

			if (sense_ptr->es_key < NUM_SENSE_KEYS +
			    NUM_IMPL_SENSE_KEYS) {
				sense_key = sense_keys[sense_ptr->es_key];
			} else {
				sense_key = "Undefined";
			}

			asc = sense_ptr->es_add_code;
			ascq = sense_ptr->es_qual_code;

			(void) sprintf(buf + strlen(buf),
			    ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
			    " Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, sense_key,
			    asc, ascq);
		} else {
			/* Non-CHECK SCSI status: just report the status. */
			(void) sprintf(buf + strlen(buf),
			    " : SCSI status=%%x. Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num,
			    rsp->fcp_u.fcp_status.scsi_status);
		}
	} else {
		/* Transport-level failure: report packet state and reason. */
		caddr_t state, reason, action, expln;

		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
		    &action, &expln);

		(void) sprintf(buf + strlen(buf), ": State:%%s,"
		    " Reason:%%s. Giving up");

		if (scsi_cmd) {
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, state, reason);
		} else {
			/* ELS messages carry no LUN number. */
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, state, reason);
		}
	}

	kmem_free(buf, 256);
}
15327
15328
/*
 * fcp_handle_ipkt_errors
 *	Decide how to recover from a failed internal packet (icmd) whose
 *	transport call returned 'rval'.  'op' is a human-readable name for
 *	the operation, used only in the failure log message.
 *
 *	Returns DDI_SUCCESS when the packet was requeued for retry (or a
 *	failed PRLI was turned into a PLOGI), DDI_FAILURE when the caller
 *	must give up on this packet.
 */
static int
fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, int rval, caddr_t op)
{
	int	ret = DDI_FAILURE;
	char	*error;

	switch (rval) {
	case FC_DEVICE_BUSY_NEW_RSCN:
		/*
		 * This means that there was a new RSCN that the transport
		 * knows about (which the ULP *may* know about too) but the
		 * pkt that was sent down was related to an older RSCN. So, we
		 * are just going to reset the retry count and deadline and
		 * continue to retry. The idea is that transport is currently
		 * working on the new RSCN and will soon let the ULPs know
		 * about it and when it does the existing logic will kick in
		 * where it will change the tcount to indicate that something
		 * changed on the target. So, rediscovery will start and there
		 * will not be an infinite retry.
		 *
		 * For a full flow of how the RSCN info is transferred back and
		 * forth, see fp.c
		 */
		icmd->ipkt_retries = 0;
		icmd->ipkt_port->port_deadline = fcp_watchdog_time +
		    FCP_ICMD_DEADLINE;

		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		/* FALLTHROUGH */

	case FC_STATEC_BUSY:
	case FC_DEVICE_BUSY:
	case FC_PBUSY:
	case FC_FBUSY:
	case FC_TRAN_BUSY:
	case FC_OFFLINE:
		/* Transient congestion: requeue if retry budget remains. */
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
		    fcp_is_retryable(icmd)) {
			fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
			ret = DDI_SUCCESS;
		}
		break;

	case FC_LOGINREQ:
		/*
		 * FC_LOGINREQ used to be handled just like all the cases
		 * above. It has been changed to handled a PRLI that fails
		 * with FC_LOGINREQ different than other ipkts that fail
		 * with FC_LOGINREQ. If a PRLI fails with FC_LOGINREQ it is
		 * a simple matter to turn it into a PLOGI instead, so that's
		 * exactly what we do here.
		 */
		if (icmd->ipkt_opcode == LA_ELS_PRLI) {
			ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
			    icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
			    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		} else {
			/* Not a PRLI: treat like the busy cases above. */
			FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
			    FCP_BUF_LEVEL_3, 0,
			    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
			    rval, ptgt->tgt_d_id);
			if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
			    fcp_is_retryable(icmd)) {
				fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
				ret = DDI_SUCCESS;
			}
		}
		break;

	default:
		/*
		 * Unexpected error: log it, unless a link/target state
		 * change makes this failure moot (rediscovery will follow).
		 * Lock order is port_mutex before tgt_mutex.
		 */
		mutex_enter(&pptr->port_mutex);
		mutex_enter(&ptgt->tgt_mutex);
		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);

			(void) fc_ulp_error(rval, &error);
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to send %s to D_ID=%x error=%s",
			    op, ptgt->tgt_d_id, error);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_handle_ipkt_errors,1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
		}
		break;
	}

	return (ret);
}
15430
15431
15432 /*
15433 * Check of outstanding commands on any LUN for this target
15434 */
15435 static int
15436 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15437 {
15438 struct fcp_lun *plun;
15439 struct fcp_pkt *cmd;
15440
15441 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15442 mutex_enter(&plun->lun_mutex);
15443 for (cmd = plun->lun_pkt_head; cmd != NULL;
15444 cmd = cmd->cmd_forw) {
15445 if (cmd->cmd_state == FCP_PKT_ISSUED) {
15446 mutex_exit(&plun->lun_mutex);
15447 return (FC_SUCCESS);
15448 }
15449 }
15450 mutex_exit(&plun->lun_mutex);
15451 }
15452
15453 return (FC_FAILURE);
15454 }
15455
/*
 * fcp_construct_map
 *	Build a portmap array describing every non-orphan target known to
 *	this port.  On return *dev_cnt holds the number of entries.  For
 *	each target the transport is asked for its current portmap via
 *	fc_ulp_pwwn_to_portmap(); if that fails, the entry is synthesized
 *	from cached target state and marked PORT_DEVICE_INVALID/OLD.
 *
 *	Returns the allocated array (size *dev_cnt * sizeof (fc_portmap_t);
 *	caller frees), or NULL if the KM_NOSLEEP allocation fails.
 *	port_mutex is held across both passes to keep the target hash
 *	table stable.
 */
static fc_portmap_t *
fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
{
	int		i;
	fc_portmap_t	*devlist;
	fc_portmap_t	*devptr = NULL;
	struct fcp_tgt	*ptgt;

	/* First pass: count the targets that will be reported. */
	mutex_enter(&pptr->port_mutex);
	for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
				++*dev_cnt;
			}
		}
	}

	/*
	 * NOTE(review): when *dev_cnt is 0 this calls kmem_zalloc() with a
	 * zero size -- presumably callers only invoke this while at least
	 * one target exists; confirm against callers.
	 */
	devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
	    KM_NOSLEEP);
	if (devlist == NULL) {
		mutex_exit(&pptr->port_mutex);
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: failed to allocate for portmap for construct map",
		    pptr->port_instance);
		return (devptr);
	}

	/* Second pass: fill one portmap entry per non-orphan target. */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
				int ret;

				ret = fc_ulp_pwwn_to_portmap(
				    pptr->port_fp_handle,
				    (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
				    devlist);

				if (ret == FC_SUCCESS) {
					devlist++;
					continue;
				}

				/*
				 * Transport no longer knows this device;
				 * fabricate an entry from cached state and
				 * mark it invalid/old.
				 */
				devlist->map_pd = NULL;
				devlist->map_did.port_id = ptgt->tgt_d_id;
				devlist->map_hard_addr.hard_addr =
				    ptgt->tgt_hard_addr;

				devlist->map_state = PORT_DEVICE_INVALID;
				devlist->map_type = PORT_DEVICE_OLD;

				bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
				    &devlist->map_nwwn, FC_WWN_SIZE);

				bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
				    &devlist->map_pwwn, FC_WWN_SIZE);

				devlist++;
			}
		}
	}

	mutex_exit(&pptr->port_mutex);

	return (devptr);
}
15523 /*
15524 * Inimate MPxIO that the lun is busy and cannot accept regular IO
15525 */
15526 static void
15527 fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
15528 {
15529 int i;
15530 struct fcp_tgt *ptgt;
15531 struct fcp_lun *plun;
15532
15533 for (i = 0; i < FCP_NUM_HASH; i++) {
15534 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15535 ptgt = ptgt->tgt_next) {
15536 mutex_enter(&ptgt->tgt_mutex);
15537 for (plun = ptgt->tgt_lun; plun != NULL;
15538 plun = plun->lun_next) {
15539 if (plun->lun_mpxio &&
15540 plun->lun_state & FCP_LUN_BUSY) {
15541 if (!fcp_pass_to_hp(pptr, plun,
15542 plun->lun_cip,
15543 FCP_MPXIO_PATH_SET_BUSY,
15544 pptr->port_link_cnt,
15545 ptgt->tgt_change_cnt, 0, 0)) {
15546 FCP_TRACE(fcp_logq,
15547 pptr->port_instbuf,
15548 fcp_trace,
15549 FCP_BUF_LEVEL_2, 0,
15550 "path_verifybusy: "
15551 "disable lun %p failed!",
15552 plun);
15553 }
15554 }
15555 }
15556 mutex_exit(&ptgt->tgt_mutex);
15557 }
15558 }
15559 }
15560
15561 static int
15562 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
15563 {
15564 dev_info_t *cdip = NULL;
15565 dev_info_t *pdip = NULL;
15566
15567 ASSERT(plun);
15568
15569 mutex_enter(&plun->lun_mutex);
15570 if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
15571 mutex_exit(&plun->lun_mutex);
15572 return (NDI_FAILURE);
15573 }
15574 mutex_exit(&plun->lun_mutex);
15575 cdip = mdi_pi_get_client(PIP(cip));
15576 pdip = mdi_pi_get_phci(PIP(cip));
15577
15578 ASSERT(cdip != NULL);
15579 ASSERT(pdip != NULL);
15580
15581 if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
15582 /* LUN ready for IO */
15583 (void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15584 } else {
15585 /* LUN busy to accept IO */
15586 (void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15587 }
15588 return (NDI_SUCCESS);
15589 }
15590
15591 /*
15592 * Caller must free the returned string of MAXPATHLEN len
15593 * If the device is offline (-1 instance number) NULL
15594 * will be returned.
15595 */
15596 static char *
15597 fcp_get_lun_path(struct fcp_lun *plun)
15598 {
15599 dev_info_t *dip = NULL;
15600 char *path = NULL;
15601 mdi_pathinfo_t *pip = NULL;
15602
15603 if (plun == NULL) {
15604 return (NULL);
15605 }
15606
15607 mutex_enter(&plun->lun_mutex);
15608 if (plun->lun_mpxio == 0) {
15609 dip = DIP(plun->lun_cip);
15610 mutex_exit(&plun->lun_mutex);
15611 } else {
15612 /*
15613 * lun_cip must be accessed with lun_mutex held. Here
15614 * plun->lun_cip either points to a valid node or it is NULL.
15615 * Make a copy so that we can release lun_mutex.
15616 */
15617 pip = PIP(plun->lun_cip);
15618
15619 /*
15620 * Increase ref count on the path so that we can release
15621 * lun_mutex and still be sure that the pathinfo node (and thus
15622 * also the client) is not deallocated. If pip is NULL, this
15623 * has no effect.
15624 */
15625 mdi_hold_path(pip);
15626
15627 mutex_exit(&plun->lun_mutex);
15628
15629 /* Get the client. If pip is NULL, we get NULL. */
15630 dip = mdi_pi_get_client(pip);
15631 }
15632
15633 if (dip == NULL)
15634 goto out;
15635 if (ddi_get_instance(dip) < 0)
15636 goto out;
15637
15638 path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
15639 if (path == NULL)
15640 goto out;
15641
15642 (void) ddi_pathname(dip, path);
15643
15644 /* Clean up. */
15645 out:
15646 if (pip != NULL)
15647 mdi_rele_path(pip);
15648
15649 /*
15650 * In reality, the user wants a fully valid path (one they can open)
15651 * but this string is lacking the mount point, and the minor node.
15652 * It would be nice if we could "figure these out" somehow
15653 * and fill them in. Otherwise, the userland code has to understand
15654 * driver specific details of which minor node is the "best" or
15655 * "right" one to expose. (Ex: which slice is the whole disk, or
15656 * which tape doesn't rewind)
15657 */
15658 return (path);
15659 }
15660
/*
 * fcp_scsi_bus_config
 *	scsi bus_config entry point.  'reset_delay' is the time left (in
 *	ticks) of the FCP_INIT_WAIT_TIMEOUT window that started when the
 *	port attached; it bounds how long we are willing to wait for
 *	fabric discovery before configuring children.
 */
static int
fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int64_t reset_delay;
	int rval, retry = 0;
	struct fcp_port *pptr = fcp_dip2port(parent);

	/* Remaining portion of the init-wait window; clamp at zero. */
	reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
	    (ddi_get_lbolt64() - pptr->port_attach_time);
	if (reset_delay < 0) {
		reset_delay = 0;
	}

	if (fcp_bus_config_debug) {
		flag |= NDI_DEVI_DEBUG;
	}

	switch (op) {
	case BUS_CONFIG_ONE:
		/*
		 * Retry the command since we need to ensure
		 * the fabric devices are available for root
		 */
		while (retry++ < fcp_max_bus_config_retries) {
			rval = (ndi_busop_bus_config(parent,
			    flag | NDI_MDI_FALLBACK, op,
			    arg, childp, (clock_t)reset_delay));
			if (rval == 0) {
				return (rval);
			}
		}

		/*
		 * drain taskq to make sure nodes are created and then
		 * try again.
		 */
		taskq_wait(DEVI(parent)->devi_taskq);
		return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
		    op, arg, childp, 0));

	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL: {
		/*
		 * delay till all devices report in (port_tmp_cnt == 0)
		 * or FCP_INIT_WAIT_TIMEOUT
		 */
		mutex_enter(&pptr->port_mutex);
		while ((reset_delay > 0) && pptr->port_tmp_cnt) {
			(void) cv_timedwait(&pptr->port_config_cv,
			    &pptr->port_mutex,
			    ddi_get_lbolt() + (clock_t)reset_delay);
			/* Recompute the remaining window after each wakeup. */
			reset_delay =
			    (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
			    (ddi_get_lbolt64() - pptr->port_attach_time);
		}
		mutex_exit(&pptr->port_mutex);
		/* drain taskq to make sure nodes are created */
		taskq_wait(DEVI(parent)->devi_taskq);
		return (ndi_busop_bus_config(parent, flag, op,
		    arg, childp, 0));
	}

	default:
		return (NDI_FAILURE);
	}
	/*NOTREACHED*/
}
15729
15730 static int
15731 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15732 ddi_bus_config_op_t op, void *arg)
15733 {
15734 if (fcp_bus_config_debug) {
15735 flag |= NDI_DEVI_DEBUG;
15736 }
15737
15738 return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15739 }
15740
15741
15742 /*
15743 * Routine to copy GUID into the lun structure.
15744 * returns 0 if copy was successful and 1 if encountered a
15745 * failure and did not copy the guid.
15746 */
15747 static int
15748 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
15749 {
15750
15751 int retval = 0;
15752
15753 /* add one for the null terminator */
15754 const unsigned int len = strlen(guidp) + 1;
15755
15756 if ((guidp == NULL) || (plun == NULL)) {
15757 return (1);
15758 }
15759
15760 /*
15761 * if the plun->lun_guid already has been allocated,
15762 * then check the size. if the size is exact, reuse
15763 * it....if not free it an allocate the required size.
15764 * The reallocation should NOT typically happen
15765 * unless the GUIDs reported changes between passes.
15766 * We free up and alloc again even if the
15767 * size was more than required. This is due to the
15768 * fact that the field lun_guid_size - serves
15769 * dual role of indicating the size of the wwn
15770 * size and ALSO the allocation size.
15771 */
15772 if (plun->lun_guid) {
15773 if (plun->lun_guid_size != len) {
15774 /*
15775 * free the allocated memory and
15776 * initialize the field
15777 * lun_guid_size to 0.
15778 */
15779 kmem_free(plun->lun_guid, plun->lun_guid_size);
15780 plun->lun_guid = NULL;
15781 plun->lun_guid_size = 0;
15782 }
15783 }
15784 /*
15785 * alloc only if not already done.
15786 */
15787 if (plun->lun_guid == NULL) {
15788 plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15789 if (plun->lun_guid == NULL) {
15790 cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block:"
15791 "Unable to allocate"
15792 "Memory for GUID!!! size %d", len);
15793 retval = 1;
15794 } else {
15795 plun->lun_guid_size = len;
15796 }
15797 }
15798 if (plun->lun_guid) {
15799 /*
15800 * now copy the GUID
15801 */
15802 bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15803 }
15804 return (retval);
15805 }
15806
15807 /*
15808 * fcp_reconfig_wait
15809 *
15810 * Wait for a rediscovery/reconfiguration to complete before continuing.
15811 */
15812
15813 static void
15814 fcp_reconfig_wait(struct fcp_port *pptr)
15815 {
15816 clock_t reconfig_start, wait_timeout;
15817
15818 /*
15819 * Quick check. If pptr->port_tmp_cnt is 0, there is no
15820 * reconfiguration in progress.
15821 */
15822
15823 mutex_enter(&pptr->port_mutex);
15824 if (pptr->port_tmp_cnt == 0) {
15825 mutex_exit(&pptr->port_mutex);
15826 return;
15827 }
15828 mutex_exit(&pptr->port_mutex);
15829
15830 /*
15831 * If we cause a reconfig by raising power, delay until all devices
15832 * report in (port_tmp_cnt returns to 0)
15833 */
15834
15835 reconfig_start = ddi_get_lbolt();
15836 wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);
15837
15838 mutex_enter(&pptr->port_mutex);
15839
15840 while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
15841 pptr->port_tmp_cnt) {
15842
15843 (void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
15844 reconfig_start + wait_timeout);
15845 }
15846
15847 mutex_exit(&pptr->port_mutex);
15848
15849 /*
15850 * Even if fcp_tmp_count isn't 0, continue without error. The port
15851 * we want may still be ok. If not, it will error out later
15852 */
15853 }
15854
15855 /*
15856 * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
15857 * We rely on the fcp_global_mutex to provide protection against changes to
15858 * the fcp_lun_blacklist.
15859 *
15860 * You can describe a list of target port WWNs and LUN numbers which will
15861 * not be configured. LUN numbers will be interpreted as decimal. White
15862 * spaces and ',' can be used in the list of LUN numbers.
15863 *
15864 * To prevent LUNs 1 and 2 from being configured for target
15865 * port 510000f010fd92a1 and target port 510000e012079df1, set:
15866 *
15867 * pwwn-lun-blacklist=
15868 * "510000f010fd92a1,1,2",
15869 * "510000e012079df1,1,2";
15870 */
15871 static void
15872 fcp_read_blacklist(dev_info_t *dip,
15873 struct fcp_black_list_entry **pplun_blacklist)
15874 {
15875 char **prop_array = NULL;
15876 char *curr_pwwn = NULL;
15877 char *curr_lun = NULL;
15878 uint32_t prop_item = 0;
15879 int idx = 0;
15880 int len = 0;
15881
15882 ASSERT(mutex_owned(&fcp_global_mutex));
15883 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
15884 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
15885 LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
15886 return;
15887 }
15888
15889 for (idx = 0; idx < prop_item; idx++) {
15890
15891 curr_pwwn = prop_array[idx];
15892 while (*curr_pwwn == ' ') {
15893 curr_pwwn++;
15894 }
15895 if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
15896 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15897 ", please check.", curr_pwwn);
15898 continue;
15899 }
15900 if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
15901 (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
15902 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15903 ", please check.", curr_pwwn);
15904 continue;
15905 }
15906 for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
15907 if (isxdigit(curr_pwwn[len]) != TRUE) {
15908 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
15909 "blacklist, please check.", curr_pwwn);
15910 break;
15911 }
15912 }
15913 if (len != sizeof (la_wwn_t) * 2) {
15914 continue;
15915 }
15916
15917 curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
15918 *(curr_lun - 1) = '\0';
15919 fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
15920 }
15921
15922 ddi_prop_free(prop_array);
15923 }
15924
15925 /*
15926 * Get the masking info about one remote target port designated by wwn.
15927 * Lun ids could be separated by ',' or white spaces.
15928 */
15929 static void
15930 fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
15931 struct fcp_black_list_entry **pplun_blacklist)
15932 {
15933 int idx = 0;
15934 uint32_t offset = 0;
15935 unsigned long lun_id = 0;
15936 char lunid_buf[16];
15937 char *pend = NULL;
15938 int illegal_digit = 0;
15939
15940 while (offset < strlen(curr_lun)) {
15941 while ((curr_lun[offset + idx] != ',') &&
15942 (curr_lun[offset + idx] != '\0') &&
15943 (curr_lun[offset + idx] != ' ')) {
15944 if (isdigit(curr_lun[offset + idx]) == 0) {
15945 illegal_digit++;
15946 }
15947 idx++;
15948 }
15949 if (illegal_digit > 0) {
15950 offset += (idx+1); /* To the start of next lun */
15951 idx = 0;
15952 illegal_digit = 0;
15953 fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15954 "the blacklist, please check digits.",
15955 curr_lun, curr_pwwn);
15956 continue;
15957 }
15958 if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
15959 fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15960 "the blacklist, please check the length of LUN#.",
15961 curr_lun, curr_pwwn);
15962 break;
15963 }
15964 if (idx == 0) { /* ignore ' ' or ',' or '\0' */
15965 offset++;
15966 continue;
15967 }
15968
15969 bcopy(curr_lun + offset, lunid_buf, idx);
15970 lunid_buf[idx] = '\0';
15971 if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
15972 fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
15973 } else {
15974 fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15975 "the blacklist, please check %s.",
15976 curr_lun, curr_pwwn, lunid_buf);
15977 }
15978 offset += (idx+1); /* To the start of next lun */
15979 idx = 0;
15980 }
15981 }
15982
15983 /*
15984 * Add one masking record
15985 */
15986 static void
15987 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
15988 struct fcp_black_list_entry **pplun_blacklist)
15989 {
15990 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
15991 struct fcp_black_list_entry *new_entry = NULL;
15992 la_wwn_t wwn;
15993
15994 fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
15995 while (tmp_entry) {
15996 if ((bcmp(&tmp_entry->wwn, &wwn,
15997 sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
15998 return;
15999 }
16000
16001 tmp_entry = tmp_entry->next;
16002 }
16003
16004 /* add to black list */
16005 new_entry = (struct fcp_black_list_entry *)kmem_zalloc
16006 (sizeof (struct fcp_black_list_entry), KM_SLEEP);
16007 bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
16008 new_entry->lun = lun_id;
16009 new_entry->masked = 0;
16010 new_entry->next = *pplun_blacklist;
16011 *pplun_blacklist = new_entry;
16012 }
16013
16014 /*
16015 * Check if we should mask the specified lun of this fcp_tgt
16016 */
16017 static int
16018 fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id)
16019 {
16020 struct fcp_black_list_entry *remote_port;
16021
16022 remote_port = fcp_lun_blacklist;
16023 while (remote_port != NULL) {
16024 if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
16025 if (remote_port->lun == lun_id) {
16026 remote_port->masked++;
16027 if (remote_port->masked == 1) {
16028 fcp_log(CE_NOTE, NULL, "LUN %d of port "
16029 "%02x%02x%02x%02x%02x%02x%02x%02x "
16030 "is masked due to black listing.\n",
16031 lun_id, wwn->raw_wwn[0],
16032 wwn->raw_wwn[1], wwn->raw_wwn[2],
16033 wwn->raw_wwn[3], wwn->raw_wwn[4],
16034 wwn->raw_wwn[5], wwn->raw_wwn[6],
16035 wwn->raw_wwn[7]);
16036 }
16037 return (TRUE);
16038 }
16039 }
16040 remote_port = remote_port->next;
16041 }
16042 return (FALSE);
16043 }
16044
16045 /*
16046 * Release all allocated resources
16047 */
16048 static void
16049 fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist)
16050 {
16051 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
16052 struct fcp_black_list_entry *current_entry = NULL;
16053
16054 ASSERT(mutex_owned(&fcp_global_mutex));
16055 /*
16056 * Traverse all luns
16057 */
16058 while (tmp_entry) {
16059 current_entry = tmp_entry;
16060 tmp_entry = tmp_entry->next;
16061 kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
16062 }
16063 *pplun_blacklist = NULL;
16064 }
16065
16066 /*
16067 * In fcp module,
16068 * pkt@scsi_pkt, cmd@fcp_pkt, icmd@fcp_ipkt, fpkt@fc_packet, pptr@fcp_port
16069 */
16070 static struct scsi_pkt *
16071 fcp_pseudo_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
16072 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
16073 int flags, int (*callback)(), caddr_t arg)
16074 {
16075 fcp_port_t *pptr = ADDR2FCP(ap);
16076 fcp_pkt_t *cmd = NULL;
16077 fc_frame_hdr_t *hp;
16078
16079 /*
16080 * First step: get the packet
16081 */
16082 if (pkt == NULL) {
16083 pkt = scsi_hba_pkt_alloc(pptr->port_dip, ap, cmdlen, statuslen,
16084 tgtlen, sizeof (fcp_pkt_t) + pptr->port_priv_pkt_len,
16085 callback, arg);
16086 if (pkt == NULL) {
16087 return (NULL);
16088 }
16089
16090 /*
16091 * All fields in scsi_pkt will be initialized properly or
16092 * set to zero. We need do nothing for scsi_pkt.
16093 */
16094 /*
16095 * But it's our responsibility to link other related data
16096 * structures. Their initialization will be done, just
16097 * before the scsi_pkt will be sent to FCA.
16098 */
16099 cmd = PKT2CMD(pkt);
16100 cmd->cmd_pkt = pkt;
16101 cmd->cmd_fp_pkt = &cmd->cmd_fc_packet;
16102 /*
16103 * fc_packet_t
16104 */
16105 cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
16106 cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
16107 sizeof (struct fcp_pkt));
16108 cmd->cmd_fp_pkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
16109 cmd->cmd_fp_pkt->pkt_cmdlen = sizeof (struct fcp_cmd);
16110 cmd->cmd_fp_pkt->pkt_resp = cmd->cmd_fcp_rsp;
16111 cmd->cmd_fp_pkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
16112 /*
16113 * Fill in the Fabric Channel Header
16114 */
16115 hp = &cmd->cmd_fp_pkt->pkt_cmd_fhdr;
16116 hp->r_ctl = R_CTL_COMMAND;
16117 hp->rsvd = 0;
16118 hp->type = FC_TYPE_SCSI_FCP;
16119 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
16120 hp->seq_id = 0;
16121 hp->df_ctl = 0;
16122 hp->seq_cnt = 0;
16123 hp->ox_id = 0xffff;
16124 hp->rx_id = 0xffff;
16125 hp->ro = 0;
16126 } else {
16127 /*
16128 * We need think if we should reset any elements in
16129 * related data structures.
16130 */
16131 FCP_TRACE(fcp_logq, pptr->port_instbuf,
16132 fcp_trace, FCP_BUF_LEVEL_6, 0,
16133 "reusing pkt, flags %d", flags);
16134 cmd = PKT2CMD(pkt);
16135 if (cmd->cmd_fp_pkt->pkt_pd) {
16136 cmd->cmd_fp_pkt->pkt_pd = NULL;
16137 }
16138 }
16139
16140 /*
16141 * Second step: dma allocation/move
16142 */
16143 if (bp && bp->b_bcount != 0) {
16144 /*
16145 * Mark if it's read or write
16146 */
16147 if (bp->b_flags & B_READ) {
16148 cmd->cmd_flags |= CFLAG_IS_READ;
16149 } else {
16150 cmd->cmd_flags &= ~CFLAG_IS_READ;
16151 }
16152
16153 bp_mapin(bp);
16154 cmd->cmd_fp_pkt->pkt_data = bp->b_un.b_addr;
16155 cmd->cmd_fp_pkt->pkt_datalen = bp->b_bcount;
16156 cmd->cmd_fp_pkt->pkt_data_resid = 0;
16157 } else {
16158 /*
16159 * It seldom happens, except when CLUSTER or SCSI_VHCI wants
16160 * to send zero-length read/write.
16161 */
16162 cmd->cmd_fp_pkt->pkt_data = NULL;
16163 cmd->cmd_fp_pkt->pkt_datalen = 0;
16164 }
16165
16166 return (pkt);
16167 }
16168
16169 static void
16170 fcp_pseudo_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16171 {
16172 fcp_port_t *pptr = ADDR2FCP(ap);
16173
16174 /*
16175 * First we let FCA to uninitilize private part.
16176 */
16177 (void) fc_ulp_uninit_packet(pptr->port_fp_handle,
16178 PKT2CMD(pkt)->cmd_fp_pkt);
16179
16180 /*
16181 * Then we uninitialize fc_packet.
16182 */
16183
16184 /*
16185 * Thirdly, we uninitializae fcp_pkt.
16186 */
16187
16188 /*
16189 * In the end, we free scsi_pkt.
16190 */
16191 scsi_hba_pkt_free(ap, pkt);
16192 }
16193
/*
 * fcp_pseudo_start
 *	tran_start(9E) entry point for pseudo FC-HBAs: finish initializing
 *	the fcp_cmd/fc_packet from the scsi_pkt and hand the packet to the
 *	transport (or poll it when FLAG_NOINTR is set).
 *
 *	Returns TRAN_ACCEPT on success, TRAN_BUSY on FC_TRAN_BUSY,
 *	TRAN_FATAL_ERROR on any other transport failure, or the result of
 *	fcp_dopoll() for polled commands.
 */
static int
fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	fcp_port_t	*pptr = ADDR2FCP(ap);
	fcp_lun_t	*plun = ADDR2LUN(ap);
	fcp_tgt_t	*ptgt = plun->lun_tgt;
	fcp_pkt_t	*cmd  = PKT2CMD(pkt);
	fcp_cmd_t	*fcmd = &cmd->cmd_fcp_cmd;
	fc_packet_t	*fpkt = cmd->cmd_fp_pkt;
	int		 rval;

	/* Attach the current port-device handle and let the FCA set up. */
	fpkt->pkt_pd = ptgt->tgt_pd_handle;
	(void) fc_ulp_init_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt, 1);

	/*
	 * Firstly, we need initialize fcp_pkt_t
	 * Secondly, we need initialize fcp_cmd_t.
	 */
	bcopy(pkt->pkt_cdbp, fcmd->fcp_cdb, pkt->pkt_cdblen);
	fcmd->fcp_data_len = fpkt->pkt_datalen;
	fcmd->fcp_ent_addr = plun->lun_addr;
	/* Map the scsi_pkt tag flags onto the FCP task queue type. */
	if (pkt->pkt_flags & FLAG_HTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
	} else if (pkt->pkt_flags & FLAG_OTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
	} else if (pkt->pkt_flags & FLAG_STAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
	} else {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	}

	if (cmd->cmd_flags & CFLAG_IS_READ) {
		fcmd->fcp_cntl.cntl_read_data = 1;
		fcmd->fcp_cntl.cntl_write_data = 0;
	} else {
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 1;
	}

	/*
	 * Then we need initialize fc_packet_t too.
	 */
	/* Allow the transport a 2-second margin over the SCSI timeout. */
	fpkt->pkt_timeout = pkt->pkt_time + 2;
	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
	if (cmd->cmd_flags & CFLAG_IS_READ) {
		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
	} else {
		fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
	}

	if (pkt->pkt_flags & FLAG_NOINTR) {
		/* Polled: no completion callback. */
		fpkt->pkt_comp = NULL;
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
	} else {
		fpkt->pkt_comp = fcp_cmd_callback;
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
		if (pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
		}
	}

	/*
	 * Lastly, we need initialize scsi_pkt
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;
	pkt->pkt_resid = 0;

	/*
	 * if interrupts aren't allowed (e.g. at dump time) then we'll
	 * have to do polled I/O
	 */
	if (pkt->pkt_flags & FLAG_NOINTR) {
		return (fcp_dopoll(pptr, cmd));
	}

	cmd->cmd_state = FCP_PKT_ISSUED;
	rval = fcp_transport(pptr->port_fp_handle, fpkt, 0);
	if (rval == FC_SUCCESS) {
		return (TRAN_ACCEPT);
	}

	/*
	 * Need more consideration
	 *
	 * pkt->pkt_flags & FLAG_NOQUEUE could abort other pkt
	 */
	cmd->cmd_state = FCP_PKT_IDLE;
	if (rval == FC_TRAN_BUSY) {
		return (TRAN_BUSY);
	} else {
		return (TRAN_FATAL_ERROR);
	}
}
16290
16291 /*
16292 * scsi_poll will always call tran_sync_pkt for pseudo FC-HBAs
16293 * SCSA will initialize it to scsi_sync_cache_pkt for physical FC-HBAs
16294 */
16295 static void
16296 fcp_pseudo_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16297 {
16298 FCP_TRACE(fcp_logq, "fcp_pseudo_sync_pkt", fcp_trace,
16299 FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16300 }
16301
16302 /*
16303 * scsi_dmafree will always call tran_dmafree, when STATE_ARQ_DONE
16304 */
16305 static void
16306 fcp_pseudo_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
16307 {
16308 FCP_TRACE(fcp_logq, "fcp_pseudo_dmafree", fcp_trace,
16309 FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16310 }