1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * Copyright 2018 Nexenta Systems, Inc.
28 */
29
30 /*
31 * Fibre Channel SCSI ULP Mapping driver
32 */
33
34 #include <sys/scsi/scsi.h>
35 #include <sys/types.h>
36 #include <sys/varargs.h>
37 #include <sys/devctl.h>
38 #include <sys/thread.h>
39 #include <sys/thread.h>
40 #include <sys/open.h>
41 #include <sys/file.h>
42 #include <sys/sunndi.h>
43 #include <sys/console.h>
44 #include <sys/proc.h>
45 #include <sys/time.h>
46 #include <sys/utsname.h>
47 #include <sys/scsi/impl/scsi_reset_notify.h>
48 #include <sys/ndi_impldefs.h>
49 #include <sys/byteorder.h>
50 #include <sys/ctype.h>
51 #include <sys/sunmdi.h>
52
53 #include <sys/fibre-channel/fc.h>
54 #include <sys/fibre-channel/impl/fc_ulpif.h>
55 #include <sys/fibre-channel/ulp/fcpvar.h>
56
57 /*
58 * Discovery Process
59 * =================
60 *
61 * The discovery process is a major function of FCP. In order to help
62 * understand that function a flow diagram is given here. This diagram
63 * doesn't claim to cover all the cases and the events that can occur during
64 * the discovery process nor the subtleties of the code. The code paths shown
65 * are simplified. Its purpose is to help the reader (and potentially bug
66 * fixer) have an overall view of the logic of the code. For that reason the
67 * diagram covers the simple case of the line coming up cleanly or of a new
 * port attaching to FCP while the line is up. The reader must keep in mind
69 * that:
70 *
71 * - There are special cases where bringing devices online and offline
72 * is driven by Ioctl.
73 *
74 * - The behavior of the discovery process can be modified through the
75 * .conf file.
76 *
77 * - The line can go down and come back up at any time during the
78 * discovery process which explains some of the complexity of the code.
79 *
80 * ............................................................................
81 *
82 * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
83 *
84 *
85 * +-------------------------+
86 * fp/fctl module --->| fcp_port_attach |
87 * +-------------------------+
88 * | |
89 * | |
90 * | v
91 * | +-------------------------+
92 * | | fcp_handle_port_attach |
93 * | +-------------------------+
94 * | |
95 * | |
96 * +--------------------+ |
97 * | |
98 * v v
99 * +-------------------------+
100 * | fcp_statec_callback |
101 * +-------------------------+
102 * |
103 * |
104 * v
105 * +-------------------------+
106 * | fcp_handle_devices |
107 * +-------------------------+
108 * |
109 * |
110 * v
111 * +-------------------------+
112 * | fcp_handle_mapflags |
113 * +-------------------------+
114 * |
115 * |
116 * v
117 * +-------------------------+
118 * | fcp_send_els |
119 * | |
120 * | PLOGI or PRLI To all the|
121 * | reachable devices. |
122 * +-------------------------+
123 *
124 *
125 * ............................................................................
126 *
127 * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
128 * STEP 1 are called (it is actually the same function).
129 *
130 *
131 * +-------------------------+
132 * | fcp_icmd_callback |
133 * fp/fctl module --->| |
134 * | callback for PLOGI and |
135 * | PRLI. |
136 * +-------------------------+
137 * |
138 * |
139 * Received PLOGI Accept /-\ Received PRLI Accept
140 * _ _ _ _ _ _ / \_ _ _ _ _ _
141 * | \ / |
142 * | \-/ |
143 * | |
144 * v v
145 * +-------------------------+ +-------------------------+
146 * | fcp_send_els | | fcp_send_scsi |
147 * | | | |
148 * | PRLI | | REPORT_LUN |
149 * +-------------------------+ +-------------------------+
150 *
151 * ............................................................................
152 *
153 * STEP 3: The callback functions of the SCSI commands issued by FCP are called
154 * (It is actually the same function).
155 *
156 *
157 * +-------------------------+
158 * fp/fctl module ------->| fcp_scsi_callback |
159 * +-------------------------+
160 * |
161 * |
162 * |
163 * Receive REPORT_LUN reply /-\ Receive INQUIRY PAGE83 reply
164 * _ _ _ _ _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ _ _ _ _
165 * | \ / |
166 * | \-/ |
167 * | | |
168 * | Receive INQUIRY reply| |
169 * | | |
170 * v v v
171 * +------------------------+ +----------------------+ +----------------------+
172 * | fcp_handle_reportlun | | fcp_handle_inquiry | | fcp_handle_page83 |
173 * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
174 * +------------------------+ +----------------------+ +----------------------+
175 * | | |
176 * | | |
177 * | | |
178 * v v |
179 * +-----------------+ +-----------------+ |
180 * | fcp_send_scsi | | fcp_send_scsi | |
181 * | | | | |
182 * | INQUIRY | | INQUIRY PAGE83 | |
183 * | (To each LUN) | +-----------------+ |
184 * +-----------------+ |
185 * |
186 * v
187 * +------------------------+
188 * | fcp_call_finish_init |
189 * +------------------------+
190 * |
191 * v
192 * +-----------------------------+
193 * | fcp_call_finish_init_held |
194 * +-----------------------------+
195 * |
196 * |
197 * All LUNs scanned /-\
198 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ / \
199 * | \ /
200 * | \-/
201 * v |
202 * +------------------+ |
203 * | fcp_finish_tgt | |
204 * +------------------+ |
205 * | Target Not Offline and |
206 * Target Not Offline and | not marked and tgt_node_state |
207 * marked /-\ not FCP_TGT_NODE_ON_DEMAND |
208 * _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ |
209 * | \ / | |
210 * | \-/ | |
211 * v v |
212 * +----------------------------+ +-------------------+ |
213 * | fcp_offline_target | | fcp_create_luns | |
214 * | | +-------------------+ |
215 * | A structure fcp_tgt_elem | | |
216 * | is created and queued in | v |
217 * | the FCP port list | +-------------------+ |
218 * | port_offline_tgts. It | | fcp_pass_to_hp | |
219 * | will be unqueued by the | | | |
220 * | watchdog timer. | | Called for each | |
221 * +----------------------------+ | LUN. Dispatches | |
222 * | | fcp_hp_task | |
223 * | +-------------------+ |
224 * | | |
225 * | | |
226 * | | |
227 * | +---------------->|
228 * | |
229 * +---------------------------------------------->|
230 * |
231 * |
232 * All the targets (devices) have been scanned /-\
233 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ / \
234 * | \ /
235 * | \-/
236 * +-------------------------------------+ |
237 * | fcp_finish_init | |
238 * | | |
239 * | Signal broadcasts the condition | |
240 * | variable port_config_cv of the FCP | |
241 * | port. One potential code sequence | |
242 * | waiting on the condition variable | |
243 * | the code sequence handling | |
244 * | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER| |
245 * | The other is in the function | |
246 * | fcp_reconfig_wait which is called | |
247 * | in the transmit path preventing IOs | |
248 * | from going through till the disco- | |
249 * | very process is over. | |
250 * +-------------------------------------+ |
251 * | |
252 * | |
253 * +--------------------------------->|
254 * |
255 * v
256 * Return
257 *
258 * ............................................................................
259 *
260 * STEP 4: The hot plug task is called (for each fcp_hp_elem).
261 *
262 *
263 * +-------------------------+
264 * | fcp_hp_task |
265 * +-------------------------+
266 * |
267 * |
268 * v
269 * +-------------------------+
270 * | fcp_trigger_lun |
271 * +-------------------------+
272 * |
273 * |
274 * v
275 * Bring offline /-\ Bring online
276 * _ _ _ _ _ _ _ _ _/ \_ _ _ _ _ _ _ _ _ _
277 * | \ / |
278 * | \-/ |
279 * v v
280 * +---------------------+ +-----------------------+
281 * | fcp_offline_child | | fcp_get_cip |
282 * +---------------------+ | |
283 * | Creates a dev_info_t |
284 * | or a mdi_pathinfo_t |
285 * | depending on whether |
286 * | mpxio is on or off. |
287 * +-----------------------+
288 * |
289 * |
290 * v
291 * +-----------------------+
292 * | fcp_online_child |
293 * | |
294 * | Set device online |
295 * | using NDI or MDI. |
296 * +-----------------------+
297 *
298 * ............................................................................
299 *
 * STEP 5: The watchdog timer expires. The watchdog timer does much more than
 *	what is described here. We only show the target offline path.
302 *
303 *
304 * +--------------------------+
305 * | fcp_watch |
306 * +--------------------------+
307 * |
308 * |
309 * v
310 * +--------------------------+
311 * | fcp_scan_offline_tgts |
312 * +--------------------------+
313 * |
314 * |
315 * v
316 * +--------------------------+
317 * | fcp_offline_target_now |
318 * +--------------------------+
319 * |
320 * |
321 * v
322 * +--------------------------+
323 * | fcp_offline_tgt_luns |
324 * +--------------------------+
325 * |
326 * |
327 * v
328 * +--------------------------+
329 * | fcp_offline_lun |
330 * +--------------------------+
331 * |
332 * |
333 * v
334 * +----------------------------------+
335 * | fcp_offline_lun_now |
336 * | |
337 * | A request (or two if mpxio) is |
338 * | sent to the hot plug task using |
339 * | a fcp_hp_elem structure. |
340 * +----------------------------------+
341 */
342
343 /*
344 * Functions registered with DDI framework
345 */
346 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
347 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
348 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
349 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
350 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
351 cred_t *credp, int *rval);
352
353 /*
354 * Functions registered with FC Transport framework
355 */
356 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
357 fc_attach_cmd_t cmd, uint32_t s_id);
358 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
359 fc_detach_cmd_t cmd);
360 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
361 int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
362 uint32_t claimed);
363 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
364 fc_unsol_buf_t *buf, uint32_t claimed);
365 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
366 fc_unsol_buf_t *buf, uint32_t claimed);
367 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
368 uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
369 uint32_t dev_cnt, uint32_t port_sid);
370
371 /*
372 * Functions registered with SCSA framework
373 */
374 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
375 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
376 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
377 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
378 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
379 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
380 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
381 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
382 static int fcp_scsi_reset(struct scsi_address *ap, int level);
383 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
384 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
385 int whom);
386 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
387 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
388 void (*callback)(caddr_t), caddr_t arg);
389 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
390 char *name, ddi_eventcookie_t *event_cookiep);
391 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
392 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
393 ddi_callback_id_t *cb_id);
394 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
395 ddi_callback_id_t cb_id);
396 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
397 ddi_eventcookie_t eventid, void *impldata);
398 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
399 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
400 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
401 ddi_bus_config_op_t op, void *arg);
402
403 /*
404 * Internal functions
405 */
406 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
407 int mode, int *rval);
408
409 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
410 int mode, int *rval);
411 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
412 struct fcp_scsi_cmd *fscsi, int mode);
413 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
414 caddr_t base_addr, int mode);
415 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
416
417 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
418 la_wwn_t *pwwn, int *ret_val, int *fc_status, int *fc_pkt_state,
419 int *fc_pkt_reason, int *fc_pkt_action);
420 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
421 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
422 static int fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status,
423 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
424 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
425 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
426 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
427 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
428
429 static void fcp_handle_devices(struct fcp_port *pptr,
430 fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
431 fcp_map_tag_t *map_tag, int cause);
432 static int fcp_handle_mapflags(struct fcp_port *pptr,
433 struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
434 int tgt_cnt, int cause);
435 static int fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause);
436 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
437 struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
438 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
439 int cause);
440 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
441 uint32_t state);
442 static struct fcp_port *fcp_get_port(opaque_t port_handle);
443 static void fcp_unsol_callback(fc_packet_t *fpkt);
444 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
445 uchar_t r_ctl, uchar_t type);
446 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
447 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
448 struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
449 int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
450 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
451 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
452 int nodma, int flags);
453 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
454 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
455 uchar_t *wwn);
456 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
457 uint32_t d_id);
458 static void fcp_icmd_callback(fc_packet_t *fpkt);
459 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
460 int len, int lcount, int tcount, int cause, uint32_t rscn_count);
461 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
462 static void fcp_scsi_callback(fc_packet_t *fpkt);
463 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
464 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
465 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
466 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
467 uint16_t lun_num);
468 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
469 int link_cnt, int tgt_cnt, int cause);
470 static void fcp_finish_init(struct fcp_port *pptr);
471 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
472 int tgt_cnt, int cause);
473 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
474 int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags);
475 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
476 int link_cnt, int tgt_cnt, int nowait, int flags);
477 static void fcp_offline_target_now(struct fcp_port *pptr,
478 struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
479 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
480 int tgt_cnt, int flags);
481 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
482 int nowait, int flags);
483 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
484 int tgt_cnt);
485 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
486 int tgt_cnt, int flags);
487 static void fcp_scan_offline_luns(struct fcp_port *pptr);
488 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
489 static void fcp_update_offline_flags(struct fcp_lun *plun);
490 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
491 static void fcp_abort_commands(struct fcp_pkt *head, struct
492 fcp_port *pptr);
493 static void fcp_cmd_callback(fc_packet_t *fpkt);
494 static void fcp_complete_pkt(fc_packet_t *fpkt);
495 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
496 struct fcp_port *pptr);
497 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
498 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
499 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
500 static void fcp_dealloc_lun(struct fcp_lun *plun);
501 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
502 fc_portmap_t *map_entry, int link_cnt);
503 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
504 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
505 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
506 int internal);
507 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
508 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
509 uint32_t s_id, int instance);
510 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
511 int instance);
512 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
513 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
514 int);
515 static void fcp_kmem_cache_destructor(struct scsi_pkt *, scsi_hba_tran_t *);
516 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
517 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
518 int flags);
519 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
520 static int fcp_reset_target(struct scsi_address *ap, int level);
521 static int fcp_commoncap(struct scsi_address *ap, char *cap,
522 int val, int tgtonly, int doset);
523 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
524 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
525 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
526 int sleep);
527 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
528 uint32_t s_id, fc_attach_cmd_t cmd, int instance);
529 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
530 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
531 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
532 int lcount, int tcount);
533 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
534 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
535 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
536 int tgt_cnt);
537 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
538 dev_info_t *pdip, caddr_t name);
539 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
540 int lcount, int tcount, int flags, int *circ);
541 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
542 int lcount, int tcount, int flags, int *circ);
543 static void fcp_remove_child(struct fcp_lun *plun);
544 static void fcp_watch(void *arg);
545 static void fcp_check_reset_delay(struct fcp_port *pptr);
546 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
547 struct fcp_lun *rlun, int tgt_cnt);
548 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
549 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
550 uchar_t *wwn, uint16_t lun);
551 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
552 struct fcp_lun *plun);
553 static void fcp_post_callback(struct fcp_pkt *cmd);
554 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
555 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
556 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
557 child_info_t *cip);
558 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
559 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
560 int tgt_cnt, int flags);
561 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
562 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
563 int tgt_cnt, int flags, int wait);
564 static void fcp_retransport_cmd(struct fcp_port *pptr,
565 struct fcp_pkt *cmd);
566 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
567 uint_t statistics);
568 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
569 static void fcp_update_targets(struct fcp_port *pptr,
570 fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
571 static int fcp_call_finish_init(struct fcp_port *pptr,
572 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
573 static int fcp_call_finish_init_held(struct fcp_port *pptr,
574 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
575 static void fcp_reconfigure_luns(void * tgt_handle);
576 static void fcp_free_targets(struct fcp_port *pptr);
577 static void fcp_free_target(struct fcp_tgt *ptgt);
578 static int fcp_is_retryable(struct fcp_ipkt *icmd);
579 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
580 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
581 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
582 static void fcp_print_error(fc_packet_t *fpkt);
583 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
584 struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
585 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
586 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
587 uint32_t *dev_cnt);
588 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
589 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
590 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
591 struct fcp_ioctl *, struct fcp_port **);
592 static char *fcp_get_lun_path(struct fcp_lun *plun);
593 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
594 int *rval);
595 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
596 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
597 static char *fcp_get_lun_path(struct fcp_lun *plun);
598 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
599 int *rval);
600 static void fcp_reconfig_wait(struct fcp_port *pptr);
601
602 /*
603 * New functions added for mpxio support
604 */
605 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
606 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
607 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
608 int tcount);
609 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
610 dev_info_t *pdip);
611 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
612 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
613 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
614 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
615 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
616 int what);
617 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
618 fc_packet_t *fpkt);
619 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
620
621 /*
622 * New functions added for lun masking support
623 */
624 static void fcp_read_blacklist(dev_info_t *dip,
625 struct fcp_black_list_entry **pplun_blacklist);
626 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
627 struct fcp_black_list_entry **pplun_blacklist);
628 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
629 struct fcp_black_list_entry **pplun_blacklist);
630 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
631 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
632
633 /*
634 * New functions to support software FCA (like fcoei)
635 */
636 static struct scsi_pkt *fcp_pseudo_init_pkt(
637 struct scsi_address *ap, struct scsi_pkt *pkt,
638 struct buf *bp, int cmdlen, int statuslen,
639 int tgtlen, int flags, int (*callback)(), caddr_t arg);
640 static void fcp_pseudo_destroy_pkt(
641 struct scsi_address *ap, struct scsi_pkt *pkt);
642 static void fcp_pseudo_sync_pkt(
643 struct scsi_address *ap, struct scsi_pkt *pkt);
644 static int fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt);
645 static void fcp_pseudo_dmafree(
646 struct scsi_address *ap, struct scsi_pkt *pkt);
647
648 extern struct mod_ops mod_driverops;
649 /*
650 * This variable is defined in modctl.c and set to '1' after the root driver
651 * and fs are loaded. It serves as an indication that the root filesystem can
652 * be used.
653 */
654 extern int modrootloaded;
655 /*
656 * This table contains strings associated with the SCSI sense key codes. It
657 * is used by FCP to print a clear explanation of the code returned in the
658 * sense information by a device.
659 */
660 extern char *sense_keys[];
661 /*
662 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
663 * under this device that the paths to a physical device are created when
664 * MPxIO is used.
665 */
666 extern dev_info_t *scsi_vhci_dip;
667
668 /*
669 * Report lun processing
670 */
671 #define FCP_LUN_ADDRESSING 0x80
672 #define FCP_PD_ADDRESSING 0x00
673 #define FCP_VOLUME_ADDRESSING 0x40
674
675 #define FCP_SVE_THROTTLE 0x28 /* Vicom */
676 #define MAX_INT_DMA 0x7fffffff
677 /*
678 * Property definitions
679 */
680 #define NODE_WWN_PROP (char *)fcp_node_wwn_prop
681 #define PORT_WWN_PROP (char *)fcp_port_wwn_prop
682 #define TARGET_PROP (char *)fcp_target_prop
683 #define LUN_PROP (char *)fcp_lun_prop
684 #define SAM_LUN_PROP (char *)fcp_sam_lun_prop
685 #define CONF_WWN_PROP (char *)fcp_conf_wwn_prop
686 #define OBP_BOOT_WWN (char *)fcp_obp_boot_wwn
687 #define MANUAL_CFG_ONLY (char *)fcp_manual_config_only
688 #define INIT_PORT_PROP (char *)fcp_init_port_prop
689 #define TGT_PORT_PROP (char *)fcp_tgt_port_prop
690 #define LUN_BLACKLIST_PROP (char *)fcp_lun_blacklist_prop
691 /*
692 * Short hand macros.
693 */
694 #define LUN_PORT (plun->lun_tgt->tgt_port)
695 #define LUN_TGT (plun->lun_tgt)
696
697 /*
698 * Driver private macros
699 */
700 #define FCP_ATOB(x) (((x) >= '0' && (x) <= '9') ? ((x) - '0') : \
701 ((x) >= 'a' && (x) <= 'f') ? \
702 ((x) - 'a' + 10) : ((x) - 'A' + 10))
703
704 #define FCP_MAX(a, b) ((a) > (b) ? (a) : (b))
705
706 #define FCP_N_NDI_EVENTS \
707 (sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))
708
709 #define FCP_LINK_STATE_CHANGED(p, c) \
710 ((p)->port_link_cnt != (c)->ipkt_link_cnt)
711
712 #define FCP_TGT_STATE_CHANGED(t, c) \
713 ((t)->tgt_change_cnt != (c)->ipkt_change_cnt)
714
715 #define FCP_STATE_CHANGED(p, t, c) \
716 (FCP_TGT_STATE_CHANGED(t, c))
717
718 #define FCP_MUST_RETRY(fpkt) \
719 ((fpkt)->pkt_state == FC_PKT_LOCAL_BSY || \
720 (fpkt)->pkt_state == FC_PKT_LOCAL_RJT || \
721 (fpkt)->pkt_state == FC_PKT_TRAN_BSY || \
722 (fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS || \
723 (fpkt)->pkt_state == FC_PKT_NPORT_BSY || \
724 (fpkt)->pkt_state == FC_PKT_FABRIC_BSY || \
725 (fpkt)->pkt_state == FC_PKT_PORT_OFFLINE || \
726 (fpkt)->pkt_reason == FC_REASON_OFFLINE)
727
728 #define FCP_SENSE_REPORTLUN_CHANGED(es) \
729 ((es)->es_key == KEY_UNIT_ATTENTION && \
730 (es)->es_add_code == 0x3f && \
731 (es)->es_qual_code == 0x0e)
732
733 #define FCP_SENSE_NO_LUN(es) \
734 ((es)->es_key == KEY_ILLEGAL_REQUEST && \
735 (es)->es_add_code == 0x25 && \
736 (es)->es_qual_code == 0x0)
737
738 #define FCP_VERSION "20091208-1.192"
739 #define FCP_NAME_VERSION "SunFC FCP v" FCP_VERSION
740
741 #define FCP_NUM_ELEMENTS(array) \
742 (sizeof (array) / sizeof ((array)[0]))
743
/*
 * Debugging, Error reporting, and tracing
 */
#define	FCP_LOG_SIZE		(1024 * 1024)	/* 1 MiB */
748
749 #define FCP_LEVEL_1 0x00001 /* attach/detach PM CPR */
750 #define FCP_LEVEL_2 0x00002 /* failures/Invalid data */
751 #define FCP_LEVEL_3 0x00004 /* state change, discovery */
#define	FCP_LEVEL_4		0x00008	/* ULP messages */
#define	FCP_LEVEL_5		0x00010	/* ELS/SCSI cmds */
#define	FCP_LEVEL_6		0x00020	/* Transport failures */
#define	FCP_LEVEL_7		0x00040
#define	FCP_LEVEL_8		0x00080	/* I/O tracing */
#define	FCP_LEVEL_9		0x00100	/* I/O tracing */



/*
 * Log contents to system messages file
 */
#define	FCP_MSG_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_MSG)


/*
 * Log contents to trace buffer
 */
#define	FCP_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF)


/*
 * Log contents to both system messages file and trace buffer
 */
#define	FCP_MSG_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF | \
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF | \
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF | \
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF | \
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF | \
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF | \
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF | \
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF | \
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF | \
				FC_TRACE_LOG_MSG)
/*
 * FCP_DTRACE only produces trace output in DEBUG builds.  In non-DEBUG
 * builds the macro name expands to nothing, so a statement such as
 * FCP_DTRACE(a, b, ...) is left as the parenthesized comma expression
 * (a, b, ...): the arguments are still evaluated, but nothing is logged.
 */
#ifdef DEBUG
#define	FCP_DTRACE	fc_trace_debug
#else
#define	FCP_DTRACE
#endif

/* FCP_TRACE always logs, in DEBUG and non-DEBUG builds alike. */
#define	FCP_TRACE	fc_trace_debug
817
/*
 * Character-device entry points for the fcp pseudo device (the minor
 * node created in fcp_attach()).  Only open, close and ioctl are
 * implemented; all other entry points are nodev.
 */
static struct cb_ops fcp_cb_ops = {
	fcp_open,			/* open */
	fcp_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	fcp_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};
838
839
/*
 * Device operations vector.  fcp is a leaf pseudo driver: only
 * attach/detach and the cb_ops vector above are meaningful; there are
 * no bus_ops and no power management.
 */
static struct dev_ops fcp_ops = {
	DEVO_REV,
	0,
	ddi_getinfo_1to1,
	nulldev,			/* identify */
	nulldev,			/* probe */
	fcp_attach,			/* attach and detach are mandatory */
	fcp_detach,
	nodev,				/* reset */
	&fcp_cb_ops,			/* cb_ops */
	NULL,				/* bus_ops */
	NULL,				/* power */
};
853
854
/* Driver version string (from FCP_NAME_VERSION). */
char *fcp_version = FCP_NAME_VERSION;

/* Loadable-module linkage for a device driver; see modldrv(9S). */
static struct modldrv modldrv = {
	&mod_driverops,
	FCP_NAME_VERSION,
	&fcp_ops
};


/* Module linkage passed to mod_install()/mod_remove(); see modlinkage(9S). */
static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
869
870
/*
 * ULP registration data handed to the FC transport via fc_ulp_add() in
 * _init().  The transport calls back into fcp through the function
 * pointers below (port attach/detach, ELS/data/state-change callbacks).
 */
static fc_ulp_modinfo_t fcp_modinfo = {
	&fcp_modinfo,			/* ulp_handle */
	FCTL_ULP_MODREV_4,		/* ulp_rev */
	FC4_SCSI_FCP,			/* ulp_type */
	"fcp",				/* ulp_name */
	FCP_STATEC_MASK,		/* ulp_statec_mask */
	fcp_port_attach,		/* ulp_port_attach */
	fcp_port_detach,		/* ulp_port_detach */
	fcp_port_ioctl,			/* ulp_port_ioctl */
	fcp_els_callback,		/* ulp_els_callback */
	fcp_data_callback,		/* ulp_data_callback */
	fcp_statec_callback		/* ulp_statec_callback */
};
884
/*
 * Default trace mask: levels 1 through 7 plus the log-destination mask.
 * NOTE(review): the DEBUG and non-DEBUG definitions below are currently
 * identical; the #ifdef presumably remains as a hook for building a
 * quieter production mask -- confirm before removing it.
 */
#ifdef DEBUG
#define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
				FCP_LEVEL_2 | FCP_LEVEL_3 | \
				FCP_LEVEL_4 | FCP_LEVEL_5 | \
				FCP_LEVEL_6 | FCP_LEVEL_7)
#else
#define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
				FCP_LEVEL_2 | FCP_LEVEL_3 | \
				FCP_LEVEL_4 | FCP_LEVEL_5 | \
				FCP_LEVEL_6 | FCP_LEVEL_7)
#endif
896
/* FCP global variables */
int fcp_bus_config_debug = 0;
/* Size of the trace log queue allocated in _init() via fc_trace_alloc_logq() */
static int fcp_log_size = FCP_LOG_SIZE;
/* Active trace mask; see FCP_TRACE_DEFAULT above. Tunable via mdb. */
static int fcp_trace = FCP_TRACE_DEFAULT;
static fc_trace_logq_t *fcp_logq = NULL;
/* Head of the LUN black list; presumably built from "pwwn-lun-blacklist" */
static struct fcp_black_list_entry *fcp_lun_blacklist = NULL;
/*
 * The auto-configuration is set by default. The only way of disabling it is
 * through the property MANUAL_CFG_ONLY in the fcp.conf file.
 */
static int fcp_enable_auto_configuration = 1;
static int fcp_max_bus_config_retries = 4;
static int fcp_lun_ready_retry = 300;
/*
 * The value assigned to the following variable has changed several times due
 * to a problem with the data underruns reporting of some firmware(s). The
 * current value of 50 gives a timeout value of 25 seconds for a max number
 * of 256 LUNs.
 */
static int fcp_max_target_retries = 50;
/*
 * Watchdog variables
 * ------------------
 *
 * fcp_watchdog_init
 *
 *	Indicates if the watchdog timer is running or not. This is actually
 *	a counter of the number of Fibre Channel ports that attached. When
 *	the first port attaches the watchdog is started. When the last port
 *	detaches the watchdog timer is stopped.
 *
 * fcp_watchdog_time
 *
 *	This is the watchdog clock counter. It is incremented by
 *	fcp_watchdog_time each time the watchdog timer expires.
 *
 * fcp_watchdog_timeout
 *
 *	Increment value of the variable fcp_watchdog_time as well as the
 *	the timeout value of the watchdog timer. The unit is 1 second. It
 *	is strange that this is not a #define but a variable since the code
 *	never changes this value. The reason why it can be said that the
 *	unit is 1 second is because the number of ticks for the watchdog
 *	timer is determined like this:
 *
 *	    fcp_watchdog_tick = fcp_watchdog_timeout *
 *		  drv_usectohz(1000000);
 *
 *	The value 1000000 is hard coded in the code.
 *
 * fcp_watchdog_tick
 *
 *	Watchdog timer value in ticks.
 */
static int fcp_watchdog_init = 0;
static int fcp_watchdog_time = 0;
static int fcp_watchdog_timeout = 1;
static int fcp_watchdog_tick;

/*
 * fcp_offline_delay is a global variable to enable customisation of
 * the timeout on link offlines or RSCNs. The default value is set
 * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
 * specified in FCP4 Chapter 11 (see www.t10.org).
 *
 * The variable fcp_offline_delay is specified in SECONDS.
 *
 * If we made this a static var then the user would not be able to
 * change it. This variable is set in fcp_attach().
 */
unsigned int fcp_offline_delay = FCP_OFFLINE_DELAY;
968
static void *fcp_softstate = NULL; /* for soft state */
static uchar_t fcp_oflag = FCP_IDLE; /* open flag */
/* Protects fcp_port_head, fcp_oflag and fcp_watchdog_id (see _NOTE below) */
static kmutex_t fcp_global_mutex;
/* Serializes FCP_TGT_SEND_SCSI passthru ioctls (see fcp_ioctl()) */
static kmutex_t fcp_ioctl_mutex;
static dev_info_t *fcp_global_dip = NULL;
static timeout_id_t fcp_watchdog_id;
/* Names of the device-node properties fcp creates/consumes */
const char *fcp_lun_prop = "lun";
const char *fcp_sam_lun_prop = "sam-lun";
const char *fcp_target_prop = "target";
/*
 * NOTE: consumers of "node-wwn" property include stmsboot in ON
 * consolidation.
 */
const char *fcp_node_wwn_prop = "node-wwn";
const char *fcp_port_wwn_prop = "port-wwn";
const char *fcp_conf_wwn_prop = "fc-port-wwn";
const char *fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
const char *fcp_manual_config_only = "manual_configuration_only";
const char *fcp_init_port_prop = "initiator-port";
const char *fcp_tgt_port_prop = "target-port";
const char *fcp_lun_blacklist_prop = "pwwn-lun-blacklist";

/* Head of the list of attached ports; protected by fcp_global_mutex */
static struct fcp_port *fcp_port_head = NULL;
static ddi_eventcookie_t fcp_insert_eid;
static ddi_eventcookie_t fcp_remove_eid;
994
/* NDI event definitions for FC-AL device insertion/removal notification */
static ndi_event_definition_t fcp_ndi_event_defs[] = {
	{ FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
	{ FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
};

/*
 * List of valid commands for the scsi_ioctl call
 */
static uint8_t scsi_ioctl_list[] = {
	SCMD_INQUIRY,
	SCMD_REPORT_LUN,
	SCMD_READ_CAPACITY
};
1008
1009 /*
1010 * this is used to dummy up a report lun response for cases
1011 * where the target doesn't support it
1012 */
1013 static uchar_t fcp_dummy_lun[] = {
1014 0x00, /* MSB length (length = no of luns * 8) */
1015 0x00,
1016 0x00,
1017 0x08, /* LSB length */
1018 0x00, /* MSB reserved */
1019 0x00,
1020 0x00,
1021 0x00, /* LSB reserved */
1022 FCP_PD_ADDRESSING,
1023 0x00, /* LUN is ZERO at the first level */
1024 0x00,
1025 0x00, /* second level is zero */
1026 0x00,
1027 0x00, /* third level is zero */
1028 0x00,
1029 0x00 /* fourth level is zero */
1030 };
1031
/*
 * Table indexed by an FC-AL physical address (AL_PA, 0x00..0xF9);
 * entries appear to map each valid AL_PA to its loop index, with
 * invalid AL_PAs mapping to 0.
 * NOTE(review): semantics inferred from the name and the monotonically
 * decreasing non-zero entries -- confirm against the FC-AL (ANSI X3.272)
 * valid-AL_PA table before relying on this description.
 */
static uchar_t fcp_alpa_to_switch[] = {
	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
1058
/*
 * Product-id string; presumably compared against the SCSI INQUIRY
 * product-id field of the VICOM SVE box listed in
 * fcp_symmetric_disk_table ("SUN     SESS01") -- verify at the caller.
 */
static caddr_t pid = "SESS01	  ";

/* Lock-analysis annotations for lint; compiled out of real builds. */
#if !defined(lint)

_NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
    fcp_port::fcp_next fcp_watchdog_id))

_NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))

_NOTE(SCHEME_PROTECTS_DATA("Unshared",
    fcp_insert_eid
    fcp_remove_eid
    fcp_watchdog_time))

_NOTE(SCHEME_PROTECTS_DATA("Unshared",
    fcp_cb_ops
    fcp_ops
    callb_cpr))

#endif /* lint */
1079
1080 /*
1081 * This table is used to determine whether or not it's safe to copy in
1082 * the target node name for a lun. Since all luns behind the same target
1083 * have the same wwnn, only tagets that do not support multiple luns are
1084 * eligible to be enumerated under mpxio if they aren't page83 compliant.
1085 */
1086
1087 char *fcp_symmetric_disk_table[] = {
1088 "SEAGATE ST",
1089 "IBM DDYFT",
1090 "SUNW SUNWGS", /* Daktari enclosure */
1091 "SUN SENA", /* SES device */
1092 "SUN SESS01" /* VICOM SVE box */
1093 };
1094
1095 int fcp_symmetric_disk_table_size =
1096 sizeof (fcp_symmetric_disk_table)/sizeof (char *);
1097
1098 /*
1099 * This structure is bogus. scsi_hba_attach_setup() requires, as in the kernel
1100 * will panic if you don't pass this in to the routine, this information.
1101 * Need to determine what the actual impact to the system is by providing
1102 * this information if any. Since dma allocation is done in pkt_init it may
1103 * not have any impact. These values are straight from the Writing Device
1104 * Driver manual.
1105 */
1106 static ddi_dma_attr_t pseudo_fca_dma_attr = {
1107 DMA_ATTR_V0, /* ddi_dma_attr version */
1108 0, /* low address */
1109 0xffffffff, /* high address */
1110 0x00ffffff, /* counter upper bound */
1111 1, /* alignment requirements */
1112 0x3f, /* burst sizes */
1113 1, /* minimum DMA access */
1114 0xffffffff, /* maximum DMA access */
1115 (1 << 24) - 1, /* segment boundary restrictions */
1116 1, /* scater/gather list length */
1117 512, /* device granularity */
1118 0 /* DMA flags */
1119 };
1120
1121 /*
1122 * The _init(9e) return value should be that of mod_install(9f). Under
1123 * some circumstances, a failure may not be related mod_install(9f) and
1124 * one would then require a return value to indicate the failure. Looking
1125 * at mod_install(9f), it is expected to return 0 for success and non-zero
1126 * for failure. mod_install(9f) for device drivers, further goes down the
1127 * calling chain and ends up in ddi_installdrv(), whose return values are
1128 * DDI_SUCCESS and DDI_FAILURE - There are also other functions in the
1129 * calling chain of mod_install(9f) which return values like EINVAL and
1130 * in some even return -1.
1131 *
1132 * To work around the vagaries of the mod_install() calling chain, return
1133 * either 0 or ENODEV depending on the success or failure of mod_install()
1134 */
1135 int
1136 _init(void)
1137 {
1138 int rval;
1139
1140 /*
1141 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
1142 * before registering with the transport first.
1143 */
1144 if (ddi_soft_state_init(&fcp_softstate,
1145 sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1146 return (EINVAL);
1147 }
1148
1149 mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1150 mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1151
1152 if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1153 cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1154 mutex_destroy(&fcp_global_mutex);
1155 mutex_destroy(&fcp_ioctl_mutex);
1156 ddi_soft_state_fini(&fcp_softstate);
1157 return (ENODEV);
1158 }
1159
1160 fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1161
1162 if ((rval = mod_install(&modlinkage)) != 0) {
1163 fc_trace_free_logq(fcp_logq);
1164 (void) fc_ulp_remove(&fcp_modinfo);
1165 mutex_destroy(&fcp_global_mutex);
1166 mutex_destroy(&fcp_ioctl_mutex);
1167 ddi_soft_state_fini(&fcp_softstate);
1168 rval = ENODEV;
1169 }
1170
1171 return (rval);
1172 }
1173
1174
1175 /*
1176 * the system is done with us as a driver, so clean up
1177 */
1178 int
1179 _fini(void)
1180 {
1181 int rval;
1182
1183 /*
1184 * don't start cleaning up until we know that the module remove
1185 * has worked -- if this works, then we know that each instance
1186 * has successfully been DDI_DETACHed
1187 */
1188 if ((rval = mod_remove(&modlinkage)) != 0) {
1189 return (rval);
1190 }
1191
1192 (void) fc_ulp_remove(&fcp_modinfo);
1193
1194 ddi_soft_state_fini(&fcp_softstate);
1195 mutex_destroy(&fcp_global_mutex);
1196 mutex_destroy(&fcp_ioctl_mutex);
1197 fc_trace_free_logq(fcp_logq);
1198
1199 return (rval);
1200 }
1201
1202
/*
 * _info(9E) -- report module information via mod_info(9F).
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
1208
1209
1210 /*
1211 * attach the module
1212 */
1213 static int
1214 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1215 {
1216 int rval = DDI_SUCCESS;
1217
1218 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1219 FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1220
1221 if (cmd == DDI_ATTACH) {
1222 /* The FCP pseudo device is created here. */
1223 mutex_enter(&fcp_global_mutex);
1224 fcp_global_dip = devi;
1225 mutex_exit(&fcp_global_mutex);
1226
1227 if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1228 0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1229 ddi_report_dev(fcp_global_dip);
1230 } else {
1231 cmn_err(CE_WARN, "FCP: Cannot create minor node");
1232 mutex_enter(&fcp_global_mutex);
1233 fcp_global_dip = NULL;
1234 mutex_exit(&fcp_global_mutex);
1235
1236 rval = DDI_FAILURE;
1237 }
1238 /*
1239 * We check the fcp_offline_delay property at this
1240 * point. This variable is global for the driver,
1241 * not specific to an instance.
1242 *
1243 * We do not recommend setting the value to less
1244 * than 10 seconds (RA_TOV_els), or greater than
1245 * 60 seconds.
1246 */
1247 fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1248 devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1249 "fcp_offline_delay", FCP_OFFLINE_DELAY);
1250 if ((fcp_offline_delay < 10) ||
1251 (fcp_offline_delay > 60)) {
1252 cmn_err(CE_WARN, "Setting fcp_offline_delay "
1253 "to %d second(s). This is outside the "
1254 "recommended range of 10..60 seconds.",
1255 fcp_offline_delay);
1256 }
1257 }
1258
1259 return (rval);
1260 }
1261
1262
1263 /*ARGSUSED*/
1264 static int
1265 fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1266 {
1267 int res = DDI_SUCCESS;
1268
1269 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1270 FCP_BUF_LEVEL_8, 0, "module detach: cmd=0x%x", cmd);
1271
1272 if (cmd == DDI_DETACH) {
1273 /*
1274 * Check if there are active ports/threads. If there
1275 * are any, we will fail, else we will succeed (there
1276 * should not be much to clean up)
1277 */
1278 mutex_enter(&fcp_global_mutex);
1279 FCP_DTRACE(fcp_logq, "fcp",
1280 fcp_trace, FCP_BUF_LEVEL_8, 0, "port_head=%p",
1281 (void *) fcp_port_head);
1282
1283 if (fcp_port_head == NULL) {
1284 ddi_remove_minor_node(fcp_global_dip, NULL);
1285 fcp_global_dip = NULL;
1286 mutex_exit(&fcp_global_mutex);
1287 } else {
1288 mutex_exit(&fcp_global_mutex);
1289 res = DDI_FAILURE;
1290 }
1291 }
1292 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1293 FCP_BUF_LEVEL_8, 0, "module detach returning %d", res);
1294
1295 return (res);
1296 }
1297
1298
1299 /* ARGSUSED */
1300 static int
1301 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1302 {
1303 if (otype != OTYP_CHR) {
1304 return (EINVAL);
1305 }
1306
1307 /*
1308 * Allow only root to talk;
1309 */
1310 if (drv_priv(credp)) {
1311 return (EPERM);
1312 }
1313
1314 mutex_enter(&fcp_global_mutex);
1315 if (fcp_oflag & FCP_EXCL) {
1316 mutex_exit(&fcp_global_mutex);
1317 return (EBUSY);
1318 }
1319
1320 if (flag & FEXCL) {
1321 if (fcp_oflag & FCP_OPEN) {
1322 mutex_exit(&fcp_global_mutex);
1323 return (EBUSY);
1324 }
1325 fcp_oflag |= FCP_EXCL;
1326 }
1327 fcp_oflag |= FCP_OPEN;
1328 mutex_exit(&fcp_global_mutex);
1329
1330 return (0);
1331 }
1332
1333
1334 /* ARGSUSED */
1335 static int
1336 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1337 {
1338 if (otype != OTYP_CHR) {
1339 return (EINVAL);
1340 }
1341
1342 mutex_enter(&fcp_global_mutex);
1343 if (!(fcp_oflag & FCP_OPEN)) {
1344 mutex_exit(&fcp_global_mutex);
1345 return (ENODEV);
1346 }
1347 fcp_oflag = FCP_IDLE;
1348 mutex_exit(&fcp_global_mutex);
1349
1350 return (0);
1351 }
1352
1353
1354 /*
1355 * fcp_ioctl
1356 * Entry point for the FCP ioctls
1357 *
1358 * Input:
1359 * See ioctl(9E)
1360 *
1361 * Output:
1362 * See ioctl(9E)
1363 *
1364 * Returns:
1365 * See ioctl(9E)
1366 *
1367 * Context:
1368 * Kernel context.
1369 */
1370 /* ARGSUSED */
1371 static int
1372 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1373 int *rval)
1374 {
1375 int ret = 0;
1376
1377 mutex_enter(&fcp_global_mutex);
1378 if (!(fcp_oflag & FCP_OPEN)) {
1379 mutex_exit(&fcp_global_mutex);
1380 return (ENXIO);
1381 }
1382 mutex_exit(&fcp_global_mutex);
1383
1384 switch (cmd) {
1385 case FCP_TGT_INQUIRY:
1386 case FCP_TGT_CREATE:
1387 case FCP_TGT_DELETE:
1388 ret = fcp_setup_device_data_ioctl(cmd,
1389 (struct fcp_ioctl *)data, mode, rval);
1390 break;
1391
1392 case FCP_TGT_SEND_SCSI:
1393 mutex_enter(&fcp_ioctl_mutex);
1394 ret = fcp_setup_scsi_ioctl(
1395 (struct fcp_scsi_cmd *)data, mode, rval);
1396 mutex_exit(&fcp_ioctl_mutex);
1397 break;
1398
1399 case FCP_STATE_COUNT:
1400 ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1401 mode, rval);
1402 break;
1403 case FCP_GET_TARGET_MAPPINGS:
1404 ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1405 mode, rval);
1406 break;
1407 default:
1408 fcp_log(CE_WARN, NULL,
1409 "!Invalid ioctl opcode = 0x%x", cmd);
1410 ret = EINVAL;
1411 }
1412
1413 return (ret);
1414 }
1415
1416
1417 /*
1418 * fcp_setup_device_data_ioctl
1419 * Setup handler for the "device data" style of
1420 * ioctl for FCP. See "fcp_util.h" for data structure
1421 * definition.
1422 *
1423 * Input:
1424 * cmd = FCP ioctl command
1425 * data = ioctl data
1426 * mode = See ioctl(9E)
1427 *
1428 * Output:
1429 * data = ioctl data
1430 * rval = return value - see ioctl(9E)
1431 *
1432 * Returns:
1433 * See ioctl(9E)
1434 *
1435 * Context:
1436 * Kernel context.
1437 */
1438 /* ARGSUSED */
1439 static int
1440 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1441 int *rval)
1442 {
1443 struct fcp_port *pptr;
1444 struct device_data *dev_data;
1445 uint32_t link_cnt;
1446 la_wwn_t *wwn_ptr = NULL;
1447 struct fcp_tgt *ptgt = NULL;
1448 struct fcp_lun *plun = NULL;
1449 int i, error;
1450 struct fcp_ioctl fioctl;
1451
1452 #ifdef _MULTI_DATAMODEL
1453 switch (ddi_model_convert_from(mode & FMODELS)) {
1454 case DDI_MODEL_ILP32: {
1455 struct fcp32_ioctl f32_ioctl;
1456
1457 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1458 sizeof (struct fcp32_ioctl), mode)) {
1459 return (EFAULT);
1460 }
1461 fioctl.fp_minor = f32_ioctl.fp_minor;
1462 fioctl.listlen = f32_ioctl.listlen;
1463 fioctl.list = (caddr_t)(long)f32_ioctl.list;
1464 break;
1465 }
1466 case DDI_MODEL_NONE:
1467 if (ddi_copyin((void *)data, (void *)&fioctl,
1468 sizeof (struct fcp_ioctl), mode)) {
1469 return (EFAULT);
1470 }
1471 break;
1472 }
1473
1474 #else /* _MULTI_DATAMODEL */
1475 if (ddi_copyin((void *)data, (void *)&fioctl,
1476 sizeof (struct fcp_ioctl), mode)) {
1477 return (EFAULT);
1478 }
1479 #endif /* _MULTI_DATAMODEL */
1480
1481 /*
1482 * Right now we can assume that the minor number matches with
1483 * this instance of fp. If this changes we will need to
1484 * revisit this logic.
1485 */
1486 mutex_enter(&fcp_global_mutex);
1487 pptr = fcp_port_head;
1488 while (pptr) {
1489 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1490 break;
1491 } else {
1492 pptr = pptr->port_next;
1493 }
1494 }
1495 mutex_exit(&fcp_global_mutex);
1496 if (pptr == NULL) {
1497 return (ENXIO);
1498 }
1499 mutex_enter(&pptr->port_mutex);
1500
1501
1502 if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1503 fioctl.listlen, KM_NOSLEEP)) == NULL) {
1504 mutex_exit(&pptr->port_mutex);
1505 return (ENOMEM);
1506 }
1507
1508 if (ddi_copyin(fioctl.list, dev_data,
1509 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1510 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1511 mutex_exit(&pptr->port_mutex);
1512 return (EFAULT);
1513 }
1514 link_cnt = pptr->port_link_cnt;
1515
1516 if (cmd == FCP_TGT_INQUIRY) {
1517 wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1518 if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1519 sizeof (wwn_ptr->raw_wwn)) == 0) {
1520 /* This ioctl is requesting INQ info of local HBA */
1521 mutex_exit(&pptr->port_mutex);
1522 dev_data[0].dev0_type = DTYPE_UNKNOWN;
1523 dev_data[0].dev_status = 0;
1524 if (ddi_copyout(dev_data, fioctl.list,
1525 (sizeof (struct device_data)) * fioctl.listlen,
1526 mode)) {
1527 kmem_free(dev_data,
1528 sizeof (*dev_data) * fioctl.listlen);
1529 return (EFAULT);
1530 }
1531 kmem_free(dev_data,
1532 sizeof (*dev_data) * fioctl.listlen);
1533 #ifdef _MULTI_DATAMODEL
1534 switch (ddi_model_convert_from(mode & FMODELS)) {
1535 case DDI_MODEL_ILP32: {
1536 struct fcp32_ioctl f32_ioctl;
1537 f32_ioctl.fp_minor = fioctl.fp_minor;
1538 f32_ioctl.listlen = fioctl.listlen;
1539 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1540 if (ddi_copyout((void *)&f32_ioctl,
1541 (void *)data,
1542 sizeof (struct fcp32_ioctl), mode)) {
1543 return (EFAULT);
1544 }
1545 break;
1546 }
1547 case DDI_MODEL_NONE:
1548 if (ddi_copyout((void *)&fioctl, (void *)data,
1549 sizeof (struct fcp_ioctl), mode)) {
1550 return (EFAULT);
1551 }
1552 break;
1553 }
1554 #else /* _MULTI_DATAMODEL */
1555 if (ddi_copyout((void *)&fioctl, (void *)data,
1556 sizeof (struct fcp_ioctl), mode)) {
1557 return (EFAULT);
1558 }
1559 #endif /* _MULTI_DATAMODEL */
1560 return (0);
1561 }
1562 }
1563
1564 if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1565 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1566 mutex_exit(&pptr->port_mutex);
1567 return (ENXIO);
1568 }
1569
1570 for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1571 i++) {
1572 wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1573
1574 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1575
1576
1577 dev_data[i].dev_status = ENXIO;
1578
1579 if ((ptgt = fcp_lookup_target(pptr,
1580 (uchar_t *)wwn_ptr)) == NULL) {
1581 mutex_exit(&pptr->port_mutex);
1582 if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1583 wwn_ptr, &error, 0) == NULL) {
1584 dev_data[i].dev_status = ENODEV;
1585 mutex_enter(&pptr->port_mutex);
1586 continue;
1587 } else {
1588
1589 dev_data[i].dev_status = EAGAIN;
1590
1591 mutex_enter(&pptr->port_mutex);
1592 continue;
1593 }
1594 } else {
1595 mutex_enter(&ptgt->tgt_mutex);
1596 if (ptgt->tgt_state & (FCP_TGT_MARK |
1597 FCP_TGT_BUSY)) {
1598 dev_data[i].dev_status = EAGAIN;
1599 mutex_exit(&ptgt->tgt_mutex);
1600 continue;
1601 }
1602
1603 if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1604 if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1605 dev_data[i].dev_status = ENOTSUP;
1606 } else {
1607 dev_data[i].dev_status = ENXIO;
1608 }
1609 mutex_exit(&ptgt->tgt_mutex);
1610 continue;
1611 }
1612
1613 switch (cmd) {
1614 case FCP_TGT_INQUIRY:
1615 /*
1616 * The reason we give device type of
1617 * lun 0 only even though in some
1618 * cases(like maxstrat) lun 0 device
1619 * type may be 0x3f(invalid) is that
1620 * for bridge boxes target will appear
1621 * as luns and the first lun could be
1622 * a device that utility may not care
1623 * about (like a tape device).
1624 */
1625 dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1626 dev_data[i].dev_status = 0;
1627 mutex_exit(&ptgt->tgt_mutex);
1628
1629 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1630 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1631 } else {
1632 dev_data[i].dev0_type = plun->lun_type;
1633 }
1634 mutex_enter(&ptgt->tgt_mutex);
1635 break;
1636
1637 case FCP_TGT_CREATE:
1638 mutex_exit(&ptgt->tgt_mutex);
1639 mutex_exit(&pptr->port_mutex);
1640
1641 /*
1642 * serialize state change call backs.
1643 * only one call back will be handled
1644 * at a time.
1645 */
1646 mutex_enter(&fcp_global_mutex);
1647 if (fcp_oflag & FCP_BUSY) {
1648 mutex_exit(&fcp_global_mutex);
1649 if (dev_data) {
1650 kmem_free(dev_data,
1651 sizeof (*dev_data) *
1652 fioctl.listlen);
1653 }
1654 return (EBUSY);
1655 }
1656 fcp_oflag |= FCP_BUSY;
1657 mutex_exit(&fcp_global_mutex);
1658
1659 dev_data[i].dev_status =
1660 fcp_create_on_demand(pptr,
1661 wwn_ptr->raw_wwn);
1662
1663 if (dev_data[i].dev_status != 0) {
1664 char buf[25];
1665
1666 for (i = 0; i < FC_WWN_SIZE; i++) {
1667 (void) sprintf(&buf[i << 1],
1668 "%02x",
1669 wwn_ptr->raw_wwn[i]);
1670 }
1671
1672 fcp_log(CE_WARN, pptr->port_dip,
1673 "!Failed to create nodes for"
1674 " pwwn=%s; error=%x", buf,
1675 dev_data[i].dev_status);
1676 }
1677
1678 /* allow state change call backs again */
1679 mutex_enter(&fcp_global_mutex);
1680 fcp_oflag &= ~FCP_BUSY;
1681 mutex_exit(&fcp_global_mutex);
1682
1683 mutex_enter(&pptr->port_mutex);
1684 mutex_enter(&ptgt->tgt_mutex);
1685
1686 break;
1687
1688 case FCP_TGT_DELETE:
1689 break;
1690
1691 default:
1692 fcp_log(CE_WARN, pptr->port_dip,
1693 "!Invalid device data ioctl "
1694 "opcode = 0x%x", cmd);
1695 }
1696 mutex_exit(&ptgt->tgt_mutex);
1697 }
1698 }
1699 mutex_exit(&pptr->port_mutex);
1700
1701 if (ddi_copyout(dev_data, fioctl.list,
1702 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1703 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1704 return (EFAULT);
1705 }
1706 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1707
1708 #ifdef _MULTI_DATAMODEL
1709 switch (ddi_model_convert_from(mode & FMODELS)) {
1710 case DDI_MODEL_ILP32: {
1711 struct fcp32_ioctl f32_ioctl;
1712
1713 f32_ioctl.fp_minor = fioctl.fp_minor;
1714 f32_ioctl.listlen = fioctl.listlen;
1715 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1716 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1717 sizeof (struct fcp32_ioctl), mode)) {
1718 return (EFAULT);
1719 }
1720 break;
1721 }
1722 case DDI_MODEL_NONE:
1723 if (ddi_copyout((void *)&fioctl, (void *)data,
1724 sizeof (struct fcp_ioctl), mode)) {
1725 return (EFAULT);
1726 }
1727 break;
1728 }
1729 #else /* _MULTI_DATAMODEL */
1730
1731 if (ddi_copyout((void *)&fioctl, (void *)data,
1732 sizeof (struct fcp_ioctl), mode)) {
1733 return (EFAULT);
1734 }
1735 #endif /* _MULTI_DATAMODEL */
1736
1737 return (0);
1738 }
1739
1740 /*
1741 * Fetch the target mappings (path, etc.) for all LUNs
1742 * on this port.
1743 */
1744 /* ARGSUSED */
1745 static int
1746 fcp_get_target_mappings(struct fcp_ioctl *data,
1747 int mode, int *rval)
1748 {
1749 struct fcp_port *pptr;
1750 fc_hba_target_mappings_t *mappings;
1751 fc_hba_mapping_entry_t *map;
1752 struct fcp_tgt *ptgt = NULL;
1753 struct fcp_lun *plun = NULL;
1754 int i, mapIndex, mappingSize;
1755 int listlen;
1756 struct fcp_ioctl fioctl;
1757 char *path;
1758 fcp_ent_addr_t sam_lun_addr;
1759
1760 #ifdef _MULTI_DATAMODEL
1761 switch (ddi_model_convert_from(mode & FMODELS)) {
1762 case DDI_MODEL_ILP32: {
1763 struct fcp32_ioctl f32_ioctl;
1764
1765 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1766 sizeof (struct fcp32_ioctl), mode)) {
1767 return (EFAULT);
1768 }
1769 fioctl.fp_minor = f32_ioctl.fp_minor;
1770 fioctl.listlen = f32_ioctl.listlen;
1771 fioctl.list = (caddr_t)(long)f32_ioctl.list;
1772 break;
1773 }
1774 case DDI_MODEL_NONE:
1775 if (ddi_copyin((void *)data, (void *)&fioctl,
1776 sizeof (struct fcp_ioctl), mode)) {
1777 return (EFAULT);
1778 }
1779 break;
1780 }
1781
1782 #else /* _MULTI_DATAMODEL */
1783 if (ddi_copyin((void *)data, (void *)&fioctl,
1784 sizeof (struct fcp_ioctl), mode)) {
1785 return (EFAULT);
1786 }
1787 #endif /* _MULTI_DATAMODEL */
1788
1789 /*
1790 * Right now we can assume that the minor number matches with
1791 * this instance of fp. If this changes we will need to
1792 * revisit this logic.
1793 */
1794 mutex_enter(&fcp_global_mutex);
1795 pptr = fcp_port_head;
1796 while (pptr) {
1797 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1798 break;
1799 } else {
1800 pptr = pptr->port_next;
1801 }
1802 }
1803 mutex_exit(&fcp_global_mutex);
1804 if (pptr == NULL) {
1805 cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
1806 fioctl.fp_minor);
1807 return (ENXIO);
1808 }
1809
1810
1811 /* We use listlen to show the total buffer size */
1812 mappingSize = fioctl.listlen;
1813
1814 /* Now calculate how many mapping entries will fit */
1815 listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
1816 - sizeof (fc_hba_target_mappings_t);
1817 if (listlen <= 0) {
1818 cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
1819 return (ENXIO);
1820 }
1821 listlen = listlen / sizeof (fc_hba_mapping_entry_t);
1822
1823 if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
1824 return (ENOMEM);
1825 }
1826 mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;
1827
1828 /* Now get to work */
1829 mapIndex = 0;
1830
1831 mutex_enter(&pptr->port_mutex);
1832 /* Loop through all targets on this port */
1833 for (i = 0; i < FCP_NUM_HASH; i++) {
1834 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
1835 ptgt = ptgt->tgt_next) {
1836
1837 mutex_enter(&ptgt->tgt_mutex);
1838
1839 /* Loop through all LUNs on this target */
1840 for (plun = ptgt->tgt_lun; plun != NULL;
1841 plun = plun->lun_next) {
1842 if (plun->lun_state & FCP_LUN_OFFLINE) {
1843 continue;
1844 }
1845
1846 path = fcp_get_lun_path(plun);
1847 if (path == NULL) {
1848 continue;
1849 }
1850
1851 if (mapIndex >= listlen) {
1852 mapIndex ++;
1853 kmem_free(path, MAXPATHLEN);
1854 continue;
1855 }
1856 map = &mappings->entries[mapIndex++];
1857 bcopy(path, map->targetDriver,
1858 sizeof (map->targetDriver));
1859 map->d_id = ptgt->tgt_d_id;
1860 map->busNumber = 0;
1861 map->targetNumber = ptgt->tgt_d_id;
1862 map->osLUN = plun->lun_num;
1863
1864 /*
1865 * We had swapped lun when we stored it in
1866 * lun_addr. We need to swap it back before
1867 * returning it to user land
1868 */
1869
1870 sam_lun_addr.ent_addr_0 =
1871 BE_16(plun->lun_addr.ent_addr_0);
1872 sam_lun_addr.ent_addr_1 =
1873 BE_16(plun->lun_addr.ent_addr_1);
1874 sam_lun_addr.ent_addr_2 =
1875 BE_16(plun->lun_addr.ent_addr_2);
1876 sam_lun_addr.ent_addr_3 =
1877 BE_16(plun->lun_addr.ent_addr_3);
1878
1879 bcopy(&sam_lun_addr, &map->samLUN,
1880 FCP_LUN_SIZE);
1881 bcopy(ptgt->tgt_node_wwn.raw_wwn,
1882 map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
1883 bcopy(ptgt->tgt_port_wwn.raw_wwn,
1884 map->PortWWN.raw_wwn, sizeof (la_wwn_t));
1885
1886 if (plun->lun_guid) {
1887
1888 /* convert ascii wwn to bytes */
1889 fcp_ascii_to_wwn(plun->lun_guid,
1890 map->guid, sizeof (map->guid));
1891
1892 if ((sizeof (map->guid)) <
1893 plun->lun_guid_size / 2) {
1894 cmn_err(CE_WARN,
1895 "fcp_get_target_mappings:"
1896 "guid copy space "
1897 "insufficient."
1898 "Copy Truncation - "
1899 "available %d; need %d",
1900 (int)sizeof (map->guid),
1901 (int)
1902 plun->lun_guid_size / 2);
1903 }
1904 }
1905 kmem_free(path, MAXPATHLEN);
1906 }
1907 mutex_exit(&ptgt->tgt_mutex);
1908 }
1909 }
1910 mutex_exit(&pptr->port_mutex);
1911 mappings->numLuns = mapIndex;
1912
1913 if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
1914 kmem_free(mappings, mappingSize);
1915 return (EFAULT);
1916 }
1917 kmem_free(mappings, mappingSize);
1918
1919 #ifdef _MULTI_DATAMODEL
1920 switch (ddi_model_convert_from(mode & FMODELS)) {
1921 case DDI_MODEL_ILP32: {
1922 struct fcp32_ioctl f32_ioctl;
1923
1924 f32_ioctl.fp_minor = fioctl.fp_minor;
1925 f32_ioctl.listlen = fioctl.listlen;
1926 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1927 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1928 sizeof (struct fcp32_ioctl), mode)) {
1929 return (EFAULT);
1930 }
1931 break;
1932 }
1933 case DDI_MODEL_NONE:
1934 if (ddi_copyout((void *)&fioctl, (void *)data,
1935 sizeof (struct fcp_ioctl), mode)) {
1936 return (EFAULT);
1937 }
1938 break;
1939 }
1940 #else /* _MULTI_DATAMODEL */
1941
1942 if (ddi_copyout((void *)&fioctl, (void *)data,
1943 sizeof (struct fcp_ioctl), mode)) {
1944 return (EFAULT);
1945 }
1946 #endif /* _MULTI_DATAMODEL */
1947
1948 return (0);
1949 }
1950
1951 /*
1952 * fcp_setup_scsi_ioctl
1953 * Setup handler for the "scsi passthru" style of
1954 * ioctl for FCP. See "fcp_util.h" for data structure
1955 * definition.
1956 *
1957 * Input:
1958 * u_fscsi = ioctl data (user address space)
1959 * mode = See ioctl(9E)
1960 *
1961 * Output:
1962 * u_fscsi = ioctl data (user address space)
1963 * rval = return value - see ioctl(9E)
1964 *
1965 * Returns:
1966 * 0 = OK
1967 * EAGAIN = See errno.h
1968 * EBUSY = See errno.h
1969 * EFAULT = See errno.h
1970 * EINTR = See errno.h
1971 * EINVAL = See errno.h
1972 * EIO = See errno.h
1973 * ENOMEM = See errno.h
1974 * ENXIO = See errno.h
1975 *
1976 * Context:
1977 * Kernel context.
1978 */
1979 /* ARGSUSED */
1980 static int
1981 fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
1982 int mode, int *rval)
1983 {
1984 int ret = 0;
1985 int temp_ret;
1986 caddr_t k_cdbbufaddr = NULL;
1987 caddr_t k_bufaddr = NULL;
1988 caddr_t k_rqbufaddr = NULL;
1989 caddr_t u_cdbbufaddr;
1990 caddr_t u_bufaddr;
1991 caddr_t u_rqbufaddr;
1992 struct fcp_scsi_cmd k_fscsi;
1993
1994 /*
1995 * Get fcp_scsi_cmd array element from user address space
1996 */
1997 if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
1998 != 0) {
1999 return (ret);
2000 }
2001
2002
2003 /*
2004 * Even though kmem_alloc() checks the validity of the
2005 * buffer length, this check is needed when the
2006 * kmem_flags set and the zero buffer length is passed.
2007 */
2008 if ((k_fscsi.scsi_cdblen <= 0) ||
2009 (k_fscsi.scsi_buflen <= 0) ||
2010 (k_fscsi.scsi_rqlen <= 0)) {
2011 return (EINVAL);
2012 }
2013
2014 /*
2015 * Allocate data for fcp_scsi_cmd pointer fields
2016 */
2017 if (ret == 0) {
2018 k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
2019 k_bufaddr = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
2020 k_rqbufaddr = kmem_alloc(k_fscsi.scsi_rqlen, KM_NOSLEEP);
2021
2022 if (k_cdbbufaddr == NULL ||
2023 k_bufaddr == NULL ||
2024 k_rqbufaddr == NULL) {
2025 ret = ENOMEM;
2026 }
2027 }
2028
2029 /*
2030 * Get fcp_scsi_cmd pointer fields from user
2031 * address space
2032 */
2033 if (ret == 0) {
2034 u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
2035 u_bufaddr = k_fscsi.scsi_bufaddr;
2036 u_rqbufaddr = k_fscsi.scsi_rqbufaddr;
2037
2038 if (ddi_copyin(u_cdbbufaddr,
2039 k_cdbbufaddr,
2040 k_fscsi.scsi_cdblen,
2041 mode)) {
2042 ret = EFAULT;
2043 } else if (ddi_copyin(u_bufaddr,
2044 k_bufaddr,
2045 k_fscsi.scsi_buflen,
2046 mode)) {
2047 ret = EFAULT;
2048 } else if (ddi_copyin(u_rqbufaddr,
2049 k_rqbufaddr,
2050 k_fscsi.scsi_rqlen,
2051 mode)) {
2052 ret = EFAULT;
2053 }
2054 }
2055
2056 /*
2057 * Send scsi command (blocking)
2058 */
2059 if (ret == 0) {
2060 /*
2061 * Prior to sending the scsi command, the
2062 * fcp_scsi_cmd data structure must contain kernel,
2063 * not user, addresses.
2064 */
2065 k_fscsi.scsi_cdbbufaddr = k_cdbbufaddr;
2066 k_fscsi.scsi_bufaddr = k_bufaddr;
2067 k_fscsi.scsi_rqbufaddr = k_rqbufaddr;
2068
2069 ret = fcp_send_scsi_ioctl(&k_fscsi);
2070
2071 /*
2072 * After sending the scsi command, the
2073 * fcp_scsi_cmd data structure must contain user,
2074 * not kernel, addresses.
2075 */
2076 k_fscsi.scsi_cdbbufaddr = u_cdbbufaddr;
2077 k_fscsi.scsi_bufaddr = u_bufaddr;
2078 k_fscsi.scsi_rqbufaddr = u_rqbufaddr;
2079 }
2080
2081 /*
2082 * Put fcp_scsi_cmd pointer fields to user address space
2083 */
2084 if (ret == 0) {
2085 if (ddi_copyout(k_cdbbufaddr,
2086 u_cdbbufaddr,
2087 k_fscsi.scsi_cdblen,
2088 mode)) {
2089 ret = EFAULT;
2090 } else if (ddi_copyout(k_bufaddr,
2091 u_bufaddr,
2092 k_fscsi.scsi_buflen,
2093 mode)) {
2094 ret = EFAULT;
2095 } else if (ddi_copyout(k_rqbufaddr,
2096 u_rqbufaddr,
2097 k_fscsi.scsi_rqlen,
2098 mode)) {
2099 ret = EFAULT;
2100 }
2101 }
2102
2103 /*
2104 * Free data for fcp_scsi_cmd pointer fields
2105 */
2106 if (k_cdbbufaddr != NULL) {
2107 kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
2108 }
2109 if (k_bufaddr != NULL) {
2110 kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
2111 }
2112 if (k_rqbufaddr != NULL) {
2113 kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
2114 }
2115
2116 /*
2117 * Put fcp_scsi_cmd array element to user address space
2118 */
2119 temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
2120 if (temp_ret != 0) {
2121 ret = temp_ret;
2122 }
2123
2124 /*
2125 * Return status
2126 */
2127 return (ret);
2128 }
2129
2130
2131 /*
2132 * fcp_copyin_scsi_cmd
2133 * Copy in fcp_scsi_cmd data structure from user address space.
2134 * The data may be in 32 bit or 64 bit modes.
2135 *
2136 * Input:
2137 * base_addr = from address (user address space)
2138 * mode = See ioctl(9E) and ddi_copyin(9F)
2139 *
2140 * Output:
2141 * fscsi = to address (kernel address space)
2142 *
2143 * Returns:
2144 * 0 = OK
2145 * EFAULT = Error
2146 *
2147 * Context:
2148 * Kernel context.
2149 */
2150 static int
2151 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2152 {
2153 #ifdef _MULTI_DATAMODEL
2154 struct fcp32_scsi_cmd f32scsi;
2155
2156 switch (ddi_model_convert_from(mode & FMODELS)) {
2157 case DDI_MODEL_ILP32:
2158 /*
2159 * Copy data from user address space
2160 */
2161 if (ddi_copyin((void *)base_addr,
2162 &f32scsi,
2163 sizeof (struct fcp32_scsi_cmd),
2164 mode)) {
2165 return (EFAULT);
2166 }
2167 /*
2168 * Convert from 32 bit to 64 bit
2169 */
2170 FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2171 break;
2172 case DDI_MODEL_NONE:
2173 /*
2174 * Copy data from user address space
2175 */
2176 if (ddi_copyin((void *)base_addr,
2177 fscsi,
2178 sizeof (struct fcp_scsi_cmd),
2179 mode)) {
2180 return (EFAULT);
2181 }
2182 break;
2183 }
2184 #else /* _MULTI_DATAMODEL */
2185 /*
2186 * Copy data from user address space
2187 */
2188 if (ddi_copyin((void *)base_addr,
2189 fscsi,
2190 sizeof (struct fcp_scsi_cmd),
2191 mode)) {
2192 return (EFAULT);
2193 }
2194 #endif /* _MULTI_DATAMODEL */
2195
2196 return (0);
2197 }
2198
2199
2200 /*
2201 * fcp_copyout_scsi_cmd
2202 * Copy out fcp_scsi_cmd data structure to user address space.
2203 * The data may be in 32 bit or 64 bit modes.
2204 *
 * Input:
 *	fscsi	= from address (kernel address space)
 *	mode	= See ioctl(9E) and ddi_copyout(9F)
 *
 * Output:
 *	base_addr = to address (user address space)
2211 *
2212 * Returns:
2213 * 0 = OK
2214 * EFAULT = Error
2215 *
2216 * Context:
2217 * Kernel context.
2218 */
2219 static int
2220 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2221 {
2222 #ifdef _MULTI_DATAMODEL
2223 struct fcp32_scsi_cmd f32scsi;
2224
2225 switch (ddi_model_convert_from(mode & FMODELS)) {
2226 case DDI_MODEL_ILP32:
2227 /*
2228 * Convert from 64 bit to 32 bit
2229 */
2230 FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2231 /*
2232 * Copy data to user address space
2233 */
2234 if (ddi_copyout(&f32scsi,
2235 (void *)base_addr,
2236 sizeof (struct fcp32_scsi_cmd),
2237 mode)) {
2238 return (EFAULT);
2239 }
2240 break;
2241 case DDI_MODEL_NONE:
2242 /*
2243 * Copy data to user address space
2244 */
2245 if (ddi_copyout(fscsi,
2246 (void *)base_addr,
2247 sizeof (struct fcp_scsi_cmd),
2248 mode)) {
2249 return (EFAULT);
2250 }
2251 break;
2252 }
2253 #else /* _MULTI_DATAMODEL */
2254 /*
2255 * Copy data to user address space
2256 */
2257 if (ddi_copyout(fscsi,
2258 (void *)base_addr,
2259 sizeof (struct fcp_scsi_cmd),
2260 mode)) {
2261 return (EFAULT);
2262 }
2263 #endif /* _MULTI_DATAMODEL */
2264
2265 return (0);
2266 }
2267
2268
2269 /*
2270 * fcp_send_scsi_ioctl
2271 * Sends the SCSI command in blocking mode.
2272 *
2273 * Input:
2274 * fscsi = SCSI command data structure
2275 *
2276 * Output:
2277 * fscsi = SCSI command data structure
2278 *
2279 * Returns:
2280 * 0 = OK
2281 * EAGAIN = See errno.h
2282 * EBUSY = See errno.h
2283 * EINTR = See errno.h
2284 * EINVAL = See errno.h
2285 * EIO = See errno.h
2286 * ENOMEM = See errno.h
2287 * ENXIO = See errno.h
2288 *
2289 * Context:
2290 * Kernel context.
2291 */
2292 static int
2293 fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
2294 {
2295 struct fcp_lun *plun = NULL;
2296 struct fcp_port *pptr = NULL;
2297 struct fcp_tgt *ptgt = NULL;
2298 fc_packet_t *fpkt = NULL;
2299 struct fcp_ipkt *icmd = NULL;
2300 int target_created = FALSE;
2301 fc_frame_hdr_t *hp;
2302 struct fcp_cmd fcp_cmd;
2303 struct fcp_cmd *fcmd;
2304 union scsi_cdb *scsi_cdb;
2305 la_wwn_t *wwn_ptr;
2306 int nodma;
2307 struct fcp_rsp *rsp;
2308 struct fcp_rsp_info *rsp_info;
2309 caddr_t rsp_sense;
2310 int buf_len;
2311 int info_len;
2312 int sense_len;
2313 struct scsi_extended_sense *sense_to = NULL;
2314 timeout_id_t tid;
2315 uint8_t reconfig_lun = FALSE;
2316 uint8_t reconfig_pending = FALSE;
2317 uint8_t scsi_cmd;
2318 int rsp_len;
2319 int cmd_index;
2320 int fc_status;
2321 int pkt_state;
2322 int pkt_action;
2323 int pkt_reason;
2324 int ret, xport_retval = ~FC_SUCCESS;
2325 int lcount;
2326 int tcount;
2327 int reconfig_status;
2328 int port_busy = FALSE;
2329 uchar_t *lun_string;
2330
2331 /*
2332 * Check valid SCSI command
2333 */
2334 scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
2335 ret = EINVAL;
2336 for (cmd_index = 0;
2337 cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
2338 ret != 0;
2339 cmd_index++) {
2340 /*
2341 * First byte of CDB is the SCSI command
2342 */
2343 if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
2344 ret = 0;
2345 }
2346 }
2347
2348 /*
2349 * Check inputs
2350 */
2351 if (fscsi->scsi_flags != FCP_SCSI_READ) {
2352 ret = EINVAL;
2353 } else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
2354 /* no larger than */
2355 ret = EINVAL;
2356 }
2357
2358
2359 /*
2360 * Find FC port
2361 */
2362 if (ret == 0) {
2363 /*
2364 * Acquire global mutex
2365 */
2366 mutex_enter(&fcp_global_mutex);
2367
2368 pptr = fcp_port_head;
2369 while (pptr) {
2370 if (pptr->port_instance ==
2371 (uint32_t)fscsi->scsi_fc_port_num) {
2372 break;
2373 } else {
2374 pptr = pptr->port_next;
2375 }
2376 }
2377
2378 if (pptr == NULL) {
2379 ret = ENXIO;
2380 } else {
2381 /*
2382 * fc_ulp_busy_port can raise power
2383 * so, we must not hold any mutexes involved in PM
2384 */
2385 mutex_exit(&fcp_global_mutex);
2386 ret = fc_ulp_busy_port(pptr->port_fp_handle);
2387 }
2388
2389 if (ret == 0) {
2390
2391 /* remember port is busy, so we will release later */
2392 port_busy = TRUE;
2393
2394 /*
2395 * If there is a reconfiguration in progress, wait
2396 * for it to complete.
2397 */
2398
2399 fcp_reconfig_wait(pptr);
2400
2401 /* reacquire mutexes in order */
2402 mutex_enter(&fcp_global_mutex);
2403 mutex_enter(&pptr->port_mutex);
2404
2405 /*
2406 * Will port accept DMA?
2407 */
2408 nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
2409 ? 1 : 0;
2410
2411 /*
2412 * If init or offline, device not known
2413 *
2414 * If we are discovering (onlining), we can
2415 * NOT obviously provide reliable data about
2416 * devices until it is complete
2417 */
2418 if (pptr->port_state & (FCP_STATE_INIT |
2419 FCP_STATE_OFFLINE)) {
2420 ret = ENXIO;
2421 } else if (pptr->port_state & FCP_STATE_ONLINING) {
2422 ret = EBUSY;
2423 } else {
2424 /*
2425 * Find target from pwwn
2426 *
2427 * The wwn must be put into a local
2428 * variable to ensure alignment.
2429 */
2430 wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2431 ptgt = fcp_lookup_target(pptr,
2432 (uchar_t *)wwn_ptr);
2433
2434 /*
2435 * If target not found,
2436 */
2437 if (ptgt == NULL) {
2438 /*
2439 * Note: Still have global &
2440 * port mutexes
2441 */
2442 mutex_exit(&pptr->port_mutex);
2443 ptgt = fcp_port_create_tgt(pptr,
2444 wwn_ptr, &ret, &fc_status,
2445 &pkt_state, &pkt_action,
2446 &pkt_reason);
2447 mutex_enter(&pptr->port_mutex);
2448
2449 fscsi->scsi_fc_status = fc_status;
2450 fscsi->scsi_pkt_state =
2451 (uchar_t)pkt_state;
2452 fscsi->scsi_pkt_reason = pkt_reason;
2453 fscsi->scsi_pkt_action =
2454 (uchar_t)pkt_action;
2455
2456 if (ptgt != NULL) {
2457 target_created = TRUE;
2458 } else if (ret == 0) {
2459 ret = ENOMEM;
2460 }
2461 }
2462
2463 if (ret == 0) {
2464 /*
2465 * Acquire target
2466 */
2467 mutex_enter(&ptgt->tgt_mutex);
2468
2469 /*
2470 * If target is mark or busy,
2471 * then target can not be used
2472 */
2473 if (ptgt->tgt_state &
2474 (FCP_TGT_MARK |
2475 FCP_TGT_BUSY)) {
2476 ret = EBUSY;
2477 } else {
2478 /*
2479 * Mark target as busy
2480 */
2481 ptgt->tgt_state |=
2482 FCP_TGT_BUSY;
2483 }
2484
2485 /*
2486 * Release target
2487 */
2488 lcount = pptr->port_link_cnt;
2489 tcount = ptgt->tgt_change_cnt;
2490 mutex_exit(&ptgt->tgt_mutex);
2491 }
2492 }
2493
2494 /*
2495 * Release port
2496 */
2497 mutex_exit(&pptr->port_mutex);
2498 }
2499
2500 /*
2501 * Release global mutex
2502 */
2503 mutex_exit(&fcp_global_mutex);
2504 }
2505
2506 if (ret == 0) {
2507 uint64_t belun = BE_64(fscsi->scsi_lun);
2508
2509 /*
2510 * If it's a target device, find lun from pwwn
2511 * The wwn must be put into a local
2512 * variable to ensure alignment.
2513 */
2514 mutex_enter(&pptr->port_mutex);
2515 wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2516 if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
2517 /* this is not a target */
2518 fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
2519 ret = ENXIO;
2520 } else if ((belun << 16) != 0) {
2521 /*
2522 * Since fcp only support PD and LU addressing method
2523 * so far, the last 6 bytes of a valid LUN are expected
2524 * to be filled with 00h.
2525 */
2526 fscsi->scsi_fc_status = FC_INVALID_LUN;
2527 cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
2528 " method 0x%02x with LUN number 0x%016" PRIx64,
2529 (uint8_t)(belun >> 62), belun);
2530 ret = ENXIO;
2531 } else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
2532 (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
2533 /*
2534 * This is a SCSI target, but no LUN at this
2535 * address.
2536 *
2537 * In the future, we may want to send this to
2538 * the target, and let it respond
2539 * appropriately
2540 */
2541 ret = ENXIO;
2542 }
2543 mutex_exit(&pptr->port_mutex);
2544 }
2545
2546 /*
2547 * Finished grabbing external resources
2548 * Allocate internal packet (icmd)
2549 */
2550 if (ret == 0) {
2551 /*
2552 * Calc rsp len assuming rsp info included
2553 */
2554 rsp_len = sizeof (struct fcp_rsp) +
2555 sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;
2556
2557 icmd = fcp_icmd_alloc(pptr, ptgt,
2558 sizeof (struct fcp_cmd),
2559 rsp_len,
2560 fscsi->scsi_buflen,
2561 nodma,
2562 lcount, /* ipkt_link_cnt */
2563 tcount, /* ipkt_change_cnt */
2564 0, /* cause */
2565 FC_INVALID_RSCN_COUNT); /* invalidate the count */
2566
2567 if (icmd == NULL) {
2568 ret = ENOMEM;
2569 } else {
2570 /*
2571 * Setup internal packet as sema sync
2572 */
2573 fcp_ipkt_sema_init(icmd);
2574 }
2575 }
2576
2577 if (ret == 0) {
2578 /*
2579 * Init fpkt pointer for use.
2580 */
2581
2582 fpkt = icmd->ipkt_fpkt;
2583
2584 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
2585 fpkt->pkt_tran_type = FC_PKT_FCP_READ; /* only rd for now */
2586 fpkt->pkt_timeout = fscsi->scsi_timeout;
2587
2588 /*
2589 * Init fcmd pointer for use by SCSI command
2590 */
2591
2592 if (nodma) {
2593 fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
2594 } else {
2595 fcmd = &fcp_cmd;
2596 }
2597 bzero(fcmd, sizeof (struct fcp_cmd));
2598 ptgt = plun->lun_tgt;
2599
2600 lun_string = (uchar_t *)&fscsi->scsi_lun;
2601
2602 fcmd->fcp_ent_addr.ent_addr_0 =
2603 BE_16(*(uint16_t *)&(lun_string[0]));
2604 fcmd->fcp_ent_addr.ent_addr_1 =
2605 BE_16(*(uint16_t *)&(lun_string[2]));
2606 fcmd->fcp_ent_addr.ent_addr_2 =
2607 BE_16(*(uint16_t *)&(lun_string[4]));
2608 fcmd->fcp_ent_addr.ent_addr_3 =
2609 BE_16(*(uint16_t *)&(lun_string[6]));
2610
2611 /*
2612 * Setup internal packet(icmd)
2613 */
2614 icmd->ipkt_lun = plun;
2615 icmd->ipkt_restart = 0;
2616 icmd->ipkt_retries = 0;
2617 icmd->ipkt_opcode = 0;
2618
2619 /*
2620 * Init the frame HEADER Pointer for use
2621 */
2622 hp = &fpkt->pkt_cmd_fhdr;
2623
2624 hp->s_id = pptr->port_id;
2625 hp->d_id = ptgt->tgt_d_id;
2626 hp->r_ctl = R_CTL_COMMAND;
2627 hp->type = FC_TYPE_SCSI_FCP;
2628 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
2629 hp->rsvd = 0;
2630 hp->seq_id = 0;
2631 hp->seq_cnt = 0;
2632 hp->ox_id = 0xffff;
2633 hp->rx_id = 0xffff;
2634 hp->ro = 0;
2635
2636 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
2637 fcmd->fcp_cntl.cntl_read_data = 1; /* only rd for now */
2638 fcmd->fcp_cntl.cntl_write_data = 0;
2639 fcmd->fcp_data_len = fscsi->scsi_buflen;
2640
2641 scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
2642 bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
2643 fscsi->scsi_cdblen);
2644
2645 if (!nodma) {
2646 FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
2647 fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
2648 }
2649
2650 /*
2651 * Send SCSI command to FC transport
2652 */
2653
2654 if (ret == 0) {
2655 mutex_enter(&ptgt->tgt_mutex);
2656
2657 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
2658 mutex_exit(&ptgt->tgt_mutex);
2659 fscsi->scsi_fc_status = xport_retval =
2660 fc_ulp_transport(pptr->port_fp_handle,
2661 fpkt);
2662 if (fscsi->scsi_fc_status != FC_SUCCESS) {
2663 ret = EIO;
2664 }
2665 } else {
2666 mutex_exit(&ptgt->tgt_mutex);
2667 ret = EBUSY;
2668 }
2669 }
2670 }
2671
2672 /*
2673 * Wait for completion only if fc_ulp_transport was called and it
2674 * returned a success. This is the only time callback will happen.
2675 * Otherwise, there is no point in waiting
2676 */
2677 if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2678 ret = fcp_ipkt_sema_wait(icmd);
2679 }
2680
2681 /*
2682 * Copy data to IOCTL data structures
2683 */
2684 rsp = NULL;
2685 if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2686 rsp = (struct fcp_rsp *)fpkt->pkt_resp;
2687
2688 if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
2689 fcp_log(CE_WARN, pptr->port_dip,
2690 "!SCSI command to d_id=0x%x lun=0x%x"
2691 " failed, Bad FCP response values:"
2692 " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
2693 " sts-rsvd2=%x, rsplen=%x, senselen=%x",
2694 ptgt->tgt_d_id, plun->lun_num,
2695 rsp->reserved_0, rsp->reserved_1,
2696 rsp->fcp_u.fcp_status.reserved_0,
2697 rsp->fcp_u.fcp_status.reserved_1,
2698 rsp->fcp_response_len, rsp->fcp_sense_len);
2699
2700 ret = EIO;
2701 }
2702 }
2703
2704 if ((ret == 0) && (rsp != NULL)) {
2705 /*
2706 * Calc response lengths
2707 */
2708 sense_len = 0;
2709 info_len = 0;
2710
2711 if (rsp->fcp_u.fcp_status.rsp_len_set) {
2712 info_len = rsp->fcp_response_len;
2713 }
2714
2715 rsp_info = (struct fcp_rsp_info *)
2716 ((uint8_t *)rsp + sizeof (struct fcp_rsp));
2717
2718 /*
2719 * Get SCSI status
2720 */
2721 fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
2722 /*
2723 * If a lun was just added or removed and the next command
2724 * comes through this interface, we need to capture the check
2725 * condition so we can discover the new topology.
2726 */
2727 if (fscsi->scsi_bufstatus != STATUS_GOOD &&
2728 rsp->fcp_u.fcp_status.sense_len_set) {
2729 sense_len = rsp->fcp_sense_len;
2730 rsp_sense = (caddr_t)((uint8_t *)rsp_info + info_len);
2731 sense_to = (struct scsi_extended_sense *)rsp_sense;
2732 if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
2733 (FCP_SENSE_NO_LUN(sense_to))) {
2734 reconfig_lun = TRUE;
2735 }
2736 }
2737
2738 if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
2739 (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
2740 if (reconfig_lun == FALSE) {
2741 reconfig_status =
2742 fcp_is_reconfig_needed(ptgt, fpkt);
2743 }
2744
2745 if ((reconfig_lun == TRUE) ||
2746 (reconfig_status == TRUE)) {
2747 mutex_enter(&ptgt->tgt_mutex);
2748 if (ptgt->tgt_tid == NULL) {
2749 /*
2750 * Either we've been notified the
2751 * REPORT_LUN data has changed, or
2752 * we've determined on our own that
2753 * we're out of date. Kick off
2754 * rediscovery.
2755 */
2756 tid = timeout(fcp_reconfigure_luns,
2757 (caddr_t)ptgt, drv_usectohz(1));
2758
2759 ptgt->tgt_tid = tid;
2760 ptgt->tgt_state |= FCP_TGT_BUSY;
2761 ret = EBUSY;
2762 reconfig_pending = TRUE;
2763 }
2764 mutex_exit(&ptgt->tgt_mutex);
2765 }
2766 }
2767
2768 /*
2769 * Calc residuals and buffer lengths
2770 */
2771
2772 if (ret == 0) {
2773 buf_len = fscsi->scsi_buflen;
2774 fscsi->scsi_bufresid = 0;
2775 if (rsp->fcp_u.fcp_status.resid_under) {
2776 if (rsp->fcp_resid <= fscsi->scsi_buflen) {
2777 fscsi->scsi_bufresid = rsp->fcp_resid;
2778 } else {
2779 cmn_err(CE_WARN, "fcp: bad residue %x "
2780 "for txfer len %x", rsp->fcp_resid,
2781 fscsi->scsi_buflen);
2782 fscsi->scsi_bufresid =
2783 fscsi->scsi_buflen;
2784 }
2785 buf_len -= fscsi->scsi_bufresid;
2786 }
2787 if (rsp->fcp_u.fcp_status.resid_over) {
2788 fscsi->scsi_bufresid = -rsp->fcp_resid;
2789 }
2790
2791 fscsi->scsi_rqresid = fscsi->scsi_rqlen - sense_len;
2792 if (fscsi->scsi_rqlen < sense_len) {
2793 sense_len = fscsi->scsi_rqlen;
2794 }
2795
2796 fscsi->scsi_fc_rspcode = 0;
2797 if (rsp->fcp_u.fcp_status.rsp_len_set) {
2798 fscsi->scsi_fc_rspcode = rsp_info->rsp_code;
2799 }
2800 fscsi->scsi_pkt_state = fpkt->pkt_state;
2801 fscsi->scsi_pkt_action = fpkt->pkt_action;
2802 fscsi->scsi_pkt_reason = fpkt->pkt_reason;
2803
2804 /*
2805 * Copy data and request sense
2806 *
2807 * Data must be copied by using the FCP_CP_IN macro.
2808 * This will ensure the proper byte order since the data
2809 * is being copied directly from the memory mapped
2810 * device register.
2811 *
2812 * The response (and request sense) will be in the
2813 * correct byte order. No special copy is necessary.
2814 */
2815
2816 if (buf_len) {
2817 FCP_CP_IN(fpkt->pkt_data,
2818 fscsi->scsi_bufaddr,
2819 fpkt->pkt_data_acc,
2820 buf_len);
2821 }
2822 bcopy((void *)rsp_sense,
2823 (void *)fscsi->scsi_rqbufaddr,
2824 sense_len);
2825 }
2826 }
2827
2828 /*
2829 * Cleanup transport data structures if icmd was alloc-ed
2830 * So, cleanup happens in the same thread that icmd was alloc-ed
2831 */
2832 if (icmd != NULL) {
2833 fcp_ipkt_sema_cleanup(icmd);
2834 }
2835
2836 /* restore pm busy/idle status */
2837 if (port_busy) {
2838 fc_ulp_idle_port(pptr->port_fp_handle);
2839 }
2840
2841 /*
2842 * Cleanup target. if a reconfig is pending, don't clear the BUSY
2843 * flag, it'll be cleared when the reconfig is complete.
2844 */
2845 if ((ptgt != NULL) && !reconfig_pending) {
2846 /*
2847 * If target was created,
2848 */
2849 if (target_created) {
2850 mutex_enter(&ptgt->tgt_mutex);
2851 ptgt->tgt_state &= ~FCP_TGT_BUSY;
2852 mutex_exit(&ptgt->tgt_mutex);
2853 } else {
2854 /*
2855 * De-mark target as busy
2856 */
2857 mutex_enter(&ptgt->tgt_mutex);
2858 ptgt->tgt_state &= ~FCP_TGT_BUSY;
2859 mutex_exit(&ptgt->tgt_mutex);
2860 }
2861 }
2862 return (ret);
2863 }
2864
2865
/*
 * fcp_is_reconfig_needed
 *	Compares the set of LUNs in the REPORT_LUN response carried by
 *	fpkt against the LUNs currently attached to ptgt, and decides
 *	whether LUN rediscovery is required.
 *
 * Input:
 *	ptgt	= target whose LUN list is to be checked
 *	fpkt	= completed packet holding REPORT_LUN response data
 *
 * Returns:
 *	TRUE	= the response disagrees with our LUN list; reconfigure
 *	FALSE	= the response matches what we already know
 *
 * Context:
 *	Kernel context.  Acquires and releases ptgt->tgt_mutex.
 */
static int
fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
    fc_packet_t *fpkt)
{
	uchar_t		*lun_string;
	uint16_t	lun_num, i;
	int		num_luns;
	int		actual_luns;
	int		num_masked_luns;
	int		lun_buflen;
	struct fcp_lun	*plun		= NULL;
	struct fcp_reportlun_resp	*report_lun;
	uint8_t		reconfig_needed = FALSE;
	uint8_t		lun_exists = FALSE;
	fcp_port_t			*pptr		 = ptgt->tgt_port;

	/* Copy the device's REPORT_LUN response out of the DMA buffer. */
	report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);

	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
	    fpkt->pkt_datalen);

	/* get number of luns (which is supplied as LUNS * 8) */
	num_luns = BE_32(report_lun->num_lun) >> 3;

	/*
	 * Figure out exactly how many lun strings our response buffer
	 * can hold.
	 */
	lun_buflen = (fpkt->pkt_datalen -
	    2 * sizeof (uint32_t)) / sizeof (longlong_t);

	/*
	 * Is our response buffer full or not? We don't want to
	 * potentially walk beyond the number of luns we have.
	 */
	if (num_luns <= lun_buflen) {
		actual_luns = num_luns;
	} else {
		actual_luns = lun_buflen;
	}

	mutex_enter(&ptgt->tgt_mutex);

	/* Scan each lun to see if we have masked it. */
	num_masked_luns = 0;
	if (fcp_lun_blacklist != NULL) {
		for (i = 0; i < actual_luns; i++) {
			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
			switch (lun_string[0] & 0xC0) {
			case FCP_LUN_ADDRESSING:
			case FCP_PD_ADDRESSING:
			case FCP_VOLUME_ADDRESSING:
				/*
				 * 14-bit LUN number: low 6 bits of byte 0
				 * plus all of byte 1.
				 */
				lun_num = ((lun_string[0] & 0x3F) << 8)
				    | lun_string[1];
				if (fcp_should_mask(&ptgt->tgt_port_wwn,
				    lun_num) == TRUE) {
					num_masked_luns++;
				}
				break;
			default:
				break;
			}
		}
	}

	/*
	 * The quick and easy check. If the number of LUNs reported
	 * doesn't match the number we currently know about, we need
	 * to reconfigure.  Masked LUNs are intentionally absent from
	 * our list, so they are added back in for the comparison.
	 */
	if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
		mutex_exit(&ptgt->tgt_mutex);
		kmem_free(report_lun, fpkt->pkt_datalen);
		return (TRUE);
	}

	/*
	 * If the quick and easy check doesn't turn up anything, we walk
	 * the list of luns from the REPORT_LUN response and look for
	 * any luns we don't know about. If we find one, we know we need
	 * to reconfigure. We will skip LUNs that are masked because of the
	 * blacklist.
	 */
	for (i = 0; i < actual_luns; i++) {
		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
		lun_exists = FALSE;
		switch (lun_string[0] & 0xC0) {
		case FCP_LUN_ADDRESSING:
		case FCP_PD_ADDRESSING:
		case FCP_VOLUME_ADDRESSING:
			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];

			/*
			 * Masked LUNs are deliberately not in our list;
			 * treat them as "existing" so they don't trigger
			 * a reconfiguration.
			 */
			if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
			    &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
				lun_exists = TRUE;
				break;
			}

			for (plun = ptgt->tgt_lun; plun;
			    plun = plun->lun_next) {
				if (plun->lun_num == lun_num) {
					lun_exists = TRUE;
					break;
				}
			}
			break;
		default:
			/*
			 * Unknown addressing method: lun_exists stays
			 * FALSE, which forces a reconfiguration below.
			 */
			break;
		}

		if (lun_exists == FALSE) {
			reconfig_needed = TRUE;
			break;
		}
	}

	mutex_exit(&ptgt->tgt_mutex);
	kmem_free(report_lun, fpkt->pkt_datalen);

	return (reconfig_needed);
}
2987
2988 /*
2989 * This function is called by fcp_handle_page83 and uses inquiry response data
 * stored in plun->lun_inq to determine whether or not a device is a member of
 * the table fcp_symmetric_disk_table. We return 0 if it is in the table,
 * otherwise 1.
2993 */
2994 static int
2995 fcp_symmetric_device_probe(struct fcp_lun *plun)
2996 {
2997 struct scsi_inquiry *stdinq = &plun->lun_inq;
2998 char *devidptr;
2999 int i, len;
3000
3001 for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
3002 devidptr = fcp_symmetric_disk_table[i];
3003 len = (int)strlen(devidptr);
3004
3005 if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
3006 return (0);
3007 }
3008 }
3009 return (1);
3010 }
3011
3012
3013 /*
3014 * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl
3015 * It basically returns the current count of # of state change callbacks
3016 * i.e the value of tgt_change_cnt.
3017 *
3018 * INPUT:
3019 * fcp_ioctl.fp_minor -> The minor # of the fp port
3020 * fcp_ioctl.listlen -> 1
3021 * fcp_ioctl.list -> Pointer to a 32 bit integer
3022 */
3023 /*ARGSUSED2*/
3024 static int
3025 fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
3026 {
3027 int ret;
3028 uint32_t link_cnt;
3029 struct fcp_ioctl fioctl;
3030 struct fcp_port *pptr = NULL;
3031
3032 if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
3033 &pptr)) != 0) {
3034 return (ret);
3035 }
3036
3037 ASSERT(pptr != NULL);
3038
3039 if (fioctl.listlen != 1) {
3040 return (EINVAL);
3041 }
3042
3043 mutex_enter(&pptr->port_mutex);
3044 if (pptr->port_state & FCP_STATE_OFFLINE) {
3045 mutex_exit(&pptr->port_mutex);
3046 return (ENXIO);
3047 }
3048
3049 /*
3050 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
3051 * When the fcp initially attaches to the port and there are nothing
3052 * hanging out of the port or if there was a repeat offline state change
3053 * callback (refer fcp_statec_callback() FC_STATE_OFFLINE case).
3054 * In the latter case, port_tmp_cnt will be non-zero and that is how we
3055 * will differentiate the 2 cases.
3056 */
3057 if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
3058 mutex_exit(&pptr->port_mutex);
3059 return (ENXIO);
3060 }
3061
3062 link_cnt = pptr->port_link_cnt;
3063 mutex_exit(&pptr->port_mutex);
3064
3065 if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
3066 return (EFAULT);
3067 }
3068
3069 #ifdef _MULTI_DATAMODEL
3070 switch (ddi_model_convert_from(mode & FMODELS)) {
3071 case DDI_MODEL_ILP32: {
3072 struct fcp32_ioctl f32_ioctl;
3073
3074 f32_ioctl.fp_minor = fioctl.fp_minor;
3075 f32_ioctl.listlen = fioctl.listlen;
3076 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
3077 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
3078 sizeof (struct fcp32_ioctl), mode)) {
3079 return (EFAULT);
3080 }
3081 break;
3082 }
3083 case DDI_MODEL_NONE:
3084 if (ddi_copyout((void *)&fioctl, (void *)data,
3085 sizeof (struct fcp_ioctl), mode)) {
3086 return (EFAULT);
3087 }
3088 break;
3089 }
3090 #else /* _MULTI_DATAMODEL */
3091
3092 if (ddi_copyout((void *)&fioctl, (void *)data,
3093 sizeof (struct fcp_ioctl), mode)) {
3094 return (EFAULT);
3095 }
3096 #endif /* _MULTI_DATAMODEL */
3097
3098 return (0);
3099 }
3100
3101 /*
3102 * This function copies the fcp_ioctl structure passed in from user land
3103 * into kernel land. Handles 32 bit applications.
3104 */
3105 /*ARGSUSED*/
3106 static int
3107 fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
3108 struct fcp_ioctl *fioctl, struct fcp_port **pptr)
3109 {
3110 struct fcp_port *t_pptr;
3111
3112 #ifdef _MULTI_DATAMODEL
3113 switch (ddi_model_convert_from(mode & FMODELS)) {
3114 case DDI_MODEL_ILP32: {
3115 struct fcp32_ioctl f32_ioctl;
3116
3117 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
3118 sizeof (struct fcp32_ioctl), mode)) {
3119 return (EFAULT);
3120 }
3121 fioctl->fp_minor = f32_ioctl.fp_minor;
3122 fioctl->listlen = f32_ioctl.listlen;
3123 fioctl->list = (caddr_t)(long)f32_ioctl.list;
3124 break;
3125 }
3126 case DDI_MODEL_NONE:
3127 if (ddi_copyin((void *)data, (void *)fioctl,
3128 sizeof (struct fcp_ioctl), mode)) {
3129 return (EFAULT);
3130 }
3131 break;
3132 }
3133
3134 #else /* _MULTI_DATAMODEL */
3135 if (ddi_copyin((void *)data, (void *)fioctl,
3136 sizeof (struct fcp_ioctl), mode)) {
3137 return (EFAULT);
3138 }
3139 #endif /* _MULTI_DATAMODEL */
3140
3141 /*
3142 * Right now we can assume that the minor number matches with
3143 * this instance of fp. If this changes we will need to
3144 * revisit this logic.
3145 */
3146 mutex_enter(&fcp_global_mutex);
3147 t_pptr = fcp_port_head;
3148 while (t_pptr) {
3149 if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
3150 break;
3151 } else {
3152 t_pptr = t_pptr->port_next;
3153 }
3154 }
3155 *pptr = t_pptr;
3156 mutex_exit(&fcp_global_mutex);
3157 if (t_pptr == NULL) {
3158 return (ENXIO);
3159 }
3160
3161 return (0);
3162 }
3163
3164 /*
3165 * Function: fcp_port_create_tgt
3166 *
3167 * Description: As the name suggest this function creates the target context
3168 * specified by the the WWN provided by the caller. If the
3169 * creation goes well and the target is known by fp/fctl a PLOGI
3170 * followed by a PRLI are issued.
3171 *
3172 * Argument: pptr fcp port structure
3173 * pwwn WWN of the target
3174 * ret_val Address of the return code. It could be:
3175 * EIO, ENOMEM or 0.
3176 * fc_status PLOGI or PRLI status completion
3177 * fc_pkt_state PLOGI or PRLI state completion
3178 * fc_pkt_reason PLOGI or PRLI reason completion
3179 * fc_pkt_action PLOGI or PRLI action completion
3180 *
3181 * Return Value: NULL if it failed
3182 * Target structure address if it succeeds
3183 */
static struct fcp_tgt *
fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
    int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
{
	struct fcp_tgt	*ptgt = NULL;	/* target returned to the caller */
	fc_portmap_t	devlist;	/* port map entry for pwwn */
	int		lcount;		/* link count snapshot */
	int		error;

	*ret_val = 0;

	/*
	 * Check FC port device & get port map.  Either failure leaves
	 * devlist uninitialized, but in that case *ret_val is non-zero
	 * and devlist is never consumed below.
	 */
	if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
	    &error, 1) == NULL) {
		*ret_val = EIO;
	} else {
		if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
		    &devlist) != FC_SUCCESS) {
			*ret_val = EIO;
		}
	}

	/* Set port map flags */
	devlist.map_type = PORT_DEVICE_USER_CREATE;

	/* Allocate target */
	if (*ret_val == 0) {
		lcount = pptr->port_link_cnt;
		ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			*ret_val = ENOMEM;
		} else {
			/* Setup target from the port map entry */
			mutex_enter(&ptgt->tgt_mutex);

			ptgt->tgt_statec_cause = FCP_CAUSE_TGT_CHANGE;
			ptgt->tgt_tmp_cnt = 1;
			ptgt->tgt_d_id = devlist.map_did.port_id;
			ptgt->tgt_hard_addr =
			    devlist.map_hard_addr.hard_addr;
			ptgt->tgt_pd_handle = devlist.map_pd;
			ptgt->tgt_fca_dev = NULL;

			bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
			    FC_WWN_SIZE);
			bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
			    FC_WWN_SIZE);

			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	/*
	 * Release global mutex for PLOGI and PRLI.  NOTE(review): the
	 * exit/enter pair implies the caller holds fcp_global_mutex on
	 * entry; the lock is dropped here because the login calls below
	 * block waiting for completion.
	 */
	mutex_exit(&fcp_global_mutex);

	/* Send PLOGI (If necessary) */
	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}

	/* Send PRLI (If necessary) */
	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_prli(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}

	/* Reacquire the global mutex for the caller */
	mutex_enter(&fcp_global_mutex);

	return (ptgt);
}
3259
3260 /*
3261 * Function: fcp_tgt_send_plogi
3262 *
3263 * Description: This function sends a PLOGI to the target specified by the
3264 * caller and waits till it completes.
3265 *
3266 * Argument: ptgt Target to send the plogi to.
3267 * fc_status Status returned by fp/fctl in the PLOGI request.
3268 * fc_pkt_state State returned by fp/fctl in the PLOGI request.
3269 * fc_pkt_reason Reason returned by fp/fctl in the PLOGI request.
3270 * fc_pkt_action Action returned by fp/fctl in the PLOGI request.
3271 *
3272 * Return Value: 0
3273 * ENOMEM
3274 * EIO
3275 *
3276 * Context: User context.
3277 */
static int
fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
    int *fc_pkt_reason, int *fc_pkt_action)
{
	struct fcp_port	*pptr;
	struct fcp_ipkt	*icmd;
	struct fc_packet	*fpkt;
	fc_frame_hdr_t	*hp;
	struct la_els_logi	logi;
	int		tcount;
	int		lcount;
	/*
	 * login_retval is primed to a non-success value so the wait
	 * below is skipped unless fc_ulp_login() actually succeeded.
	 */
	int		ret, login_retval = ~FC_SUCCESS;

	ret = 0;

	pptr = ptgt->tgt_port;

	lcount = pptr->port_link_cnt;
	tcount = ptgt->tgt_change_cnt;

	/* Alloc internal packet */
	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
	    sizeof (la_els_logi_t), 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT);

	if (icmd == NULL) {
		ret = ENOMEM;
	} else {
		/*
		 * Setup internal packet as sema sync
		 */
		fcp_ipkt_sema_init(icmd);

		/*
		 * Setup internal packet (icmd)
		 */
		icmd->ipkt_lun = NULL;
		icmd->ipkt_restart = 0;
		icmd->ipkt_retries = 0;
		icmd->ipkt_opcode = LA_ELS_PLOGI;

		/*
		 * Setup fc_packet
		 */
		fpkt = icmd->ipkt_fpkt;

		fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
		fpkt->pkt_timeout = FCP_ELS_TIMEOUT;

		/*
		 * Setup FC frame header
		 */
		hp = &fpkt->pkt_cmd_fhdr;

		hp->s_id = pptr->port_id;	/* source ID */
		hp->d_id = ptgt->tgt_d_id;	/* dest ID */
		hp->r_ctl = R_CTL_ELS_REQ;
		hp->type = FC_TYPE_EXTENDED_LS;
		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->seq_id = 0;
		hp->rsvd = 0;
		hp->df_ctl = 0;
		hp->seq_cnt = 0;
		hp->ox_id = 0xffff;		/* i.e. none */
		hp->rx_id = 0xffff;		/* i.e. none */
		hp->ro = 0;

		/*
		 * Setup PLOGI
		 */
		bzero(&logi, sizeof (struct la_els_logi));
		logi.ls_code.ls_code = LA_ELS_PLOGI;

		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));

		/*
		 * Send PLOGI
		 */
		*fc_status = login_retval =
		    fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
		if (*fc_status != FC_SUCCESS) {
			ret = EIO;
		}
	}

	/*
	 * Wait for completion.  Both conditions being true implies
	 * icmd != NULL, so fpkt is valid here.
	 */
	if ((ret == 0) && (login_retval == FC_SUCCESS)) {
		ret = fcp_ipkt_sema_wait(icmd);

		*fc_pkt_state = fpkt->pkt_state;
		*fc_pkt_reason = fpkt->pkt_reason;
		*fc_pkt_action = fpkt->pkt_action;
	}

	/*
	 * Cleanup transport data structures if icmd was alloc-ed.  This
	 * is always safe here: if fc_ulp_login() failed no callback will
	 * ever fire, and if it succeeded we have already waited on the
	 * semaphore above, so the callback has completed.
	 */
	if (icmd != NULL) {
		fcp_ipkt_sema_cleanup(icmd);
	}

	return (ret);
}
3388
3389 /*
3390 * Function: fcp_tgt_send_prli
3391 *
3392 * Description: Does nothing as of today.
3393 *
3394 * Argument: ptgt Target to send the prli to.
3395 * fc_status Status returned by fp/fctl in the PRLI request.
3396 * fc_pkt_state State returned by fp/fctl in the PRLI request.
3397 * fc_pkt_reason Reason returned by fp/fctl in the PRLI request.
3398 * fc_pkt_action Action returned by fp/fctl in the PRLI request.
3399 *
3400 * Return Value: 0
3401 */
/*ARGSUSED*/
static int
fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
    int *fc_pkt_reason, int *fc_pkt_action)
{
	/* Intentional no-op placeholder; always reports success. */
	return (0);
}
3409
3410 /*
3411 * Function: fcp_ipkt_sema_init
3412 *
3413 * Description: Initializes the semaphore contained in the internal packet.
3414 *
3415 * Argument: icmd Internal packet the semaphore of which must be
3416 * initialized.
3417 *
3418 * Return Value: None
3419 *
3420 * Context: User context only.
3421 */
3422 static void
3423 fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
3424 {
3425 struct fc_packet *fpkt;
3426
3427 fpkt = icmd->ipkt_fpkt;
3428
3429 /* Create semaphore for sync */
3430 sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);
3431
3432 /* Setup the completion callback */
3433 fpkt->pkt_comp = fcp_ipkt_sema_callback;
3434 }
3435
3436 /*
3437 * Function: fcp_ipkt_sema_wait
3438 *
3439 * Description: Wait on the semaphore embedded in the internal packet. The
3440 * semaphore is released in the callback.
3441 *
3442 * Argument: icmd Internal packet to wait on for completion.
3443 *
3444 * Return Value: 0
3445 * EIO
3446 * EBUSY
3447 * EAGAIN
3448 *
3449 * Context: User context only.
3450 *
3451 * This function does a conversion between the field pkt_state of the fc_packet
3452 * embedded in the internal packet (icmd) and the code it returns.
3453 */
3454 static int
3455 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3456 {
3457 struct fc_packet *fpkt;
3458 int ret;
3459
3460 ret = EIO;
3461 fpkt = icmd->ipkt_fpkt;
3462
3463 /*
3464 * Wait on semaphore
3465 */
3466 sema_p(&(icmd->ipkt_sema));
3467
3468 /*
3469 * Check the status of the FC packet
3470 */
3471 switch (fpkt->pkt_state) {
3472 case FC_PKT_SUCCESS:
3473 ret = 0;
3474 break;
3475 case FC_PKT_LOCAL_RJT:
3476 switch (fpkt->pkt_reason) {
3477 case FC_REASON_SEQ_TIMEOUT:
3478 case FC_REASON_RX_BUF_TIMEOUT:
3479 ret = EAGAIN;
3480 break;
3481 case FC_REASON_PKT_BUSY:
3482 ret = EBUSY;
3483 break;
3484 }
3485 break;
3486 case FC_PKT_TIMEOUT:
3487 ret = EAGAIN;
3488 break;
3489 case FC_PKT_LOCAL_BSY:
3490 case FC_PKT_TRAN_BSY:
3491 case FC_PKT_NPORT_BSY:
3492 case FC_PKT_FABRIC_BSY:
3493 ret = EBUSY;
3494 break;
3495 case FC_PKT_LS_RJT:
3496 case FC_PKT_BA_RJT:
3497 switch (fpkt->pkt_reason) {
3498 case FC_REASON_LOGICAL_BSY:
3499 ret = EBUSY;
3500 break;
3501 }
3502 break;
3503 case FC_PKT_FS_RJT:
3504 switch (fpkt->pkt_reason) {
3505 case FC_REASON_FS_LOGICAL_BUSY:
3506 ret = EBUSY;
3507 break;
3508 }
3509 break;
3510 }
3511
3512 return (ret);
3513 }
3514
3515 /*
3516 * Function: fcp_ipkt_sema_callback
3517 *
3518 * Description: Registered as the completion callback function for the FC
3519 * transport when the ipkt semaphore is used for sync. This will
3520 * cleanup the used data structures, if necessary and wake up
3521 * the user thread to complete the transaction.
3522 *
3523 * Argument: fpkt FC packet (points to the icmd)
3524 *
3525 * Return Value: None
3526 *
3527 * Context: User context only
3528 */
3529 static void
3530 fcp_ipkt_sema_callback(struct fc_packet *fpkt)
3531 {
3532 struct fcp_ipkt *icmd;
3533
3534 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
3535
3536 /*
3537 * Wake up user thread
3538 */
3539 sema_v(&(icmd->ipkt_sema));
3540 }
3541
3542 /*
3543 * Function: fcp_ipkt_sema_cleanup
3544 *
3545 * Description: Called to cleanup (if necessary) the data structures used
3546 * when ipkt sema is used for sync. This function will detect
3547 * whether the caller is the last thread (via counter) and
3548 * cleanup only if necessary.
3549 *
3550 * Argument: icmd Internal command packet
3551 *
3552 * Return Value: None
3553 *
3554 * Context: User context only
3555 */
3556 static void
3557 fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
3558 {
3559 struct fcp_tgt *ptgt;
3560 struct fcp_port *pptr;
3561
3562 ptgt = icmd->ipkt_tgt;
3563 pptr = icmd->ipkt_port;
3564
3565 /*
3566 * Acquire data structure
3567 */
3568 mutex_enter(&ptgt->tgt_mutex);
3569
3570 /*
3571 * Destroy semaphore
3572 */
3573 sema_destroy(&(icmd->ipkt_sema));
3574
3575 /*
3576 * Cleanup internal packet
3577 */
3578 mutex_exit(&ptgt->tgt_mutex);
3579 fcp_icmd_free(pptr, icmd);
3580 }
3581
3582 /*
3583 * Function: fcp_port_attach
3584 *
3585 * Description: Called by the transport framework to resume, suspend or
3586 * attach a new port.
3587 *
3588 * Argument: ulph Port handle
3589 * *pinfo Port information
3590 * cmd Command
3591 * s_id Port ID
3592 *
3593 * Return Value: FC_FAILURE or FC_SUCCESS
3594 */
3595 /*ARGSUSED*/
3596 static int
3597 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3598 fc_attach_cmd_t cmd, uint32_t s_id)
3599 {
3600 int instance;
3601 int res = FC_FAILURE; /* default result */
3602
3603 ASSERT(pinfo != NULL);
3604
3605 instance = ddi_get_instance(pinfo->port_dip);
3606
3607 switch (cmd) {
3608 case FC_CMD_ATTACH:
3609 /*
3610 * this port instance attaching for the first time (or after
3611 * being detached before)
3612 */
3613 if (fcp_handle_port_attach(ulph, pinfo, s_id,
3614 instance) == DDI_SUCCESS) {
3615 res = FC_SUCCESS;
3616 } else {
3617 ASSERT(ddi_get_soft_state(fcp_softstate,
3618 instance) == NULL);
3619 }
3620 break;
3621
3622 case FC_CMD_RESUME:
3623 case FC_CMD_POWER_UP:
3624 /*
3625 * this port instance was attached and the suspended and
3626 * will now be resumed
3627 */
3628 if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3629 instance) == DDI_SUCCESS) {
3630 res = FC_SUCCESS;
3631 }
3632 break;
3633
3634 default:
3635 /* shouldn't happen */
3636 FCP_TRACE(fcp_logq, "fcp",
3637 fcp_trace, FCP_BUF_LEVEL_2, 0,
3638 "port_attach: unknown cmdcommand: %d", cmd);
3639 break;
3640 }
3641
3642 /* return result */
3643 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3644 FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3645
3646 return (res);
3647 }
3648
3649
3650 /*
3651 * detach or suspend this port instance
3652 *
3653 * acquires and releases the global mutex
3654 *
3655 * acquires and releases the mutex for this port
3656 *
3657 * acquires and releases the hotplug mutex for this port
3658 */
3659 /*ARGSUSED*/
3660 static int
3661 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
3662 fc_detach_cmd_t cmd)
3663 {
3664 int flag;
3665 int instance;
3666 struct fcp_port *pptr;
3667
3668 instance = ddi_get_instance(info->port_dip);
3669 pptr = ddi_get_soft_state(fcp_softstate, instance);
3670
3671 switch (cmd) {
3672 case FC_CMD_SUSPEND:
3673 FCP_DTRACE(fcp_logq, "fcp",
3674 fcp_trace, FCP_BUF_LEVEL_8, 0,
3675 "port suspend called for port %d", instance);
3676 flag = FCP_STATE_SUSPENDED;
3677 break;
3678
3679 case FC_CMD_POWER_DOWN:
3680 FCP_DTRACE(fcp_logq, "fcp",
3681 fcp_trace, FCP_BUF_LEVEL_8, 0,
3682 "port power down called for port %d", instance);
3683 flag = FCP_STATE_POWER_DOWN;
3684 break;
3685
3686 case FC_CMD_DETACH:
3687 FCP_DTRACE(fcp_logq, "fcp",
3688 fcp_trace, FCP_BUF_LEVEL_8, 0,
3689 "port detach called for port %d", instance);
3690 flag = FCP_STATE_DETACHING;
3691 break;
3692
3693 default:
3694 /* shouldn't happen */
3695 return (FC_FAILURE);
3696 }
3697 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3698 FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");
3699
3700 return (fcp_handle_port_detach(pptr, flag, instance));
3701 }
3702
3703
3704 /*
3705 * called for ioctls on the transport's devctl interface, and the transport
3706 * has passed it to us
3707 *
3708 * this will only be called for device control ioctls (i.e. hotplugging stuff)
3709 *
3710 * return FC_SUCCESS if we decide to claim the ioctl,
3711 * else return FC_UNCLAIMED
3712 *
3713 * *rval is set iff we decide to claim the ioctl
3714 */
/*ARGSUSED*/
static int
fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
    intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
{
	int			retval = FC_UNCLAIMED;	/* return value */
	struct fcp_port		*pptr = NULL;		/* our soft state */
	struct devctl_iocdata	*dcp = NULL;		/* for devctl */
	dev_info_t		*cdip;
	mdi_pathinfo_t		*pip = NULL;
	char			*ndi_nm;		/* NDI name */
	char			*ndi_addr;		/* NDI addr */
	int			is_mpxio, circ;
	int			devi_entered = 0;	/* ndi/mdi enter held */
	clock_t			end_time;

	ASSERT(rval != NULL);

	FCP_DTRACE(fcp_logq, "fcp",
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);

	/* if already claimed then forget it */
	if (claimed) {
		/*
		 * for now, if this ioctl has already been claimed, then
		 * we just ignore it
		 */
		return (retval);
	}

	/* get our port info */
	if ((pptr = fcp_get_port(port_handle)) == NULL) {
		fcp_log(CE_WARN, NULL,
		    "!fcp:Invalid port handle handle in ioctl");
		*rval = ENXIO;
		return (retval);
	}
	is_mpxio = pptr->port_mpxio;

	/*
	 * First pass over the command: allocate the devctl handle and,
	 * for per-device commands, resolve the child device (cdip/pip).
	 * The actual command is processed in the second switch below.
	 */
	switch (cmd) {
	case DEVCTL_BUS_GETSTATE:
	case DEVCTL_BUS_QUIESCE:
	case DEVCTL_BUS_UNQUIESCE:
	case DEVCTL_BUS_RESET:
	case DEVCTL_BUS_RESETALL:
	/* FALLTHROUGH: bus-level commands only need the devctl handle */

	case DEVCTL_BUS_DEV_CREATE:
		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
			return (retval);
		}
		break;

	case DEVCTL_DEVICE_GETSTATE:
	case DEVCTL_DEVICE_OFFLINE:
	case DEVCTL_DEVICE_ONLINE:
	case DEVCTL_DEVICE_REMOVE:
	case DEVCTL_DEVICE_RESET:
		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
			return (retval);
		}

		ASSERT(dcp != NULL);

		/* ensure we have a name and address */
		if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
		    ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "ioctl: can't get name (%s) or addr (%s)",
			    ndi_nm ? ndi_nm : "<null ptr>",
			    ndi_addr ? ndi_addr : "<null ptr>");
			ndi_dc_freehdl(dcp);
			return (retval);
		}


		/* get our child's DIP */
		ASSERT(pptr != NULL);
		if (is_mpxio) {
			mdi_devi_enter(pptr->port_dip, &circ);
		} else {
			ndi_devi_enter(pptr->port_dip, &circ);
		}
		/* remember to exit via the "out" label from here on */
		devi_entered = 1;

		if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
		    ndi_addr)) == NULL) {
			/* Look for virtually enumerated devices. */
			pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
			if (pip == NULL ||
			    ((cdip = mdi_pi_get_client(pip)) == NULL)) {
				*rval = ENXIO;
				goto out;
			}
		}
		break;

	default:
		*rval = ENOTTY;
		return (retval);
	}

	/* this ioctl is ours -- process it */

	retval = FC_SUCCESS;		/* just means we claim the ioctl */

	/* we assume it will be a success; else we'll set error value */
	*rval = 0;


	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "ioctl: claiming this one");

	/* handle ioctls now */
	switch (cmd) {
	case DEVCTL_DEVICE_GETSTATE:
		ASSERT(cdip != NULL);
		ASSERT(dcp != NULL);
		if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
			*rval = EFAULT;
		}
		break;

	case DEVCTL_DEVICE_REMOVE:
	case DEVCTL_DEVICE_OFFLINE: {
		int			flag = 0;
		int			lcount;
		int			tcount;
		struct fcp_pkt		*head = NULL;
		struct fcp_lun		*plun;
		child_info_t		*cip = CIP(cdip);
		int			all = 1;
		struct fcp_lun		*tplun;
		struct fcp_tgt		*ptgt;

		ASSERT(pptr != NULL);
		ASSERT(cdip != NULL);

		mutex_enter(&pptr->port_mutex);
		if (pip != NULL) {
			cip = CIP(pip);
		}
		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
			mutex_exit(&pptr->port_mutex);
			*rval = ENXIO;
			break;
		}

		/* abort any commands still outstanding on this LUN */
		head = fcp_scan_commands(plun);
		if (head != NULL) {
			fcp_abort_commands(head, LUN_PORT);
		}
		lcount = pptr->port_link_cnt;
		tcount = plun->lun_tgt->tgt_change_cnt;
		mutex_exit(&pptr->port_mutex);

		if (cmd == DEVCTL_DEVICE_REMOVE) {
			flag = NDI_DEVI_REMOVE;
		}

		/*
		 * Drop the ndi/mdi enter before handing off to the
		 * hotplug thread, which does its own devinfo work.
		 */
		if (is_mpxio) {
			mdi_devi_exit(pptr->port_dip, circ);
		} else {
			ndi_devi_exit(pptr->port_dip, circ);
		}
		devi_entered = 0;

		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
		    FCP_OFFLINE, lcount, tcount, flag);

		if (*rval != NDI_SUCCESS) {
			*rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
			break;
		}

		fcp_update_offline_flags(plun);

		/*
		 * If every LUN of this target is now offline, mark the
		 * target node gone.
		 */
		ptgt = plun->lun_tgt;
		mutex_enter(&ptgt->tgt_mutex);
		for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
		    tplun->lun_next) {
			mutex_enter(&tplun->lun_mutex);
			if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
				all = 0;
			}
			mutex_exit(&tplun->lun_mutex);
		}

		if (all) {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
			/*
			 * The user is unconfiguring/offlining the device.
			 * If fabric and the auto configuration is set
			 * then make sure the user is the only one who
			 * can reconfigure the device.
			 */
			if (FC_TOP_EXTERNAL(pptr->port_topology) &&
			    fcp_enable_auto_configuration) {
				ptgt->tgt_manual_config_only = 1;
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
		break;
	}

	case DEVCTL_DEVICE_ONLINE: {
		int			lcount;
		int			tcount;
		struct fcp_lun		*plun;
		child_info_t		*cip = CIP(cdip);

		ASSERT(cdip != NULL);
		ASSERT(pptr != NULL);

		mutex_enter(&pptr->port_mutex);
		if (pip != NULL) {
			cip = CIP(pip);
		}
		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
			mutex_exit(&pptr->port_mutex);
			*rval = ENXIO;
			break;
		}
		lcount = pptr->port_link_cnt;
		tcount = plun->lun_tgt->tgt_change_cnt;
		mutex_exit(&pptr->port_mutex);

		/*
		 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
		 * to allow the device attach to occur when the device is
		 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
		 * from the scsi_probe()).
		 */
		mutex_enter(&LUN_TGT->tgt_mutex);
		plun->lun_state |= FCP_LUN_ONLINING;
		mutex_exit(&LUN_TGT->tgt_mutex);

		/* drop the ndi/mdi enter before the hotplug hand-off */
		if (is_mpxio) {
			mdi_devi_exit(pptr->port_dip, circ);
		} else {
			ndi_devi_exit(pptr->port_dip, circ);
		}
		devi_entered = 0;

		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
		    FCP_ONLINE, lcount, tcount, 0);

		if (*rval != NDI_SUCCESS) {
			/* Reset the FCP_LUN_ONLINING bit */
			mutex_enter(&LUN_TGT->tgt_mutex);
			plun->lun_state &= ~FCP_LUN_ONLINING;
			mutex_exit(&LUN_TGT->tgt_mutex);
			*rval = EIO;
			break;
		}
		mutex_enter(&LUN_TGT->tgt_mutex);
		plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
		    FCP_LUN_ONLINING);
		mutex_exit(&LUN_TGT->tgt_mutex);
		break;
	}

	case DEVCTL_BUS_DEV_CREATE: {
		uchar_t			*bytes = NULL;
		uint_t			nbytes;
		struct fcp_tgt		*ptgt = NULL;
		struct fcp_lun		*plun = NULL;
		dev_info_t		*useless_dip = NULL;

		*rval = ndi_dc_devi_create(dcp, pptr->port_dip,
		    DEVCTL_CONSTRUCT, &useless_dip);
		if (*rval != 0 || useless_dip == NULL) {
			break;
		}

		/* the caller must have supplied a port WWN property */
		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
		    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
			*rval = EINVAL;
			(void) ndi_devi_free(useless_dip);
			if (bytes != NULL) {
				ddi_prop_free(bytes);
			}
			break;
		}

		*rval = fcp_create_on_demand(pptr, bytes);
		if (*rval == 0) {
			mutex_enter(&pptr->port_mutex);
			ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
			if (ptgt) {
				/*
				 * We now have a pointer to the target that
				 * was created. Lets point to the first LUN on
				 * this new target.
				 */
				mutex_enter(&ptgt->tgt_mutex);

				plun = ptgt->tgt_lun;
				/*
				 * There may be stale/offline LUN entries on
				 * this list (this is by design) and so we have
				 * to make sure we point to the first online
				 * LUN
				 */
				while (plun &&
				    plun->lun_state & FCP_LUN_OFFLINE) {
					plun = plun->lun_next;
				}

				mutex_exit(&ptgt->tgt_mutex);
			}
			mutex_exit(&pptr->port_mutex);
		}

		if (*rval == 0 && ptgt && plun) {
			mutex_enter(&plun->lun_mutex);
			/*
			 * Allow up to fcp_lun_ready_retry seconds to
			 * configure all the luns behind the target.
			 *
			 * The intent here is to allow targets with long
			 * reboot/reset-recovery times to become available
			 * while limiting the maximum wait time for an
			 * unresponsive target.
			 */
			end_time = ddi_get_lbolt() +
			    SEC_TO_TICK(fcp_lun_ready_retry);

			while (ddi_get_lbolt() < end_time) {
				retval = FC_SUCCESS;

				/*
				 * The new ndi interfaces for on-demand creation
				 * are inflexible, Do some more work to pass on
				 * a path name of some LUN (design is broken !)
				 */
				if (plun->lun_cip) {
					if (plun->lun_mpxio == 0) {
						cdip = DIP(plun->lun_cip);
					} else {
						cdip = mdi_pi_get_client(
						    PIP(plun->lun_cip));
					}
					if (cdip == NULL) {
						*rval = ENXIO;
						break;
					}

					if (!i_ddi_devi_attached(cdip)) {
						/* not ready: poll again in 1s */
						mutex_exit(&plun->lun_mutex);
						delay(drv_usectohz(1000000));
						mutex_enter(&plun->lun_mutex);
					} else {
						/*
						 * This Lun is ready, lets
						 * check the next one.
						 */
						mutex_exit(&plun->lun_mutex);
						plun = plun->lun_next;
						while (plun && (plun->lun_state
						    & FCP_LUN_OFFLINE)) {
							plun = plun->lun_next;
						}
						if (!plun) {
							break;
						}
						mutex_enter(&plun->lun_mutex);
					}
				} else {
					/*
					 * lun_cip field for a valid lun
					 * should never be NULL. Fail the
					 * command.
					 */
					*rval = ENXIO;
					break;
				}
			}
			if (plun) {
				mutex_exit(&plun->lun_mutex);
			} else {
				/*
				 * All LUNs attached: return the last child's
				 * devname to the caller.
				 */
				char devnm[MAXNAMELEN];
				int nmlen;

				nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
				    ddi_node_name(cdip),
				    ddi_get_name_addr(cdip));

				if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
				    0) {
					*rval = EFAULT;
				}
			}
		} else {
			int	i;
			char	buf[25];

			/* render the WWN as hex for the warning message */
			for (i = 0; i < FC_WWN_SIZE; i++) {
				(void) sprintf(&buf[i << 1], "%02x", bytes[i]);
			}

			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to create nodes for pwwn=%s; error=%x",
			    buf, *rval);
		}

		(void) ndi_devi_free(useless_dip);
		ddi_prop_free(bytes);
		break;
	}

	case DEVCTL_DEVICE_RESET: {
		struct fcp_lun		*plun;
		child_info_t		*cip = CIP(cdip);

		ASSERT(cdip != NULL);
		ASSERT(pptr != NULL);
		mutex_enter(&pptr->port_mutex);
		if (pip != NULL) {
			cip = CIP(pip);
		}
		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
			mutex_exit(&pptr->port_mutex);
			*rval = ENXIO;
			break;
		}
		mutex_exit(&pptr->port_mutex);

		mutex_enter(&plun->lun_tgt->tgt_mutex);
		if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
			mutex_exit(&plun->lun_tgt->tgt_mutex);

			*rval = ENXIO;
			break;
		}

		if (plun->lun_sd == NULL) {
			mutex_exit(&plun->lun_tgt->tgt_mutex);

			*rval = ENXIO;
			break;
		}
		mutex_exit(&plun->lun_tgt->tgt_mutex);

		/*
		 * set up ap so that fcp_reset can figure out
		 * which target to reset
		 */
		if (fcp_scsi_reset(&plun->lun_sd->sd_address,
		    RESET_TARGET) == FALSE) {
			*rval = EIO;
		}
		break;
	}

	case DEVCTL_BUS_GETSTATE:
		ASSERT(dcp != NULL);
		ASSERT(pptr != NULL);
		ASSERT(pptr->port_dip != NULL);
		if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
		    NDI_SUCCESS) {
			*rval = EFAULT;
		}
		break;

	case DEVCTL_BUS_QUIESCE:
	case DEVCTL_BUS_UNQUIESCE:
		*rval = ENOTSUP;
		break;

	case DEVCTL_BUS_RESET:
	case DEVCTL_BUS_RESETALL:
		ASSERT(pptr != NULL);
		(void) fcp_linkreset(pptr, NULL, KM_SLEEP);
		break;

	default:
		ASSERT(dcp != NULL);
		*rval = ENOTTY;
		break;
	}

	/* all done -- clean up and return */
out:	if (devi_entered) {
		if (is_mpxio) {
			mdi_devi_exit(pptr->port_dip, circ);
		} else {
			ndi_devi_exit(pptr->port_dip, circ);
		}
	}

	if (dcp != NULL) {
		ndi_dc_freehdl(dcp);
	}

	return (retval);
}
4215
4216
4217 /*ARGSUSED*/
4218 static int
4219 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4220 uint32_t claimed)
4221 {
4222 uchar_t r_ctl;
4223 uchar_t ls_code;
4224 struct fcp_port *pptr;
4225
4226 if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4227 return (FC_UNCLAIMED);
4228 }
4229
4230 mutex_enter(&pptr->port_mutex);
4231 if (pptr->port_state & (FCP_STATE_DETACHING |
4232 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4233 mutex_exit(&pptr->port_mutex);
4234 return (FC_UNCLAIMED);
4235 }
4236 mutex_exit(&pptr->port_mutex);
4237
4238 r_ctl = buf->ub_frame.r_ctl;
4239
4240 switch (r_ctl & R_CTL_ROUTING) {
4241 case R_CTL_EXTENDED_SVC:
4242 if (r_ctl == R_CTL_ELS_REQ) {
4243 ls_code = buf->ub_buffer[0];
4244
4245 switch (ls_code) {
4246 case LA_ELS_PRLI:
4247 /*
4248 * We really don't care if something fails.
4249 * If the PRLI was not sent out, then the
4250 * other end will time it out.
4251 */
4252 if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4253 return (FC_SUCCESS);
4254 }
4255 return (FC_UNCLAIMED);
4256 /* NOTREACHED */
4257
4258 default:
4259 break;
4260 }
4261 }
4262 /* FALLTHROUGH */
4263
4264 default:
4265 return (FC_UNCLAIMED);
4266 }
4267 }
4268
4269
/*ARGSUSED*/
static int
fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
    uint32_t claimed)
{
	/* FCP never claims unsolicited data frames. */
	return (FC_UNCLAIMED);
}
4277
4278 /*
4279 * Function: fcp_statec_callback
4280 *
4281 * Description: The purpose of this function is to handle a port state change.
4282 * It is called from fp/fctl and, in a few instances, internally.
4283 *
4284 * Argument: ulph fp/fctl port handle
4285 * port_handle fcp_port structure
4286 * port_state Physical state of the port
4287 * port_top Topology
4288 * *devlist Pointer to the first entry of a table
4289 * containing the remote ports that can be
4290 * reached.
4291 * dev_cnt Number of entries pointed by devlist.
4292 * port_sid Port ID of the local port.
4293 *
4294 * Return Value: None
4295 */
4296 /*ARGSUSED*/
4297 static void
4298 fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
4299 uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
4300 uint32_t dev_cnt, uint32_t port_sid)
4301 {
4302 uint32_t link_count;
4303 int map_len = 0;
4304 struct fcp_port *pptr;
4305 fcp_map_tag_t *map_tag = NULL;
4306
4307 if ((pptr = fcp_get_port(port_handle)) == NULL) {
4308 fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
4309 return; /* nothing to work with! */
4310 }
4311
4312 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4313 fcp_trace, FCP_BUF_LEVEL_2, 0,
4314 "fcp_statec_callback: port state/dev_cnt/top ="
4315 "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
4316 dev_cnt, port_top);
4317
4318 mutex_enter(&pptr->port_mutex);
4319
4320 /*
4321 * If a thread is in detach, don't do anything.
4322 */
4323 if (pptr->port_state & (FCP_STATE_DETACHING |
4324 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4325 mutex_exit(&pptr->port_mutex);
4326 return;
4327 }
4328
4329 /*
4330 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
4331 * init_pkt is called, it knows whether or not the target's status
4332 * (or pd) might be changing.
4333 */
4334
4335 if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
4336 pptr->port_state |= FCP_STATE_IN_CB_DEVC;
4337 }
4338
4339 /*
4340 * the transport doesn't allocate or probe unless being
4341 * asked to by either the applications or ULPs
4342 *
4343 * in cases where the port is OFFLINE at the time of port
4344 * attach callback and the link comes ONLINE later, for
4345 * easier automatic node creation (i.e. without you having to
4346 * go out and run the utility to perform LOGINs) the
4347 * following conditional is helpful
4348 */
4349 pptr->port_phys_state = port_state;
4350
4351 if (dev_cnt) {
4352 mutex_exit(&pptr->port_mutex);
4353
4354 map_len = sizeof (*map_tag) * dev_cnt;
4355 map_tag = kmem_alloc(map_len, KM_NOSLEEP);
4356 if (map_tag == NULL) {
4357 fcp_log(CE_WARN, pptr->port_dip,
4358 "!fcp%d: failed to allocate for map tags; "
4359 " state change will not be processed",
4360 pptr->port_instance);
4361
4362 mutex_enter(&pptr->port_mutex);
4363 pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4364 mutex_exit(&pptr->port_mutex);
4365
4366 return;
4367 }
4368
4369 mutex_enter(&pptr->port_mutex);
4370 }
4371
4372 if (pptr->port_id != port_sid) {
4373 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4374 fcp_trace, FCP_BUF_LEVEL_3, 0,
4375 "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
4376 port_sid);
4377 /*
4378 * The local port changed ID. It is the first time a port ID
4379 * is assigned or something drastic happened. We might have
4380 * been unplugged and replugged on another loop or fabric port
4381 * or somebody grabbed the AL_PA we had or somebody rezoned
4382 * the fabric we were plugged into.
4383 */
4384 pptr->port_id = port_sid;
4385 }
4386
4387 switch (FC_PORT_STATE_MASK(port_state)) {
4388 case FC_STATE_OFFLINE:
4389 case FC_STATE_RESET_REQUESTED:
4390 /*
4391 * link has gone from online to offline -- just update the
4392 * state of this port to BUSY and MARKed to go offline
4393 */
4394 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4395 fcp_trace, FCP_BUF_LEVEL_3, 0,
4396 "link went offline");
4397 if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
4398 /*
4399 * We were offline a while ago and this one
4400 * seems to indicate that the loop has gone
4401 * dead forever.
4402 */
4403 pptr->port_tmp_cnt += dev_cnt;
4404 pptr->port_state &= ~FCP_STATE_OFFLINE;
4405 pptr->port_state |= FCP_STATE_INIT;
4406 link_count = pptr->port_link_cnt;
4407 fcp_handle_devices(pptr, devlist, dev_cnt,
4408 link_count, map_tag, FCP_CAUSE_LINK_DOWN);
4409 } else {
4410 pptr->port_link_cnt++;
4411 ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
4412 fcp_update_state(pptr, (FCP_LUN_BUSY |
4413 FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
4414 if (pptr->port_mpxio) {
4415 fcp_update_mpxio_path_verifybusy(pptr);
4416 }
4417 pptr->port_state |= FCP_STATE_OFFLINE;
4418 pptr->port_state &=
4419 ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
4420 pptr->port_tmp_cnt = 0;
4421 }
4422 mutex_exit(&pptr->port_mutex);
4423 break;
4424
4425 case FC_STATE_ONLINE:
4426 case FC_STATE_LIP:
4427 case FC_STATE_LIP_LBIT_SET:
4428 /*
4429 * link has gone from offline to online
4430 */
4431 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4432 fcp_trace, FCP_BUF_LEVEL_3, 0,
4433 "link went online");
4434
4435 pptr->port_link_cnt++;
4436
4437 while (pptr->port_ipkt_cnt) {
4438 mutex_exit(&pptr->port_mutex);
4439 delay(drv_usectohz(1000000));
4440 mutex_enter(&pptr->port_mutex);
4441 }
4442
4443 pptr->port_topology = port_top;
4444
4445 /*
4446 * The state of the targets and luns accessible through this
4447 * port is updated.
4448 */
4449 fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
4450 FCP_CAUSE_LINK_CHANGE);
4451
4452 pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
4453 pptr->port_state |= FCP_STATE_ONLINING;
4454 pptr->port_tmp_cnt = dev_cnt;
4455 link_count = pptr->port_link_cnt;
4456
4457 pptr->port_deadline = fcp_watchdog_time +
4458 FCP_ICMD_DEADLINE;
4459
4460 if (!dev_cnt) {
4461 /*
4462 * We go directly to the online state if no remote
4463 * ports were discovered.
4464 */
4465 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4466 fcp_trace, FCP_BUF_LEVEL_3, 0,
4467 "No remote ports discovered");
4468
4469 pptr->port_state &= ~FCP_STATE_ONLINING;
4470 pptr->port_state |= FCP_STATE_ONLINE;
4471 }
4472
4473 switch (port_top) {
4474 case FC_TOP_FABRIC:
4475 case FC_TOP_PUBLIC_LOOP:
4476 case FC_TOP_PRIVATE_LOOP:
4477 case FC_TOP_PT_PT:
4478
4479 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4480 fcp_retry_ns_registry(pptr, port_sid);
4481 }
4482
4483 fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
4484 map_tag, FCP_CAUSE_LINK_CHANGE);
4485 break;
4486
4487 default:
4488 /*
4489 * We got here because we were provided with an unknown
4490 * topology.
4491 */
4492 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4493 pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
4494 }
4495
4496 pptr->port_tmp_cnt -= dev_cnt;
4497 fcp_log(CE_WARN, pptr->port_dip,
4498 "!unknown/unsupported topology (0x%x)", port_top);
4499 break;
4500 }
4501 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4502 fcp_trace, FCP_BUF_LEVEL_3, 0,
4503 "Notify ssd of the reset to reinstate the reservations");
4504
4505 scsi_hba_reset_notify_callback(&pptr->port_mutex,
4506 &pptr->port_reset_notify_listf);
4507
4508 mutex_exit(&pptr->port_mutex);
4509
4510 break;
4511
4512 case FC_STATE_RESET:
4513 ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
4514 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4515 fcp_trace, FCP_BUF_LEVEL_3, 0,
4516 "RESET state, waiting for Offline/Online state_cb");
4517 mutex_exit(&pptr->port_mutex);
4518 break;
4519
4520 case FC_STATE_DEVICE_CHANGE:
4521 /*
4522 * We come here when an application has requested
4523 * Dynamic node creation/deletion in Fabric connectivity.
4524 */
4525 if (pptr->port_state & (FCP_STATE_OFFLINE |
4526 FCP_STATE_INIT)) {
4527 /*
4528 * This case can happen when the FCTL is in the
4529 * process of giving us on online and the host on
4530 * the other side issues a PLOGI/PLOGO. Ideally
4531 * the state changes should be serialized unless
4532 * they are opposite (online-offline).
4533 * The transport will give us a final state change
4534 * so we can ignore this for the time being.
4535 */
4536 pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4537 mutex_exit(&pptr->port_mutex);
4538 break;
4539 }
4540
4541 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4542 fcp_retry_ns_registry(pptr, port_sid);
4543 }
4544
4545 /*
4546 * Extend the deadline under steady state conditions
4547 * to provide more time for the device-change-commands
4548 */
4549 if (!pptr->port_ipkt_cnt) {
4550 pptr->port_deadline = fcp_watchdog_time +
4551 FCP_ICMD_DEADLINE;
4552 }
4553
4554 /*
4555 * There is another race condition here, where if we were
4556 * in ONLINEING state and a devices in the map logs out,
4557 * fp will give another state change as DEVICE_CHANGE
4558 * and OLD. This will result in that target being offlined.
4559 * The pd_handle is freed. If from the first statec callback
4560 * we were going to fire a PLOGI/PRLI, the system will
4561 * panic in fc_ulp_transport with invalid pd_handle.
4562 * The fix is to check for the link_cnt before issuing
4563 * any command down.
4564 */
4565 fcp_update_targets(pptr, devlist, dev_cnt,
4566 FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);
4567
4568 link_count = pptr->port_link_cnt;
4569
4570 fcp_handle_devices(pptr, devlist, dev_cnt,
4571 link_count, map_tag, FCP_CAUSE_TGT_CHANGE);
4572
4573 pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4574
4575 mutex_exit(&pptr->port_mutex);
4576 break;
4577
4578 case FC_STATE_TARGET_PORT_RESET:
4579 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4580 fcp_retry_ns_registry(pptr, port_sid);
4581 }
4582
4583 /* Do nothing else */
4584 mutex_exit(&pptr->port_mutex);
4585 break;
4586
4587 default:
4588 fcp_log(CE_WARN, pptr->port_dip,
4589 "!Invalid state change=0x%x", port_state);
4590 mutex_exit(&pptr->port_mutex);
4591 break;
4592 }
4593
4594 if (map_tag) {
4595 kmem_free(map_tag, map_len);
4596 }
4597 }
4598
4599 /*
4600 * Function: fcp_handle_devices
4601 *
4602 * Description: This function updates the devices currently known by
4603 * walking the list provided by the caller. The list passed
4604 * by the caller is supposed to be the list of reachable
4605 * devices.
4606 *
4607 * Argument: *pptr Fcp port structure.
4608 * *devlist Pointer to the first entry of a table
4609 * containing the remote ports that can be
4610 * reached.
4611 * dev_cnt Number of entries pointed by devlist.
4612 * link_cnt Link state count.
4613 * *map_tag Array of fcp_map_tag_t structures.
4614 * cause What caused this function to be called.
4615 *
4616 * Return Value: None
4617 *
4618 * Notes: The pptr->port_mutex must be held.
4619 */
static void
fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
    uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
{
	int		i;
	int		check_finish_init = 0;	/* deferred finish-init flag */
	fc_portmap_t	*map_entry;
	struct fcp_tgt	*ptgt = NULL;

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "fcp_handle_devices: called for %d dev(s)", dev_cnt);

	/* A non-empty device list requires the caller-supplied tag array. */
	if (dev_cnt) {
		ASSERT(map_tag != NULL);
	}

	/*
	 * The following code goes through the list of remote ports that are
	 * accessible through this (pptr) local port (The list walked is the
	 * one provided by the caller which is the list of the remote ports
	 * currently reachable). It checks if any of them was already
	 * known by looking for the corresponding target structure based on
	 * the world wide name. If a target is part of the list it is tagged
	 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
	 *
	 * Note: the loop bails out as soon as the link generation changes
	 * (pptr->port_link_cnt != link_cnt), i.e. another link state event
	 * has superseded this one.
	 *
	 * Old comment
	 * -----------
	 * Before we drop port mutex; we MUST get the tags updated; This
	 * two step process is somewhat slow, but more reliable.
	 */
	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
		map_entry = &(devlist[i]);

		/*
		 * get ptr to this map entry in our port's
		 * list (if any)
		 */
		ptgt = fcp_lookup_target(pptr,
		    (uchar_t *)&(map_entry->map_pwwn));

		if (ptgt) {
			/* Snapshot the change count for later staleness checks. */
			map_tag[i] = ptgt->tgt_change_cnt;
			if (cause == FCP_CAUSE_LINK_CHANGE) {
				ptgt->tgt_aux_state = FCP_TGT_TAGGED;
			}
		}
	}

	/*
	 * At this point we know which devices of the new list were already
	 * known (The field tgt_aux_state of the target structure has been
	 * set to FCP_TGT_TAGGED).
	 *
	 * The following code goes through the list of targets currently known
	 * by the local port (the list is actually a hashing table). If a
	 * target is found and is not tagged, it means the target cannot
	 * be reached anymore through the local port (pptr). It is offlined.
	 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
	 */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			mutex_enter(&ptgt->tgt_mutex);
			if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
			    (cause == FCP_CAUSE_LINK_CHANGE) &&
			    !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
				fcp_offline_target_now(pptr, ptgt,
				    link_cnt, ptgt->tgt_change_cnt, 0);
			}
			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	/*
	 * At this point, the devices that were known but cannot be reached
	 * anymore, have most likely been offlined.
	 *
	 * The following section of code seems to go through the list of
	 * remote ports that can now be reached. For every single one it
	 * checks if it is already known or if it is a new port.
	 */
	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {

		/*
		 * Complete the finish-init accounting that was deferred
		 * while handling the previous map entry.
		 */
		if (check_finish_init) {
			ASSERT(i > 0);
			(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
			    map_tag[i - 1], cause);
			check_finish_init = 0;
		}

		/* get a pointer to this map entry */
		map_entry = &(devlist[i]);

		/*
		 * Check for the duplicate map entry flag. If we have marked
		 * this entry as a duplicate we skip it since the correct
		 * (perhaps even same) state change will be encountered
		 * later in the list.
		 */
		if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
			continue;
		}

		/* get ptr to this map entry in our port's list (if any) */
		ptgt = fcp_lookup_target(pptr,
		    (uchar_t *)&(map_entry->map_pwwn));

		if (ptgt) {
			/*
			 * This device was already known. The field
			 * tgt_aux_state is reset (was probably set to
			 * FCP_TGT_TAGGED previously in this routine).
			 */
			ptgt->tgt_aux_state = 0;
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "handle_devices: map did/state/type/flags = "
			    "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
			    "tgt_state=%d",
			    map_entry->map_did.port_id, map_entry->map_state,
			    map_entry->map_type, map_entry->map_flags,
			    ptgt->tgt_d_id, ptgt->tgt_state);
		}

		if (map_entry->map_type == PORT_DEVICE_OLD ||
		    map_entry->map_type == PORT_DEVICE_NEW ||
		    map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED ||
		    map_entry->map_type == PORT_DEVICE_CHANGED) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "map_type=%x, did = %x",
			    map_entry->map_type,
			    map_entry->map_did.port_id);
		}

		switch (map_entry->map_type) {
		case PORT_DEVICE_NOCHANGE:
		case PORT_DEVICE_USER_CREATE:
		case PORT_DEVICE_USER_LOGIN:
		case PORT_DEVICE_NEW:
		case PORT_DEVICE_REPORTLUN_CHANGED:
			FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);

			/*
			 * fcp_handle_mapflags() returns TRUE when the
			 * target is deliberately skipped; in that case the
			 * finish-init accounting is deferred to the next
			 * loop iteration (or the epilogue below).
			 */
			if (fcp_handle_mapflags(pptr, ptgt, map_entry,
			    link_cnt, (ptgt) ? map_tag[i] : 0,
			    cause) == TRUE) {

				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_2);
				check_finish_init++;
			}
			break;

		case PORT_DEVICE_OLD:
			if (ptgt != NULL) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_3);

				mutex_enter(&ptgt->tgt_mutex);
				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
					/*
					 * Must do an in-line wait for I/Os
					 * to get drained
					 */
					mutex_exit(&ptgt->tgt_mutex);
					mutex_exit(&pptr->port_mutex);

					/*
					 * Poll once a second until all
					 * internal packets and outstanding
					 * LUN commands on this target have
					 * drained.  Both the port and target
					 * mutexes are dropped across the
					 * delay() calls.
					 */
					mutex_enter(&ptgt->tgt_mutex);
					while (ptgt->tgt_ipkt_cnt ||
					    fcp_outstanding_lun_cmds(ptgt)
					    == FC_SUCCESS) {
						mutex_exit(&ptgt->tgt_mutex);
						delay(drv_usectohz(1000000));
						mutex_enter(&ptgt->tgt_mutex);
					}
					mutex_exit(&ptgt->tgt_mutex);

					mutex_enter(&pptr->port_mutex);
					mutex_enter(&ptgt->tgt_mutex);

					(void) fcp_offline_target(pptr, ptgt,
					    link_cnt, map_tag[i], 0, 0);
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
			check_finish_init++;
			break;

		case PORT_DEVICE_USER_DELETE:
		case PORT_DEVICE_USER_LOGOUT:
			if (ptgt != NULL) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_4);

				mutex_enter(&ptgt->tgt_mutex);
				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
					(void) fcp_offline_target(pptr, ptgt,
					    link_cnt, map_tag[i], 1, 0);
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
			check_finish_init++;
			break;

		case PORT_DEVICE_CHANGED:
			if (ptgt != NULL) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_5);

				if (fcp_device_changed(pptr, ptgt,
				    map_entry, link_cnt, map_tag[i],
				    cause) == TRUE) {
					check_finish_init++;
				}
			} else {
				/* Unknown target: treat as a new device. */
				if (fcp_handle_mapflags(pptr, ptgt,
				    map_entry, link_cnt, 0, cause) == TRUE) {
					check_finish_init++;
				}
			}
			break;

		default:
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Invalid map_type=0x%x", map_entry->map_type);
			check_finish_init++;
			break;
		}
	}

	/*
	 * Flush any finish-init accounting left over from the last map
	 * entry; if the list was empty, offline every known target instead.
	 * Both paths are skipped when the link generation changed under us.
	 */
	if (check_finish_init && pptr->port_link_cnt == link_cnt) {
		ASSERT(i > 0);
		(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
		    map_tag[i-1], cause);
	} else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
		fcp_offline_all(pptr, link_cnt, cause);
	}
}
4859
4860 static int
4861 fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause)
4862 {
4863 struct fcp_lun *plun;
4864 struct fcp_port *pptr;
4865 int rscn_count;
4866 int lun0_newalloc;
4867 int ret = TRUE;
4868
4869 ASSERT(ptgt);
4870 pptr = ptgt->tgt_port;
4871 lun0_newalloc = 0;
4872 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
4873 /*
4874 * no LUN struct for LUN 0 yet exists,
4875 * so create one
4876 */
4877 plun = fcp_alloc_lun(ptgt);
4878 if (plun == NULL) {
4879 fcp_log(CE_WARN, pptr->port_dip,
4880 "!Failed to allocate lun 0 for"
4881 " D_ID=%x", ptgt->tgt_d_id);
4882 return (ret);
4883 }
4884 lun0_newalloc = 1;
4885 }
4886
4887 mutex_enter(&ptgt->tgt_mutex);
4888 /*
4889 * consider lun 0 as device not connected if it is
4890 * offlined or newly allocated
4891 */
4892 if ((plun->lun_state & FCP_LUN_OFFLINE) || lun0_newalloc) {
4893 plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
4894 }
4895 plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
4896 plun->lun_state &= ~FCP_LUN_OFFLINE;
4897 ptgt->tgt_lun_cnt = 1;
4898 ptgt->tgt_report_lun_cnt = 0;
4899 mutex_exit(&ptgt->tgt_mutex);
4900
4901 rscn_count = fc_ulp_get_rscn_count(pptr->port_fp_handle);
4902 if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
4903 sizeof (struct fcp_reportlun_resp), pptr->port_link_cnt,
4904 ptgt->tgt_change_cnt, cause, rscn_count) != DDI_SUCCESS) {
4905 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4906 fcp_trace, FCP_BUF_LEVEL_3, 0, "!Failed to send REPORTLUN "
4907 "to D_ID=%x", ptgt->tgt_d_id);
4908 } else {
4909 ret = FALSE;
4910 }
4911
4912 return (ret);
4913 }
4914
4915 /*
4916 * Function: fcp_handle_mapflags
4917 *
4918 * Description: This function creates a target structure if the ptgt passed
4919 * is NULL. It also kicks off the PLOGI if we are not logged
4920 * into the target yet or the PRLI if we are logged into the
4921 * target already. The rest of the treatment is done in the
4922 * callbacks of the PLOGI or PRLI.
4923 *
4924 * Argument: *pptr FCP Port structure.
4925 * *ptgt Target structure.
4926 * *map_entry Array of fc_portmap_t structures.
4927 * link_cnt Link state count.
4928 * tgt_cnt Target state count.
4929 * cause What caused this function to be called.
4930 *
4931 * Return Value: TRUE Failed
4932 * FALSE Succeeded
4933 *
4934 * Notes: pptr->port_mutex must be owned.
4935 */
static int
fcp_handle_mapflags(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
{
	int		lcount;
	int		tcount;
	int		ret = TRUE;	/* TRUE == failed/skipped (see header) */
	int		alloc;
	struct fcp_ipkt	*icmd;
	struct fcp_lun	*pseq_lun = NULL;
	uchar_t		opcode;
	int		valid_ptgt_was_passed = FALSE;

	ASSERT(mutex_owned(&pptr->port_mutex));

	/*
	 * This case is possible where the FCTL has come up and done discovery
	 * before FCP was loaded and attached. FCTL would have discovered the
	 * devices and later the ULP came online. In this case ULP's would get
	 * PORT_DEVICE_NOCHANGE but target would be NULL.
	 */
	if (ptgt == NULL) {
		/* don't already have a target */
		/*
		 * port_mutex is dropped around the allocation because
		 * fcp_alloc_tgt() may block.
		 */
		mutex_exit(&pptr->port_mutex);
		ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
		mutex_enter(&pptr->port_mutex);

		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			return (ret);
		}
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_statec_cause = cause;
		ptgt->tgt_tmp_cnt = 1;
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		valid_ptgt_was_passed = TRUE;
	}

	/*
	 * Copy in the target parameters
	 */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_d_id = map_entry->map_did.port_id;
	ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
	ptgt->tgt_pd_handle = map_entry->map_pd;
	ptgt->tgt_fca_dev = NULL;

	/* Copy port and node WWNs */
	bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
	    FC_WWN_SIZE);
	bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
	    FC_WWN_SIZE);

	/*
	 * A pre-existing, logged-in, unchanged target that holds an online
	 * tape (sequential) LUN is reset-marked and skipped instead of being
	 * rediscovered.
	 */
	if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
	    (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
	    (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
	    valid_ptgt_was_passed) {
		/*
		 * determine if there are any tape LUNs on this target
		 */
		for (pseq_lun = ptgt->tgt_lun;
		    pseq_lun != NULL;
		    pseq_lun = pseq_lun->lun_next) {
			if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
			    !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
				fcp_update_tgt_state(ptgt, FCP_RESET,
				    FCP_LUN_MARK);
				mutex_exit(&ptgt->tgt_mutex);
				return (ret);
			}
		}
	}

	/*
	 * If a REPORT LUNS DATA HAS CHANGED indication was received, send
	 * out the REPORT LUNS command promptly and skip the PLOGI/PRLI
	 * process entirely.
	 */
	if (map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED) {
		ptgt->tgt_state &= ~(FCP_TGT_OFFLINE | FCP_TGT_MARK);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		ret = fcp_handle_reportlun_changed(ptgt, cause);

		mutex_enter(&pptr->port_mutex);
		return (ret);
	}

	/*
	 * If ptgt was NULL when this function was entered, then tgt_node_state
	 * was never specifically initialized but zeroed out which means
	 * FCP_TGT_NODE_NONE.
	 */
	switch (ptgt->tgt_node_state) {
	case FCP_TGT_NODE_NONE:
	case FCP_TGT_NODE_ON_DEMAND:
		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    !fcp_enable_auto_configuration &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    fcp_enable_auto_configuration &&
		    (ptgt->tgt_manual_config_only == 1) &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			/*
			 * If auto configuration is set and
			 * the tgt_manual_config_only flag is set then
			 * we only want the user to be able to change
			 * the state through create_on_demand.
			 */
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
		break;

	case FCP_TGT_NODE_PRESENT:
		break;
	}
	/*
	 * If we are booting from a fabric device, make sure we
	 * mark the node state appropriately for this target to be
	 * enumerated
	 */
	if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
		if (bcmp((caddr_t)pptr->port_boot_wwn,
		    (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
		    sizeof (ptgt->tgt_port_wwn)) == 0) {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
	}
	mutex_exit(&ptgt->tgt_mutex);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
	    map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
	    map_entry->map_rscn_info.ulp_rscn_count);

	mutex_enter(&ptgt->tgt_mutex);

	/*
	 * Reset target OFFLINE state and mark the target BUSY
	 */
	ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
	ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);

	/* Fall back to the target's own change count when no tag was given. */
	tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
	lcount = link_cnt;

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/*
	 * if we are already logged in, then we do a PRLI, else
	 * we do a PLOGI first (to get logged in)
	 *
	 * We will not check if we are the PLOGI initiator
	 */
	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;

	/* One buffer big enough for either ELS payload. */
	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));

	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA, lcount, tcount,
	    cause, map_entry->map_rscn_info.ulp_rscn_count);

	if (icmd == NULL) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
		/*
		 * We've exited port_mutex before calling fcp_icmd_alloc,
		 * we need to make sure we reacquire it before returning.
		 */
		mutex_enter(&pptr->port_mutex);
		/*
		 * NOTE(review): FALSE ("succeeded") is returned here even
		 * though the allocation failed, so the caller does not run
		 * its finish-init accounting for this entry -- confirm this
		 * asymmetry with the header comment is intentional.
		 */
		return (FALSE);
	}

	/* TRUE is only returned while target is intended skipped */
	ret = FALSE;
	/* discover info about this target */
	if ((fcp_send_els(pptr, ptgt, icmd, opcode,
	    lcount, tcount, cause)) == DDI_SUCCESS) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
	} else {
		/* Send failed: release the internal packet we allocated. */
		fcp_icmd_free(pptr, icmd);
		ret = TRUE;
	}
	mutex_enter(&pptr->port_mutex);

	return (ret);
}
5130
5131 /*
5132 * Function: fcp_send_els
5133 *
5134 * Description: Sends an ELS to the target specified by the caller. Supports
5135 * PLOGI and PRLI.
5136 *
5137 * Argument: *pptr Fcp port.
5138 * *ptgt Target to send the ELS to.
5139 * *icmd Internal packet
5140 * opcode ELS opcode
5141 * lcount Link state change counter
5142 * tcount Target state change counter
5143 * cause What caused the call
5144 *
5145 * Return Value: DDI_SUCCESS
5146 * Others
5147 */
static int
fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
{
	fc_packet_t	*fpkt;
	fc_frame_hdr_t	*hp;
	int		internal = 0;	/* nonzero: icmd allocated here, we free it */
	int		alloc;
	int		cmd_len;
	int		resp_len;
	int		res = DDI_FAILURE; /* default result */
	int		rval = DDI_FAILURE;	/* transport call status */

	ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
	ASSERT(ptgt->tgt_port == pptr);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
	    (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");

	/* Payload sizes depend on which ELS is being sent. */
	if (opcode == LA_ELS_PLOGI) {
		cmd_len = sizeof (la_els_logi_t);
		resp_len = sizeof (la_els_logi_t);
	} else {
		ASSERT(opcode == LA_ELS_PRLI);
		cmd_len = sizeof (la_els_prli_t);
		resp_len = sizeof (la_els_prli_t);
	}

	/*
	 * The caller may pass a pre-allocated internal packet; allocate one
	 * here otherwise and remember (via 'internal') that we own it.
	 */
	if (icmd == NULL) {
		alloc = FCP_MAX(sizeof (la_els_logi_t),
		    sizeof (la_els_prli_t));
		icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
		    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
		    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
		if (icmd == NULL) {
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
			return (res);
		}
		internal++;
	}
	fpkt = icmd->ipkt_fpkt;

	fpkt->pkt_cmdlen = cmd_len;
	fpkt->pkt_rsplen = resp_len;
	fpkt->pkt_datalen = 0;
	icmd->ipkt_retries = 0;

	/* fill in fpkt info */
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;

	/* get ptr to frame hdr in fpkt */
	hp = &fpkt->pkt_cmd_fhdr;

	/*
	 * fill in frame hdr
	 */
	hp->r_ctl = R_CTL_ELS_REQ;
	hp->s_id = pptr->port_id;	/* source ID */
	hp->d_id = ptgt->tgt_d_id;	/* dest ID */
	hp->type = FC_TYPE_EXTENDED_LS;
	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
	hp->seq_id = 0;
	hp->rsvd = 0;
	hp->df_ctl = 0;
	hp->seq_cnt = 0;
	hp->ox_id = 0xffff;		/* i.e. none */
	hp->rx_id = 0xffff;		/* i.e. none */
	hp->ro = 0;

	/*
	 * at this point we have a filled in cmd pkt
	 *
	 * fill in the respective info, then use the transport to send
	 * the packet
	 *
	 * for a PLOGI call fc_ulp_login(), and
	 * for a PRLI call fc_ulp_issue_els()
	 */
	switch (opcode) {
	case LA_ELS_PLOGI: {
		struct la_els_logi logi;

		bzero(&logi, sizeof (struct la_els_logi));

		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;
		logi.ls_code.ls_code = LA_ELS_PLOGI;
		logi.ls_code.mbz = 0;

		/* Copy the payload into the packet's command buffer. */
		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));

		icmd->ipkt_opcode = LA_ELS_PLOGI;

		/*
		 * Verify under port_mutex that no link/target state change
		 * has invalidated this command before handing it to the
		 * transport.
		 */
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

			mutex_exit(&pptr->port_mutex);

			rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}

			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);

			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PLOGI");
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "fcp_send_els1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
		}
		break;
	}

	case LA_ELS_PRLI: {
		struct la_els_prli	prli;
		struct fcp_prli		*fprli;

		bzero(&prli, sizeof (struct la_els_prli));

		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;

		/* fill in PRLI cmd ELS fields */
		prli.ls_code = LA_ELS_PRLI;
		prli.page_length = 0x10; /* PRLI service parameter page is 16 bytes */
		prli.payload_length = sizeof (struct la_els_prli);

		icmd->ipkt_opcode = LA_ELS_PRLI;

		/* get ptr to PRLI service params */
		fprli = (struct fcp_prli *)prli.service_params;

		/* fill in service params */
		fprli->type = 0x08;		/* FC-4 type: SCSI FCP */
		fprli->resvd1 = 0;
		fprli->orig_process_assoc_valid = 0;
		fprli->resp_process_assoc_valid = 0;
		fprli->establish_image_pair = 1;
		fprli->resvd2 = 0;
		fprli->resvd3 = 0;
		fprli->obsolete_1 = 0;
		fprli->obsolete_2 = 0;
		fprli->data_overlay_allowed = 0;
		fprli->initiator_fn = 1;
		fprli->confirmed_compl_allowed = 1;

		/* Advertise target function only if the ltct ULP is loaded. */
		if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
			fprli->target_fn = 1;
		} else {
			fprli->target_fn = 0;
		}

		fprli->retry = 1;
		fprli->read_xfer_rdy_disabled = 1;
		fprli->write_xfer_rdy_disabled = 0;

		FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));

		/* issue the PRLI request */

		/*
		 * Same state-change check as the PLOGI path above before
		 * the packet goes down to the transport.
		 */
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

			mutex_exit(&pptr->port_mutex);

			rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}

			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);

			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PRLI");
		} else {
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
		}
		break;
	}

	default:
		fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
		break;
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: returning %d", res);

	/* On failure, free the internal packet only if we allocated it. */
	if (res != DDI_SUCCESS) {
		if (internal) {
			fcp_icmd_free(pptr, icmd);
		}
	}

	return (res);
}
5359
5360
5361 /*
5362 * called internally update the state of all of the tgts and each LUN
5363 * for this port (i.e. each target known to be attached to this port)
5364 * if they are not already offline
5365 *
5366 * must be called with the port mutex owned
5367 *
5368 * acquires and releases the target mutexes for each target attached
5369 * to this port
5370 */
5371 void
5372 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5373 {
5374 int i;
5375 struct fcp_tgt *ptgt;
5376
5377 ASSERT(mutex_owned(&pptr->port_mutex));
5378
5379 for (i = 0; i < FCP_NUM_HASH; i++) {
5380 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5381 ptgt = ptgt->tgt_next) {
5382 mutex_enter(&ptgt->tgt_mutex);
5383 fcp_update_tgt_state(ptgt, FCP_SET, state);
5384 ptgt->tgt_change_cnt++;
5385 ptgt->tgt_statec_cause = cause;
5386 ptgt->tgt_tmp_cnt = 1;
5387 ptgt->tgt_done = 0;
5388 mutex_exit(&ptgt->tgt_mutex);
5389 }
5390 }
5391 }
5392
5393
5394 static void
5395 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5396 {
5397 int i;
5398 int ndevs;
5399 struct fcp_tgt *ptgt;
5400
5401 ASSERT(mutex_owned(&pptr->port_mutex));
5402
5403 for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5404 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5405 ptgt = ptgt->tgt_next) {
5406 ndevs++;
5407 }
5408 }
5409
5410 if (ndevs == 0) {
5411 return;
5412 }
5413 pptr->port_tmp_cnt = ndevs;
5414
5415 for (i = 0; i < FCP_NUM_HASH; i++) {
5416 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5417 ptgt = ptgt->tgt_next) {
5418 (void) fcp_call_finish_init_held(pptr, ptgt,
5419 lcount, ptgt->tgt_change_cnt, cause);
5420 }
5421 }
5422 }
5423
5424 /*
5425 * Function: fcp_update_tgt_state
5426 *
5427 * Description: This function updates the field tgt_state of a target. That
5428 * field is a bitmap and which bit can be set or reset
5429 * individually. The action applied to the target state is also
5430 * applied to all the LUNs belonging to the target (provided the
5431 * LUN is not offline). A side effect of applying the state
5432 * modification to the target and the LUNs is the field tgt_trace
5433 * of the target and lun_trace of the LUNs is set to zero.
5434 *
5435 *
5436 * Argument: *ptgt Target structure.
5437 * flag Flag indication what action to apply (set/reset).
5438 * state State bits to update.
5439 *
5440 * Return Value: None
5441 *
5442 * Context: Interrupt, Kernel or User context.
5443 * The mutex of the target (ptgt->tgt_mutex) must be owned when
5444 * calling this function.
5445 */
5446 void
5447 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5448 {
5449 struct fcp_lun *plun;
5450
5451 ASSERT(mutex_owned(&ptgt->tgt_mutex));
5452
5453 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5454 /* The target is not offline. */
5455 if (flag == FCP_SET) {
5456 ptgt->tgt_state |= state;
5457 ptgt->tgt_trace = 0;
5458 } else {
5459 ptgt->tgt_state &= ~state;
5460 }
5461
5462 for (plun = ptgt->tgt_lun; plun != NULL;
5463 plun = plun->lun_next) {
5464 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5465 /* The LUN is not offline. */
5466 if (flag == FCP_SET) {
5467 plun->lun_state |= state;
5468 plun->lun_trace = 0;
5469 } else {
5470 plun->lun_state &= ~state;
5471 }
5472 }
5473 }
5474 }
5475 }
5476
/*
 * Function: fcp_update_lun_state
 *
 * Description: This function updates the field lun_state of a LUN. That
 *		field is a bitmap in which each bit can be set or reset
 *		individually.
 *
 * Argument:	*plun		LUN structure.
 *		flag		Flag indicating what action to apply
 *				(set/reset).
 *		state		State bits to update.
 *
 * Return Value: None
 *
 * Context:	Interrupt, Kernel or User context.
 *		The mutex of the target (ptgt->tgt_mutex) must be owned when
 *		calling this function.
 */
5494 void
5495 fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
5496 {
5497 struct fcp_tgt *ptgt = plun->lun_tgt;
5498
5499 ASSERT(mutex_owned(&ptgt->tgt_mutex));
5500
5501 if (!(plun->lun_state & FCP_TGT_OFFLINE)) {
5502 if (flag == FCP_SET) {
5503 plun->lun_state |= state;
5504 } else {
5505 plun->lun_state &= ~state;
5506 }
5507 }
5508 }
5509
5510 /*
5511 * Function: fcp_get_port
5512 *
5513 * Description: This function returns the fcp_port structure from the opaque
5514 * handle passed by the caller. That opaque handle is the handle
5515 * used by fp/fctl to identify a particular local port. That
5516 * handle has been stored in the corresponding fcp_port
5517 * structure. This function is going to walk the global list of
5518 * fcp_port structures till one has a port_fp_handle that matches
5519 * the handle passed by the caller. This function enters the
5520 * mutex fcp_global_mutex while walking the global list and then
5521 * releases it.
5522 *
5523 * Argument: port_handle Opaque handle that fp/fctl uses to identify a
5524 * particular port.
5525 *
5526 * Return Value: NULL Not found.
5527 * Not NULL Pointer to the fcp_port structure.
5528 *
5529 * Context: Interrupt, Kernel or User context.
5530 */
5531 static struct fcp_port *
5532 fcp_get_port(opaque_t port_handle)
5533 {
5534 struct fcp_port *pptr;
5535
5536 ASSERT(port_handle != NULL);
5537
5538 mutex_enter(&fcp_global_mutex);
5539 for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5540 if (pptr->port_fp_handle == port_handle) {
5541 break;
5542 }
5543 }
5544 mutex_exit(&fcp_global_mutex);
5545
5546 return (pptr);
5547 }
5548
5549
5550 static void
5551 fcp_unsol_callback(fc_packet_t *fpkt)
5552 {
5553 struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5554 struct fcp_port *pptr = icmd->ipkt_port;
5555
5556 if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5557 caddr_t state, reason, action, expln;
5558
5559 (void) fc_ulp_pkt_error(fpkt, &state, &reason,
5560 &action, &expln);
5561
5562 fcp_log(CE_WARN, pptr->port_dip,
5563 "!couldn't post response to unsolicited request: "
5564 " state=%s reason=%s rx_id=%x ox_id=%x",
5565 state, reason, fpkt->pkt_cmd_fhdr.ox_id,
5566 fpkt->pkt_cmd_fhdr.rx_id);
5567 }
5568 fcp_icmd_free(pptr, icmd);
5569 }
5570
5571
5572 /*
5573 * Perform general purpose preparation of a response to an unsolicited request
5574 */
5575 static void
5576 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
5577 uchar_t r_ctl, uchar_t type)
5578 {
5579 pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
5580 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
5581 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
5582 pkt->pkt_cmd_fhdr.type = type;
5583 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
5584 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
5585 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl;
5586 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
5587 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
5588 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
5589 pkt->pkt_cmd_fhdr.ro = 0;
5590 pkt->pkt_cmd_fhdr.rsvd = 0;
5591 pkt->pkt_comp = fcp_unsol_callback;
5592 pkt->pkt_pd = NULL;
5593 pkt->pkt_ub_resp_token = (opaque_t)buf;
5594 }
5595
5596
/*
 * Function: fcp_unsol_prli
 *
 * Description: Handles an unsolicited PRLI from a remote port by building
 *		and issuing a PRLI accept (LS_ACC) that carries our FCP
 *		service parameters.  On success the unsolicited buffer is
 *		released back to the transport; on a busy/offline link the
 *		response packet is queued for later retry.
 *
 * Argument:	*pptr		FCP port that received the request.
 *		*buf		Unsolicited buffer holding the PRLI payload.
 *
 * Return Value: FC_SUCCESS	Response issued (or queued for retry).
 *		 FC_FAILURE	Allocation failed or link state changed.
 */
/*ARGSUSED*/
static int
fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
{
	fc_packet_t		*fpkt;
	struct la_els_prli	prli;
	struct fcp_prli		*fprli;
	struct fcp_ipkt		*icmd;
	struct la_els_prli	*from;
	struct fcp_prli		*orig;
	struct fcp_tgt		*ptgt;
	int			tcount = 0;
	int			lcount;

	from = (struct la_els_prli *)buf->ub_buffer;
	orig = (struct fcp_prli *)from->service_params;

	/*
	 * Snapshot the target and link change counters so the response can
	 * be invalidated if the link or target state changes before it is
	 * issued.
	 */
	if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
	    NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		tcount = ptgt->tgt_change_cnt;
		mutex_exit(&ptgt->tgt_mutex);
	}

	mutex_enter(&pptr->port_mutex);
	lcount = pptr->port_link_cnt;
	mutex_exit(&pptr->port_mutex);

	if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
	    sizeof (la_els_prli_t), 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT)) == NULL) {
		return (FC_FAILURE);
	}

	fpkt = icmd->ipkt_fpkt;
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
	fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
	fpkt->pkt_rsplen = 0;
	fpkt->pkt_datalen = 0;

	icmd->ipkt_opcode = LA_ELS_PRLI;

	/* Build the PRLI accept payload. */
	bzero(&prli, sizeof (struct la_els_prli));
	fprli = (struct fcp_prli *)prli.service_params;
	prli.ls_code = LA_ELS_ACC;
	prli.page_length = 0x10;
	prli.payload_length = sizeof (struct la_els_prli);

	/* fill in service params (FC-4 TYPE 0x08 is SCSI-FCP) */
	fprli->type = 0x08;
	fprli->resvd1 = 0;
	/* Echo the originator's process-associator fields. */
	fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
	fprli->orig_process_associator = orig->orig_process_associator;
	fprli->resp_process_assoc_valid = 0;
	fprli->establish_image_pair = 1;
	fprli->resvd2 = 0;
	fprli->resvd3 = 0;
	fprli->obsolete_1 = 0;
	fprli->obsolete_2 = 0;
	fprli->data_overlay_allowed = 0;
	fprli->initiator_fn = 1;
	fprli->confirmed_compl_allowed = 1;

	/* Advertise target capability only if the target ULP is loaded. */
	if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
		fprli->target_fn = 1;
	} else {
		fprli->target_fn = 0;
	}

	fprli->retry = 1;
	fprli->read_xfer_rdy_disabled = 1;
	fprli->write_xfer_rdy_disabled = 0;

	/* save the unsol prli payload first */
	FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
	    fpkt->pkt_resp_acc, sizeof (struct la_els_prli));

	FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
	    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));

	fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);

	/*
	 * Issue the accept only if the link has not changed since the
	 * counters were sampled; otherwise the response is stale.
	 */
	mutex_enter(&pptr->port_mutex);
	if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
		int rval;
		mutex_exit(&pptr->port_mutex);

		if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
		    FC_SUCCESS) {
			if ((rval == FC_STATEC_BUSY || rval == FC_OFFLINE) &&
			    ptgt != NULL) {
				/* Transient condition: queue for retry. */
				fcp_queue_ipkt(pptr, fpkt);
				return (FC_SUCCESS);
			}
			/* Let it timeout */
			fcp_icmd_free(pptr, icmd);
			return (FC_FAILURE);
		}
	} else {
		mutex_exit(&pptr->port_mutex);
		fcp_icmd_free(pptr, icmd);
		return (FC_FAILURE);
	}

	/* Response is on its way; give the unsolicited buffer back. */
	(void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);

	return (FC_SUCCESS);
}
5707
/*
 * Function: fcp_icmd_alloc
 *
 * Description: This function allocates a fcp_ipkt structure. The pkt_comp
 *		field is initialized to fcp_icmd_callback.  Sometimes it is
 *		modified by the caller (such as fcp_send_scsi).  The
 *		structure is also tied to the state of the line and of the
 *		target at a particular time.  That link is established by
 *		setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
 *		and tcount which came respectively from pptr->link_cnt and
 *		ptgt->tgt_change_cnt.
 *
 * Argument:	*pptr		Fcp port.
 *		*ptgt		Target (destination of the command).
 *		cmd_len		Length of the command.
 *		resp_len	Length of the expected response.
 *		data_len	Length of the data.
 *		nodma		Indicates whether the command and response
 *				will be transferred through DMA or not.
 *		lcount		Link state change counter.
 *		tcount		Target state change counter.
 *		cause		Reason that lead to this call.
 *		rscn_count	RSCN count at the time of the call, or
 *				FC_INVALID_RSCN_COUNT if not applicable.
 *
 * Return Value: NULL		Failed.
 *		 Not NULL	Internal packet address.
 */
static struct fcp_ipkt *
fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
    int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
    uint32_t rscn_count)
{
	int		dma_setup = 0;
	fc_packet_t	*fpkt;
	struct fcp_ipkt	*icmd = NULL;

	/*
	 * One allocation holds the ipkt, the DMA cookie array and the
	 * FCA private area, in that order (see the pointer setup below).
	 */
	icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
	    pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
	    KM_NOSLEEP);
	if (icmd == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!internal packet allocation failed");
		return (NULL);
	}

	/*
	 * initialize the allocated packet
	 */
	icmd->ipkt_nodma = nodma;
	icmd->ipkt_next = icmd->ipkt_prev = NULL;
	icmd->ipkt_lun = NULL;

	icmd->ipkt_link_cnt = lcount;
	icmd->ipkt_change_cnt = tcount;
	icmd->ipkt_cause = cause;

	mutex_enter(&pptr->port_mutex);
	icmd->ipkt_port = pptr;
	mutex_exit(&pptr->port_mutex);

	/* keep track of amt of data to be sent in pkt */
	icmd->ipkt_cmdlen = cmd_len;
	icmd->ipkt_resplen = resp_len;
	icmd->ipkt_datalen = data_len;

	/* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
	icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);

	/* set pkt's private ptr to point to cmd pkt */
	icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;

	/* set FCA private ptr to memory just beyond */
	icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
	    ((char *)icmd + sizeof (struct fcp_ipkt) +
	    pptr->port_dmacookie_sz);

	/* get ptr to fpkt substruct and fill it in */
	fpkt = icmd->ipkt_fpkt;
	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
	    sizeof (struct fcp_ipkt));

	if (ptgt != NULL) {
		icmd->ipkt_tgt = ptgt;
		fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
	}

	fpkt->pkt_comp = fcp_icmd_callback;
	fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
	fpkt->pkt_cmdlen = cmd_len;
	fpkt->pkt_rsplen = resp_len;
	fpkt->pkt_datalen = data_len;

	/*
	 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
	 * rscn_count as fcp knows down to the transport. If a valid count was
	 * passed into this function, we allocate memory to actually pass down
	 * this info.
	 *
	 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
	 * basically mean that fcp will not be able to help transport
	 * distinguish if a new RSCN has come after fcp was last informed about
	 * it. In such cases, it might lead to the problem mentioned in CR/bug #
	 * 5068068 where the device might end up going offline in case of RSCN
	 * storms.
	 */
	fpkt->pkt_ulp_rscn_infop = NULL;
	if (rscn_count != FC_INVALID_RSCN_COUNT) {
		fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
		    sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
		if (fpkt->pkt_ulp_rscn_infop == NULL) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "Failed to alloc memory to pass rscn info");
		}
	}

	if (fpkt->pkt_ulp_rscn_infop != NULL) {
		fc_ulp_rscn_info_t *rscnp;

		rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
		rscnp->ulp_rscn_count = rscn_count;
	}

	if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
		goto fail;
	}
	dma_setup++;

	/*
	 * Must hold target mutex across setting of pkt_pd and call to
	 * fc_ulp_init_packet to ensure the handle to the target doesn't go
	 * away while we're not looking.
	 */
	if (ptgt != NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		fpkt->pkt_pd = ptgt->tgt_pd_handle;

		/* ask transport to do its initialization on this pkt */
		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
		    != FC_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fc_ulp_init_packet failed");
			mutex_exit(&ptgt->tgt_mutex);
			goto fail;
		}
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
		    != FC_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fc_ulp_init_packet failed");
			goto fail;
		}
	}

	/*
	 * If the port is going away, undo the transport initialization
	 * and bail out instead of handing back a packet.
	 */
	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & (FCP_STATE_DETACHING |
	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
		int rval;

		mutex_exit(&pptr->port_mutex);

		rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
		ASSERT(rval == FC_SUCCESS);

		goto fail;
	}

	/* Account for the outstanding internal packet. */
	if (ptgt != NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_ipkt_cnt++;
		mutex_exit(&ptgt->tgt_mutex);
	}

	pptr->port_ipkt_cnt++;

	mutex_exit(&pptr->port_mutex);

	return (icmd);

fail:
	if (fpkt->pkt_ulp_rscn_infop != NULL) {
		kmem_free(fpkt->pkt_ulp_rscn_infop,
		    sizeof (fc_ulp_rscn_info_t));
		fpkt->pkt_ulp_rscn_infop = NULL;
	}

	if (dma_setup) {
		fcp_free_dma(pptr, icmd);
	}
	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
	    (size_t)pptr->port_dmacookie_sz);

	return (NULL);
}
5904
5905 /*
5906 * Function: fcp_icmd_free
5907 *
5908 * Description: Frees the internal command passed by the caller.
5909 *
5910 * Argument: *pptr Fcp port.
5911 * *icmd Internal packet to free.
5912 *
5913 * Return Value: None
5914 */
5915 static void
5916 fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5917 {
5918 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
5919
5920 /* Let the underlying layers do their cleanup. */
5921 (void) fc_ulp_uninit_packet(pptr->port_fp_handle,
5922 icmd->ipkt_fpkt);
5923
5924 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
5925 kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
5926 sizeof (fc_ulp_rscn_info_t));
5927 }
5928
5929 fcp_free_dma(pptr, icmd);
5930
5931 kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5932 (size_t)pptr->port_dmacookie_sz);
5933
5934 mutex_enter(&pptr->port_mutex);
5935
5936 if (ptgt) {
5937 mutex_enter(&ptgt->tgt_mutex);
5938 ptgt->tgt_ipkt_cnt--;
5939 mutex_exit(&ptgt->tgt_mutex);
5940 }
5941
5942 pptr->port_ipkt_cnt--;
5943 mutex_exit(&pptr->port_mutex);
5944 }
5945
/*
 * Function: fcp_alloc_dma
 *
 * Description: Allocates the DMA resources required for the internal
 *		packet: the cmd/resp buffers (kmem or DMA depending on
 *		nodma) and, when a data phase is present, the data buffer
 *		with its DMA binding and cookie list.  On any failure all
 *		partially acquired resources are released.
 *
 * Argument:	*pptr		FCP port.
 *		*icmd		Internal FCP packet.
 *		nodma		Indicates if the Cmd and Resp will be DMAed.
 *		flags		Allocation flags (Sleep or NoSleep).
 *
 * Return Value: FC_SUCCESS
 *		 FC_NOMEM
 */
static int
fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
    int nodma, int flags)
{
	int		rval;
	size_t		real_size;
	uint_t		ccount;
	int		bound = 0;
	int		cmd_resp = 0;
	fc_packet_t	*fpkt;
	ddi_dma_cookie_t	pkt_data_cookie;
	ddi_dma_cookie_t	*cp;
	uint32_t	cnt;

	fpkt = &icmd->ipkt_fc_packet;

	ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
	    fpkt->pkt_resp_dma == NULL);

	icmd->ipkt_nodma = nodma;

	if (nodma) {
		/* cmd/resp buffers come from kmem, no DMA handles. */
		fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
		if (fpkt->pkt_cmd == NULL) {
			goto fail;
		}

		fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
		if (fpkt->pkt_resp == NULL) {
			goto fail;
		}
	} else {
		ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);

		rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
		if (rval == FC_FAILURE) {
			ASSERT(fpkt->pkt_cmd_dma == NULL &&
			    fpkt->pkt_resp_dma == NULL);
			goto fail;
		}
		/* Remember that cmd/resp must be freed on failure. */
		cmd_resp++;
	}

	if ((fpkt->pkt_datalen != 0) &&
	    !(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
		/*
		 * set up DMA handle and memory for the data in this packet
		 */
		if (ddi_dma_alloc_handle(pptr->port_dip,
		    &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
		    NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
			goto fail;
		}

		if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
		    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
		    &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
			goto fail;
		}

		/* was DMA mem size gotten < size asked for/needed ?? */
		if (real_size < fpkt->pkt_datalen) {
			goto fail;
		}

		/* bind DMA address and handle together */
		if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
		    NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
		    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
		    &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
			goto fail;
		}
		bound++;

		/* More cookies than the FCA can handle is a failure. */
		if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
			goto fail;
		}

		fpkt->pkt_data_cookie_cnt = ccount;

		/*
		 * Copy the cookie list into the array that lives right
		 * after the ipkt (set up in fcp_icmd_alloc()).
		 */
		cp = fpkt->pkt_data_cookie;
		*cp = pkt_data_cookie;
		cp++;

		for (cnt = 1; cnt < ccount; cnt++, cp++) {
			ddi_dma_nextcookie(fpkt->pkt_data_dma,
			    &pkt_data_cookie);
			*cp = pkt_data_cookie;
		}

	} else if (fpkt->pkt_datalen != 0) {
		/*
		 * If it's a pseudo FCA, then it can't support DMA even in
		 * SCSI data phase.
		 */
		fpkt->pkt_data = kmem_alloc(fpkt->pkt_datalen, flags);
		if (fpkt->pkt_data == NULL) {
			goto fail;
		}

	}

	return (FC_SUCCESS);

fail:
	/* Unwind whatever was acquired before the failure. */
	if (bound) {
		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
	}

	if (fpkt->pkt_data_dma) {
		if (fpkt->pkt_data) {
			ddi_dma_mem_free(&fpkt->pkt_data_acc);
		}
		ddi_dma_free_handle(&fpkt->pkt_data_dma);
	} else {
		if (fpkt->pkt_data) {
			kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
		}
	}

	if (nodma) {
		if (fpkt->pkt_cmd) {
			kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
		}
		if (fpkt->pkt_resp) {
			kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
		}
	} else {
		if (cmd_resp) {
			fcp_free_cmd_resp(pptr, fpkt);
		}
	}

	return (FC_NOMEM);
}
6096
6097
/*
 * Function: fcp_free_dma
 *
 * Description: Releases the cmd/resp/data buffers of an internal packet
 *		that were set up by fcp_alloc_dma(), handling both the DMA
 *		and the no-DMA (kmem) cases.
 *
 * Argument:	*pptr		FCP port.
 *		*icmd		Internal FCP packet.
 *
 * Return Value: None
 */
static void
fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
{
	fc_packet_t	*fpkt = icmd->ipkt_fpkt;

	if (fpkt->pkt_data_dma) {
		/* Data buffer was DMA-bound: unbind, free memory, handle. */
		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
		if (fpkt->pkt_data) {
			ddi_dma_mem_free(&fpkt->pkt_data_acc);
		}
		ddi_dma_free_handle(&fpkt->pkt_data_dma);
	} else {
		if (fpkt->pkt_data) {
			/* Pseudo-FCA case: plain kmem data buffer. */
			kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
		}
		/*
		 * Need we reset pkt_* to zero???
		 */
	}

	if (icmd->ipkt_nodma) {
		/*
		 * cmd/resp came from kmem; ipkt_cmdlen/ipkt_resplen mirror
		 * the pkt_cmdlen/pkt_rsplen used at allocation time
		 * (both set from the same values in fcp_icmd_alloc()).
		 */
		if (fpkt->pkt_cmd) {
			kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
		}
		if (fpkt->pkt_resp) {
			kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
		}
	} else {
		ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);

		fcp_free_cmd_resp(pptr, fpkt);
	}
}
6131
6132 /*
6133 * Function: fcp_lookup_target
6134 *
6135 * Description: Finds a target given a WWN.
6136 *
6137 * Argument: *pptr FCP port.
6138 * *wwn World Wide Name of the device to look for.
6139 *
6140 * Return Value: NULL No target found
6141 * Not NULL Target structure
6142 *
6143 * Context: Interrupt context.
6144 * The mutex pptr->port_mutex must be owned.
6145 */
6146 /* ARGSUSED */
6147 static struct fcp_tgt *
6148 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
6149 {
6150 int hash;
6151 struct fcp_tgt *ptgt;
6152
6153 ASSERT(mutex_owned(&pptr->port_mutex));
6154
6155 hash = FCP_HASH(wwn);
6156
6157 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
6158 ptgt = ptgt->tgt_next) {
6159 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
6160 bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
6161 sizeof (ptgt->tgt_port_wwn)) == 0) {
6162 break;
6163 }
6164 }
6165
6166 return (ptgt);
6167 }
6168
6169
6170 /*
6171 * Find target structure given a port identifier
6172 */
6173 static struct fcp_tgt *
6174 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6175 {
6176 fc_portid_t port_id;
6177 la_wwn_t pwwn;
6178 struct fcp_tgt *ptgt = NULL;
6179
6180 port_id.priv_lilp_posit = 0;
6181 port_id.port_id = d_id;
6182 if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6183 &pwwn) == FC_SUCCESS) {
6184 mutex_enter(&pptr->port_mutex);
6185 ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6186 mutex_exit(&pptr->port_mutex);
6187 }
6188
6189 return (ptgt);
6190 }
6191
6192
/*
 * The packet completion callback routine for info cmd pkts, i.e. the
 * internal PLOGI/PRLI ELS packets allocated by fcp_icmd_alloc().
 *
 * this means fpkt pts to a response to either a PLOGI or a PRLI
 *
 * On PLOGI success a PRLI is sent (reusing the same icmd); on PRLI
 * success LUN 0 is set up and a REPORT LUN is issued.  If there is an
 * error an attempt is made to call a routine to resend the command that
 * failed, provided it is retryable and the link/target state has not
 * changed since the command was issued.
 */
static void
fcp_icmd_callback(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct la_els_prli	*prli;
	struct la_els_prli	prli_s;
	struct fcp_prli		*fprli;
	struct fcp_lun	*plun;
	int		free_pkt = 1;
	int		rval;
	ls_code_t	resp;
	uchar_t		prli_acc = 0;
	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
	int		lun0_newalloc;

	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;

	/* get ptrs to the port and target structs for the cmd */
	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;

	FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));

	if (icmd->ipkt_opcode == LA_ELS_PRLI) {
		/*
		 * For PRLI, also check the ls_code we sent in the command
		 * buffer (an unsolicited-PRLI accept carries LA_ELS_ACC
		 * there; see fcp_unsol_prli()).
		 */
		FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
		    sizeof (prli_s));
		prli_acc = (prli_s.ls_code == LA_ELS_ACC);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "ELS (%x) callback state=0x%x reason=0x%x for %x",
	    icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
	    ptgt->tgt_d_id);

	if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
	    ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {

		mutex_enter(&ptgt->tgt_mutex);
		if (ptgt->tgt_pd_handle == NULL) {
			/*
			 * in a fabric environment the port device handles
			 * get created only after successful LOGIN into the
			 * transport, so the transport makes this port
			 * device (pd) handle available in this packet, so
			 * save it now
			 */
			ASSERT(fpkt->pkt_pd != NULL);
			ptgt->tgt_pd_handle = fpkt->pkt_pd;
		}
		mutex_exit(&ptgt->tgt_mutex);

		/* which ELS cmd is this response for ?? */
		switch (icmd->ipkt_opcode) {
		case LA_ELS_PLOGI:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
			    ptgt->tgt_d_id,
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));

			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
			    FCP_TGT_TRACE_15);

			/* Note that we are not allocating a new icmd */
			if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause) != DDI_SUCCESS) {
				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_16);
				goto fail;
			}
			break;

		case LA_ELS_PRLI:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);

			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
			    FCP_TGT_TRACE_17);

			prli = &prli_s;

			FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
			    sizeof (prli_s));

			fprli = (struct fcp_prli *)prli->service_params;

			/* Record the peer's initiator/target capability. */
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_icap = fprli->initiator_fn;
			ptgt->tgt_tcap = fprli->target_fn;
			mutex_exit(&ptgt->tgt_mutex);

			if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
				/*
				 * this FCP device does not support target mode
				 */
				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_18);
				goto fail;
			}
			if (fprli->retry == 1) {
				fc_ulp_disable_relogin(pptr->port_fp_handle,
				    &ptgt->tgt_port_wwn);
			}

			/* target is no longer offline */
			mutex_enter(&pptr->port_mutex);
			mutex_enter(&ptgt->tgt_mutex);
			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
				    FCP_TGT_MARK);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,1: state change "
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				goto fail;
			}
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);

			/*
			 * lun 0 should always respond to inquiry, so
			 * get the LUN struct for LUN 0
			 *
			 * Currently we deal with first level of addressing.
			 * If / when we start supporting 0x device types
			 * (DTYPE_ARRAY_CTRL, i.e. array controllers)
			 * this logic will need revisiting.
			 */
			lun0_newalloc = 0;
			if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
				/*
				 * no LUN struct for LUN 0 yet exists,
				 * so create one
				 */
				plun = fcp_alloc_lun(ptgt);
				if (plun == NULL) {
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Failed to allocate lun 0 for"
					    " D_ID=%x", ptgt->tgt_d_id);
					goto fail;
				}
				lun0_newalloc = 1;
			}

			/* fill in LUN info */
			mutex_enter(&ptgt->tgt_mutex);
			/*
			 * consider lun 0 as device not connected if it is
			 * offlined or newly allocated
			 */
			if ((plun->lun_state & FCP_LUN_OFFLINE) ||
			    lun0_newalloc) {
				plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
			}
			plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
			plun->lun_state &= ~FCP_LUN_OFFLINE;
			ptgt->tgt_lun_cnt = 1;
			ptgt->tgt_report_lun_cnt = 0;
			mutex_exit(&ptgt->tgt_mutex);

			/* Retrieve the rscn count (if a valid one exists) */
			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
				rscn_count = ((fc_ulp_rscn_info_t *)
				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
				    ->ulp_rscn_count;
			} else {
				rscn_count = FC_INVALID_RSCN_COUNT;
			}

			/* send Report Lun request to target */
			if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
			    sizeof (struct fcp_reportlun_resp),
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
				mutex_enter(&pptr->port_mutex);
				if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Failed to send REPORT LUN to"
					    " D_ID=%x", ptgt->tgt_d_id);
				} else {
					FCP_TRACE(fcp_logq,
					    pptr->port_instbuf, fcp_trace,
					    FCP_BUF_LEVEL_5, 0,
					    "fcp_icmd_callback,2:state change"
					    " occured for D_ID=0x%x",
					    ptgt->tgt_d_id);
				}
				mutex_exit(&pptr->port_mutex);

				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_19);

				goto fail;
			} else {
				/*
				 * REPORT LUN owns the discovery now; this
				 * icmd is done and freed here, not at fail.
				 */
				free_pkt = 0;
				fcp_icmd_free(pptr, icmd);
			}
			break;

		default:
			fcp_log(CE_WARN, pptr->port_dip,
			    "!fcp_icmd_callback Invalid opcode");
			goto fail;
		}

		return;
	}


	/*
	 * Other PLOGI failures are not retried as the
	 * transport does it already
	 */
	if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
		if (fcp_is_retryable(icmd) &&
		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {

			if (FCP_MUST_RETRY(fpkt)) {
				fcp_queue_ipkt(pptr, fpkt);
				return;
			}

			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "ELS PRLI is retried for d_id=0x%x, state=%x,"
			    " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
			    fpkt->pkt_reason);

			/*
			 * Retry by recalling the routine that
			 * originally queued this packet
			 */
			mutex_enter(&pptr->port_mutex);
			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				caddr_t msg;

				mutex_exit(&pptr->port_mutex);

				ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);

				if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
					fpkt->pkt_timeout +=
					    FCP_TIMEOUT_DELTA;
				}

				rval = fc_ulp_issue_els(pptr->port_fp_handle,
				    fpkt);
				if (rval == FC_SUCCESS) {
					return;
				}

				if (rval == FC_STATEC_BUSY ||
				    rval == FC_OFFLINE) {
					fcp_queue_ipkt(pptr, fpkt);
					return;
				}
				(void) fc_ulp_error(rval, &msg);

				fcp_log(CE_NOTE, pptr->port_dip,
				    "!ELS 0x%x failed to d_id=0x%x;"
				    " %s", icmd->ipkt_opcode,
				    ptgt->tgt_d_id, msg);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,3: state change "
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&pptr->port_mutex);
			}
		}
	} else {
		if (fcp_is_retryable(icmd) &&
		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
			if (FCP_MUST_RETRY(fpkt)) {
				fcp_queue_ipkt(pptr, fpkt);
				return;
			}
		}
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
		    fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
			mutex_exit(&pptr->port_mutex);
			fcp_print_error(fpkt);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_icmd_callback,4: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
		}
	}

fail:
	if (free_pkt) {
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
	}
}
6509
6510
6511 /*
6512 * called internally to send an info cmd using the transport
6513 *
6514 * sends either an INQ or a REPORT_LUN
6515 *
6516 * when the packet is completed fcp_scsi_callback is called
6517 */
6518 static int
6519 fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
6520 int lcount, int tcount, int cause, uint32_t rscn_count)
6521 {
6522 int nodma;
6523 struct fcp_ipkt *icmd;
6524 struct fcp_tgt *ptgt;
6525 struct fcp_port *pptr;
6526 fc_frame_hdr_t *hp;
6527 fc_packet_t *fpkt;
6528 struct fcp_cmd fcp_cmd;
6529 struct fcp_cmd *fcmd;
6530 union scsi_cdb *scsi_cdb;
6531
6532 ASSERT(plun != NULL);
6533
6534 ptgt = plun->lun_tgt;
6535 ASSERT(ptgt != NULL);
6536
6537 pptr = ptgt->tgt_port;
6538 ASSERT(pptr != NULL);
6539
6540 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6541 fcp_trace, FCP_BUF_LEVEL_5, 0,
6542 "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);
6543
6544 nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
6545 icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
6546 FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
6547 rscn_count);
6548
6549 if (icmd == NULL) {
6550 return (DDI_FAILURE);
6551 }
6552
6553 fpkt = icmd->ipkt_fpkt;
6554 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
6555 icmd->ipkt_retries = 0;
6556 icmd->ipkt_opcode = opcode;
6557 icmd->ipkt_lun = plun;
6558
6559 if (nodma) {
6560 fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
6561 } else {
6562 fcmd = &fcp_cmd;
6563 }
6564 bzero(fcmd, sizeof (struct fcp_cmd));
6565
6566 fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;
6567
6568 hp = &fpkt->pkt_cmd_fhdr;
6569
6570 hp->s_id = pptr->port_id;
6571 hp->d_id = ptgt->tgt_d_id;
6572 hp->r_ctl = R_CTL_COMMAND;
6573 hp->type = FC_TYPE_SCSI_FCP;
6574 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
6575 hp->rsvd = 0;
6576 hp->seq_id = 0;
6577 hp->seq_cnt = 0;
6578 hp->ox_id = 0xffff;
6579 hp->rx_id = 0xffff;
6580 hp->ro = 0;
6581
6582 bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);
6583
6584 /*
6585 * Request SCSI target for expedited processing
6586 */
6587
6588 /*
6589 * Set up for untagged queuing because we do not
6590 * know if the fibre device supports queuing.
6591 */
6592 fcmd->fcp_cntl.cntl_reserved_0 = 0;
6593 fcmd->fcp_cntl.cntl_reserved_1 = 0;
6594 fcmd->fcp_cntl.cntl_reserved_2 = 0;
6595 fcmd->fcp_cntl.cntl_reserved_3 = 0;
6596 fcmd->fcp_cntl.cntl_reserved_4 = 0;
6597 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
6598 scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
6599
6600 switch (opcode) {
6601 case SCMD_INQUIRY_PAGE83:
6602 /*
6603 * Prepare to get the Inquiry VPD page 83 information
6604 */
6605 fcmd->fcp_cntl.cntl_read_data = 1;
6606 fcmd->fcp_cntl.cntl_write_data = 0;
6607 fcmd->fcp_data_len = alloc_len;
6608
6609 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6610 fpkt->pkt_comp = fcp_scsi_callback;
6611
6612 scsi_cdb->scc_cmd = SCMD_INQUIRY;
6613 scsi_cdb->g0_addr2 = 0x01;
6614 scsi_cdb->g0_addr1 = 0x83;
6615 scsi_cdb->g0_count0 = (uchar_t)alloc_len;
6616 break;
6617
6618 case SCMD_INQUIRY:
6619 fcmd->fcp_cntl.cntl_read_data = 1;
6620 fcmd->fcp_cntl.cntl_write_data = 0;
6621 fcmd->fcp_data_len = alloc_len;
6622
6623 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6624 fpkt->pkt_comp = fcp_scsi_callback;
6625
6626 scsi_cdb->scc_cmd = SCMD_INQUIRY;
6627 scsi_cdb->g0_count0 = SUN_INQSIZE;
6628 break;
6629
6630 case SCMD_REPORT_LUN: {
6631 fc_portid_t d_id;
6632 opaque_t fca_dev;
6633
6634 ASSERT(alloc_len >= 16);
6635
6636 d_id.priv_lilp_posit = 0;
6637 d_id.port_id = ptgt->tgt_d_id;
6638
6639 fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);
6640
6641 mutex_enter(&ptgt->tgt_mutex);
6642 ptgt->tgt_fca_dev = fca_dev;
6643 mutex_exit(&ptgt->tgt_mutex);
6644
6645 fcmd->fcp_cntl.cntl_read_data = 1;
6646 fcmd->fcp_cntl.cntl_write_data = 0;
6647 fcmd->fcp_data_len = alloc_len;
6648
6649 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6650 fpkt->pkt_comp = fcp_scsi_callback;
6651
6652 scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
6653 scsi_cdb->scc5_count0 = alloc_len & 0xff;
6654 scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
6655 scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
6656 scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
6657 break;
6658 }
6659
6660 default:
6661 fcp_log(CE_WARN, pptr->port_dip,
6662 "!fcp_send_scsi Invalid opcode");
6663 break;
6664 }
6665
6666 if (!nodma) {
6667 FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
6668 fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
6669 }
6670
6671 mutex_enter(&pptr->port_mutex);
6672 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6673
6674 mutex_exit(&pptr->port_mutex);
6675 if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
6676 FC_SUCCESS) {
6677 fcp_icmd_free(pptr, icmd);
6678 return (DDI_FAILURE);
6679 }
6680 return (DDI_SUCCESS);
6681 } else {
6682 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6683 fcp_trace, FCP_BUF_LEVEL_2, 0,
6684 "fcp_send_scsi,1: state change occured"
6685 " for D_ID=0x%x", ptgt->tgt_d_id);
6686 mutex_exit(&pptr->port_mutex);
6687 fcp_icmd_free(pptr, icmd);
6688 return (DDI_FAILURE);
6689 }
6690 }
6691
6692
6693 /*
6694 * called by fcp_scsi_callback to check to handle the case where
6695 * REPORT_LUN returns ILLEGAL REQUEST or a UNIT ATTENTION
6696 */
6697 static int
6698 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
6699 {
6700 uchar_t rqlen;
6701 int rval = DDI_FAILURE;
6702 struct scsi_extended_sense sense_info, *sense;
6703 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
6704 fpkt->pkt_ulp_private;
6705 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
6706 struct fcp_port *pptr = ptgt->tgt_port;
6707
6708 ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6709
6710 if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6711 /*
6712 * SCSI-II Reserve Release support. Some older FC drives return
6713 * Reservation conflict for Report Luns command.
6714 */
6715 if (icmd->ipkt_nodma) {
6716 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6717 rsp->fcp_u.fcp_status.sense_len_set = 0;
6718 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6719 } else {
6720 fcp_rsp_t new_resp;
6721
6722 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6723 fpkt->pkt_resp_acc, sizeof (new_resp));
6724
6725 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6726 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6727 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6728
6729 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6730 fpkt->pkt_resp_acc, sizeof (new_resp));
6731 }
6732
6733 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6734 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6735
6736 return (DDI_SUCCESS);
6737 }
6738
6739 sense = &sense_info;
6740 if (!rsp->fcp_u.fcp_status.sense_len_set) {
6741 /* no need to continue if sense length is not set */
6742 return (rval);
6743 }
6744
6745 /* casting 64-bit integer to 8-bit */
6746 rqlen = (uchar_t)min(rsp->fcp_sense_len,
6747 sizeof (struct scsi_extended_sense));
6748
6749 if (rqlen < 14) {
6750 /* no need to continue if request length isn't long enough */
6751 return (rval);
6752 }
6753
6754 if (icmd->ipkt_nodma) {
6755 /*
6756 * We can safely use fcp_response_len here since the
6757 * only path that calls fcp_check_reportlun,
6758 * fcp_scsi_callback, has already called
6759 * fcp_validate_fcp_response.
6760 */
6761 sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6762 sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6763 } else {
6764 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6765 rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6766 sizeof (struct scsi_extended_sense));
6767 }
6768
6769 if (!FCP_SENSE_NO_LUN(sense)) {
6770 mutex_enter(&ptgt->tgt_mutex);
6771 /* clear the flag if any */
6772 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6773 mutex_exit(&ptgt->tgt_mutex);
6774 }
6775
6776 if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6777 (sense->es_add_code == 0x20)) {
6778 if (icmd->ipkt_nodma) {
6779 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6780 rsp->fcp_u.fcp_status.sense_len_set = 0;
6781 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6782 } else {
6783 fcp_rsp_t new_resp;
6784
6785 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6786 fpkt->pkt_resp_acc, sizeof (new_resp));
6787
6788 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6789 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6790 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6791
6792 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6793 fpkt->pkt_resp_acc, sizeof (new_resp));
6794 }
6795
6796 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6797 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6798
6799 return (DDI_SUCCESS);
6800 }
6801
6802 /*
6803 * This is for the STK library which returns a check condition,
6804 * to indicate device is not ready, manual assistance needed.
6805 * This is to a report lun command when the door is open.
6806 */
6807 if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6808 if (icmd->ipkt_nodma) {
6809 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6810 rsp->fcp_u.fcp_status.sense_len_set = 0;
6811 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6812 } else {
6813 fcp_rsp_t new_resp;
6814
6815 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6816 fpkt->pkt_resp_acc, sizeof (new_resp));
6817
6818 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6819 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6820 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6821
6822 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6823 fpkt->pkt_resp_acc, sizeof (new_resp));
6824 }
6825
6826 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6827 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6828
6829 return (DDI_SUCCESS);
6830 }
6831
6832 if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6833 (FCP_SENSE_NO_LUN(sense))) {
6834 mutex_enter(&ptgt->tgt_mutex);
6835 if ((FCP_SENSE_NO_LUN(sense)) &&
6836 (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6837 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6838 mutex_exit(&ptgt->tgt_mutex);
6839 /*
6840 * reconfig was triggred by ILLEGAL REQUEST but
6841 * got ILLEGAL REQUEST again
6842 */
6843 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6844 fcp_trace, FCP_BUF_LEVEL_3, 0,
6845 "!FCP: Unable to obtain Report Lun data"
6846 " target=%x", ptgt->tgt_d_id);
6847 } else {
6848 if (ptgt->tgt_tid == NULL) {
6849 timeout_id_t tid;
6850 /*
6851 * REPORT LUN data has changed. Kick off
6852 * rediscovery
6853 */
6854 tid = timeout(fcp_reconfigure_luns,
6855 (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6856
6857 ptgt->tgt_tid = tid;
6858 ptgt->tgt_state |= FCP_TGT_BUSY;
6859 }
6860 if (FCP_SENSE_NO_LUN(sense)) {
6861 ptgt->tgt_state |= FCP_TGT_ILLREQ;
6862 }
6863 mutex_exit(&ptgt->tgt_mutex);
6864 if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6865 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6866 fcp_trace, FCP_BUF_LEVEL_3, 0,
6867 "!FCP:Report Lun Has Changed"
6868 " target=%x", ptgt->tgt_d_id);
6869 } else if (FCP_SENSE_NO_LUN(sense)) {
6870 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6871 fcp_trace, FCP_BUF_LEVEL_3, 0,
6872 "!FCP:LU Not Supported"
6873 " target=%x", ptgt->tgt_d_id);
6874 }
6875 }
6876 rval = DDI_SUCCESS;
6877 }
6878
6879 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6880 fcp_trace, FCP_BUF_LEVEL_5, 0,
6881 "D_ID=%x, sense=%x, status=%x",
6882 fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6883 rsp->fcp_u.fcp_status.scsi_status);
6884
6885 return (rval);
6886 }
6887
6888 /*
6889 * Function: fcp_scsi_callback
6890 *
6891 * Description: This is the callback routine set by fcp_send_scsi() after
6892 * it calls fcp_icmd_alloc(). The SCSI command completed here
6893 * and autogenerated by FCP are: REPORT_LUN, INQUIRY and
6894 * INQUIRY_PAGE83.
6895 *
6896 * Argument: *fpkt FC packet used to convey the command
6897 *
6898 * Return Value: None
6899 */
static void
fcp_scsi_callback(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
	    fpkt->pkt_ulp_private;
	struct fcp_rsp_info	fcp_rsp_err, *bep;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	struct fcp_rsp	response, *rsp;

	ptgt = icmd->ipkt_tgt;
	pptr = ptgt->tgt_port;
	plun = icmd->ipkt_lun;

	/*
	 * Get at the FCP response: addressable in place for a non-DMA
	 * FCA, otherwise copied out of the DMA buffer into a local copy.
	 */
	if (icmd->ipkt_nodma) {
		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
	} else {
		rsp = &response;
		FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
		    sizeof (struct fcp_rsp));
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "SCSI callback state=0x%x for %x, op_code=0x%x, "
	    "status=%x, lun num=%x",
	    fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
	    rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);

	/*
	 * Pre-init LUN GUID with NWWN if it is not a device that
	 * supports multiple luns and we know it's not page83
	 * compliant. Although using a NWWN is not lun unique,
	 * we will be fine since there is only one lun behind the target
	 * in this case.
	 */
	if ((plun->lun_guid_size == 0) &&
	    (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
	    (fcp_symmetric_device_probe(plun) == 0)) {

		char ascii_wwn[FC_WWN_SIZE*2+1];
		fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
		(void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
	}

	/*
	 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
	 * when they have more data than what is asked in CDB. An overrun
	 * is really when FCP_DL is smaller than the data length in CDB.
	 * In the case here we know that REPORT LUN command we formed within
	 * this binary has correct FCP_DL. So this OVERRUN is due to bad device
	 * behavior. In reality this is FC_SUCCESS.
	 */
	if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
	    (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
	    (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
		fpkt->pkt_state = FC_PKT_SUCCESS;
	}

	/* Transport-level failure: retry, hand off to page83, or give up. */
	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "icmd failed with state=0x%x for %x", fpkt->pkt_state,
		    ptgt->tgt_d_id);

		if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
			/*
			 * Inquiry VPD page command on A5K SES devices would
			 * result in data CRC errors.
			 */
			if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
				/* fcp_handle_page83 consumes the packet */
				(void) fcp_handle_page83(fpkt, icmd, 1);
				return;
			}
		}
		if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
		    FCP_MUST_RETRY(fpkt)) {
			/* retry path owns the packet from here on */
			fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
			fcp_retry_scsi_cmd(fpkt);
			return;
		}

		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
		    FCP_TGT_TRACE_20);

		/* lock order: port_mutex before tgt_mutex */
		mutex_enter(&pptr->port_mutex);
		mutex_enter(&ptgt->tgt_mutex);
		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
			fcp_print_error(fpkt);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_scsi_callback,1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
		}
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);

	/* Abandon the completion if the link/target state moved underneath us */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_scsi_callback,2: state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}
	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/* FCP_RSP_INFO follows the fixed fcp_rsp header in the response */
	if (icmd->ipkt_nodma) {
		bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
		    sizeof (struct fcp_rsp));
	} else {
		bep = &fcp_rsp_err;
		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
		    fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
	}

	if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
		fcp_retry_scsi_cmd(fpkt);
		return;
	}

	if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
	    FCP_NO_FAILURE) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "rsp_code=0x%x, rsp_len_set=0x%x",
		    bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
		fcp_retry_scsi_cmd(fpkt);
		return;
	}

	/* Queue full / busy: re-queue the packet for a later attempt */
	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
	    rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
		fcp_queue_ipkt(pptr, fpkt);
		return;
	}

	/*
	 * Devices that do not support INQUIRY_PAGE83, return check condition
	 * with illegal request as per SCSI spec.
	 * Crossbridge is one such device and Daktari's SES node is another.
	 * We want to ideally enumerate these devices as a non-mpxio devices.
	 * SES nodes (Daktari only currently) are an exception to this.
	 */
	if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
	    (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
		    "check condition. May enumerate as non-mpxio device",
		    ptgt->tgt_d_id, plun->lun_type);

		/*
		 * If we let Daktari's SES be enumerated as a non-mpxio
		 * device, there will be a discrepancy in that the other
		 * internal FC disks will get enumerated as mpxio devices.
		 * Applications like luxadm expect this to be consistent.
		 *
		 * So, we put in a hack here to check if this is an SES device
		 * and handle it here.
		 */
		if (plun->lun_type == DTYPE_ESI) {
			/*
			 * Since, pkt_state is actually FC_PKT_SUCCESS
			 * at this stage, we fake a failure here so that
			 * fcp_handle_page83 will create a device path using
			 * the WWN instead of the GUID which is not there anyway
			 */
			fpkt->pkt_state = FC_PKT_LOCAL_RJT;
			(void) fcp_handle_page83(fpkt, icmd, 1);
			return;
		}

		mutex_enter(&ptgt->tgt_mutex);
		plun->lun_state &= ~(FCP_LUN_OFFLINE |
		    FCP_LUN_MARK | FCP_LUN_BUSY);
		mutex_exit(&ptgt->tgt_mutex);

		(void) fcp_call_finish_init(pptr, ptgt,
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
		int rval = DDI_FAILURE;

		/*
		 * handle cases where report lun isn't supported
		 * by faking up our own REPORT_LUN response or
		 * UNIT ATTENTION
		 */
		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
			rval = fcp_check_reportlun(rsp, fpkt);

			/*
			 * fcp_check_reportlun might have modified the
			 * FCP response. Copy it in again to get an updated
			 * FCP response
			 */
			if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
				rsp = &response;

				FCP_CP_IN(fpkt->pkt_resp, rsp,
				    fpkt->pkt_resp_acc,
				    sizeof (struct fcp_rsp));
			}
		}

		if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
			if (rval == DDI_SUCCESS) {
				(void) fcp_call_finish_init(pptr, ptgt,
				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
				    icmd->ipkt_cause);
				fcp_icmd_free(pptr, icmd);
			} else {
				fcp_retry_scsi_cmd(fpkt);
			}

			return;
		}
	} else {
		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
	/* make the DMA'd data visible to the CPU before parsing it */
	if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
		(void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}

	/* dispatch on the original opcode; each handler frees the packet */
	switch (icmd->ipkt_opcode) {
	case SCMD_INQUIRY:
		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
		fcp_handle_inquiry(fpkt, icmd);
		break;

	case SCMD_REPORT_LUN:
		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
		    FCP_TGT_TRACE_22);
		fcp_handle_reportlun(fpkt, icmd);
		break;

	case SCMD_INQUIRY_PAGE83:
		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
		(void) fcp_handle_page83(fpkt, icmd, 0);
		break;

	default:
		fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		break;
	}
}
7182
7183
7184 static void
7185 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7186 {
7187 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
7188 fpkt->pkt_ulp_private;
7189 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
7190 struct fcp_port *pptr = ptgt->tgt_port;
7191
7192 if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7193 fcp_is_retryable(icmd)) {
7194 mutex_enter(&pptr->port_mutex);
7195 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7196 mutex_exit(&pptr->port_mutex);
7197 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7198 fcp_trace, FCP_BUF_LEVEL_3, 0,
7199 "Retrying %s to %x; state=%x, reason=%x",
7200 (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7201 "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7202 fpkt->pkt_state, fpkt->pkt_reason);
7203
7204 fcp_queue_ipkt(pptr, fpkt);
7205 } else {
7206 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7207 fcp_trace, FCP_BUF_LEVEL_3, 0,
7208 "fcp_retry_scsi_cmd,1: state change occured"
7209 " for D_ID=0x%x", ptgt->tgt_d_id);
7210 mutex_exit(&pptr->port_mutex);
7211 (void) fcp_call_finish_init(pptr, ptgt,
7212 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7213 icmd->ipkt_cause);
7214 fcp_icmd_free(pptr, icmd);
7215 }
7216 } else {
7217 fcp_print_error(fpkt);
7218 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7219 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7220 fcp_icmd_free(pptr, icmd);
7221 }
7222 }
7223
7224 /*
7225 * Function: fcp_handle_page83
7226 *
7227 * Description: Treats the response to INQUIRY_PAGE83.
7228 *
7229 * Argument: *fpkt FC packet used to convey the command.
7230 * *icmd Original fcp_ipkt structure.
7231 * ignore_page83_data
7232 * if it's 1, that means it's a special devices's
7233 * page83 response, it should be enumerated under mpxio
7234 *
7235 * Return Value: None
7236 */
7237 static void
7238 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7239 int ignore_page83_data)
7240 {
7241 struct fcp_port *pptr;
7242 struct fcp_lun *plun;
7243 struct fcp_tgt *ptgt;
7244 uchar_t dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7245 int fail = 0;
7246 ddi_devid_t devid;
7247 char *guid = NULL;
7248 int ret;
7249
7250 ASSERT(icmd != NULL && fpkt != NULL);
7251
7252 pptr = icmd->ipkt_port;
7253 ptgt = icmd->ipkt_tgt;
7254 plun = icmd->ipkt_lun;
7255
7256 if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7257 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7258
7259 FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7260 SCMD_MAX_INQUIRY_PAGE83_SIZE);
7261
7262 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7263 fcp_trace, FCP_BUF_LEVEL_5, 0,
7264 "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7265 "dtype=0x%x, lun num=%x",
7266 pptr->port_instance, ptgt->tgt_d_id,
7267 dev_id_page[0], plun->lun_num);
7268
7269 ret = ddi_devid_scsi_encode(
7270 DEVID_SCSI_ENCODE_VERSION_LATEST,
7271 NULL, /* driver name */
7272 (unsigned char *) &plun->lun_inq, /* standard inquiry */
7273 sizeof (plun->lun_inq), /* size of standard inquiry */
7274 NULL, /* page 80 data */
7275 0, /* page 80 len */
7276 dev_id_page, /* page 83 data */
7277 SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7278 &devid);
7279
7280 if (ret == DDI_SUCCESS) {
7281
7282 guid = ddi_devid_to_guid(devid);
7283
7284 if (guid) {
7285 /*
7286 * Check our current guid. If it's non null
7287 * and it has changed, we need to copy it into
7288 * lun_old_guid since we might still need it.
7289 */
7290 if (plun->lun_guid &&
7291 strcmp(guid, plun->lun_guid)) {
7292 unsigned int len;
7293
7294 /*
7295 * If the guid of the LUN changes,
7296 * reconfiguration should be triggered
7297 * to reflect the changes.
7298 * i.e. we should offline the LUN with
7299 * the old guid, and online the LUN with
7300 * the new guid.
7301 */
7302 plun->lun_state |= FCP_LUN_CHANGED;
7303
7304 if (plun->lun_old_guid) {
7305 kmem_free(plun->lun_old_guid,
7306 plun->lun_old_guid_size);
7307 }
7308
7309 len = plun->lun_guid_size;
7310 plun->lun_old_guid_size = len;
7311
7312 plun->lun_old_guid = kmem_zalloc(len,
7313 KM_NOSLEEP);
7314
7315 if (plun->lun_old_guid) {
7316 /*
7317 * The alloc was successful then
7318 * let's do the copy.
7319 */
7320 bcopy(plun->lun_guid,
7321 plun->lun_old_guid, len);
7322 } else {
7323 fail = 1;
7324 plun->lun_old_guid_size = 0;
7325 }
7326 }
7327 if (!fail) {
7328 if (fcp_copy_guid_2_lun_block(
7329 plun, guid)) {
7330 fail = 1;
7331 }
7332 }
7333 ddi_devid_free_guid(guid);
7334
7335 } else {
7336 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7337 fcp_trace, FCP_BUF_LEVEL_2, 0,
7338 "fcp_handle_page83: unable to create "
7339 "GUID");
7340
7341 /* couldn't create good guid from devid */
7342 fail = 1;
7343 }
7344 ddi_devid_free(devid);
7345
7346 } else if (ret == DDI_NOT_WELL_FORMED) {
7347 /* NULL filled data for page 83 */
7348 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7349 fcp_trace, FCP_BUF_LEVEL_2, 0,
7350 "fcp_handle_page83: retry GUID");
7351
7352 icmd->ipkt_retries = 0;
7353 fcp_retry_scsi_cmd(fpkt);
7354 return;
7355 } else {
7356 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7357 fcp_trace, FCP_BUF_LEVEL_2, 0,
7358 "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7359 ret);
7360 /*
7361 * Since the page83 validation
7362 * introduced late, we are being
7363 * tolerant to the existing devices
7364 * that already found to be working
7365 * under mpxio, like A5200's SES device,
7366 * its page83 response will not be standard-compliant,
7367 * but we still want it to be enumerated under mpxio.
7368 */
7369 if (fcp_symmetric_device_probe(plun) != 0) {
7370 fail = 1;
7371 }
7372 }
7373
7374 } else {
7375 /* bad packet state */
7376 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7377
7378 /*
7379 * For some special devices (A5K SES and Daktari's SES devices),
7380 * they should be enumerated under mpxio
7381 * or "luxadm dis" will fail
7382 */
7383 if (ignore_page83_data) {
7384 fail = 0;
7385 } else {
7386 fail = 1;
7387 }
7388 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7389 fcp_trace, FCP_BUF_LEVEL_2, 0,
7390 "!Devid page cmd failed. "
7391 "fpkt_state: %x fpkt_reason: %x",
7392 "ignore_page83: %d",
7393 fpkt->pkt_state, fpkt->pkt_reason,
7394 ignore_page83_data);
7395 }
7396
7397 mutex_enter(&pptr->port_mutex);
7398 mutex_enter(&plun->lun_mutex);
7399 /*
7400 * If lun_cip is not NULL, then we needn't update lun_mpxio to avoid
7401 * mismatch between lun_cip and lun_mpxio.
7402 */
7403 if (plun->lun_cip == NULL) {
7404 /*
7405 * If we don't have a guid for this lun it's because we were
7406 * unable to glean one from the page 83 response. Set the
7407 * control flag to 0 here to make sure that we don't attempt to
7408 * enumerate it under mpxio.
7409 */
7410 if (fail || pptr->port_mpxio == 0) {
7411 plun->lun_mpxio = 0;
7412 } else {
7413 plun->lun_mpxio = 1;
7414 }
7415 }
7416 mutex_exit(&plun->lun_mutex);
7417 mutex_exit(&pptr->port_mutex);
7418
7419 mutex_enter(&ptgt->tgt_mutex);
7420 plun->lun_state &=
7421 ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7422 mutex_exit(&ptgt->tgt_mutex);
7423
7424 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7425 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7426
7427 fcp_icmd_free(pptr, icmd);
7428 }
7429
7430 /*
7431 * Function: fcp_handle_inquiry
7432 *
7433 * Description: Called by fcp_scsi_callback to handle the response to an
7434 * INQUIRY request.
7435 *
7436 * Argument: *fpkt FC packet used to convey the command.
7437 * *icmd Original fcp_ipkt structure.
7438 *
7439 * Return Value: None
7440 */
static void
fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
{
	struct fcp_port	*pptr;
	struct fcp_lun	*plun;
	struct fcp_tgt	*ptgt;
	uchar_t		dtype;
	uchar_t		pqual;
	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;

	ASSERT(icmd != NULL && fpkt != NULL);

	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;
	plun = icmd->ipkt_lun;

	/* copy the standard INQUIRY data out of the DMA buffer */
	FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
	    sizeof (struct scsi_inquiry));

	/* inq_dtype byte: low 5 bits device type, high 3 peripheral qual */
	dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
	pqual = plun->lun_inq.inq_dtype >> 5;

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
	    "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
	    plun->lun_num, dtype, pqual);

	if (pqual != 0) {
		/*
		 * Non-zero peripheral qualifier: the LUN is not connected
		 * (or not capable); log it and give up on this LUN.
		 */
		fcp_log(CE_CONT, pptr->port_dip,
		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
		    "Device type=0x%x Peripheral qual=0x%x\n",
		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
		    "Device type=0x%x Peripheral qual=0x%x\n",
		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);

		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);

		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/*
	 * If the device is already initialized, check the dtype
	 * for a change. If it has changed then update the flags
	 * so the create_luns will offline the old device and
	 * create the new device. Refer to bug: 4764752
	 */
	if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
		plun->lun_state |= FCP_LUN_CHANGED;
	}
	plun->lun_type = plun->lun_inq.inq_dtype;

	/*
	 * This code is setting/initializing the throttling in the FCA
	 * driver.
	 */
	mutex_enter(&pptr->port_mutex);
	if (!pptr->port_notify) {
		if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
			uint32_t cmd = 0;
			/*
			 * NOTE(review): because >> binds tighter than &,
			 * this parses as (cmd & (0xFFFFFF00 >> 8)) and
			 * (FCP_SVE_THROTTLE << 8) -- not the grouping the
			 * parentheses suggest.  Since cmd is 0 the result
			 * is simply FC_NOTIFY_THROTTLE |
			 * (FCP_SVE_THROTTLE << 8); confirm before touching.
			 */
			cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
			    ((cmd & 0xFFFFFF00 >> 8) |
			    FCP_SVE_THROTTLE << 8));
			pptr->port_notify = 1;
			mutex_exit(&pptr->port_mutex);
			(void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
			mutex_enter(&pptr->port_mutex);
		}
	}

	if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_handle_inquiry,1:state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&pptr->port_mutex);

		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
		(void) fcp_call_finish_init(pptr, ptgt,
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}
	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
	mutex_exit(&pptr->port_mutex);

	/* Retrieve the rscn count (if a valid one exists) */
	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
		rscn_count = ((fc_ulp_rscn_info_t *)
		    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
	} else {
		rscn_count = FC_INVALID_RSCN_COUNT;
	}

	/*
	 * Read Inquiry VPD Page 0x83 to uniquely
	 * identify this logical unit.
	 */
	if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
	    SCMD_MAX_INQUIRY_PAGE83_SIZE,
	    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
	    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
		fcp_log(CE_WARN, NULL, "!failed to send page 83");
		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
		(void) fcp_call_finish_init(pptr, ptgt,
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause);
	}

	/* the INQUIRY packet is done with; the page-83 send has its own */
	fcp_icmd_free(pptr, icmd);
}
7563
7564 /*
7565 * Function: fcp_handle_reportlun
7566 *
7567 * Description: Called by fcp_scsi_callback to handle the response to a
7568 * REPORT_LUN request.
7569 *
7570 * Argument: *fpkt FC packet used to convey the command.
7571 * *icmd Original fcp_ipkt structure.
7572 *
7573 * Return Value: None
7574 */
static void
fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
{
	int i;
	int nluns_claimed;	/* LUN count reported by the device */
	int nluns_bufmax;	/* LUN capacity of the response buffer */
	int len;
	uint16_t lun_num;
	uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
	struct fcp_port *pptr;
	struct fcp_tgt *ptgt;
	struct fcp_lun *plun;
	struct fcp_reportlun_resp *report_lun;

	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;
	len = fpkt->pkt_datalen;

	/*
	 * Give up if the response cannot even hold the REPORT_LUN header
	 * or if no buffer can be allocated for it; in either case this
	 * target must still be accounted for in the discovery count.
	 */
	if ((len < FCP_LUN_HEADER) ||
	    ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/* Copy the response out of the DMA-mapped packet buffer. */
	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
	    fpkt->pkt_datalen);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
	    pptr->port_instance, ptgt->tgt_d_id);

	/*
	 * Get the number of luns (which is supplied as LUNS * 8) the
	 * device claims it has.
	 */
	nluns_claimed = BE_32(report_lun->num_lun) >> 3;

	/*
	 * Get the maximum number of luns the buffer submitted can hold.
	 */
	nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;

	/*
	 * Due to limitations of certain hardware, we support only 16 bit LUNs
	 */
	if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
		kmem_free(report_lun, len);

		fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
		    " 0x%x number of LUNs for target=%x", nluns_claimed,
		    ptgt->tgt_d_id);

		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/*
	 * If there are more LUNs than we have allocated memory for,
	 * allocate more space and send down yet another report lun if
	 * the maximum number of attempts hasn't been reached.
	 */
	mutex_enter(&ptgt->tgt_mutex);

	if ((nluns_claimed > nluns_bufmax) &&
	    (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {

		/* Deliberately shadows the outer plun declaration. */
		struct fcp_lun *plun;

		ptgt->tgt_report_lun_cnt++;
		plun = ptgt->tgt_lun;
		ASSERT(plun != NULL);
		mutex_exit(&ptgt->tgt_mutex);

		kmem_free(report_lun, len);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "!Dynamically discovered %d LUNs for D_ID=%x",
		    nluns_claimed, ptgt->tgt_d_id);

		/* Retrieve the rscn count (if a valid one exists) */
		if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
			rscn_count = ((fc_ulp_rscn_info_t *)
			    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
			    ulp_rscn_count;
		} else {
			rscn_count = FC_INVALID_RSCN_COUNT;
		}

		/*
		 * Resend REPORT_LUN with a buffer big enough for the
		 * claimed LUN count; on failure, account for this target
		 * so discovery can complete.
		 */
		if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
		    FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
			(void) fcp_call_finish_init(pptr, ptgt,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause);
		}

		fcp_icmd_free(pptr, icmd);
		return;
	}

	/*
	 * Retry budget exhausted: process only as many LUNs as the
	 * buffer could hold and log how many were lost.
	 */
	if (nluns_claimed > nluns_bufmax) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "Target=%x:%x:%x:%x:%x:%x:%x:%x"
		    " Number of LUNs lost=%x",
		    ptgt->tgt_port_wwn.raw_wwn[0],
		    ptgt->tgt_port_wwn.raw_wwn[1],
		    ptgt->tgt_port_wwn.raw_wwn[2],
		    ptgt->tgt_port_wwn.raw_wwn[3],
		    ptgt->tgt_port_wwn.raw_wwn[4],
		    ptgt->tgt_port_wwn.raw_wwn[5],
		    ptgt->tgt_port_wwn.raw_wwn[6],
		    ptgt->tgt_port_wwn.raw_wwn[7],
		    nluns_claimed - nluns_bufmax);

		nluns_claimed = nluns_bufmax;
	}
	ptgt->tgt_lun_cnt = nluns_claimed;

	/*
	 * Identify missing LUNs and print warning messages
	 * (tgt_mutex is held across this whole reconciliation loop).
	 */
	for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
		int offline;
		int exists = 0;

		offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;

		/* Search the response for this known LUN's number. */
		for (i = 0; i < nluns_claimed && exists == 0; i++) {
			uchar_t *lun_string;

			lun_string = (uchar_t *)&(report_lun->lun_string[i]);

			switch (lun_string[0] & 0xC0) {
			case FCP_LUN_ADDRESSING:
			case FCP_PD_ADDRESSING:
			case FCP_VOLUME_ADDRESSING:
				/* 14-bit LUN number from the first 2 bytes */
				lun_num = ((lun_string[0] & 0x3F) << 8) |
				    lun_string[1];
				if (plun->lun_num == lun_num) {
					exists++;
					break;
				}
				break;

			default:
				break;
			}
		}

		if (!exists && !offline) {
			/*
			 * Known online LUN no longer reported by the
			 * device: mark it disappeared unless a link/target
			 * state change invalidated this response.
			 */
			mutex_exit(&ptgt->tgt_mutex);

			mutex_enter(&pptr->port_mutex);
			mutex_enter(&ptgt->tgt_mutex);
			if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
				/*
				 * set disappear flag when device was connected
				 */
				if (!(plun->lun_state &
				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
					plun->lun_state |= FCP_LUN_DISAPPEARED;
				}
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				if (!(plun->lun_state &
				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun=%x for target=%x disappeared",
					    plun->lun_num, ptgt->tgt_d_id);
				}
				mutex_enter(&ptgt->tgt_mutex);
			} else {
				/* Stale response: abandon processing. */
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_5, 0,
				    "fcp_handle_reportlun,1: state change"
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				kmem_free(report_lun, len);
				(void) fcp_call_finish_init(pptr, ptgt,
				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
				    icmd->ipkt_cause);
				fcp_icmd_free(pptr, icmd);
				return;
			}
		} else if (exists) {
			/*
			 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
			 * actually exists in REPORT_LUN response
			 */
			if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
				plun->lun_state &=
				    ~FCP_LUN_DEVICE_NOT_CONNECTED;
			}
			if (offline || plun->lun_num == 0) {
				if (plun->lun_state & FCP_LUN_DISAPPEARED) {
					plun->lun_state &= ~FCP_LUN_DISAPPEARED;
					mutex_exit(&ptgt->tgt_mutex);
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun=%x for target=%x reappeared",
					    plun->lun_num, ptgt->tgt_d_id);
					mutex_enter(&ptgt->tgt_mutex);
				}
			}
		}
	}

	/* tmp_cnt is decremented once per LUN scanned below; never zero. */
	ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
	mutex_exit(&ptgt->tgt_mutex);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
	    pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);

	/* scan each lun */
	for (i = 0; i < nluns_claimed; i++) {
		uchar_t *lun_string;

		lun_string = (uchar_t *)&(report_lun->lun_string[i]);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
		    " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
		    lun_string[0]);

		switch (lun_string[0] & 0xC0) {
		case FCP_LUN_ADDRESSING:
		case FCP_PD_ADDRESSING:
		case FCP_VOLUME_ADDRESSING:
			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];

			/* We will skip masked LUNs because of the blacklist. */
			if (fcp_lun_blacklist != NULL) {
				mutex_enter(&ptgt->tgt_mutex);
				if (fcp_should_mask(&ptgt->tgt_port_wwn,
				    lun_num) == TRUE) {
					ptgt->tgt_lun_cnt--;
					mutex_exit(&ptgt->tgt_mutex);
					break;
				}
				mutex_exit(&ptgt->tgt_mutex);
			}

			/* see if this LUN is already allocated */
			if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
				plun = fcp_alloc_lun(ptgt);
				if (plun == NULL) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun allocation failed"
					    " target=%x lun=%x",
					    ptgt->tgt_d_id, lun_num);
					break;
				}
			}

			mutex_enter(&plun->lun_tgt->tgt_mutex);
			/* convert to LUN */
			plun->lun_addr.ent_addr_0 =
			    BE_16(*(uint16_t *)&(lun_string[0]));
			plun->lun_addr.ent_addr_1 =
			    BE_16(*(uint16_t *)&(lun_string[2]));
			plun->lun_addr.ent_addr_2 =
			    BE_16(*(uint16_t *)&(lun_string[4]));
			plun->lun_addr.ent_addr_3 =
			    BE_16(*(uint16_t *)&(lun_string[6]));

			plun->lun_num = lun_num;
			plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
			plun->lun_state &= ~FCP_LUN_OFFLINE;
			mutex_exit(&plun->lun_tgt->tgt_mutex);

			/* Retrieve the rscn count (if a valid one exists) */
			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
				rscn_count = ((fc_ulp_rscn_info_t *)
				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
				    ulp_rscn_count;
			} else {
				rscn_count = FC_INVALID_RSCN_COUNT;
			}

			/*
			 * Kick off an INQUIRY for this LUN; on success the
			 * INQUIRY completion performs the tmp_cnt accounting,
			 * so skip the per-LUN decrement below.
			 */
			if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
				mutex_enter(&pptr->port_mutex);
				mutex_enter(&plun->lun_tgt->tgt_mutex);
				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!failed to send INQUIRY"
					    " target=%x lun=%x",
					    ptgt->tgt_d_id, plun->lun_num);
				} else {
					FCP_TRACE(fcp_logq,
					    pptr->port_instbuf, fcp_trace,
					    FCP_BUF_LEVEL_5, 0,
					    "fcp_handle_reportlun,2: state"
					    " change occured for D_ID=0x%x",
					    ptgt->tgt_d_id);
				}
				mutex_exit(&plun->lun_tgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
			} else {
				continue;
			}
			break;

		default:
			fcp_log(CE_WARN, NULL,
			    "!Unsupported LUN Addressing method %x "
			    "in response to REPORT_LUN", lun_string[0]);
			break;
		}

		/*
		 * each time through this loop we should decrement
		 * the tmp_cnt by one -- since we go through this loop
		 * one time for each LUN, the tmp_cnt should never be <=0
		 */
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
	}

	/* Device reported zero LUNs: account for the target once. */
	if (i == 0) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
	}

	kmem_free(report_lun, len);
	fcp_icmd_free(pptr, icmd);
}
7916
7917
7918 /*
7919 * called internally to return a LUN given a target and a LUN number
7920 */
7921 static struct fcp_lun *
7922 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7923 {
7924 struct fcp_lun *plun;
7925
7926 mutex_enter(&ptgt->tgt_mutex);
7927 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7928 if (plun->lun_num == lun_num) {
7929 mutex_exit(&ptgt->tgt_mutex);
7930 return (plun);
7931 }
7932 }
7933 mutex_exit(&ptgt->tgt_mutex);
7934
7935 return (NULL);
7936 }
7937
7938
7939 /*
7940 * handle finishing one target for fcp_finish_init
7941 *
7942 * return true (non-zero) if we want finish_init to continue with the
7943 * next target
7944 *
7945 * called with the port mutex held
7946 */
/*ARGSUSED*/
static int
fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int cause)
{
	/* Default: tell fcp_finish_init to keep going to the next target. */
	int rval = 1;
	ASSERT(pptr != NULL);
	ASSERT(ptgt != NULL);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
	    ptgt->tgt_state);

	ASSERT(mutex_owned(&pptr->port_mutex));

	/*
	 * If the link or this target changed since discovery started,
	 * this pass is stale; return 0 so the caller stops.
	 */
	if ((pptr->port_link_cnt != link_cnt) ||
	    (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
		/*
		 * oh oh -- another link reset or target change
		 * must have occurred while we are in here
		 */
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);

		return (0);
	} else {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
	}

	mutex_enter(&ptgt->tgt_mutex);

	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
		/*
		 * tgt is not offline -- is it marked (i.e. needs
		 * to be offlined) ??
		 */
		if (ptgt->tgt_state & FCP_TGT_MARK) {
			/*
			 * this target not offline *and*
			 * marked
			 */
			ptgt->tgt_state &= ~FCP_TGT_MARK;
			rval = fcp_offline_target(pptr, ptgt, link_cnt,
			    tgt_cnt, 0, 0);
		} else {
			ptgt->tgt_state &= ~FCP_TGT_BUSY;

			/* create the LUNs */
			if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
				ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
				fcp_create_luns(ptgt, link_cnt, tgt_cnt,
				    cause);
				ptgt->tgt_device_created = 1;
			} else {
				/* On-demand node: just clear the LUN busy state. */
				fcp_update_tgt_state(ptgt, FCP_RESET,
				    FCP_LUN_BUSY);
			}
		}
	}

	mutex_exit(&ptgt->tgt_mutex);

	return (rval);
}
8011
8012
8013 /*
8014 * this routine is called to finish port initialization
8015 *
8016 * Each port has a "temp" counter -- when a state change happens (e.g.
8017 * port online), the temp count is set to the number of devices in the map.
8018 * Then, as each device gets "discovered", the temp counter is decremented
8019 * by one. When this count reaches zero we know that all of the devices
8020 * in the map have been discovered (or an error has occurred), so we can
8021 * then finish initialization -- which is done by this routine (well, this
8022 * and fcp-finish_tgt())
8023 *
8024 * acquires and releases the global mutex
8025 *
8026 * called with the port mutex owned
8027 */
8028 static void
8029 fcp_finish_init(struct fcp_port *pptr)
8030 {
8031 #ifdef DEBUG
8032 bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
8033 pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
8034 FCP_STACK_DEPTH);
8035 #endif /* DEBUG */
8036
8037 ASSERT(mutex_owned(&pptr->port_mutex));
8038
8039 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8040 fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
8041 " entering; ipkt count=%d", pptr->port_ipkt_cnt);
8042
8043 if ((pptr->port_state & FCP_STATE_ONLINING) &&
8044 !(pptr->port_state & (FCP_STATE_SUSPENDED |
8045 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
8046 pptr->port_state &= ~FCP_STATE_ONLINING;
8047 pptr->port_state |= FCP_STATE_ONLINE;
8048 }
8049
8050 /* Wake up threads waiting on config done */
8051 cv_broadcast(&pptr->port_config_cv);
8052 }
8053
8054
8055 /*
8056 * called from fcp_finish_init to create the LUNs for a target
8057 *
8058 * called with the port mutex owned
8059 */
static void
fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
{
	struct fcp_lun *plun;
	struct fcp_port *pptr;
	/* Always NULL here: FCP_ONLINE elements carry no child handle yet. */
	child_info_t *cip = NULL;

	ASSERT(ptgt != NULL);
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	pptr = ptgt->tgt_port;

	ASSERT(pptr != NULL);

	/* scan all LUNs for this target */
	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
		if (plun->lun_state & FCP_LUN_OFFLINE) {
			continue;
		}

		/*
		 * A LUN still marked at this point was not seen in the
		 * latest REPORT_LUN response: offline it.
		 */
		if (plun->lun_state & FCP_LUN_MARK) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_create_luns: offlining marked LUN!");
			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
			continue;
		}

		plun->lun_state &= ~FCP_LUN_BUSY;

		/*
		 * There are conditions in which FCP_LUN_INIT flag is cleared
		 * but we have a valid plun->lun_cip. To cover this case also
		 * CLEAR_BUSY whenever we have a valid lun_cip.
		 */
		if (plun->lun_mpxio && plun->lun_cip &&
		    (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
		    0, 0))) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_create_luns: enable lun %p failed!",
			    plun);
		}

		/* Already initialized and unchanged: nothing to do. */
		if (plun->lun_state & FCP_LUN_INIT &&
		    !(plun->lun_state & FCP_LUN_CHANGED)) {
			continue;
		}

		/* User-driven creation onlines devices elsewhere. */
		if (cause == FCP_CAUSE_USER_CREATE) {
			continue;
		}

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_6, 0,
		    "create_luns: passing ONLINE elem to HP thread");

		/*
		 * If lun has changed, prepare for offlining the old path.
		 * Do not offline the old path right now, since it may be
		 * still opened.
		 */
		if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
			fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
		}

		/* pass an ONLINE element to the hotplug thread */
		if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
		    link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {

			/*
			 * We can not synchronous attach (i.e pass
			 * NDI_ONLINE_ATTACH) here as we might be
			 * coming from an interrupt or callback
			 * thread.
			 */
			if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
			    link_cnt, tgt_cnt, 0, 0)) {
				fcp_log(CE_CONT, pptr->port_dip,
				    "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
				    plun->lun_tgt->tgt_d_id, plun->lun_num);
			}
		}
	}
}
8146
8147
8148 /*
8149 * function to online/offline devices
8150 */
static int
fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
    int online, int lcount, int tcount, int flags)
{
	int rval = NDI_FAILURE;
	int circ;
	child_info_t *ccip;
	struct fcp_port *pptr = plun->lun_tgt->tgt_port;
	int is_mpxio = pptr->port_mpxio;

	/*
	 * If the LUN's mpxio mode changed since this event was queued,
	 * lun_cip no longer matches cip: drop the event.
	 */
	if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
		/*
		 * When this event gets serviced, lun_cip and lun_mpxio
		 * has changed, so it should be invalidated now.
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
		    "plun: %p, cip: %p, what:%d", plun, cip, online);
		return (rval);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
	    "flags=%x mpxio=%x\n",
	    plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
	    plun->lun_mpxio);

	/*
	 * lun_mpxio needs checking here because we can end up in a race
	 * condition where this task has been dispatched while lun_mpxio is
	 * set, but an earlier FCP_ONLINE task for the same LUN tried to
	 * enable MPXIO for the LUN, but was unable to, and hence cleared
	 * the flag. We rely on the serialization of the tasks here. We
	 * return NDI_SUCCESS so any callers continue without reporting
	 * spurious errors, and still think we're an MPXIO LUN.
	 */

	if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
	    online == FCP_MPXIO_PATH_SET_BUSY) {
		if (plun->lun_mpxio) {
			rval = fcp_update_mpxio_path(plun, cip, online);
		} else {
			rval = NDI_SUCCESS;
		}
		return (rval);
	}

	/* Hold the port busy for the duration of the online/offline. */
	if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
		return (NDI_FAILURE);
	}

	/* Single-thread devinfo changes: mdi for MPxIO ports, ndi otherwise. */
	if (is_mpxio) {
		mdi_devi_enter(pptr->port_dip, &circ);
	} else {
		ndi_devi_enter(pptr->port_dip, &circ);
	}

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);

	/*
	 * Resolve the child handle: create/look up one for online,
	 * validate the supplied one for offline.
	 */
	if (online == FCP_ONLINE) {
		ccip = fcp_get_cip(plun, cip, lcount, tcount);
		if (ccip == NULL) {
			goto fail;
		}
	} else {
		if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
			goto fail;
		}
		ccip = cip;
	}

	if (online == FCP_ONLINE) {
		rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
		    &circ);
		fc_ulp_log_device_event(pptr->port_fp_handle,
		    FC_ULP_DEVICE_ONLINE);
	} else {
		rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
		    &circ);
		fc_ulp_log_device_event(pptr->port_fp_handle,
		    FC_ULP_DEVICE_OFFLINE);
	}

	/* Success path falls through to the same cleanup as failure. */
fail:	mutex_exit(&plun->lun_mutex);
	mutex_exit(&pptr->port_mutex);

	if (is_mpxio) {
		mdi_devi_exit(pptr->port_dip, circ);
	} else {
		ndi_devi_exit(pptr->port_dip, circ);
	}

	fc_ulp_idle_port(pptr->port_fp_handle);

	return (rval);
}
8249
8250
8251 /*
8252 * take a target offline by taking all of its LUNs offline
8253 */
/*ARGSUSED*/
static int
fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int nowait, int flags)
{
	struct fcp_tgt_elem *elem;

	ASSERT(mutex_owned(&pptr->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));

	/*
	 * Stale request: the link or this target changed since the
	 * offline was decided. The tgt_mutex is dropped only around the
	 * trace macro.
	 */
	if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
	    ptgt->tgt_change_cnt)) {
		mutex_exit(&ptgt->tgt_mutex);
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
		mutex_enter(&ptgt->tgt_mutex);

		return (0);
	}

	ptgt->tgt_pd_handle = NULL;
	mutex_exit(&ptgt->tgt_mutex);
	FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
	mutex_enter(&ptgt->tgt_mutex);

	tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;

	/*
	 * If the target supports tagged queuing and an element can be
	 * allocated, defer the offline to the watchdog (optionally after
	 * fcp_offline_delay); otherwise offline it right away.
	 */
	if (ptgt->tgt_tcap &&
	    (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
		elem->flags = flags;
		elem->time = fcp_watchdog_time;
		if (nowait == 0) {
			elem->time += fcp_offline_delay;
		}
		elem->ptgt = ptgt;
		elem->link_cnt = link_cnt;
		elem->tgt_cnt = tgt_cnt;
		elem->next = pptr->port_offline_tgts;
		pptr->port_offline_tgts = elem;
	} else {
		fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
	}

	return (1);
}
8300
8301
8302 static void
8303 fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8304 int link_cnt, int tgt_cnt, int flags)
8305 {
8306 ASSERT(mutex_owned(&pptr->port_mutex));
8307 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8308
8309 fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
8310 ptgt->tgt_state = FCP_TGT_OFFLINE;
8311 ptgt->tgt_pd_handle = NULL;
8312 fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
8313 }
8314
8315
8316 static void
8317 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
8318 int flags)
8319 {
8320 struct fcp_lun *plun;
8321
8322 ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
8323 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8324
8325 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8326 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
8327 fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
8328 }
8329 }
8330 }
8331
8332
8333 /*
8334 * take a LUN offline
8335 *
8336 * enters and leaves with the target mutex held, releasing it in the process
8337 *
8338 * allocates memory in non-sleep mode
8339 */
8340 static void
8341 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8342 int nowait, int flags)
8343 {
8344 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
8345 struct fcp_lun_elem *elem;
8346
8347 ASSERT(plun != NULL);
8348 ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8349
8350 if (nowait) {
8351 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8352 return;
8353 }
8354
8355 if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8356 elem->flags = flags;
8357 elem->time = fcp_watchdog_time;
8358 if (nowait == 0) {
8359 elem->time += fcp_offline_delay;
8360 }
8361 elem->plun = plun;
8362 elem->link_cnt = link_cnt;
8363 elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
8364 elem->next = pptr->port_offline_luns;
8365 pptr->port_offline_luns = elem;
8366 } else {
8367 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8368 }
8369 }
8370
8371
static void
fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
{
	struct fcp_pkt *head = NULL;

	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	/* Drop the target mutex while scanning/aborting queued commands. */
	mutex_exit(&LUN_TGT->tgt_mutex);

	/* Pull all queued commands for this LUN off the port queue. */
	head = fcp_scan_commands(plun);
	if (head != NULL) {
		fcp_abort_commands(head, LUN_PORT);
	}

	mutex_enter(&LUN_TGT->tgt_mutex);

	if (plun->lun_cip && plun->lun_mpxio) {
		/*
		 * Intimate MPxIO lun busy is cleared
		 */
		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
		    0, 0)) {
			fcp_log(CE_NOTE, LUN_PORT->port_dip,
			    "Can not ENABLE LUN; D_ID=%x, LUN=%x",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
		/*
		 * Intimate MPxIO that the lun is now marked for offline
		 */
		mutex_exit(&LUN_TGT->tgt_mutex);
		(void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
		mutex_enter(&LUN_TGT->tgt_mutex);
	}
}
8407
static void
fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
    int flags)
{
	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	/*
	 * fcp_update_offline_flags takes the target mutex itself, so it
	 * must be dropped around the call.
	 */
	mutex_exit(&LUN_TGT->tgt_mutex);
	fcp_update_offline_flags(plun);
	mutex_enter(&LUN_TGT->tgt_mutex);

	/* Abort queued commands and tell MPxIO the path is going away. */
	fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);

	FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_4, 0,
	    "offline_lun: passing OFFLINE elem to HP thread");

	/* Only LUNs with a child handle need the hotplug OFFLINE element. */
	if (plun->lun_cip) {
		fcp_log(CE_NOTE, LUN_PORT->port_dip,
		    "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
		    plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
		    LUN_TGT->tgt_trace);

		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
		    link_cnt, tgt_cnt, flags, 0)) {
			fcp_log(CE_CONT, LUN_PORT->port_dip,
			    "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
	}
}
8438
static void
fcp_scan_offline_luns(struct fcp_port *pptr)
{
	struct fcp_lun_elem *elem;
	struct fcp_lun_elem *prev;
	struct fcp_lun_elem *next;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	/*
	 * Walk the deferred-offline LUN list; process and unlink every
	 * element whose delay has expired, keep the rest.
	 */
	prev = NULL;
	elem = pptr->port_offline_luns;
	while (elem) {
		next = elem->next;
		if (elem->time <= fcp_watchdog_time) {
			int changed = 1;
			struct fcp_tgt *ptgt = elem->plun->lun_tgt;

			/* Only offline if link/target are unchanged. */
			mutex_enter(&ptgt->tgt_mutex);
			if (pptr->port_link_cnt == elem->link_cnt &&
			    ptgt->tgt_change_cnt == elem->tgt_cnt) {
				changed = 0;
			}

			/*
			 * NOTE(review): FCP_TGT_OFFLINE (a target-state
			 * flag) is tested against lun_state here, while the
			 * sibling fcp_scan_offline_tgts() tests it against
			 * tgt_state. Confirm whether FCP_LUN_OFFLINE was
			 * intended.
			 */
			if (!changed &&
			    !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
				fcp_offline_lun_now(elem->plun,
				    elem->link_cnt, elem->tgt_cnt, elem->flags);
			}
			mutex_exit(&ptgt->tgt_mutex);

			kmem_free(elem, sizeof (*elem));

			/* Unlink the processed element from the list. */
			if (prev) {
				prev->next = next;
			} else {
				pptr->port_offline_luns = next;
			}
		} else {
			prev = elem;
		}
		elem = next;
	}
}
8482
8483
static void
fcp_scan_offline_tgts(struct fcp_port *pptr)
{
	struct fcp_tgt_elem *elem;
	struct fcp_tgt_elem *prev;
	struct fcp_tgt_elem *next;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	/*
	 * Walk the deferred-offline target list; process and unlink every
	 * element whose delay has expired, keep the rest.
	 */
	prev = NULL;
	elem = pptr->port_offline_tgts;
	while (elem) {
		next = elem->next;
		if (elem->time <= fcp_watchdog_time) {
			int outdated = 1;
			struct fcp_tgt *ptgt = elem->ptgt;

			mutex_enter(&ptgt->tgt_mutex);

			if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
				/* No change on tgt since elem was created. */
				outdated = 0;
			} else if (ptgt->tgt_change_cnt == elem->tgt_cnt + 1 &&
			    pptr->port_link_cnt == elem->link_cnt + 1 &&
			    ptgt->tgt_statec_cause == FCP_CAUSE_LINK_DOWN) {
				/*
				 * Exactly one thing happened to the target
				 * inbetween: the local port went offline.
				 * For fp the remote port is already gone so
				 * it will not tell us again to offline the
				 * target. We must offline it now.
				 */
				outdated = 0;
			}

			if (!outdated && !(ptgt->tgt_state &
			    FCP_TGT_OFFLINE)) {
				fcp_offline_target_now(pptr,
				    ptgt, elem->link_cnt, elem->tgt_cnt,
				    elem->flags);
			}

			mutex_exit(&ptgt->tgt_mutex);

			kmem_free(elem, sizeof (*elem));

			/* Unlink the processed element from the list. */
			if (prev) {
				prev->next = next;
			} else {
				pptr->port_offline_tgts = next;
			}
		} else {
			prev = elem;
		}
		elem = next;
	}
}
8541
8542
static void
fcp_update_offline_flags(struct fcp_lun *plun)
{
	struct fcp_port *pptr = LUN_PORT;
	ASSERT(plun != NULL);

	/* Mark the LUN offline and clear the transient state bits. */
	mutex_enter(&LUN_TGT->tgt_mutex);
	plun->lun_state |= FCP_LUN_OFFLINE;
	plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);

	/*
	 * If a child node was tgt_init'ed for this LUN, fire the
	 * FCAL_REMOVE_EVENT so the target driver learns of the removal.
	 * Note the asymmetric unlock order of the two branches below.
	 */
	mutex_enter(&plun->lun_mutex);
	if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
		dev_info_t *cdip = NULL;

		mutex_exit(&LUN_TGT->tgt_mutex);

		/* Non-MPxIO LUNs hold a devinfo; MPxIO ones a path info. */
		if (plun->lun_mpxio == 0) {
			cdip = DIP(plun->lun_cip);
		} else if (plun->lun_cip) {
			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
		}

		mutex_exit(&plun->lun_mutex);
		if (cdip) {
			(void) ndi_event_retrieve_cookie(
			    pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
			    &fcp_remove_eid, NDI_EVENT_NOPASS);
			(void) ndi_event_run_callbacks(
			    pptr->port_ndi_event_hdl, cdip,
			    fcp_remove_eid, NULL);
		}
	} else {
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&LUN_TGT->tgt_mutex);
	}
}
8579
8580
8581 /*
8582 * Scan all of the command pkts for this port, moving pkts that
8583 * match our LUN onto our own list (headed by "head")
8584 */
static struct fcp_pkt *
fcp_scan_commands(struct fcp_lun *plun)
{
	struct fcp_port *pptr = LUN_PORT;

	struct fcp_pkt *cmd = NULL;	/* pkt cmd ptr */
	struct fcp_pkt *ncmd = NULL;	/* next pkt ptr */
	struct fcp_pkt *pcmd = NULL;	/* the previous command */

	struct fcp_pkt *head = NULL;	/* head of our list */
	struct fcp_pkt *tail = NULL;	/* tail of our list */

	int cmds_found = 0;

	mutex_enter(&pptr->port_pkt_mutex);
	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
		struct fcp_lun *tlun =
		    ADDR2LUN(&cmd->cmd_pkt->pkt_address);

		ncmd = cmd->cmd_next;	/* set next command */

		/*
		 * if this pkt is for a different LUN or the
		 * command is sent down, skip it.
		 */
		if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
		    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
			pcmd = cmd;
			continue;
		}
		cmds_found++;

		/* Unlink cmd from the port queue (head case vs. middle). */
		if (pcmd != NULL) {
			ASSERT(pptr->port_pkt_head != cmd);
			pcmd->cmd_next = cmd->cmd_next;
		} else {
			ASSERT(cmd == pptr->port_pkt_head);
			pptr->port_pkt_head = cmd->cmd_next;
		}

		/* Fix up the tail pointer if we removed the last element. */
		if (cmd == pptr->port_pkt_tail) {
			pptr->port_pkt_tail = pcmd;
			if (pcmd) {
				pcmd->cmd_next = NULL;
			}
		}

		/* Append the unlinked cmd to our private list. */
		if (head == NULL) {
			head = tail = cmd;
		} else {
			ASSERT(tail != NULL);

			tail->cmd_next = cmd;
			tail = cmd;
		}
		cmd->cmd_next = NULL;
	}
	mutex_exit(&pptr->port_pkt_mutex);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "scan commands: %d cmd(s) found", cmds_found);

	return (head);
}
8649
8650
8651 /*
8652 * Abort all the commands in the command queue
8653 */
static void
fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
{
	struct fcp_pkt *cmd = NULL;	/* pkt cmd ptr */
	struct fcp_pkt *ncmd = NULL;	/* next pkt ptr */

	ASSERT(mutex_owned(&pptr->port_mutex));

	/* scan through the pkts and invalid them */
	for (cmd = head; cmd != NULL; cmd = ncmd) {
		struct scsi_pkt *pkt = cmd->cmd_pkt;

		ncmd = cmd->cmd_next;
		ASSERT(pkt != NULL);

		/*
		 * The lun is going to be marked offline. Indicate
		 * the target driver not to requeue or retry this command
		 * as the device is going to be offlined pretty soon.
		 */
		pkt->pkt_reason = CMD_DEV_GONE;
		pkt->pkt_statistics = 0;
		pkt->pkt_state = 0;

		/* reset cmd flags/state */
		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
		cmd->cmd_state = FCP_PKT_IDLE;

		/*
		 * ensure we have a packet completion routine,
		 * then call it.
		 */
		ASSERT(pkt->pkt_comp != NULL);

		/*
		 * The completion callback may block or re-enter FCP, so
		 * the port mutex is dropped around it.
		 */
		mutex_exit(&pptr->port_mutex);
		fcp_post_callback(cmd);
		mutex_enter(&pptr->port_mutex);
	}
}
8693
8694
8695 /*
8696 * the pkt_comp callback for command packets
8697 */
8698 static void
8699 fcp_cmd_callback(fc_packet_t *fpkt)
8700 {
8701 struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
8702 struct scsi_pkt *pkt = cmd->cmd_pkt;
8703 struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
8704
8705 ASSERT(cmd->cmd_state != FCP_PKT_IDLE);
8706
8707 if (cmd->cmd_state == FCP_PKT_IDLE) {
8708 cmn_err(CE_PANIC, "Packet already completed %p",
8709 (void *)cmd);
8710 }
8711
8712 /*
8713 * Watch thread should be freeing the packet, ignore the pkt.
8714 */
8715 if (cmd->cmd_state == FCP_PKT_ABORTING) {
8716 fcp_log(CE_CONT, pptr->port_dip,
8717 "!FCP: Pkt completed while aborting\n");
8718 return;
8719 }
8720 cmd->cmd_state = FCP_PKT_IDLE;
8721
8722 fcp_complete_pkt(fpkt);
8723
8724 #ifdef DEBUG
8725 mutex_enter(&pptr->port_pkt_mutex);
8726 pptr->port_npkts--;
8727 mutex_exit(&pptr->port_pkt_mutex);
8728 #endif /* DEBUG */
8729
8730 fcp_post_callback(cmd);
8731 }
8732
8733
/*
 * Function:	fcp_complete_pkt
 *
 * Description:	Translates the completion status of an FC packet into the
 *		SCSA fields (pkt_state, pkt_reason, pkt_resid,
 *		pkt_statistics and status/ARQ data) of the SCSI packet
 *		riding on it, so the target driver sees an ordinary SCSI
 *		completion.  On FC success the FCP response IU is decoded
 *		(residuals, FCP response info, autosense); on FC failure
 *		the transport state/reason pair is mapped to the closest
 *		CMD_* reason and STAT_* statistics.
 *
 * Argument:	*fpkt		FC packet that just completed.
 *
 * Return Value: None
 *
 * Context:	Interrupt or kernel context.
 */
static void
fcp_complete_pkt(fc_packet_t *fpkt)
{
	int error = 0;
	struct fcp_pkt *cmd = (struct fcp_pkt *)
	    fpkt->pkt_ulp_private;
	struct scsi_pkt *pkt = cmd->cmd_pkt;
	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
	struct fcp_lun *plun;
	struct fcp_tgt *ptgt;
	struct fcp_rsp *rsp;
	struct scsi_address save;

#ifdef DEBUG
	/* Saved to verify below that completion doesn't corrupt the addr. */
	save = pkt->pkt_address;
#endif /* DEBUG */

	rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;

	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
		/*
		 * Pull the FCP response IU out of DMA-able memory (only
		 * needed when the FCA actually uses DVMA space).
		 */
		if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));
		}

		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS;

		pkt->pkt_resid = 0;

		if (fpkt->pkt_datalen) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
			/* Unfinished data transfer; noted for the check below */
			if (fpkt->pkt_data_resid) {
				error++;
			}
		}

		if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
		    rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
			/*
			 * The next two checks make sure that if there
			 * is no sense data or a valid response and
			 * the command came back with check condition,
			 * the command should be retried.
			 */
			if (!rsp->fcp_u.fcp_status.rsp_len_set &&
			    !rsp->fcp_u.fcp_status.sense_len_set) {
				pkt->pkt_state &= ~STATE_XFERRED_DATA;
				pkt->pkt_resid = cmd->cmd_dmacount;
			}
		}

		/* Fast path: clean status, no residual, no data shortfall. */
		if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
			return;
		}

		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		/*
		 * Update the transfer resid, if appropriate
		 */
		if (rsp->fcp_u.fcp_status.resid_over ||
		    rsp->fcp_u.fcp_status.resid_under) {
			pkt->pkt_resid = rsp->fcp_resid;
		}

		/*
		 * First see if we got a FCP protocol error.
		 */
		if (rsp->fcp_u.fcp_status.rsp_len_set) {
			struct fcp_rsp_info *bep;
			/* FCP response info follows the fixed-size rsp IU. */
			bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
			    sizeof (struct fcp_rsp));

			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len, rsp->fcp_sense_len);

				return;
			}

			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp), bep,
				    fpkt->pkt_resp_acc,
				    sizeof (struct fcp_rsp_info));
			}

			/* Non-zero rsp_code means an FCP-level failure. */
			if (bep->rsp_code != FCP_NO_FAILURE) {
				child_info_t *cip;

				pkt->pkt_reason = CMD_TRAN_ERR;

				mutex_enter(&plun->lun_mutex);
				cip = plun->lun_cip;
				mutex_exit(&plun->lun_mutex);

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "FCP response error on cmd=%p"
				    " target=0x%x, cip=%p", cmd,
				    ptgt->tgt_d_id, cip);
			}
		}

		/*
		 * See if we got a SCSI error with sense data
		 */
		if (rsp->fcp_u.fcp_status.sense_len_set) {
			uchar_t rqlen;
			caddr_t sense_from;
			child_info_t *cip;
			timeout_id_t tid;
			struct scsi_arq_status *arq;
			struct scsi_extended_sense *sense_to;

			arq = (struct scsi_arq_status *)pkt->pkt_scbp;
			sense_to = &arq->sts_sensedata;

			/* Never copy more than the SCSA sense area holds. */
			rqlen = (uchar_t)min(rsp->fcp_sense_len,
			    sizeof (struct scsi_extended_sense));

			/* Sense bytes follow the rsp IU and response info. */
			sense_from = (caddr_t)fpkt->pkt_resp +
			    sizeof (struct fcp_rsp) + rsp->fcp_response_len;

			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len, rsp->fcp_sense_len);

				return;
			}

			/*
			 * copy in sense information
			 */
			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(sense_from, sense_to,
				    fpkt->pkt_resp_acc, rqlen);
			} else {
				bcopy(sense_from, sense_to, rqlen);
			}

			/*
			 * Sense keys that indicate the LUN inventory on
			 * this target changed: schedule a LUN rediscovery
			 * (unless one is already pending, tgt_tid != NULL).
			 */
			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
			    (FCP_SENSE_NO_LUN(sense_to))) {
				mutex_enter(&ptgt->tgt_mutex);
				if (ptgt->tgt_tid == NULL) {
					/*
					 * Kick off rediscovery
					 */
					tid = timeout(fcp_reconfigure_luns,
					    (caddr_t)ptgt, drv_usectohz(1));

					ptgt->tgt_tid = tid;
					ptgt->tgt_state |= FCP_TGT_BUSY;
				}
				mutex_exit(&ptgt->tgt_mutex);
				if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
					FCP_TRACE(fcp_logq, pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: Report Lun Has Changed"
					    " target=%x", ptgt->tgt_d_id);
				} else if (FCP_SENSE_NO_LUN(sense_to)) {
					FCP_TRACE(fcp_logq, pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: LU Not Supported"
					    " target=%x", ptgt->tgt_d_id);
				}
			}
			ASSERT(pkt->pkt_scbp != NULL);

			/* Fill in the auto request-sense (ARQ) status. */
			pkt->pkt_state |= STATE_ARQ_DONE;

			arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;

			*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
			arq->sts_rqpkt_reason = 0;
			arq->sts_rqpkt_statistics = 0;

			arq->sts_rqpkt_state = STATE_GOT_BUS |
			    STATE_GOT_TARGET | STATE_SENT_CMD |
			    STATE_GOT_STATUS | STATE_ARQ_DONE |
			    STATE_XFERRED_DATA;

			mutex_enter(&plun->lun_mutex);
			cip = plun->lun_cip;
			mutex_exit(&plun->lun_mutex);

			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_8, 0,
			    "SCSI Check condition on cmd=%p target=0x%x"
			    " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
			    " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
			    cmd->cmd_fcp_cmd.fcp_cdb[0],
			    rsp->fcp_u.fcp_status.scsi_status,
			    sense_to->es_key, sense_to->es_add_code,
			    sense_to->es_qual_code);
		}
	} else {
		/* FC transport failure: map FC state/reason to SCSA codes. */
		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		/*
		 * Work harder to translate errors into target driver
		 * understandable ones. Note with despair that the target
		 * drivers don't decode pkt_state and pkt_reason exhaustively
		 * They resort to using the big hammer most often, which
		 * may not get fixed in the life time of this driver.
		 */
		pkt->pkt_state = 0;
		pkt->pkt_statistics = 0;

		switch (fpkt->pkt_state) {
		case FC_PKT_TRAN_ERROR:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OVERRUN:
				pkt->pkt_reason = CMD_CMD_OVR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_XCHG_BSY: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT; /* Lie */

				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_ABORTED:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_ABORT_FAILED:
				pkt->pkt_reason = CMD_ABORT_FAIL;
				break;

			case FC_REASON_NO_SEQ_INIT:
			case FC_REASON_CRC_ERROR:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;
			default:
				pkt->pkt_reason = CMD_TRAN_ERR;
				break;
			}
			break;

		case FC_PKT_PORT_OFFLINE: {
			dev_info_t *cdip = NULL;
			caddr_t ptr;

			if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_8, 0,
				    "SCSI cmd; LOGIN REQUIRED from FCA for %x",
				    ptgt->tgt_d_id);
			}

			/* Resolve child dip (mpxio path goes via client). */
			mutex_enter(&plun->lun_mutex);
			if (plun->lun_mpxio == 0) {
				cdip = DIP(plun->lun_cip);
			} else if (plun->lun_cip) {
				cdip = mdi_pi_get_client(PIP(plun->lun_cip));
			}

			mutex_exit(&plun->lun_mutex);

			if (cdip) {
				(void) ndi_event_retrieve_cookie(
				    pptr->port_ndi_event_hdl, cdip,
				    FCAL_REMOVE_EVENT, &fcp_remove_eid,
				    NDI_EVENT_NOPASS);
				(void) ndi_event_run_callbacks(
				    pptr->port_ndi_event_hdl, cdip,
				    fcp_remove_eid, NULL);
			}

			/*
			 * If the link goes off-line for a lip,
			 * this will cause a error to the ST SG
			 * SGEN drivers. By setting BUSY we will
			 * give the drivers the chance to retry
			 * before it blows of the job. ST will
			 * remember how many times it has retried.
			 */

			if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
			    (plun->lun_type == DTYPE_CHANGER)) {
				pkt->pkt_reason = CMD_CMPLT; /* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
			} else {
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;
			}
			break;
		}

		case FC_PKT_TRAN_BSY:
			/*
			 * Use the ssd Qfull handling here.
			 */
			*pkt->pkt_scbp = STATUS_INTERMEDIATE;
			pkt->pkt_state = STATE_GOT_BUS;
			break;

		case FC_PKT_TIMEOUT:
			pkt->pkt_reason = CMD_TIMEOUT;
			if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
				pkt->pkt_statistics |= STAT_TIMEOUT;
			} else {
				pkt->pkt_statistics |= STAT_ABORTED;
			}
			break;

		case FC_PKT_LOCAL_RJT:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OFFLINE: {
				dev_info_t *cdip = NULL;

				mutex_enter(&plun->lun_mutex);
				if (plun->lun_mpxio == 0) {
					cdip = DIP(plun->lun_cip);
				} else if (plun->lun_cip) {
					cdip = mdi_pi_get_client(
					    PIP(plun->lun_cip));
				}
				mutex_exit(&plun->lun_mutex);

				if (cdip) {
					(void) ndi_event_retrieve_cookie(
					    pptr->port_ndi_event_hdl, cdip,
					    FCAL_REMOVE_EVENT,
					    &fcp_remove_eid,
					    NDI_EVENT_NOPASS);
					(void) ndi_event_run_callbacks(
					    pptr->port_ndi_event_hdl,
					    cdip, fcp_remove_eid, NULL);
				}

				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;

				break;
			}

			case FC_REASON_NOMEM:
			case FC_REASON_QFULL: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT; /* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_DMA_ERROR:
				pkt->pkt_reason = CMD_DMA_DERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_CRC_ERROR:
			case FC_REASON_UNDERRUN: {
				uchar_t status;
				/*
				 * Work around for Bugid: 4240945.
				 * IB on A5k doesn't set the Underrun bit
				 * in the fcp status, when it is transferring
				 * less than requested amount of data. Work
				 * around the ses problem to keep luxadm
				 * happy till ibfirmware is fixed.
				 */
				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
					FCP_CP_IN(fpkt->pkt_resp, rsp,
					    fpkt->pkt_resp_acc,
					    sizeof (struct fcp_rsp));
				}
				status = rsp->fcp_u.fcp_status.scsi_status;
				if (((plun->lun_type & DTYPE_MASK) ==
				    DTYPE_ESI) && (status == STATUS_GOOD)) {
					pkt->pkt_reason = CMD_CMPLT;
					*pkt->pkt_scbp = status;
					pkt->pkt_resid = 0;
				} else {
					pkt->pkt_reason = CMD_TRAN_ERR;
					pkt->pkt_statistics |= STAT_ABORTED;
				}
				break;
			}

			case FC_REASON_NO_CONNECTION:
			case FC_REASON_UNSUPPORTED:
			case FC_REASON_ILLEGAL_REQ:
			case FC_REASON_BAD_SID:
			case FC_REASON_DIAG_BUSY:
			case FC_REASON_FCAL_OPN_FAIL:
			case FC_REASON_BAD_XID:
			default:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			}
			break;

		case FC_PKT_NPORT_RJT:
		case FC_PKT_FABRIC_RJT:
		case FC_PKT_NPORT_BSY:
		case FC_PKT_FABRIC_BSY:
		default:
			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_8, 0,
			    "FC Status 0x%x, reason 0x%x",
			    fpkt->pkt_state, fpkt->pkt_reason);
			pkt->pkt_reason = CMD_TRAN_ERR;
			pkt->pkt_statistics |= STAT_ABORTED;
			break;
		}

		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_9, 0,
		    "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
		    " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
		    fpkt->pkt_reason);
	}

	/* DEBUG builds only: completion must not touch the SCSI address. */
	ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
}
9197
9198
9199 static int
9200 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9201 {
9202 if (rsp->reserved_0 || rsp->reserved_1 ||
9203 rsp->fcp_u.fcp_status.reserved_0 ||
9204 rsp->fcp_u.fcp_status.reserved_1) {
9205 /*
9206 * These reserved fields should ideally be zero. FCP-2 does say
9207 * that the recipient need not check for reserved fields to be
9208 * zero. If they are not zero, we will not make a fuss about it
9209 * - just log it (in debug to both trace buffer and messages
9210 * file and to trace buffer only in non-debug) and move on.
9211 *
9212 * Non-zero reserved fields were seen with minnows.
9213 *
9214 * qlc takes care of some of this but we cannot assume that all
9215 * FCAs will do so.
9216 */
9217 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9218 FCP_BUF_LEVEL_5, 0,
9219 "Got fcp response packet with non-zero reserved fields "
9220 "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9221 "status.reserved_0:0x%x, status.reserved_1:0x%x",
9222 rsp->reserved_0, rsp->reserved_1,
9223 rsp->fcp_u.fcp_status.reserved_0,
9224 rsp->fcp_u.fcp_status.reserved_1);
9225 }
9226
9227 if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9228 (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9229 return (FC_FAILURE);
9230 }
9231
9232 if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9233 (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9234 sizeof (struct fcp_rsp))) {
9235 return (FC_FAILURE);
9236 }
9237
9238 return (FC_SUCCESS);
9239 }
9240
9241
9242 /*
9243 * This is called when there is a change the in device state. The case we're
9244 * handling here is, if the d_id s does not match, offline this tgt and online
9245 * a new tgt with the new d_id. called from fcp_handle_devices with
9246 * port_mutex held.
9247 */
9248 static int
9249 fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
9250 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
9251 {
9252 ASSERT(mutex_owned(&pptr->port_mutex));
9253
9254 FCP_TRACE(fcp_logq, pptr->port_instbuf,
9255 fcp_trace, FCP_BUF_LEVEL_3, 0,
9256 "Starting fcp_device_changed...");
9257
9258 /*
9259 * The two cases where the port_device_changed is called is
9260 * either it changes it's d_id or it's hard address.
9261 */
9262 if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
9263 (FC_TOP_EXTERNAL(pptr->port_topology) &&
9264 (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {
9265
9266 /* offline this target */
9267 mutex_enter(&ptgt->tgt_mutex);
9268 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
9269 (void) fcp_offline_target(pptr, ptgt, link_cnt,
9270 0, 1, NDI_DEVI_REMOVE);
9271 }
9272 mutex_exit(&ptgt->tgt_mutex);
9273
9274 fcp_log(CE_NOTE, pptr->port_dip,
9275 "Change in target properties: Old D_ID=%x New D_ID=%x"
9276 " Old HA=%x New HA=%x", ptgt->tgt_d_id,
9277 map_entry->map_did.port_id, ptgt->tgt_hard_addr,
9278 map_entry->map_hard_addr.hard_addr);
9279 }
9280
9281 return (fcp_handle_mapflags(pptr, ptgt, map_entry,
9282 link_cnt, tgt_cnt, cause));
9283 }
9284
9285 /*
9286 * Function: fcp_alloc_lun
9287 *
9288 * Description: Creates a new lun structure and adds it to the list
9289 * of luns of the target.
9290 *
9291 * Argument: ptgt Target the lun will belong to.
9292 *
9293 * Return Value: NULL Failed
9294 * Not NULL Succeeded
9295 *
9296 * Context: Kernel context
9297 */
9298 static struct fcp_lun *
9299 fcp_alloc_lun(struct fcp_tgt *ptgt)
9300 {
9301 struct fcp_lun *plun;
9302
9303 plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9304 if (plun != NULL) {
9305 /*
9306 * Initialize the mutex before putting in the target list
9307 * especially before releasing the target mutex.
9308 */
9309 mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9310 plun->lun_tgt = ptgt;
9311
9312 mutex_enter(&ptgt->tgt_mutex);
9313 plun->lun_next = ptgt->tgt_lun;
9314 ptgt->tgt_lun = plun;
9315 plun->lun_old_guid = NULL;
9316 plun->lun_old_guid_size = 0;
9317 mutex_exit(&ptgt->tgt_mutex);
9318 }
9319
9320 return (plun);
9321 }
9322
9323 /*
9324 * Function: fcp_dealloc_lun
9325 *
9326 * Description: Frees the LUN structure passed by the caller.
9327 *
9328 * Argument: plun LUN structure to free.
9329 *
9330 * Return Value: None
9331 *
9332 * Context: Kernel context.
9333 */
9334 static void
9335 fcp_dealloc_lun(struct fcp_lun *plun)
9336 {
9337 mutex_enter(&plun->lun_mutex);
9338 if (plun->lun_cip) {
9339 fcp_remove_child(plun);
9340 }
9341 mutex_exit(&plun->lun_mutex);
9342
9343 mutex_destroy(&plun->lun_mutex);
9344 if (plun->lun_guid) {
9345 kmem_free(plun->lun_guid, plun->lun_guid_size);
9346 }
9347 if (plun->lun_old_guid) {
9348 kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
9349 }
9350 kmem_free(plun, sizeof (*plun));
9351 }
9352
9353 /*
9354 * Function: fcp_alloc_tgt
9355 *
9356 * Description: Creates a new target structure and adds it to the port
9357 * hash list.
9358 *
9359 * Argument: pptr fcp port structure
9360 * *map_entry entry describing the target to create
9361 * link_cnt Link state change counter
9362 *
9363 * Return Value: NULL Failed
9364 * Not NULL Succeeded
9365 *
9366 * Context: Kernel context.
9367 */
9368 static struct fcp_tgt *
9369 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
9370 {
9371 int hash;
9372 uchar_t *wwn;
9373 struct fcp_tgt *ptgt;
9374
9375 ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
9376 if (ptgt != NULL) {
9377 mutex_enter(&pptr->port_mutex);
9378 if (link_cnt != pptr->port_link_cnt) {
9379 /*
9380 * oh oh -- another link reset
9381 * in progress -- give up
9382 */
9383 mutex_exit(&pptr->port_mutex);
9384 kmem_free(ptgt, sizeof (*ptgt));
9385 ptgt = NULL;
9386 } else {
9387 /*
9388 * initialize the mutex before putting in the port
9389 * wwn list, especially before releasing the port
9390 * mutex.
9391 */
9392 mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);
9393
9394 /* add new target entry to the port's hash list */
9395 wwn = (uchar_t *)&map_entry->map_pwwn;
9396 hash = FCP_HASH(wwn);
9397
9398 ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
9399 pptr->port_tgt_hash_table[hash] = ptgt;
9400
9401 /* save cross-ptr */
9402 ptgt->tgt_port = pptr;
9403
9404 ptgt->tgt_change_cnt = 1;
9405
9406 /* initialize the target manual_config_only flag */
9407 if (fcp_enable_auto_configuration) {
9408 ptgt->tgt_manual_config_only = 0;
9409 } else {
9410 ptgt->tgt_manual_config_only = 1;
9411 }
9412
9413 mutex_exit(&pptr->port_mutex);
9414 }
9415 }
9416
9417 return (ptgt);
9418 }
9419
9420 /*
9421 * Function: fcp_dealloc_tgt
9422 *
9423 * Description: Frees the target structure passed by the caller.
9424 *
9425 * Argument: ptgt Target structure to free.
9426 *
9427 * Return Value: None
9428 *
9429 * Context: Kernel context.
9430 */
9431 static void
9432 fcp_dealloc_tgt(struct fcp_tgt *ptgt)
9433 {
9434 mutex_destroy(&ptgt->tgt_mutex);
9435 kmem_free(ptgt, sizeof (*ptgt));
9436 }
9437
9438
9439 /*
9440 * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry
9441 *
9442 * Device discovery commands will not be retried for-ever as
9443 * this will have repercussions on other devices that need to
9444 * be submitted to the hotplug thread. After a quick glance
9445 * at the SCSI-3 spec, it was found that the spec doesn't
9446 * mandate a forever retry, rather recommends a delayed retry.
9447 *
9448 * Since Photon IB is single threaded, STATUS_BUSY is common
9449 * in a 4+initiator environment. Make sure the total time
 * spent on retries (including command timeout) does not
 * exceed 60 seconds.
9452 */
/*
 * Function:	fcp_queue_ipkt
 *
 * Description:	Queues an internal (discovery) packet on the port's retry
 *		list (port_ipkt_list) for deferred re-submission by the
 *		watch thread.  If the port/target state changed since the
 *		packet was built, the packet is freed instead and the
 *		discovery accounting is finished.
 *
 * Argument:	*pptr		Port the packet belongs to.
 *		*fpkt		FC packet wrapping the internal command.
 *
 * Return Value: None
 *
 * Context:	Kernel context.
 */
static void
fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
{
	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
	struct fcp_tgt *ptgt = icmd->ipkt_tgt;

	/* port_mutex before tgt_mutex: the driver's lock order. */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
		/*
		 * Link/target state moved on since this command was
		 * issued: don't retry, just account for its completion
		 * and free it.  Both locks must be dropped before
		 * fcp_call_finish_init()/fcp_icmd_free().
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_queue_ipkt,1:state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}
	mutex_exit(&ptgt->tgt_mutex);

	/*
	 * Back off a little more on every retry: the watch thread will
	 * not re-issue this packet before ipkt_restart.
	 */
	icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;

	if (pptr->port_ipkt_list != NULL) {
		/* add pkt to front of doubly-linked list */
		pptr->port_ipkt_list->ipkt_prev = icmd;
		icmd->ipkt_next = pptr->port_ipkt_list;
		pptr->port_ipkt_list = icmd;
		icmd->ipkt_prev = NULL;
	} else {
		/* this is the first/only pkt on the list */
		pptr->port_ipkt_list = icmd;
		icmd->ipkt_next = NULL;
		icmd->ipkt_prev = NULL;
	}
	mutex_exit(&pptr->port_mutex);
}
9491
9492 /*
9493 * Function: fcp_transport
9494 *
 * Description: This function submits the Fibre Channel packet to the
 *		transport layer by calling fc_ulp_transport(). If
 *		fc_ulp_transport()
9497 * fails the submission, the treatment depends on the value of
9498 * the variable internal.
9499 *
9500 * Argument: port_handle fp/fctl port handle.
9501 * *fpkt Packet to submit to the transport layer.
9502 * internal Not zero when it's an internal packet.
9503 *
9504 * Return Value: FC_TRAN_BUSY
9505 * FC_STATEC_BUSY
9506 * FC_OFFLINE
9507 * FC_LOGINREQ
9508 * FC_DEVICE_BUSY
9509 * FC_SUCCESS
9510 */
9511 static int
9512 fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
9513 {
9514 int rval;
9515
9516 rval = fc_ulp_transport(port_handle, fpkt);
9517 if (rval == FC_SUCCESS) {
9518 return (rval);
9519 }
9520
9521 /*
9522 * LUN isn't marked BUSY or OFFLINE, so we got here to transport
9523 * a command, if the underlying modules see that there is a state
9524 * change, or if a port is OFFLINE, that means, that state change
9525 * hasn't reached FCP yet, so re-queue the command for deferred
9526 * submission.
9527 */
9528 if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
9529 (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
9530 (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
9531 /*
9532 * Defer packet re-submission. Life hang is possible on
9533 * internal commands if the port driver sends FC_STATEC_BUSY
9534 * for ever, but that shouldn't happen in a good environment.
9535 * Limiting re-transport for internal commands is probably a
9536 * good idea..
9537 * A race condition can happen when a port sees barrage of
9538 * link transitions offline to online. If the FCTL has
9539 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
9540 * internal commands should be queued to do the discovery.
9541 * The race condition is when an online comes and FCP starts
9542 * its internal discovery and the link goes offline. It is
9543 * possible that the statec_callback has not reached FCP
9544 * and FCP is carrying on with its internal discovery.
9545 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
9546 * that the link has gone offline. At this point FCP should
9547 * drop all the internal commands and wait for the
9548 * statec_callback. It will be facilitated by incrementing
9549 * port_link_cnt.
9550 *
9551 * For external commands, the (FC)pkt_timeout is decremented
9552 * by the QUEUE Delay added by our driver, Care is taken to
9553 * ensure that it doesn't become zero (zero means no timeout)
9554 * If the time expires right inside driver queue itself,
9555 * the watch thread will return it to the original caller
9556 * indicating that the command has timed-out.
9557 */
9558 if (internal) {
9559 char *op;
9560 struct fcp_ipkt *icmd;
9561
9562 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9563 switch (icmd->ipkt_opcode) {
9564 case SCMD_REPORT_LUN:
9565 op = "REPORT LUN";
9566 break;
9567
9568 case SCMD_INQUIRY:
9569 op = "INQUIRY";
9570 break;
9571
9572 case SCMD_INQUIRY_PAGE83:
9573 op = "INQUIRY-83";
9574 break;
9575
9576 default:
9577 op = "Internal SCSI COMMAND";
9578 break;
9579 }
9580
9581 if (fcp_handle_ipkt_errors(icmd->ipkt_port,
9582 icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
9583 rval = FC_SUCCESS;
9584 }
9585 } else {
9586 struct fcp_pkt *cmd;
9587 struct fcp_port *pptr;
9588
9589 cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
9590 cmd->cmd_state = FCP_PKT_IDLE;
9591 pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);
9592
9593 if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
9594 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9595 fcp_trace, FCP_BUF_LEVEL_9, 0,
9596 "fcp_transport: xport busy for pkt %p",
9597 cmd->cmd_pkt);
9598 rval = FC_TRAN_BUSY;
9599 } else {
9600 fcp_queue_pkt(pptr, cmd);
9601 rval = FC_SUCCESS;
9602 }
9603 }
9604 }
9605
9606 return (rval);
9607 }
9608
9609 /*VARARGS3*/
9610 static void
9611 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9612 {
9613 char buf[256];
9614 va_list ap;
9615
9616 if (dip == NULL) {
9617 dip = fcp_global_dip;
9618 }
9619
9620 va_start(ap, fmt);
9621 (void) vsprintf(buf, fmt, ap);
9622 va_end(ap);
9623
9624 scsi_log(dip, "fcp", level, buf);
9625 }
9626
9627 /*
9628 * This function retries NS registry of FC4 type.
9629 * It assumes that fcp_mutex is held.
9630 * The function does nothing if topology is not fabric
9631 * So, the topology has to be set before this function can be called
9632 */
9633 static void
9634 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9635 {
9636 int rval;
9637
9638 ASSERT(MUTEX_HELD(&pptr->port_mutex));
9639
9640 if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9641 ((pptr->port_topology != FC_TOP_FABRIC) &&
9642 (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9643 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9644 pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9645 }
9646 return;
9647 }
9648 mutex_exit(&pptr->port_mutex);
9649 rval = fcp_do_ns_registry(pptr, s_id);
9650 mutex_enter(&pptr->port_mutex);
9651
9652 if (rval == 0) {
9653 /* Registry successful. Reset flag */
9654 pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9655 }
9656 }
9657
9658 /*
9659 * This function registers the ULP with the switch by calling transport i/f
9660 */
9661 static int
9662 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9663 {
9664 fc_ns_cmd_t ns_cmd;
9665 ns_rfc_type_t rfc;
9666 uint32_t types[8];
9667
9668 /*
9669 * Prepare the Name server structure to
9670 * register with the transport in case of
9671 * Fabric configuration.
9672 */
9673 bzero(&rfc, sizeof (rfc));
9674 bzero(types, sizeof (types));
9675
9676 types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9677 (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
9678
9679 rfc.rfc_port_id.port_id = s_id;
9680 bcopy(types, rfc.rfc_types, sizeof (types));
9681
9682 ns_cmd.ns_flags = 0;
9683 ns_cmd.ns_cmd = NS_RFT_ID;
9684 ns_cmd.ns_req_len = sizeof (rfc);
9685 ns_cmd.ns_req_payload = (caddr_t)&rfc;
9686 ns_cmd.ns_resp_len = 0;
9687 ns_cmd.ns_resp_payload = NULL;
9688
9689 /*
9690 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9691 */
9692 if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9693 fcp_log(CE_WARN, pptr->port_dip,
9694 "!ns_registry: failed name server registration");
9695 return (1);
9696 }
9697
9698 return (0);
9699 }
9700
9701 /*
9702 * Function: fcp_handle_port_attach
9703 *
9704 * Description: This function is called from fcp_port_attach() to attach a
9705 * new port. This routine does the following:
9706 *
9707 * 1) Allocates an fcp_port structure and initializes it.
9708 * 2) Tries to register the new FC-4 (FCP) capablity with the name
9709 * server.
9710 * 3) Kicks off the enumeration of the targets/luns visible
9711 * through this new port. That is done by calling
9712 * fcp_statec_callback() if the port is online.
9713 *
9714 * Argument: ulph fp/fctl port handle.
9715 * *pinfo Port information.
9716 * s_id Port ID.
9717 * instance Device instance number for the local port
9718 * (returned by ddi_get_instance()).
9719 *
9720 * Return Value: DDI_SUCCESS
9721 * DDI_FAILURE
9722 *
9723 * Context: User and Kernel context.
9724 */
9725 /*ARGSUSED*/
9726 int
9727 fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
9728 uint32_t s_id, int instance)
9729 {
9730 int res = DDI_FAILURE;
9731 scsi_hba_tran_t *tran;
9732 int mutex_initted = FALSE;
9733 int hba_attached = FALSE;
9734 int soft_state_linked = FALSE;
9735 int event_bind = FALSE;
9736 struct fcp_port *pptr;
9737 fc_portmap_t *tmp_list = NULL;
9738 uint32_t max_cnt, alloc_cnt;
9739 uchar_t *boot_wwn = NULL;
9740 uint_t nbytes;
9741 int manual_cfg;
9742
9743 /*
9744 * this port instance attaching for the first time (or after
9745 * being detached before)
9746 */
9747 FCP_TRACE(fcp_logq, "fcp", fcp_trace,
9748 FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);
9749
9750 if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
9751 cmn_err(CE_WARN, "fcp: Softstate struct alloc failed"
9752 "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
9753 instance);
9754 return (res);
9755 }
9756
9757 if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
9758 /* this shouldn't happen */
9759 ddi_soft_state_free(fcp_softstate, instance);
9760 cmn_err(CE_WARN, "fcp: bad soft state");
9761 return (res);
9762 }
9763
9764 (void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);
9765
9766 /*
9767 * Make a copy of ulp_port_info as fctl allocates
9768 * a temp struct.
9769 */
9770 (void) fcp_cp_pinfo(pptr, pinfo);
9771
9772 /*
9773 * Check for manual_configuration_only property.
9774 * Enable manual configurtion if the property is
9775 * set to 1, otherwise disable manual configuration.
9776 */
9777 if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
9778 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9779 MANUAL_CFG_ONLY,
9780 -1)) != -1) {
9781 if (manual_cfg == 1) {
9782 char *pathname;
9783 pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
9784 (void) ddi_pathname(pptr->port_dip, pathname);
9785 cmn_err(CE_NOTE,
9786 "%s (%s%d) %s is enabled via %s.conf.",
9787 pathname,
9788 ddi_driver_name(pptr->port_dip),
9789 ddi_get_instance(pptr->port_dip),
9790 MANUAL_CFG_ONLY,
9791 ddi_driver_name(pptr->port_dip));
9792 fcp_enable_auto_configuration = 0;
9793 kmem_free(pathname, MAXPATHLEN);
9794 }
9795 }
9796 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9797 pptr->port_link_cnt = 1;
9798 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9799 pptr->port_id = s_id;
9800 pptr->port_instance = instance;
9801 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state));
9802 pptr->port_state = FCP_STATE_INIT;
9803 if (pinfo->port_acc_attr == NULL) {
9804 /*
9805 * The corresponding FCA doesn't support DMA at all
9806 */
9807 pptr->port_state |= FCP_STATE_FCA_IS_NODMA;
9808 }
9809
9810 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state));
9811
9812 if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
9813 /*
9814 * If FCA supports DMA in SCSI data phase, we need preallocate
9815 * dma cookie, so stash the cookie size
9816 */
9817 pptr->port_dmacookie_sz = sizeof (ddi_dma_cookie_t) *
9818 pptr->port_data_dma_attr.dma_attr_sgllen;
9819 }
9820
9821 /*
9822 * The two mutexes of fcp_port are initialized. The variable
9823 * mutex_initted is incremented to remember that fact. That variable
9824 * is checked when the routine fails and the mutexes have to be
9825 * destroyed.
9826 */
9827 mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
9828 mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
9829 mutex_initted++;
9830
9831 /*
9832 * The SCSI tran structure is allocate and initialized now.
9833 */
9834 if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
9835 fcp_log(CE_WARN, pptr->port_dip,
9836 "!fcp%d: scsi_hba_tran_alloc failed", instance);
9837 goto fail;
9838 }
9839
9840 /* link in the transport structure then fill it in */
9841 pptr->port_tran = tran;
9842 tran->tran_hba_private = pptr;
9843 tran->tran_tgt_init = fcp_scsi_tgt_init;
9844 tran->tran_tgt_probe = NULL;
9845 tran->tran_tgt_free = fcp_scsi_tgt_free;
9846 tran->tran_start = fcp_scsi_start;
9847 tran->tran_reset = fcp_scsi_reset;
9848 tran->tran_abort = fcp_scsi_abort;
9849 tran->tran_getcap = fcp_scsi_getcap;
9850 tran->tran_setcap = fcp_scsi_setcap;
9851 tran->tran_init_pkt = NULL;
9852 tran->tran_destroy_pkt = NULL;
9853 tran->tran_dmafree = NULL;
9854 tran->tran_sync_pkt = NULL;
9855 tran->tran_reset_notify = fcp_scsi_reset_notify;
9856 tran->tran_get_bus_addr = fcp_scsi_get_bus_addr;
9857 tran->tran_get_name = fcp_scsi_get_name;
9858 tran->tran_clear_aca = NULL;
9859 tran->tran_clear_task_set = NULL;
9860 tran->tran_terminate_task = NULL;
9861 tran->tran_get_eventcookie = fcp_scsi_bus_get_eventcookie;
9862 tran->tran_add_eventcall = fcp_scsi_bus_add_eventcall;
9863 tran->tran_remove_eventcall = fcp_scsi_bus_remove_eventcall;
9864 tran->tran_post_event = fcp_scsi_bus_post_event;
9865 tran->tran_quiesce = NULL;
9866 tran->tran_unquiesce = NULL;
9867 tran->tran_bus_reset = NULL;
9868 tran->tran_bus_config = fcp_scsi_bus_config;
9869 tran->tran_bus_unconfig = fcp_scsi_bus_unconfig;
9870 tran->tran_bus_power = NULL;
9871 tran->tran_interconnect_type = INTERCONNECT_FABRIC;
9872
9873 tran->tran_pkt_constructor = fcp_kmem_cache_constructor;
9874 tran->tran_pkt_destructor = fcp_kmem_cache_destructor;
9875 tran->tran_setup_pkt = fcp_pkt_setup;
9876 tran->tran_teardown_pkt = fcp_pkt_teardown;
9877 tran->tran_hba_len = pptr->port_priv_pkt_len +
9878 sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
9879 if (pptr->port_state & FCP_STATE_FCA_IS_NODMA) {
9880 /*
9881 * If FCA don't support DMA, then we use different vectors to
9882 * minimize the effects on DMA code flow path
9883 */
9884 tran->tran_start = fcp_pseudo_start;
9885 tran->tran_init_pkt = fcp_pseudo_init_pkt;
9886 tran->tran_destroy_pkt = fcp_pseudo_destroy_pkt;
9887 tran->tran_sync_pkt = fcp_pseudo_sync_pkt;
9888 tran->tran_dmafree = fcp_pseudo_dmafree;
9889 tran->tran_setup_pkt = NULL;
9890 tran->tran_teardown_pkt = NULL;
9891 tran->tran_pkt_constructor = NULL;
9892 tran->tran_pkt_destructor = NULL;
9893 pptr->port_data_dma_attr = pseudo_fca_dma_attr;
9894 }
9895
9896 /*
9897 * Allocate an ndi event handle
9898 */
9899 pptr->port_ndi_event_defs = (ndi_event_definition_t *)
9900 kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);
9901
9902 bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
9903 sizeof (fcp_ndi_event_defs));
9904
9905 (void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
9906 &pptr->port_ndi_event_hdl, NDI_SLEEP);
9907
9908 pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
9909 pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
9910 pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;
9911
9912 if (DEVI_IS_ATTACHING(pptr->port_dip) &&
9913 (ndi_event_bind_set(pptr->port_ndi_event_hdl,
9914 &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
9915 goto fail;
9916 }
9917 event_bind++; /* Checked in fail case */
9918
9919 if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
9920 tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
9921 != DDI_SUCCESS) {
9922 fcp_log(CE_WARN, pptr->port_dip,
9923 "!fcp%d: scsi_hba_attach_setup failed", instance);
9924 goto fail;
9925 }
9926 hba_attached++; /* Checked in fail case */
9927
9928 pptr->port_mpxio = 0;
9929 if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
9930 MDI_SUCCESS) {
9931 pptr->port_mpxio++;
9932 }
9933
9934 /*
9935 * The following code is putting the new port structure in the global
	 * list of ports and, if it is the first port to attach, it starts the
9937 * fcp_watchdog_tick.
9938 *
9939 * Why put this new port in the global before we are done attaching it?
9940 * We are actually making the structure globally known before we are
9941 * done attaching it. The reason for that is: because of the code that
9942 * follows. At this point the resources to handle the port are
9943 * allocated. This function is now going to do the following:
9944 *
9945 * 1) It is going to try to register with the name server advertizing
9946 * the new FCP capability of the port.
9947 * 2) It is going to play the role of the fp/fctl layer by building
	 *    a list of worldwide names reachable through this port and call
9949 * itself on fcp_statec_callback(). That requires the port to
9950 * be part of the global list.
9951 */
9952 mutex_enter(&fcp_global_mutex);
9953 if (fcp_port_head == NULL) {
9954 fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
9955 }
9956 pptr->port_next = fcp_port_head;
9957 fcp_port_head = pptr;
9958 soft_state_linked++;
9959
9960 if (fcp_watchdog_init++ == 0) {
9961 fcp_watchdog_tick = fcp_watchdog_timeout *
9962 drv_usectohz(1000000);
9963 fcp_watchdog_id = timeout(fcp_watch, NULL,
9964 fcp_watchdog_tick);
9965 }
9966 mutex_exit(&fcp_global_mutex);
9967
9968 /*
9969 * Here an attempt is made to register with the name server, the new
9970 * FCP capability. That is done using an RTF_ID to the name server.
9971 * It is done synchronously. The function fcp_do_ns_registry()
9972 * doesn't return till the name server responded.
9973 * On failures, just ignore it for now and it will get retried during
9974 * state change callbacks. We'll set a flag to show this failure
9975 */
9976 if (fcp_do_ns_registry(pptr, s_id)) {
9977 mutex_enter(&pptr->port_mutex);
9978 pptr->port_state |= FCP_STATE_NS_REG_FAILED;
9979 mutex_exit(&pptr->port_mutex);
9980 } else {
9981 mutex_enter(&pptr->port_mutex);
9982 pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9983 mutex_exit(&pptr->port_mutex);
9984 }
9985
9986 /*
9987 * Lookup for boot WWN property
9988 */
9989 if (modrootloaded != 1) {
9990 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
9991 ddi_get_parent(pinfo->port_dip),
9992 DDI_PROP_DONTPASS, OBP_BOOT_WWN,
9993 &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
9994 (nbytes == FC_WWN_SIZE)) {
9995 bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
9996 }
9997 if (boot_wwn) {
9998 ddi_prop_free(boot_wwn);
9999 }
10000 }
10001
10002 /*
10003 * Handle various topologies and link states.
10004 */
10005 switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
10006 case FC_STATE_OFFLINE:
10007
10008 /*
10009 * we're attaching a port where the link is offline
10010 *
10011 * Wait for ONLINE, at which time a state
10012 * change will cause a statec_callback
10013 *
10014 * in the mean time, do not do anything
10015 */
10016 res = DDI_SUCCESS;
10017 pptr->port_state |= FCP_STATE_OFFLINE;
10018 break;
10019
10020 case FC_STATE_ONLINE: {
10021 if (pptr->port_topology == FC_TOP_UNKNOWN) {
10022 (void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
10023 res = DDI_SUCCESS;
10024 break;
10025 }
10026 /*
10027 * discover devices and create nodes (a private
10028 * loop or point-to-point)
10029 */
10030 ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
10031
10032 /*
10033 * At this point we are going to build a list of all the ports
10034 * that can be reached through this local port. It looks like
10035 * we cannot handle more than FCP_MAX_DEVICES per local port
10036 * (128).
10037 */
10038 if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
10039 sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
10040 KM_NOSLEEP)) == NULL) {
10041 fcp_log(CE_WARN, pptr->port_dip,
10042 "!fcp%d: failed to allocate portmap",
10043 instance);
10044 goto fail;
10045 }
10046
10047 /*
10048 * fc_ulp_getportmap() is going to provide us with the list of
10049 * remote ports in the buffer we just allocated. The way the
10050 * list is going to be retrieved depends on the topology.
10051 * However, if we are connected to a Fabric, a name server
10052 * request may be sent to get the list of FCP capable ports.
		 * It should be noted that in this case the request is
10054 * synchronous. This means we are stuck here till the name
10055 * server replies. A lot of things can change during that time
10056 * and including, may be, being called on
10057 * fcp_statec_callback() for different reasons. I'm not sure
10058 * the code can handle that.
10059 */
10060 max_cnt = FCP_MAX_DEVICES;
10061 alloc_cnt = FCP_MAX_DEVICES;
10062 if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
10063 &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
10064 FC_SUCCESS) {
10065 caddr_t msg;
10066
10067 (void) fc_ulp_error(res, &msg);
10068
10069 /*
10070 * this just means the transport is
10071 * busy perhaps building a portmap so,
10072 * for now, succeed this port attach
10073 * when the transport has a new map,
10074 * it'll send us a state change then
10075 */
10076 fcp_log(CE_WARN, pptr->port_dip,
10077 "!failed to get port map : %s", msg);
10078
10079 res = DDI_SUCCESS;
10080 break; /* go return result */
10081 }
10082 if (max_cnt > alloc_cnt) {
10083 alloc_cnt = max_cnt;
10084 }
10085
10086 /*
10087 * We are now going to call fcp_statec_callback() ourselves.
10088 * By issuing this call we are trying to kick off the enumera-
10089 * tion process.
10090 */
10091 /*
10092 * let the state change callback do the SCSI device
10093 * discovery and create the devinfos
10094 */
10095 fcp_statec_callback(ulph, pptr->port_fp_handle,
10096 pptr->port_phys_state, pptr->port_topology, tmp_list,
10097 max_cnt, pptr->port_id);
10098
10099 res = DDI_SUCCESS;
10100 break;
10101 }
10102
10103 default:
10104 /* unknown port state */
10105 fcp_log(CE_WARN, pptr->port_dip,
10106 "!fcp%d: invalid port state at attach=0x%x",
10107 instance, pptr->port_phys_state);
10108
10109 mutex_enter(&pptr->port_mutex);
10110 pptr->port_phys_state = FCP_STATE_OFFLINE;
10111 mutex_exit(&pptr->port_mutex);
10112
10113 res = DDI_SUCCESS;
10114 break;
10115 }
10116
10117 /* free temp list if used */
10118 if (tmp_list != NULL) {
10119 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10120 }
10121
10122 /* note the attach time */
10123 pptr->port_attach_time = ddi_get_lbolt64();
10124
10125 /* all done */
10126 return (res);
10127
10128 /* a failure we have to clean up after */
10129 fail:
10130 fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");
10131
10132 if (soft_state_linked) {
10133 /* remove this fcp_port from the linked list */
10134 (void) fcp_soft_state_unlink(pptr);
10135 }
10136
10137 /* unbind and free event set */
10138 if (pptr->port_ndi_event_hdl) {
10139 if (event_bind) {
10140 (void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10141 &pptr->port_ndi_events, NDI_SLEEP);
10142 }
10143 (void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10144 }
10145
10146 if (pptr->port_ndi_event_defs) {
10147 (void) kmem_free(pptr->port_ndi_event_defs,
10148 sizeof (fcp_ndi_event_defs));
10149 }
10150
10151 /*
10152 * Clean up mpxio stuff
10153 */
10154 if (pptr->port_mpxio) {
10155 (void) mdi_phci_unregister(pptr->port_dip, 0);
10156 pptr->port_mpxio--;
10157 }
10158
10159 /* undo SCSI HBA setup */
10160 if (hba_attached) {
10161 (void) scsi_hba_detach(pptr->port_dip);
10162 }
10163 if (pptr->port_tran != NULL) {
10164 scsi_hba_tran_free(pptr->port_tran);
10165 }
10166
10167 mutex_enter(&fcp_global_mutex);
10168
10169 /*
10170 * We check soft_state_linked, because it is incremented right before
	 * we increment fcp_watchdog_init. Therefore, we know if
10172 * soft_state_linked is still FALSE, we do not want to decrement
10173 * fcp_watchdog_init or possibly call untimeout.
10174 */
10175
10176 if (soft_state_linked) {
10177 if (--fcp_watchdog_init == 0) {
10178 timeout_id_t tid = fcp_watchdog_id;
10179
10180 mutex_exit(&fcp_global_mutex);
10181 (void) untimeout(tid);
10182 } else {
10183 mutex_exit(&fcp_global_mutex);
10184 }
10185 } else {
10186 mutex_exit(&fcp_global_mutex);
10187 }
10188
10189 if (mutex_initted) {
10190 mutex_destroy(&pptr->port_mutex);
10191 mutex_destroy(&pptr->port_pkt_mutex);
10192 }
10193
10194 if (tmp_list != NULL) {
10195 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10196 }
10197
10198 /* this makes pptr invalid */
10199 ddi_soft_state_free(fcp_softstate, instance);
10200
10201 return (DDI_FAILURE);
10202 }
10203
10204
/*
 * Function: fcp_handle_port_detach
 *
 * Description: Takes the port offline on behalf of a detach, suspend or
 *		power-down request.  The routine waits (one second per
 *		iteration, up to FCP_ICMD_DEADLINE iterations) for any
 *		ongoing reconfiguration, internal packets and the watchdog
 *		to drain before marking the port offline.  For a real
 *		detach the port is also unlinked from the global port list
 *		and fully dismantled through fcp_cleanup_port().
 *
 * Argument:	*pptr		Port to take offline.
 *		flag		FCP_STATE_DETACHING, FCP_STATE_SUSPENDED or
 *				FCP_STATE_POWER_DOWN.
 *		instance	Soft state instance number, handed to
 *				fcp_cleanup_port() on detach.
 *
 * Return Value: FC_SUCCESS	Port was taken offline (pptr is invalid on
 *				return when flag == FCP_STATE_DETACHING).
 *		 FC_FAILURE	Port is busy (MDI flag set or I/O did not
 *				drain in time).
 */
static int
fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
{
	int count = 0;	/* number of one-second waits performed below */

	mutex_enter(&pptr->port_mutex);

	/*
	 * if the port is powered down or suspended, nothing else
	 * to do; just return.
	 */
	if (flag != FCP_STATE_DETACHING) {
		if (pptr->port_state & (FCP_STATE_POWER_DOWN |
		    FCP_STATE_SUSPENDED)) {
			pptr->port_state |= flag;
			mutex_exit(&pptr->port_mutex);
			return (FC_SUCCESS);
		}
	}

	/* Port flagged busy by MDI; caller must retry later. */
	if (pptr->port_state & FCP_STATE_IN_MDI) {
		mutex_exit(&pptr->port_mutex);
		return (FC_FAILURE);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "fcp_handle_port_detach: port is detaching");

	pptr->port_state |= flag;

	/*
	 * Wait for any ongoing reconfig/ipkt to complete, that
	 * ensures the freeing to targets/luns is safe.
	 * No more ref to this port should happen from statec/ioctl
	 * after that as it was removed from the global port list.
	 */
	while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
		/*
		 * Let's give sufficient time for reconfig/ipkt
		 * to complete.
		 */
		if (count++ >= FCP_ICMD_DEADLINE) {
			break;
		}
		/* Drop the lock while sleeping one second. */
		mutex_exit(&pptr->port_mutex);
		delay(drv_usectohz(1000000));
		mutex_enter(&pptr->port_mutex);
	}

	/*
	 * if the driver is still busy then fail to
	 * suspend/power down.
	 */
	if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
		pptr->port_state &= ~flag;
		mutex_exit(&pptr->port_mutex);
		return (FC_FAILURE);
	}

	/*
	 * On a real detach, remove the port from the global list so no
	 * statec/ioctl path can find it anymore.
	 */
	if (flag == FCP_STATE_DETACHING) {
		pptr = fcp_soft_state_unlink(pptr);
		ASSERT(pptr != NULL);
	}

	/* Mark the port offline and invalidate all LUN state. */
	pptr->port_link_cnt++;
	pptr->port_state |= FCP_STATE_OFFLINE;
	pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);

	fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
	    FCP_CAUSE_LINK_DOWN);
	mutex_exit(&pptr->port_mutex);

	/* kill watch dog timer if we're the last */
	mutex_enter(&fcp_global_mutex);
	if (--fcp_watchdog_init == 0) {
		timeout_id_t tid = fcp_watchdog_id;
		/* untimeout() may block; never call it with the lock held */
		mutex_exit(&fcp_global_mutex);
		(void) untimeout(tid);
	} else {
		mutex_exit(&fcp_global_mutex);
	}

	/* clean up the port structures */
	if (flag == FCP_STATE_DETACHING) {
		fcp_cleanup_port(pptr, instance);
	}

	return (FC_SUCCESS);
}
10297
10298
/*
 * Function: fcp_cleanup_port
 *
 * Description: Releases the resources held by an fcp_port on final
 *		detach: the NDI event set and handle, the target/LUN
 *		structures and their devinfos, the mpxio pHCI
 *		registration, the SCSA HBA attachment and transport
 *		structure, the port mutexes and, last of all, the soft
 *		state entry itself.  The statement order matters; pptr
 *		is invalid when this function returns.
 *
 * Argument:	*pptr		Port to dismantle.
 *		instance	Soft state instance number used to free
 *				the soft state entry.
 *
 * Return Value: None
 */
static void
fcp_cleanup_port(struct fcp_port *pptr, int instance)
{
	ASSERT(pptr != NULL);

	/* unbind and free event set */
	if (pptr->port_ndi_event_hdl) {
		(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
		    &pptr->port_ndi_events, NDI_SLEEP);
		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
	}

	/* free the copy of the event definitions made at attach time */
	if (pptr->port_ndi_event_defs) {
		(void) kmem_free(pptr->port_ndi_event_defs,
		    sizeof (fcp_ndi_event_defs));
	}

	/* free the lun/target structures and devinfos */
	fcp_free_targets(pptr);

	/*
	 * Clean up mpxio stuff
	 */
	if (pptr->port_mpxio) {
		(void) mdi_phci_unregister(pptr->port_dip, 0);
		pptr->port_mpxio--;
	}

	/* clean up SCSA stuff */
	(void) scsi_hba_detach(pptr->port_dip);
	if (pptr->port_tran != NULL) {
		scsi_hba_tran_free(pptr->port_tran);
	}

#ifdef KSTATS_CODE
	/* clean up kstats */
	if (pptr->fcp_ksp != NULL) {
		kstat_delete(pptr->fcp_ksp);
	}
#endif

	/* clean up soft state mutexes/condition variables */
	mutex_destroy(&pptr->port_mutex);
	mutex_destroy(&pptr->port_pkt_mutex);

	/* all done with soft state */
	ddi_soft_state_free(fcp_softstate, instance);
}
10347
10348 /*
10349 * Function: fcp_kmem_cache_constructor
10350 *
10351 * Description: This function allocates and initializes the resources required
 *		to build a scsi_pkt structure for the target driver. The result
10353 * of the allocation and initialization will be cached in the
10354 * memory cache. As DMA resources may be allocated here, that
10355 * means DMA resources will be tied up in the cache manager.
10356 * This is a tradeoff that has been made for performance reasons.
10357 *
10358 * Argument: *buf Memory to preinitialize.
10359 * *arg FCP port structure (fcp_port).
10360 * kmflags Value passed to kmem_cache_alloc() and
10361 * propagated to the constructor.
10362 *
10363 * Return Value: 0 Allocation/Initialization was successful.
10364 * -1 Allocation or Initialization failed.
10365 *
10366 *
10367 * If the returned value is 0, the buffer is initialized like this:
10368 *
10369 * +================================+
10370 * +----> | struct scsi_pkt |
10371 * | | |
10372 * | +--- | pkt_ha_private |
10373 * | | | |
10374 * | | +================================+
10375 * | |
10376 * | | +================================+
10377 * | +--> | struct fcp_pkt | <---------+
10378 * | | | |
10379 * +----- | cmd_pkt | |
10380 * | cmd_fp_pkt | ---+ |
10381 * +-------->| cmd_fcp_rsp[] | | |
10382 * | +--->| cmd_fcp_cmd[] | | |
10383 * | | |--------------------------------| | |
10384 * | | | struct fc_packet | <--+ |
10385 * | | | | |
10386 * | | | pkt_ulp_private | ----------+
10387 * | | | pkt_fca_private | -----+
10388 * | | | pkt_data_cookie | ---+ |
10389 * | | | pkt_cmdlen | | |
10390 * | |(a) | pkt_rsplen | | |
10391 * | +----| .......... pkt_cmd ........... | ---|-|---------------+
10392 * | (b) | pkt_cmd_cookie | ---|-|----------+ |
10393 * +---------| .......... pkt_resp .......... | ---|-|------+ | |
10394 * | pkt_resp_cookie | ---|-|--+ | | |
10395 * | pkt_cmd_dma | | | | | | |
10396 * | pkt_cmd_acc | | | | | | |
10397 * +================================+ | | | | | |
10398 * | dma_cookies | <--+ | | | | |
10399 * | | | | | | |
10400 * +================================+ | | | | |
10401 * | fca_private | <----+ | | | |
10402 * | | | | | |
10403 * +================================+ | | | |
10404 * | | | |
10405 * | | | |
10406 * +================================+ (d) | | | |
10407 * | fcp_resp cookies | <-------+ | | |
10408 * | | | | |
10409 * +================================+ | | |
10410 * | | |
10411 * +================================+ (d) | | |
10412 * | fcp_resp | <-----------+ | |
10413 * | (DMA resources associated) | | |
10414 * +================================+ | |
10415 * | |
10416 * | |
10417 * | |
10418 * +================================+ (c) | |
10419 * | fcp_cmd cookies | <---------------+ |
10420 * | | |
10421 * +================================+ |
10422 * |
10423 * +================================+ (c) |
10424 * | fcp_cmd | <--------------------+
10425 * | (DMA resources associated) |
10426 * +================================+
10427 *
10428 * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10429 * (b) Only if DMA is NOT used for the FCP_RESP buffer
10430 * (c) Only if DMA is used for the FCP_CMD buffer.
10431 * (d) Only if DMA is used for the FCP_RESP buffer
10432 */
10433 static int
10434 fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
10435 int kmflags)
10436 {
10437 struct fcp_pkt *cmd;
10438 struct fcp_port *pptr;
10439 fc_packet_t *fpkt;
10440
10441 pptr = (struct fcp_port *)tran->tran_hba_private;
10442 cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
10443 bzero(cmd, tran->tran_hba_len);
10444
10445 cmd->cmd_pkt = pkt;
10446 pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
10447 fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
10448 cmd->cmd_fp_pkt = fpkt;
10449
10450 cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
10451 cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
10452 cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
10453 sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);
10454
10455 fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
10456 sizeof (struct fcp_pkt));
10457
10458 fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
10459 fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
10460
10461 if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
10462 /*
10463 * The underlying HBA doesn't want to DMA the fcp_cmd or
10464 * fcp_resp. The transfer of information will be done by
10465 * bcopy.
10466 * The naming of the flags (that is actually a value) is
10467 * unfortunate. FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
10468 * DMA" but instead "NO DMA".
10469 */
10470 fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
10471 fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
10472 fpkt->pkt_resp = cmd->cmd_fcp_rsp;
10473 } else {
10474 /*
10475 * The underlying HBA will dma the fcp_cmd buffer and fcp_resp
10476 * buffer. A buffer is allocated for each one the ddi_dma_*
10477 * interfaces.
10478 */
10479 if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
10480 return (-1);
10481 }
10482 }
10483
10484 return (0);
10485 }
10486
10487 /*
10488 * Function: fcp_kmem_cache_destructor
10489 *
10490 * Description: Called by the destructor of the cache managed by SCSA.
10491 * All the resources pre-allocated in fcp_pkt_constructor
10492 * and the data also pre-initialized in fcp_pkt_constructor
10493 * are freed and uninitialized here.
10494 *
10495 * Argument: *buf Memory to uninitialize.
10496 * *arg FCP port structure (fcp_port).
10497 *
10498 * Return Value: None
10499 *
10500 * Context: kernel
10501 */
10502 static void
10503 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10504 {
10505 struct fcp_pkt *cmd;
10506 struct fcp_port *pptr;
10507
10508 pptr = (struct fcp_port *)(tran->tran_hba_private);
10509 cmd = pkt->pkt_ha_private;
10510
10511 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10512 /*
10513 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10514 * buffer and DMA resources allocated to do so are released.
10515 */
10516 fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10517 }
10518 }
10519
10520 /*
10521 * Function: fcp_alloc_cmd_resp
10522 *
 * Description: This function allocates an FCP_CMD and FCP_RESP buffer that
10524 * will be DMAed by the HBA. The buffer is allocated applying
10525 * the DMA requirements for the HBA. The buffers allocated will
10526 * also be bound. DMA resources are allocated in the process.
10527 * They will be released by fcp_free_cmd_resp().
10528 *
10529 * Argument: *pptr FCP port.
10530 * *fpkt fc packet for which the cmd and resp packet should be
10531 * allocated.
10532 * flags Allocation flags.
10533 *
10534 * Return Value: FC_FAILURE
10535 * FC_SUCCESS
10536 *
10537 * Context: User or Kernel context only if flags == KM_SLEEP.
10538 * Interrupt context if the KM_SLEEP is not specified.
10539 */
10540 static int
10541 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10542 {
10543 int rval;
10544 int cmd_len;
10545 int resp_len;
10546 ulong_t real_len;
10547 int (*cb) (caddr_t);
10548 ddi_dma_cookie_t pkt_cookie;
10549 ddi_dma_cookie_t *cp;
10550 uint32_t cnt;
10551
10552 cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10553
10554 cmd_len = fpkt->pkt_cmdlen;
10555 resp_len = fpkt->pkt_rsplen;
10556
10557 ASSERT(fpkt->pkt_cmd_dma == NULL);
10558
10559 /* Allocation of a DMA handle used in subsequent calls. */
10560 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10561 cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10562 return (FC_FAILURE);
10563 }
10564
10565 /* A buffer is allocated that satisfies the DMA requirements. */
10566 rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10567 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10568 (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10569
10570 if (rval != DDI_SUCCESS) {
10571 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10572 return (FC_FAILURE);
10573 }
10574
10575 if (real_len < cmd_len) {
10576 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10577 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10578 return (FC_FAILURE);
10579 }
10580
10581 /* The buffer allocated is DMA bound. */
10582 rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10583 fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10584 cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10585
10586 if (rval != DDI_DMA_MAPPED) {
10587 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10588 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10589 return (FC_FAILURE);
10590 }
10591
10592 if (fpkt->pkt_cmd_cookie_cnt >
10593 pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10594 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10595 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10596 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10597 return (FC_FAILURE);
10598 }
10599
10600 ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10601
10602 /*
10603 * The buffer where the scatter/gather list is going to be built is
10604 * allocated.
10605 */
10606 cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10607 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10608 KM_NOSLEEP);
10609
10610 if (cp == NULL) {
10611 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10612 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10613 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10614 return (FC_FAILURE);
10615 }
10616
10617 /*
10618 * The scatter/gather list for the buffer we just allocated is built
10619 * here.
10620 */
10621 *cp = pkt_cookie;
10622 cp++;
10623
10624 for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10625 ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10626 &pkt_cookie);
10627 *cp = pkt_cookie;
10628 }
10629
10630 ASSERT(fpkt->pkt_resp_dma == NULL);
10631 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10632 cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10633 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10634 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10635 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10636 return (FC_FAILURE);
10637 }
10638
10639 rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10640 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10641 (caddr_t *)&fpkt->pkt_resp, &real_len,
10642 &fpkt->pkt_resp_acc);
10643
10644 if (rval != DDI_SUCCESS) {
10645 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10646 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10647 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10648 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10649 kmem_free(fpkt->pkt_cmd_cookie,
10650 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10651 return (FC_FAILURE);
10652 }
10653
10654 if (real_len < resp_len) {
10655 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10656 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10657 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10658 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10659 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10660 kmem_free(fpkt->pkt_cmd_cookie,
10661 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10662 return (FC_FAILURE);
10663 }
10664
10665 rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10666 fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10667 cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10668
10669 if (rval != DDI_DMA_MAPPED) {
10670 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10671 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10672 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10673 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10674 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10675 kmem_free(fpkt->pkt_cmd_cookie,
10676 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10677 return (FC_FAILURE);
10678 }
10679
10680 if (fpkt->pkt_resp_cookie_cnt >
10681 pptr->port_resp_dma_attr.dma_attr_sgllen) {
10682 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10683 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10684 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10685 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10686 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10687 kmem_free(fpkt->pkt_cmd_cookie,
10688 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10689 return (FC_FAILURE);
10690 }
10691
10692 ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10693
10694 cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10695 fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10696 KM_NOSLEEP);
10697
10698 if (cp == NULL) {
10699 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10700 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10701 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10702 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10703 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10704 kmem_free(fpkt->pkt_cmd_cookie,
10705 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10706 return (FC_FAILURE);
10707 }
10708
10709 *cp = pkt_cookie;
10710 cp++;
10711
10712 for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10713 ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10714 &pkt_cookie);
10715 *cp = pkt_cookie;
10716 }
10717
10718 return (FC_SUCCESS);
10719 }
10720
10721 /*
10722 * Function: fcp_free_cmd_resp
10723 *
10724 * Description: This function releases the FCP_CMD and FCP_RESP buffer
10725 * allocated by fcp_alloc_cmd_resp() and all the resources
10726 * associated with them. That includes the DMA resources and the
10727 * buffer allocated for the cookies of each one of them.
10728 *
10729 * Argument: *pptr FCP port context.
10730 * *fpkt fc packet containing the cmd and resp packet
10731 * to be released.
10732 *
10733 * Return Value: None
10734 *
10735 * Context: Interrupt, User and Kernel context.
10736 */
10737 /* ARGSUSED */
10738 static void
10739 fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
10740 {
10741 ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
10742
10743 if (fpkt->pkt_resp_dma) {
10744 (void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
10745 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10746 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10747 }
10748
10749 if (fpkt->pkt_resp_cookie) {
10750 kmem_free(fpkt->pkt_resp_cookie,
10751 fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
10752 fpkt->pkt_resp_cookie = NULL;
10753 }
10754
10755 if (fpkt->pkt_cmd_dma) {
10756 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10757 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10758 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10759 }
10760
10761 if (fpkt->pkt_cmd_cookie) {
10762 kmem_free(fpkt->pkt_cmd_cookie,
10763 fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
10764 fpkt->pkt_cmd_cookie = NULL;
10765 }
10766 }
10767
10768
10769 /*
10770 * called by the transport to do our own target initialization
10771 *
10772 * can acquire and release the global mutex
10773 */
/*
 * fcp_phys_tgt_init
 *
 * tran_tgt_init(9E)-style initialization for a physical (non-mpxio)
 * child node.  Reads the child's port-WWN and LUN properties, looks up
 * the matching fcp_lun on this port, and binds the scsi_device to it.
 *
 * Returns:
 *	DDI_SUCCESS		child bound to a known LUN
 *	DDI_NOT_WELL_FORMED	missing/ill-sized WWN property or no LUN prop
 *	DDI_FAILURE		no matching LUN found on this port
 */
/* ARGSUSED */
static int
fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	uchar_t			*bytes;
	uint_t			nbytes;
	uint16_t		lun_num;
	struct fcp_tgt		*ptgt;
	struct fcp_lun		*plun;
	struct fcp_port		*pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;

	ASSERT(pptr != NULL);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
	    FCP_BUF_LEVEL_8, 0,
	    "fcp_phys_tgt_init: called for %s (instance %d)",
	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));

	/* get our port WWN property */
	bytes = NULL;
	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
	    (nbytes != FC_WWN_SIZE)) {
		/* no port WWN property - node cannot be addressed */
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
		    " for %s (instance %d): bytes=%p nbytes=%x",
		    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
		    nbytes);

		/* lookup may have allocated even on a size mismatch */
		if (bytes != NULL) {
			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		}

		return (DDI_NOT_WELL_FORMED);
	}
	ASSERT(bytes != NULL);

	/* 0xFFFF is used as the "property not found" sentinel */
	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
	    LUN_PROP, 0xFFFF);
	if (lun_num == 0xFFFF) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_NOT_WELL_FORMED);
	}

	/* find the LUN on this port matching the WWN/LUN pair */
	mutex_enter(&pptr->port_mutex);
	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
		mutex_exit(&pptr->port_mutex);
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_FAILURE);
	}

	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
	    FC_WWN_SIZE) == 0);
	ASSERT(plun->lun_num == lun_num);

	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);

	ptgt = plun->lun_tgt;

	/*
	 * Bind the scsi_device to the LUN while holding both the port
	 * mutex (still held from the lookup) and the target mutex.
	 */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_tgt_count++;
	scsi_device_hba_private_set(sd, plun);
	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
	plun->lun_sd = sd;
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	return (DDI_SUCCESS);
}
10859
/*
 * fcp_virt_tgt_init
 *
 * tran_tgt_init(9E)-style initialization for an mpxio-managed (virtual)
 * child path.  Same WWN/LUN lookup and binding as fcp_phys_tgt_init, but
 * additionally requires that the scsi_device carry pathinfo state.
 *
 * Returns:
 *	DDI_SUCCESS		path bound to a known LUN
 *	DDI_NOT_WELL_FORMED	no pathinfo, bad WWN property, or no LUN prop
 *	DDI_FAILURE		no matching LUN found on this port
 */
/*ARGSUSED*/
static int
fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	uchar_t			*bytes;
	uint_t			nbytes;
	uint16_t		lun_num;
	struct fcp_tgt		*ptgt;
	struct fcp_lun		*plun;
	struct fcp_port		*pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;
	child_info_t		*cip;

	ASSERT(pptr != NULL);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
	    " (tgt_dip %p)", ddi_get_name(tgt_dip),
	    ddi_get_instance(tgt_dip), hba_dip, tgt_dip);

	/* a virtual path must have pathinfo attached */
	cip = (child_info_t *)sd->sd_pathinfo;
	if (cip == NULL) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		return (DDI_NOT_WELL_FORMED);
	}

	/* get our port WWN property */
	bytes = NULL;
	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
	    (nbytes != FC_WWN_SIZE)) {
		/* lookup may have allocated even on a size mismatch */
		if (bytes) {
			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		}
		return (DDI_NOT_WELL_FORMED);
	}

	ASSERT(bytes != NULL);

	/* 0xFFFF is used as the "property not found" sentinel */
	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
	    LUN_PROP, 0xFFFF);
	if (lun_num == 0xFFFF) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_NOT_WELL_FORMED);
	}

	/* find the LUN on this port matching the WWN/LUN pair */
	mutex_enter(&pptr->port_mutex);
	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
		mutex_exit(&pptr->port_mutex);
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_FAILURE);
	}

	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
	    FC_WWN_SIZE) == 0);
	ASSERT(plun->lun_num == lun_num);

	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);

	ptgt = plun->lun_tgt;

	/*
	 * Bind the scsi_device to the LUN while holding both the port
	 * mutex (still held from the lookup) and the target mutex.
	 */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_tgt_count++;
	scsi_device_hba_private_set(sd, plun);
	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
	plun->lun_sd = sd;
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	return (DDI_SUCCESS);
}
10950
10951
10952 /*
10953 * called by the transport to do our own target initialization
10954 *
10955 * can acquire and release the global mutex
10956 */
10957 /* ARGSUSED */
10958 static int
10959 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10960 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10961 {
10962 struct fcp_port *pptr = (struct fcp_port *)
10963 hba_tran->tran_hba_private;
10964 int rval;
10965
10966 ASSERT(pptr != NULL);
10967
10968 /*
10969 * Child node is getting initialized. Look at the mpxio component
10970 * type on the child device to see if this device is mpxio managed
10971 * or not.
10972 */
10973 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
10974 rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10975 } else {
10976 rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10977 }
10978
10979 return (rval);
10980 }
10981
10982
/*
 * fcp_scsi_tgt_free
 *
 * tran_tgt_free(9E)-style teardown: unbinds the scsi_device from its
 * fcp_lun.  Drops the per-LUN tgt_init count and clears the
 * FCP_SCSI_LUN_TGT_INIT state once the last reference goes away.
 */
/* ARGSUSED */
static void
fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	struct fcp_lun	*plun = scsi_device_hba_private_get(sd);
	struct fcp_tgt	*ptgt;

	/*
	 * NOTE(review): LUN_PORT is evaluated before the plun NULL check
	 * below; if LUN_PORT expands through plun this trace would
	 * dereference a NULL pointer in DEBUG builds — confirm macro
	 * expansion in fcpvar.h.
	 */
	FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
	    ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));

	/* nothing was bound in tgt_init; nothing to undo */
	if (plun == NULL) {
		return;
	}
	ptgt = plun->lun_tgt;

	ASSERT(ptgt != NULL);

	mutex_enter(&ptgt->tgt_mutex);
	ASSERT(plun->lun_tgt_count > 0);

	/* last un-init clears the "bound" state bit */
	if (--plun->lun_tgt_count == 0) {
		plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
	}
	plun->lun_sd = NULL;
	mutex_exit(&ptgt->tgt_mutex);
}
11013
11014 /*
11015 * Function: fcp_scsi_start
11016 *
11017 * Description: This function is called by the target driver to request a
11018 * command to be sent.
11019 *
11020 * Argument: *ap SCSI address of the device.
11021 * *pkt SCSI packet containing the cmd to send.
11022 *
11023 * Return Value: TRAN_ACCEPT
11024 * TRAN_BUSY
11025 * TRAN_BADPKT
11026 * TRAN_FATAL_ERROR
11027 */
static int
fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct fcp_port	*pptr = ADDR2FCP(ap);
	struct fcp_lun	*plun = ADDR2LUN(ap);
	struct fcp_pkt	*cmd = PKT2CMD(pkt);
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	int		rval;

	/* ensure command isn't already issued */
	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_9, 0,
	    "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);

	/*
	 * It is strange that we enter the fcp_port mutex and the target
	 * mutex to check the lun state (which has a mutex of its own).
	 */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);

	/*
	 * If the device is offline and is not in the process of coming
	 * online, fail the request.
	 */

	if ((plun->lun_state & FCP_LUN_OFFLINE) &&
	    !(plun->lun_state & FCP_LUN_ONLINING)) {
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		/* a NULL pd means the remote port itself has gone away */
		if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
			pkt->pkt_reason = CMD_DEV_GONE;
		}

		return (TRAN_FATAL_ERROR);
	}
	cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;

	/*
	 * If we are suspended, kernel is trying to dump, so don't
	 * block, fail or defer requests - send them down right away.
	 * NOTE: If we are in panic (i.e. trying to dump), we can't
	 * assume we have been suspended.  There is hardware such as
	 * the v880 that doesn't do PM.  Thus, the check for
	 * ddi_in_panic.
	 *
	 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
	 * of changing.  So, if we can queue the packet, do it.  Eventually,
	 * either the device will have gone away or changed and we can fail
	 * the request, or we can proceed if the device didn't change.
	 *
	 * If the pd in the target or the packet is NULL it's probably
	 * because the device has gone away, we allow the request to be
	 * put on the internal queue here in case the device comes back within
	 * the offline timeout. fctl will fix up the pd's if the tgt_pd_handle
	 * has gone NULL, while fcp deals cases where pkt_pd is NULL. pkt_pd
	 * could be NULL because the device was disappearing during or since
	 * packet initialization.
	 */

	if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
	    FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
	    (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
	    (ptgt->tgt_pd_handle == NULL) ||
	    (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
		/*
		 * If ((LUN is busy AND
		 *	LUN not suspended AND
		 *	The system is not in panic state) OR
		 *	(The port is coming up))
		 *
		 * We check to see if the any of the flags FLAG_NOINTR or
		 * FLAG_NOQUEUE is set. If one of them is set the value
		 * returned will be TRAN_BUSY. If not, the request is queued.
		 */
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		/* see if using interrupts is allowed (so queueing'll work) */
		if (pkt->pkt_flags & FLAG_NOINTR) {
			pkt->pkt_resid = 0;
			return (TRAN_BUSY);
		}
		if (pkt->pkt_flags & FLAG_NOQUEUE) {
			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_9, 0,
			    "fcp_scsi_start: lun busy for pkt %p", pkt);
			return (TRAN_BUSY);
		}
#ifdef	DEBUG
		mutex_enter(&pptr->port_pkt_mutex);
		pptr->port_npkts++;
		mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

		/* got queue up the pkt for later */
		fcp_queue_pkt(pptr, cmd);
		return (TRAN_ACCEPT);
	}
	cmd->cmd_state = FCP_PKT_ISSUED;

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/*
	 * Now that we released the mutexes, what was protected by them can
	 * change.
	 */

	/*
	 * If there is a reconfiguration in progress, wait for it to complete.
	 */
	fcp_reconfig_wait(pptr);

	/* pkt_time == 0 means "no watchdog timeout" for this command */
	cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
	    pkt->pkt_time : 0;

	/* prepare the packet */

	fcp_prepare_pkt(pptr, cmd, plun);

	/* default FC-layer timeout of 5 hours when the caller set none */
	if (cmd->cmd_pkt->pkt_time) {
		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
	} else {
		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
	}

	/*
	 * if interrupts aren't allowed (e.g. at dump time) then we'll
	 * have to do polled I/O
	 */
	if (pkt->pkt_flags & FLAG_NOINTR) {
		cmd->cmd_state &= ~FCP_PKT_ISSUED;
		return (fcp_dopoll(pptr, cmd));
	}

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts++;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
	if (rval == FC_SUCCESS) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_9, 0,
		    "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
		return (TRAN_ACCEPT);
	}

	/* transport refused the packet; undo the ISSUED state */
	cmd->cmd_state = FCP_PKT_IDLE;

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts--;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	/*
	 * For lack of clearer definitions, choose
	 * between TRAN_BUSY and TRAN_FATAL_ERROR.
	 */

	if (rval == FC_TRAN_BUSY) {
		pkt->pkt_resid = 0;
		rval = TRAN_BUSY;
	} else {
		mutex_enter(&ptgt->tgt_mutex);
		if (plun->lun_state & FCP_LUN_OFFLINE) {
			child_info_t	*cip;

			mutex_enter(&plun->lun_mutex);
			cip = plun->lun_cip;
			mutex_exit(&plun->lun_mutex);

			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fcp_transport failed 2 for %x: %x; dip=%p",
			    plun->lun_tgt->tgt_d_id, rval, cip);

			rval = TRAN_FATAL_ERROR;
		} else {
			/* LUN still online: retry later unless NOQUEUE */
			if (pkt->pkt_flags & FLAG_NOQUEUE) {
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_9, 0,
				    "fcp_scsi_start: FC_BUSY for pkt %p",
				    pkt);
				rval = TRAN_BUSY;
			} else {
				rval = TRAN_ACCEPT;
				fcp_queue_pkt(pptr, cmd);
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
	}

	return (rval);
}
11229
11230 /*
11231 * called by the transport to abort a packet
11232 */
11233 /*ARGSUSED*/
11234 static int
11235 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11236 {
11237 int tgt_cnt;
11238 struct fcp_port *pptr = ADDR2FCP(ap);
11239 struct fcp_lun *plun = ADDR2LUN(ap);
11240 struct fcp_tgt *ptgt = plun->lun_tgt;
11241
11242 if (pkt == NULL) {
11243 if (ptgt) {
11244 mutex_enter(&ptgt->tgt_mutex);
11245 tgt_cnt = ptgt->tgt_change_cnt;
11246 mutex_exit(&ptgt->tgt_mutex);
11247 fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11248 return (TRUE);
11249 }
11250 }
11251 return (FALSE);
11252 }
11253
11254
11255 /*
11256 * Perform reset
11257 */
11258 int
11259 fcp_scsi_reset(struct scsi_address *ap, int level)
11260 {
11261 int rval = 0;
11262 struct fcp_port *pptr = ADDR2FCP(ap);
11263 struct fcp_lun *plun = ADDR2LUN(ap);
11264 struct fcp_tgt *ptgt = plun->lun_tgt;
11265
11266 if (level == RESET_ALL) {
11267 if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
11268 rval = 1;
11269 }
11270 } else if (level == RESET_TARGET || level == RESET_LUN) {
11271 /*
11272 * If we are in the middle of discovery, return
11273 * SUCCESS as this target will be rediscovered
11274 * anyway
11275 */
11276 mutex_enter(&ptgt->tgt_mutex);
11277 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11278 mutex_exit(&ptgt->tgt_mutex);
11279 return (1);
11280 }
11281 mutex_exit(&ptgt->tgt_mutex);
11282
11283 if (fcp_reset_target(ap, level) == FC_SUCCESS) {
11284 rval = 1;
11285 }
11286 }
11287 return (rval);
11288 }
11289
11290
11291 /*
11292 * called by the framework to get a SCSI capability
11293 */
11294 static int
11295 fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
11296 {
11297 return (fcp_commoncap(ap, cap, 0, whom, 0));
11298 }
11299
11300
11301 /*
11302 * called by the framework to set a SCSI capability
11303 */
11304 static int
11305 fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
11306 {
11307 return (fcp_commoncap(ap, cap, value, whom, 1));
11308 }
11309
11310 /*
11311 * Function: fcp_pkt_setup
11312 *
11313 * Description: This function sets up the scsi_pkt structure passed by the
11314 * caller. This function assumes fcp_pkt_constructor has been
11315 * called previously for the packet passed by the caller. If
11316 * successful this call will have the following results:
11317 *
11318 * - The resources needed that will be constant through out
11319 * the whole transaction are allocated.
11320 * - The fields that will be constant through out the whole
11321 * transaction are initialized.
11322 * - The scsi packet will be linked to the LUN structure
11323 * addressed by the transaction.
11324 *
11325 * Argument:
11326 * *pkt Pointer to a scsi_pkt structure.
11327 * callback
11328 * arg
11329 *
11330 * Return Value: 0 Success
11331 * !0 Failure
11332 *
11333 * Context: Kernel context or interrupt context
11334 */
11335 /* ARGSUSED */
11336 static int
11337 fcp_pkt_setup(struct scsi_pkt *pkt,
11338 int (*callback)(caddr_t arg),
11339 caddr_t arg)
11340 {
11341 struct fcp_pkt *cmd;
11342 struct fcp_port *pptr;
11343 struct fcp_lun *plun;
11344 struct fcp_tgt *ptgt;
11345 int kf;
11346 fc_packet_t *fpkt;
11347 fc_frame_hdr_t *hp;
11348
11349 pptr = ADDR2FCP(&pkt->pkt_address);
11350 plun = ADDR2LUN(&pkt->pkt_address);
11351 ptgt = plun->lun_tgt;
11352
11353 cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11354 fpkt = cmd->cmd_fp_pkt;
11355
11356 /*
11357 * this request is for dma allocation only
11358 */
11359 /*
11360 * First step of fcp_scsi_init_pkt: pkt allocation
11361 * We determine if the caller is willing to wait for the
11362 * resources.
11363 */
11364 kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
11365
11366 /*
11367 * Selective zeroing of the pkt.
11368 */
11369 cmd->cmd_back = NULL;
11370 cmd->cmd_next = NULL;
11371
11372 /*
11373 * Zero out fcp command
11374 */
11375 bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));
11376
11377 cmd->cmd_state = FCP_PKT_IDLE;
11378
11379 fpkt = cmd->cmd_fp_pkt;
11380 fpkt->pkt_data_acc = NULL;
11381
11382 /*
11383 * When port_state is FCP_STATE_OFFLINE, remote_port (tgt_pd_handle)
11384 * could be destroyed. We need fail pkt_setup.
11385 */
11386 if (pptr->port_state & FCP_STATE_OFFLINE) {
11387 return (-1);
11388 }
11389
11390 mutex_enter(&ptgt->tgt_mutex);
11391 fpkt->pkt_pd = ptgt->tgt_pd_handle;
11392
11393 if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
11394 != FC_SUCCESS) {
11395 mutex_exit(&ptgt->tgt_mutex);
11396 return (-1);
11397 }
11398
11399 mutex_exit(&ptgt->tgt_mutex);
11400
11401 /* Fill in the Fabric Channel Header */
11402 hp = &fpkt->pkt_cmd_fhdr;
11403 hp->r_ctl = R_CTL_COMMAND;
11404 hp->rsvd = 0;
11405 hp->type = FC_TYPE_SCSI_FCP;
11406 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
11407 hp->seq_id = 0;
11408 hp->df_ctl = 0;
11409 hp->seq_cnt = 0;
11410 hp->ox_id = 0xffff;
11411 hp->rx_id = 0xffff;
11412 hp->ro = 0;
11413
11414 /*
11415 * A doubly linked list (cmd_forw, cmd_back) is built
11416 * out of every allocated packet on a per-lun basis
11417 *
11418 * The packets are maintained in the list so as to satisfy
11419 * scsi_abort() requests. At present (which is unlikely to
11420 * change in the future) nobody performs a real scsi_abort
11421 * in the SCSI target drivers (as they don't keep the packets
11422 * after doing scsi_transport - so they don't know how to
11423 * abort a packet other than sending a NULL to abort all
11424 * outstanding packets)
11425 */
11426 mutex_enter(&plun->lun_mutex);
11427 if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
11428 plun->lun_pkt_head->cmd_back = cmd;
11429 } else {
11430 plun->lun_pkt_tail = cmd;
11431 }
11432 plun->lun_pkt_head = cmd;
11433 mutex_exit(&plun->lun_mutex);
11434 return (0);
11435 }
11436
11437 /*
11438 * Function: fcp_pkt_teardown
11439 *
11440 * Description: This function releases a scsi_pkt structure and all the
11441 * resources attached to it.
11442 *
11443 * Argument: *pkt Pointer to a scsi_pkt structure.
11444 *
11445 * Return Value: None
11446 *
11447 * Context: User, Kernel or Interrupt context.
11448 */
static void
fcp_pkt_teardown(struct scsi_pkt *pkt)
{
	struct fcp_port	*pptr = ADDR2FCP(&pkt->pkt_address);
	struct fcp_lun	*plun = ADDR2LUN(&pkt->pkt_address);
	struct fcp_pkt	*cmd = (struct fcp_pkt *)pkt->pkt_ha_private;

	/*
	 * Remove the packet from the per-lun list (doubly linked via
	 * cmd_forw/cmd_back; lun_pkt_head/lun_pkt_tail are the ends).
	 */
	mutex_enter(&plun->lun_mutex);
	if (cmd->cmd_back) {
		/* interior or tail node: splice predecessor forward */
		ASSERT(cmd != plun->lun_pkt_head);
		cmd->cmd_back->cmd_forw = cmd->cmd_forw;
	} else {
		/* no predecessor: cmd must be the list head */
		ASSERT(cmd == plun->lun_pkt_head);
		plun->lun_pkt_head = cmd->cmd_forw;
	}

	if (cmd->cmd_forw) {
		/* interior or head node: splice successor backward */
		cmd->cmd_forw->cmd_back = cmd->cmd_back;
	} else {
		/* no successor: cmd must be the list tail */
		ASSERT(cmd == plun->lun_pkt_tail);
		plun->lun_pkt_tail = cmd->cmd_back;
	}

	mutex_exit(&plun->lun_mutex);

	/* release the FC layer resources bound in fcp_pkt_setup() */
	(void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
}
11479
11480 /*
11481 * Routine for reset notification setup, to register or cancel.
11482 * This function is called by SCSA
11483 */
11484 /*ARGSUSED*/
11485 static int
11486 fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
11487 void (*callback)(caddr_t), caddr_t arg)
11488 {
11489 struct fcp_port *pptr = ADDR2FCP(ap);
11490
11491 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
11492 &pptr->port_mutex, &pptr->port_reset_notify_listf));
11493 }
11494
11495
11496 static int
11497 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11498 ddi_eventcookie_t *event_cookiep)
11499 {
11500 struct fcp_port *pptr = fcp_dip2port(dip);
11501
11502 if (pptr == NULL) {
11503 return (DDI_FAILURE);
11504 }
11505
11506 return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11507 event_cookiep, NDI_EVENT_NOPASS));
11508 }
11509
11510
11511 static int
11512 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11513 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11514 ddi_callback_id_t *cb_id)
11515 {
11516 struct fcp_port *pptr = fcp_dip2port(dip);
11517
11518 if (pptr == NULL) {
11519 return (DDI_FAILURE);
11520 }
11521
11522 return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11523 eventid, callback, arg, NDI_SLEEP, cb_id));
11524 }
11525
11526
11527 static int
11528 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11529 {
11530
11531 struct fcp_port *pptr = fcp_dip2port(dip);
11532
11533 if (pptr == NULL) {
11534 return (DDI_FAILURE);
11535 }
11536 return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11537 }
11538
11539
11540 /*
11541 * called by the transport to post an event
11542 */
11543 static int
11544 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11545 ddi_eventcookie_t eventid, void *impldata)
11546 {
11547 struct fcp_port *pptr = fcp_dip2port(dip);
11548
11549 if (pptr == NULL) {
11550 return (DDI_FAILURE);
11551 }
11552
11553 return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11554 eventid, impldata));
11555 }
11556
11557
11558 /*
11559 * A target in in many cases in Fibre Channel has a one to one relation
11560 * with a port identifier (which is also known as D_ID and also as AL_PA
11561 * in private Loop) On Fibre Channel-to-SCSI bridge boxes a target reset
11562 * will most likely result in resetting all LUNs (which means a reset will
11563 * occur on all the SCSI devices connected at the other end of the bridge)
11564 * That is the latest favorite topic for discussion, for, one can debate as
11565 * hot as one likes and come up with arguably a best solution to one's
11566 * satisfaction
11567 *
11568 * To stay on track and not digress much, here are the problems stated
11569 * briefly:
11570 *
11571 * SCSA doesn't define RESET_LUN, It defines RESET_TARGET, but the
11572 * target drivers use RESET_TARGET even if their instance is on a
11573 * LUN. Doesn't that sound a bit broken ?
11574 *
11575 * FCP SCSI (the current spec) only defines RESET TARGET in the
11576 * control fields of an FCP_CMND structure. It should have been
11577 * fixed right there, giving flexibility to the initiators to
11578 * minimize havoc that could be caused by resetting a target.
11579 */
11580 static int
11581 fcp_reset_target(struct scsi_address *ap, int level)
11582 {
11583 int rval = FC_FAILURE;
11584 char lun_id[25];
11585 struct fcp_port *pptr = ADDR2FCP(ap);
11586 struct fcp_lun *plun = ADDR2LUN(ap);
11587 struct fcp_tgt *ptgt = plun->lun_tgt;
11588 struct scsi_pkt *pkt;
11589 struct fcp_pkt *cmd;
11590 struct fcp_rsp *rsp;
11591 uint32_t tgt_cnt;
11592 struct fcp_rsp_info *rsp_info;
11593 struct fcp_reset_elem *p;
11594 int bval;
11595
11596 if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
11597 KM_NOSLEEP)) == NULL) {
11598 return (rval);
11599 }
11600
11601 mutex_enter(&ptgt->tgt_mutex);
11602 if (level == RESET_TARGET) {
11603 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11604 mutex_exit(&ptgt->tgt_mutex);
11605 kmem_free(p, sizeof (struct fcp_reset_elem));
11606 return (rval);
11607 }
11608 fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
11609 (void) strcpy(lun_id, " ");
11610 } else {
11611 if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
11612 mutex_exit(&ptgt->tgt_mutex);
11613 kmem_free(p, sizeof (struct fcp_reset_elem));
11614 return (rval);
11615 }
11616 fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
11617
11618 (void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
11619 }
11620 tgt_cnt = ptgt->tgt_change_cnt;
11621
11622 mutex_exit(&ptgt->tgt_mutex);
11623
11624 if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
11625 0, 0, NULL, 0)) == NULL) {
11626 kmem_free(p, sizeof (struct fcp_reset_elem));
11627 mutex_enter(&ptgt->tgt_mutex);
11628 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11629 mutex_exit(&ptgt->tgt_mutex);
11630 return (rval);
11631 }
11632 pkt->pkt_time = FCP_POLL_TIMEOUT;
11633
11634 /* fill in cmd part of packet */
11635 cmd = PKT2CMD(pkt);
11636 if (level == RESET_TARGET) {
11637 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
11638 } else {
11639 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
11640 }
11641 cmd->cmd_fp_pkt->pkt_comp = NULL;
11642 cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
11643
11644 /* prepare a packet for transport */
11645 fcp_prepare_pkt(pptr, cmd, plun);
11646
11647 if (cmd->cmd_pkt->pkt_time) {
11648 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11649 } else {
11650 cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11651 }
11652
11653 (void) fc_ulp_busy_port(pptr->port_fp_handle);
11654 bval = fcp_dopoll(pptr, cmd);
11655 fc_ulp_idle_port(pptr->port_fp_handle);
11656
11657 /* submit the packet */
11658 if (bval == TRAN_ACCEPT) {
11659 int error = 3;
11660
11661 rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
11662 rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
11663 sizeof (struct fcp_rsp));
11664
11665 if (rsp->fcp_u.fcp_status.rsp_len_set) {
11666 if (fcp_validate_fcp_response(rsp, pptr) ==
11667 FC_SUCCESS) {
11668 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
11669 FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
11670 sizeof (struct fcp_rsp), rsp_info,
11671 cmd->cmd_fp_pkt->pkt_resp_acc,
11672 sizeof (struct fcp_rsp_info));
11673 }
11674 if (rsp_info->rsp_code == FCP_NO_FAILURE) {
11675 rval = FC_SUCCESS;
11676 error = 0;
11677 } else {
11678 error = 1;
11679 }
11680 } else {
11681 error = 2;
11682 }
11683 }
11684
11685 switch (error) {
11686 case 0:
11687 fcp_log(CE_WARN, pptr->port_dip,
11688 "!FCP: WWN 0x%08x%08x %s reset successfully",
11689 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11690 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11691 break;
11692
11693 case 1:
11694 fcp_log(CE_WARN, pptr->port_dip,
11695 "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11696 " response code=%x",
11697 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11698 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11699 rsp_info->rsp_code);
11700 break;
11701
11702 case 2:
11703 fcp_log(CE_WARN, pptr->port_dip,
11704 "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11705 " Bad FCP response values: rsvd1=%x,"
11706 " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
11707 " rsplen=%x, senselen=%x",
11708 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11709 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11710 rsp->reserved_0, rsp->reserved_1,
11711 rsp->fcp_u.fcp_status.reserved_0,
11712 rsp->fcp_u.fcp_status.reserved_1,
11713 rsp->fcp_response_len, rsp->fcp_sense_len);
11714 break;
11715
11716 default:
11717 fcp_log(CE_WARN, pptr->port_dip,
11718 "!FCP: Reset to WWN 0x%08x%08x %s failed",
11719 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11720 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11721 break;
11722 }
11723 }
11724 scsi_destroy_pkt(pkt);
11725
11726 if (rval == FC_FAILURE) {
11727 mutex_enter(&ptgt->tgt_mutex);
11728 if (level == RESET_TARGET) {
11729 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11730 } else {
11731 fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
11732 }
11733 mutex_exit(&ptgt->tgt_mutex);
11734 kmem_free(p, sizeof (struct fcp_reset_elem));
11735 return (rval);
11736 }
11737
11738 mutex_enter(&pptr->port_mutex);
11739 if (level == RESET_TARGET) {
11740 p->tgt = ptgt;
11741 p->lun = NULL;
11742 } else {
11743 p->tgt = NULL;
11744 p->lun = plun;
11745 }
11746 p->tgt = ptgt;
11747 p->tgt_cnt = tgt_cnt;
11748 p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
11749 p->next = pptr->port_reset_list;
11750 pptr->port_reset_list = p;
11751
11752 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11753 fcp_trace, FCP_BUF_LEVEL_3, 0,
11754 "Notify ssd of the reset to reinstate the reservations");
11755
11756 scsi_hba_reset_notify_callback(&pptr->port_mutex,
11757 &pptr->port_reset_notify_listf);
11758
11759 mutex_exit(&pptr->port_mutex);
11760
11761 return (rval);
11762 }
11763
11764
11765 /*
11766 * called by fcp_getcap and fcp_setcap to get and set (respectively)
11767 * SCSI capabilities
11768 */
11769 /* ARGSUSED */
11770 static int
11771 fcp_commoncap(struct scsi_address *ap, char *cap,
11772 int val, int tgtonly, int doset)
11773 {
11774 struct fcp_port *pptr = ADDR2FCP(ap);
11775 struct fcp_lun *plun = ADDR2LUN(ap);
11776 struct fcp_tgt *ptgt = plun->lun_tgt;
11777 int cidx;
11778 int rval = FALSE;
11779
11780 if (cap == (char *)0) {
11781 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11782 fcp_trace, FCP_BUF_LEVEL_3, 0,
11783 "fcp_commoncap: invalid arg");
11784 return (rval);
11785 }
11786
11787 if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
11788 return (UNDEFINED);
11789 }
11790
11791 /*
11792 * Process setcap request.
11793 */
11794 if (doset) {
11795 /*
11796 * At present, we can only set binary (0/1) values
11797 */
11798 switch (cidx) {
11799 case SCSI_CAP_ARQ:
11800 if (val == 0) {
11801 rval = FALSE;
11802 } else {
11803 rval = TRUE;
11804 }
11805 break;
11806
11807 case SCSI_CAP_LUN_RESET:
11808 if (val) {
11809 plun->lun_cap |= FCP_LUN_CAP_RESET;
11810 } else {
11811 plun->lun_cap &= ~FCP_LUN_CAP_RESET;
11812 }
11813 rval = TRUE;
11814 break;
11815
11816 case SCSI_CAP_SECTOR_SIZE:
11817 rval = TRUE;
11818 break;
11819 default:
11820 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11821 fcp_trace, FCP_BUF_LEVEL_4, 0,
11822 "fcp_setcap: unsupported %d", cidx);
11823 rval = UNDEFINED;
11824 break;
11825 }
11826
11827 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11828 fcp_trace, FCP_BUF_LEVEL_5, 0,
11829 "set cap: cap=%s, val/tgtonly/doset/rval = "
11830 "0x%x/0x%x/0x%x/%d",
11831 cap, val, tgtonly, doset, rval);
11832
11833 } else {
11834 /*
11835 * Process getcap request.
11836 */
11837 switch (cidx) {
11838 case SCSI_CAP_DMA_MAX:
11839 rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;
11840
11841 /*
11842 * Need to make an adjustment qlc is uint_t 64
11843 * st is int, so we will make the adjustment here
11844 * being as nobody wants to touch this.
11845 * It still leaves the max single block length
11846 * of 2 gig. This should last .
11847 */
11848
11849 if (rval == -1) {
11850 rval = MAX_INT_DMA;
11851 }
11852
11853 break;
11854
11855 case SCSI_CAP_INITIATOR_ID:
11856 rval = pptr->port_id;
11857 break;
11858
11859 case SCSI_CAP_ARQ:
11860 case SCSI_CAP_RESET_NOTIFICATION:
11861 case SCSI_CAP_TAGGED_QING:
11862 rval = TRUE;
11863 break;
11864
11865 case SCSI_CAP_SCSI_VERSION:
11866 rval = 3;
11867 break;
11868
11869 case SCSI_CAP_INTERCONNECT_TYPE:
11870 if (FC_TOP_EXTERNAL(pptr->port_topology) ||
11871 (ptgt->tgt_hard_addr == 0)) {
11872 rval = INTERCONNECT_FABRIC;
11873 } else {
11874 rval = INTERCONNECT_FIBRE;
11875 }
11876 break;
11877
11878 case SCSI_CAP_LUN_RESET:
11879 rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
11880 TRUE : FALSE;
11881 break;
11882
11883 default:
11884 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11885 fcp_trace, FCP_BUF_LEVEL_4, 0,
11886 "fcp_getcap: unsupported %d", cidx);
11887 rval = UNDEFINED;
11888 break;
11889 }
11890
11891 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11892 fcp_trace, FCP_BUF_LEVEL_8, 0,
11893 "get cap: cap=%s, val/tgtonly/doset/rval = "
11894 "0x%x/0x%x/0x%x/%d",
11895 cap, val, tgtonly, doset, rval);
11896 }
11897
11898 return (rval);
11899 }
11900
11901 /*
11902 * called by the transport to get the port-wwn and lun
11903 * properties of this device, and to create a "name" based on them
11904 *
11905 * these properties don't exist on sun4m
11906 *
11907 * return 1 for success else return 0
11908 */
11909 /* ARGSUSED */
11910 static int
11911 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11912 {
11913 int i;
11914 int *lun;
11915 int numChars;
11916 uint_t nlun;
11917 uint_t count;
11918 uint_t nbytes;
11919 uchar_t *bytes;
11920 uint16_t lun_num;
11921 uint32_t tgt_id;
11922 char **conf_wwn;
11923 char tbuf[(FC_WWN_SIZE << 1) + 1];
11924 uchar_t barray[FC_WWN_SIZE];
11925 dev_info_t *tgt_dip;
11926 struct fcp_tgt *ptgt;
11927 struct fcp_port *pptr;
11928 struct fcp_lun *plun;
11929
11930 ASSERT(sd != NULL);
11931 ASSERT(name != NULL);
11932
11933 tgt_dip = sd->sd_dev;
11934 pptr = ddi_get_soft_state(fcp_softstate,
11935 ddi_get_instance(ddi_get_parent(tgt_dip)));
11936 if (pptr == NULL) {
11937 return (0);
11938 }
11939
11940 ASSERT(tgt_dip != NULL);
11941
11942 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11943 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11944 LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11945 name[0] = '\0';
11946 return (0);
11947 }
11948
11949 if (nlun == 0) {
11950 ddi_prop_free(lun);
11951 return (0);
11952 }
11953
11954 lun_num = lun[0];
11955 ddi_prop_free(lun);
11956
11957 /*
11958 * Lookup for .conf WWN property
11959 */
11960 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
11961 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
11962 &conf_wwn, &count) == DDI_PROP_SUCCESS) {
11963 ASSERT(count >= 1);
11964
11965 fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
11966 ddi_prop_free(conf_wwn);
11967 mutex_enter(&pptr->port_mutex);
11968 if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
11969 mutex_exit(&pptr->port_mutex);
11970 return (0);
11971 }
11972 ptgt = plun->lun_tgt;
11973 mutex_exit(&pptr->port_mutex);
11974
11975 (void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
11976 tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
11977
11978 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
11979 ptgt->tgt_hard_addr != 0) {
11980 tgt_id = (uint32_t)fcp_alpa_to_switch[
11981 ptgt->tgt_hard_addr];
11982 } else {
11983 tgt_id = ptgt->tgt_d_id;
11984 }
11985
11986 (void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
11987 TARGET_PROP, tgt_id);
11988 }
11989
11990 /* get the our port-wwn property */
11991 bytes = NULL;
11992 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
11993 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
11994 &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
11995 if (bytes != NULL) {
11996 ddi_prop_free(bytes);
11997 }
11998 return (0);
11999 }
12000
12001 for (i = 0; i < FC_WWN_SIZE; i++) {
12002 (void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
12003 }
12004
12005 /* Stick in the address of the form "wWWN,LUN" */
12006 numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
12007
12008 ASSERT(numChars < len);
12009 if (numChars >= len) {
12010 fcp_log(CE_WARN, pptr->port_dip,
12011 "!fcp_scsi_get_name: "
12012 "name parameter length too small, it needs to be %d",
12013 numChars+1);
12014 }
12015
12016 ddi_prop_free(bytes);
12017
12018 return (1);
12019 }
12020
12021
12022 /*
12023 * called by the transport to get the SCSI target id value, returning
12024 * it in "name"
12025 *
12026 * this isn't needed/used on sun4m
12027 *
12028 * return 1 for success else return 0
12029 */
12030 /* ARGSUSED */
12031 static int
12032 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
12033 {
12034 struct fcp_lun *plun = ADDR2LUN(&sd->sd_address);
12035 struct fcp_tgt *ptgt;
12036 int numChars;
12037
12038 if (plun == NULL) {
12039 return (0);
12040 }
12041
12042 if ((ptgt = plun->lun_tgt) == NULL) {
12043 return (0);
12044 }
12045
12046 numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
12047
12048 ASSERT(numChars < len);
12049 if (numChars >= len) {
12050 fcp_log(CE_WARN, NULL,
12051 "!fcp_scsi_get_bus_addr: "
12052 "name parameter length too small, it needs to be %d",
12053 numChars+1);
12054 }
12055
12056 return (1);
12057 }
12058
12059
12060 /*
12061 * called internally to reset the link where the specified port lives
12062 */
12063 static int
12064 fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
12065 {
12066 la_wwn_t wwn;
12067 struct fcp_lun *plun;
12068 struct fcp_tgt *ptgt;
12069
12070 /* disable restart of lip if we're suspended */
12071 mutex_enter(&pptr->port_mutex);
12072
12073 if (pptr->port_state & (FCP_STATE_SUSPENDED |
12074 FCP_STATE_POWER_DOWN)) {
12075 mutex_exit(&pptr->port_mutex);
12076 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12077 fcp_trace, FCP_BUF_LEVEL_2, 0,
12078 "fcp_linkreset, fcp%d: link reset "
12079 "disabled due to DDI_SUSPEND",
12080 ddi_get_instance(pptr->port_dip));
12081 return (FC_FAILURE);
12082 }
12083
12084 if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
12085 mutex_exit(&pptr->port_mutex);
12086 return (FC_SUCCESS);
12087 }
12088
12089 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
12090 fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");
12091
12092 /*
12093 * If ap == NULL assume local link reset.
12094 */
12095 if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
12096 plun = ADDR2LUN(ap);
12097 ptgt = plun->lun_tgt;
12098 bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
12099 } else {
12100 bzero((caddr_t)&wwn, sizeof (wwn));
12101 }
12102 mutex_exit(&pptr->port_mutex);
12103
12104 return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
12105 }
12106
12107
12108 /*
12109 * called from fcp_port_attach() to resume a port
12110 * return DDI_* success/failure status
12111 * acquires and releases the global mutex
12112 * acquires and releases the port mutex
12113 */
12114 /*ARGSUSED*/
12115
12116 static int
12117 fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
12118 uint32_t s_id, fc_attach_cmd_t cmd, int instance)
12119 {
12120 int res = DDI_FAILURE; /* default result */
12121 struct fcp_port *pptr; /* port state ptr */
12122 uint32_t alloc_cnt;
12123 uint32_t max_cnt;
12124 fc_portmap_t *tmp_list = NULL;
12125
12126 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
12127 FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
12128 instance);
12129
12130 if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
12131 cmn_err(CE_WARN, "fcp: bad soft state");
12132 return (res);
12133 }
12134
12135 mutex_enter(&pptr->port_mutex);
12136 switch (cmd) {
12137 case FC_CMD_RESUME:
12138 ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
12139 pptr->port_state &= ~FCP_STATE_SUSPENDED;
12140 break;
12141
12142 case FC_CMD_POWER_UP:
12143 /*
12144 * If the port is DDI_SUSPENded, defer rediscovery
12145 * until DDI_RESUME occurs
12146 */
12147 if (pptr->port_state & FCP_STATE_SUSPENDED) {
12148 pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12149 mutex_exit(&pptr->port_mutex);
12150 return (DDI_SUCCESS);
12151 }
12152 pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12153 }
12154 pptr->port_id = s_id;
12155 pptr->port_state = FCP_STATE_INIT;
12156 mutex_exit(&pptr->port_mutex);
12157
12158 /*
12159 * Make a copy of ulp_port_info as fctl allocates
12160 * a temp struct.
12161 */
12162 (void) fcp_cp_pinfo(pptr, pinfo);
12163
12164 mutex_enter(&fcp_global_mutex);
12165 if (fcp_watchdog_init++ == 0) {
12166 fcp_watchdog_tick = fcp_watchdog_timeout *
12167 drv_usectohz(1000000);
12168 fcp_watchdog_id = timeout(fcp_watch,
12169 NULL, fcp_watchdog_tick);
12170 }
12171 mutex_exit(&fcp_global_mutex);
12172
12173 /*
12174 * Handle various topologies and link states.
12175 */
12176 switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
12177 case FC_STATE_OFFLINE:
12178 /*
12179 * Wait for ONLINE, at which time a state
12180 * change will cause a statec_callback
12181 */
12182 res = DDI_SUCCESS;
12183 break;
12184
12185 case FC_STATE_ONLINE:
12186
12187 if (pptr->port_topology == FC_TOP_UNKNOWN) {
12188 (void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
12189 res = DDI_SUCCESS;
12190 break;
12191 }
12192
12193 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
12194 !fcp_enable_auto_configuration) {
12195 tmp_list = fcp_construct_map(pptr, &alloc_cnt);
12196 if (tmp_list == NULL) {
12197 if (!alloc_cnt) {
12198 res = DDI_SUCCESS;
12199 }
12200 break;
12201 }
12202 max_cnt = alloc_cnt;
12203 } else {
12204 ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
12205
12206 alloc_cnt = FCP_MAX_DEVICES;
12207
12208 if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
12209 (sizeof (fc_portmap_t)) * alloc_cnt,
12210 KM_NOSLEEP)) == NULL) {
12211 fcp_log(CE_WARN, pptr->port_dip,
12212 "!fcp%d: failed to allocate portmap",
12213 instance);
12214 break;
12215 }
12216
12217 max_cnt = alloc_cnt;
12218 if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
12219 &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
12220 FC_SUCCESS) {
12221 caddr_t msg;
12222
12223 (void) fc_ulp_error(res, &msg);
12224
12225 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12226 fcp_trace, FCP_BUF_LEVEL_2, 0,
12227 "resume failed getportmap: reason=0x%x",
12228 res);
12229
12230 fcp_log(CE_WARN, pptr->port_dip,
12231 "!failed to get port map : %s", msg);
12232 break;
12233 }
12234 if (max_cnt > alloc_cnt) {
12235 alloc_cnt = max_cnt;
12236 }
12237 }
12238
12239 /*
12240 * do the SCSI device discovery and create
12241 * the devinfos
12242 */
12243 fcp_statec_callback(ulph, pptr->port_fp_handle,
12244 pptr->port_phys_state, pptr->port_topology, tmp_list,
12245 max_cnt, pptr->port_id);
12246
12247 res = DDI_SUCCESS;
12248 break;
12249
12250 default:
12251 fcp_log(CE_WARN, pptr->port_dip,
12252 "!fcp%d: invalid port state at attach=0x%x",
12253 instance, pptr->port_phys_state);
12254
12255 mutex_enter(&pptr->port_mutex);
12256 pptr->port_phys_state = FCP_STATE_OFFLINE;
12257 mutex_exit(&pptr->port_mutex);
12258 res = DDI_SUCCESS;
12259
12260 break;
12261 }
12262
12263 if (tmp_list != NULL) {
12264 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
12265 }
12266
12267 return (res);
12268 }
12269
12270
12271 static void
12272 fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
12273 {
12274 pptr->port_fp_modlinkage = *pinfo->port_linkage;
12275 pptr->port_dip = pinfo->port_dip;
12276 pptr->port_fp_handle = pinfo->port_handle;
12277 if (pinfo->port_acc_attr != NULL) {
12278 /*
12279 * FCA supports DMA
12280 */
12281 pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
12282 pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
12283 pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
12284 pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
12285 }
12286 pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
12287 pptr->port_max_exch = pinfo->port_fca_max_exch;
12288 pptr->port_phys_state = pinfo->port_state;
12289 pptr->port_topology = pinfo->port_flags;
12290 pptr->port_reset_action = pinfo->port_reset_action;
12291 pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
12292 pptr->port_fcp_dma = pinfo->port_fcp_dma;
12293 bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
12294 bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
12295
12296 /* Clear FMA caps to avoid fm-capability ereport */
12297 if (pptr->port_cmd_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12298 pptr->port_cmd_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12299 if (pptr->port_data_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12300 pptr->port_data_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12301 if (pptr->port_resp_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12302 pptr->port_resp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12303 }
12304
12305 /*
12306 * If the elements wait field is set to 1 then
12307 * another thread is waiting for the operation to complete. Once
12308 * it is complete, the waiting thread is signaled and the element is
12309 * freed by the waiting thread. If the elements wait field is set to 0
12310 * the element is freed.
12311 */
12312 static void
12313 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12314 {
12315 ASSERT(elem != NULL);
12316 mutex_enter(&elem->mutex);
12317 elem->result = result;
12318 if (elem->wait) {
12319 elem->wait = 0;
12320 cv_signal(&elem->cv);
12321 mutex_exit(&elem->mutex);
12322 } else {
12323 mutex_exit(&elem->mutex);
12324 cv_destroy(&elem->cv);
12325 mutex_destroy(&elem->mutex);
12326 kmem_free(elem, sizeof (struct fcp_hp_elem));
12327 }
12328 }
12329
12330 /*
12331 * This function is invoked from the taskq thread to allocate
12332 * devinfo nodes and to online/offline them.
12333 */
12334 static void
12335 fcp_hp_task(void *arg)
12336 {
12337 struct fcp_hp_elem *elem = (struct fcp_hp_elem *)arg;
12338 struct fcp_lun *plun = elem->lun;
12339 struct fcp_port *pptr = elem->port;
12340 int result;
12341
12342 ASSERT(elem->what == FCP_ONLINE ||
12343 elem->what == FCP_OFFLINE ||
12344 elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
12345 elem->what == FCP_MPXIO_PATH_SET_BUSY);
12346
12347 mutex_enter(&pptr->port_mutex);
12348 mutex_enter(&plun->lun_mutex);
12349 if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
12350 plun->lun_event_count != elem->event_cnt) ||
12351 pptr->port_state & (FCP_STATE_SUSPENDED |
12352 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
12353 mutex_exit(&plun->lun_mutex);
12354 mutex_exit(&pptr->port_mutex);
12355 fcp_process_elem(elem, NDI_FAILURE);
12356 return;
12357 }
12358 mutex_exit(&plun->lun_mutex);
12359 mutex_exit(&pptr->port_mutex);
12360
12361 result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
12362 elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
12363 fcp_process_elem(elem, result);
12364 }
12365
12366
12367 static child_info_t *
12368 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12369 int tcount)
12370 {
12371 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12372
12373 if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12374 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12375
12376 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12377 /*
12378 * Child has not been created yet. Create the child device
12379 * based on the per-Lun flags.
12380 */
12381 if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12382 plun->lun_cip =
12383 CIP(fcp_create_dip(plun, lcount, tcount));
12384 plun->lun_mpxio = 0;
12385 } else {
12386 plun->lun_cip =
12387 CIP(fcp_create_pip(plun, lcount, tcount));
12388 plun->lun_mpxio = 1;
12389 }
12390 } else {
12391 plun->lun_cip = cip;
12392 }
12393
12394 return (plun->lun_cip);
12395 }
12396
12397
12398 static int
12399 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12400 {
12401 int rval = FC_FAILURE;
12402 dev_info_t *pdip;
12403 struct dev_info *dip;
12404 int circular;
12405
12406 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12407
12408 pdip = plun->lun_tgt->tgt_port->port_dip;
12409
12410 if (plun->lun_cip == NULL) {
12411 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12412 fcp_trace, FCP_BUF_LEVEL_3, 0,
12413 "fcp_is_dip_present: plun->lun_cip is NULL: "
12414 "plun: %p lun state: %x num: %d target state: %x",
12415 plun, plun->lun_state, plun->lun_num,
12416 plun->lun_tgt->tgt_port->port_state);
12417 return (rval);
12418 }
12419 ndi_devi_enter(pdip, &circular);
12420 dip = DEVI(pdip)->devi_child;
12421 while (dip) {
12422 if (dip == DEVI(cdip)) {
12423 rval = FC_SUCCESS;
12424 break;
12425 }
12426 dip = dip->devi_sibling;
12427 }
12428 ndi_devi_exit(pdip, circular);
12429 return (rval);
12430 }
12431
12432 static int
12433 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12434 {
12435 int rval = FC_FAILURE;
12436
12437 ASSERT(plun != NULL);
12438 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12439
12440 if (plun->lun_mpxio == 0) {
12441 rval = fcp_is_dip_present(plun, DIP(cip));
12442 } else {
12443 rval = fcp_is_pip_present(plun, PIP(cip));
12444 }
12445
12446 return (rval);
12447 }
12448
12449 /*
12450 * Function: fcp_create_dip
12451 *
12452 * Description: Creates a dev_info_t structure for the LUN specified by the
12453 * caller.
12454 *
12455 * Argument: plun Lun structure
12456 * link_cnt Link state count.
12457 * tgt_cnt Target state change count.
12458 *
12459 * Return Value: NULL if it failed
12460 * dev_info_t structure address if it succeeded
12461 *
12462 * Context: Kernel context
12463 */
12464 static dev_info_t *
12465 fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
12466 {
12467 int failure = 0;
12468 uint32_t tgt_id;
12469 uint64_t sam_lun;
12470 struct fcp_tgt *ptgt = plun->lun_tgt;
12471 struct fcp_port *pptr = ptgt->tgt_port;
12472 dev_info_t *pdip = pptr->port_dip;
12473 dev_info_t *cdip = NULL;
12474 dev_info_t *old_dip = DIP(plun->lun_cip);
12475 char *nname = NULL;
12476 char **compatible = NULL;
12477 int ncompatible;
12478 char *scsi_binding_set;
12479 char t_pwwn[17];
12480
12481 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12482 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12483
12484 /* get the 'scsi-binding-set' property */
12485 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
12486 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
12487 &scsi_binding_set) != DDI_PROP_SUCCESS) {
12488 scsi_binding_set = NULL;
12489 }
12490
12491 /* determine the node name and compatible */
12492 scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12493 plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12494 if (scsi_binding_set) {
12495 ddi_prop_free(scsi_binding_set);
12496 }
12497
12498 if (nname == NULL) {
12499 #ifdef DEBUG
12500 cmn_err(CE_WARN, "%s%d: no driver for "
12501 "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12502 " compatible: %s",
12503 ddi_driver_name(pdip), ddi_get_instance(pdip),
12504 ptgt->tgt_port_wwn.raw_wwn[0],
12505 ptgt->tgt_port_wwn.raw_wwn[1],
12506 ptgt->tgt_port_wwn.raw_wwn[2],
12507 ptgt->tgt_port_wwn.raw_wwn[3],
12508 ptgt->tgt_port_wwn.raw_wwn[4],
12509 ptgt->tgt_port_wwn.raw_wwn[5],
12510 ptgt->tgt_port_wwn.raw_wwn[6],
12511 ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12512 *compatible);
12513 #endif /* DEBUG */
12514 failure++;
12515 goto end_of_fcp_create_dip;
12516 }
12517
12518 cdip = fcp_find_existing_dip(plun, pdip, nname);
12519
12520 /*
12521 * if the old_dip does not match the cdip, that means there is
12522 * some property change. since we'll be using the cdip, we need
12523 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12524 * then the dtype for the device has been updated. Offline the
12525 * the old device and create a new device with the new device type
12526 * Refer to bug: 4764752
12527 */
12528 if (old_dip && (cdip != old_dip ||
12529 plun->lun_state & FCP_LUN_CHANGED)) {
12530 plun->lun_state &= ~(FCP_LUN_INIT);
12531 mutex_exit(&plun->lun_mutex);
12532 mutex_exit(&pptr->port_mutex);
12533
12534 mutex_enter(&ptgt->tgt_mutex);
12535 (void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
12536 link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
12537 mutex_exit(&ptgt->tgt_mutex);
12538
12539 #ifdef DEBUG
12540 if (cdip != NULL) {
12541 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12542 fcp_trace, FCP_BUF_LEVEL_2, 0,
12543 "Old dip=%p; New dip=%p don't match", old_dip,
12544 cdip);
12545 } else {
12546 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12547 fcp_trace, FCP_BUF_LEVEL_2, 0,
12548 "Old dip=%p; New dip=NULL don't match", old_dip);
12549 }
12550 #endif
12551
12552 mutex_enter(&pptr->port_mutex);
12553 mutex_enter(&plun->lun_mutex);
12554 }
12555
12556 if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12557 plun->lun_state &= ~(FCP_LUN_CHANGED);
12558 if (ndi_devi_alloc(pptr->port_dip, nname,
12559 DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
12560 failure++;
12561 goto end_of_fcp_create_dip;
12562 }
12563 }
12564
12565 /*
12566 * Previously all the properties for the devinfo were destroyed here
12567 * with a call to ndi_prop_remove_all(). Since this may cause loss of
12568 * the devid property (and other properties established by the target
12569 * driver or framework) which the code does not always recreate, this
12570 * call was removed.
12571 * This opens a theoretical possibility that we may return with a
12572 * stale devid on the node if the scsi entity behind the fibre channel
12573 * lun has changed.
12574 */
12575
12576 /* decorate the node with compatible */
12577 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
12578 "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
12579 failure++;
12580 goto end_of_fcp_create_dip;
12581 }
12582
12583 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
12584 ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12585 failure++;
12586 goto end_of_fcp_create_dip;
12587 }
12588
12589 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
12590 ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12591 failure++;
12592 goto end_of_fcp_create_dip;
12593 }
12594
12595 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12596 t_pwwn[16] = '\0';
12597 if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
12598 != DDI_PROP_SUCCESS) {
12599 failure++;
12600 goto end_of_fcp_create_dip;
12601 }
12602
12603 /*
12604 * If there is no hard address - We might have to deal with
12605 * that by using WWN - Having said that it is important to
12606 * recognize this problem early so ssd can be informed of
12607 * the right interconnect type.
12608 */
12609 if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
12610 tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12611 } else {
12612 tgt_id = ptgt->tgt_d_id;
12613 }
12614
12615 if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
12616 tgt_id) != DDI_PROP_SUCCESS) {
12617 failure++;
12618 goto end_of_fcp_create_dip;
12619 }
12620
12621 if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
12622 (int)plun->lun_num) != DDI_PROP_SUCCESS) {
12623 failure++;
12624 goto end_of_fcp_create_dip;
12625 }
12626 bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12627 if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
12628 sam_lun) != DDI_PROP_SUCCESS) {
12629 failure++;
12630 goto end_of_fcp_create_dip;
12631 }
12632
12633 end_of_fcp_create_dip:
12634 scsi_hba_nodename_compatible_free(nname, compatible);
12635
12636 if (cdip != NULL && failure) {
12637 (void) ndi_prop_remove_all(cdip);
12638 (void) ndi_devi_free(cdip);
12639 cdip = NULL;
12640 }
12641
12642 return (cdip);
12643 }
12644
12645 /*
12646 * Function: fcp_create_pip
12647 *
12648 * Description: Creates a Path Id for the LUN specified by the caller.
12649 *
12650 * Argument: plun Lun structure
12651 * link_cnt Link state count.
12652 * tgt_cnt Target state count.
12653 *
12654 * Return Value: NULL if it failed
12655 * mdi_pathinfo_t structure address if it succeeded
12656 *
12657 * Context: Kernel context
12658 */
12659 static mdi_pathinfo_t *
12660 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12661 {
12662 int i;
12663 char buf[MAXNAMELEN];
12664 char uaddr[MAXNAMELEN];
12665 int failure = 0;
12666 uint32_t tgt_id;
12667 uint64_t sam_lun;
12668 struct fcp_tgt *ptgt = plun->lun_tgt;
12669 struct fcp_port *pptr = ptgt->tgt_port;
12670 dev_info_t *pdip = pptr->port_dip;
12671 mdi_pathinfo_t *pip = NULL;
12672 mdi_pathinfo_t *old_pip = PIP(plun->lun_cip);
12673 char *nname = NULL;
12674 char **compatible = NULL;
12675 int ncompatible;
12676 char *scsi_binding_set;
12677 char t_pwwn[17];
12678
12679 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12680 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12681
12682 scsi_binding_set = "vhci";
12683
12684 /* determine the node name and compatible */
12685 scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12686 plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12687
12688 if (nname == NULL) {
12689 #ifdef DEBUG
12690 cmn_err(CE_WARN, "fcp_create_dip: %s%d: no driver for "
12691 "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12692 " compatible: %s",
12693 ddi_driver_name(pdip), ddi_get_instance(pdip),
12694 ptgt->tgt_port_wwn.raw_wwn[0],
12695 ptgt->tgt_port_wwn.raw_wwn[1],
12696 ptgt->tgt_port_wwn.raw_wwn[2],
12697 ptgt->tgt_port_wwn.raw_wwn[3],
12698 ptgt->tgt_port_wwn.raw_wwn[4],
12699 ptgt->tgt_port_wwn.raw_wwn[5],
12700 ptgt->tgt_port_wwn.raw_wwn[6],
12701 ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12702 *compatible);
12703 #endif /* DEBUG */
12704 failure++;
12705 goto end_of_fcp_create_pip;
12706 }
12707
12708 pip = fcp_find_existing_pip(plun, pdip);
12709
12710 /*
12711 * if the old_dip does not match the cdip, that means there is
12712 * some property change. since we'll be using the cdip, we need
12713 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12714 * then the dtype for the device has been updated. Offline the
12715 * the old device and create a new device with the new device type
12716 * Refer to bug: 4764752
12717 */
12718 if (old_pip && (pip != old_pip ||
12719 plun->lun_state & FCP_LUN_CHANGED)) {
12720 plun->lun_state &= ~(FCP_LUN_INIT);
12721 mutex_exit(&plun->lun_mutex);
12722 mutex_exit(&pptr->port_mutex);
12723
12724 mutex_enter(&ptgt->tgt_mutex);
12725 (void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12726 FCP_OFFLINE, lcount, tcount,
12727 NDI_DEVI_REMOVE, 0);
12728 mutex_exit(&ptgt->tgt_mutex);
12729
12730 if (pip != NULL) {
12731 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12732 fcp_trace, FCP_BUF_LEVEL_2, 0,
12733 "Old pip=%p; New pip=%p don't match",
12734 old_pip, pip);
12735 } else {
12736 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12737 fcp_trace, FCP_BUF_LEVEL_2, 0,
12738 "Old pip=%p; New pip=NULL don't match",
12739 old_pip);
12740 }
12741
12742 mutex_enter(&pptr->port_mutex);
12743 mutex_enter(&plun->lun_mutex);
12744 }
12745
12746 /*
12747 * Since FC_WWN_SIZE is 8 bytes and its not like the
12748 * lun_guid_size which is dependent on the target, I don't
12749 * believe the same trancation happens here UNLESS the standards
12750 * change the FC_WWN_SIZE value to something larger than
12751 * MAXNAMELEN(currently 255 bytes).
12752 */
12753
12754 for (i = 0; i < FC_WWN_SIZE; i++) {
12755 (void) sprintf(&buf[i << 1], "%02x",
12756 ptgt->tgt_port_wwn.raw_wwn[i]);
12757 }
12758
12759 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12760 buf, plun->lun_num);
12761
12762 if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12763 /*
12764 * Release the locks before calling into
12765 * mdi_pi_alloc_compatible() since this can result in a
12766 * callback into fcp which can result in a deadlock
12767 * (see bug # 4870272).
12768 *
12769 * Basically, what we are trying to avoid is the scenario where
12770 * one thread does ndi_devi_enter() and tries to grab
12771 * fcp_mutex and another does it the other way round.
12772 *
12773 * But before we do that, make sure that nobody releases the
12774 * port in the meantime. We can do this by setting a flag.
12775 */
12776 plun->lun_state &= ~(FCP_LUN_CHANGED);
12777 pptr->port_state |= FCP_STATE_IN_MDI;
12778 mutex_exit(&plun->lun_mutex);
12779 mutex_exit(&pptr->port_mutex);
12780 if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12781 uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12782 fcp_log(CE_WARN, pptr->port_dip,
12783 "!path alloc failed:0x%x", plun);
12784 mutex_enter(&pptr->port_mutex);
12785 mutex_enter(&plun->lun_mutex);
12786 pptr->port_state &= ~FCP_STATE_IN_MDI;
12787 failure++;
12788 goto end_of_fcp_create_pip;
12789 }
12790 mutex_enter(&pptr->port_mutex);
12791 mutex_enter(&plun->lun_mutex);
12792 pptr->port_state &= ~FCP_STATE_IN_MDI;
12793 } else {
12794 (void) mdi_prop_remove(pip, NULL);
12795 }
12796
12797 mdi_pi_set_phci_private(pip, (caddr_t)plun);
12798
12799 if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12800 ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12801 != DDI_PROP_SUCCESS) {
12802 failure++;
12803 goto end_of_fcp_create_pip;
12804 }
12805
12806 if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12807 ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12808 != DDI_PROP_SUCCESS) {
12809 failure++;
12810 goto end_of_fcp_create_pip;
12811 }
12812
12813 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12814 t_pwwn[16] = '\0';
12815 if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12816 != DDI_PROP_SUCCESS) {
12817 failure++;
12818 goto end_of_fcp_create_pip;
12819 }
12820
12821 /*
12822 * If there is no hard address - We might have to deal with
12823 * that by using WWN - Having said that it is important to
12824 * recognize this problem early so ssd can be informed of
12825 * the right interconnect type.
12826 */
12827 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12828 ptgt->tgt_hard_addr != 0) {
12829 tgt_id = (uint32_t)
12830 fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12831 } else {
12832 tgt_id = ptgt->tgt_d_id;
12833 }
12834
12835 if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12836 != DDI_PROP_SUCCESS) {
12837 failure++;
12838 goto end_of_fcp_create_pip;
12839 }
12840
12841 if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12842 != DDI_PROP_SUCCESS) {
12843 failure++;
12844 goto end_of_fcp_create_pip;
12845 }
12846 bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12847 if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12848 != DDI_PROP_SUCCESS) {
12849 failure++;
12850 goto end_of_fcp_create_pip;
12851 }
12852
12853 end_of_fcp_create_pip:
12854 scsi_hba_nodename_compatible_free(nname, compatible);
12855
12856 if (pip != NULL && failure) {
12857 (void) mdi_prop_remove(pip, NULL);
12858 mutex_exit(&plun->lun_mutex);
12859 mutex_exit(&pptr->port_mutex);
12860 (void) mdi_pi_free(pip, 0);
12861 mutex_enter(&pptr->port_mutex);
12862 mutex_enter(&plun->lun_mutex);
12863 pip = NULL;
12864 }
12865
12866 return (pip);
12867 }
12868
/*
 * Search the port's existing children for a devinfo node matching this
 * LUN: same node name, same node and port WWNs, same target id and
 * same LUN number.  Returns the matching dev_info_t, or NULL when the
 * walk exhausts the child list without a match.
 */
static dev_info_t *
fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
{
	uint_t			nbytes;
	uchar_t			*bytes;
	uint_t			nwords;
	uint32_t		tgt_id;
	int			*words;
	dev_info_t		*cdip;
	dev_info_t		*ndip;
	struct fcp_tgt		*ptgt = plun->lun_tgt;
	struct fcp_port		*pptr = ptgt->tgt_port;
	int			circular;

	ndi_devi_enter(pdip, &circular);

	/*
	 * Walk the sibling list; each `continue' below rejects the
	 * current candidate (after freeing any looked-up property) and
	 * moves to the next child.
	 */
	ndip = (dev_info_t *)DEVI(pdip)->devi_child;
	while ((cdip = ndip) != NULL) {
		ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;

		/* node name must match */
		if (strcmp(DEVI(cdip)->devi_node_name, name)) {
			continue;
		}

		/* node WWN must be present, 8 bytes, and equal */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
		    &nbytes) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
			if (bytes != NULL) {
				ddi_prop_free(bytes);
			}
			continue;
		}
		ASSERT(bytes != NULL);

		if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
			ddi_prop_free(bytes);
			continue;
		}

		ddi_prop_free(bytes);

		/* port WWN must be present, 8 bytes, and equal */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
		    &nbytes) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
			if (bytes != NULL) {
				ddi_prop_free(bytes);
			}
			continue;
		}
		ASSERT(bytes != NULL);

		if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
			ddi_prop_free(bytes);
			continue;
		}

		ddi_prop_free(bytes);

		/* target id property must match the expected value */
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
		    &nwords) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nwords != 1 || words == NULL) {
			if (words != NULL) {
				ddi_prop_free(words);
			}
			continue;
		}
		ASSERT(words != NULL);

		/*
		 * If there is no hard address - We might have to deal with
		 * that by using WWN - Having said that it is important to
		 * recognize this problem early so ssd can be informed of
		 * the right interconnect type.
		 */
		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
		    ptgt->tgt_hard_addr != 0) {
			tgt_id =
			    (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
		} else {
			tgt_id = ptgt->tgt_d_id;
		}

		if (tgt_id != (uint32_t)*words) {
			ddi_prop_free(words);
			continue;
		}
		ddi_prop_free(words);

		/* finally, the LUN number must match */
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
		    &nwords) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nwords != 1 || words == NULL) {
			if (words != NULL) {
				ddi_prop_free(words);
			}
			continue;
		}
		ASSERT(words != NULL);

		if (plun->lun_num == (uint16_t)*words) {
			ddi_prop_free(words);
			/* full match: cdip still points at this child */
			break;
		}
		ddi_prop_free(words);
	}
	ndi_devi_exit(pdip, circular);

	return (cdip);
}
12993
12994
12995 static int
12996 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
12997 {
12998 dev_info_t *pdip;
12999 char buf[MAXNAMELEN];
13000 char uaddr[MAXNAMELEN];
13001 int rval = FC_FAILURE;
13002
13003 ASSERT(MUTEX_HELD(&plun->lun_mutex));
13004
13005 pdip = plun->lun_tgt->tgt_port->port_dip;
13006
13007 /*
13008 * Check if pip (and not plun->lun_cip) is NULL. plun->lun_cip can be
13009 * non-NULL even when the LUN is not there as in the case when a LUN is
13010 * configured and then deleted on the device end (for T3/T4 case). In
13011 * such cases, pip will be NULL.
13012 *
13013 * If the device generates an RSCN, it will end up getting offlined when
13014 * it disappeared and a new LUN will get created when it is rediscovered
13015 * on the device. If we check for lun_cip here, the LUN will not end
13016 * up getting onlined since this function will end up returning a
13017 * FC_SUCCESS.
13018 *
13019 * The behavior is different on other devices. For instance, on a HDS,
13020 * there was no RSCN generated by the device but the next I/O generated
13021 * a check condition and rediscovery got triggered that way. So, in
13022 * such cases, this path will not be exercised
13023 */
13024 if (pip == NULL) {
13025 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
13026 fcp_trace, FCP_BUF_LEVEL_4, 0,
13027 "fcp_is_pip_present: plun->lun_cip is NULL: "
13028 "plun: %p lun state: %x num: %d target state: %x",
13029 plun, plun->lun_state, plun->lun_num,
13030 plun->lun_tgt->tgt_port->port_state);
13031 return (rval);
13032 }
13033
13034 fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
13035
13036 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13037
13038 if (plun->lun_old_guid) {
13039 if (mdi_pi_find(pdip, plun->lun_old_guid, uaddr) == pip) {
13040 rval = FC_SUCCESS;
13041 }
13042 } else {
13043 if (mdi_pi_find(pdip, plun->lun_guid, uaddr) == pip) {
13044 rval = FC_SUCCESS;
13045 }
13046 }
13047 return (rval);
13048 }
13049
13050 static mdi_pathinfo_t *
13051 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
13052 {
13053 char buf[MAXNAMELEN];
13054 char uaddr[MAXNAMELEN];
13055 mdi_pathinfo_t *pip;
13056 struct fcp_tgt *ptgt = plun->lun_tgt;
13057 struct fcp_port *pptr = ptgt->tgt_port;
13058
13059 ASSERT(MUTEX_HELD(&pptr->port_mutex));
13060
13061 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
13062 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13063
13064 pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
13065
13066 return (pip);
13067 }
13068
13069
/*
 * Function: fcp_online_child
 *
 * Description: Onlines the child node of a LUN.  For a non-MPxIO LUN the
 *		child is a devinfo node and ndi_devi_online() (or
 *		ndi_devi_bind_driver() if the parent is not yet attached)
 *		is used.  For an MPxIO LUN the child is a pathinfo node and
 *		mdi_pi_online() is used; if MDI reports MDI_NOT_SUPPORTED
 *		the LUN falls back to legacy (non-MPxIO) enumeration and
 *		the online is retried from the top (goto again).
 *
 * Argument:	*plun	LUN whose child is to be onlined.
 *		*cip	Child node (devinfo or pathinfo, per lun_mpxio).
 *		lcount	Link state change counter (for fcp_create_dip).
 *		tcount	Target state change counter (for fcp_create_dip).
 *		flags	NDI/MDI online flags.
 *		*circ	pHCI circular-dependency cookie, needed to exit and
 *			re-enter the pHCI around mdi_pi_online().
 *
 * Return Value: NDI_SUCCESS or NDI_FAILURE.
 *
 * Context:	Called with both port_mutex and lun_mutex held; both are
 *		dropped around the online calls and re-acquired before
 *		returning, so port/target/LUN state may have changed by the
 *		time this function returns.
 */
static int
fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags, int *circ)
{
	int rval;
	struct fcp_port *pptr = plun->lun_tgt->tgt_port;
	struct fcp_tgt *ptgt = plun->lun_tgt;
	dev_info_t *cdip = NULL;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));
	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	/* Nothing to online if the LUN has no child node. */
	if (plun->lun_cip == NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_online_child: plun->lun_cip is NULL: "
		    "plun: %p state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (NDI_FAILURE);
	}
again:
	if (plun->lun_mpxio == 0) {
		/* Legacy path: child is a plain devinfo node. */
		cdip = DIP(cip);
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "!Invoking ndi_devi_online for %s: target=%x lun=%x",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);

		/*
		 * We could check for FCP_LUN_INIT here but chances
		 * of getting here when it's already in FCP_LUN_INIT
		 * is rare and a duplicate ndi_devi_online wouldn't
		 * hurt either (as the node would already have been
		 * in CF2)
		 */
		if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
			/* Parent not attached yet: bind only, don't online. */
			rval = ndi_devi_bind_driver(cdip, flags);
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "!Invoking ndi_devi_bind_driver: rval=%d", rval);
		} else {
			rval = ndi_devi_online(cdip, flags);
		}

		/*
		 * We log the message into trace buffer if the device
		 * is "ses" and into syslog for any other device
		 * type. This is to prevent the ndi_devi_online failure
		 * message that appears for V880/A5K ses devices.
		 */
		if (rval == NDI_SUCCESS) {
			mutex_enter(&ptgt->tgt_mutex);
			plun->lun_state |= FCP_LUN_INIT;
			mutex_exit(&ptgt->tgt_mutex);
		} else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
			fcp_log(CE_NOTE, pptr->port_dip,
			    "!ndi_devi_online:"
			    " failed for %s: target=%x lun=%x %x",
			    ddi_get_name(cdip), ptgt->tgt_d_id,
			    plun->lun_num, rval);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    " !ndi_devi_online:"
			    " failed for %s: target=%x lun=%x %x",
			    ddi_get_name(cdip), ptgt->tgt_d_id,
			    plun->lun_num, rval);
		}
	} else {
		/* MPxIO path: child is a pathinfo node. */
		cdip = mdi_pi_get_client(PIP(cip));
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "!Invoking mdi_pi_online for %s: target=%x lun=%x",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);

		/*
		 * Hold path and exit phci to avoid deadlock with power
		 * management code during mdi_pi_online.
		 */
		mdi_hold_path(PIP(cip));
		mdi_devi_exit_phci(pptr->port_dip, *circ);

		rval = mdi_pi_online(PIP(cip), flags);

		mdi_devi_enter_phci(pptr->port_dip, circ);
		mdi_rele_path(PIP(cip));

		if (rval == MDI_SUCCESS) {
			mutex_enter(&ptgt->tgt_mutex);
			plun->lun_state |= FCP_LUN_INIT;
			mutex_exit(&ptgt->tgt_mutex);

			/*
			 * Clear MPxIO path permanent disable in case
			 * fcp hotplug dropped the offline event.
			 */
			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);

		} else if (rval == MDI_NOT_SUPPORTED) {
			child_info_t *old_cip = cip;

			/*
			 * MPxIO does not support this device yet.
			 * Enumerate in legacy mode: create a devinfo node
			 * to replace the pathinfo node and retry the online
			 * from the top.
			 */
			mutex_enter(&pptr->port_mutex);
			mutex_enter(&plun->lun_mutex);
			plun->lun_mpxio = 0;
			plun->lun_cip = NULL;
			cdip = fcp_create_dip(plun, lcount, tcount);
			plun->lun_cip = cip = CIP(cdip);
			if (cip == NULL) {
				fcp_log(CE_WARN, pptr->port_dip,
				    "!fcp_online_child: "
				    "Create devinfo failed for LU=%p", plun);
				mutex_exit(&plun->lun_mutex);

				/* Couldn't build a devinfo node: offline. */
				mutex_enter(&ptgt->tgt_mutex);
				plun->lun_state |= FCP_LUN_OFFLINE;
				mutex_exit(&ptgt->tgt_mutex);

				mutex_exit(&pptr->port_mutex);

				/*
				 * free the mdi_pathinfo node
				 */
				(void) mdi_pi_free(PIP(old_cip), 0);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_3, 0,
				    "fcp_online_child: creating devinfo "
				    "node 0x%p for plun 0x%p",
				    cip, plun);
				mutex_exit(&plun->lun_mutex);
				mutex_exit(&pptr->port_mutex);
				/*
				 * free the mdi_pathinfo node
				 */
				(void) mdi_pi_free(PIP(old_cip), 0);
				mutex_enter(&pptr->port_mutex);
				mutex_enter(&plun->lun_mutex);
				/* Retry the online as a legacy child. */
				goto again;
			}
		} else {
			if (cdip) {
				fcp_log(CE_NOTE, pptr->port_dip,
				    "!fcp_online_child: mdi_pi_online:"
				    " failed for %s: target=%x lun=%x %x",
				    ddi_get_name(cdip), ptgt->tgt_d_id,
				    plun->lun_num, rval);
			}
		}
		/* Map the MDI return value onto the NDI convention. */
		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
	}

	if (rval == NDI_SUCCESS) {
		if (cdip) {
			/* Deliver the insertion event to the new child. */
			(void) ndi_event_retrieve_cookie(
			    pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
			    &fcp_insert_eid, NDI_EVENT_NOPASS);
			(void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
			    cdip, fcp_insert_eid, NULL);
		}
	}
	/* Re-acquire the mutexes held on entry before returning. */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);
	return (rval);
}
13245
/*
 * Function: fcp_offline_child
 *
 * Description: Offlines the child node of a LUN.  Non-MPxIO children go
 *		through ndi_devi_offline() with NDI_DEVFS_CLEAN; MPxIO
 *		children go through mdi_pi_offline() with the pHCI exited
 *		around the call.  On success the FCP_LUN_INIT flag is
 *		cleared and, when NDI_DEVI_REMOVE is set, the LUN's
 *		reference to the child (and any saved old GUID) is dropped
 *		and the pathinfo node is freed.
 *
 * Argument:	*plun	LUN whose child is to be offlined.
 *		*cip	Child node (devinfo or pathinfo, per lun_mpxio).
 *		lcount	Unused (see ARGSUSED).
 *		tcount	Unused (see ARGSUSED).
 *		flags	NDI/MDI offline flags; NDI_DEVI_REMOVE requests
 *			removal of the child node as well.
 *		*circ	pHCI circular-dependency cookie for the MDI path.
 *
 * Return Value: NDI_SUCCESS or NDI_FAILURE.
 *
 * Context:	Called with both lun_mutex and port_mutex held; both are
 *		dropped around the offline calls and re-acquired before
 *		returning.
 */
/* ARGSUSED */
static int
fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags, int *circ)
{
	int rval;
	int lun_mpxio;
	struct fcp_port *pptr = plun->lun_tgt->tgt_port;
	struct fcp_tgt *ptgt = plun->lun_tgt;
	dev_info_t *cdip;

	ASSERT(MUTEX_HELD(&plun->lun_mutex));
	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	if (plun->lun_cip == NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_offline_child: plun->lun_cip is NULL: "
		    "plun: %p lun state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (NDI_FAILURE);
	}

	/*
	 * We will use this value twice. Make a copy to be sure we use
	 * the same value in both places.
	 */
	lun_mpxio = plun->lun_mpxio;

	if (lun_mpxio == 0) {
		/* Legacy path: child is a plain devinfo node. */
		cdip = DIP(cip);
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);
		rval = ndi_devi_offline(DIP(cip), NDI_DEVFS_CLEAN | flags);
		if (rval != NDI_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_offline_child: ndi_devi_offline failed "
			    "rval=%x cip=%p", rval, cip);
		}
	} else {
		/* MPxIO path: child is a pathinfo node. */
		cdip = mdi_pi_get_client(PIP(cip));
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		/*
		 * Exit phci to avoid deadlock with power management code
		 * during mdi_pi_offline
		 */
		mdi_hold_path(PIP(cip));
		mdi_devi_exit_phci(pptr->port_dip, *circ);

		rval = mdi_pi_offline(PIP(cip), flags);

		mdi_devi_enter_phci(pptr->port_dip, circ);
		mdi_rele_path(PIP(cip));

		/* Map the MDI return value onto the NDI convention. */
		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
	}

	/* The child is (attempting to be) offline: it is no longer INIT. */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_state &= ~FCP_LUN_INIT;
	mutex_exit(&ptgt->tgt_mutex);

	if (rval == NDI_SUCCESS) {
		/* NULL cdip so the failure message below is not logged. */
		cdip = NULL;
		if (flags & NDI_DEVI_REMOVE) {
			mutex_enter(&plun->lun_mutex);
			/*
			 * If the guid of the LUN changes, lun_cip will not
			 * equal to cip, and after offlining the LUN with the
			 * old guid, we should keep lun_cip since it's the cip
			 * of the LUN with the new guid.
			 * Otherwise remove our reference to child node.
			 *
			 * This must be done before the child node is freed,
			 * otherwise other threads could see a stale lun_cip
			 * pointer.
			 */
			if (plun->lun_cip == cip) {
				plun->lun_cip = NULL;
			}
			if (plun->lun_old_guid) {
				kmem_free(plun->lun_old_guid,
				    plun->lun_old_guid_size);
				plun->lun_old_guid = NULL;
				plun->lun_old_guid_size = 0;
			}
			mutex_exit(&plun->lun_mutex);
		}
	}

	if (lun_mpxio != 0) {
		if (rval == NDI_SUCCESS) {
			/*
			 * Clear MPxIO path permanent disable as the path is
			 * already offlined.
			 */
			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);

			if (flags & NDI_DEVI_REMOVE) {
				(void) mdi_pi_free(PIP(cip), 0);
			}
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_offline_child: mdi_pi_offline failed "
			    "rval=%x cip=%p", rval, cip);
		}
	}

	/* Re-acquire the mutexes held on entry before returning. */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);

	/* cdip is only non-NULL here when the offline attempt failed. */
	if (cdip) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
		    " target=%x lun=%x", "ndi_offline",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
	}

	return (rval);
}
13370
/*
 * Function: fcp_remove_child
 *
 * Description: Removes the child node of a LUN and clears lun_cip.  A
 *		devinfo child has its properties removed and is freed; a
 *		pathinfo child is offlined (NDI_DEVI_REMOVE) and freed,
 *		with all three FCP mutexes dropped around the MDI calls.
 *		If the child is not actually present, only the lun_cip
 *		reference is cleared.
 *
 * Argument:	*plun	LUN whose child is to be removed.
 *
 * Context:	Called with lun_mutex held; in the MPxIO branch the
 *		port_mutex, tgt_mutex and lun_mutex are all dropped and
 *		re-acquired (in port -> tgt -> lun order) around the MDI
 *		offline/free calls.
 */
static void
fcp_remove_child(struct fcp_lun *plun)
{
	child_info_t *cip;
	int circ;

	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
		if (plun->lun_mpxio == 0) {
			/* Legacy devinfo child: strip properties and free. */
			(void) ndi_prop_remove_all(DIP(plun->lun_cip));
			(void) ndi_devi_free(DIP(plun->lun_cip));
			plun->lun_cip = NULL;
		} else {
			/*
			 * Clear reference to the child node in the lun.
			 * This must be done before freeing it with mdi_pi_free
			 * and with lun_mutex held so that other threads always
			 * see either valid lun_cip or NULL when holding
			 * lun_mutex. We keep a copy in cip.
			 */
			cip = plun->lun_cip;
			plun->lun_cip = NULL;

			/* Drop all three locks before calling into MDI. */
			mutex_exit(&plun->lun_mutex);
			mutex_exit(&plun->lun_tgt->tgt_mutex);
			mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);

			mdi_devi_enter(
			    plun->lun_tgt->tgt_port->port_dip, &circ);

			/*
			 * Exit phci to avoid deadlock with power management
			 * code during mdi_pi_offline
			 */
			mdi_hold_path(PIP(cip));
			mdi_devi_exit_phci(
			    plun->lun_tgt->tgt_port->port_dip, circ);
			(void) mdi_pi_offline(PIP(cip),
			    NDI_DEVI_REMOVE);
			mdi_devi_enter_phci(
			    plun->lun_tgt->tgt_port->port_dip, &circ);
			mdi_rele_path(PIP(cip));

			mdi_devi_exit(
			    plun->lun_tgt->tgt_port->port_dip, circ);

			FCP_TRACE(fcp_logq,
			    plun->lun_tgt->tgt_port->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "lun=%p pip freed %p", plun, cip);

			(void) mdi_prop_remove(PIP(cip), NULL);
			(void) mdi_pi_free(PIP(cip), 0);

			/* Re-acquire in port -> tgt -> lun order. */
			mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
			mutex_enter(&plun->lun_tgt->tgt_mutex);
			mutex_enter(&plun->lun_mutex);
		}
	} else {
		/* Child already gone: just drop the stale reference. */
		plun->lun_cip = NULL;
	}
}
13434
13435 /*
13436 * called when a timeout occurs
13437 *
13438 * can be scheduled during an attach or resume (if not already running)
13439 *
13440 * one timeout is set up for all ports
13441 *
13442 * acquires and releases the global mutex
13443 */
13444 /*ARGSUSED*/
13445 static void
13446 fcp_watch(void *arg)
13447 {
13448 struct fcp_port *pptr;
13449 struct fcp_ipkt *icmd;
13450 struct fcp_ipkt *nicmd;
13451 struct fcp_pkt *cmd;
13452 struct fcp_pkt *ncmd;
13453 struct fcp_pkt *tail;
13454 struct fcp_pkt *pcmd;
13455 struct fcp_pkt *save_head;
13456 struct fcp_port *save_port;
13457
13458 /* increment global watchdog time */
13459 fcp_watchdog_time += fcp_watchdog_timeout;
13460
13461 mutex_enter(&fcp_global_mutex);
13462
13463 /* scan each port in our list */
13464 for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
13465 save_port = fcp_port_head;
13466 pptr->port_state |= FCP_STATE_IN_WATCHDOG;
13467 mutex_exit(&fcp_global_mutex);
13468
13469 mutex_enter(&pptr->port_mutex);
13470 if (pptr->port_ipkt_list == NULL &&
13471 (pptr->port_state & (FCP_STATE_SUSPENDED |
13472 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
13473 pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13474 mutex_exit(&pptr->port_mutex);
13475 mutex_enter(&fcp_global_mutex);
13476 goto end_of_watchdog;
13477 }
13478
13479 /*
13480 * We check if a list of targets need to be offlined.
13481 */
13482 if (pptr->port_offline_tgts) {
13483 fcp_scan_offline_tgts(pptr);
13484 }
13485
13486 /*
13487 * We check if a list of luns need to be offlined.
13488 */
13489 if (pptr->port_offline_luns) {
13490 fcp_scan_offline_luns(pptr);
13491 }
13492
13493 /*
13494 * We check if a list of targets or luns need to be reset.
13495 */
13496 if (pptr->port_reset_list) {
13497 fcp_check_reset_delay(pptr);
13498 }
13499
13500 mutex_exit(&pptr->port_mutex);
13501
13502 /*
13503 * This is where the pending commands (pkt) are checked for
13504 * timeout.
13505 */
13506 mutex_enter(&pptr->port_pkt_mutex);
13507 tail = pptr->port_pkt_tail;
13508
13509 for (pcmd = NULL, cmd = pptr->port_pkt_head;
13510 cmd != NULL; cmd = ncmd) {
13511 ncmd = cmd->cmd_next;
13512 /*
13513 * If a command is in this queue the bit CFLAG_IN_QUEUE
13514 * must be set.
13515 */
13516 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
13517 /*
13518 * FCP_INVALID_TIMEOUT will be set for those
13519 * command that need to be failed. Mostly those
13520 * cmds that could not be queued down for the
13521 * "timeout" value. cmd->cmd_timeout is used
13522 * to try and requeue the command regularly.
13523 */
13524 if (cmd->cmd_timeout >= fcp_watchdog_time) {
13525 /*
13526 * This command hasn't timed out yet. Let's
13527 * go to the next one.
13528 */
13529 pcmd = cmd;
13530 goto end_of_loop;
13531 }
13532
13533 if (cmd == pptr->port_pkt_head) {
13534 ASSERT(pcmd == NULL);
13535 pptr->port_pkt_head = cmd->cmd_next;
13536 } else {
13537 ASSERT(pcmd != NULL);
13538 pcmd->cmd_next = cmd->cmd_next;
13539 }
13540
13541 if (cmd == pptr->port_pkt_tail) {
13542 ASSERT(cmd->cmd_next == NULL);
13543 pptr->port_pkt_tail = pcmd;
13544 if (pcmd) {
13545 pcmd->cmd_next = NULL;
13546 }
13547 }
13548 cmd->cmd_next = NULL;
13549
13550 /*
13551 * save the current head before dropping the
13552 * mutex - If the head doesn't remain the
13553 * same after re acquiring the mutex, just
13554 * bail out and revisit on next tick.
13555 *
13556 * PS: The tail pointer can change as the commands
13557 * get requeued after failure to retransport
13558 */
13559 save_head = pptr->port_pkt_head;
13560 mutex_exit(&pptr->port_pkt_mutex);
13561
13562 if (cmd->cmd_fp_pkt->pkt_timeout ==
13563 FCP_INVALID_TIMEOUT) {
13564 struct scsi_pkt *pkt = cmd->cmd_pkt;
13565 struct fcp_lun *plun;
13566 struct fcp_tgt *ptgt;
13567
13568 plun = ADDR2LUN(&pkt->pkt_address);
13569 ptgt = plun->lun_tgt;
13570
13571 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13572 fcp_trace, FCP_BUF_LEVEL_2, 0,
13573 "SCSI cmd 0x%x to D_ID=%x timed out",
13574 pkt->pkt_cdbp[0], ptgt->tgt_d_id);
13575
13576 cmd->cmd_state == FCP_PKT_ABORTING ?
13577 fcp_fail_cmd(cmd, CMD_RESET,
13578 STAT_DEV_RESET) : fcp_fail_cmd(cmd,
13579 CMD_TIMEOUT, STAT_ABORTED);
13580 } else {
13581 fcp_retransport_cmd(pptr, cmd);
13582 }
13583 mutex_enter(&pptr->port_pkt_mutex);
13584 if (save_head && save_head != pptr->port_pkt_head) {
13585 /*
13586 * Looks like linked list got changed (mostly
13587 * happens when an an OFFLINE LUN code starts
13588 * returning overflow queue commands in
13589 * parallel. So bail out and revisit during
13590 * next tick
13591 */
13592 break;
13593 }
13594 end_of_loop:
13595 /*
13596 * Scan only upto the previously known tail pointer
13597 * to avoid excessive processing - lots of new packets
13598 * could have been added to the tail or the old ones
13599 * re-queued.
13600 */
13601 if (cmd == tail) {
13602 break;
13603 }
13604 }
13605 mutex_exit(&pptr->port_pkt_mutex);
13606
13607 mutex_enter(&pptr->port_mutex);
13608 for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
13609 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
13610
13611 nicmd = icmd->ipkt_next;
13612 if ((icmd->ipkt_restart != 0) &&
13613 (icmd->ipkt_restart >= fcp_watchdog_time)) {
13614 /* packet has not timed out */
13615 continue;
13616 }
13617
13618 /* time for packet re-transport */
13619 if (icmd == pptr->port_ipkt_list) {
13620 pptr->port_ipkt_list = icmd->ipkt_next;
13621 if (pptr->port_ipkt_list) {
13622 pptr->port_ipkt_list->ipkt_prev =
13623 NULL;
13624 }
13625 } else {
13626 icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
13627 if (icmd->ipkt_next) {
13628 icmd->ipkt_next->ipkt_prev =
13629 icmd->ipkt_prev;
13630 }
13631 }
13632 icmd->ipkt_next = NULL;
13633 icmd->ipkt_prev = NULL;
13634 mutex_exit(&pptr->port_mutex);
13635
13636 if (fcp_is_retryable(icmd)) {
13637 fc_ulp_rscn_info_t *rscnp =
13638 (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
13639 pkt_ulp_rscn_infop;
13640
13641 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13642 fcp_trace, FCP_BUF_LEVEL_2, 0,
13643 "%x to D_ID=%x Retrying..",
13644 icmd->ipkt_opcode,
13645 icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);
13646
13647 /*
13648 * Update the RSCN count in the packet
13649 * before resending.
13650 */
13651
13652 if (rscnp != NULL) {
13653 rscnp->ulp_rscn_count =
13654 fc_ulp_get_rscn_count(pptr->
13655 port_fp_handle);
13656 }
13657
13658 mutex_enter(&pptr->port_mutex);
13659 mutex_enter(&ptgt->tgt_mutex);
13660 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
13661 mutex_exit(&ptgt->tgt_mutex);
13662 mutex_exit(&pptr->port_mutex);
13663 switch (icmd->ipkt_opcode) {
13664 int rval;
13665 case LA_ELS_PLOGI:
13666 if ((rval = fc_ulp_login(
13667 pptr->port_fp_handle,
13668 &icmd->ipkt_fpkt, 1)) ==
13669 FC_SUCCESS) {
13670 mutex_enter(
13671 &pptr->port_mutex);
13672 continue;
13673 }
13674 if (fcp_handle_ipkt_errors(
13675 pptr, ptgt, icmd, rval,
13676 "PLOGI") == DDI_SUCCESS) {
13677 mutex_enter(
13678 &pptr->port_mutex);
13679 continue;
13680 }
13681 break;
13682
13683 case LA_ELS_PRLI:
13684 if ((rval = fc_ulp_issue_els(
13685 pptr->port_fp_handle,
13686 icmd->ipkt_fpkt)) ==
13687 FC_SUCCESS) {
13688 mutex_enter(
13689 &pptr->port_mutex);
13690 continue;
13691 }
13692 if (fcp_handle_ipkt_errors(
13693 pptr, ptgt, icmd, rval,
13694 "PRLI") == DDI_SUCCESS) {
13695 mutex_enter(
13696 &pptr->port_mutex);
13697 continue;
13698 }
13699 break;
13700
13701 default:
13702 if ((rval = fcp_transport(
13703 pptr->port_fp_handle,
13704 icmd->ipkt_fpkt, 1)) ==
13705 FC_SUCCESS) {
13706 mutex_enter(
13707 &pptr->port_mutex);
13708 continue;
13709 }
13710 if (fcp_handle_ipkt_errors(
13711 pptr, ptgt, icmd, rval,
13712 "PRLI") == DDI_SUCCESS) {
13713 mutex_enter(
13714 &pptr->port_mutex);
13715 continue;
13716 }
13717 break;
13718 }
13719 } else {
13720 mutex_exit(&ptgt->tgt_mutex);
13721 mutex_exit(&pptr->port_mutex);
13722 }
13723 } else {
13724 fcp_print_error(icmd->ipkt_fpkt);
13725 }
13726
13727 (void) fcp_call_finish_init(pptr, ptgt,
13728 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
13729 icmd->ipkt_cause);
13730 fcp_icmd_free(pptr, icmd);
13731 mutex_enter(&pptr->port_mutex);
13732 }
13733
13734 pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13735 mutex_exit(&pptr->port_mutex);
13736 mutex_enter(&fcp_global_mutex);
13737
13738 end_of_watchdog:
13739 /*
13740 * Bail out early before getting into trouble
13741 */
13742 if (save_port != fcp_port_head) {
13743 break;
13744 }
13745 }
13746
13747 if (fcp_watchdog_init > 0) {
13748 /* reschedule timeout to go again */
13749 fcp_watchdog_id =
13750 timeout(fcp_watch, NULL, fcp_watchdog_tick);
13751 }
13752 mutex_exit(&fcp_global_mutex);
13753 }
13754
13755
/*
 * Function: fcp_check_reset_delay
 *
 * Description: Walks the port's reset-delay list.  Selected elements are
 *		unlinked and freed; if the target's change counter still
 *		matches the one recorded at reset time, the target (or LUN)
 *		has its FCP_LUN_BUSY state cleared via FCP_RESET and any
 *		matching queued commands are aborted via fcp_abort_all().
 *
 * Argument:	*pptr	Port whose reset list is to be processed.
 *
 * Context:	Called from fcp_watch() with port_mutex held; the mutex is
 *		dropped and re-acquired around fcp_abort_all(), so the list
 *		head is re-derived after each drop.
 */
static void
fcp_check_reset_delay(struct fcp_port *pptr)
{
	uint32_t tgt_cnt;
	int level;
	struct fcp_tgt *ptgt;
	struct fcp_lun *plun;
	struct fcp_reset_elem *cur = NULL;
	struct fcp_reset_elem *next = NULL;
	struct fcp_reset_elem *prev = NULL;

	ASSERT(mutex_owned(&pptr->port_mutex));

	next = pptr->port_reset_list;
	while ((cur = next) != NULL) {
		next = cur->next;

		/*
		 * NOTE(review): elements are kept on the list when
		 * cur->timeout < fcp_watchdog_time and processed otherwise.
		 * This is the opposite direction of the expiry convention
		 * used in fcp_watch() (cmd_timeout >= fcp_watchdog_time
		 * means "not yet expired") - confirm the intended
		 * comparison against where cur->timeout is assigned.
		 */
		if (cur->timeout < fcp_watchdog_time) {
			prev = cur;
			continue;
		}

		ptgt = cur->tgt;
		plun = cur->lun;
		tgt_cnt = cur->tgt_cnt;

		/* A reset element carries either a target or a LUN. */
		if (ptgt) {
			level = RESET_TARGET;
		} else {
			ASSERT(plun != NULL);
			level = RESET_LUN;
			ptgt = plun->lun_tgt;
		}
		if (prev) {
			prev->next = next;
		} else {
			/*
			 * Because we drop port mutex while doing aborts for
			 * packets, we can't rely on reset_list pointing to
			 * our head
			 */
			if (cur == pptr->port_reset_list) {
				pptr->port_reset_list = next;
			} else {
				struct fcp_reset_elem *which;

				/* Re-find cur's predecessor in the list. */
				which = pptr->port_reset_list;
				while (which && which->next != cur) {
					which = which->next;
				}
				ASSERT(which != NULL);

				which->next = next;
				prev = which;
			}
		}

		kmem_free(cur, sizeof (*cur));

		/* Only act if the target hasn't changed since the reset. */
		if (tgt_cnt == ptgt->tgt_change_cnt) {
			mutex_enter(&ptgt->tgt_mutex);
			if (level == RESET_TARGET) {
				fcp_update_tgt_state(ptgt,
				    FCP_RESET, FCP_LUN_BUSY);
			} else {
				fcp_update_lun_state(plun,
				    FCP_RESET, FCP_LUN_BUSY);
			}
			mutex_exit(&ptgt->tgt_mutex);

			/* fcp_abort_all() needs the port mutex dropped. */
			mutex_exit(&pptr->port_mutex);
			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
			mutex_enter(&pptr->port_mutex);
		}
	}
}
13832
13833
/*
 * Function: fcp_abort_all
 *
 * Description: After a target or LUN reset, completes all matching
 *		commands sitting in the port's overflow queue with
 *		CMD_RESET/STAT_DEV_RESET, then (unless the FCA returns all
 *		queued commands itself, FC_RESET_RETURN_ALL) walks the
 *		target's LUN packet lists and attempts fc_ulp_abort() on
 *		every issued command.  Commands whose abort fails are
 *		marked FCP_PKT_ABORTING with FCP_INVALID_TIMEOUT and placed
 *		on the overflow queue for fcp_watch() to time out later.
 *
 * Argument:	*pptr	Port the commands belong to.
 *		*ttgt	Target being reset (may be NULL for a LUN reset,
 *			in which case it is derived from rlun below).
 *		*rlun	LUN being reset (NULL for a target reset).
 *		tgt_cnt	Target change counter recorded at reset time;
 *			processing stops if the target changes.
 *
 * Context:	Called without port_mutex held (caller drops it).
 */
static void
fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
    struct fcp_lun *rlun, int tgt_cnt)
{
	int rval;
	struct fcp_lun *tlun, *nlun;
	struct fcp_pkt *pcmd = NULL, *ncmd = NULL,
	    *cmd = NULL, *head = NULL,
	    *tail = NULL;

	/*
	 * First pass: pull matching commands off the overflow queue onto
	 * a private head/tail list.
	 */
	mutex_enter(&pptr->port_pkt_mutex);
	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
		struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
		struct fcp_tgt *ptgt = plun->lun_tgt;

		ncmd = cmd->cmd_next;

		/*
		 * NOTE(review): with &&, a command is collected when it
		 * matches the target OR the LUN - so a LUN reset (ttgt
		 * set to the LUN's target by the caller) sweeps every
		 * queued command of that target, not just the one LUN's.
		 * Confirm whether || (match both) was intended.
		 */
		if (ptgt != ttgt && plun != rlun) {
			pcmd = cmd;
			continue;
		}

		/* Unlink cmd from the overflow queue. */
		if (pcmd != NULL) {
			ASSERT(pptr->port_pkt_head != cmd);
			pcmd->cmd_next = ncmd;
		} else {
			ASSERT(cmd == pptr->port_pkt_head);
			pptr->port_pkt_head = ncmd;
		}
		if (pptr->port_pkt_tail == cmd) {
			ASSERT(cmd->cmd_next == NULL);
			pptr->port_pkt_tail = pcmd;
			if (pcmd != NULL) {
				pcmd->cmd_next = NULL;
			}
		}

		/* Append to the private list. */
		if (head == NULL) {
			head = tail = cmd;
		} else {
			ASSERT(tail != NULL);
			tail->cmd_next = cmd;
			tail = cmd;
		}
		cmd->cmd_next = NULL;
	}
	mutex_exit(&pptr->port_pkt_mutex);

	/*
	 * Complete each collected command as reset, provided the target
	 * hasn't changed since the reset was queued.
	 */
	for (cmd = head; cmd != NULL; cmd = ncmd) {
		struct scsi_pkt *pkt = cmd->cmd_pkt;

		ncmd = cmd->cmd_next;
		ASSERT(pkt != NULL);

		mutex_enter(&pptr->port_mutex);
		if (ttgt->tgt_change_cnt == tgt_cnt) {
			mutex_exit(&pptr->port_mutex);
			cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
			pkt->pkt_reason = CMD_RESET;
			pkt->pkt_statistics |= STAT_DEV_RESET;
			cmd->cmd_state = FCP_PKT_IDLE;
			fcp_post_callback(cmd);
		} else {
			mutex_exit(&pptr->port_mutex);
		}
	}

	/*
	 * If the FCA will return all the commands in its queue then our
	 * work is easy, just return.
	 */

	if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
		return;
	}

	/*
	 * For RESET_LUN get hold of target pointer
	 */
	if (ttgt == NULL) {
		ASSERT(rlun != NULL);

		ttgt = rlun->lun_tgt;

		ASSERT(ttgt != NULL);
	}

	/*
	 * There are some severe race conditions here.
	 * While we are trying to abort the pkt, it might be completing
	 * so mark it aborted and if the abort does not succeed then
	 * handle it in the watch thread.
	 */
	mutex_enter(&ttgt->tgt_mutex);
	nlun = ttgt->tgt_lun;
	mutex_exit(&ttgt->tgt_mutex);
	while ((tlun = nlun) != NULL) {
		int restart = 0;
		/* For a LUN reset, only the named LUN is processed. */
		if (rlun && rlun != tlun) {
			mutex_enter(&ttgt->tgt_mutex);
			nlun = tlun->lun_next;
			mutex_exit(&ttgt->tgt_mutex);
			continue;
		}
		mutex_enter(&tlun->lun_mutex);
		cmd = tlun->lun_pkt_head;
		while (cmd != NULL) {
			if (cmd->cmd_state == FCP_PKT_ISSUED) {
				struct scsi_pkt *pkt;

				/* lun_mutex is dropped: rescan from head. */
				restart = 1;
				cmd->cmd_state = FCP_PKT_ABORTING;
				mutex_exit(&tlun->lun_mutex);
				rval = fc_ulp_abort(pptr->port_fp_handle,
				    cmd->cmd_fp_pkt, KM_SLEEP);
				if (rval == FC_SUCCESS) {
					pkt = cmd->cmd_pkt;
					pkt->pkt_reason = CMD_RESET;
					pkt->pkt_statistics |= STAT_DEV_RESET;
					cmd->cmd_state = FCP_PKT_IDLE;
					fcp_post_callback(cmd);
				} else {
					caddr_t msg;

					(void) fc_ulp_error(rval, &msg);

					/*
					 * This part is tricky. The abort
					 * failed and now the command could
					 * be completing. The cmd_state ==
					 * FCP_PKT_ABORTING should save
					 * us in fcp_cmd_callback. If we
					 * are already aborting ignore the
					 * command in fcp_cmd_callback.
					 * Here we leave this packet for 20
					 * sec to be aborted in the
					 * fcp_watch thread.
					 */
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Abort failed after reset %s",
					    msg);

					cmd->cmd_timeout =
					    fcp_watchdog_time +
					    cmd->cmd_pkt->pkt_time +
					    FCP_FAILED_DELAY;

					cmd->cmd_fp_pkt->pkt_timeout =
					    FCP_INVALID_TIMEOUT;
					/*
					 * This is a hack, cmd is put in the
					 * overflow queue so that it can be
					 * timed out finally
					 */
					cmd->cmd_flags |= CFLAG_IN_QUEUE;

					mutex_enter(&pptr->port_pkt_mutex);
					if (pptr->port_pkt_head) {
						ASSERT(pptr->port_pkt_tail
						    != NULL);
						pptr->port_pkt_tail->cmd_next
						    = cmd;
						pptr->port_pkt_tail = cmd;
					} else {
						ASSERT(pptr->port_pkt_tail
						    == NULL);
						pptr->port_pkt_head =
						    pptr->port_pkt_tail
						    = cmd;
					}
					cmd->cmd_next = NULL;
					mutex_exit(&pptr->port_pkt_mutex);
				}
				mutex_enter(&tlun->lun_mutex);
				cmd = tlun->lun_pkt_head;
			} else {
				cmd = cmd->cmd_forw;
			}
		}
		mutex_exit(&tlun->lun_mutex);

		/*
		 * If any abort was attempted the LUN list may have changed:
		 * restart from the target's first LUN, else advance.
		 */
		mutex_enter(&ttgt->tgt_mutex);
		restart == 1 ? (nlun = ttgt->tgt_lun) : (nlun = tlun->lun_next);
		mutex_exit(&ttgt->tgt_mutex);

		/* Stop altogether if the target changed under us. */
		mutex_enter(&pptr->port_mutex);
		if (tgt_cnt != ttgt->tgt_change_cnt) {
			mutex_exit(&pptr->port_mutex);
			return;
		} else {
			mutex_exit(&pptr->port_mutex);
		}
	}
}
14028
14029
14030 /*
14031 * unlink the soft state, returning the soft state found (if any)
14032 *
14033 * acquires and releases the global mutex
14034 */
14035 struct fcp_port *
14036 fcp_soft_state_unlink(struct fcp_port *pptr)
14037 {
14038 struct fcp_port *hptr; /* ptr index */
14039 struct fcp_port *tptr; /* prev hptr */
14040
14041 mutex_enter(&fcp_global_mutex);
14042 for (hptr = fcp_port_head, tptr = NULL;
14043 hptr != NULL;
14044 tptr = hptr, hptr = hptr->port_next) {
14045 if (hptr == pptr) {
14046 /* we found a match -- remove this item */
14047 if (tptr == NULL) {
14048 /* we're at the head of the list */
14049 fcp_port_head = hptr->port_next;
14050 } else {
14051 tptr->port_next = hptr->port_next;
14052 }
14053 break; /* success */
14054 }
14055 }
14056 if (fcp_port_head == NULL) {
14057 fcp_cleanup_blacklist(&fcp_lun_blacklist);
14058 }
14059 mutex_exit(&fcp_global_mutex);
14060 return (hptr);
14061 }
14062
14063
14064 /*
14065 * called by fcp_scsi_hba_tgt_init to find a LUN given a
14066 * WWN and a LUN number
14067 */
14068 /* ARGSUSED */
14069 static struct fcp_lun *
14070 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
14071 {
14072 int hash;
14073 struct fcp_tgt *ptgt;
14074 struct fcp_lun *plun;
14075
14076 ASSERT(mutex_owned(&pptr->port_mutex));
14077
14078 hash = FCP_HASH(wwn);
14079 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
14080 ptgt = ptgt->tgt_next) {
14081 if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
14082 sizeof (ptgt->tgt_port_wwn)) == 0) {
14083 mutex_enter(&ptgt->tgt_mutex);
14084 for (plun = ptgt->tgt_lun;
14085 plun != NULL;
14086 plun = plun->lun_next) {
14087 if (plun->lun_num == lun) {
14088 mutex_exit(&ptgt->tgt_mutex);
14089 return (plun);
14090 }
14091 }
14092 mutex_exit(&ptgt->tgt_mutex);
14093 return (NULL);
14094 }
14095 }
14096 return (NULL);
14097 }
14098
14099 /*
14100 * Function: fcp_prepare_pkt
14101 *
14102 * Description: This function prepares the SCSI cmd pkt, passed by the caller,
14103 * for fcp_start(). It binds the data or partially maps it.
14104 * Builds the FCP header and starts the initialization of the
14105 * Fibre Channel header.
14106 *
14107 * Argument: *pptr FCP port.
14108 * *cmd FCP packet.
14109 * *plun LUN the command will be sent to.
14110 *
14111 * Context: User, Kernel and Interrupt context.
14112 */
static void
fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
    struct fcp_lun *plun)
{
	fc_packet_t	*fpkt = cmd->cmd_fp_pkt;
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	struct fcp_cmd	*fcmd = &cmd->cmd_fcp_cmd;

	/* A completion routine is mandatory unless the command is polled. */
	ASSERT(cmd->cmd_pkt->pkt_comp ||
	    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));

	if (cmd->cmd_pkt->pkt_numcookies) {
		/*
		 * Data is DMA-mapped: derive the transfer direction from
		 * the DMA binding and mirror it in the FCP_CMND control
		 * bits and the FC transaction type.
		 */
		if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
			fcmd->fcp_cntl.cntl_read_data = 1;
			fcmd->fcp_cntl.cntl_write_data = 0;
			fpkt->pkt_tran_type = FC_PKT_FCP_READ;
		} else {
			fcmd->fcp_cntl.cntl_read_data = 0;
			fcmd->fcp_cntl.cntl_write_data = 1;
			fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
		}

		/* Hand the SCSA DMA cookie list straight to the FC packet. */
		fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;

		fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
		ASSERT(fpkt->pkt_data_cookie_cnt <=
		    pptr->port_data_dma_attr.dma_attr_sgllen);

		cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;

		/* FCA needs pkt_datalen to be set */
		fpkt->pkt_datalen = cmd->cmd_dmacount;
		fcmd->fcp_data_len = cmd->cmd_dmacount;
	} else {
		/* No data phase: plain command/response exchange. */
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 0;
		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
		fpkt->pkt_datalen = 0;
		fcmd->fcp_data_len = 0;
	}

	/* set up the Tagged Queuing type */
	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
	} else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
	} else {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	}

	fcmd->fcp_ent_addr = plun->lun_addr;

	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
		/*
		 * The FCA uses DVMA for the command: copy the FCP_CMND
		 * payload out into the DMA-mapped command buffer.
		 */
		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
	} else {
		ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
	}

	/* Reset the SCSI packet's completion fields for (re)submission. */
	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
	cmd->cmd_pkt->pkt_state = 0;
	cmd->cmd_pkt->pkt_statistics = 0;
	cmd->cmd_pkt->pkt_resid = 0;

	cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;

	if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
		/* Polled command: class 3, no completion callback. */
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
		fpkt->pkt_comp = NULL;
	} else {
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
		if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
		}
		fpkt->pkt_comp = fcp_cmd_callback;
	}

	/* While the port is suspended, mark traffic with FC_TRAN_DUMPING. */
	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & FCP_STATE_SUSPENDED) {
		fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
	}
	mutex_exit(&pptr->port_mutex);

	/* Start the FC header: route from this port to the target's D_ID. */
	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;

	/*
	 * Save a few kernel cycles here
	 */
#ifndef __lock_lint
	fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
#endif /* __lock_lint */
}
14208
14209 static void
14210 fcp_post_callback(struct fcp_pkt *cmd)
14211 {
14212 scsi_hba_pkt_comp(cmd->cmd_pkt);
14213 }
14214
14215
14216 /*
14217 * called to do polled I/O by fcp_start()
14218 *
14219 * return a transport status value, i.e. TRAN_ACCECPT for success
14220 */
static int
fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
{
	int	rval;

#ifdef DEBUG
	/* Debug-only count of packets currently out at the transport. */
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts++;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	/*
	 * If the FC packet already carries a timeout, refresh it from the
	 * SCSI packet's pkt_time; otherwise use the default poll timeout.
	 */
	if (cmd->cmd_fp_pkt->pkt_timeout) {
		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
	} else {
		cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
	}

	/* Polled packets must not have a completion callback. */
	ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);

	cmd->cmd_state = FCP_PKT_ISSUED;

	rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);

#ifdef DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts--;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	cmd->cmd_state = FCP_PKT_IDLE;

	/* Map the FC transport result onto a SCSA transport code. */
	switch (rval) {
	case FC_SUCCESS:
		/* Transport accepted: complete only if the FC state is OK. */
		if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
			fcp_complete_pkt(cmd->cmd_fp_pkt);
			rval = TRAN_ACCEPT;
		} else {
			rval = TRAN_FATAL_ERROR;
		}
		break;

	case FC_TRAN_BUSY:
		rval = TRAN_BUSY;
		cmd->cmd_pkt->pkt_resid = 0;
		break;

	case FC_BADPACKET:
		rval = TRAN_BADPKT;
		break;

	default:
		rval = TRAN_FATAL_ERROR;
		break;
	}

	return (rval);
}
14278
14279
14280 /*
14281 * called by some of the following transport-called routines to convert
14282 * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14283 */
14284 static struct fcp_port *
14285 fcp_dip2port(dev_info_t *dip)
14286 {
14287 int instance;
14288
14289 instance = ddi_get_instance(dip);
14290 return (ddi_get_soft_state(fcp_softstate, instance));
14291 }
14292
14293
14294 /*
14295 * called internally to return a LUN given a dip
14296 */
14297 struct fcp_lun *
14298 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14299 {
14300 struct fcp_tgt *ptgt;
14301 struct fcp_lun *plun;
14302 int i;
14303
14304
14305 ASSERT(mutex_owned(&pptr->port_mutex));
14306
14307 for (i = 0; i < FCP_NUM_HASH; i++) {
14308 for (ptgt = pptr->port_tgt_hash_table[i];
14309 ptgt != NULL;
14310 ptgt = ptgt->tgt_next) {
14311 mutex_enter(&ptgt->tgt_mutex);
14312 for (plun = ptgt->tgt_lun; plun != NULL;
14313 plun = plun->lun_next) {
14314 mutex_enter(&plun->lun_mutex);
14315 if (plun->lun_cip == cip) {
14316 mutex_exit(&plun->lun_mutex);
14317 mutex_exit(&ptgt->tgt_mutex);
14318 return (plun); /* match found */
14319 }
14320 mutex_exit(&plun->lun_mutex);
14321 }
14322 mutex_exit(&ptgt->tgt_mutex);
14323 }
14324 }
14325 return (NULL); /* no LUN found */
14326 }
14327
14328 /*
14329 * pass an element to the hotplug list, kick the hotplug thread
14330 * and wait for the element to get processed by the hotplug thread.
14331 * on return the element is freed.
14332 *
14333 * return zero success and non-zero on failure
14334 *
14335 * acquires/releases the target mutex
14336 *
14337 */
14338 static int
14339 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14340 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14341 {
14342 struct fcp_hp_elem *elem;
14343 int rval;
14344
14345 mutex_enter(&plun->lun_tgt->tgt_mutex);
14346 if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14347 what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14348 mutex_exit(&plun->lun_tgt->tgt_mutex);
14349 fcp_log(CE_CONT, pptr->port_dip,
14350 "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14351 what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14352 return (NDI_FAILURE);
14353 }
14354 mutex_exit(&plun->lun_tgt->tgt_mutex);
14355 mutex_enter(&elem->mutex);
14356 if (elem->wait) {
14357 while (elem->wait) {
14358 cv_wait(&elem->cv, &elem->mutex);
14359 }
14360 }
14361 rval = (elem->result);
14362 mutex_exit(&elem->mutex);
14363 mutex_destroy(&elem->mutex);
14364 cv_destroy(&elem->cv);
14365 kmem_free(elem, sizeof (struct fcp_hp_elem));
14366 return (rval);
14367 }
14368
14369 /*
14370 * pass an element to the hotplug list, and then
14371 * kick the hotplug thread
14372 *
14373 * return Boolean success, i.e. non-zero if all goes well, else zero on error
14374 *
14375 * acquires/releases the hotplug mutex
14376 *
14377 * called with the target mutex owned
14378 *
14379 * memory acquired in NOSLEEP mode
14380 * NOTE: if wait is set to 1 then the caller is responsible for waiting on
14381 * for the hp daemon to process the request and is responsible for
14382 * freeing the element
14383 */
14384 static struct fcp_hp_elem *
14385 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14386 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14387 {
14388 struct fcp_hp_elem *elem;
14389 dev_info_t *pdip;
14390
14391 ASSERT(pptr != NULL);
14392 ASSERT(plun != NULL);
14393 ASSERT(plun->lun_tgt != NULL);
14394 ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14395
14396 /* create space for a hotplug element */
14397 if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14398 == NULL) {
14399 fcp_log(CE_WARN, NULL,
14400 "!can't allocate memory for hotplug element");
14401 return (NULL);
14402 }
14403
14404 /* fill in hotplug element */
14405 elem->port = pptr;
14406 elem->lun = plun;
14407 elem->cip = cip;
14408 elem->old_lun_mpxio = plun->lun_mpxio;
14409 elem->what = what;
14410 elem->flags = flags;
14411 elem->link_cnt = link_cnt;
14412 elem->tgt_cnt = tgt_cnt;
14413 elem->wait = wait;
14414 mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14415 cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14416
14417 /* schedule the hotplug task */
14418 pdip = pptr->port_dip;
14419 mutex_enter(&plun->lun_mutex);
14420 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14421 plun->lun_event_count++;
14422 elem->event_cnt = plun->lun_event_count;
14423 }
14424 mutex_exit(&plun->lun_mutex);
14425 if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14426 (void *)elem, KM_NOSLEEP) == NULL) {
14427 mutex_enter(&plun->lun_mutex);
14428 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14429 plun->lun_event_count--;
14430 }
14431 mutex_exit(&plun->lun_mutex);
14432 kmem_free(elem, sizeof (*elem));
14433 return (0);
14434 }
14435
14436 return (elem);
14437 }
14438
14439
/*
 * Attempt to re-issue a command currently sitting on the port's overflow
 * queue (CFLAG_IN_QUEUE).  If the LUN is usable and the port is not in
 * the middle of coming online, the packet is re-prepared and handed to
 * the transport; otherwise (or if the transport attempt fails) the
 * command is put back on the queue via fcp_queue_pkt().
 */
static void
fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
{
	int			rval;
	struct scsi_address	*ap;
	struct fcp_lun		*plun;
	struct fcp_tgt		*ptgt;
	fc_packet_t		*fpkt;

	ap = &cmd->cmd_pkt->pkt_address;
	plun = ADDR2LUN(ap);
	ptgt = plun->lun_tgt;

	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);

	cmd->cmd_state = FCP_PKT_IDLE;

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
	    (!(pptr->port_state & FCP_STATE_ONLINING))) {
		fc_ulp_rscn_info_t *rscnp;

		cmd->cmd_state = FCP_PKT_ISSUED;

		/*
		 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
		 * originally NULL, hence we try to set it to the pd pointed
		 * to by the SCSI device we're trying to get to.
		 */

		fpkt = cmd->cmd_fp_pkt;
		if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
			fpkt->pkt_pd = ptgt->tgt_pd_handle;
			/*
			 * We need to notify the transport that we now have a
			 * reference to the remote port handle.
			 */
			fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
		}

		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		/* Queued commands are never polled commands. */
		ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);

		/* prepare the packet */

		fcp_prepare_pkt(pptr, cmd, plun);

		rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
		    pkt_ulp_rscn_infop;

		/* Zero pkt_time means no watchdog deadline. */
		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
		    fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;

		/* Record the current RSCN count with the packet. */
		if (rscnp != NULL) {
			rscnp->ulp_rscn_count =
			    fc_ulp_get_rscn_count(pptr->
			    port_fp_handle);
		}

		rval = fcp_transport(pptr->port_fp_handle,
		    cmd->cmd_fp_pkt, 0);

		if (rval == FC_SUCCESS) {
			return;
		}
		/*
		 * NOTE(review): cmd_state holds enumerated values
		 * (FCP_PKT_IDLE/FCP_PKT_ISSUED) elsewhere in this file but
		 * is cleared here with a bitwise mask -- verify intent.
		 */
		cmd->cmd_state &= ~FCP_PKT_ISSUED;
	} else {
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
	}

	/* Could not go out this time: leave it on the port queue. */
	fcp_queue_pkt(pptr, cmd);
}
14516
14517
14518 static void
14519 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14520 {
14521 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14522
14523 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14524 cmd->cmd_state = FCP_PKT_IDLE;
14525
14526 cmd->cmd_pkt->pkt_reason = reason;
14527 cmd->cmd_pkt->pkt_state = 0;
14528 cmd->cmd_pkt->pkt_statistics = statistics;
14529
14530 fcp_post_callback(cmd);
14531 }
14532
14533 /*
14534 * Function: fcp_queue_pkt
14535 *
14536 * Description: This function queues the packet passed by the caller into
14537 * the list of packets of the FCP port.
14538 *
14539 * Argument: *pptr FCP port.
14540 * *cmd FCP packet to queue.
14541 *
14542 * Return Value: None
14543 *
14544 * Context: User, Kernel and Interrupt context.
14545 */
14546 static void
14547 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14548 {
14549 ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == NULL);
14550
14551 mutex_enter(&pptr->port_pkt_mutex);
14552 cmd->cmd_flags |= CFLAG_IN_QUEUE;
14553 ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14554 cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14555
14556 /*
14557 * zero pkt_time means hang around for ever
14558 */
14559 if (cmd->cmd_pkt->pkt_time) {
14560 if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14561 cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14562 } else {
14563 /*
14564 * Indicate the watch thread to fail the
14565 * command by setting it to highest value
14566 */
14567 cmd->cmd_timeout = fcp_watchdog_time;
14568 cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14569 }
14570 }
14571
14572 if (pptr->port_pkt_head) {
14573 ASSERT(pptr->port_pkt_tail != NULL);
14574
14575 pptr->port_pkt_tail->cmd_next = cmd;
14576 pptr->port_pkt_tail = cmd;
14577 } else {
14578 ASSERT(pptr->port_pkt_tail == NULL);
14579
14580 pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14581 }
14582 cmd->cmd_next = NULL;
14583 mutex_exit(&pptr->port_pkt_mutex);
14584 }
14585
14586 /*
14587 * Function: fcp_update_targets
14588 *
14589 * Description: This function applies the specified change of state to all
14590 * the targets listed. The operation applied is 'set'.
14591 *
14592 * Argument: *pptr FCP port.
14593 * *dev_list Array of fc_portmap_t structures.
14594 * count Length of dev_list.
14595 * state State bits to update.
14596 * cause Reason for the update.
14597 *
14598 * Return Value: None
14599 *
14600 * Context: User, Kernel and Interrupt context.
14601 * The mutex pptr->port_mutex must be held.
14602 */
14603 static void
14604 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14605 uint32_t count, uint32_t state, int cause)
14606 {
14607 fc_portmap_t *map_entry;
14608 struct fcp_tgt *ptgt;
14609
14610 ASSERT(MUTEX_HELD(&pptr->port_mutex));
14611
14612 while (count--) {
14613 map_entry = &(dev_list[count]);
14614 ptgt = fcp_lookup_target(pptr,
14615 (uchar_t *)&(map_entry->map_pwwn));
14616 if (ptgt == NULL) {
14617 continue;
14618 }
14619
14620 mutex_enter(&ptgt->tgt_mutex);
14621 ptgt->tgt_trace = 0;
14622 ptgt->tgt_change_cnt++;
14623 ptgt->tgt_statec_cause = cause;
14624 ptgt->tgt_tmp_cnt = 1;
14625 fcp_update_tgt_state(ptgt, FCP_SET, state);
14626 mutex_exit(&ptgt->tgt_mutex);
14627 }
14628 }
14629
14630 static int
14631 fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14632 int lcount, int tcount, int cause)
14633 {
14634 int rval;
14635
14636 mutex_enter(&pptr->port_mutex);
14637 rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
14638 mutex_exit(&pptr->port_mutex);
14639
14640 return (rval);
14641 }
14642
14643
/*
 * Core of the discovery completion accounting.  Decrements the
 * per-target tmp count and, for link-change/link-down causes, drives
 * the "finish" steps: fcp_finish_tgt() when a target's outstanding work
 * drains, and fcp_finish_init() when the port's tmp count reaches zero
 * for the current link generation (lcount).
 *
 * Called with pptr->port_mutex held.
 *
 * Returns FCP_NO_CHANGE, or FCP_DEV_CHANGE when the target generation
 * (tgt_change_cnt) no longer matches the caller's tcount.
 */
static int
fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int lcount, int tcount, int cause)
{
	int		finish_init = 0;
	int		finish_tgt = 0;
	int		do_finish_init = 0;
	int		rval = FCP_NO_CHANGE;

	/* Only link-level causes may complete the port-wide init. */
	if (cause == FCP_CAUSE_LINK_CHANGE ||
	    cause == FCP_CAUSE_LINK_DOWN) {
		do_finish_init = 1;
	}

	if (ptgt != NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0,
		    "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
		    " cause = %d, d_id = 0x%x, tgt_done = %d",
		    pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
		    pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
		    ptgt->tgt_d_id, ptgt->tgt_done);

		mutex_enter(&ptgt->tgt_mutex);

		if (tcount && (ptgt->tgt_change_cnt != tcount)) {
			/*
			 * A newer change superseded the caller's view of
			 * this target; tgt_done ensures the port-wide
			 * finish is counted at most once per generation.
			 */
			rval = FCP_DEV_CHANGE;
			if (do_finish_init && ptgt->tgt_done == 0) {
				ptgt->tgt_done++;
				finish_init = 1;
			}
		} else {
			/* Was this the last piece of work for the target? */
			if (--ptgt->tgt_tmp_cnt <= 0) {
				ptgt->tgt_tmp_cnt = 0;
				finish_tgt = 1;

				if (do_finish_init) {
					finish_init = 1;
				}
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0,
		    "Call Finish Init for NO target");

		if (do_finish_init) {
			finish_init = 1;
		}
	}

	if (finish_tgt) {
		ASSERT(ptgt != NULL);

		mutex_enter(&ptgt->tgt_mutex);
#ifdef DEBUG
		/* Record who drained the target, for post-mortem debug. */
		bzero(ptgt->tgt_tmp_cnt_stack,
		    sizeof (ptgt->tgt_tmp_cnt_stack));

		ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
		    FCP_STACK_DEPTH);
#endif /* DEBUG */
		mutex_exit(&ptgt->tgt_mutex);

		(void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
	}

	/* Port-wide accounting: the last target completes the init. */
	if (finish_init && lcount == pptr->port_link_cnt) {
		ASSERT(pptr->port_tmp_cnt > 0);
		if (--pptr->port_tmp_cnt == 0) {
			fcp_finish_init(pptr);
		}
	} else if (lcount != pptr->port_link_cnt) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_call_finish_init_held,1: state change occured"
		    " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
	}

	return (rval);
}
14726
/*
 * timeout(9F) callback: re-drives LUN discovery for a target whose
 * reported LUN inventory changed.  Builds a single-entry portmap for
 * the target and feeds it to fcp_statec_callback() as a
 * PORT_DEVICE_REPORTLUN_CHANGED device-change event.
 */
static void
fcp_reconfigure_luns(void * tgt_handle)
{
	uint32_t	dev_cnt;
	fc_portmap_t	*devlist;
	struct fcp_tgt	*ptgt = (struct fcp_tgt *)tgt_handle;
	struct fcp_port	*pptr = ptgt->tgt_port;

	/*
	 * If the timer that fires this off got canceled too late, the
	 * target could have been destroyed.
	 */

	if (ptgt->tgt_tid == NULL) {
		return;
	}

	/* KM_NOSLEEP: we must not block for memory in timeout context. */
	devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
	if (devlist == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: failed to allocate for portmap",
		    pptr->port_instance);
		return;
	}

	/* Describe this one target as a logged-in, changed device. */
	dev_cnt = 1;
	devlist->map_pd = ptgt->tgt_pd_handle;
	devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
	devlist->map_did.port_id = ptgt->tgt_d_id;

	bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
	bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);

	devlist->map_state = PORT_DEVICE_LOGGED_IN;
	devlist->map_type = PORT_DEVICE_REPORTLUN_CHANGED;
	devlist->map_flags = 0;

	/* Re-enter the normal state-change path with this synthetic map. */
	fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
	    pptr->port_topology, devlist, dev_cnt, pptr->port_id);

	/*
	 * Clear the tgt_tid after no more references to
	 * the fcp_tgt
	 */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_tid = NULL;
	mutex_exit(&ptgt->tgt_mutex);

	kmem_free(devlist, sizeof (*devlist));
}
14777
14778
14779 static void
14780 fcp_free_targets(struct fcp_port *pptr)
14781 {
14782 int i;
14783 struct fcp_tgt *ptgt;
14784
14785 mutex_enter(&pptr->port_mutex);
14786 for (i = 0; i < FCP_NUM_HASH; i++) {
14787 ptgt = pptr->port_tgt_hash_table[i];
14788 while (ptgt != NULL) {
14789 struct fcp_tgt *next_tgt = ptgt->tgt_next;
14790
14791 fcp_free_target(ptgt);
14792 ptgt = next_tgt;
14793 }
14794 }
14795 mutex_exit(&pptr->port_mutex);
14796 }
14797
14798
/*
 * Free one target and everything hanging off it: cancel any pending
 * reconfigure timeout, release every LUN on the target's list, then
 * release the target structure itself.
 */
static void
fcp_free_target(struct fcp_tgt *ptgt)
{
	struct fcp_lun	*plun;
	timeout_id_t	tid;

	mutex_enter(&ptgt->tgt_mutex);
	tid = ptgt->tgt_tid;

	/*
	 * Cancel any pending timeouts for this target.
	 */

	if (tid != NULL) {
		/*
		 * Set tgt_tid to NULL first to avoid a race in the callback.
		 * If tgt_tid is NULL, the callback will simply return.
		 */
		ptgt->tgt_tid = NULL;
		mutex_exit(&ptgt->tgt_mutex);
		/* Drop the mutex: untimeout() may wait for the callback. */
		(void) untimeout(tid);
		mutex_enter(&ptgt->tgt_mutex);
	}

	/* Free every LUN, capturing each successor before dealloc. */
	plun = ptgt->tgt_lun;
	while (plun != NULL) {
		struct fcp_lun *next_lun = plun->lun_next;

		fcp_dealloc_lun(plun);
		plun = next_lun;
	}

	mutex_exit(&ptgt->tgt_mutex);
	fcp_dealloc_tgt(ptgt);
}
14834
14835 /*
14836 * Function: fcp_is_retryable
14837 *
14838 * Description: Indicates if the internal packet is retryable.
14839 *
14840 * Argument: *icmd FCP internal packet.
14841 *
14842 * Return Value: 0 Not retryable
14843 * 1 Retryable
14844 *
14845 * Context: User, Kernel and Interrupt context
14846 */
14847 static int
14848 fcp_is_retryable(struct fcp_ipkt *icmd)
14849 {
14850 if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14851 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
14852 return (0);
14853 }
14854
14855 return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14856 icmd->ipkt_port->port_deadline) ? 1 : 0);
14857 }
14858
14859 /*
14860 * Function: fcp_create_on_demand
14861 *
14862 * Argument: *pptr FCP port.
14863 * *pwwn Port WWN.
14864 *
14865 * Return Value: 0 Success
14866 * EIO
14867 * ENOMEM
14868 * EBUSY
14869 * EINVAL
14870 *
14871 * Context: User and Kernel context
14872 */
14873 static int
14874 fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
14875 {
14876 int wait_ms;
14877 int tcount;
14878 int lcount;
14879 int ret;
14880 int error;
14881 int rval = EIO;
14882 int ntries;
14883 fc_portmap_t *devlist;
14884 opaque_t pd;
14885 struct fcp_lun *plun;
14886 struct fcp_tgt *ptgt;
14887 int old_manual = 0;
14888
14889 /* Allocates the fc_portmap_t structure. */
14890 devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);
14891
14892 /*
14893 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown
14894 * in the commented statement below:
14895 *
14896 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
14897 *
14898 * Below, the deadline for the discovery process is set.
14899 */
14900 mutex_enter(&pptr->port_mutex);
14901 pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
14902 mutex_exit(&pptr->port_mutex);
14903
14904 /*
14905 * We try to find the remote port based on the WWN provided by the
14906 * caller. We actually ask fp/fctl if it has it.
14907 */
14908 pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
14909 (la_wwn_t *)pwwn, &error, 1);
14910
14911 if (pd == NULL) {
14912 kmem_free(devlist, sizeof (*devlist));
14913 return (rval);
14914 }
14915
14916 /*
14917 * The remote port was found. We ask fp/fctl to update our
14918 * fc_portmap_t structure.
14919 */
14920 ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
14921 (la_wwn_t *)pwwn, devlist);
14922 if (ret != FC_SUCCESS) {
14923 kmem_free(devlist, sizeof (*devlist));
14924 return (rval);
14925 }
14926
14927 /*
14928 * The map flag field is set to indicates that the creation is being
14929 * done at the user request (Ioclt probably luxadm or cfgadm).
14930 */
14931 devlist->map_type = PORT_DEVICE_USER_CREATE;
14932
14933 mutex_enter(&pptr->port_mutex);
14934
14935 /*
14936 * We check to see if fcp already has a target that describes the
14937 * device being created. If not it is created.
14938 */
14939 ptgt = fcp_lookup_target(pptr, pwwn);
14940 if (ptgt == NULL) {
14941 lcount = pptr->port_link_cnt;
14942 mutex_exit(&pptr->port_mutex);
14943
14944 ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
14945 if (ptgt == NULL) {
14946 fcp_log(CE_WARN, pptr->port_dip,
14947 "!FC target allocation failed");
14948 return (ENOMEM);
14949 }
14950
14951 mutex_enter(&pptr->port_mutex);
14952 }
14953
14954 mutex_enter(&ptgt->tgt_mutex);
14955 ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
14956 ptgt->tgt_tmp_cnt = 1;
14957 ptgt->tgt_device_created = 0;
14958 /*
14959 * If fabric and auto config is set but the target was
14960 * manually unconfigured then reset to the manual_config_only to
14961 * 0 so the device will get configured.
14962 */
14963 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14964 fcp_enable_auto_configuration &&
14965 ptgt->tgt_manual_config_only == 1) {
14966 old_manual = 1;
14967 ptgt->tgt_manual_config_only = 0;
14968 }
14969 mutex_exit(&ptgt->tgt_mutex);
14970
14971 fcp_update_targets(pptr, devlist, 1,
14972 FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);
14973
14974 lcount = pptr->port_link_cnt;
14975 tcount = ptgt->tgt_change_cnt;
14976
14977 if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
14978 tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
14979 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14980 fcp_enable_auto_configuration && old_manual) {
14981 mutex_enter(&ptgt->tgt_mutex);
14982 ptgt->tgt_manual_config_only = 1;
14983 mutex_exit(&ptgt->tgt_mutex);
14984 }
14985
14986 if (pptr->port_link_cnt != lcount ||
14987 ptgt->tgt_change_cnt != tcount) {
14988 rval = EBUSY;
14989 }
14990 mutex_exit(&pptr->port_mutex);
14991
14992 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14993 FCP_BUF_LEVEL_3, 0,
14994 "fcp_create_on_demand: mapflags ptgt=%x, "
14995 "lcount=%x::port_link_cnt=%x, "
14996 "tcount=%x: tgt_change_cnt=%x, rval=%x",
14997 ptgt, lcount, pptr->port_link_cnt,
14998 tcount, ptgt->tgt_change_cnt, rval);
14999 return (rval);
15000 }
15001
15002 /*
15003 * Due to lack of synchronization mechanisms, we perform
15004 * periodic monitoring of our request; Because requests
15005 * get dropped when another one supercedes (either because
15006 * of a link change or a target change), it is difficult to
15007 * provide a clean synchronization mechanism (such as a
15008 * semaphore or a conditional variable) without exhaustively
15009 * rewriting the mainline discovery code of this driver.
15010 */
15011 wait_ms = 500;
15012
15013 ntries = fcp_max_target_retries;
15014
15015 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15016 FCP_BUF_LEVEL_3, 0,
15017 "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
15018 "lcount=%x::port_link_cnt=%x, "
15019 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15020 "tgt_tmp_cnt =%x",
15021 ntries, ptgt, lcount, pptr->port_link_cnt,
15022 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15023 ptgt->tgt_tmp_cnt);
15024
15025 mutex_enter(&ptgt->tgt_mutex);
15026 while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
15027 ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
15028 mutex_exit(&ptgt->tgt_mutex);
15029 mutex_exit(&pptr->port_mutex);
15030
15031 delay(drv_usectohz(wait_ms * 1000));
15032
15033 mutex_enter(&pptr->port_mutex);
15034 mutex_enter(&ptgt->tgt_mutex);
15035 }
15036
15037
15038 if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
15039 rval = EBUSY;
15040 } else {
15041 if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
15042 FCP_TGT_NODE_PRESENT) {
15043 rval = 0;
15044 }
15045 }
15046
15047 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15048 FCP_BUF_LEVEL_3, 0,
15049 "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
15050 "lcount=%x::port_link_cnt=%x, "
15051 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15052 "tgt_tmp_cnt =%x",
15053 ntries, ptgt, lcount, pptr->port_link_cnt,
15054 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15055 ptgt->tgt_tmp_cnt);
15056
15057 if (rval) {
15058 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15059 fcp_enable_auto_configuration && old_manual) {
15060 ptgt->tgt_manual_config_only = 1;
15061 }
15062 mutex_exit(&ptgt->tgt_mutex);
15063 mutex_exit(&pptr->port_mutex);
15064 kmem_free(devlist, sizeof (*devlist));
15065
15066 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15067 FCP_BUF_LEVEL_3, 0,
15068 "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
15069 "lcount=%x::port_link_cnt=%x, "
15070 "tcount=%x::tgt_change_cnt=%x, rval=%x, "
15071 "tgt_device_created=%x, tgt D_ID=%x",
15072 ntries, ptgt, lcount, pptr->port_link_cnt,
15073 tcount, ptgt->tgt_change_cnt, rval,
15074 ptgt->tgt_device_created, ptgt->tgt_d_id);
15075 return (rval);
15076 }
15077
15078 if ((plun = ptgt->tgt_lun) != NULL) {
15079 tcount = plun->lun_tgt->tgt_change_cnt;
15080 } else {
15081 rval = EINVAL;
15082 }
15083 lcount = pptr->port_link_cnt;
15084
15085 /*
15086 * Configuring the target with no LUNs will fail. We
15087 * should reset the node state so that it is not
15088 * automatically configured when the LUNs are added
15089 * to this target.
15090 */
15091 if (ptgt->tgt_lun_cnt == 0) {
15092 ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
15093 }
15094 mutex_exit(&ptgt->tgt_mutex);
15095 mutex_exit(&pptr->port_mutex);
15096
15097 while (plun) {
15098 child_info_t *cip;
15099
15100 mutex_enter(&plun->lun_mutex);
15101 cip = plun->lun_cip;
15102 mutex_exit(&plun->lun_mutex);
15103
15104 mutex_enter(&ptgt->tgt_mutex);
15105 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
15106 mutex_exit(&ptgt->tgt_mutex);
15107
15108 rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
15109 FCP_ONLINE, lcount, tcount,
15110 NDI_ONLINE_ATTACH);
15111 if (rval != NDI_SUCCESS) {
15112 FCP_TRACE(fcp_logq,
15113 pptr->port_instbuf, fcp_trace,
15114 FCP_BUF_LEVEL_3, 0,
15115 "fcp_create_on_demand: "
15116 "pass_to_hp_and_wait failed "
15117 "rval=%x", rval);
15118 rval = EIO;
15119 } else {
15120 mutex_enter(&LUN_TGT->tgt_mutex);
15121 plun->lun_state &= ~(FCP_LUN_OFFLINE |
15122 FCP_LUN_BUSY);
15123 mutex_exit(&LUN_TGT->tgt_mutex);
15124 }
15125 mutex_enter(&ptgt->tgt_mutex);
15126 }
15127
15128 plun = plun->lun_next;
15129 mutex_exit(&ptgt->tgt_mutex);
15130 }
15131
15132 kmem_free(devlist, sizeof (*devlist));
15133
15134 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15135 fcp_enable_auto_configuration && old_manual) {
15136 mutex_enter(&ptgt->tgt_mutex);
15137 /* if successful then set manual to 0 */
15138 if (rval == 0) {
15139 ptgt->tgt_manual_config_only = 0;
15140 } else {
15141 /* reset to 1 so the user has to do the config */
15142 ptgt->tgt_manual_config_only = 1;
15143 }
15144 mutex_exit(&ptgt->tgt_mutex);
15145 }
15146
15147 return (rval);
15148 }
15149
15150
15151 static void
15152 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
15153 {
15154 int count;
15155 uchar_t byte;
15156
15157 count = 0;
15158 while (*string) {
15159 byte = FCP_ATOB(*string); string++;
15160 byte = byte << 4 | FCP_ATOB(*string); string++;
15161 bytes[count++] = byte;
15162
15163 if (count >= byte_len) {
15164 break;
15165 }
15166 }
15167 }
15168
15169 static void
15170 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
15171 {
15172 int i;
15173
15174 for (i = 0; i < FC_WWN_SIZE; i++) {
15175 (void) sprintf(string + (i * 2),
15176 "%02x", wwn[i]);
15177 }
15178
15179 }
15180
/*
 * fcp_print_error
 *
 * Log a warning describing why an internal packet (discovery ELS or
 * SCSI command) failed.  A printf-style format string is assembled in a
 * scratch buffer; note the doubled "%%x" escapes, which the sprintf()
 * calls reduce to "%x" in the buffer so that only the final fcp_log()
 * call expands them with the real D_ID / LUN / status arguments.
 */
static void
fcp_print_error(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
	    fpkt->pkt_ulp_private;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	caddr_t		buf;
	int		scsi_cmd = 0;	/* nonzero for SCSI (vs ELS) opcodes */

	ptgt = icmd->ipkt_tgt;
	plun = icmd->ipkt_lun;
	pptr = ptgt->tgt_port;

	/* Scratch buffer for the assembled format string. */
	buf = kmem_zalloc(256, KM_NOSLEEP);
	if (buf == NULL) {
		/* No memory for the message; silently skip logging. */
		return;
	}

	/* Start the message with the command that failed. */
	switch (icmd->ipkt_opcode) {
	case SCMD_REPORT_LUN:
		(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY_PAGE83:
		(void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY:
		(void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case LA_ELS_PLOGI:
		(void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
		break;

	case LA_ELS_PRLI:
		(void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
		break;
	}

	/*
	 * A SCSI command that reached the target (FC_PKT_SUCCESS) failed
	 * inside the FCP response, so decode that; anything else is a
	 * transport-level failure and is reported via fc_ulp_pkt_error().
	 */
	if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
		struct fcp_rsp		response, *rsp;
		uchar_t			asc, ascq;
		caddr_t			sense_key = NULL;
		struct fcp_rsp_info	fcp_rsp_err, *bep;

		if (icmd->ipkt_nodma) {
			/* Response buffer is directly addressable. */
			rsp = (struct fcp_rsp *)fpkt->pkt_resp;
			bep = (struct fcp_rsp_info *)((caddr_t)rsp +
			    sizeof (struct fcp_rsp));
		} else {
			/* Copy the response out of DMA-accessed memory. */
			rsp = &response;
			bep = &fcp_rsp_err;

			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));

			FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
			    bep, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp_info));
		}


		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
			/* Response failed sanity checks; dump raw fields. */
			(void) sprintf(buf + strlen(buf),
			    " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
			    " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
			    " senselen=%%x. Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
			    rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
			    rsp->fcp_u.fcp_status.reserved_1,
			    rsp->fcp_response_len, rsp->fcp_sense_len);

			kmem_free(buf, 256);
			return;
		}

		/* Append the FCP response code when one was supplied. */
		if (rsp->fcp_u.fcp_status.rsp_len_set &&
		    bep->rsp_code != FCP_NO_FAILURE) {
			(void) sprintf(buf + strlen(buf),
			    " FCP Response code = 0x%x", bep->rsp_code);
		}

		if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
			struct scsi_extended_sense sense_info, *sense_ptr;

			if (icmd->ipkt_nodma) {
				/* Sense data follows rsp + response data. */
				sense_ptr = (struct scsi_extended_sense *)
				    ((caddr_t)fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len);
			} else {
				sense_ptr = &sense_info;

				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len, &sense_info,
				    fpkt->pkt_resp_acc,
				    sizeof (struct scsi_extended_sense));
			}

			/* Translate the sense key into a printable name. */
			if (sense_ptr->es_key < NUM_SENSE_KEYS +
			    NUM_IMPL_SENSE_KEYS) {
				sense_key = sense_keys[sense_ptr->es_key];
			} else {
				sense_key = "Undefined";
			}

			asc = sense_ptr->es_add_code;
			ascq = sense_ptr->es_qual_code;

			(void) sprintf(buf + strlen(buf),
			    ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
			    " Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, sense_key,
			    asc, ascq);
		} else {
			/* Non-CHECK SCSI status: just report it. */
			(void) sprintf(buf + strlen(buf),
			    " : SCSI status=%%x. Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num,
			    rsp->fcp_u.fcp_status.scsi_status);
		}
	} else {
		/* Transport-level failure: report state and reason. */
		caddr_t state, reason, action, expln;

		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
		    &action, &expln);

		(void) sprintf(buf + strlen(buf), ": State:%%s,"
		    " Reason:%%s. Giving up");

		if (scsi_cmd) {
			/* SCSI message format also carries the LUN number. */
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, state, reason);
		} else {
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, state, reason);
		}
	}

	kmem_free(buf, 256);
}
15337
15338
/*
 * fcp_handle_ipkt_errors
 *
 * Decide how to recover from a failed internal-packet send.  'rval' is
 * the transport return code and 'op' names the operation for logging.
 *
 * Returns DDI_SUCCESS when the packet was requeued (or converted to a
 * PLOGI and resent) for retry; DDI_FAILURE when the caller must give up.
 */
static int
fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, int rval, caddr_t op)
{
	int	ret = DDI_FAILURE;
	char	*error;

	switch (rval) {
	case FC_DEVICE_BUSY_NEW_RSCN:
		/*
		 * This means that there was a new RSCN that the transport
		 * knows about (which the ULP *may* know about too) but the
		 * pkt that was sent down was related to an older RSCN. So, we
		 * are just going to reset the retry count and deadline and
		 * continue to retry. The idea is that transport is currently
		 * working on the new RSCN and will soon let the ULPs know
		 * about it and when it does the existing logic will kick in
		 * where it will change the tcount to indicate that something
		 * changed on the target. So, rediscovery will start and there
		 * will not be an infinite retry.
		 *
		 * For a full flow of how the RSCN info is transferred back and
		 * forth, see fp.c
		 */
		icmd->ipkt_retries = 0;
		icmd->ipkt_port->port_deadline = fcp_watchdog_time +
		    FCP_ICMD_DEADLINE;

		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		/* FALLTHROUGH */

	case FC_STATEC_BUSY:
	case FC_DEVICE_BUSY:
	case FC_PBUSY:
	case FC_FBUSY:
	case FC_TRAN_BUSY:
	case FC_OFFLINE:
		/* Transient busy/offline: requeue for retry if permitted. */
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
		    fcp_is_retryable(icmd)) {
			fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
			ret = DDI_SUCCESS;
		}
		break;

	case FC_LOGINREQ:
		/*
		 * FC_LOGINREQ used to be handled just like all the cases
		 * above. It has been changed to handled a PRLI that fails
		 * with FC_LOGINREQ different than other ipkts that fail
		 * with FC_LOGINREQ. If a PRLI fails with FC_LOGINREQ it is
		 * a simple matter to turn it into a PLOGI instead, so that's
		 * exactly what we do here.
		 */
		if (icmd->ipkt_opcode == LA_ELS_PRLI) {
			/* Port wants a login first: reissue as PLOGI. */
			ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
			    icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
			    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		} else {
			/* Non-PRLI: treat like the busy cases above. */
			FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
			    FCP_BUF_LEVEL_3, 0,
			    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
			    rval, ptgt->tgt_d_id);
			if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
			    fcp_is_retryable(icmd)) {
				fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
				ret = DDI_SUCCESS;
			}
		}
		break;

	default:
		/*
		 * Unexpected error.  Warn only if the port/target state has
		 * not changed since the packet was built; after a state
		 * change the failure is expected and rediscovery handles it.
		 */
		mutex_enter(&pptr->port_mutex);
		mutex_enter(&ptgt->tgt_mutex);
		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);

			(void) fc_ulp_error(rval, &error);
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to send %s to D_ID=%x error=%s",
			    op, ptgt->tgt_d_id, error);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_handle_ipkt_errors,1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
		}
		break;
	}

	return (ret);
}
15440
15441
15442 /*
15443 * Check of outstanding commands on any LUN for this target
15444 */
15445 static int
15446 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15447 {
15448 struct fcp_lun *plun;
15449 struct fcp_pkt *cmd;
15450
15451 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15452 mutex_enter(&plun->lun_mutex);
15453 for (cmd = plun->lun_pkt_head; cmd != NULL;
15454 cmd = cmd->cmd_forw) {
15455 if (cmd->cmd_state == FCP_PKT_ISSUED) {
15456 mutex_exit(&plun->lun_mutex);
15457 return (FC_SUCCESS);
15458 }
15459 }
15460 mutex_exit(&plun->lun_mutex);
15461 }
15462
15463 return (FC_FAILURE);
15464 }
15465
15466 static fc_portmap_t *
15467 fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
15468 {
15469 int i;
15470 fc_portmap_t *devlist;
15471 fc_portmap_t *devptr = NULL;
15472 struct fcp_tgt *ptgt;
15473
15474 mutex_enter(&pptr->port_mutex);
15475 for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
15476 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15477 ptgt = ptgt->tgt_next) {
15478 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15479 ++*dev_cnt;
15480 }
15481 }
15482 }
15483
15484 devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
15485 KM_NOSLEEP);
15486 if (devlist == NULL) {
15487 mutex_exit(&pptr->port_mutex);
15488 fcp_log(CE_WARN, pptr->port_dip,
15489 "!fcp%d: failed to allocate for portmap for construct map",
15490 pptr->port_instance);
15491 return (devptr);
15492 }
15493
15494 for (i = 0; i < FCP_NUM_HASH; i++) {
15495 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15496 ptgt = ptgt->tgt_next) {
15497 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15498 int ret;
15499
15500 ret = fc_ulp_pwwn_to_portmap(
15501 pptr->port_fp_handle,
15502 (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
15503 devlist);
15504
15505 if (ret == FC_SUCCESS) {
15506 devlist++;
15507 continue;
15508 }
15509
15510 devlist->map_pd = NULL;
15511 devlist->map_did.port_id = ptgt->tgt_d_id;
15512 devlist->map_hard_addr.hard_addr =
15513 ptgt->tgt_hard_addr;
15514
15515 devlist->map_state = PORT_DEVICE_INVALID;
15516 devlist->map_type = PORT_DEVICE_OLD;
15517
15518 bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
15519 &devlist->map_nwwn, FC_WWN_SIZE);
15520
15521 bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
15522 &devlist->map_pwwn, FC_WWN_SIZE);
15523
15524 devlist++;
15525 }
15526 }
15527 }
15528
15529 mutex_exit(&pptr->port_mutex);
15530
15531 return (devptr);
15532 }
15533 /*
15534 * Inimate MPxIO that the lun is busy and cannot accept regular IO
15535 */
15536 static void
15537 fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
15538 {
15539 int i;
15540 struct fcp_tgt *ptgt;
15541 struct fcp_lun *plun;
15542
15543 for (i = 0; i < FCP_NUM_HASH; i++) {
15544 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15545 ptgt = ptgt->tgt_next) {
15546 mutex_enter(&ptgt->tgt_mutex);
15547 for (plun = ptgt->tgt_lun; plun != NULL;
15548 plun = plun->lun_next) {
15549 if (plun->lun_mpxio &&
15550 plun->lun_state & FCP_LUN_BUSY) {
15551 if (!fcp_pass_to_hp(pptr, plun,
15552 plun->lun_cip,
15553 FCP_MPXIO_PATH_SET_BUSY,
15554 pptr->port_link_cnt,
15555 ptgt->tgt_change_cnt, 0, 0)) {
15556 FCP_TRACE(fcp_logq,
15557 pptr->port_instbuf,
15558 fcp_trace,
15559 FCP_BUF_LEVEL_2, 0,
15560 "path_verifybusy: "
15561 "disable lun %p failed!",
15562 plun);
15563 }
15564 }
15565 }
15566 mutex_exit(&ptgt->tgt_mutex);
15567 }
15568 }
15569 }
15570
15571 static int
15572 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
15573 {
15574 dev_info_t *cdip = NULL;
15575 dev_info_t *pdip = NULL;
15576
15577 ASSERT(plun);
15578
15579 mutex_enter(&plun->lun_mutex);
15580 if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
15581 mutex_exit(&plun->lun_mutex);
15582 return (NDI_FAILURE);
15583 }
15584 mutex_exit(&plun->lun_mutex);
15585 cdip = mdi_pi_get_client(PIP(cip));
15586 pdip = mdi_pi_get_phci(PIP(cip));
15587
15588 ASSERT(cdip != NULL);
15589 ASSERT(pdip != NULL);
15590
15591 if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
15592 /* LUN ready for IO */
15593 (void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15594 } else {
15595 /* LUN busy to accept IO */
15596 (void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15597 }
15598 return (NDI_SUCCESS);
15599 }
15600
15601 /*
15602 * Caller must free the returned string of MAXPATHLEN len
15603 * If the device is offline (-1 instance number) NULL
15604 * will be returned.
15605 */
15606 static char *
15607 fcp_get_lun_path(struct fcp_lun *plun)
15608 {
15609 dev_info_t *dip = NULL;
15610 char *path = NULL;
15611 mdi_pathinfo_t *pip = NULL;
15612
15613 if (plun == NULL) {
15614 return (NULL);
15615 }
15616
15617 mutex_enter(&plun->lun_mutex);
15618 if (plun->lun_mpxio == 0) {
15619 dip = DIP(plun->lun_cip);
15620 mutex_exit(&plun->lun_mutex);
15621 } else {
15622 /*
15623 * lun_cip must be accessed with lun_mutex held. Here
15624 * plun->lun_cip either points to a valid node or it is NULL.
15625 * Make a copy so that we can release lun_mutex.
15626 */
15627 pip = PIP(plun->lun_cip);
15628
15629 /*
15630 * Increase ref count on the path so that we can release
15631 * lun_mutex and still be sure that the pathinfo node (and thus
15632 * also the client) is not deallocated. If pip is NULL, this
15633 * has no effect.
15634 */
15635 mdi_hold_path(pip);
15636
15637 mutex_exit(&plun->lun_mutex);
15638
15639 /* Get the client. If pip is NULL, we get NULL. */
15640 dip = mdi_pi_get_client(pip);
15641 }
15642
15643 if (dip == NULL)
15644 goto out;
15645 if (ddi_get_instance(dip) < 0)
15646 goto out;
15647
15648 path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
15649 if (path == NULL)
15650 goto out;
15651
15652 (void) ddi_pathname(dip, path);
15653
15654 /* Clean up. */
15655 out:
15656 if (pip != NULL)
15657 mdi_rele_path(pip);
15658
15659 /*
15660 * In reality, the user wants a fully valid path (one they can open)
15661 * but this string is lacking the mount point, and the minor node.
15662 * It would be nice if we could "figure these out" somehow
15663 * and fill them in. Otherwise, the userland code has to understand
15664 * driver specific details of which minor node is the "best" or
15665 * "right" one to expose. (Ex: which slice is the whole disk, or
15666 * which tape doesn't rewind)
15667 */
15668 return (path);
15669 }
15670
/*
 * fcp_scsi_bus_config
 *
 * SCSA bus_config(9E) entry point.  'reset_delay' is the portion of
 * FCP_INIT_WAIT_TIMEOUT (in ticks) still remaining since the port
 * attached; it bounds how long we wait for fabric devices to report in
 * before configuring children.
 */
static int
fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int64_t		reset_delay;
	int		rval, retry = 0;
	struct fcp_port	*pptr = fcp_dip2port(parent);

	/* Time left in the initial discovery window; clamp at zero. */
	reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
	    (ddi_get_lbolt64() - pptr->port_attach_time);
	if (reset_delay < 0) {
		reset_delay = 0;
	}

	if (fcp_bus_config_debug) {
		flag |= NDI_DEVI_DEBUG;
	}

	switch (op) {
	case BUS_CONFIG_ONE:
		/*
		 * Retry the command since we need to ensure
		 * the fabric devices are available for root
		 */
		while (retry++ < fcp_max_bus_config_retries) {
			rval = (ndi_busop_bus_config(parent,
			    flag | NDI_MDI_FALLBACK, op,
			    arg, childp, (clock_t)reset_delay));
			if (rval == 0) {
				return (rval);
			}
		}

		/*
		 * drain taskq to make sure nodes are created and then
		 * try again.
		 */
		taskq_wait(DEVI(parent)->devi_taskq);
		return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
		    op, arg, childp, 0));

	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL: {
		/*
		 * delay till all devices report in (port_tmp_cnt == 0)
		 * or FCP_INIT_WAIT_TIMEOUT
		 */
		mutex_enter(&pptr->port_mutex);
		while ((reset_delay > 0) && pptr->port_tmp_cnt) {
			(void) cv_timedwait(&pptr->port_config_cv,
			    &pptr->port_mutex,
			    ddi_get_lbolt() + (clock_t)reset_delay);
			/* Recompute the remaining window after each wakeup. */
			reset_delay =
			    (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
			    (ddi_get_lbolt64() - pptr->port_attach_time);
		}
		mutex_exit(&pptr->port_mutex);
		/* drain taskq to make sure nodes are created */
		taskq_wait(DEVI(parent)->devi_taskq);
		return (ndi_busop_bus_config(parent, flag, op,
		    arg, childp, 0));
	}

	default:
		return (NDI_FAILURE);
	}
	/*NOTREACHED*/
}
15739
15740 static int
15741 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15742 ddi_bus_config_op_t op, void *arg)
15743 {
15744 if (fcp_bus_config_debug) {
15745 flag |= NDI_DEVI_DEBUG;
15746 }
15747
15748 return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15749 }
15750
15751
15752 /*
15753 * Routine to copy GUID into the lun structure.
15754 * returns 0 if copy was successful and 1 if encountered a
15755 * failure and did not copy the guid.
15756 */
15757 static int
15758 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
15759 {
15760
15761 int retval = 0;
15762
15763 /* add one for the null terminator */
15764 const unsigned int len = strlen(guidp) + 1;
15765
15766 if ((guidp == NULL) || (plun == NULL)) {
15767 return (1);
15768 }
15769
15770 /*
15771 * if the plun->lun_guid already has been allocated,
15772 * then check the size. if the size is exact, reuse
15773 * it....if not free it an allocate the required size.
15774 * The reallocation should NOT typically happen
15775 * unless the GUIDs reported changes between passes.
15776 * We free up and alloc again even if the
15777 * size was more than required. This is due to the
15778 * fact that the field lun_guid_size - serves
15779 * dual role of indicating the size of the wwn
15780 * size and ALSO the allocation size.
15781 */
15782 if (plun->lun_guid) {
15783 if (plun->lun_guid_size != len) {
15784 /*
15785 * free the allocated memory and
15786 * initialize the field
15787 * lun_guid_size to 0.
15788 */
15789 kmem_free(plun->lun_guid, plun->lun_guid_size);
15790 plun->lun_guid = NULL;
15791 plun->lun_guid_size = 0;
15792 }
15793 }
15794 /*
15795 * alloc only if not already done.
15796 */
15797 if (plun->lun_guid == NULL) {
15798 plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15799 if (plun->lun_guid == NULL) {
15800 cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block:"
15801 "Unable to allocate"
15802 "Memory for GUID!!! size %d", len);
15803 retval = 1;
15804 } else {
15805 plun->lun_guid_size = len;
15806 }
15807 }
15808 if (plun->lun_guid) {
15809 /*
15810 * now copy the GUID
15811 */
15812 bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15813 }
15814 return (retval);
15815 }
15816
15817 /*
15818 * fcp_reconfig_wait
15819 *
15820 * Wait for a rediscovery/reconfiguration to complete before continuing.
15821 */
15822
15823 static void
15824 fcp_reconfig_wait(struct fcp_port *pptr)
15825 {
15826 clock_t reconfig_start, wait_timeout;
15827
15828 /*
15829 * Quick check. If pptr->port_tmp_cnt is 0, there is no
15830 * reconfiguration in progress.
15831 */
15832
15833 mutex_enter(&pptr->port_mutex);
15834 if (pptr->port_tmp_cnt == 0) {
15835 mutex_exit(&pptr->port_mutex);
15836 return;
15837 }
15838 mutex_exit(&pptr->port_mutex);
15839
15840 /*
15841 * If we cause a reconfig by raising power, delay until all devices
15842 * report in (port_tmp_cnt returns to 0)
15843 */
15844
15845 reconfig_start = ddi_get_lbolt();
15846 wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);
15847
15848 mutex_enter(&pptr->port_mutex);
15849
15850 while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
15851 pptr->port_tmp_cnt) {
15852
15853 (void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
15854 reconfig_start + wait_timeout);
15855 }
15856
15857 mutex_exit(&pptr->port_mutex);
15858
15859 /*
15860 * Even if fcp_tmp_count isn't 0, continue without error. The port
15861 * we want may still be ok. If not, it will error out later
15862 */
15863 }
15864
15865 /*
15866 * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
15867 * We rely on the fcp_global_mutex to provide protection against changes to
15868 * the fcp_lun_blacklist.
15869 *
15870 * You can describe a list of target port WWNs and LUN numbers which will
15871 * not be configured. LUN numbers will be interpreted as decimal. White
15872 * spaces and ',' can be used in the list of LUN numbers.
15873 *
15874 * To prevent LUNs 1 and 2 from being configured for target
15875 * port 510000f010fd92a1 and target port 510000e012079df1, set:
15876 *
15877 * pwwn-lun-blacklist=
15878 * "510000f010fd92a1,1,2",
15879 * "510000e012079df1,1,2";
15880 */
static void
fcp_read_blacklist(dev_info_t *dip,
    struct fcp_black_list_entry **pplun_blacklist)
{
	char		**prop_array = NULL;	/* property string array */
	char		*curr_pwwn = NULL;	/* current entry's WWN part */
	char		*curr_lun = NULL;	/* current entry's LUN list */
	uint32_t	prop_item = 0;		/* number of array entries */
	int		idx = 0;
	int		len = 0;

	ASSERT(mutex_owned(&fcp_global_mutex));
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
		/* No blacklist property: leave the list untouched. */
		return;
	}

	for (idx = 0; idx < prop_item; idx++) {

		curr_pwwn = prop_array[idx];
		/* Skip leading spaces. */
		while (*curr_pwwn == ' ') {
			curr_pwwn++;
		}
		/* Need at least a WWN, a separator, and one LUN character. */
		if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
			    ", please check.", curr_pwwn);
			continue;
		}
		/* The 16-digit WWN must be followed by ' ' or ','. */
		if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
		    (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
			    ", please check.", curr_pwwn);
			continue;
		}
		/* Every WWN character must be a hex digit. */
		for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
			if (isxdigit(curr_pwwn[len]) != TRUE) {
				fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
				    "blacklist, please check.", curr_pwwn);
				break;
			}
		}
		if (len != sizeof (la_wwn_t) * 2) {
			/* Hex-digit scan above bailed early: skip entry. */
			continue;
		}

		/* Split the entry: NUL-terminate the WWN, parse the LUNs. */
		curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
		*(curr_lun - 1) = '\0';
		fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
	}

	ddi_prop_free(prop_array);
}
15934
15935 /*
15936 * Get the masking info about one remote target port designated by wwn.
15937 * Lun ids could be separated by ',' or white spaces.
15938 */
static void
fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
    struct fcp_black_list_entry **pplun_blacklist)
{
	int		idx = 0;	/* length of the current LUN token */
	uint32_t	offset = 0;	/* scan position within curr_lun */
	unsigned long	lun_id = 0;
	char		lunid_buf[16];	/* bounded copy of one LUN token */
	char		*pend = NULL;
	int		illegal_digit = 0; /* non-digit chars seen in token */

	while (offset < strlen(curr_lun)) {
		/* Scan one token up to the next ',', ' ' or end of string. */
		while ((curr_lun[offset + idx] != ',') &&
		    (curr_lun[offset + idx] != '\0') &&
		    (curr_lun[offset + idx] != ' ')) {
			if (isdigit(curr_lun[offset + idx]) == 0) {
				illegal_digit++;
			}
			idx++;
		}
		if (illegal_digit > 0) {
			offset += (idx+1); /* To the start of next lun */
			idx = 0;
			illegal_digit = 0;
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check digits.",
			    curr_lun, curr_pwwn);
			continue;
		}
		/* A token that overflows lunid_buf cannot be a valid LUN#. */
		if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check the length of LUN#.",
			    curr_lun, curr_pwwn);
			break;
		}
		if (idx == 0) { /* ignore ' ' or ',' or '\0' */
			offset++;
			continue;
		}

		/* NUL-terminate a copy of the token and parse it (decimal). */
		bcopy(curr_lun + offset, lunid_buf, idx);
		lunid_buf[idx] = '\0';
		if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
			/* ddi_strtoul returns 0 on success. */
			fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
		} else {
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check %s.",
			    curr_lun, curr_pwwn, lunid_buf);
		}
		offset += (idx+1); /* To the start of next lun */
		idx = 0;
	}
}
15992
15993 /*
15994 * Add one masking record
15995 */
15996 static void
15997 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
15998 struct fcp_black_list_entry **pplun_blacklist)
15999 {
16000 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
16001 struct fcp_black_list_entry *new_entry = NULL;
16002 la_wwn_t wwn;
16003
16004 fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
16005 while (tmp_entry) {
16006 if ((bcmp(&tmp_entry->wwn, &wwn,
16007 sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
16008 return;
16009 }
16010
16011 tmp_entry = tmp_entry->next;
16012 }
16013
16014 /* add to black list */
16015 new_entry = (struct fcp_black_list_entry *)kmem_zalloc
16016 (sizeof (struct fcp_black_list_entry), KM_SLEEP);
16017 bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
16018 new_entry->lun = lun_id;
16019 new_entry->masked = 0;
16020 new_entry->next = *pplun_blacklist;
16021 *pplun_blacklist = new_entry;
16022 }
16023
16024 /*
16025 * Check if we should mask the specified lun of this fcp_tgt
16026 */
16027 static int
16028 fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id)
16029 {
16030 struct fcp_black_list_entry *remote_port;
16031
16032 remote_port = fcp_lun_blacklist;
16033 while (remote_port != NULL) {
16034 if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
16035 if (remote_port->lun == lun_id) {
16036 remote_port->masked++;
16037 if (remote_port->masked == 1) {
16038 fcp_log(CE_NOTE, NULL, "LUN %d of port "
16039 "%02x%02x%02x%02x%02x%02x%02x%02x "
16040 "is masked due to black listing.\n",
16041 lun_id, wwn->raw_wwn[0],
16042 wwn->raw_wwn[1], wwn->raw_wwn[2],
16043 wwn->raw_wwn[3], wwn->raw_wwn[4],
16044 wwn->raw_wwn[5], wwn->raw_wwn[6],
16045 wwn->raw_wwn[7]);
16046 }
16047 return (TRUE);
16048 }
16049 }
16050 remote_port = remote_port->next;
16051 }
16052 return (FALSE);
16053 }
16054
16055 /*
16056 * Release all allocated resources
16057 */
16058 static void
16059 fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist)
16060 {
16061 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
16062 struct fcp_black_list_entry *current_entry = NULL;
16063
16064 ASSERT(mutex_owned(&fcp_global_mutex));
16065 /*
16066 * Traverse all luns
16067 */
16068 while (tmp_entry) {
16069 current_entry = tmp_entry;
16070 tmp_entry = tmp_entry->next;
16071 kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
16072 }
16073 *pplun_blacklist = NULL;
16074 }
16075
16076 /*
16077 * In fcp module,
16078 * pkt@scsi_pkt, cmd@fcp_pkt, icmd@fcp_ipkt, fpkt@fc_packet, pptr@fcp_port
16079 */
/*
 * fcp_pseudo_init_pkt
 *
 * tran_init_pkt(9E) entry point for pseudo FC HBAs.  Allocates (or
 * reuses) a scsi_pkt with the fcp_pkt and FCA private space appended,
 * wires up the embedded fc_packet, and maps the data buffer.
 *
 * Returns the scsi_pkt, or NULL if allocation fails.
 */
static struct scsi_pkt *
fcp_pseudo_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
	fcp_port_t	*pptr = ADDR2FCP(ap);
	fcp_pkt_t	*cmd = NULL;
	fc_frame_hdr_t	*hp;

	/*
	 * First step: get the packet
	 */
	if (pkt == NULL) {
		pkt = scsi_hba_pkt_alloc(pptr->port_dip, ap, cmdlen, statuslen,
		    tgtlen, sizeof (fcp_pkt_t) + pptr->port_priv_pkt_len,
		    callback, arg);
		if (pkt == NULL) {
			return (NULL);
		}

		/*
		 * All fields in scsi_pkt will be initialized properly or
		 * set to zero. We need do nothing for scsi_pkt.
		 */
		/*
		 * But it's our responsibility to link other related data
		 * structures. Their initialization will be done, just
		 * before the scsi_pkt will be sent to FCA.
		 */
		cmd = PKT2CMD(pkt);
		cmd->cmd_pkt = pkt;
		cmd->cmd_fp_pkt = &cmd->cmd_fc_packet;
		/*
		 * fc_packet_t: point its private/command/response areas at
		 * the space embedded in (and appended after) the fcp_pkt.
		 */
		cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
		cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
		    sizeof (struct fcp_pkt));
		cmd->cmd_fp_pkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
		cmd->cmd_fp_pkt->pkt_cmdlen = sizeof (struct fcp_cmd);
		cmd->cmd_fp_pkt->pkt_resp = cmd->cmd_fcp_rsp;
		cmd->cmd_fp_pkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
		/*
		 * Fill in the Fabric Channel Header
		 */
		hp = &cmd->cmd_fp_pkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_COMMAND;
		hp->rsvd = 0;
		hp->type = FC_TYPE_SCSI_FCP;
		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->seq_id = 0;
		hp->df_ctl = 0;
		hp->seq_cnt = 0;
		hp->ox_id = 0xffff;
		hp->rx_id = 0xffff;
		hp->ro = 0;
	} else {
		/*
		 * We need think if we should reset any elements in
		 * related data structures.
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_6, 0,
		    "reusing pkt, flags %d", flags);
		cmd = PKT2CMD(pkt);
		/* Drop any stale port-device handle from the previous use. */
		if (cmd->cmd_fp_pkt->pkt_pd) {
			cmd->cmd_fp_pkt->pkt_pd = NULL;
		}
	}

	/*
	 * Second step: dma allocation/move
	 */
	if (bp && bp->b_bcount != 0) {
		/*
		 * Mark if it's read or write
		 */
		if (bp->b_flags & B_READ) {
			cmd->cmd_flags |= CFLAG_IS_READ;
		} else {
			cmd->cmd_flags &= ~CFLAG_IS_READ;
		}

		/* Map the buffer into kernel virtual address space. */
		bp_mapin(bp);
		cmd->cmd_fp_pkt->pkt_data = bp->b_un.b_addr;
		cmd->cmd_fp_pkt->pkt_datalen = bp->b_bcount;
		cmd->cmd_fp_pkt->pkt_data_resid = 0;
	} else {
		/*
		 * It seldom happens, except when CLUSTER or SCSI_VHCI wants
		 * to send zero-length read/write.
		 */
		cmd->cmd_fp_pkt->pkt_data = NULL;
		cmd->cmd_fp_pkt->pkt_datalen = 0;
	}

	return (pkt);
}
16178
16179 static void
16180 fcp_pseudo_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16181 {
16182 fcp_port_t *pptr = ADDR2FCP(ap);
16183
16184 /*
16185 * First we let FCA to uninitilize private part.
16186 */
16187 (void) fc_ulp_uninit_packet(pptr->port_fp_handle,
16188 PKT2CMD(pkt)->cmd_fp_pkt);
16189
16190 /*
16191 * Then we uninitialize fc_packet.
16192 */
16193
16194 /*
16195 * Thirdly, we uninitializae fcp_pkt.
16196 */
16197
16198 /*
16199 * In the end, we free scsi_pkt.
16200 */
16201 scsi_hba_pkt_free(ap, pkt);
16202 }
16203
/*
 * fcp_pseudo_start
 *
 * tran_start(9E) entry point for pseudo FC HBAs.  Finishes initializing
 * the fcp_cmd/fc_packet prepared by fcp_pseudo_init_pkt() and hands the
 * packet to the transport, or polls it when FLAG_NOINTR is set.
 *
 * Returns TRAN_ACCEPT, TRAN_BUSY or TRAN_FATAL_ERROR (or the result of
 * fcp_dopoll() in the polled case).
 */
static int
fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	fcp_port_t	*pptr = ADDR2FCP(ap);
	fcp_lun_t	*plun = ADDR2LUN(ap);
	fcp_tgt_t	*ptgt = plun->lun_tgt;
	fcp_pkt_t	*cmd = PKT2CMD(pkt);
	fcp_cmd_t	*fcmd = &cmd->cmd_fcp_cmd;
	fc_packet_t	*fpkt = cmd->cmd_fp_pkt;
	int		rval;

	/* Bind the packet to the target's port-device handle. */
	fpkt->pkt_pd = ptgt->tgt_pd_handle;
	(void) fc_ulp_init_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt, 1);

	/*
	 * Firstly, we need initialize fcp_pkt_t
	 * Secondly, we need initialize fcp_cmd_t.
	 */
	bcopy(pkt->pkt_cdbp, fcmd->fcp_cdb, pkt->pkt_cdblen);
	fcmd->fcp_data_len = fpkt->pkt_datalen;
	fcmd->fcp_ent_addr = plun->lun_addr;
	/* Map the SCSA tag flags onto the FCP task queue type. */
	if (pkt->pkt_flags & FLAG_HTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
	} else if (pkt->pkt_flags & FLAG_OTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
	} else if (pkt->pkt_flags & FLAG_STAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
	} else {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	}

	/* Set the data-direction bits from the CFLAG set at init time. */
	if (cmd->cmd_flags & CFLAG_IS_READ) {
		fcmd->fcp_cntl.cntl_read_data = 1;
		fcmd->fcp_cntl.cntl_write_data = 0;
	} else {
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 1;
	}

	/*
	 * Then we need initialize fc_packet_t too.
	 */
	/* Two seconds of slack over the SCSI timeout -- TODO confirm why. */
	fpkt->pkt_timeout = pkt->pkt_time + 2;
	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
	if (cmd->cmd_flags & CFLAG_IS_READ) {
		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
	} else {
		fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
	}

	if (pkt->pkt_flags & FLAG_NOINTR) {
		/* Polled mode: no completion callback. */
		fpkt->pkt_comp = NULL;
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
	} else {
		fpkt->pkt_comp = fcp_cmd_callback;
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
		if (pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
		}
	}

	/*
	 * Lastly, we need initialize scsi_pkt
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;
	pkt->pkt_resid = 0;

	/*
	 * if interrupts aren't allowed (e.g. at dump time) then we'll
	 * have to do polled I/O
	 */
	if (pkt->pkt_flags & FLAG_NOINTR) {
		return (fcp_dopoll(pptr, cmd));
	}

	cmd->cmd_state = FCP_PKT_ISSUED;
	rval = fcp_transport(pptr->port_fp_handle, fpkt, 0);
	if (rval == FC_SUCCESS) {
		return (TRAN_ACCEPT);
	}

	/*
	 * Need more consideration
	 *
	 * pkt->pkt_flags & FLAG_NOQUEUE could abort other pkt
	 */
	cmd->cmd_state = FCP_PKT_IDLE;
	if (rval == FC_TRAN_BUSY) {
		return (TRAN_BUSY);
	} else {
		return (TRAN_FATAL_ERROR);
	}
}
16300
16301 /*
16302 * scsi_poll will always call tran_sync_pkt for pseudo FC-HBAs
16303 * SCSA will initialize it to scsi_sync_cache_pkt for physical FC-HBAs
16304 */
16305 static void
16306 fcp_pseudo_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16307 {
16308 FCP_TRACE(fcp_logq, "fcp_pseudo_sync_pkt", fcp_trace,
16309 FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16310 }
16311
16312 /*
16313 * scsi_dmafree will always call tran_dmafree, when STATE_ARQ_DONE
16314 */
16315 static void
16316 fcp_pseudo_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
16317 {
16318 FCP_TRACE(fcp_logq, "fcp_pseudo_dmafree", fcp_trace,
16319 FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16320 }