1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
23 *
24 * Fibre Channel SCSI ULP Mapping driver
25 */
26
27 #include <sys/scsi/scsi.h>
28 #include <sys/types.h>
29 #include <sys/varargs.h>
30 #include <sys/devctl.h>
31 #include <sys/thread.h>
32 #include <sys/thread.h>
33 #include <sys/open.h>
34 #include <sys/file.h>
35 #include <sys/sunndi.h>
36 #include <sys/console.h>
37 #include <sys/proc.h>
38 #include <sys/time.h>
39 #include <sys/utsname.h>
40 #include <sys/scsi/impl/scsi_reset_notify.h>
41 #include <sys/ndi_impldefs.h>
42 #include <sys/byteorder.h>
43 #include <sys/fs/dv_node.h>
44 #include <sys/ctype.h>
45 #include <sys/sunmdi.h>
46
47 #include <sys/fibre-channel/fc.h>
48 #include <sys/fibre-channel/impl/fc_ulpif.h>
49 #include <sys/fibre-channel/ulp/fcpvar.h>
50
51 /*
52 * Discovery Process
53 * =================
54 *
55 * The discovery process is a major function of FCP. In order to help
56 * understand that function a flow diagram is given here. This diagram
57 * doesn't claim to cover all the cases and the events that can occur during
58 * the discovery process nor the subtleties of the code. The code paths shown
59 * are simplified. Its purpose is to help the reader (and potentially bug
60 * fixer) have an overall view of the logic of the code. For that reason the
61 * diagram covers the simple case of the line coming up cleanly or of a new
 * port attaching to FCP with the link being up. The reader must keep in mind
63 * that:
64 *
65 * - There are special cases where bringing devices online and offline
66 * is driven by Ioctl.
67 *
68 * - The behavior of the discovery process can be modified through the
69 * .conf file.
70 *
71 * - The line can go down and come back up at any time during the
72 * discovery process which explains some of the complexity of the code.
73 *
74 * ............................................................................
75 *
76 * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
77 *
78 *
79 * +-------------------------+
80 * fp/fctl module --->| fcp_port_attach |
81 * +-------------------------+
82 * | |
83 * | |
84 * | v
85 * | +-------------------------+
86 * | | fcp_handle_port_attach |
87 * | +-------------------------+
88 * | |
89 * | |
90 * +--------------------+ |
91 * | |
92 * v v
93 * +-------------------------+
94 * | fcp_statec_callback |
95 * +-------------------------+
96 * |
97 * |
98 * v
99 * +-------------------------+
100 * | fcp_handle_devices |
101 * +-------------------------+
102 * |
103 * |
104 * v
105 * +-------------------------+
106 * | fcp_handle_mapflags |
107 * +-------------------------+
108 * |
109 * |
110 * v
111 * +-------------------------+
112 * | fcp_send_els |
113 * | |
114 * | PLOGI or PRLI To all the|
115 * | reachable devices. |
116 * +-------------------------+
117 *
118 *
119 * ............................................................................
120 *
121 * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
122 * STEP 1 are called (it is actually the same function).
123 *
124 *
125 * +-------------------------+
126 * | fcp_icmd_callback |
127 * fp/fctl module --->| |
128 * | callback for PLOGI and |
129 * | PRLI. |
130 * +-------------------------+
131 * |
132 * |
133 * Received PLOGI Accept /-\ Received PRLI Accept
134 * _ _ _ _ _ _ / \_ _ _ _ _ _
135 * | \ / |
136 * | \-/ |
137 * | |
138 * v v
139 * +-------------------------+ +-------------------------+
140 * | fcp_send_els | | fcp_send_scsi |
141 * | | | |
142 * | PRLI | | REPORT_LUN |
143 * +-------------------------+ +-------------------------+
144 *
145 * ............................................................................
146 *
147 * STEP 3: The callback functions of the SCSI commands issued by FCP are called
148 * (It is actually the same function).
149 *
150 *
151 * +-------------------------+
152 * fp/fctl module ------->| fcp_scsi_callback |
153 * +-------------------------+
154 * |
155 * |
156 * |
157 * Receive REPORT_LUN reply /-\ Receive INQUIRY PAGE83 reply
158 * _ _ _ _ _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ _ _ _ _
159 * | \ / |
160 * | \-/ |
161 * | | |
162 * | Receive INQUIRY reply| |
163 * | | |
164 * v v v
165 * +------------------------+ +----------------------+ +----------------------+
166 * | fcp_handle_reportlun | | fcp_handle_inquiry | | fcp_handle_page83 |
167 * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
168 * +------------------------+ +----------------------+ +----------------------+
169 * | | |
170 * | | |
171 * | | |
172 * v v |
173 * +-----------------+ +-----------------+ |
174 * | fcp_send_scsi | | fcp_send_scsi | |
175 * | | | | |
176 * | INQUIRY | | INQUIRY PAGE83 | |
177 * | (To each LUN) | +-----------------+ |
178 * +-----------------+ |
179 * |
180 * v
181 * +------------------------+
182 * | fcp_call_finish_init |
183 * +------------------------+
184 * |
185 * v
186 * +-----------------------------+
187 * | fcp_call_finish_init_held |
188 * +-----------------------------+
189 * |
190 * |
191 * All LUNs scanned /-\
192 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ / \
193 * | \ /
194 * | \-/
195 * v |
196 * +------------------+ |
197 * | fcp_finish_tgt | |
198 * +------------------+ |
199 * | Target Not Offline and |
200 * Target Not Offline and | not marked and tgt_node_state |
201 * marked /-\ not FCP_TGT_NODE_ON_DEMAND |
202 * _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ |
203 * | \ / | |
204 * | \-/ | |
205 * v v |
206 * +----------------------------+ +-------------------+ |
207 * | fcp_offline_target | | fcp_create_luns | |
208 * | | +-------------------+ |
209 * | A structure fcp_tgt_elem | | |
210 * | is created and queued in | v |
211 * | the FCP port list | +-------------------+ |
212 * | port_offline_tgts. It | | fcp_pass_to_hp | |
213 * | will be unqueued by the | | | |
214 * | watchdog timer. | | Called for each | |
215 * +----------------------------+ | LUN. Dispatches | |
216 * | | fcp_hp_task | |
217 * | +-------------------+ |
218 * | | |
219 * | | |
220 * | | |
221 * | +---------------->|
222 * | |
223 * +---------------------------------------------->|
224 * |
225 * |
226 * All the targets (devices) have been scanned /-\
227 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ / \
228 * | \ /
229 * | \-/
230 * +-------------------------------------+ |
231 * | fcp_finish_init | |
232 * | | |
233 * | Signal broadcasts the condition | |
234 * | variable port_config_cv of the FCP | |
235 * | port. One potential code sequence | |
236 * | waiting on the condition variable | |
237 * | the code sequence handling | |
238 * | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER| |
239 * | The other is in the function | |
240 * | fcp_reconfig_wait which is called | |
241 * | in the transmit path preventing IOs | |
242 * | from going through till the disco- | |
243 * | very process is over. | |
244 * +-------------------------------------+ |
245 * | |
246 * | |
247 * +--------------------------------->|
248 * |
249 * v
250 * Return
251 *
252 * ............................................................................
253 *
254 * STEP 4: The hot plug task is called (for each fcp_hp_elem).
255 *
256 *
257 * +-------------------------+
258 * | fcp_hp_task |
259 * +-------------------------+
260 * |
261 * |
262 * v
263 * +-------------------------+
264 * | fcp_trigger_lun |
265 * +-------------------------+
266 * |
267 * |
268 * v
269 * Bring offline /-\ Bring online
270 * _ _ _ _ _ _ _ _ _/ \_ _ _ _ _ _ _ _ _ _
271 * | \ / |
272 * | \-/ |
273 * v v
274 * +---------------------+ +-----------------------+
275 * | fcp_offline_child | | fcp_get_cip |
276 * +---------------------+ | |
277 * | Creates a dev_info_t |
278 * | or a mdi_pathinfo_t |
279 * | depending on whether |
280 * | mpxio is on or off. |
281 * +-----------------------+
282 * |
283 * |
284 * v
285 * +-----------------------+
286 * | fcp_online_child |
287 * | |
288 * | Set device online |
289 * | using NDI or MDI. |
290 * +-----------------------+
291 *
292 * ............................................................................
293 *
 * STEP 5: The watchdog timer expires. The watchdog timer does much more than
 * what is described here. We only show the target offline path.
296 *
297 *
298 * +--------------------------+
299 * | fcp_watch |
300 * +--------------------------+
301 * |
302 * |
303 * v
304 * +--------------------------+
305 * | fcp_scan_offline_tgts |
306 * +--------------------------+
307 * |
308 * |
309 * v
310 * +--------------------------+
311 * | fcp_offline_target_now |
312 * +--------------------------+
313 * |
314 * |
315 * v
316 * +--------------------------+
317 * | fcp_offline_tgt_luns |
318 * +--------------------------+
319 * |
320 * |
321 * v
322 * +--------------------------+
323 * | fcp_offline_lun |
324 * +--------------------------+
325 * |
326 * |
327 * v
328 * +----------------------------------+
329 * | fcp_offline_lun_now |
330 * | |
331 * | A request (or two if mpxio) is |
332 * | sent to the hot plug task using |
333 * | a fcp_hp_elem structure. |
334 * +----------------------------------+
335 */
336
337 /*
338 * Functions registered with DDI framework
339 */
340 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
341 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
342 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
343 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
344 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
345 cred_t *credp, int *rval);
346
347 /*
348 * Functions registered with FC Transport framework
349 */
350 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
351 fc_attach_cmd_t cmd, uint32_t s_id);
352 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
353 fc_detach_cmd_t cmd);
354 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
355 int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
356 uint32_t claimed);
357 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
358 fc_unsol_buf_t *buf, uint32_t claimed);
359 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
360 fc_unsol_buf_t *buf, uint32_t claimed);
361 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
362 uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
363 uint32_t dev_cnt, uint32_t port_sid);
364
365 /*
366 * Functions registered with SCSA framework
367 */
368 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
369 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
370 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
371 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
372 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
373 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
374 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
375 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
376 static int fcp_scsi_reset(struct scsi_address *ap, int level);
377 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
378 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
379 int whom);
380 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
381 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
382 void (*callback)(caddr_t), caddr_t arg);
383 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
384 char *name, ddi_eventcookie_t *event_cookiep);
385 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
386 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
387 ddi_callback_id_t *cb_id);
388 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
389 ddi_callback_id_t cb_id);
390 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
391 ddi_eventcookie_t eventid, void *impldata);
392 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
393 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
394 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
395 ddi_bus_config_op_t op, void *arg);
396
397 /*
398 * Internal functions
399 */
400 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
401 int mode, int *rval);
402
403 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
404 int mode, int *rval);
405 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
406 struct fcp_scsi_cmd *fscsi, int mode);
407 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
408 caddr_t base_addr, int mode);
409 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
410
411 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
412 la_wwn_t *pwwn, int *ret_val, int *fc_status, int *fc_pkt_state,
413 int *fc_pkt_reason, int *fc_pkt_action);
414 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
415 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
416 static int fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status,
417 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
418 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
419 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
420 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
421 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
422
423 static void fcp_handle_devices(struct fcp_port *pptr,
424 fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
425 fcp_map_tag_t *map_tag, int cause);
426 static int fcp_handle_mapflags(struct fcp_port *pptr,
427 struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
428 int tgt_cnt, int cause);
429 static int fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause);
430 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
431 struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
432 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
433 int cause);
434 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
435 uint32_t state);
436 static struct fcp_port *fcp_get_port(opaque_t port_handle);
437 static void fcp_unsol_callback(fc_packet_t *fpkt);
438 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
439 uchar_t r_ctl, uchar_t type);
440 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
441 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
442 struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
443 int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
444 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
445 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
446 int nodma, int flags);
447 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
448 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
449 uchar_t *wwn);
450 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
451 uint32_t d_id);
452 static void fcp_icmd_callback(fc_packet_t *fpkt);
453 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
454 int len, int lcount, int tcount, int cause, uint32_t rscn_count);
455 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
456 static void fcp_scsi_callback(fc_packet_t *fpkt);
457 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
458 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
459 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
460 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
461 uint16_t lun_num);
462 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
463 int link_cnt, int tgt_cnt, int cause);
464 static void fcp_finish_init(struct fcp_port *pptr);
465 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
466 int tgt_cnt, int cause);
467 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
468 int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags);
469 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
470 int link_cnt, int tgt_cnt, int nowait, int flags);
471 static void fcp_offline_target_now(struct fcp_port *pptr,
472 struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
473 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
474 int tgt_cnt, int flags);
475 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
476 int nowait, int flags);
477 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
478 int tgt_cnt);
479 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
480 int tgt_cnt, int flags);
481 static void fcp_scan_offline_luns(struct fcp_port *pptr);
482 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
483 static void fcp_update_offline_flags(struct fcp_lun *plun);
484 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
485 static void fcp_abort_commands(struct fcp_pkt *head, struct
486 fcp_port *pptr);
487 static void fcp_cmd_callback(fc_packet_t *fpkt);
488 static void fcp_complete_pkt(fc_packet_t *fpkt);
489 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
490 struct fcp_port *pptr);
491 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
492 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
493 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
494 static void fcp_dealloc_lun(struct fcp_lun *plun);
495 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
496 fc_portmap_t *map_entry, int link_cnt);
497 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
498 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
499 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
500 int internal);
501 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
502 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
503 uint32_t s_id, int instance);
504 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
505 int instance);
506 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
507 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
508 int);
509 static void fcp_kmem_cache_destructor(struct scsi_pkt *, scsi_hba_tran_t *);
510 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
511 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
512 int flags);
513 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
514 static int fcp_reset_target(struct scsi_address *ap, int level);
515 static int fcp_commoncap(struct scsi_address *ap, char *cap,
516 int val, int tgtonly, int doset);
517 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
518 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
519 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
520 int sleep);
521 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
522 uint32_t s_id, fc_attach_cmd_t cmd, int instance);
523 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
524 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
525 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
526 int lcount, int tcount);
527 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
528 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
529 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
530 int tgt_cnt);
531 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
532 dev_info_t *pdip, caddr_t name);
533 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
534 int lcount, int tcount, int flags, int *circ);
535 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
536 int lcount, int tcount, int flags, int *circ);
537 static void fcp_remove_child(struct fcp_lun *plun);
538 static void fcp_watch(void *arg);
539 static void fcp_check_reset_delay(struct fcp_port *pptr);
540 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
541 struct fcp_lun *rlun, int tgt_cnt);
542 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
543 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
544 uchar_t *wwn, uint16_t lun);
545 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
546 struct fcp_lun *plun);
547 static void fcp_post_callback(struct fcp_pkt *cmd);
548 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
549 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
550 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
551 child_info_t *cip);
552 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
553 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
554 int tgt_cnt, int flags);
555 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
556 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
557 int tgt_cnt, int flags, int wait);
558 static void fcp_retransport_cmd(struct fcp_port *pptr,
559 struct fcp_pkt *cmd);
560 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
561 uint_t statistics);
562 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
563 static void fcp_update_targets(struct fcp_port *pptr,
564 fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
565 static int fcp_call_finish_init(struct fcp_port *pptr,
566 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
567 static int fcp_call_finish_init_held(struct fcp_port *pptr,
568 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
569 static void fcp_reconfigure_luns(void * tgt_handle);
570 static void fcp_free_targets(struct fcp_port *pptr);
571 static void fcp_free_target(struct fcp_tgt *ptgt);
572 static int fcp_is_retryable(struct fcp_ipkt *icmd);
573 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
574 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
575 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
576 static void fcp_print_error(fc_packet_t *fpkt);
577 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
578 struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
579 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
580 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
581 uint32_t *dev_cnt);
582 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
583 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
584 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
585 struct fcp_ioctl *, struct fcp_port **);
586 static char *fcp_get_lun_path(struct fcp_lun *plun);
587 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
588 int *rval);
589 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
590 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
591 static char *fcp_get_lun_path(struct fcp_lun *plun);
592 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
593 int *rval);
594 static void fcp_reconfig_wait(struct fcp_port *pptr);
595
596 /*
597 * New functions added for mpxio support
598 */
599 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
600 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
601 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
602 int tcount);
603 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
604 dev_info_t *pdip);
605 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
606 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
607 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
608 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
609 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
610 int what);
611 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
612 fc_packet_t *fpkt);
613 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
614
615 /*
616 * New functions added for lun masking support
617 */
618 static void fcp_read_blacklist(dev_info_t *dip,
619 struct fcp_black_list_entry **pplun_blacklist);
620 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
621 struct fcp_black_list_entry **pplun_blacklist);
622 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
623 struct fcp_black_list_entry **pplun_blacklist);
624 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
625 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
626
627 /*
628 * New functions to support software FCA (like fcoei)
629 */
630 static struct scsi_pkt *fcp_pseudo_init_pkt(
631 struct scsi_address *ap, struct scsi_pkt *pkt,
632 struct buf *bp, int cmdlen, int statuslen,
633 int tgtlen, int flags, int (*callback)(), caddr_t arg);
634 static void fcp_pseudo_destroy_pkt(
635 struct scsi_address *ap, struct scsi_pkt *pkt);
636 static void fcp_pseudo_sync_pkt(
637 struct scsi_address *ap, struct scsi_pkt *pkt);
638 static int fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt);
639 static void fcp_pseudo_dmafree(
640 struct scsi_address *ap, struct scsi_pkt *pkt);
641
642 extern struct mod_ops mod_driverops;
643 /*
644 * This variable is defined in modctl.c and set to '1' after the root driver
645 * and fs are loaded. It serves as an indication that the root filesystem can
646 * be used.
647 */
648 extern int modrootloaded;
649 /*
650 * This table contains strings associated with the SCSI sense key codes. It
651 * is used by FCP to print a clear explanation of the code returned in the
652 * sense information by a device.
653 */
654 extern char *sense_keys[];
655 /*
656 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
657 * under this device that the paths to a physical device are created when
658 * MPxIO is used.
659 */
660 extern dev_info_t *scsi_vhci_dip;
661
662 /*
663 * Report lun processing
664 */
665 #define FCP_LUN_ADDRESSING 0x80
666 #define FCP_PD_ADDRESSING 0x00
667 #define FCP_VOLUME_ADDRESSING 0x40
668
669 #define FCP_SVE_THROTTLE 0x28 /* Vicom */
670 #define MAX_INT_DMA 0x7fffffff
671 /*
672 * Property definitions
673 */
674 #define NODE_WWN_PROP (char *)fcp_node_wwn_prop
675 #define PORT_WWN_PROP (char *)fcp_port_wwn_prop
676 #define TARGET_PROP (char *)fcp_target_prop
677 #define LUN_PROP (char *)fcp_lun_prop
678 #define SAM_LUN_PROP (char *)fcp_sam_lun_prop
679 #define CONF_WWN_PROP (char *)fcp_conf_wwn_prop
680 #define OBP_BOOT_WWN (char *)fcp_obp_boot_wwn
681 #define MANUAL_CFG_ONLY (char *)fcp_manual_config_only
682 #define INIT_PORT_PROP (char *)fcp_init_port_prop
683 #define TGT_PORT_PROP (char *)fcp_tgt_port_prop
684 #define LUN_BLACKLIST_PROP (char *)fcp_lun_blacklist_prop
685 /*
686 * Short hand macros.
687 */
688 #define LUN_PORT (plun->lun_tgt->tgt_port)
689 #define LUN_TGT (plun->lun_tgt)
690
691 /*
692 * Driver private macros
693 */
/*
 * FCP_ATOB: convert one hexadecimal digit character ('0'-'9', 'a'-'f',
 * 'A'-'F') to its numeric value.  NOTE(review): the argument is
 * evaluated more than once, so it must be side-effect free; a
 * character that is not a hex digit yields a meaningless value.
 */
#define	FCP_ATOB(x)	(((x) >= '0' && (x) <= '9') ? ((x) - '0') :	\
			((x) >= 'a' && (x) <= 'f') ?	\
			((x) - 'a' + 10) : ((x) - 'A' + 10))

/* Larger of the two values.  Both arguments are evaluated twice. */
#define	FCP_MAX(a, b)	((a) > (b) ? (a) : (b))

/* Number of entries in the fcp_ndi_event_defs[] table. */
#define	FCP_N_NDI_EVENTS	\
	(sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))

/* Link counter of port (p) differs from the one saved in the ipkt (c). */
#define	FCP_LINK_STATE_CHANGED(p, c)	\
	((p)->port_link_cnt != (c)->ipkt_link_cnt)

/* Change counter of target (t) differs from the one saved in the ipkt (c). */
#define	FCP_TGT_STATE_CHANGED(t, c)	\
	((t)->tgt_change_cnt != (c)->ipkt_change_cnt)

/*
 * NOTE(review): only the target change counter is compared; the port
 * argument (p) is currently unused by this macro.
 */
#define	FCP_STATE_CHANGED(p, t, c)	\
	(FCP_TGT_STATE_CHANGED(t, c))
711
/*
 * True when the FC packet completed with a transient transport
 * condition (local/remote/fabric busy, ELS in progress, port offline)
 * that warrants retrying the command rather than failing it.
 */
#define	FCP_MUST_RETRY(fpkt)				\
	((fpkt)->pkt_state == FC_PKT_LOCAL_BSY ||	\
	(fpkt)->pkt_state == FC_PKT_LOCAL_RJT ||	\
	(fpkt)->pkt_state == FC_PKT_TRAN_BSY ||	\
	(fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS ||	\
	(fpkt)->pkt_state == FC_PKT_NPORT_BSY ||	\
	(fpkt)->pkt_state == FC_PKT_FABRIC_BSY ||	\
	(fpkt)->pkt_state == FC_PKT_PORT_OFFLINE ||	\
	(fpkt)->pkt_reason == FC_REASON_OFFLINE)

/*
 * Sense data carries UNIT ATTENTION with ASC/ASCQ 0x3f/0x0e, i.e.
 * "REPORTED LUNS DATA HAS CHANGED" (SPC): the target's LUN inventory
 * must be rediscovered.
 */
#define	FCP_SENSE_REPORTLUN_CHANGED(es)		\
	((es)->es_key == KEY_UNIT_ATTENTION &&	\
	(es)->es_add_code == 0x3f &&		\
	(es)->es_qual_code == 0x0e)

/*
 * Sense data carries ILLEGAL REQUEST with ASC/ASCQ 0x25/0x00, i.e.
 * "LOGICAL UNIT NOT SUPPORTED" (SPC): the addressed LUN does not exist.
 */
#define	FCP_SENSE_NO_LUN(es)			\
	((es)->es_key == KEY_ILLEGAL_REQUEST &&	\
	(es)->es_add_code == 0x25 &&		\
	(es)->es_qual_code == 0x0)
731
/* Driver version strings reported by this module. */
#define	FCP_VERSION		"20091208-1.192"
#define	FCP_NAME_VERSION	"SunFC FCP v" FCP_VERSION

/*
 * Number of elements in a statically declared array.  NOTE(review):
 * valid only when applied to a true array object, not a pointer.
 */
#define	FCP_NUM_ELEMENTS(array) \
	(sizeof (array) / sizeof ((array)[0]))
737
738 /*
739 * Debugging, Error reporting, and tracing
740 */
/*
 * Size in bytes of the per-port trace buffer.  The expansion is
 * parenthesized so the macro is safe inside larger expressions
 * (e.g. `x / FCP_LOG_SIZE` would otherwise parse as `x / 1024 * 1024`).
 */
#define	FCP_LOG_SIZE		(1024 * 1024)
742
743 #define FCP_LEVEL_1 0x00001 /* attach/detach PM CPR */
744 #define FCP_LEVEL_2 0x00002 /* failures/Invalid data */
745 #define FCP_LEVEL_3 0x00004 /* state change, discovery */
746 #define FCP_LEVEL_4 0x00008 /* ULP messages */
747 #define FCP_LEVEL_5 0x00010 /* ELS/SCSI cmds */
#define	FCP_LEVEL_6	0x00020		/* Transport failures */
#define	FCP_LEVEL_7	0x00040		/* (no description in original source) */
#define	FCP_LEVEL_8	0x00080		/* I/O tracing */
#define	FCP_LEVEL_9	0x00100		/* I/O tracing */



/*
 * Log contents to system messages file
 */
#define	FCP_MSG_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
#define	FCP_MSG_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_MSG)


/*
 * Log contents to trace buffer
 */
#define	FCP_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
#define	FCP_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF)


/*
 * Log contents to both system messages file and trace buffer
 */
#define	FCP_MSG_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF |	\
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF |	\
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF |	\
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF |	\
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF |	\
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF |	\
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF |	\
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF |	\
				FC_TRACE_LOG_MSG)
#define	FCP_MSG_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF |	\
				FC_TRACE_LOG_MSG)
/*
 * FCP_DTRACE emits a trace call only on DEBUG kernels.
 * NOTE(review): in the non-DEBUG build the macro name expands to nothing,
 * so a statement like FCP_DTRACE(a, b, ...); is left as "(a, b, ...);" —
 * a comma expression whose arguments are still evaluated at runtime.
 */
#ifdef DEBUG
#define	FCP_DTRACE	fc_trace_debug
#else
#define	FCP_DTRACE
#endif

/* FCP_TRACE always traces, on both DEBUG and non-DEBUG kernels. */
#define	FCP_TRACE	fc_trace_debug
811
/*
 * Character-device switch table for the FCP pseudo device (minor node
 * created in fcp_attach()).  Only open, close and ioctl are supported;
 * every data-path entry point is nodev.
 */
static struct cb_ops fcp_cb_ops = {
	fcp_open,			/* open */
	fcp_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	fcp_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};
832
833
/*
 * Device operations table.  attach/detach are the only interesting entry
 * points; getinfo is the stock 1:1 instance mapping.
 */
static struct dev_ops fcp_ops = {
	DEVO_REV,
	0,
	ddi_getinfo_1to1,
	nulldev,			/* identify */
	nulldev,			/* probe */
	fcp_attach,			/* attach and detach are mandatory */
	fcp_detach,
	nodev,				/* reset */
	&fcp_cb_ops,			/* cb_ops */
	NULL,				/* bus_ops */
	NULL,				/* power */
};
847
848
/* Driver version string (also visible in crash dumps / mdb). */
char *fcp_version = FCP_NAME_VERSION;

/* Loadable-module linkage: this module is a device driver. */
static struct modldrv modldrv = {
	&mod_driverops,
	FCP_NAME_VERSION,
	&fcp_ops
};


static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
863
864
/*
 * ULP (upper-layer protocol) registration record handed to the FC
 * transport via fc_ulp_add() in _init().  The transport calls back
 * through the function pointers below for port attach/detach, port
 * ioctls, ELS/data completions and link state changes.
 */
static fc_ulp_modinfo_t fcp_modinfo = {
	&fcp_modinfo,		/* ulp_handle */
	FCTL_ULP_MODREV_4,	/* ulp_rev */
	FC4_SCSI_FCP,		/* ulp_type */
	"fcp",			/* ulp_name */
	FCP_STATEC_MASK,	/* ulp_statec_mask */
	fcp_port_attach,	/* ulp_port_attach */
	fcp_port_detach,	/* ulp_port_detach */
	fcp_port_ioctl,		/* ulp_port_ioctl */
	fcp_els_callback,	/* ulp_els_callback */
	fcp_data_callback,	/* ulp_data_callback */
	fcp_statec_callback	/* ulp_statec_callback */
};
878
/*
 * Default trace mask (levels 1-7; level 8/9 I/O tracing is off).
 * NOTE(review): the DEBUG and non-DEBUG definitions below are currently
 * identical; the #ifdef is retained so the two builds can diverge.
 */
#ifdef DEBUG
#define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
				FCP_LEVEL_2 | FCP_LEVEL_3 |	\
				FCP_LEVEL_4 | FCP_LEVEL_5 |	\
				FCP_LEVEL_6 | FCP_LEVEL_7)
#else
#define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
				FCP_LEVEL_2 | FCP_LEVEL_3 |	\
				FCP_LEVEL_4 | FCP_LEVEL_5 |	\
				FCP_LEVEL_6 | FCP_LEVEL_7)
#endif
890
/* FCP global variables */
int fcp_bus_config_debug = 0;
/* Size of the driver-wide trace buffer, passed to fc_trace_alloc_logq(). */
static int fcp_log_size = FCP_LOG_SIZE;
/* Active trace mask; patchable at boot/runtime by a developer. */
static int fcp_trace = FCP_TRACE_DEFAULT;
/* Driver-wide trace log queue, allocated in _init(), freed in _fini(). */
static fc_trace_logq_t *fcp_logq = NULL;
/* Head of the LUN black list (presumably built from the
 * "pwwn-lun-blacklist" property declared below — confirm at use site). */
static struct fcp_black_list_entry *fcp_lun_blacklist = NULL;
/*
 * The auto-configuration is set by default. The only way of disabling it is
 * through the property MANUAL_CFG_ONLY in the fcp.conf file.
 */
static int fcp_enable_auto_configuration = 1;
static int fcp_max_bus_config_retries = 4;
static int fcp_lun_ready_retry = 300;
/*
 * The value assigned to the following variable has changed several times due
 * to a problem with the data underruns reporting of some firmware(s). The
 * current value of 50 gives a timeout value of 25 seconds for a max number
 * of 256 LUNs.
 */
static int fcp_max_target_retries = 50;
/*
 * Watchdog variables
 * ------------------
 *
 * fcp_watchdog_init
 *
 *	Indicates if the watchdog timer is running or not.  This is actually
 *	a counter of the number of Fibre Channel ports that attached.  When
 *	the first port attaches the watchdog is started.  When the last port
 *	detaches the watchdog timer is stopped.
 *
 * fcp_watchdog_time
 *
 *	This is the watchdog clock counter.  It is incremented by
 *	fcp_watchdog_time each time the watchdog timer expires.
 *
 * fcp_watchdog_timeout
 *
 *	Increment value of the variable fcp_watchdog_time as well as the
 *	the timeout value of the watchdog timer.  The unit is 1 second.  It
 *	is strange that this is not a #define but a variable since the code
 *	never changes this value.  The reason why it can be said that the
 *	unit is 1 second is because the number of ticks for the watchdog
 *	timer is determined like this:
 *
 *	    fcp_watchdog_tick = fcp_watchdog_timeout *
 *		drv_usectohz(1000000);
 *
 *	The value 1000000 is hard coded in the code.
 *
 * fcp_watchdog_tick
 *
 *	Watchdog timer value in ticks.
 */
static int fcp_watchdog_init = 0;
static int fcp_watchdog_time = 0;
static int fcp_watchdog_timeout = 1;
static int fcp_watchdog_tick;
949
/*
 * fcp_offline_delay is a global variable to enable customisation of
 * the timeout on link offlines or RSCNs. The default value is set
 * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
 * specified in FCP4 Chapter 11 (see www.t10.org).
 *
 * The variable fcp_offline_delay is specified in SECONDS.
 *
 * If we made this a static var then the user would not be able to
 * change it. This variable is set in fcp_attach().
 */
unsigned int fcp_offline_delay = FCP_OFFLINE_DELAY;

static void *fcp_softstate = NULL; /* for soft state */
/* Device open state (FCP_IDLE/FCP_OPEN/FCP_EXCL/FCP_BUSY); protected by
 * fcp_global_mutex — see fcp_open()/fcp_close(). */
static uchar_t fcp_oflag = FCP_IDLE; /* open flag */
/* Guards fcp_oflag, fcp_global_dip and the fcp_port_head list. */
static kmutex_t	fcp_global_mutex;
/* Serializes FCP_TGT_SEND_SCSI passthru ioctls — see fcp_ioctl(). */
static kmutex_t	fcp_ioctl_mutex;
static dev_info_t *fcp_global_dip = NULL;
static timeout_id_t fcp_watchdog_id;
const char *fcp_lun_prop = "lun";
const char *fcp_sam_lun_prop = "sam-lun";
const char *fcp_target_prop = "target";
/*
 * NOTE: consumers of "node-wwn" property include stmsboot in ON
 * consolidation.
 */
const char *fcp_node_wwn_prop = "node-wwn";
const char *fcp_port_wwn_prop = "port-wwn";
const char *fcp_conf_wwn_prop = "fc-port-wwn";
const char *fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
const char *fcp_manual_config_only = "manual_configuration_only";
const char *fcp_init_port_prop = "initiator-port";
const char *fcp_tgt_port_prop = "target-port";
const char *fcp_lun_blacklist_prop = "pwwn-lun-blacklist";

/* Singly-linked list of attached ports; protected by fcp_global_mutex. */
static struct fcp_port *fcp_port_head = NULL;
static ddi_eventcookie_t fcp_insert_eid;
static ddi_eventcookie_t fcp_remove_eid;

/* NDI event definitions for FC-AL device insertion/removal. */
static ndi_event_definition_t fcp_ndi_event_defs[] = {
	{ FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
	{ FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
};

/*
 * List of valid commands for the scsi_ioctl call
 */
static uint8_t scsi_ioctl_list[] = {
	SCMD_INQUIRY,
	SCMD_REPORT_LUN,
	SCMD_READ_CAPACITY
};
1002
/*
 * this is used to dummy up a report lun response for cases
 * where the target doesn't support it
 *
 * Layout: 8-byte REPORT LUNS header (big-endian list length, then
 * reserved bytes), followed by a single 8-byte entry for LUN 0.
 */
static uchar_t fcp_dummy_lun[] = {
	0x00,		/* MSB length (length = no of luns * 8) */
	0x00,
	0x00,
	0x08,		/* LSB length */
	0x00,		/* MSB reserved */
	0x00,
	0x00,
	0x00,		/* LSB reserved */
	FCP_PD_ADDRESSING,
	0x00,		/* LUN is ZERO at the first level */
	0x00,
	0x00,		/* second level is zero */
	0x00,
	0x00,		/* third level is zero */
	0x00,
	0x00		/* fourth level is zero */
};
1025
/*
 * AL_PA to loop index translation table, indexed by the arbitrated-loop
 * physical address.  A value of 0x00 (other than at index 0) marks an
 * AL_PA that has no valid loop index.
 */
static uchar_t fcp_alpa_to_switch[] = {
	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * Product-id match string (presumably compared against the INQUIRY
 * product id of the VICOM SVE box listed in fcp_symmetric_disk_table —
 * confirm at use site).  NOTE(review): declared as caddr_t (char *)
 * pointing at a string literal; it must never be written through.
 */
static caddr_t pid = "SESS01	  ";
1054
/*
 * This table is used to determine whether or not it's safe to copy in
 * the target node name for a lun. Since all luns behind the same target
 * have the same wwnn, only targets that do not support multiple luns are
 * eligible to be enumerated under mpxio if they aren't page83 compliant.
 */

char *fcp_symmetric_disk_table[] = {
	"SEAGATE ST",
	"IBM     DDYFT",
	"SUNW    SUNWGS",	/* Daktari enclosure */
	"SUN     SENA",		/* SES device */
	"SUN     SESS01"	/* VICOM SVE box */
};

/* Number of entries in the table above. */
int fcp_symmetric_disk_table_size =
	sizeof (fcp_symmetric_disk_table)/sizeof (char *);
1072
/*
 * This structure is bogus. scsi_hba_attach_setup() requires, as in the kernel
 * will panic if you don't pass this in to the routine, this information.
 * Need to determine what the actual impact to the system is by providing
 * this information if any. Since dma allocation is done in pkt_init it may
 * not have any impact. These values are straight from the Writing Device
 * Driver manual.
 */
static ddi_dma_attr_t pseudo_fca_dma_attr = {
	DMA_ATTR_V0,	/* ddi_dma_attr version */
	0,		/* low address */
	0xffffffff,	/* high address */
	0x00ffffff,	/* counter upper bound */
	1,		/* alignment requirements */
	0x3f,		/* burst sizes */
	1,		/* minimum DMA access */
	0xffffffff,	/* maximum DMA access */
	(1 << 24) - 1,	/* segment boundary restrictions */
	1,		/* scatter/gather list length */
	512,		/* device granularity */
	0		/* DMA flags */
};
1095
1096 /*
1097 * The _init(9e) return value should be that of mod_install(9f). Under
1098 * some circumstances, a failure may not be related mod_install(9f) and
1099 * one would then require a return value to indicate the failure. Looking
1100 * at mod_install(9f), it is expected to return 0 for success and non-zero
1101 * for failure. mod_install(9f) for device drivers, further goes down the
1102 * calling chain and ends up in ddi_installdrv(), whose return values are
1103 * DDI_SUCCESS and DDI_FAILURE - There are also other functions in the
1104 * calling chain of mod_install(9f) which return values like EINVAL and
1105 * in some even return -1.
1106 *
1107 * To work around the vagaries of the mod_install() calling chain, return
1108 * either 0 or ENODEV depending on the success or failure of mod_install()
1109 */
1110 int
1111 _init(void)
1112 {
1113 int rval;
1114
1115 /*
1116 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
1117 * before registering with the transport first.
1118 */
1119 if (ddi_soft_state_init(&fcp_softstate,
1120 sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1121 return (EINVAL);
1122 }
1123
1124 mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1125 mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1126
1127 if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1128 cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1129 mutex_destroy(&fcp_global_mutex);
1130 mutex_destroy(&fcp_ioctl_mutex);
1131 ddi_soft_state_fini(&fcp_softstate);
1132 return (ENODEV);
1133 }
1134
1135 fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1136
1137 if ((rval = mod_install(&modlinkage)) != 0) {
1138 fc_trace_free_logq(fcp_logq);
1139 (void) fc_ulp_remove(&fcp_modinfo);
1140 mutex_destroy(&fcp_global_mutex);
1141 mutex_destroy(&fcp_ioctl_mutex);
1142 ddi_soft_state_fini(&fcp_softstate);
1143 rval = ENODEV;
1144 }
1145
1146 return (rval);
1147 }
1148
1149
1150 /*
1151 * the system is done with us as a driver, so clean up
1152 */
1153 int
1154 _fini(void)
1155 {
1156 int rval;
1157
1158 /*
1159 * don't start cleaning up until we know that the module remove
1160 * has worked -- if this works, then we know that each instance
1161 * has successfully been DDI_DETACHed
1162 */
1163 if ((rval = mod_remove(&modlinkage)) != 0) {
1164 return (rval);
1165 }
1166
1167 (void) fc_ulp_remove(&fcp_modinfo);
1168
1169 ddi_soft_state_fini(&fcp_softstate);
1170 mutex_destroy(&fcp_global_mutex);
1171 mutex_destroy(&fcp_ioctl_mutex);
1172 fc_trace_free_logq(fcp_logq);
1173
1174 return (rval);
1175 }
1176
1177
/*
 * _info(9E) -- report module information through mod_info(9F).
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
1183
1184
1185 /*
1186 * attach the module
1187 */
1188 static int
1189 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1190 {
1191 int rval = DDI_SUCCESS;
1192
1193 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1194 FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1195
1196 if (cmd == DDI_ATTACH) {
1197 /* The FCP pseudo device is created here. */
1198 mutex_enter(&fcp_global_mutex);
1199 fcp_global_dip = devi;
1200 mutex_exit(&fcp_global_mutex);
1201
1202 if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1203 0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1204 ddi_report_dev(fcp_global_dip);
1205 } else {
1206 cmn_err(CE_WARN, "FCP: Cannot create minor node");
1207 mutex_enter(&fcp_global_mutex);
1208 fcp_global_dip = NULL;
1209 mutex_exit(&fcp_global_mutex);
1210
1211 rval = DDI_FAILURE;
1212 }
1213 /*
1214 * We check the fcp_offline_delay property at this
1215 * point. This variable is global for the driver,
1216 * not specific to an instance.
1217 *
1218 * We do not recommend setting the value to less
1219 * than 10 seconds (RA_TOV_els), or greater than
1220 * 60 seconds.
1221 */
1222 fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1223 devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1224 "fcp_offline_delay", FCP_OFFLINE_DELAY);
1225 if ((fcp_offline_delay < 10) ||
1226 (fcp_offline_delay > 60)) {
1227 cmn_err(CE_WARN, "Setting fcp_offline_delay "
1228 "to %d second(s). This is outside the "
1229 "recommended range of 10..60 seconds.",
1230 fcp_offline_delay);
1231 }
1232 }
1233
1234 return (rval);
1235 }
1236
1237
1238 /*ARGSUSED*/
1239 static int
1240 fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1241 {
1242 int res = DDI_SUCCESS;
1243
1244 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1245 FCP_BUF_LEVEL_8, 0, "module detach: cmd=0x%x", cmd);
1246
1247 if (cmd == DDI_DETACH) {
1248 /*
1249 * Check if there are active ports/threads. If there
1250 * are any, we will fail, else we will succeed (there
1251 * should not be much to clean up)
1252 */
1253 mutex_enter(&fcp_global_mutex);
1254 FCP_DTRACE(fcp_logq, "fcp",
1255 fcp_trace, FCP_BUF_LEVEL_8, 0, "port_head=%p",
1256 (void *) fcp_port_head);
1257
1258 if (fcp_port_head == NULL) {
1259 ddi_remove_minor_node(fcp_global_dip, NULL);
1260 fcp_global_dip = NULL;
1261 mutex_exit(&fcp_global_mutex);
1262 } else {
1263 mutex_exit(&fcp_global_mutex);
1264 res = DDI_FAILURE;
1265 }
1266 }
1267 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1268 FCP_BUF_LEVEL_8, 0, "module detach returning %d", res);
1269
1270 return (res);
1271 }
1272
1273
1274 /* ARGSUSED */
1275 static int
1276 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1277 {
1278 if (otype != OTYP_CHR) {
1279 return (EINVAL);
1280 }
1281
1282 /*
1283 * Allow only root to talk;
1284 */
1285 if (drv_priv(credp)) {
1286 return (EPERM);
1287 }
1288
1289 mutex_enter(&fcp_global_mutex);
1290 if (fcp_oflag & FCP_EXCL) {
1291 mutex_exit(&fcp_global_mutex);
1292 return (EBUSY);
1293 }
1294
1295 if (flag & FEXCL) {
1296 if (fcp_oflag & FCP_OPEN) {
1297 mutex_exit(&fcp_global_mutex);
1298 return (EBUSY);
1299 }
1300 fcp_oflag |= FCP_EXCL;
1301 }
1302 fcp_oflag |= FCP_OPEN;
1303 mutex_exit(&fcp_global_mutex);
1304
1305 return (0);
1306 }
1307
1308
1309 /* ARGSUSED */
1310 static int
1311 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1312 {
1313 if (otype != OTYP_CHR) {
1314 return (EINVAL);
1315 }
1316
1317 mutex_enter(&fcp_global_mutex);
1318 if (!(fcp_oflag & FCP_OPEN)) {
1319 mutex_exit(&fcp_global_mutex);
1320 return (ENODEV);
1321 }
1322 fcp_oflag = FCP_IDLE;
1323 mutex_exit(&fcp_global_mutex);
1324
1325 return (0);
1326 }
1327
1328
1329 /*
1330 * fcp_ioctl
1331 * Entry point for the FCP ioctls
1332 *
1333 * Input:
1334 * See ioctl(9E)
1335 *
1336 * Output:
1337 * See ioctl(9E)
1338 *
1339 * Returns:
1340 * See ioctl(9E)
1341 *
1342 * Context:
1343 * Kernel context.
1344 */
1345 /* ARGSUSED */
1346 static int
1347 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1348 int *rval)
1349 {
1350 int ret = 0;
1351
1352 mutex_enter(&fcp_global_mutex);
1353 if (!(fcp_oflag & FCP_OPEN)) {
1354 mutex_exit(&fcp_global_mutex);
1355 return (ENXIO);
1356 }
1357 mutex_exit(&fcp_global_mutex);
1358
1359 switch (cmd) {
1360 case FCP_TGT_INQUIRY:
1361 case FCP_TGT_CREATE:
1362 case FCP_TGT_DELETE:
1363 ret = fcp_setup_device_data_ioctl(cmd,
1364 (struct fcp_ioctl *)data, mode, rval);
1365 break;
1366
1367 case FCP_TGT_SEND_SCSI:
1368 mutex_enter(&fcp_ioctl_mutex);
1369 ret = fcp_setup_scsi_ioctl(
1370 (struct fcp_scsi_cmd *)data, mode, rval);
1371 mutex_exit(&fcp_ioctl_mutex);
1372 break;
1373
1374 case FCP_STATE_COUNT:
1375 ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1376 mode, rval);
1377 break;
1378 case FCP_GET_TARGET_MAPPINGS:
1379 ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1380 mode, rval);
1381 break;
1382 default:
1383 fcp_log(CE_WARN, NULL,
1384 "!Invalid ioctl opcode = 0x%x", cmd);
1385 ret = EINVAL;
1386 }
1387
1388 return (ret);
1389 }
1390
1391
1392 /*
1393 * fcp_setup_device_data_ioctl
1394 * Setup handler for the "device data" style of
1395 * ioctl for FCP. See "fcp_util.h" for data structure
1396 * definition.
1397 *
1398 * Input:
1399 * cmd = FCP ioctl command
1400 * data = ioctl data
1401 * mode = See ioctl(9E)
1402 *
1403 * Output:
1404 * data = ioctl data
1405 * rval = return value - see ioctl(9E)
1406 *
1407 * Returns:
1408 * See ioctl(9E)
1409 *
1410 * Context:
1411 * Kernel context.
1412 */
1413 /* ARGSUSED */
1414 static int
1415 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1416 int *rval)
1417 {
1418 struct fcp_port *pptr;
1419 struct device_data *dev_data;
1420 uint32_t link_cnt;
1421 la_wwn_t *wwn_ptr = NULL;
1422 struct fcp_tgt *ptgt = NULL;
1423 struct fcp_lun *plun = NULL;
1424 int i, error;
1425 struct fcp_ioctl fioctl;
1426
1427 #ifdef _MULTI_DATAMODEL
1428 switch (ddi_model_convert_from(mode & FMODELS)) {
1429 case DDI_MODEL_ILP32: {
1430 struct fcp32_ioctl f32_ioctl;
1431
1432 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1433 sizeof (struct fcp32_ioctl), mode)) {
1434 return (EFAULT);
1435 }
1436 fioctl.fp_minor = f32_ioctl.fp_minor;
1437 fioctl.listlen = f32_ioctl.listlen;
1438 fioctl.list = (caddr_t)(long)f32_ioctl.list;
1439 break;
1440 }
1441 case DDI_MODEL_NONE:
1442 if (ddi_copyin((void *)data, (void *)&fioctl,
1443 sizeof (struct fcp_ioctl), mode)) {
1444 return (EFAULT);
1445 }
1446 break;
1447 }
1448
1449 #else /* _MULTI_DATAMODEL */
1450 if (ddi_copyin((void *)data, (void *)&fioctl,
1451 sizeof (struct fcp_ioctl), mode)) {
1452 return (EFAULT);
1453 }
1454 #endif /* _MULTI_DATAMODEL */
1455
1456 /*
1457 * Right now we can assume that the minor number matches with
1458 * this instance of fp. If this changes we will need to
1459 * revisit this logic.
1460 */
1461 mutex_enter(&fcp_global_mutex);
1462 pptr = fcp_port_head;
1463 while (pptr) {
1464 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1465 break;
1466 } else {
1467 pptr = pptr->port_next;
1468 }
1469 }
1470 mutex_exit(&fcp_global_mutex);
1471 if (pptr == NULL) {
1472 return (ENXIO);
1473 }
1474 mutex_enter(&pptr->port_mutex);
1475
1476
1477 if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1478 fioctl.listlen, KM_NOSLEEP)) == NULL) {
1479 mutex_exit(&pptr->port_mutex);
1480 return (ENOMEM);
1481 }
1482
1483 if (ddi_copyin(fioctl.list, dev_data,
1484 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1485 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1486 mutex_exit(&pptr->port_mutex);
1487 return (EFAULT);
1488 }
1489 link_cnt = pptr->port_link_cnt;
1490
1491 if (cmd == FCP_TGT_INQUIRY) {
1492 wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1493 if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1494 sizeof (wwn_ptr->raw_wwn)) == 0) {
1495 /* This ioctl is requesting INQ info of local HBA */
1496 mutex_exit(&pptr->port_mutex);
1497 dev_data[0].dev0_type = DTYPE_UNKNOWN;
1498 dev_data[0].dev_status = 0;
1499 if (ddi_copyout(dev_data, fioctl.list,
1500 (sizeof (struct device_data)) * fioctl.listlen,
1501 mode)) {
1502 kmem_free(dev_data,
1503 sizeof (*dev_data) * fioctl.listlen);
1504 return (EFAULT);
1505 }
1506 kmem_free(dev_data,
1507 sizeof (*dev_data) * fioctl.listlen);
1508 #ifdef _MULTI_DATAMODEL
1509 switch (ddi_model_convert_from(mode & FMODELS)) {
1510 case DDI_MODEL_ILP32: {
1511 struct fcp32_ioctl f32_ioctl;
1512 f32_ioctl.fp_minor = fioctl.fp_minor;
1513 f32_ioctl.listlen = fioctl.listlen;
1514 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1515 if (ddi_copyout((void *)&f32_ioctl,
1516 (void *)data,
1517 sizeof (struct fcp32_ioctl), mode)) {
1518 return (EFAULT);
1519 }
1520 break;
1521 }
1522 case DDI_MODEL_NONE:
1523 if (ddi_copyout((void *)&fioctl, (void *)data,
1524 sizeof (struct fcp_ioctl), mode)) {
1525 return (EFAULT);
1526 }
1527 break;
1528 }
1529 #else /* _MULTI_DATAMODEL */
1530 if (ddi_copyout((void *)&fioctl, (void *)data,
1531 sizeof (struct fcp_ioctl), mode)) {
1532 return (EFAULT);
1533 }
1534 #endif /* _MULTI_DATAMODEL */
1535 return (0);
1536 }
1537 }
1538
1539 if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1540 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1541 mutex_exit(&pptr->port_mutex);
1542 return (ENXIO);
1543 }
1544
1545 for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1546 i++) {
1547 wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1548
1549 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1550
1551
1552 dev_data[i].dev_status = ENXIO;
1553
1554 if ((ptgt = fcp_lookup_target(pptr,
1555 (uchar_t *)wwn_ptr)) == NULL) {
1556 mutex_exit(&pptr->port_mutex);
1557 if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1558 wwn_ptr, &error, 0) == NULL) {
1559 dev_data[i].dev_status = ENODEV;
1560 mutex_enter(&pptr->port_mutex);
1561 continue;
1562 } else {
1563
1564 dev_data[i].dev_status = EAGAIN;
1565
1566 mutex_enter(&pptr->port_mutex);
1567 continue;
1568 }
1569 } else {
1570 mutex_enter(&ptgt->tgt_mutex);
1571 if (ptgt->tgt_state & (FCP_TGT_MARK |
1572 FCP_TGT_BUSY)) {
1573 dev_data[i].dev_status = EAGAIN;
1574 mutex_exit(&ptgt->tgt_mutex);
1575 continue;
1576 }
1577
1578 if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1579 if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1580 dev_data[i].dev_status = ENOTSUP;
1581 } else {
1582 dev_data[i].dev_status = ENXIO;
1583 }
1584 mutex_exit(&ptgt->tgt_mutex);
1585 continue;
1586 }
1587
1588 switch (cmd) {
1589 case FCP_TGT_INQUIRY:
1590 /*
1591 * The reason we give device type of
1592 * lun 0 only even though in some
1593 * cases(like maxstrat) lun 0 device
1594 * type may be 0x3f(invalid) is that
1595 * for bridge boxes target will appear
1596 * as luns and the first lun could be
1597 * a device that utility may not care
1598 * about (like a tape device).
1599 */
1600 dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1601 dev_data[i].dev_status = 0;
1602 mutex_exit(&ptgt->tgt_mutex);
1603
1604 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1605 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1606 } else {
1607 dev_data[i].dev0_type = plun->lun_type;
1608 }
1609 mutex_enter(&ptgt->tgt_mutex);
1610 break;
1611
1612 case FCP_TGT_CREATE:
1613 mutex_exit(&ptgt->tgt_mutex);
1614 mutex_exit(&pptr->port_mutex);
1615
1616 /*
1617 * serialize state change call backs.
1618 * only one call back will be handled
1619 * at a time.
1620 */
1621 mutex_enter(&fcp_global_mutex);
1622 if (fcp_oflag & FCP_BUSY) {
1623 mutex_exit(&fcp_global_mutex);
1624 if (dev_data) {
1625 kmem_free(dev_data,
1626 sizeof (*dev_data) *
1627 fioctl.listlen);
1628 }
1629 return (EBUSY);
1630 }
1631 fcp_oflag |= FCP_BUSY;
1632 mutex_exit(&fcp_global_mutex);
1633
1634 dev_data[i].dev_status =
1635 fcp_create_on_demand(pptr,
1636 wwn_ptr->raw_wwn);
1637
1638 if (dev_data[i].dev_status != 0) {
1639 char buf[25];
1640
1641 for (i = 0; i < FC_WWN_SIZE; i++) {
1642 (void) sprintf(&buf[i << 1],
1643 "%02x",
1644 wwn_ptr->raw_wwn[i]);
1645 }
1646
1647 fcp_log(CE_WARN, pptr->port_dip,
1648 "!Failed to create nodes for"
1649 " pwwn=%s; error=%x", buf,
1650 dev_data[i].dev_status);
1651 }
1652
1653 /* allow state change call backs again */
1654 mutex_enter(&fcp_global_mutex);
1655 fcp_oflag &= ~FCP_BUSY;
1656 mutex_exit(&fcp_global_mutex);
1657
1658 mutex_enter(&pptr->port_mutex);
1659 mutex_enter(&ptgt->tgt_mutex);
1660
1661 break;
1662
1663 case FCP_TGT_DELETE:
1664 break;
1665
1666 default:
1667 fcp_log(CE_WARN, pptr->port_dip,
1668 "!Invalid device data ioctl "
1669 "opcode = 0x%x", cmd);
1670 }
1671 mutex_exit(&ptgt->tgt_mutex);
1672 }
1673 }
1674 mutex_exit(&pptr->port_mutex);
1675
1676 if (ddi_copyout(dev_data, fioctl.list,
1677 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1678 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1679 return (EFAULT);
1680 }
1681 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1682
1683 #ifdef _MULTI_DATAMODEL
1684 switch (ddi_model_convert_from(mode & FMODELS)) {
1685 case DDI_MODEL_ILP32: {
1686 struct fcp32_ioctl f32_ioctl;
1687
1688 f32_ioctl.fp_minor = fioctl.fp_minor;
1689 f32_ioctl.listlen = fioctl.listlen;
1690 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1691 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1692 sizeof (struct fcp32_ioctl), mode)) {
1693 return (EFAULT);
1694 }
1695 break;
1696 }
1697 case DDI_MODEL_NONE:
1698 if (ddi_copyout((void *)&fioctl, (void *)data,
1699 sizeof (struct fcp_ioctl), mode)) {
1700 return (EFAULT);
1701 }
1702 break;
1703 }
1704 #else /* _MULTI_DATAMODEL */
1705
1706 if (ddi_copyout((void *)&fioctl, (void *)data,
1707 sizeof (struct fcp_ioctl), mode)) {
1708 return (EFAULT);
1709 }
1710 #endif /* _MULTI_DATAMODEL */
1711
1712 return (0);
1713 }
1714
/*
 * Fetch the target mappings (path, etc.) for all LUNs
 * on this port.
 */
/* ARGSUSED */
static int
fcp_get_target_mappings(struct fcp_ioctl *data,
    int mode, int *rval)
{
	struct fcp_port		    *pptr;
	fc_hba_target_mappings_t    *mappings;
	fc_hba_mapping_entry_t	    *map;
	struct fcp_tgt		    *ptgt = NULL;
	struct fcp_lun		    *plun = NULL;
	int			    i, mapIndex, mappingSize;
	int			    listlen;
	struct fcp_ioctl	    fioctl;
	char			    *path;
	fcp_ent_addr_t		    sam_lun_addr;

	/* Copy in the ioctl argument, honoring the caller's data model. */
#ifdef	_MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct fcp32_ioctl f32_ioctl;

		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
		    sizeof (struct fcp32_ioctl), mode)) {
			return (EFAULT);
		}
		fioctl.fp_minor = f32_ioctl.fp_minor;
		fioctl.listlen = f32_ioctl.listlen;
		fioctl.list = (caddr_t)(long)f32_ioctl.list;
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin((void *)data, (void *)&fioctl,
		    sizeof (struct fcp_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}

#else	/* _MULTI_DATAMODEL */
	if (ddi_copyin((void *)data, (void *)&fioctl,
	    sizeof (struct fcp_ioctl), mode)) {
		return (EFAULT);
	}
#endif	/* _MULTI_DATAMODEL */

	/*
	 * Right now we can assume that the minor number matches with
	 * this instance of fp. If this changes we will need to
	 * revisit this logic.
	 */
	mutex_enter(&fcp_global_mutex);
	pptr = fcp_port_head;
	while (pptr) {
		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
			break;
		} else {
			pptr = pptr->port_next;
		}
	}
	mutex_exit(&fcp_global_mutex);
	if (pptr == NULL) {
		cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
		    fioctl.fp_minor);
		return (ENXIO);
	}


	/* We use listlen to show the total buffer size */
	mappingSize = fioctl.listlen;

	/* Now calculate how many mapping entries will fit */
	listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
	    - sizeof (fc_hba_target_mappings_t);
	if (listlen <= 0) {
		cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
		return (ENXIO);
	}
	listlen = listlen / sizeof (fc_hba_mapping_entry_t);

	/*
	 * NOTE(review): KM_SLEEP allocations never return NULL, so this
	 * check is dead code; it is kept as-is for safety.
	 */
	if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
		return (ENOMEM);
	}
	mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;

	/* Now get to work */
	mapIndex = 0;

	mutex_enter(&pptr->port_mutex);
	/* Loop through all targets on this port */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {

			mutex_enter(&ptgt->tgt_mutex);

			/* Loop through all LUNs on this target */
			for (plun = ptgt->tgt_lun; plun != NULL;
			    plun = plun->lun_next) {
				if (plun->lun_state & FCP_LUN_OFFLINE) {
					continue;
				}

				path = fcp_get_lun_path(plun);
				if (path == NULL) {
					continue;
				}

				/*
				 * Entries beyond the end of the caller's
				 * buffer are counted but not stored, so
				 * mappings->numLuns below ends up as the
				 * TOTAL LUN count, letting the caller
				 * detect truncation.
				 */
				if (mapIndex >= listlen) {
					mapIndex ++;
					kmem_free(path, MAXPATHLEN);
					continue;
				}
				map = &mappings->entries[mapIndex++];
				bcopy(path, map->targetDriver,
				    sizeof (map->targetDriver));
				map->d_id = ptgt->tgt_d_id;
				map->busNumber = 0;
				map->targetNumber = ptgt->tgt_d_id;
				map->osLUN = plun->lun_num;

				/*
				 * We had swapped lun when we stored it in
				 * lun_addr. We need to swap it back before
				 * returning it to user land
				 */

				sam_lun_addr.ent_addr_0 =
				    BE_16(plun->lun_addr.ent_addr_0);
				sam_lun_addr.ent_addr_1 =
				    BE_16(plun->lun_addr.ent_addr_1);
				sam_lun_addr.ent_addr_2 =
				    BE_16(plun->lun_addr.ent_addr_2);
				sam_lun_addr.ent_addr_3 =
				    BE_16(plun->lun_addr.ent_addr_3);

				bcopy(&sam_lun_addr, &map->samLUN,
				    FCP_LUN_SIZE);
				bcopy(ptgt->tgt_node_wwn.raw_wwn,
				    map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
				bcopy(ptgt->tgt_port_wwn.raw_wwn,
				    map->PortWWN.raw_wwn, sizeof (la_wwn_t));

				if (plun->lun_guid) {

					/* convert ascii wwn to bytes */
					fcp_ascii_to_wwn(plun->lun_guid,
					    map->guid, sizeof (map->guid));

					/* Warn if the GUID didn't fit. */
					if ((sizeof (map->guid)) <
					    plun->lun_guid_size / 2) {
						cmn_err(CE_WARN,
						    "fcp_get_target_mappings:"
						    "guid copy space "
						    "insufficient."
						    "Copy Truncation - "
						    "available %d; need %d",
						    (int)sizeof (map->guid),
						    (int)
						    plun->lun_guid_size / 2);
					}
				}
				kmem_free(path, MAXPATHLEN);
			}
			mutex_exit(&ptgt->tgt_mutex);
		}
	}
	mutex_exit(&pptr->port_mutex);
	mappings->numLuns = mapIndex;

	/* Hand the filled-in buffer back, then the ioctl argument. */
	if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
		kmem_free(mappings, mappingSize);
		return (EFAULT);
	}
	kmem_free(mappings, mappingSize);

#ifdef	_MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct fcp32_ioctl f32_ioctl;

		f32_ioctl.fp_minor = fioctl.fp_minor;
		f32_ioctl.listlen = fioctl.listlen;
		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
		    sizeof (struct fcp32_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyout((void *)&fioctl, (void *)data,
		    sizeof (struct fcp_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
#else	/* _MULTI_DATAMODEL */

	if (ddi_copyout((void *)&fioctl, (void *)data,
	    sizeof (struct fcp_ioctl), mode)) {
		return (EFAULT);
	}
#endif	/* _MULTI_DATAMODEL */

	return (0);
}
1925
1926 /*
1927 * fcp_setup_scsi_ioctl
1928 * Setup handler for the "scsi passthru" style of
1929 * ioctl for FCP. See "fcp_util.h" for data structure
1930 * definition.
1931 *
1932 * Input:
1933 * u_fscsi = ioctl data (user address space)
1934 * mode = See ioctl(9E)
1935 *
1936 * Output:
1937 * u_fscsi = ioctl data (user address space)
1938 * rval = return value - see ioctl(9E)
1939 *
1940 * Returns:
1941 * 0 = OK
1942 * EAGAIN = See errno.h
1943 * EBUSY = See errno.h
1944 * EFAULT = See errno.h
1945 * EINTR = See errno.h
1946 * EINVAL = See errno.h
1947 * EIO = See errno.h
1948 * ENOMEM = See errno.h
1949 * ENXIO = See errno.h
1950 *
1951 * Context:
1952 * Kernel context.
1953 */
/* ARGSUSED */
static int
fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
    int mode, int *rval)
{
	int			ret = 0;
	int			temp_ret;
	caddr_t			k_cdbbufaddr = NULL;	/* kernel copy of CDB */
	caddr_t			k_bufaddr = NULL;	/* kernel data buffer */
	caddr_t			k_rqbufaddr = NULL;	/* kernel sense buffer */
	caddr_t			u_cdbbufaddr;		/* saved user addresses */
	caddr_t			u_bufaddr;
	caddr_t			u_rqbufaddr;
	struct fcp_scsi_cmd	k_fscsi;		/* kernel copy of *u_fscsi */

	/*
	 * Get fcp_scsi_cmd array element from user address space
	 */
	if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
	    != 0) {
		return (ret);
	}


	/*
	 * Even though kmem_alloc() checks the validity of the
	 * buffer length, this check is needed when the
	 * kmem_flags set and the zero buffer length is passed.
	 *
	 * NOTE(review): only a lower bound is enforced here.
	 * scsi_cdblen is capped against FCP_CDB_SIZE later in
	 * fcp_send_scsi_ioctl(), but no upper bound on scsi_buflen
	 * or scsi_rqlen is visible in this path; oversized requests
	 * rely on the KM_NOSLEEP allocations below failing.
	 */
	if ((k_fscsi.scsi_cdblen <= 0) ||
	    (k_fscsi.scsi_buflen <= 0) ||
	    (k_fscsi.scsi_rqlen <= 0)) {
		return (EINVAL);
	}

	/*
	 * Allocate data for fcp_scsi_cmd pointer fields
	 * (KM_NOSLEEP: fail with ENOMEM rather than block).
	 */
	if (ret == 0) {
		k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
		k_bufaddr = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
		k_rqbufaddr = kmem_alloc(k_fscsi.scsi_rqlen, KM_NOSLEEP);

		if (k_cdbbufaddr == NULL ||
		    k_bufaddr == NULL ||
		    k_rqbufaddr == NULL) {
			ret = ENOMEM;
		}
	}

	/*
	 * Get fcp_scsi_cmd pointer fields from user
	 * address space.  The original user addresses are saved in
	 * u_* so they can be restored (and copied back out) later.
	 */
	if (ret == 0) {
		u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
		u_bufaddr = k_fscsi.scsi_bufaddr;
		u_rqbufaddr = k_fscsi.scsi_rqbufaddr;

		if (ddi_copyin(u_cdbbufaddr,
		    k_cdbbufaddr,
		    k_fscsi.scsi_cdblen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyin(u_bufaddr,
		    k_bufaddr,
		    k_fscsi.scsi_buflen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyin(u_rqbufaddr,
		    k_rqbufaddr,
		    k_fscsi.scsi_rqlen,
		    mode)) {
			ret = EFAULT;
		}
	}

	/*
	 * Send scsi command (blocking)
	 */
	if (ret == 0) {
		/*
		 * Prior to sending the scsi command, the
		 * fcp_scsi_cmd data structure must contain kernel,
		 * not user, addresses.
		 */
		k_fscsi.scsi_cdbbufaddr = k_cdbbufaddr;
		k_fscsi.scsi_bufaddr = k_bufaddr;
		k_fscsi.scsi_rqbufaddr = k_rqbufaddr;

		ret = fcp_send_scsi_ioctl(&k_fscsi);

		/*
		 * After sending the scsi command, the
		 * fcp_scsi_cmd data structure must contain user,
		 * not kernel, addresses.  (The structure is copied
		 * back to user land at the end of this function.)
		 */
		k_fscsi.scsi_cdbbufaddr = u_cdbbufaddr;
		k_fscsi.scsi_bufaddr = u_bufaddr;
		k_fscsi.scsi_rqbufaddr = u_rqbufaddr;
	}

	/*
	 * Put fcp_scsi_cmd pointer fields to user address space
	 */
	if (ret == 0) {
		if (ddi_copyout(k_cdbbufaddr,
		    u_cdbbufaddr,
		    k_fscsi.scsi_cdblen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyout(k_bufaddr,
		    u_bufaddr,
		    k_fscsi.scsi_buflen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyout(k_rqbufaddr,
		    u_rqbufaddr,
		    k_fscsi.scsi_rqlen,
		    mode)) {
			ret = EFAULT;
		}
	}

	/*
	 * Free data for fcp_scsi_cmd pointer fields
	 * (freed unconditionally, even on error paths).
	 */
	if (k_cdbbufaddr != NULL) {
		kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
	}
	if (k_bufaddr != NULL) {
		kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
	}
	if (k_rqbufaddr != NULL) {
		kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
	}

	/*
	 * Put fcp_scsi_cmd array element to user address space.
	 * Done even on failure so status fields reach the caller;
	 * a copyout error here overrides any earlier error code.
	 */
	temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
	if (temp_ret != 0) {
		ret = temp_ret;
	}

	/*
	 * Return status
	 */
	return (ret);
}
2104
2105
2106 /*
2107 * fcp_copyin_scsi_cmd
2108 * Copy in fcp_scsi_cmd data structure from user address space.
2109 * The data may be in 32 bit or 64 bit modes.
2110 *
2111 * Input:
2112 * base_addr = from address (user address space)
2113 * mode = See ioctl(9E) and ddi_copyin(9F)
2114 *
2115 * Output:
2116 * fscsi = to address (kernel address space)
2117 *
2118 * Returns:
2119 * 0 = OK
2120 * EFAULT = Error
2121 *
2122 * Context:
2123 * Kernel context.
2124 */
2125 static int
2126 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2127 {
2128 #ifdef _MULTI_DATAMODEL
2129 struct fcp32_scsi_cmd f32scsi;
2130
2131 switch (ddi_model_convert_from(mode & FMODELS)) {
2132 case DDI_MODEL_ILP32:
2133 /*
2134 * Copy data from user address space
2135 */
2136 if (ddi_copyin((void *)base_addr,
2137 &f32scsi,
2138 sizeof (struct fcp32_scsi_cmd),
2139 mode)) {
2140 return (EFAULT);
2141 }
2142 /*
2143 * Convert from 32 bit to 64 bit
2144 */
2145 FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2146 break;
2147 case DDI_MODEL_NONE:
2148 /*
2149 * Copy data from user address space
2150 */
2151 if (ddi_copyin((void *)base_addr,
2152 fscsi,
2153 sizeof (struct fcp_scsi_cmd),
2154 mode)) {
2155 return (EFAULT);
2156 }
2157 break;
2158 }
2159 #else /* _MULTI_DATAMODEL */
2160 /*
2161 * Copy data from user address space
2162 */
2163 if (ddi_copyin((void *)base_addr,
2164 fscsi,
2165 sizeof (struct fcp_scsi_cmd),
2166 mode)) {
2167 return (EFAULT);
2168 }
2169 #endif /* _MULTI_DATAMODEL */
2170
2171 return (0);
2172 }
2173
2174
2175 /*
2176 * fcp_copyout_scsi_cmd
2177 * Copy out fcp_scsi_cmd data structure to user address space.
2178 * The data may be in 32 bit or 64 bit modes.
2179 *
 * Input:
 *	fscsi	= from address (kernel address space)
 *	mode	= See ioctl(9E) and ddi_copyout(9F)
 *
 * Output:
 *	base_addr = to address (user address space)
2186 *
2187 * Returns:
2188 * 0 = OK
2189 * EFAULT = Error
2190 *
2191 * Context:
2192 * Kernel context.
2193 */
2194 static int
2195 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2196 {
2197 #ifdef _MULTI_DATAMODEL
2198 struct fcp32_scsi_cmd f32scsi;
2199
2200 switch (ddi_model_convert_from(mode & FMODELS)) {
2201 case DDI_MODEL_ILP32:
2202 /*
2203 * Convert from 64 bit to 32 bit
2204 */
2205 FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2206 /*
2207 * Copy data to user address space
2208 */
2209 if (ddi_copyout(&f32scsi,
2210 (void *)base_addr,
2211 sizeof (struct fcp32_scsi_cmd),
2212 mode)) {
2213 return (EFAULT);
2214 }
2215 break;
2216 case DDI_MODEL_NONE:
2217 /*
2218 * Copy data to user address space
2219 */
2220 if (ddi_copyout(fscsi,
2221 (void *)base_addr,
2222 sizeof (struct fcp_scsi_cmd),
2223 mode)) {
2224 return (EFAULT);
2225 }
2226 break;
2227 }
2228 #else /* _MULTI_DATAMODEL */
2229 /*
2230 * Copy data to user address space
2231 */
2232 if (ddi_copyout(fscsi,
2233 (void *)base_addr,
2234 sizeof (struct fcp_scsi_cmd),
2235 mode)) {
2236 return (EFAULT);
2237 }
2238 #endif /* _MULTI_DATAMODEL */
2239
2240 return (0);
2241 }
2242
2243
2244 /*
2245 * fcp_send_scsi_ioctl
2246 * Sends the SCSI command in blocking mode.
2247 *
2248 * Input:
2249 * fscsi = SCSI command data structure
2250 *
2251 * Output:
2252 * fscsi = SCSI command data structure
2253 *
2254 * Returns:
2255 * 0 = OK
2256 * EAGAIN = See errno.h
2257 * EBUSY = See errno.h
2258 * EINTR = See errno.h
2259 * EINVAL = See errno.h
2260 * EIO = See errno.h
2261 * ENOMEM = See errno.h
2262 * ENXIO = See errno.h
2263 *
2264 * Context:
2265 * Kernel context.
2266 */
static int
fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
{
	struct fcp_lun	*plun = NULL;
	struct fcp_port	*pptr = NULL;
	struct fcp_tgt	*ptgt = NULL;
	fc_packet_t	*fpkt = NULL;
	struct fcp_ipkt	*icmd = NULL;
	int		target_created = FALSE;
	fc_frame_hdr_t	*hp;
	struct fcp_cmd	fcp_cmd;
	struct fcp_cmd	*fcmd;
	union scsi_cdb	*scsi_cdb;
	la_wwn_t	*wwn_ptr;
	int		nodma;
	struct fcp_rsp	*rsp;
	struct fcp_rsp_info	*rsp_info;
	caddr_t		rsp_sense;
	int		buf_len;
	int		info_len;
	int		sense_len;
	struct scsi_extended_sense	*sense_to = NULL;
	timeout_id_t	tid;
	uint8_t		reconfig_lun = FALSE;
	uint8_t		reconfig_pending = FALSE;
	uint8_t		scsi_cmd;
	int		rsp_len;
	int		cmd_index;
	int		fc_status;
	int		pkt_state;
	int		pkt_action;
	int		pkt_reason;
	int		ret, xport_retval = ~FC_SUCCESS;
	int		lcount;
	int		tcount;
	int		reconfig_status;
	int		port_busy = FALSE;
	uchar_t		*lun_string;

	/*
	 * Check valid SCSI command
	 *
	 * scsi_cdbbufaddr is a kernel-resident copy of the CDB made by
	 * the caller (fcp_setup_scsi_ioctl), so it is safe to
	 * dereference directly here.
	 */
	scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
	ret = EINVAL;
	for (cmd_index = 0;
	    cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
	    ret != 0;
	    cmd_index++) {
		/*
		 * First byte of CDB is the SCSI command
		 */
		if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
			ret = 0;
		}
	}

	/*
	 * Check inputs
	 */
	if (fscsi->scsi_flags != FCP_SCSI_READ) {
		ret = EINVAL;
	} else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
		/* no larger than */
		ret = EINVAL;
	}


	/*
	 * Find FC port
	 *
	 * Lock ordering used throughout this function:
	 * fcp_global_mutex -> port_mutex -> tgt_mutex.
	 */
	if (ret == 0) {
		/*
		 * Acquire global mutex
		 */
		mutex_enter(&fcp_global_mutex);

		pptr = fcp_port_head;
		while (pptr) {
			if (pptr->port_instance ==
			    (uint32_t)fscsi->scsi_fc_port_num) {
				break;
			} else {
				pptr = pptr->port_next;
			}
		}

		if (pptr == NULL) {
			ret = ENXIO;
		} else {
			/*
			 * fc_ulp_busy_port can raise power
			 * so, we must not hold any mutexes involved in PM
			 */
			mutex_exit(&fcp_global_mutex);
			ret = fc_ulp_busy_port(pptr->port_fp_handle);
		}

		if (ret == 0) {

			/* remember port is busy, so we will release later */
			port_busy = TRUE;

			/*
			 * If there is a reconfiguration in progress, wait
			 * for it to complete.
			 */

			fcp_reconfig_wait(pptr);

			/* reacquire mutexes in order */
			mutex_enter(&fcp_global_mutex);
			mutex_enter(&pptr->port_mutex);

			/*
			 * Will port accept DMA?
			 */
			nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
			    ? 1 : 0;

			/*
			 * If init or offline, device not known
			 *
			 * If we are discovering (onlining), we can
			 * NOT obviously provide reliable data about
			 * devices until it is complete
			 */
			if (pptr->port_state & (FCP_STATE_INIT |
			    FCP_STATE_OFFLINE)) {
				ret = ENXIO;
			} else if (pptr->port_state & FCP_STATE_ONLINING) {
				ret = EBUSY;
			} else {
				/*
				 * Find target from pwwn
				 *
				 * The wwn must be put into a local
				 * variable to ensure alignment.
				 */
				wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
				ptgt = fcp_lookup_target(pptr,
				    (uchar_t *)wwn_ptr);

				/*
				 * If target not found,
				 */
				if (ptgt == NULL) {
					/*
					 * Note: Still have global &
					 * port mutexes
					 */
					mutex_exit(&pptr->port_mutex);
					ptgt = fcp_port_create_tgt(pptr,
					    wwn_ptr, &ret, &fc_status,
					    &pkt_state, &pkt_action,
					    &pkt_reason);
					mutex_enter(&pptr->port_mutex);

					fscsi->scsi_fc_status = fc_status;
					fscsi->scsi_pkt_state =
					    (uchar_t)pkt_state;
					fscsi->scsi_pkt_reason = pkt_reason;
					fscsi->scsi_pkt_action =
					    (uchar_t)pkt_action;

					if (ptgt != NULL) {
						target_created = TRUE;
					} else if (ret == 0) {
						ret = ENOMEM;
					}
				}

				if (ret == 0) {
					/*
					 * Acquire target
					 */
					mutex_enter(&ptgt->tgt_mutex);

					/*
					 * If target is mark or busy,
					 * then target can not be used
					 */
					if (ptgt->tgt_state &
					    (FCP_TGT_MARK |
					    FCP_TGT_BUSY)) {
						ret = EBUSY;
					} else {
						/*
						 * Mark target as busy
						 */
						ptgt->tgt_state |=
						    FCP_TGT_BUSY;
					}

					/*
					 * Release target
					 *
					 * lcount/tcount snapshot the link and
					 * target change generations for the
					 * internal packet allocated below.
					 */
					lcount = pptr->port_link_cnt;
					tcount = ptgt->tgt_change_cnt;
					mutex_exit(&ptgt->tgt_mutex);
				}
			}

			/*
			 * Release port
			 */
			mutex_exit(&pptr->port_mutex);
		}

		/*
		 * Release global mutex
		 */
		mutex_exit(&fcp_global_mutex);
	}

	if (ret == 0) {
		uint64_t belun = BE_64(fscsi->scsi_lun);

		/*
		 * If it's a target device, find lun from pwwn
		 * The wwn must be put into a local
		 * variable to ensure alignment.
		 */
		mutex_enter(&pptr->port_mutex);
		wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
		if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
			/* this is not a target */
			fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
			ret = ENXIO;
		} else if ((belun << 16) != 0) {
			/*
			 * Since fcp only support PD and LU addressing method
			 * so far, the last 6 bytes of a valid LUN are expected
			 * to be filled with 00h.
			 */
			fscsi->scsi_fc_status = FC_INVALID_LUN;
			cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
			    " method 0x%02x with LUN number 0x%016" PRIx64,
			    (uint8_t)(belun >> 62), belun);
			ret = ENXIO;
		} else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
		    (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
			/*
			 * This is a SCSI target, but no LUN at this
			 * address.
			 *
			 * In the future, we may want to send this to
			 * the target, and let it respond
			 * appropriately
			 */
			ret = ENXIO;
		}
		mutex_exit(&pptr->port_mutex);
	}

	/*
	 * Finished grabbing external resources
	 * Allocate internal packet (icmd)
	 */
	if (ret == 0) {
		/*
		 * Calc rsp len assuming rsp info included
		 */
		rsp_len = sizeof (struct fcp_rsp) +
		    sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;

		icmd = fcp_icmd_alloc(pptr, ptgt,
		    sizeof (struct fcp_cmd),
		    rsp_len,
		    fscsi->scsi_buflen,
		    nodma,
		    lcount,			/* ipkt_link_cnt */
		    tcount,			/* ipkt_change_cnt */
		    0,				/* cause */
		    FC_INVALID_RSCN_COUNT);	/* invalidate the count */

		if (icmd == NULL) {
			ret = ENOMEM;
		} else {
			/*
			 * Setup internal packet as sema sync
			 */
			fcp_ipkt_sema_init(icmd);
		}
	}

	if (ret == 0) {
		/*
		 * Init fpkt pointer for use.
		 */

		fpkt = icmd->ipkt_fpkt;

		fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
		fpkt->pkt_tran_type = FC_PKT_FCP_READ; /* only rd for now */
		fpkt->pkt_timeout = fscsi->scsi_timeout;

		/*
		 * Init fcmd pointer for use by SCSI command
		 */

		if (nodma) {
			fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
		} else {
			fcmd = &fcp_cmd;
		}
		bzero(fcmd, sizeof (struct fcp_cmd));
		ptgt = plun->lun_tgt;

		lun_string = (uchar_t *)&fscsi->scsi_lun;

		fcmd->fcp_ent_addr.ent_addr_0 =
		    BE_16(*(uint16_t *)&(lun_string[0]));
		fcmd->fcp_ent_addr.ent_addr_1 =
		    BE_16(*(uint16_t *)&(lun_string[2]));
		fcmd->fcp_ent_addr.ent_addr_2 =
		    BE_16(*(uint16_t *)&(lun_string[4]));
		fcmd->fcp_ent_addr.ent_addr_3 =
		    BE_16(*(uint16_t *)&(lun_string[6]));

		/*
		 * Setup internal packet(icmd)
		 */
		icmd->ipkt_lun = plun;
		icmd->ipkt_restart = 0;
		icmd->ipkt_retries = 0;
		icmd->ipkt_opcode = 0;

		/*
		 * Init the frame HEADER Pointer for use
		 */
		hp = &fpkt->pkt_cmd_fhdr;

		hp->s_id = pptr->port_id;
		hp->d_id = ptgt->tgt_d_id;
		hp->r_ctl = R_CTL_COMMAND;
		hp->type = FC_TYPE_SCSI_FCP;
		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->rsvd = 0;
		hp->seq_id = 0;
		hp->seq_cnt = 0;
		hp->ox_id = 0xffff;
		hp->rx_id = 0xffff;
		hp->ro = 0;

		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
		fcmd->fcp_cntl.cntl_read_data = 1;	/* only rd for now */
		fcmd->fcp_cntl.cntl_write_data = 0;
		fcmd->fcp_data_len = fscsi->scsi_buflen;

		scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
		bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
		    fscsi->scsi_cdblen);

		if (!nodma) {
			FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
			    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
		}

		/*
		 * Send SCSI command to FC transport
		 */

		if (ret == 0) {
			mutex_enter(&ptgt->tgt_mutex);

			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				mutex_exit(&ptgt->tgt_mutex);
				fscsi->scsi_fc_status = xport_retval =
				    fc_ulp_transport(pptr->port_fp_handle,
				    fpkt);
				if (fscsi->scsi_fc_status != FC_SUCCESS) {
					ret = EIO;
				}
			} else {
				mutex_exit(&ptgt->tgt_mutex);
				ret = EBUSY;
			}
		}
	}

	/*
	 * Wait for completion only if fc_ulp_transport was called and it
	 * returned a success. This is the only time callback will happen.
	 * Otherwise, there is no point in waiting
	 */
	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
		ret = fcp_ipkt_sema_wait(icmd);
	}

	/*
	 * Copy data to IOCTL data structures
	 */
	rsp = NULL;
	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
		rsp = (struct fcp_rsp *)fpkt->pkt_resp;

		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!SCSI command to d_id=0x%x lun=0x%x"
			    " failed, Bad FCP response values:"
			    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
			    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
			    ptgt->tgt_d_id, plun->lun_num,
			    rsp->reserved_0, rsp->reserved_1,
			    rsp->fcp_u.fcp_status.reserved_0,
			    rsp->fcp_u.fcp_status.reserved_1,
			    rsp->fcp_response_len, rsp->fcp_sense_len);

			ret = EIO;
		}
	}

	if ((ret == 0) && (rsp != NULL)) {
		/*
		 * Calc response lengths
		 */
		sense_len = 0;
		info_len = 0;

		if (rsp->fcp_u.fcp_status.rsp_len_set) {
			info_len = rsp->fcp_response_len;
		}

		rsp_info = (struct fcp_rsp_info *)
		    ((uint8_t *)rsp + sizeof (struct fcp_rsp));

		/*
		 * Get SCSI status
		 */
		fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
		/*
		 * If a lun was just added or removed and the next command
		 * comes through this interface, we need to capture the check
		 * condition so we can discover the new topology.
		 */
		if (fscsi->scsi_bufstatus != STATUS_GOOD &&
		    rsp->fcp_u.fcp_status.sense_len_set) {
			sense_len = rsp->fcp_sense_len;
			rsp_sense = (caddr_t)((uint8_t *)rsp_info + info_len);
			sense_to = (struct scsi_extended_sense *)rsp_sense;
			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
			    (FCP_SENSE_NO_LUN(sense_to))) {
				reconfig_lun = TRUE;
			}
		}

		/*
		 * reconfig_status is only evaluated when reconfig_lun is
		 * FALSE, in which case it is assigned just below; the
		 * short-circuit in the following condition keeps the
		 * otherwise-uninitialized variable from being read.
		 */
		if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
		    (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
			if (reconfig_lun == FALSE) {
				reconfig_status =
				    fcp_is_reconfig_needed(ptgt, fpkt);
			}

			if ((reconfig_lun == TRUE) ||
			    (reconfig_status == TRUE)) {
				mutex_enter(&ptgt->tgt_mutex);
				if (ptgt->tgt_tid == NULL) {
					/*
					 * Either we've been notified the
					 * REPORT_LUN data has changed, or
					 * we've determined on our own that
					 * we're out of date.  Kick off
					 * rediscovery.
					 */
					tid = timeout(fcp_reconfigure_luns,
					    (caddr_t)ptgt, drv_usectohz(1));

					ptgt->tgt_tid = tid;
					ptgt->tgt_state |= FCP_TGT_BUSY;
					ret = EBUSY;
					reconfig_pending = TRUE;
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
		}

		/*
		 * Calc residuals and buffer lengths
		 */

		if (ret == 0) {
			buf_len = fscsi->scsi_buflen;
			fscsi->scsi_bufresid = 0;
			if (rsp->fcp_u.fcp_status.resid_under) {
				if (rsp->fcp_resid <= fscsi->scsi_buflen) {
					fscsi->scsi_bufresid = rsp->fcp_resid;
				} else {
					cmn_err(CE_WARN, "fcp: bad residue %x "
					    "for txfer len %x", rsp->fcp_resid,
					    fscsi->scsi_buflen);
					fscsi->scsi_bufresid =
					    fscsi->scsi_buflen;
				}
				buf_len -= fscsi->scsi_bufresid;
			}
			if (rsp->fcp_u.fcp_status.resid_over) {
				fscsi->scsi_bufresid = -rsp->fcp_resid;
			}

			fscsi->scsi_rqresid = fscsi->scsi_rqlen - sense_len;
			if (fscsi->scsi_rqlen < sense_len) {
				sense_len = fscsi->scsi_rqlen;
			}

			fscsi->scsi_fc_rspcode = 0;
			if (rsp->fcp_u.fcp_status.rsp_len_set) {
				fscsi->scsi_fc_rspcode = rsp_info->rsp_code;
			}
			fscsi->scsi_pkt_state = fpkt->pkt_state;
			fscsi->scsi_pkt_action = fpkt->pkt_action;
			fscsi->scsi_pkt_reason = fpkt->pkt_reason;

			/*
			 * Copy data and request sense
			 *
			 * Data must be copied by using the FCP_CP_IN macro.
			 * This will ensure the proper byte order since the data
			 * is being copied directly from the memory mapped
			 * device register.
			 *
			 * The response (and request sense) will be in the
			 * correct byte order. No special copy is necessary.
			 */

			if (buf_len) {
				FCP_CP_IN(fpkt->pkt_data,
				    fscsi->scsi_bufaddr,
				    fpkt->pkt_data_acc,
				    buf_len);
			}
			bcopy((void *)rsp_sense,
			    (void *)fscsi->scsi_rqbufaddr,
			    sense_len);
		}
	}

	/*
	 * Cleanup transport data structures if icmd was alloc-ed
	 * So, cleanup happens in the same thread that icmd was alloc-ed
	 */
	if (icmd != NULL) {
		fcp_ipkt_sema_cleanup(icmd);
	}

	/* restore pm busy/idle status */
	if (port_busy) {
		fc_ulp_idle_port(pptr->port_fp_handle);
	}

	/*
	 * Cleanup target. if a reconfig is pending, don't clear the BUSY
	 * flag, it'll be cleared when the reconfig is complete.
	 *
	 * NOTE(review): both branches below currently perform the same
	 * action (clear FCP_TGT_BUSY); the split is kept in case the
	 * target_created path ever needs extra teardown.
	 */
	if ((ptgt != NULL) && !reconfig_pending) {
		/*
		 * If target was created,
		 */
		if (target_created) {
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_state &= ~FCP_TGT_BUSY;
			mutex_exit(&ptgt->tgt_mutex);
		} else {
			/*
			 * De-mark target as busy
			 */
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_state &= ~FCP_TGT_BUSY;
			mutex_exit(&ptgt->tgt_mutex);
		}
	}
	return (ret);
}
2839
2840
2841 static int
2842 fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
2843 fc_packet_t *fpkt)
2844 {
2845 uchar_t *lun_string;
2846 uint16_t lun_num, i;
2847 int num_luns;
2848 int actual_luns;
2849 int num_masked_luns;
2850 int lun_buflen;
2851 struct fcp_lun *plun = NULL;
2852 struct fcp_reportlun_resp *report_lun;
2853 uint8_t reconfig_needed = FALSE;
2854 uint8_t lun_exists = FALSE;
2855 fcp_port_t *pptr = ptgt->tgt_port;
2856
2857 report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);
2858
2859 FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
2860 fpkt->pkt_datalen);
2861
2862 /* get number of luns (which is supplied as LUNS * 8) */
2863 num_luns = BE_32(report_lun->num_lun) >> 3;
2864
2865 /*
2866 * Figure out exactly how many lun strings our response buffer
2867 * can hold.
2868 */
2869 lun_buflen = (fpkt->pkt_datalen -
2870 2 * sizeof (uint32_t)) / sizeof (longlong_t);
2871
2872 /*
2873 * Is our response buffer full or not? We don't want to
2874 * potentially walk beyond the number of luns we have.
2875 */
2876 if (num_luns <= lun_buflen) {
2877 actual_luns = num_luns;
2878 } else {
2879 actual_luns = lun_buflen;
2880 }
2881
2882 mutex_enter(&ptgt->tgt_mutex);
2883
2884 /* Scan each lun to see if we have masked it. */
2885 num_masked_luns = 0;
2886 if (fcp_lun_blacklist != NULL) {
2887 for (i = 0; i < actual_luns; i++) {
2888 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2889 switch (lun_string[0] & 0xC0) {
2890 case FCP_LUN_ADDRESSING:
2891 case FCP_PD_ADDRESSING:
2892 case FCP_VOLUME_ADDRESSING:
2893 lun_num = ((lun_string[0] & 0x3F) << 8)
2894 | lun_string[1];
2895 if (fcp_should_mask(&ptgt->tgt_port_wwn,
2896 lun_num) == TRUE) {
2897 num_masked_luns++;
2898 }
2899 break;
2900 default:
2901 break;
2902 }
2903 }
2904 }
2905
2906 /*
2907 * The quick and easy check. If the number of LUNs reported
2908 * doesn't match the number we currently know about, we need
2909 * to reconfigure.
2910 */
2911 if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
2912 mutex_exit(&ptgt->tgt_mutex);
2913 kmem_free(report_lun, fpkt->pkt_datalen);
2914 return (TRUE);
2915 }
2916
2917 /*
2918 * If the quick and easy check doesn't turn up anything, we walk
2919 * the list of luns from the REPORT_LUN response and look for
2920 * any luns we don't know about. If we find one, we know we need
2921 * to reconfigure. We will skip LUNs that are masked because of the
2922 * blacklist.
2923 */
2924 for (i = 0; i < actual_luns; i++) {
2925 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2926 lun_exists = FALSE;
2927 switch (lun_string[0] & 0xC0) {
2928 case FCP_LUN_ADDRESSING:
2929 case FCP_PD_ADDRESSING:
2930 case FCP_VOLUME_ADDRESSING:
2931 lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
2932
2933 if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
2934 &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
2935 lun_exists = TRUE;
2936 break;
2937 }
2938
2939 for (plun = ptgt->tgt_lun; plun;
2940 plun = plun->lun_next) {
2941 if (plun->lun_num == lun_num) {
2942 lun_exists = TRUE;
2943 break;
2944 }
2945 }
2946 break;
2947 default:
2948 break;
2949 }
2950
2951 if (lun_exists == FALSE) {
2952 reconfig_needed = TRUE;
2953 break;
2954 }
2955 }
2956
2957 mutex_exit(&ptgt->tgt_mutex);
2958 kmem_free(report_lun, fpkt->pkt_datalen);
2959
2960 return (reconfig_needed);
2961 }
2962
2963 /*
 * This function is called by fcp_handle_page83 and uses inquiry response data
 * stored in plun->lun_inq to determine whether or not a device is a member of
 * the table fcp_symmetric_disk_table. We return 0 if it is in the table,
 * otherwise 1.
2968 */
2969 static int
2970 fcp_symmetric_device_probe(struct fcp_lun *plun)
2971 {
2972 struct scsi_inquiry *stdinq = &plun->lun_inq;
2973 char *devidptr;
2974 int i, len;
2975
2976 for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
2977 devidptr = fcp_symmetric_disk_table[i];
2978 len = (int)strlen(devidptr);
2979
2980 if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
2981 return (0);
2982 }
2983 }
2984 return (1);
2985 }
2986
2987
2988 /*
2989 * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl
2990 * It basically returns the current count of # of state change callbacks
2991 * i.e the value of tgt_change_cnt.
2992 *
2993 * INPUT:
2994 * fcp_ioctl.fp_minor -> The minor # of the fp port
2995 * fcp_ioctl.listlen -> 1
2996 * fcp_ioctl.list -> Pointer to a 32 bit integer
2997 */
2998 /*ARGSUSED2*/
2999 static int
3000 fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
3001 {
3002 int ret;
3003 uint32_t link_cnt;
3004 struct fcp_ioctl fioctl;
3005 struct fcp_port *pptr = NULL;
3006
3007 if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
3008 &pptr)) != 0) {
3009 return (ret);
3010 }
3011
3012 ASSERT(pptr != NULL);
3013
3014 if (fioctl.listlen != 1) {
3015 return (EINVAL);
3016 }
3017
3018 mutex_enter(&pptr->port_mutex);
3019 if (pptr->port_state & FCP_STATE_OFFLINE) {
3020 mutex_exit(&pptr->port_mutex);
3021 return (ENXIO);
3022 }
3023
3024 /*
3025 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
3026 * When the fcp initially attaches to the port and there are nothing
3027 * hanging out of the port or if there was a repeat offline state change
3028 * callback (refer fcp_statec_callback() FC_STATE_OFFLINE case).
3029 * In the latter case, port_tmp_cnt will be non-zero and that is how we
3030 * will differentiate the 2 cases.
3031 */
3032 if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
3033 mutex_exit(&pptr->port_mutex);
3034 return (ENXIO);
3035 }
3036
3037 link_cnt = pptr->port_link_cnt;
3038 mutex_exit(&pptr->port_mutex);
3039
3040 if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
3041 return (EFAULT);
3042 }
3043
3044 #ifdef _MULTI_DATAMODEL
3045 switch (ddi_model_convert_from(mode & FMODELS)) {
3046 case DDI_MODEL_ILP32: {
3047 struct fcp32_ioctl f32_ioctl;
3048
3049 f32_ioctl.fp_minor = fioctl.fp_minor;
3050 f32_ioctl.listlen = fioctl.listlen;
3051 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
3052 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
3053 sizeof (struct fcp32_ioctl), mode)) {
3054 return (EFAULT);
3055 }
3056 break;
3057 }
3058 case DDI_MODEL_NONE:
3059 if (ddi_copyout((void *)&fioctl, (void *)data,
3060 sizeof (struct fcp_ioctl), mode)) {
3061 return (EFAULT);
3062 }
3063 break;
3064 }
3065 #else /* _MULTI_DATAMODEL */
3066
3067 if (ddi_copyout((void *)&fioctl, (void *)data,
3068 sizeof (struct fcp_ioctl), mode)) {
3069 return (EFAULT);
3070 }
3071 #endif /* _MULTI_DATAMODEL */
3072
3073 return (0);
3074 }
3075
3076 /*
3077 * This function copies the fcp_ioctl structure passed in from user land
3078 * into kernel land. Handles 32 bit applications.
3079 */
3080 /*ARGSUSED*/
3081 static int
3082 fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
3083 struct fcp_ioctl *fioctl, struct fcp_port **pptr)
3084 {
3085 struct fcp_port *t_pptr;
3086
3087 #ifdef _MULTI_DATAMODEL
3088 switch (ddi_model_convert_from(mode & FMODELS)) {
3089 case DDI_MODEL_ILP32: {
3090 struct fcp32_ioctl f32_ioctl;
3091
3092 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
3093 sizeof (struct fcp32_ioctl), mode)) {
3094 return (EFAULT);
3095 }
3096 fioctl->fp_minor = f32_ioctl.fp_minor;
3097 fioctl->listlen = f32_ioctl.listlen;
3098 fioctl->list = (caddr_t)(long)f32_ioctl.list;
3099 break;
3100 }
3101 case DDI_MODEL_NONE:
3102 if (ddi_copyin((void *)data, (void *)fioctl,
3103 sizeof (struct fcp_ioctl), mode)) {
3104 return (EFAULT);
3105 }
3106 break;
3107 }
3108
3109 #else /* _MULTI_DATAMODEL */
3110 if (ddi_copyin((void *)data, (void *)fioctl,
3111 sizeof (struct fcp_ioctl), mode)) {
3112 return (EFAULT);
3113 }
3114 #endif /* _MULTI_DATAMODEL */
3115
3116 /*
3117 * Right now we can assume that the minor number matches with
3118 * this instance of fp. If this changes we will need to
3119 * revisit this logic.
3120 */
3121 mutex_enter(&fcp_global_mutex);
3122 t_pptr = fcp_port_head;
3123 while (t_pptr) {
3124 if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
3125 break;
3126 } else {
3127 t_pptr = t_pptr->port_next;
3128 }
3129 }
3130 *pptr = t_pptr;
3131 mutex_exit(&fcp_global_mutex);
3132 if (t_pptr == NULL) {
3133 return (ENXIO);
3134 }
3135
3136 return (0);
3137 }
3138
3139 /*
3140 * Function: fcp_port_create_tgt
3141 *
3142 * Description: As the name suggest this function creates the target context
 *		 specified by the WWN provided by the caller. If the
3144 * creation goes well and the target is known by fp/fctl a PLOGI
3145 * followed by a PRLI are issued.
3146 *
3147 * Argument: pptr fcp port structure
3148 * pwwn WWN of the target
3149 * ret_val Address of the return code. It could be:
3150 * EIO, ENOMEM or 0.
3151 * fc_status PLOGI or PRLI status completion
3152 * fc_pkt_state PLOGI or PRLI state completion
3153 * fc_pkt_reason PLOGI or PRLI reason completion
3154 * fc_pkt_action PLOGI or PRLI action completion
3155 *
3156 * Return Value: NULL if it failed
3157 * Target structure address if it succeeds
3158 */
3159 static struct fcp_tgt *
3160 fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
3161 int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
3162 {
3163 struct fcp_tgt *ptgt = NULL;
3164 fc_portmap_t devlist;
3165 int lcount;
3166 int error;
3167
3168 *ret_val = 0;
3169
3170 /*
3171 * Check FC port device & get port map
3172 */
3173 if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
3174 &error, 1) == NULL) {
3175 *ret_val = EIO;
3176 } else {
3177 if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
3178 &devlist) != FC_SUCCESS) {
3179 *ret_val = EIO;
3180 }
3181 }
3182
3183 /* Set port map flags */
3184 devlist.map_type = PORT_DEVICE_USER_CREATE;
3185
3186 /* Allocate target */
3187 if (*ret_val == 0) {
3188 lcount = pptr->port_link_cnt;
3189 ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
3190 if (ptgt == NULL) {
3191 fcp_log(CE_WARN, pptr->port_dip,
3192 "!FC target allocation failed");
3193 *ret_val = ENOMEM;
3194 } else {
3195 /* Setup target */
3196 mutex_enter(&ptgt->tgt_mutex);
3197
3198 ptgt->tgt_statec_cause = FCP_CAUSE_TGT_CHANGE;
3199 ptgt->tgt_tmp_cnt = 1;
3200 ptgt->tgt_d_id = devlist.map_did.port_id;
3201 ptgt->tgt_hard_addr =
3202 devlist.map_hard_addr.hard_addr;
3203 ptgt->tgt_pd_handle = devlist.map_pd;
3204 ptgt->tgt_fca_dev = NULL;
3205
3206 bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
3207 FC_WWN_SIZE);
3208 bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
3209 FC_WWN_SIZE);
3210
3211 mutex_exit(&ptgt->tgt_mutex);
3212 }
3213 }
3214
3215 /* Release global mutex for PLOGI and PRLI */
3216 mutex_exit(&fcp_global_mutex);
3217
3218 /* Send PLOGI (If necessary) */
3219 if (*ret_val == 0) {
3220 *ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
3221 fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3222 }
3223
3224 /* Send PRLI (If necessary) */
3225 if (*ret_val == 0) {
3226 *ret_val = fcp_tgt_send_prli(ptgt, fc_status,
3227 fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3228 }
3229
3230 mutex_enter(&fcp_global_mutex);
3231
3232 return (ptgt);
3233 }
3234
3235 /*
3236 * Function: fcp_tgt_send_plogi
3237 *
3238 * Description: This function sends a PLOGI to the target specified by the
3239 * caller and waits till it completes.
3240 *
3241 * Argument: ptgt Target to send the plogi to.
3242 * fc_status Status returned by fp/fctl in the PLOGI request.
3243 * fc_pkt_state State returned by fp/fctl in the PLOGI request.
3244 * fc_pkt_reason Reason returned by fp/fctl in the PLOGI request.
3245 * fc_pkt_action Action returned by fp/fctl in the PLOGI request.
3246 *
3247 * Return Value: 0
3248 * ENOMEM
3249 * EIO
3250 *
3251 * Context: User context.
3252 */
3253 static int
3254 fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3255 int *fc_pkt_reason, int *fc_pkt_action)
3256 {
3257 struct fcp_port *pptr;
3258 struct fcp_ipkt *icmd;
3259 struct fc_packet *fpkt;
3260 fc_frame_hdr_t *hp;
3261 struct la_els_logi logi;
3262 int tcount;
3263 int lcount;
3264 int ret, login_retval = ~FC_SUCCESS;
3265
3266 ret = 0;
3267
3268 pptr = ptgt->tgt_port;
3269
3270 lcount = pptr->port_link_cnt;
3271 tcount = ptgt->tgt_change_cnt;
3272
3273 /* Alloc internal packet */
3274 icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
3275 sizeof (la_els_logi_t), 0,
3276 pptr->port_state & FCP_STATE_FCA_IS_NODMA,
3277 lcount, tcount, 0, FC_INVALID_RSCN_COUNT);
3278
3279 if (icmd == NULL) {
3280 ret = ENOMEM;
3281 } else {
3282 /*
3283 * Setup internal packet as sema sync
3284 */
3285 fcp_ipkt_sema_init(icmd);
3286
3287 /*
3288 * Setup internal packet (icmd)
3289 */
3290 icmd->ipkt_lun = NULL;
3291 icmd->ipkt_restart = 0;
3292 icmd->ipkt_retries = 0;
3293 icmd->ipkt_opcode = LA_ELS_PLOGI;
3294
3295 /*
3296 * Setup fc_packet
3297 */
3298 fpkt = icmd->ipkt_fpkt;
3299
3300 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
3301 fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
3302 fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
3303
3304 /*
3305 * Setup FC frame header
3306 */
3307 hp = &fpkt->pkt_cmd_fhdr;
3308
3309 hp->s_id = pptr->port_id; /* source ID */
3310 hp->d_id = ptgt->tgt_d_id; /* dest ID */
3311 hp->r_ctl = R_CTL_ELS_REQ;
3312 hp->type = FC_TYPE_EXTENDED_LS;
3313 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3314 hp->seq_id = 0;
3315 hp->rsvd = 0;
3316 hp->df_ctl = 0;
3317 hp->seq_cnt = 0;
3318 hp->ox_id = 0xffff; /* i.e. none */
3319 hp->rx_id = 0xffff; /* i.e. none */
3320 hp->ro = 0;
3321
3322 /*
3323 * Setup PLOGI
3324 */
3325 bzero(&logi, sizeof (struct la_els_logi));
3326 logi.ls_code.ls_code = LA_ELS_PLOGI;
3327
3328 FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
3329 fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
3330
3331 /*
3332 * Send PLOGI
3333 */
3334 *fc_status = login_retval =
3335 fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
3336 if (*fc_status != FC_SUCCESS) {
3337 ret = EIO;
3338 }
3339 }
3340
3341 /*
3342 * Wait for completion
3343 */
3344 if ((ret == 0) && (login_retval == FC_SUCCESS)) {
3345 ret = fcp_ipkt_sema_wait(icmd);
3346
3347 *fc_pkt_state = fpkt->pkt_state;
3348 *fc_pkt_reason = fpkt->pkt_reason;
3349 *fc_pkt_action = fpkt->pkt_action;
3350 }
3351
3352 /*
3353 * Cleanup transport data structures if icmd was alloc-ed AND if there
3354 * is going to be no callback (i.e if fc_ulp_login() failed).
3355 * Otherwise, cleanup happens in callback routine.
3356 */
3357 if (icmd != NULL) {
3358 fcp_ipkt_sema_cleanup(icmd);
3359 }
3360
3361 return (ret);
3362 }
3363
3364 /*
3365 * Function: fcp_tgt_send_prli
3366 *
3367 * Description: Does nothing as of today.
3368 *
3369 * Argument: ptgt Target to send the prli to.
3370 * fc_status Status returned by fp/fctl in the PRLI request.
3371 * fc_pkt_state State returned by fp/fctl in the PRLI request.
3372 * fc_pkt_reason Reason returned by fp/fctl in the PRLI request.
3373 * fc_pkt_action Action returned by fp/fctl in the PRLI request.
3374 *
3375 * Return Value: 0
3376 */
3377 /*ARGSUSED*/
3378 static int
3379 fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3380 int *fc_pkt_reason, int *fc_pkt_action)
3381 {
3382 return (0);
3383 }
3384
3385 /*
3386 * Function: fcp_ipkt_sema_init
3387 *
3388 * Description: Initializes the semaphore contained in the internal packet.
3389 *
3390 * Argument: icmd Internal packet the semaphore of which must be
3391 * initialized.
3392 *
3393 * Return Value: None
3394 *
3395 * Context: User context only.
3396 */
3397 static void
3398 fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
3399 {
3400 struct fc_packet *fpkt;
3401
3402 fpkt = icmd->ipkt_fpkt;
3403
3404 /* Create semaphore for sync */
3405 sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);
3406
3407 /* Setup the completion callback */
3408 fpkt->pkt_comp = fcp_ipkt_sema_callback;
3409 }
3410
3411 /*
3412 * Function: fcp_ipkt_sema_wait
3413 *
3414 * Description: Wait on the semaphore embedded in the internal packet. The
3415 * semaphore is released in the callback.
3416 *
3417 * Argument: icmd Internal packet to wait on for completion.
3418 *
3419 * Return Value: 0
3420 * EIO
3421 * EBUSY
3422 * EAGAIN
3423 *
3424 * Context: User context only.
3425 *
3426 * This function does a conversion between the field pkt_state of the fc_packet
3427 * embedded in the internal packet (icmd) and the code it returns.
3428 */
3429 static int
3430 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3431 {
3432 struct fc_packet *fpkt;
3433 int ret;
3434
3435 ret = EIO;
3436 fpkt = icmd->ipkt_fpkt;
3437
3438 /*
3439 * Wait on semaphore
3440 */
3441 sema_p(&(icmd->ipkt_sema));
3442
3443 /*
3444 * Check the status of the FC packet
3445 */
3446 switch (fpkt->pkt_state) {
3447 case FC_PKT_SUCCESS:
3448 ret = 0;
3449 break;
3450 case FC_PKT_LOCAL_RJT:
3451 switch (fpkt->pkt_reason) {
3452 case FC_REASON_SEQ_TIMEOUT:
3453 case FC_REASON_RX_BUF_TIMEOUT:
3454 ret = EAGAIN;
3455 break;
3456 case FC_REASON_PKT_BUSY:
3457 ret = EBUSY;
3458 break;
3459 }
3460 break;
3461 case FC_PKT_TIMEOUT:
3462 ret = EAGAIN;
3463 break;
3464 case FC_PKT_LOCAL_BSY:
3465 case FC_PKT_TRAN_BSY:
3466 case FC_PKT_NPORT_BSY:
3467 case FC_PKT_FABRIC_BSY:
3468 ret = EBUSY;
3469 break;
3470 case FC_PKT_LS_RJT:
3471 case FC_PKT_BA_RJT:
3472 switch (fpkt->pkt_reason) {
3473 case FC_REASON_LOGICAL_BSY:
3474 ret = EBUSY;
3475 break;
3476 }
3477 break;
3478 case FC_PKT_FS_RJT:
3479 switch (fpkt->pkt_reason) {
3480 case FC_REASON_FS_LOGICAL_BUSY:
3481 ret = EBUSY;
3482 break;
3483 }
3484 break;
3485 }
3486
3487 return (ret);
3488 }
3489
3490 /*
3491 * Function: fcp_ipkt_sema_callback
3492 *
3493 * Description: Registered as the completion callback function for the FC
3494 * transport when the ipkt semaphore is used for sync. This will
3495 * cleanup the used data structures, if necessary and wake up
3496 * the user thread to complete the transaction.
3497 *
3498 * Argument: fpkt FC packet (points to the icmd)
3499 *
3500 * Return Value: None
3501 *
3502 * Context: User context only
3503 */
3504 static void
3505 fcp_ipkt_sema_callback(struct fc_packet *fpkt)
3506 {
3507 struct fcp_ipkt *icmd;
3508
3509 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
3510
3511 /*
3512 * Wake up user thread
3513 */
3514 sema_v(&(icmd->ipkt_sema));
3515 }
3516
3517 /*
3518 * Function: fcp_ipkt_sema_cleanup
3519 *
3520 * Description: Called to cleanup (if necessary) the data structures used
3521 * when ipkt sema is used for sync. This function will detect
3522 * whether the caller is the last thread (via counter) and
3523 * cleanup only if necessary.
3524 *
3525 * Argument: icmd Internal command packet
3526 *
3527 * Return Value: None
3528 *
3529 * Context: User context only
3530 */
3531 static void
3532 fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
3533 {
3534 struct fcp_tgt *ptgt;
3535 struct fcp_port *pptr;
3536
3537 ptgt = icmd->ipkt_tgt;
3538 pptr = icmd->ipkt_port;
3539
3540 /*
3541 * Acquire data structure
3542 */
3543 mutex_enter(&ptgt->tgt_mutex);
3544
3545 /*
3546 * Destroy semaphore
3547 */
3548 sema_destroy(&(icmd->ipkt_sema));
3549
3550 /*
3551 * Cleanup internal packet
3552 */
3553 mutex_exit(&ptgt->tgt_mutex);
3554 fcp_icmd_free(pptr, icmd);
3555 }
3556
3557 /*
3558 * Function: fcp_port_attach
3559 *
3560 * Description: Called by the transport framework to resume, suspend or
3561 * attach a new port.
3562 *
3563 * Argument: ulph Port handle
3564 * *pinfo Port information
3565 * cmd Command
3566 * s_id Port ID
3567 *
3568 * Return Value: FC_FAILURE or FC_SUCCESS
3569 */
3570 /*ARGSUSED*/
3571 static int
3572 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3573 fc_attach_cmd_t cmd, uint32_t s_id)
3574 {
3575 int instance;
3576 int res = FC_FAILURE; /* default result */
3577
3578 ASSERT(pinfo != NULL);
3579
3580 instance = ddi_get_instance(pinfo->port_dip);
3581
3582 switch (cmd) {
3583 case FC_CMD_ATTACH:
3584 /*
3585 * this port instance attaching for the first time (or after
3586 * being detached before)
3587 */
3588 if (fcp_handle_port_attach(ulph, pinfo, s_id,
3589 instance) == DDI_SUCCESS) {
3590 res = FC_SUCCESS;
3591 } else {
3592 ASSERT(ddi_get_soft_state(fcp_softstate,
3593 instance) == NULL);
3594 }
3595 break;
3596
3597 case FC_CMD_RESUME:
3598 case FC_CMD_POWER_UP:
3599 /*
3600 * this port instance was attached and the suspended and
3601 * will now be resumed
3602 */
3603 if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3604 instance) == DDI_SUCCESS) {
3605 res = FC_SUCCESS;
3606 }
3607 break;
3608
3609 default:
3610 /* shouldn't happen */
3611 FCP_TRACE(fcp_logq, "fcp",
3612 fcp_trace, FCP_BUF_LEVEL_2, 0,
3613 "port_attach: unknown cmdcommand: %d", cmd);
3614 break;
3615 }
3616
3617 /* return result */
3618 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3619 FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3620
3621 return (res);
3622 }
3623
3624
3625 /*
3626 * detach or suspend this port instance
3627 *
3628 * acquires and releases the global mutex
3629 *
3630 * acquires and releases the mutex for this port
3631 *
3632 * acquires and releases the hotplug mutex for this port
3633 */
3634 /*ARGSUSED*/
3635 static int
3636 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
3637 fc_detach_cmd_t cmd)
3638 {
3639 int flag;
3640 int instance;
3641 struct fcp_port *pptr;
3642
3643 instance = ddi_get_instance(info->port_dip);
3644 pptr = ddi_get_soft_state(fcp_softstate, instance);
3645
3646 switch (cmd) {
3647 case FC_CMD_SUSPEND:
3648 FCP_DTRACE(fcp_logq, "fcp",
3649 fcp_trace, FCP_BUF_LEVEL_8, 0,
3650 "port suspend called for port %d", instance);
3651 flag = FCP_STATE_SUSPENDED;
3652 break;
3653
3654 case FC_CMD_POWER_DOWN:
3655 FCP_DTRACE(fcp_logq, "fcp",
3656 fcp_trace, FCP_BUF_LEVEL_8, 0,
3657 "port power down called for port %d", instance);
3658 flag = FCP_STATE_POWER_DOWN;
3659 break;
3660
3661 case FC_CMD_DETACH:
3662 FCP_DTRACE(fcp_logq, "fcp",
3663 fcp_trace, FCP_BUF_LEVEL_8, 0,
3664 "port detach called for port %d", instance);
3665 flag = FCP_STATE_DETACHING;
3666 break;
3667
3668 default:
3669 /* shouldn't happen */
3670 return (FC_FAILURE);
3671 }
3672 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3673 FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");
3674
3675 return (fcp_handle_port_detach(pptr, flag, instance));
3676 }
3677
3678
3679 /*
3680 * called for ioctls on the transport's devctl interface, and the transport
3681 * has passed it to us
3682 *
3683 * this will only be called for device control ioctls (i.e. hotplugging stuff)
3684 *
3685 * return FC_SUCCESS if we decide to claim the ioctl,
3686 * else return FC_UNCLAIMED
3687 *
3688 * *rval is set iff we decide to claim the ioctl
3689 */
3690 /*ARGSUSED*/
3691 static int
3692 fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
3693 intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
3694 {
3695 int retval = FC_UNCLAIMED; /* return value */
3696 struct fcp_port *pptr = NULL; /* our soft state */
3697 struct devctl_iocdata *dcp = NULL; /* for devctl */
3698 dev_info_t *cdip;
3699 mdi_pathinfo_t *pip = NULL;
3700 char *ndi_nm; /* NDI name */
3701 char *ndi_addr; /* NDI addr */
3702 int is_mpxio, circ;
3703 int devi_entered = 0;
3704 clock_t end_time;
3705
3706 ASSERT(rval != NULL);
3707
3708 FCP_DTRACE(fcp_logq, "fcp",
3709 fcp_trace, FCP_BUF_LEVEL_8, 0,
3710 "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);
3711
3712 /* if already claimed then forget it */
3713 if (claimed) {
3714 /*
3715 * for now, if this ioctl has already been claimed, then
3716 * we just ignore it
3717 */
3718 return (retval);
3719 }
3720
3721 /* get our port info */
3722 if ((pptr = fcp_get_port(port_handle)) == NULL) {
3723 fcp_log(CE_WARN, NULL,
3724 "!fcp:Invalid port handle handle in ioctl");
3725 *rval = ENXIO;
3726 return (retval);
3727 }
3728 is_mpxio = pptr->port_mpxio;
3729
3730 switch (cmd) {
3731 case DEVCTL_BUS_GETSTATE:
3732 case DEVCTL_BUS_QUIESCE:
3733 case DEVCTL_BUS_UNQUIESCE:
3734 case DEVCTL_BUS_RESET:
3735 case DEVCTL_BUS_RESETALL:
3736
3737 case DEVCTL_BUS_DEV_CREATE:
3738 if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3739 return (retval);
3740 }
3741 break;
3742
3743 case DEVCTL_DEVICE_GETSTATE:
3744 case DEVCTL_DEVICE_OFFLINE:
3745 case DEVCTL_DEVICE_ONLINE:
3746 case DEVCTL_DEVICE_REMOVE:
3747 case DEVCTL_DEVICE_RESET:
3748 if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3749 return (retval);
3750 }
3751
3752 ASSERT(dcp != NULL);
3753
3754 /* ensure we have a name and address */
3755 if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
3756 ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
3757 FCP_TRACE(fcp_logq, pptr->port_instbuf,
3758 fcp_trace, FCP_BUF_LEVEL_2, 0,
3759 "ioctl: can't get name (%s) or addr (%s)",
3760 ndi_nm ? ndi_nm : "<null ptr>",
3761 ndi_addr ? ndi_addr : "<null ptr>");
3762 ndi_dc_freehdl(dcp);
3763 return (retval);
3764 }
3765
3766
3767 /* get our child's DIP */
3768 ASSERT(pptr != NULL);
3769 if (is_mpxio) {
3770 mdi_devi_enter(pptr->port_dip, &circ);
3771 } else {
3772 ndi_devi_enter(pptr->port_dip, &circ);
3773 }
3774 devi_entered = 1;
3775
3776 if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
3777 ndi_addr)) == NULL) {
3778 /* Look for virtually enumerated devices. */
3779 pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
3780 if (pip == NULL ||
3781 ((cdip = mdi_pi_get_client(pip)) == NULL)) {
3782 *rval = ENXIO;
3783 goto out;
3784 }
3785 }
3786 break;
3787
3788 default:
3789 *rval = ENOTTY;
3790 return (retval);
3791 }
3792
3793 /* this ioctl is ours -- process it */
3794
3795 retval = FC_SUCCESS; /* just means we claim the ioctl */
3796
3797 /* we assume it will be a success; else we'll set error value */
3798 *rval = 0;
3799
3800
3801 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
3802 fcp_trace, FCP_BUF_LEVEL_8, 0,
3803 "ioctl: claiming this one");
3804
3805 /* handle ioctls now */
3806 switch (cmd) {
3807 case DEVCTL_DEVICE_GETSTATE:
3808 ASSERT(cdip != NULL);
3809 ASSERT(dcp != NULL);
3810 if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
3811 *rval = EFAULT;
3812 }
3813 break;
3814
3815 case DEVCTL_DEVICE_REMOVE:
3816 case DEVCTL_DEVICE_OFFLINE: {
3817 int flag = 0;
3818 int lcount;
3819 int tcount;
3820 struct fcp_pkt *head = NULL;
3821 struct fcp_lun *plun;
3822 child_info_t *cip = CIP(cdip);
3823 int all = 1;
3824 struct fcp_lun *tplun;
3825 struct fcp_tgt *ptgt;
3826
3827 ASSERT(pptr != NULL);
3828 ASSERT(cdip != NULL);
3829
3830 mutex_enter(&pptr->port_mutex);
3831 if (pip != NULL) {
3832 cip = CIP(pip);
3833 }
3834 if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3835 mutex_exit(&pptr->port_mutex);
3836 *rval = ENXIO;
3837 break;
3838 }
3839
3840 head = fcp_scan_commands(plun);
3841 if (head != NULL) {
3842 fcp_abort_commands(head, LUN_PORT);
3843 }
3844 lcount = pptr->port_link_cnt;
3845 tcount = plun->lun_tgt->tgt_change_cnt;
3846 mutex_exit(&pptr->port_mutex);
3847
3848 if (cmd == DEVCTL_DEVICE_REMOVE) {
3849 flag = NDI_DEVI_REMOVE;
3850 }
3851
3852 if (is_mpxio) {
3853 mdi_devi_exit(pptr->port_dip, circ);
3854 } else {
3855 ndi_devi_exit(pptr->port_dip, circ);
3856 }
3857 devi_entered = 0;
3858
3859 *rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3860 FCP_OFFLINE, lcount, tcount, flag);
3861
3862 if (*rval != NDI_SUCCESS) {
3863 *rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
3864 break;
3865 }
3866
3867 fcp_update_offline_flags(plun);
3868
3869 ptgt = plun->lun_tgt;
3870 mutex_enter(&ptgt->tgt_mutex);
3871 for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
3872 tplun->lun_next) {
3873 mutex_enter(&tplun->lun_mutex);
3874 if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
3875 all = 0;
3876 }
3877 mutex_exit(&tplun->lun_mutex);
3878 }
3879
3880 if (all) {
3881 ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
3882 /*
3883 * The user is unconfiguring/offlining the device.
3884 * If fabric and the auto configuration is set
3885 * then make sure the user is the only one who
3886 * can reconfigure the device.
3887 */
3888 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
3889 fcp_enable_auto_configuration) {
3890 ptgt->tgt_manual_config_only = 1;
3891 }
3892 }
3893 mutex_exit(&ptgt->tgt_mutex);
3894 break;
3895 }
3896
3897 case DEVCTL_DEVICE_ONLINE: {
3898 int lcount;
3899 int tcount;
3900 struct fcp_lun *plun;
3901 child_info_t *cip = CIP(cdip);
3902
3903 ASSERT(cdip != NULL);
3904 ASSERT(pptr != NULL);
3905
3906 mutex_enter(&pptr->port_mutex);
3907 if (pip != NULL) {
3908 cip = CIP(pip);
3909 }
3910 if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3911 mutex_exit(&pptr->port_mutex);
3912 *rval = ENXIO;
3913 break;
3914 }
3915 lcount = pptr->port_link_cnt;
3916 tcount = plun->lun_tgt->tgt_change_cnt;
3917 mutex_exit(&pptr->port_mutex);
3918
3919 /*
3920 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
3921 * to allow the device attach to occur when the device is
3922 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
3923 * from the scsi_probe()).
3924 */
3925 mutex_enter(&LUN_TGT->tgt_mutex);
3926 plun->lun_state |= FCP_LUN_ONLINING;
3927 mutex_exit(&LUN_TGT->tgt_mutex);
3928
3929 if (is_mpxio) {
3930 mdi_devi_exit(pptr->port_dip, circ);
3931 } else {
3932 ndi_devi_exit(pptr->port_dip, circ);
3933 }
3934 devi_entered = 0;
3935
3936 *rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3937 FCP_ONLINE, lcount, tcount, 0);
3938
3939 if (*rval != NDI_SUCCESS) {
3940 /* Reset the FCP_LUN_ONLINING bit */
3941 mutex_enter(&LUN_TGT->tgt_mutex);
3942 plun->lun_state &= ~FCP_LUN_ONLINING;
3943 mutex_exit(&LUN_TGT->tgt_mutex);
3944 *rval = EIO;
3945 break;
3946 }
3947 mutex_enter(&LUN_TGT->tgt_mutex);
3948 plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
3949 FCP_LUN_ONLINING);
3950 mutex_exit(&LUN_TGT->tgt_mutex);
3951 break;
3952 }
3953
3954 case DEVCTL_BUS_DEV_CREATE: {
3955 uchar_t *bytes = NULL;
3956 uint_t nbytes;
3957 struct fcp_tgt *ptgt = NULL;
3958 struct fcp_lun *plun = NULL;
3959 dev_info_t *useless_dip = NULL;
3960
3961 *rval = ndi_dc_devi_create(dcp, pptr->port_dip,
3962 DEVCTL_CONSTRUCT, &useless_dip);
3963 if (*rval != 0 || useless_dip == NULL) {
3964 break;
3965 }
3966
3967 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
3968 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
3969 &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
3970 *rval = EINVAL;
3971 (void) ndi_devi_free(useless_dip);
3972 if (bytes != NULL) {
3973 ddi_prop_free(bytes);
3974 }
3975 break;
3976 }
3977
3978 *rval = fcp_create_on_demand(pptr, bytes);
3979 if (*rval == 0) {
3980 mutex_enter(&pptr->port_mutex);
3981 ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
3982 if (ptgt) {
3983 /*
3984 * We now have a pointer to the target that
3985 * was created. Lets point to the first LUN on
3986 * this new target.
3987 */
3988 mutex_enter(&ptgt->tgt_mutex);
3989
3990 plun = ptgt->tgt_lun;
3991 /*
3992 * There may be stale/offline LUN entries on
3993 * this list (this is by design) and so we have
3994 * to make sure we point to the first online
3995 * LUN
3996 */
3997 while (plun &&
3998 plun->lun_state & FCP_LUN_OFFLINE) {
3999 plun = plun->lun_next;
4000 }
4001
4002 mutex_exit(&ptgt->tgt_mutex);
4003 }
4004 mutex_exit(&pptr->port_mutex);
4005 }
4006
4007 if (*rval == 0 && ptgt && plun) {
4008 mutex_enter(&plun->lun_mutex);
4009 /*
4010 * Allow up to fcp_lun_ready_retry seconds to
4011 * configure all the luns behind the target.
4012 *
4013 * The intent here is to allow targets with long
4014 * reboot/reset-recovery times to become available
4015 * while limiting the maximum wait time for an
4016 * unresponsive target.
4017 */
4018 end_time = ddi_get_lbolt() +
4019 SEC_TO_TICK(fcp_lun_ready_retry);
4020
4021 while (ddi_get_lbolt() < end_time) {
4022 retval = FC_SUCCESS;
4023
4024 /*
4025 * The new ndi interfaces for on-demand creation
4026 * are inflexible, Do some more work to pass on
4027 * a path name of some LUN (design is broken !)
4028 */
4029 if (plun->lun_cip) {
4030 if (plun->lun_mpxio == 0) {
4031 cdip = DIP(plun->lun_cip);
4032 } else {
4033 cdip = mdi_pi_get_client(
4034 PIP(plun->lun_cip));
4035 }
4036 if (cdip == NULL) {
4037 *rval = ENXIO;
4038 break;
4039 }
4040
4041 if (!i_ddi_devi_attached(cdip)) {
4042 mutex_exit(&plun->lun_mutex);
4043 delay(drv_usectohz(1000000));
4044 mutex_enter(&plun->lun_mutex);
4045 } else {
4046 /*
4047 * This Lun is ready, lets
4048 * check the next one.
4049 */
4050 mutex_exit(&plun->lun_mutex);
4051 plun = plun->lun_next;
4052 while (plun && (plun->lun_state
4053 & FCP_LUN_OFFLINE)) {
4054 plun = plun->lun_next;
4055 }
4056 if (!plun) {
4057 break;
4058 }
4059 mutex_enter(&plun->lun_mutex);
4060 }
4061 } else {
4062 /*
4063 * lun_cip field for a valid lun
4064 * should never be NULL. Fail the
4065 * command.
4066 */
4067 *rval = ENXIO;
4068 break;
4069 }
4070 }
4071 if (plun) {
4072 mutex_exit(&plun->lun_mutex);
4073 } else {
4074 char devnm[MAXNAMELEN];
4075 int nmlen;
4076
4077 nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
4078 ddi_node_name(cdip),
4079 ddi_get_name_addr(cdip));
4080
4081 if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
4082 0) {
4083 *rval = EFAULT;
4084 }
4085 }
4086 } else {
4087 int i;
4088 char buf[25];
4089
4090 for (i = 0; i < FC_WWN_SIZE; i++) {
4091 (void) sprintf(&buf[i << 1], "%02x", bytes[i]);
4092 }
4093
4094 fcp_log(CE_WARN, pptr->port_dip,
4095 "!Failed to create nodes for pwwn=%s; error=%x",
4096 buf, *rval);
4097 }
4098
4099 (void) ndi_devi_free(useless_dip);
4100 ddi_prop_free(bytes);
4101 break;
4102 }
4103
4104 case DEVCTL_DEVICE_RESET: {
4105 struct fcp_lun *plun;
4106 child_info_t *cip = CIP(cdip);
4107
4108 ASSERT(cdip != NULL);
4109 ASSERT(pptr != NULL);
4110 mutex_enter(&pptr->port_mutex);
4111 if (pip != NULL) {
4112 cip = CIP(pip);
4113 }
4114 if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
4115 mutex_exit(&pptr->port_mutex);
4116 *rval = ENXIO;
4117 break;
4118 }
4119 mutex_exit(&pptr->port_mutex);
4120
4121 mutex_enter(&plun->lun_tgt->tgt_mutex);
4122 if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
4123 mutex_exit(&plun->lun_tgt->tgt_mutex);
4124
4125 *rval = ENXIO;
4126 break;
4127 }
4128
4129 if (plun->lun_sd == NULL) {
4130 mutex_exit(&plun->lun_tgt->tgt_mutex);
4131
4132 *rval = ENXIO;
4133 break;
4134 }
4135 mutex_exit(&plun->lun_tgt->tgt_mutex);
4136
4137 /*
4138 * set up ap so that fcp_reset can figure out
4139 * which target to reset
4140 */
4141 if (fcp_scsi_reset(&plun->lun_sd->sd_address,
4142 RESET_TARGET) == FALSE) {
4143 *rval = EIO;
4144 }
4145 break;
4146 }
4147
4148 case DEVCTL_BUS_GETSTATE:
4149 ASSERT(dcp != NULL);
4150 ASSERT(pptr != NULL);
4151 ASSERT(pptr->port_dip != NULL);
4152 if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
4153 NDI_SUCCESS) {
4154 *rval = EFAULT;
4155 }
4156 break;
4157
4158 case DEVCTL_BUS_QUIESCE:
4159 case DEVCTL_BUS_UNQUIESCE:
4160 *rval = ENOTSUP;
4161 break;
4162
4163 case DEVCTL_BUS_RESET:
4164 case DEVCTL_BUS_RESETALL:
4165 ASSERT(pptr != NULL);
4166 (void) fcp_linkreset(pptr, NULL, KM_SLEEP);
4167 break;
4168
4169 default:
4170 ASSERT(dcp != NULL);
4171 *rval = ENOTTY;
4172 break;
4173 }
4174
4175 /* all done -- clean up and return */
4176 out: if (devi_entered) {
4177 if (is_mpxio) {
4178 mdi_devi_exit(pptr->port_dip, circ);
4179 } else {
4180 ndi_devi_exit(pptr->port_dip, circ);
4181 }
4182 }
4183
4184 if (dcp != NULL) {
4185 ndi_dc_freehdl(dcp);
4186 }
4187
4188 return (retval);
4189 }
4190
4191
4192 /*ARGSUSED*/
4193 static int
4194 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4195 uint32_t claimed)
4196 {
4197 uchar_t r_ctl;
4198 uchar_t ls_code;
4199 struct fcp_port *pptr;
4200
4201 if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4202 return (FC_UNCLAIMED);
4203 }
4204
4205 mutex_enter(&pptr->port_mutex);
4206 if (pptr->port_state & (FCP_STATE_DETACHING |
4207 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4208 mutex_exit(&pptr->port_mutex);
4209 return (FC_UNCLAIMED);
4210 }
4211 mutex_exit(&pptr->port_mutex);
4212
4213 r_ctl = buf->ub_frame.r_ctl;
4214
4215 switch (r_ctl & R_CTL_ROUTING) {
4216 case R_CTL_EXTENDED_SVC:
4217 if (r_ctl == R_CTL_ELS_REQ) {
4218 ls_code = buf->ub_buffer[0];
4219
4220 switch (ls_code) {
4221 case LA_ELS_PRLI:
4222 /*
4223 * We really don't care if something fails.
4224 * If the PRLI was not sent out, then the
4225 * other end will time it out.
4226 */
4227 if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4228 return (FC_SUCCESS);
4229 }
4230 return (FC_UNCLAIMED);
4231 /* NOTREACHED */
4232
4233 default:
4234 break;
4235 }
4236 }
4237 /* FALLTHROUGH */
4238
4239 default:
4240 return (FC_UNCLAIMED);
4241 }
4242 }
4243
4244
/*
 * Unsolicited data callback registered with fp/fctl.  FCP never claims
 * unsolicited data frames, so this always returns FC_UNCLAIMED.
 */
/*ARGSUSED*/
static int
fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
    uint32_t claimed)
{
	return (FC_UNCLAIMED);
}
4252
4253 /*
4254 * Function: fcp_statec_callback
4255 *
4256 * Description: The purpose of this function is to handle a port state change.
4257 * It is called from fp/fctl and, in a few instances, internally.
4258 *
4259 * Argument: ulph fp/fctl port handle
4260 * port_handle fcp_port structure
4261 * port_state Physical state of the port
4262 * port_top Topology
4263 * *devlist Pointer to the first entry of a table
4264 * containing the remote ports that can be
4265 * reached.
4266 * dev_cnt Number of entries pointed by devlist.
4267 * port_sid Port ID of the local port.
4268 *
4269 * Return Value: None
4270 */
4271 /*ARGSUSED*/
4272 static void
4273 fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
4274 uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
4275 uint32_t dev_cnt, uint32_t port_sid)
4276 {
4277 uint32_t link_count;
4278 int map_len = 0;
4279 struct fcp_port *pptr;
4280 fcp_map_tag_t *map_tag = NULL;
4281
4282 if ((pptr = fcp_get_port(port_handle)) == NULL) {
4283 fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
4284 return; /* nothing to work with! */
4285 }
4286
4287 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4288 fcp_trace, FCP_BUF_LEVEL_2, 0,
4289 "fcp_statec_callback: port state/dev_cnt/top ="
4290 "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
4291 dev_cnt, port_top);
4292
4293 mutex_enter(&pptr->port_mutex);
4294
4295 /*
4296 * If a thread is in detach, don't do anything.
4297 */
4298 if (pptr->port_state & (FCP_STATE_DETACHING |
4299 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4300 mutex_exit(&pptr->port_mutex);
4301 return;
4302 }
4303
4304 /*
4305 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
4306 * init_pkt is called, it knows whether or not the target's status
4307 * (or pd) might be changing.
4308 */
4309
4310 if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
4311 pptr->port_state |= FCP_STATE_IN_CB_DEVC;
4312 }
4313
4314 /*
4315 * the transport doesn't allocate or probe unless being
4316 * asked to by either the applications or ULPs
4317 *
4318 * in cases where the port is OFFLINE at the time of port
4319 * attach callback and the link comes ONLINE later, for
4320 * easier automatic node creation (i.e. without you having to
4321 * go out and run the utility to perform LOGINs) the
4322 * following conditional is helpful
4323 */
4324 pptr->port_phys_state = port_state;
4325
4326 if (dev_cnt) {
4327 mutex_exit(&pptr->port_mutex);
4328
4329 map_len = sizeof (*map_tag) * dev_cnt;
4330 map_tag = kmem_alloc(map_len, KM_NOSLEEP);
4331 if (map_tag == NULL) {
4332 fcp_log(CE_WARN, pptr->port_dip,
4333 "!fcp%d: failed to allocate for map tags; "
4334 " state change will not be processed",
4335 pptr->port_instance);
4336
4337 mutex_enter(&pptr->port_mutex);
4338 pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4339 mutex_exit(&pptr->port_mutex);
4340
4341 return;
4342 }
4343
4344 mutex_enter(&pptr->port_mutex);
4345 }
4346
4347 if (pptr->port_id != port_sid) {
4348 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4349 fcp_trace, FCP_BUF_LEVEL_3, 0,
4350 "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
4351 port_sid);
4352 /*
4353 * The local port changed ID. It is the first time a port ID
4354 * is assigned or something drastic happened. We might have
4355 * been unplugged and replugged on another loop or fabric port
4356 * or somebody grabbed the AL_PA we had or somebody rezoned
4357 * the fabric we were plugged into.
4358 */
4359 pptr->port_id = port_sid;
4360 }
4361
4362 switch (FC_PORT_STATE_MASK(port_state)) {
4363 case FC_STATE_OFFLINE:
4364 case FC_STATE_RESET_REQUESTED:
4365 /*
4366 * link has gone from online to offline -- just update the
4367 * state of this port to BUSY and MARKed to go offline
4368 */
4369 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4370 fcp_trace, FCP_BUF_LEVEL_3, 0,
4371 "link went offline");
4372 if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
4373 /*
4374 * We were offline a while ago and this one
4375 * seems to indicate that the loop has gone
4376 * dead forever.
4377 */
4378 pptr->port_tmp_cnt += dev_cnt;
4379 pptr->port_state &= ~FCP_STATE_OFFLINE;
4380 pptr->port_state |= FCP_STATE_INIT;
4381 link_count = pptr->port_link_cnt;
4382 fcp_handle_devices(pptr, devlist, dev_cnt,
4383 link_count, map_tag, FCP_CAUSE_LINK_DOWN);
4384 } else {
4385 pptr->port_link_cnt++;
4386 ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
4387 fcp_update_state(pptr, (FCP_LUN_BUSY |
4388 FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
4389 if (pptr->port_mpxio) {
4390 fcp_update_mpxio_path_verifybusy(pptr);
4391 }
4392 pptr->port_state |= FCP_STATE_OFFLINE;
4393 pptr->port_state &=
4394 ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
4395 pptr->port_tmp_cnt = 0;
4396 }
4397 mutex_exit(&pptr->port_mutex);
4398 break;
4399
4400 case FC_STATE_ONLINE:
4401 case FC_STATE_LIP:
4402 case FC_STATE_LIP_LBIT_SET:
4403 /*
4404 * link has gone from offline to online
4405 */
4406 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4407 fcp_trace, FCP_BUF_LEVEL_3, 0,
4408 "link went online");
4409
4410 pptr->port_link_cnt++;
4411
4412 while (pptr->port_ipkt_cnt) {
4413 mutex_exit(&pptr->port_mutex);
4414 delay(drv_usectohz(1000000));
4415 mutex_enter(&pptr->port_mutex);
4416 }
4417
4418 pptr->port_topology = port_top;
4419
4420 /*
4421 * The state of the targets and luns accessible through this
4422 * port is updated.
4423 */
4424 fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
4425 FCP_CAUSE_LINK_CHANGE);
4426
4427 pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
4428 pptr->port_state |= FCP_STATE_ONLINING;
4429 pptr->port_tmp_cnt = dev_cnt;
4430 link_count = pptr->port_link_cnt;
4431
4432 pptr->port_deadline = fcp_watchdog_time +
4433 FCP_ICMD_DEADLINE;
4434
4435 if (!dev_cnt) {
4436 /*
4437 * We go directly to the online state if no remote
4438 * ports were discovered.
4439 */
4440 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4441 fcp_trace, FCP_BUF_LEVEL_3, 0,
4442 "No remote ports discovered");
4443
4444 pptr->port_state &= ~FCP_STATE_ONLINING;
4445 pptr->port_state |= FCP_STATE_ONLINE;
4446 }
4447
4448 switch (port_top) {
4449 case FC_TOP_FABRIC:
4450 case FC_TOP_PUBLIC_LOOP:
4451 case FC_TOP_PRIVATE_LOOP:
4452 case FC_TOP_PT_PT:
4453
4454 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4455 fcp_retry_ns_registry(pptr, port_sid);
4456 }
4457
4458 fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
4459 map_tag, FCP_CAUSE_LINK_CHANGE);
4460 break;
4461
4462 default:
4463 /*
4464 * We got here because we were provided with an unknown
4465 * topology.
4466 */
4467 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4468 pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
4469 }
4470
4471 pptr->port_tmp_cnt -= dev_cnt;
4472 fcp_log(CE_WARN, pptr->port_dip,
4473 "!unknown/unsupported topology (0x%x)", port_top);
4474 break;
4475 }
4476 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4477 fcp_trace, FCP_BUF_LEVEL_3, 0,
4478 "Notify ssd of the reset to reinstate the reservations");
4479
4480 scsi_hba_reset_notify_callback(&pptr->port_mutex,
4481 &pptr->port_reset_notify_listf);
4482
4483 mutex_exit(&pptr->port_mutex);
4484
4485 break;
4486
4487 case FC_STATE_RESET:
4488 ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
4489 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4490 fcp_trace, FCP_BUF_LEVEL_3, 0,
4491 "RESET state, waiting for Offline/Online state_cb");
4492 mutex_exit(&pptr->port_mutex);
4493 break;
4494
4495 case FC_STATE_DEVICE_CHANGE:
4496 /*
4497 * We come here when an application has requested
4498 * Dynamic node creation/deletion in Fabric connectivity.
4499 */
4500 if (pptr->port_state & (FCP_STATE_OFFLINE |
4501 FCP_STATE_INIT)) {
4502 /*
4503 * This case can happen when the FCTL is in the
4504 * process of giving us on online and the host on
4505 * the other side issues a PLOGI/PLOGO. Ideally
4506 * the state changes should be serialized unless
4507 * they are opposite (online-offline).
4508 * The transport will give us a final state change
4509 * so we can ignore this for the time being.
4510 */
4511 pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4512 mutex_exit(&pptr->port_mutex);
4513 break;
4514 }
4515
4516 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4517 fcp_retry_ns_registry(pptr, port_sid);
4518 }
4519
4520 /*
4521 * Extend the deadline under steady state conditions
4522 * to provide more time for the device-change-commands
4523 */
4524 if (!pptr->port_ipkt_cnt) {
4525 pptr->port_deadline = fcp_watchdog_time +
4526 FCP_ICMD_DEADLINE;
4527 }
4528
4529 /*
4530 * There is another race condition here, where if we were
4531 * in ONLINEING state and a devices in the map logs out,
4532 * fp will give another state change as DEVICE_CHANGE
4533 * and OLD. This will result in that target being offlined.
4534 * The pd_handle is freed. If from the first statec callback
4535 * we were going to fire a PLOGI/PRLI, the system will
4536 * panic in fc_ulp_transport with invalid pd_handle.
4537 * The fix is to check for the link_cnt before issuing
4538 * any command down.
4539 */
4540 fcp_update_targets(pptr, devlist, dev_cnt,
4541 FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);
4542
4543 link_count = pptr->port_link_cnt;
4544
4545 fcp_handle_devices(pptr, devlist, dev_cnt,
4546 link_count, map_tag, FCP_CAUSE_TGT_CHANGE);
4547
4548 pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4549
4550 mutex_exit(&pptr->port_mutex);
4551 break;
4552
4553 case FC_STATE_TARGET_PORT_RESET:
4554 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4555 fcp_retry_ns_registry(pptr, port_sid);
4556 }
4557
4558 /* Do nothing else */
4559 mutex_exit(&pptr->port_mutex);
4560 break;
4561
4562 default:
4563 fcp_log(CE_WARN, pptr->port_dip,
4564 "!Invalid state change=0x%x", port_state);
4565 mutex_exit(&pptr->port_mutex);
4566 break;
4567 }
4568
4569 if (map_tag) {
4570 kmem_free(map_tag, map_len);
4571 }
4572 }
4573
/*
 * Function: fcp_handle_devices
 *
 * Description: This function updates the devices currently known by
 *		walking the list provided by the caller. The list passed
 *		by the caller is supposed to be the list of reachable
 *		devices.
 *
 * Argument:	*pptr		Fcp port structure.
 *		*devlist	Pointer to the first entry of a table
 *				containing the remote ports that can be
 *				reached.
 *		dev_cnt		Number of entries pointed by devlist.
 *		link_cnt	Link state count.
 *		*map_tag	Array of fcp_map_tag_t structures (one
 *				per devlist entry; must be non-NULL when
 *				dev_cnt is non zero).
 *		cause		What caused this function to be called.
 *
 * Return Value: None
 *
 * Notes:	The pptr->port_mutex must be held.  The mutex is dropped
 *		and reacquired inside the PORT_DEVICE_OLD handling below
 *		while waiting for outstanding I/Os to drain, which is why
 *		the loops re-check pptr->port_link_cnt against link_cnt.
 */
static void
fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
    uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
{
	int		i;
	int		check_finish_init = 0;
	fc_portmap_t	*map_entry;
	struct fcp_tgt	*ptgt = NULL;

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "fcp_handle_devices: called for %d dev(s)", dev_cnt);

	if (dev_cnt) {
		ASSERT(map_tag != NULL);
	}

	/*
	 * The following code goes through the list of remote ports that are
	 * accessible through this (pptr) local port (The list walked is the
	 * one provided by the caller which is the list of the remote ports
	 * currently reachable). It checks if any of them was already
	 * known by looking for the corresponding target structure based on
	 * the world wide name. If a target is part of the list it is tagged
	 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
	 *
	 * Old comment
	 * -----------
	 * Before we drop port mutex; we MUST get the tags updated; This
	 * two step process is somewhat slow, but more reliable.
	 */
	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
		map_entry = &(devlist[i]);

		/*
		 * get ptr to this map entry in our port's
		 * list (if any)
		 */
		ptgt = fcp_lookup_target(pptr,
		    (uchar_t *)&(map_entry->map_pwwn));

		if (ptgt) {
			/* Snapshot the change counter for later passes. */
			map_tag[i] = ptgt->tgt_change_cnt;
			if (cause == FCP_CAUSE_LINK_CHANGE) {
				ptgt->tgt_aux_state = FCP_TGT_TAGGED;
			}
		}
	}

	/*
	 * At this point we know which devices of the new list were already
	 * known (The field tgt_aux_state of the target structure has been
	 * set to FCP_TGT_TAGGED).
	 *
	 * The following code goes through the list of targets currently known
	 * by the local port (the list is actually a hashing table). If a
	 * target is found and is not tagged, it means the target cannot
	 * be reached anymore through the local port (pptr). It is offlined.
	 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
	 */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			mutex_enter(&ptgt->tgt_mutex);
			if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
			    (cause == FCP_CAUSE_LINK_CHANGE) &&
			    !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
				fcp_offline_target_now(pptr, ptgt,
				    link_cnt, ptgt->tgt_change_cnt, 0);
			}
			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	/*
	 * At this point, the devices that were known but cannot be reached
	 * anymore, have most likely been offlined.
	 *
	 * The following section of code seems to go through the list of
	 * remote ports that can now be reached. For every single one it
	 * checks if it is already known or if it is a new port.
	 */
	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {

		/*
		 * Flush the finish-init accounting deferred by the
		 * previous iteration (see check_finish_init++ below).
		 */
		if (check_finish_init) {
			ASSERT(i > 0);
			(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
			    map_tag[i - 1], cause);
			check_finish_init = 0;
		}

		/* get a pointer to this map entry */
		map_entry = &(devlist[i]);

		/*
		 * Check for the duplicate map entry flag. If we have marked
		 * this entry as a duplicate we skip it since the correct
		 * (perhaps even same) state change will be encountered
		 * later in the list.
		 */
		if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
			continue;
		}

		/* get ptr to this map entry in our port's list (if any) */
		ptgt = fcp_lookup_target(pptr,
		    (uchar_t *)&(map_entry->map_pwwn));

		if (ptgt) {
			/*
			 * This device was already known. The field
			 * tgt_aux_state is reset (was probably set to
			 * FCP_TGT_TAGGED previously in this routine).
			 */
			ptgt->tgt_aux_state = 0;
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "handle_devices: map did/state/type/flags = "
			    "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
			    "tgt_state=%d",
			    map_entry->map_did.port_id, map_entry->map_state,
			    map_entry->map_type, map_entry->map_flags,
			    ptgt->tgt_d_id, ptgt->tgt_state);
		}

		if (map_entry->map_type == PORT_DEVICE_OLD ||
		    map_entry->map_type == PORT_DEVICE_NEW ||
		    map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED ||
		    map_entry->map_type == PORT_DEVICE_CHANGED) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "map_type=%x, did = %x",
			    map_entry->map_type,
			    map_entry->map_did.port_id);
		}

		switch (map_entry->map_type) {
		case PORT_DEVICE_NOCHANGE:
		case PORT_DEVICE_USER_CREATE:
		case PORT_DEVICE_USER_LOGIN:
		case PORT_DEVICE_NEW:
		case PORT_DEVICE_REPORTLUN_CHANGED:
			FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);

			/*
			 * TRUE from fcp_handle_mapflags means the target
			 * is being skipped; account for it at the next
			 * iteration (or after the loop).
			 */
			if (fcp_handle_mapflags(pptr, ptgt, map_entry,
			    link_cnt, (ptgt) ? map_tag[i] : 0,
			    cause) == TRUE) {

				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_2);
				check_finish_init++;
			}
			break;

		case PORT_DEVICE_OLD:
			if (ptgt != NULL) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_3);

				mutex_enter(&ptgt->tgt_mutex);
				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
					/*
					 * Must do an in-line wait for I/Os
					 * to get drained
					 */
					mutex_exit(&ptgt->tgt_mutex);
					mutex_exit(&pptr->port_mutex);

					mutex_enter(&ptgt->tgt_mutex);
					while (ptgt->tgt_ipkt_cnt ||
					    fcp_outstanding_lun_cmds(ptgt)
					    == FC_SUCCESS) {
						mutex_exit(&ptgt->tgt_mutex);
						delay(drv_usectohz(1000000));
						mutex_enter(&ptgt->tgt_mutex);
					}
					mutex_exit(&ptgt->tgt_mutex);

					/*
					 * port_mutex was dropped for the
					 * drain above; reacquire in the
					 * usual port-then-target order.
					 */
					mutex_enter(&pptr->port_mutex);
					mutex_enter(&ptgt->tgt_mutex);

					(void) fcp_offline_target(pptr, ptgt,
					    link_cnt, map_tag[i], 0, 0);
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
			check_finish_init++;
			break;

		case PORT_DEVICE_USER_DELETE:
		case PORT_DEVICE_USER_LOGOUT:
			if (ptgt != NULL) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_4);

				mutex_enter(&ptgt->tgt_mutex);
				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
					(void) fcp_offline_target(pptr, ptgt,
					    link_cnt, map_tag[i], 1, 0);
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
			check_finish_init++;
			break;

		case PORT_DEVICE_CHANGED:
			if (ptgt != NULL) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_5);

				if (fcp_device_changed(pptr, ptgt,
				    map_entry, link_cnt, map_tag[i],
				    cause) == TRUE) {
					check_finish_init++;
				}
			} else {
				/* Unknown target: treat as a new device. */
				if (fcp_handle_mapflags(pptr, ptgt,
				    map_entry, link_cnt, 0, cause) == TRUE) {
					check_finish_init++;
				}
			}
			break;

		default:
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Invalid map_type=0x%x", map_entry->map_type);
			check_finish_init++;
			break;
		}
	}

	/* Flush any finish-init deferred by the last iteration. */
	if (check_finish_init && pptr->port_link_cnt == link_cnt) {
		ASSERT(i > 0);
		(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
		    map_tag[i-1], cause);
	} else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
		fcp_offline_all(pptr, link_cnt, cause);
	}
}
4834
/*
 * Function: fcp_handle_reportlun_changed
 *
 * Description: Handles a PORT_DEVICE_REPORTLUN_CHANGED event for a target:
 *		makes sure a LUN 0 structure exists (allocating one if
 *		necessary), marks LUN 0 busy/marked pending rediscovery,
 *		resets the target's LUN accounting to just LUN 0 and sends
 *		a REPORT LUN command to rediscover the target's LUN set.
 *
 * Argument:	*ptgt		Target structure.
 *		cause		What caused this function to be called.
 *
 * Return Value: TRUE		Failed (LUN 0 allocation failed or the
 *				REPORT LUN command could not be sent).
 *		 FALSE		REPORT LUN successfully issued.
 *
 * Notes:	ptgt->tgt_mutex must NOT be held on entry; it is acquired
 *		and released internally.
 */
static int
fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause)
{
	struct fcp_lun	*plun;
	struct fcp_port	*pptr;
	int		rscn_count;
	int		lun0_newalloc;	/* 1 if LUN 0 was created here */
	int		ret = TRUE;

	ASSERT(ptgt);
	pptr = ptgt->tgt_port;
	lun0_newalloc = 0;
	if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
		/*
		 * no LUN struct for LUN 0 yet exists,
		 * so create one
		 */
		plun = fcp_alloc_lun(ptgt);
		if (plun == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to allocate lun 0 for"
			    " D_ID=%x", ptgt->tgt_d_id);
			return (ret);
		}
		lun0_newalloc = 1;
	}

	mutex_enter(&ptgt->tgt_mutex);
	/*
	 * consider lun 0 as device not connected if it is
	 * offlined or newly allocated
	 */
	if ((plun->lun_state & FCP_LUN_OFFLINE) || lun0_newalloc) {
		plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
	}
	/* LUN 0 goes busy/marked until the REPORT LUN response arrives. */
	plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
	plun->lun_state &= ~FCP_LUN_OFFLINE;
	/* Rediscovery starts over with only LUN 0 accounted for. */
	ptgt->tgt_lun_cnt = 1;
	ptgt->tgt_report_lun_cnt = 0;
	mutex_exit(&ptgt->tgt_mutex);

	rscn_count = fc_ulp_get_rscn_count(pptr->port_fp_handle);
	if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
	    sizeof (struct fcp_reportlun_resp), pptr->port_link_cnt,
	    ptgt->tgt_change_cnt, cause, rscn_count) != DDI_SUCCESS) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!Failed to send REPORTLUN "
		    "to D_ID=%x", ptgt->tgt_d_id);
	} else {
		ret = FALSE;
	}

	return (ret);
}
4889
/*
 * Function: fcp_handle_mapflags
 *
 * Description: This function creates a target structure if the ptgt passed
 *		is NULL. It also kicks off the PLOGI if we are not logged
 *		into the target yet or the PRLI if we are logged into the
 *		target already. The rest of the treatment is done in the
 *		callbacks of the PLOGI or PRLI.
 *
 * Argument:	*pptr		FCP Port structure.
 *		*ptgt		Target structure.
 *		*map_entry	Array of fc_portmap_t structures.
 *		link_cnt	Link state count.
 *		tgt_cnt		Target state count.
 *		cause		What caused this function to be called.
 *
 * Return Value: TRUE		Failed
 *		 FALSE		Succeeded
 *
 *		NOTE(review): the TRUE/FALSE mapping above does not match
 *		all paths below -- the icmd allocation failure path returns
 *		FALSE, and the in-body comment states TRUE really means
 *		"target is intentionally skipped" (the caller then performs
 *		finish-init accounting for it).  Verify against the caller
 *		(fcp_handle_devices) before relying on this contract.
 *
 * Notes:	pptr->port_mutex must be owned.  It is dropped and
 *		reacquired at several points below.
 */
static int
fcp_handle_mapflags(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
{
	int		lcount;
	int		tcount;
	int		ret = TRUE;
	int		alloc;
	struct fcp_ipkt	*icmd;
	struct fcp_lun	*pseq_lun = NULL;
	uchar_t		opcode;
	int		valid_ptgt_was_passed = FALSE;

	ASSERT(mutex_owned(&pptr->port_mutex));

	/*
	 * This case is possible where the FCTL has come up and done discovery
	 * before FCP was loaded and attached. FCTL would have discovered the
	 * devices and later the ULP came online. In this case ULP's would get
	 * PORT_DEVICE_NOCHANGE but target would be NULL.
	 */
	if (ptgt == NULL) {
		/* don't already have a target */
		mutex_exit(&pptr->port_mutex);
		ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
		mutex_enter(&pptr->port_mutex);

		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			return (ret);
		}
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_statec_cause = cause;
		ptgt->tgt_tmp_cnt = 1;
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		valid_ptgt_was_passed = TRUE;
	}

	/*
	 * Copy in the target parameters
	 */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_d_id = map_entry->map_did.port_id;
	ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
	ptgt->tgt_pd_handle = map_entry->map_pd;
	ptgt->tgt_fca_dev = NULL;

	/* Copy port and node WWNs */
	bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
	    FC_WWN_SIZE);
	bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
	    FC_WWN_SIZE);

	if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
	    (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
	    (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
	    valid_ptgt_was_passed) {
		/*
		 * determine if there are any tape LUNs on this target
		 */
		for (pseq_lun = ptgt->tgt_lun;
		    pseq_lun != NULL;
		    pseq_lun = pseq_lun->lun_next) {
			if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
			    !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
				/*
				 * An online tape LUN exists: leave the
				 * target alone (reset its state bits only)
				 * and report it as skipped.
				 */
				fcp_update_tgt_state(ptgt, FCP_RESET,
				    FCP_LUN_MARK);
				mutex_exit(&ptgt->tgt_mutex);
				return (ret);
			}
		}
	}

	/*
	 * if UA'REPORT_LUN_CHANGED received,
	 * send out REPORT LUN promptly, skip PLOGI/PRLI process
	 */
	if (map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED) {
		ptgt->tgt_state &= ~(FCP_TGT_OFFLINE | FCP_TGT_MARK);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		ret = fcp_handle_reportlun_changed(ptgt, cause);

		mutex_enter(&pptr->port_mutex);
		return (ret);
	}

	/*
	 * If ptgt was NULL when this function was entered, then tgt_node_state
	 * was never specifically initialized but zeroed out which means
	 * FCP_TGT_NODE_NONE.
	 */
	switch (ptgt->tgt_node_state) {
	case FCP_TGT_NODE_NONE:
	case FCP_TGT_NODE_ON_DEMAND:
		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    !fcp_enable_auto_configuration &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    fcp_enable_auto_configuration &&
		    (ptgt->tgt_manual_config_only == 1) &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			/*
			 * If auto configuration is set and
			 * the tgt_manual_config_only flag is set then
			 * we only want the user to be able to change
			 * the state through create_on_demand.
			 */
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
		break;

	case FCP_TGT_NODE_PRESENT:
		break;
	}
	/*
	 * If we are booting from a fabric device, make sure we
	 * mark the node state appropriately for this target to be
	 * enumerated
	 */
	if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
		if (bcmp((caddr_t)pptr->port_boot_wwn,
		    (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
		    sizeof (ptgt->tgt_port_wwn)) == 0) {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
	}
	mutex_exit(&ptgt->tgt_mutex);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
	    map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
	    map_entry->map_rscn_info.ulp_rscn_count);

	mutex_enter(&ptgt->tgt_mutex);

	/*
	 * Reset target OFFLINE state and mark the target BUSY
	 */
	ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
	ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);

	/* Use the passed-in counters; 0 tgt_cnt means "current". */
	tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
	lcount = link_cnt;

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/*
	 * if we are already logged in, then we do a PRLI, else
	 * we do a PLOGI first (to get logged in)
	 *
	 * We will not check if we are the PLOGI initiator
	 */
	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;

	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));

	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA, lcount, tcount,
	    cause, map_entry->map_rscn_info.ulp_rscn_count);

	if (icmd == NULL) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
		/*
		 * We've exited port_mutex before calling fcp_icmd_alloc,
		 * we need to make sure we reacquire it before returning.
		 */
		mutex_enter(&pptr->port_mutex);
		/*
		 * NOTE(review): FALSE despite the failure -- see the note
		 * in the header comment about the return convention.
		 */
		return (FALSE);
	}

	/* TRUE is only returned while target is intended skipped */
	ret = FALSE;
	/* discover info about this target */
	if ((fcp_send_els(pptr, ptgt, icmd, opcode,
	    lcount, tcount, cause)) == DDI_SUCCESS) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
	} else {
		/* fcp_send_els failed; reclaim the packet we allocated. */
		fcp_icmd_free(pptr, icmd);
		ret = TRUE;
	}
	mutex_enter(&pptr->port_mutex);

	return (ret);
}
5105
/*
 * Function: fcp_send_els
 *
 * Description: Sends an ELS to the target specified by the caller.  Supports
 *		PLOGI and PRLI.  If the caller passes a NULL icmd, an
 *		internal packet is allocated here and freed on failure.
 *
 * Argument:	*pptr		Fcp port.
 *		*ptgt		Target to send the ELS to.
 *		*icmd		Internal packet (may be NULL; one is then
 *				allocated internally).
 *		opcode		ELS opcode (LA_ELS_PLOGI or LA_ELS_PRLI).
 *		lcount		Link state change counter
 *		tcount		Target state change counter
 *		cause		What caused the call
 *
 * Return Value: DDI_SUCCESS	ELS handed to the transport; completion is
 *				handled asynchronously (presumably via the
 *				packet's callback -- not visible here).
 *		 Others		Failure; an internally allocated packet is
 *				freed before returning.
 */
static int
fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
{
	fc_packet_t	*fpkt;
	fc_frame_hdr_t	*hp;
	int		internal = 0;	/* non-zero: packet allocated here */
	int		alloc;
	int		cmd_len;
	int		resp_len;
	int		res = DDI_FAILURE; /* default result */
	int		rval = DDI_FAILURE;

	ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
	ASSERT(ptgt->tgt_port == pptr);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
	    (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");

	if (opcode == LA_ELS_PLOGI) {
		cmd_len = sizeof (la_els_logi_t);
		resp_len = sizeof (la_els_logi_t);
	} else {
		ASSERT(opcode == LA_ELS_PRLI);
		cmd_len = sizeof (la_els_prli_t);
		resp_len = sizeof (la_els_prli_t);
	}

	if (icmd == NULL) {
		/* Size for the larger of the two ELS payloads. */
		alloc = FCP_MAX(sizeof (la_els_logi_t),
		    sizeof (la_els_prli_t));
		icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
		    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
		    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
		if (icmd == NULL) {
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
			return (res);
		}
		internal++;
	}
	fpkt = icmd->ipkt_fpkt;

	fpkt->pkt_cmdlen = cmd_len;
	fpkt->pkt_rsplen = resp_len;
	fpkt->pkt_datalen = 0;
	icmd->ipkt_retries = 0;

	/* fill in fpkt info */
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;

	/* get ptr to frame hdr in fpkt */
	hp = &fpkt->pkt_cmd_fhdr;

	/*
	 * fill in frame hdr
	 */
	hp->r_ctl = R_CTL_ELS_REQ;
	hp->s_id = pptr->port_id;	/* source ID */
	hp->d_id = ptgt->tgt_d_id;	/* dest ID */
	hp->type = FC_TYPE_EXTENDED_LS;
	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
	hp->seq_id = 0;
	hp->rsvd = 0;
	hp->df_ctl = 0;
	hp->seq_cnt = 0;
	hp->ox_id = 0xffff;		/* i.e. none */
	hp->rx_id = 0xffff;		/* i.e. none */
	hp->ro = 0;

	/*
	 * at this point we have a filled in cmd pkt
	 *
	 * fill in the respective info, then use the transport to send
	 * the packet
	 *
	 * for a PLOGI call fc_ulp_login(), and
	 * for a PRLI call fc_ulp_issue_els()
	 */
	switch (opcode) {
	case LA_ELS_PLOGI: {
		struct la_els_logi logi;

		bzero(&logi, sizeof (struct la_els_logi));

		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;
		logi.ls_code.ls_code = LA_ELS_PLOGI;
		logi.ls_code.mbz = 0;

		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));

		icmd->ipkt_opcode = LA_ELS_PLOGI;

		/*
		 * Verify under port_mutex that no link/target state
		 * change invalidated this packet before issuing it.
		 */
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

			mutex_exit(&pptr->port_mutex);

			rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}

			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);

			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PLOGI");
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "fcp_send_els1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
		}
		break;
	}

	case LA_ELS_PRLI: {
		struct la_els_prli	prli;
		struct fcp_prli		*fprli;

		bzero(&prli, sizeof (struct la_els_prli));

		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;

		/* fill in PRLI cmd ELS fields */
		prli.ls_code = LA_ELS_PRLI;
		prli.page_length = 0x10;	/* huh? */
		prli.payload_length = sizeof (struct la_els_prli);

		icmd->ipkt_opcode = LA_ELS_PRLI;

		/* get ptr to PRLI service params */
		fprli = (struct fcp_prli *)prli.service_params;

		/* fill in service params */
		fprli->type = 0x08;
		fprli->resvd1 = 0;
		fprli->orig_process_assoc_valid = 0;
		fprli->resp_process_assoc_valid = 0;
		fprli->establish_image_pair = 1;
		fprli->resvd2 = 0;
		fprli->resvd3 = 0;
		fprli->obsolete_1 = 0;
		fprli->obsolete_2 = 0;
		fprli->data_overlay_allowed = 0;
		fprli->initiator_fn = 1;
		fprli->confirmed_compl_allowed = 1;

		/* advertise target function only if "ltct" is loaded */
		if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
			fprli->target_fn = 1;
		} else {
			fprli->target_fn = 0;
		}

		fprli->retry = 1;
		fprli->read_xfer_rdy_disabled = 1;
		fprli->write_xfer_rdy_disabled = 0;

		FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));

		/* issue the PRLI request */

		/* Same stale-state guard as the PLOGI case above. */
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

			mutex_exit(&pptr->port_mutex);

			rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}

			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);

			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PRLI");
		} else {
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
		}
		break;
	}

	default:
		fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
		break;
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: returning %d", res);

	/*
	 * Free the packet only if it was allocated in this function;
	 * a caller-supplied packet remains owned by the caller.
	 */
	if (res != DDI_SUCCESS) {
		if (internal) {
			fcp_icmd_free(pptr, icmd);
		}
	}

	return (res);
}
5334
5335
5336 /*
5337 * called internally update the state of all of the tgts and each LUN
5338 * for this port (i.e. each target known to be attached to this port)
5339 * if they are not already offline
5340 *
5341 * must be called with the port mutex owned
5342 *
5343 * acquires and releases the target mutexes for each target attached
5344 * to this port
5345 */
5346 void
5347 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5348 {
5349 int i;
5350 struct fcp_tgt *ptgt;
5351
5352 ASSERT(mutex_owned(&pptr->port_mutex));
5353
5354 for (i = 0; i < FCP_NUM_HASH; i++) {
5355 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5356 ptgt = ptgt->tgt_next) {
5357 mutex_enter(&ptgt->tgt_mutex);
5358 fcp_update_tgt_state(ptgt, FCP_SET, state);
5359 ptgt->tgt_change_cnt++;
5360 ptgt->tgt_statec_cause = cause;
5361 ptgt->tgt_tmp_cnt = 1;
5362 ptgt->tgt_done = 0;
5363 mutex_exit(&ptgt->tgt_mutex);
5364 }
5365 }
5366 }
5367
5368
5369 static void
5370 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5371 {
5372 int i;
5373 int ndevs;
5374 struct fcp_tgt *ptgt;
5375
5376 ASSERT(mutex_owned(&pptr->port_mutex));
5377
5378 for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5379 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5380 ptgt = ptgt->tgt_next) {
5381 ndevs++;
5382 }
5383 }
5384
5385 if (ndevs == 0) {
5386 return;
5387 }
5388 pptr->port_tmp_cnt = ndevs;
5389
5390 for (i = 0; i < FCP_NUM_HASH; i++) {
5391 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5392 ptgt = ptgt->tgt_next) {
5393 (void) fcp_call_finish_init_held(pptr, ptgt,
5394 lcount, ptgt->tgt_change_cnt, cause);
5395 }
5396 }
5397 }
5398
5399 /*
5400 * Function: fcp_update_tgt_state
5401 *
5402 * Description: This function updates the field tgt_state of a target. That
5403 * field is a bitmap and which bit can be set or reset
5404 * individually. The action applied to the target state is also
5405 * applied to all the LUNs belonging to the target (provided the
5406 * LUN is not offline). A side effect of applying the state
5407 * modification to the target and the LUNs is the field tgt_trace
5408 * of the target and lun_trace of the LUNs is set to zero.
5409 *
5410 *
5411 * Argument: *ptgt Target structure.
5412 * flag Flag indication what action to apply (set/reset).
5413 * state State bits to update.
5414 *
5415 * Return Value: None
5416 *
5417 * Context: Interrupt, Kernel or User context.
5418 * The mutex of the target (ptgt->tgt_mutex) must be owned when
5419 * calling this function.
5420 */
5421 void
5422 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5423 {
5424 struct fcp_lun *plun;
5425
5426 ASSERT(mutex_owned(&ptgt->tgt_mutex));
5427
5428 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5429 /* The target is not offline. */
5430 if (flag == FCP_SET) {
5431 ptgt->tgt_state |= state;
5432 ptgt->tgt_trace = 0;
5433 } else {
5434 ptgt->tgt_state &= ~state;
5435 }
5436
5437 for (plun = ptgt->tgt_lun; plun != NULL;
5438 plun = plun->lun_next) {
5439 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5440 /* The LUN is not offline. */
5441 if (flag == FCP_SET) {
5442 plun->lun_state |= state;
5443 plun->lun_trace = 0;
5444 } else {
5445 plun->lun_state &= ~state;
5446 }
5447 }
5448 }
5449 }
5450 }
5451
/*
 * Function: fcp_update_lun_state
 *
 * Description: This function updates the field lun_state of a LUN. That
 *		field is a bitmap and which bit can be set or reset
 *		individually.
 *
 * Argument:	*plun		LUN structure.
 *		flag		Flag indication what action to apply (set/reset).
 *		state		State bits to update.
 *
 * Return Value: None
 *
 * Context:	Interrupt, Kernel or User context.
 *		The mutex of the target (ptgt->tgt_mutex) must be owned when
 *		calling this function.
 */
void
fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
{
	struct fcp_tgt	*ptgt = plun->lun_tgt;

	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	/*
	 * NOTE(review): lun_state is masked with FCP_TGT_OFFLINE rather
	 * than FCP_LUN_OFFLINE here.  This is only correct if the two
	 * flags share the same bit value -- verify against the flag
	 * definitions in fcpvar.h.
	 */
	if (!(plun->lun_state & FCP_TGT_OFFLINE)) {
		if (flag == FCP_SET) {
			plun->lun_state |= state;
		} else {
			plun->lun_state &= ~state;
		}
	}
}
5484
5485 /*
5486 * Function: fcp_get_port
5487 *
5488 * Description: This function returns the fcp_port structure from the opaque
5489 * handle passed by the caller. That opaque handle is the handle
5490 * used by fp/fctl to identify a particular local port. That
5491 * handle has been stored in the corresponding fcp_port
5492 * structure. This function is going to walk the global list of
5493 * fcp_port structures till one has a port_fp_handle that matches
5494 * the handle passed by the caller. This function enters the
5495 * mutex fcp_global_mutex while walking the global list and then
5496 * releases it.
5497 *
5498 * Argument: port_handle Opaque handle that fp/fctl uses to identify a
5499 * particular port.
5500 *
5501 * Return Value: NULL Not found.
5502 * Not NULL Pointer to the fcp_port structure.
5503 *
5504 * Context: Interrupt, Kernel or User context.
5505 */
5506 static struct fcp_port *
5507 fcp_get_port(opaque_t port_handle)
5508 {
5509 struct fcp_port *pptr;
5510
5511 ASSERT(port_handle != NULL);
5512
5513 mutex_enter(&fcp_global_mutex);
5514 for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5515 if (pptr->port_fp_handle == port_handle) {
5516 break;
5517 }
5518 }
5519 mutex_exit(&fcp_global_mutex);
5520
5521 return (pptr);
5522 }
5523
5524
5525 static void
5526 fcp_unsol_callback(fc_packet_t *fpkt)
5527 {
5528 struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5529 struct fcp_port *pptr = icmd->ipkt_port;
5530
5531 if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5532 caddr_t state, reason, action, expln;
5533
5534 (void) fc_ulp_pkt_error(fpkt, &state, &reason,
5535 &action, &expln);
5536
5537 fcp_log(CE_WARN, pptr->port_dip,
5538 "!couldn't post response to unsolicited request: "
5539 " state=%s reason=%s rx_id=%x ox_id=%x",
5540 state, reason, fpkt->pkt_cmd_fhdr.ox_id,
5541 fpkt->pkt_cmd_fhdr.rx_id);
5542 }
5543 fcp_icmd_free(pptr, icmd);
5544 }
5545
5546
5547 /*
5548 * Perform general purpose preparation of a response to an unsolicited request
5549 */
5550 static void
5551 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
5552 uchar_t r_ctl, uchar_t type)
5553 {
5554 pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
5555 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
5556 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
5557 pkt->pkt_cmd_fhdr.type = type;
5558 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
5559 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
5560 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl;
5561 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
5562 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
5563 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
5564 pkt->pkt_cmd_fhdr.ro = 0;
5565 pkt->pkt_cmd_fhdr.rsvd = 0;
5566 pkt->pkt_comp = fcp_unsol_callback;
5567 pkt->pkt_pd = NULL;
5568 pkt->pkt_ub_resp_token = (opaque_t)buf;
5569 }
5570
5571
/*
 * Build and issue an ELS ACC response to an unsolicited PRLI request.
 * The accept echoes the originator's process-associator fields and
 * advertises this port's FCP service parameters (initiator function,
 * image pair, retry, XFER_RDY handling).  On success the unsolicited
 * buffer is released back to the transport; on failure the caller is
 * expected to let the exchange time out.
 */
/*ARGSUSED*/
static int
fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
{
	fc_packet_t	*fpkt;
	struct la_els_prli	prli;
	struct fcp_prli		*fprli;
	struct fcp_ipkt	*icmd;
	struct la_els_prli	*from;
	struct fcp_prli		*orig;
	struct fcp_tgt	*ptgt;
	int	tcount = 0;
	int	lcount;

	from = (struct la_els_prli *)buf->ub_buffer;
	orig = (struct fcp_prli *)from->service_params;
	/*
	 * If the requester is a known target, snapshot its change counter
	 * so the internal packet can later be tied to this generation.
	 */
	if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
	    NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		tcount = ptgt->tgt_change_cnt;
		mutex_exit(&ptgt->tgt_mutex);
	}

	/* Snapshot the link generation for the same purpose. */
	mutex_enter(&pptr->port_mutex);
	lcount = pptr->port_link_cnt;
	mutex_exit(&pptr->port_mutex);

	/*
	 * resp_len doubles as a stash for the original PRLI payload (see
	 * the FCP_CP_OUT into pkt_resp below); data_len is 0 for an ELS.
	 */
	if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
	    sizeof (la_els_prli_t), 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT)) == NULL) {
		return (FC_FAILURE);
	}

	fpkt = icmd->ipkt_fpkt;
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
	fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
	fpkt->pkt_rsplen = 0;
	fpkt->pkt_datalen = 0;

	icmd->ipkt_opcode = LA_ELS_PRLI;

	/* Construct the ACC payload with our FCP service parameters. */
	bzero(&prli, sizeof (struct la_els_prli));
	fprli = (struct fcp_prli *)prli.service_params;
	prli.ls_code = LA_ELS_ACC;
	prli.page_length = 0x10;
	prli.payload_length = sizeof (struct la_els_prli);

	/* fill in service params */
	fprli->type = 0x08;
	fprli->resvd1 = 0;
	/* Echo the originator's process-associator information. */
	fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
	fprli->orig_process_associator = orig->orig_process_associator;
	fprli->resp_process_assoc_valid = 0;
	fprli->establish_image_pair = 1;
	fprli->resvd2 = 0;
	fprli->resvd3 = 0;
	fprli->obsolete_1 = 0;
	fprli->obsolete_2 = 0;
	fprli->data_overlay_allowed = 0;
	fprli->initiator_fn = 1;
	fprli->confirmed_compl_allowed = 1;

	/* Advertise target function only if the target-mode ULP is loaded. */
	if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
		fprli->target_fn = 1;
	} else {
		fprli->target_fn = 0;
	}

	fprli->retry = 1;
	fprli->read_xfer_rdy_disabled = 1;
	fprli->write_xfer_rdy_disabled = 0;

	/* save the unsol prli payload first */
	FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
	    fpkt->pkt_resp_acc, sizeof (struct la_els_prli));

	FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
	    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));

	fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);

	/*
	 * Only issue the accept if the link has not bounced since the
	 * internal packet was stamped with lcount/tcount.
	 */
	mutex_enter(&pptr->port_mutex);
	if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
		int rval;
		mutex_exit(&pptr->port_mutex);

		if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
		    FC_SUCCESS) {
			/*
			 * Transient transport states: park the packet for a
			 * later retry rather than failing outright.
			 */
			if ((rval == FC_STATEC_BUSY || rval == FC_OFFLINE) &&
			    ptgt != NULL) {
				fcp_queue_ipkt(pptr, fpkt);
				return (FC_SUCCESS);
			}
			/* Let it timeout */
			fcp_icmd_free(pptr, icmd);
			return (FC_FAILURE);
		}
	} else {
		mutex_exit(&pptr->port_mutex);
		fcp_icmd_free(pptr, icmd);
		return (FC_FAILURE);
	}

	/* Response is on the wire; hand the unsolicited buffer back. */
	(void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);

	return (FC_SUCCESS);
}
5682
5683 /*
5684 * Function: fcp_icmd_alloc
5685 *
 * Description: This function allocates a fcp_ipkt structure. The pkt_comp
5687 * field is initialized to fcp_icmd_callback. Sometimes it is
5688 * modified by the caller (such as fcp_send_scsi). The
5689 * structure is also tied to the state of the line and of the
5690 * target at a particular time. That link is established by
5691 * setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
5692 * and tcount which came respectively from pptr->link_cnt and
5693 * ptgt->tgt_change_cnt.
5694 *
5695 * Argument: *pptr Fcp port.
5696 * *ptgt Target (destination of the command).
5697 * cmd_len Length of the command.
5698 * resp_len Length of the expected response.
5699 * data_len Length of the data.
 *		nodma		Indicates whether the command and response
 *				will be transferred through DMA or not.
5702 * lcount Link state change counter.
5703 * tcount Target state change counter.
5704 * cause Reason that lead to this call.
5705 *
5706 * Return Value: NULL Failed.
5707 * Not NULL Internal packet address.
5708 */
static struct fcp_ipkt *
fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
    int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
    uint32_t rscn_count)
{
	int dma_setup = 0;
	fc_packet_t *fpkt;
	struct fcp_ipkt	*icmd = NULL;

	/*
	 * One allocation covers the ipkt itself, the data-DMA cookie array,
	 * and the FCA's private packet area, laid out in that order.
	 */
	icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
	    pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
	    KM_NOSLEEP);
	if (icmd == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!internal packet allocation failed");
		return (NULL);
	}

	/*
	 * initialize the allocated packet
	 */
	icmd->ipkt_nodma = nodma;
	icmd->ipkt_next = icmd->ipkt_prev = NULL;
	icmd->ipkt_lun = NULL;

	/* Tie the packet to the current link/target generation. */
	icmd->ipkt_link_cnt = lcount;
	icmd->ipkt_change_cnt = tcount;
	icmd->ipkt_cause = cause;

	mutex_enter(&pptr->port_mutex);
	icmd->ipkt_port = pptr;
	mutex_exit(&pptr->port_mutex);

	/* keep track of amt of data to be sent in pkt */
	icmd->ipkt_cmdlen = cmd_len;
	icmd->ipkt_resplen = resp_len;
	icmd->ipkt_datalen = data_len;

	/* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
	icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);

	/* set pkt's private ptr to point to cmd pkt */
	icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;

	/* set FCA private ptr to memory just beyond */
	icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
	    ((char *)icmd + sizeof (struct fcp_ipkt) +
	    pptr->port_dmacookie_sz);

	/* get ptr to fpkt substruct and fill it in */
	fpkt = icmd->ipkt_fpkt;
	/* Cookie array lives immediately after the fcp_ipkt. */
	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
	    sizeof (struct fcp_ipkt));

	if (ptgt != NULL) {
		icmd->ipkt_tgt = ptgt;
		fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
	}

	/* Default completion routine; some callers override it. */
	fpkt->pkt_comp = fcp_icmd_callback;
	fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
	fpkt->pkt_cmdlen = cmd_len;
	fpkt->pkt_rsplen = resp_len;
	fpkt->pkt_datalen = data_len;

	/*
	 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
	 * rscn_count as fcp knows down to the transport. If a valid count was
	 * passed into this function, we allocate memory to actually pass down
	 * this info.
	 *
	 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
	 * basically mean that fcp will not be able to help transport
	 * distinguish if a new RSCN has come after fcp was last informed about
	 * it. In such cases, it might lead to the problem mentioned in CR/bug #
	 * 5068068 where the device might end up going offline in case of RSCN
	 * storms.
	 */
	fpkt->pkt_ulp_rscn_infop = NULL;
	if (rscn_count != FC_INVALID_RSCN_COUNT) {
		fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
		    sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
		if (fpkt->pkt_ulp_rscn_infop == NULL) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "Failed to alloc memory to pass rscn info");
		}
	}

	if (fpkt->pkt_ulp_rscn_infop != NULL) {
		fc_ulp_rscn_info_t *rscnp;

		rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
		rscnp->ulp_rscn_count = rscn_count;
	}

	if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
		goto fail;
	}
	dma_setup++;

	/*
	 * Must hold target mutex across setting of pkt_pd and call to
	 * fc_ulp_init_packet to ensure the handle to the target doesn't go
	 * away while we're not looking.
	 */
	if (ptgt != NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		fpkt->pkt_pd = ptgt->tgt_pd_handle;

		/* ask transport to do its initialization on this pkt */
		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
		    != FC_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fc_ulp_init_packet failed");
			mutex_exit(&ptgt->tgt_mutex);
			goto fail;
		}
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
		    != FC_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fc_ulp_init_packet failed");
			goto fail;
		}
	}

	/*
	 * Refuse new internal packets while the port is going away;
	 * undo the transport init done just above before bailing.
	 */
	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & (FCP_STATE_DETACHING |
	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
		int rval;

		mutex_exit(&pptr->port_mutex);

		rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
		ASSERT(rval == FC_SUCCESS);

		goto fail;
	}

	/* Account for the outstanding internal packet. */
	if (ptgt != NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_ipkt_cnt++;
		mutex_exit(&ptgt->tgt_mutex);
	}

	pptr->port_ipkt_cnt++;

	mutex_exit(&pptr->port_mutex);

	return (icmd);

fail:
	/* Unwind in reverse order of setup. */
	if (fpkt->pkt_ulp_rscn_infop != NULL) {
		kmem_free(fpkt->pkt_ulp_rscn_infop,
		    sizeof (fc_ulp_rscn_info_t));
		fpkt->pkt_ulp_rscn_infop = NULL;
	}

	if (dma_setup) {
		fcp_free_dma(pptr, icmd);
	}
	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
	    (size_t)pptr->port_dmacookie_sz);

	return (NULL);
}
5879
5880 /*
5881 * Function: fcp_icmd_free
5882 *
5883 * Description: Frees the internal command passed by the caller.
5884 *
5885 * Argument: *pptr Fcp port.
5886 * *icmd Internal packet to free.
5887 *
5888 * Return Value: None
5889 */
static void
fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
{
	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;

	/* Let the underlying layers do their cleanup. */
	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
	    icmd->ipkt_fpkt);

	/* Release the RSCN info block if one was attached at alloc time. */
	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
		kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
		    sizeof (fc_ulp_rscn_info_t));
	}

	fcp_free_dma(pptr, icmd);

	/* Free the combined ipkt + cookie array + FCA private allocation. */
	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
	    (size_t)pptr->port_dmacookie_sz);

	/* Drop the outstanding-packet counts taken in fcp_icmd_alloc(). */
	mutex_enter(&pptr->port_mutex);

	if (ptgt) {
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_ipkt_cnt--;
		mutex_exit(&ptgt->tgt_mutex);
	}

	pptr->port_ipkt_cnt--;
	mutex_exit(&pptr->port_mutex);
}
5920
5921 /*
5922 * Function: fcp_alloc_dma
5923 *
 * Description: Allocates the DMA resources required for the internal
 *		packet.
5926 *
5927 * Argument: *pptr FCP port.
5928 * *icmd Internal FCP packet.
5929 * nodma Indicates if the Cmd and Resp will be DMAed.
5930 * flags Allocation flags (Sleep or NoSleep).
5931 *
5932 * Return Value: FC_SUCCESS
5933 * FC_NOMEM
5934 */
static int
fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
    int nodma, int flags)
{
	int		rval;
	size_t		real_size;
	uint_t		ccount;
	int		bound = 0;
	int		cmd_resp = 0;
	fc_packet_t	*fpkt;
	ddi_dma_cookie_t	pkt_data_cookie;
	ddi_dma_cookie_t	*cp;
	uint32_t	cnt;

	fpkt = &icmd->ipkt_fc_packet;

	ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
	    fpkt->pkt_resp_dma == NULL);

	icmd->ipkt_nodma = nodma;

	if (nodma) {
		/* No DVMA: command and response live in plain kernel memory. */
		fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
		if (fpkt->pkt_cmd == NULL) {
			goto fail;
		}

		fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
		if (fpkt->pkt_resp == NULL) {
			goto fail;
		}
	} else {
		ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);

		/* Command/response buffers come from the DMA-able pool. */
		rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
		if (rval == FC_FAILURE) {
			ASSERT(fpkt->pkt_cmd_dma == NULL &&
			    fpkt->pkt_resp_dma == NULL);
			goto fail;
		}
		cmd_resp++;
	}

	if ((fpkt->pkt_datalen != 0) &&
	    !(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
		/*
		 * set up DMA handle and memory for the data in this packet
		 */
		if (ddi_dma_alloc_handle(pptr->port_dip,
		    &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
		    NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
			goto fail;
		}

		if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
		    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
		    &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
			goto fail;
		}

		/* was DMA mem size gotten < size asked for/needed ?? */
		if (real_size < fpkt->pkt_datalen) {
			goto fail;
		}

		/* bind DMA address and handle together */
		if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
		    NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
		    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
		    &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
			goto fail;
		}
		bound++;

		/* More cookies than the FCA's S/G list can carry: give up. */
		if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
			goto fail;
		}

		fpkt->pkt_data_cookie_cnt = ccount;

		/*
		 * Copy the full cookie chain into the array that was
		 * reserved right after the fcp_ipkt at alloc time.
		 */
		cp = fpkt->pkt_data_cookie;
		*cp = pkt_data_cookie;
		cp++;

		for (cnt = 1; cnt < ccount; cnt++, cp++) {
			ddi_dma_nextcookie(fpkt->pkt_data_dma,
			    &pkt_data_cookie);
			*cp = pkt_data_cookie;
		}

	} else if (fpkt->pkt_datalen != 0) {
		/*
		 * If it's a pseudo FCA, then it can't support DMA even in
		 * SCSI data phase.
		 */
		fpkt->pkt_data = kmem_alloc(fpkt->pkt_datalen, flags);
		if (fpkt->pkt_data == NULL) {
			goto fail;
		}

	}

	return (FC_SUCCESS);

fail:
	/* Unwind whatever stage of the setup completed, in reverse order. */
	if (bound) {
		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
	}

	if (fpkt->pkt_data_dma) {
		if (fpkt->pkt_data) {
			ddi_dma_mem_free(&fpkt->pkt_data_acc);
		}
		ddi_dma_free_handle(&fpkt->pkt_data_dma);
	} else {
		if (fpkt->pkt_data) {
			kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
		}
	}

	if (nodma) {
		if (fpkt->pkt_cmd) {
			kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
		}
		if (fpkt->pkt_resp) {
			kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
		}
	} else {
		if (cmd_resp) {
			fcp_free_cmd_resp(pptr, fpkt);
		}
	}

	return (FC_NOMEM);
}
6071
6072
/*
 * Release the DMA (or plain kernel memory) resources acquired by
 * fcp_alloc_dma() for an internal packet: the data buffer first, then
 * the command/response buffers according to the packet's nodma mode.
 */
static void
fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
{
	fc_packet_t	*fpkt = icmd->ipkt_fpkt;

	if (fpkt->pkt_data_dma) {
		/* Data buffer was DMA-bound: unbind, free mem, free handle. */
		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
		if (fpkt->pkt_data) {
			ddi_dma_mem_free(&fpkt->pkt_data_acc);
		}
		ddi_dma_free_handle(&fpkt->pkt_data_dma);
	} else {
		if (fpkt->pkt_data) {
			/* Pseudo-FCA path: plain kmem allocation. */
			kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
		}
		/*
		 * Need we reset pkt_* to zero???
		 */
	}

	if (icmd->ipkt_nodma) {
		/*
		 * Free using the lengths recorded in the ipkt; the fpkt
		 * lengths may have been rewritten by the caller.
		 */
		if (fpkt->pkt_cmd) {
			kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
		}
		if (fpkt->pkt_resp) {
			kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
		}
	} else {
		ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);

		fcp_free_cmd_resp(pptr, fpkt);
	}
}
6106
6107 /*
6108 * Function: fcp_lookup_target
6109 *
6110 * Description: Finds a target given a WWN.
6111 *
6112 * Argument: *pptr FCP port.
6113 * *wwn World Wide Name of the device to look for.
6114 *
6115 * Return Value: NULL No target found
6116 * Not NULL Target structure
6117 *
6118 * Context: Interrupt context.
6119 * The mutex pptr->port_mutex must be owned.
6120 */
6121 /* ARGSUSED */
6122 static struct fcp_tgt *
6123 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
6124 {
6125 int hash;
6126 struct fcp_tgt *ptgt;
6127
6128 ASSERT(mutex_owned(&pptr->port_mutex));
6129
6130 hash = FCP_HASH(wwn);
6131
6132 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
6133 ptgt = ptgt->tgt_next) {
6134 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
6135 bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
6136 sizeof (ptgt->tgt_port_wwn)) == 0) {
6137 break;
6138 }
6139 }
6140
6141 return (ptgt);
6142 }
6143
6144
6145 /*
6146 * Find target structure given a port identifier
6147 */
6148 static struct fcp_tgt *
6149 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6150 {
6151 fc_portid_t port_id;
6152 la_wwn_t pwwn;
6153 struct fcp_tgt *ptgt = NULL;
6154
6155 port_id.priv_lilp_posit = 0;
6156 port_id.port_id = d_id;
6157 if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6158 &pwwn) == FC_SUCCESS) {
6159 mutex_enter(&pptr->port_mutex);
6160 ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6161 mutex_exit(&pptr->port_mutex);
6162 }
6163
6164 return (ptgt);
6165 }
6166
6167
6168 /*
6169 * the packet completion callback routine for info cmd pkts
6170 *
6171 * this means fpkt pts to a response to either a PLOGI or a PRLI
6172 *
6173 * if there is an error an attempt is made to call a routine to resend
6174 * the command that failed
6175 */
static void
fcp_icmd_callback(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct la_els_prli	*prli;
	struct la_els_prli	prli_s;
	struct fcp_prli		*fprli;
	struct fcp_lun	*plun;
	int	free_pkt = 1;
	int	rval;
	ls_code_t	resp;
	uchar_t	prli_acc = 0;
	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
	int	lun0_newalloc;

	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;

	/* get ptrs to the port and target structs for the cmd */
	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;

	FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));

	/*
	 * For a PRLI the sent payload (pkt_cmd) was itself an ACC frame;
	 * remember that so the success test below accepts it.
	 */
	if (icmd->ipkt_opcode == LA_ELS_PRLI) {
		FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
		    sizeof (prli_s));
		prli_acc = (prli_s.ls_code == LA_ELS_ACC);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "ELS (%x) callback state=0x%x reason=0x%x for %x",
	    icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
	    ptgt->tgt_d_id);

	/* Success path: the ELS completed and the peer accepted it. */
	if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
	    ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {

		mutex_enter(&ptgt->tgt_mutex);
		if (ptgt->tgt_pd_handle == NULL) {
			/*
			 * in a fabric environment the port device handles
			 * get created only after successful LOGIN into the
			 * transport, so the transport makes this port
			 * device (pd) handle available in this packet, so
			 * save it now
			 */
			ASSERT(fpkt->pkt_pd != NULL);
			ptgt->tgt_pd_handle = fpkt->pkt_pd;
		}
		mutex_exit(&ptgt->tgt_mutex);

		/* which ELS cmd is this response for ?? */
		switch (icmd->ipkt_opcode) {
		case LA_ELS_PLOGI:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
			    ptgt->tgt_d_id,
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));

			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
			    FCP_TGT_TRACE_15);

			/* Note that we are not allocating a new icmd */
			if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause) != DDI_SUCCESS) {
				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_16);
				goto fail;
			}
			break;

		case LA_ELS_PRLI:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);

			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
			    FCP_TGT_TRACE_17);

			prli = &prli_s;

			FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
			    sizeof (prli_s));

			fprli = (struct fcp_prli *)prli->service_params;

			/* Record the peer's initiator/target capabilities. */
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_icap = fprli->initiator_fn;
			ptgt->tgt_tcap = fprli->target_fn;
			mutex_exit(&ptgt->tgt_mutex);

			if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
				/*
				 * this FCP device does not support target mode
				 */
				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_18);
				goto fail;
			}
			if (fprli->retry == 1) {
				fc_ulp_disable_relogin(pptr->port_fp_handle,
				    &ptgt->tgt_port_wwn);
			}

			/* target is no longer offline */
			mutex_enter(&pptr->port_mutex);
			mutex_enter(&ptgt->tgt_mutex);
			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
				    FCP_TGT_MARK);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,1: state change "
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				goto fail;
			}
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);

			/*
			 * lun 0 should always respond to inquiry, so
			 * get the LUN struct for LUN 0
			 *
			 * Currently we deal with first level of addressing.
			 * If / when we start supporting 0x device types
			 * (DTYPE_ARRAY_CTRL, i.e. array controllers)
			 * this logic will need revisiting.
			 */
			lun0_newalloc = 0;
			if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
				/*
				 * no LUN struct for LUN 0 yet exists,
				 * so create one
				 */
				plun = fcp_alloc_lun(ptgt);
				if (plun == NULL) {
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Failed to allocate lun 0 for"
					    " D_ID=%x", ptgt->tgt_d_id);
					goto fail;
				}
				lun0_newalloc = 1;
			}

			/* fill in LUN info */
			mutex_enter(&ptgt->tgt_mutex);
			/*
			 * consider lun 0 as device not connected if it is
			 * offlined or newly allocated
			 */
			if ((plun->lun_state & FCP_LUN_OFFLINE) ||
			    lun0_newalloc) {
				plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
			}
			plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
			plun->lun_state &= ~FCP_LUN_OFFLINE;
			ptgt->tgt_lun_cnt = 1;
			ptgt->tgt_report_lun_cnt = 0;
			mutex_exit(&ptgt->tgt_mutex);

			/* Retrieve the rscn count (if a valid one exists) */
			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
				rscn_count = ((fc_ulp_rscn_info_t *)
				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
				    ->ulp_rscn_count;
			} else {
				rscn_count = FC_INVALID_RSCN_COUNT;
			}

			/* send Report Lun request to target */
			if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
			    sizeof (struct fcp_reportlun_resp),
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
				mutex_enter(&pptr->port_mutex);
				if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Failed to send REPORT LUN to"
					    " D_ID=%x", ptgt->tgt_d_id);
				} else {
					FCP_TRACE(fcp_logq,
					    pptr->port_instbuf, fcp_trace,
					    FCP_BUF_LEVEL_5, 0,
					    "fcp_icmd_callback,2:state change"
					    " occured for D_ID=0x%x",
					    ptgt->tgt_d_id);
				}
				mutex_exit(&pptr->port_mutex);

				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_19);

				goto fail;
			} else {
				/* REPORT LUN owns the flow now. */
				free_pkt = 0;
				fcp_icmd_free(pptr, icmd);
			}
			break;

		default:
			fcp_log(CE_WARN, pptr->port_dip,
			    "!fcp_icmd_callback Invalid opcode");
			goto fail;
		}

		return;
	}


	/*
	 * Other PLOGI failures are not retried as the
	 * transport does it already
	 */
	if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
		/* Non-PLOGI (i.e. PRLI) failure: retry if still worthwhile. */
		if (fcp_is_retryable(icmd) &&
		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {

			if (FCP_MUST_RETRY(fpkt)) {
				fcp_queue_ipkt(pptr, fpkt);
				return;
			}

			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "ELS PRLI is retried for d_id=0x%x, state=%x,"
			    " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
			    fpkt->pkt_reason);

			/*
			 * Retry by recalling the routine that
			 * originally queued this packet
			 */
			mutex_enter(&pptr->port_mutex);
			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				caddr_t msg;

				mutex_exit(&pptr->port_mutex);

				ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);

				/* Give a timed-out exchange more headroom. */
				if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
					fpkt->pkt_timeout +=
					    FCP_TIMEOUT_DELTA;
				}

				rval = fc_ulp_issue_els(pptr->port_fp_handle,
				    fpkt);
				if (rval == FC_SUCCESS) {
					return;
				}

				/* Transient transport state: park for later. */
				if (rval == FC_STATEC_BUSY ||
				    rval == FC_OFFLINE) {
					fcp_queue_ipkt(pptr, fpkt);
					return;
				}
				(void) fc_ulp_error(rval, &msg);

				fcp_log(CE_NOTE, pptr->port_dip,
				    "!ELS 0x%x failed to d_id=0x%x;"
				    " %s", icmd->ipkt_opcode,
				    ptgt->tgt_d_id, msg);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,3: state change "
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&pptr->port_mutex);
			}
		}
	} else {
		/* PLOGI failure: only requeue when the transport insists. */
		if (fcp_is_retryable(icmd) &&
		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
			if (FCP_MUST_RETRY(fpkt)) {
				fcp_queue_ipkt(pptr, fpkt);
				return;
			}
		}
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
		    fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
			mutex_exit(&pptr->port_mutex);
			fcp_print_error(fpkt);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_icmd_callback,4: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
		}
	}

fail:
	/* Unless ownership moved on (free_pkt == 0), finish and free here. */
	if (free_pkt) {
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
	}
}
6484
6485
6486 /*
6487 * called internally to send an info cmd using the transport
6488 *
6489 * sends either an INQ or a REPORT_LUN
6490 *
6491 * when the packet is completed fcp_scsi_callback is called
6492 */
6493 static int
6494 fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
6495 int lcount, int tcount, int cause, uint32_t rscn_count)
6496 {
6497 int nodma;
6498 struct fcp_ipkt *icmd;
6499 struct fcp_tgt *ptgt;
6500 struct fcp_port *pptr;
6501 fc_frame_hdr_t *hp;
6502 fc_packet_t *fpkt;
6503 struct fcp_cmd fcp_cmd;
6504 struct fcp_cmd *fcmd;
6505 union scsi_cdb *scsi_cdb;
6506
6507 ASSERT(plun != NULL);
6508
6509 ptgt = plun->lun_tgt;
6510 ASSERT(ptgt != NULL);
6511
6512 pptr = ptgt->tgt_port;
6513 ASSERT(pptr != NULL);
6514
6515 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6516 fcp_trace, FCP_BUF_LEVEL_5, 0,
6517 "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);
6518
6519 nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
6520 icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
6521 FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
6522 rscn_count);
6523
6524 if (icmd == NULL) {
6525 return (DDI_FAILURE);
6526 }
6527
6528 fpkt = icmd->ipkt_fpkt;
6529 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
6530 icmd->ipkt_retries = 0;
6531 icmd->ipkt_opcode = opcode;
6532 icmd->ipkt_lun = plun;
6533
6534 if (nodma) {
6535 fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
6536 } else {
6537 fcmd = &fcp_cmd;
6538 }
6539 bzero(fcmd, sizeof (struct fcp_cmd));
6540
6541 fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;
6542
6543 hp = &fpkt->pkt_cmd_fhdr;
6544
6545 hp->s_id = pptr->port_id;
6546 hp->d_id = ptgt->tgt_d_id;
6547 hp->r_ctl = R_CTL_COMMAND;
6548 hp->type = FC_TYPE_SCSI_FCP;
6549 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
6550 hp->rsvd = 0;
6551 hp->seq_id = 0;
6552 hp->seq_cnt = 0;
6553 hp->ox_id = 0xffff;
6554 hp->rx_id = 0xffff;
6555 hp->ro = 0;
6556
6557 bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);
6558
6559 /*
6560 * Request SCSI target for expedited processing
6561 */
6562
6563 /*
6564 * Set up for untagged queuing because we do not
6565 * know if the fibre device supports queuing.
6566 */
6567 fcmd->fcp_cntl.cntl_reserved_0 = 0;
6568 fcmd->fcp_cntl.cntl_reserved_1 = 0;
6569 fcmd->fcp_cntl.cntl_reserved_2 = 0;
6570 fcmd->fcp_cntl.cntl_reserved_3 = 0;
6571 fcmd->fcp_cntl.cntl_reserved_4 = 0;
6572 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
6573 scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
6574
6575 switch (opcode) {
6576 case SCMD_INQUIRY_PAGE83:
6577 /*
6578 * Prepare to get the Inquiry VPD page 83 information
6579 */
6580 fcmd->fcp_cntl.cntl_read_data = 1;
6581 fcmd->fcp_cntl.cntl_write_data = 0;
6582 fcmd->fcp_data_len = alloc_len;
6583
6584 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6585 fpkt->pkt_comp = fcp_scsi_callback;
6586
6587 scsi_cdb->scc_cmd = SCMD_INQUIRY;
6588 scsi_cdb->g0_addr2 = 0x01;
6589 scsi_cdb->g0_addr1 = 0x83;
6590 scsi_cdb->g0_count0 = (uchar_t)alloc_len;
6591 break;
6592
6593 case SCMD_INQUIRY:
6594 fcmd->fcp_cntl.cntl_read_data = 1;
6595 fcmd->fcp_cntl.cntl_write_data = 0;
6596 fcmd->fcp_data_len = alloc_len;
6597
6598 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6599 fpkt->pkt_comp = fcp_scsi_callback;
6600
6601 scsi_cdb->scc_cmd = SCMD_INQUIRY;
6602 scsi_cdb->g0_count0 = SUN_INQSIZE;
6603 break;
6604
6605 case SCMD_REPORT_LUN: {
6606 fc_portid_t d_id;
6607 opaque_t fca_dev;
6608
6609 ASSERT(alloc_len >= 16);
6610
6611 d_id.priv_lilp_posit = 0;
6612 d_id.port_id = ptgt->tgt_d_id;
6613
6614 fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);
6615
6616 mutex_enter(&ptgt->tgt_mutex);
6617 ptgt->tgt_fca_dev = fca_dev;
6618 mutex_exit(&ptgt->tgt_mutex);
6619
6620 fcmd->fcp_cntl.cntl_read_data = 1;
6621 fcmd->fcp_cntl.cntl_write_data = 0;
6622 fcmd->fcp_data_len = alloc_len;
6623
6624 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6625 fpkt->pkt_comp = fcp_scsi_callback;
6626
6627 scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
6628 scsi_cdb->scc5_count0 = alloc_len & 0xff;
6629 scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
6630 scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
6631 scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
6632 break;
6633 }
6634
6635 default:
6636 fcp_log(CE_WARN, pptr->port_dip,
6637 "!fcp_send_scsi Invalid opcode");
6638 break;
6639 }
6640
6641 if (!nodma) {
6642 FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
6643 fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
6644 }
6645
6646 mutex_enter(&pptr->port_mutex);
6647 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6648
6649 mutex_exit(&pptr->port_mutex);
6650 if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
6651 FC_SUCCESS) {
6652 fcp_icmd_free(pptr, icmd);
6653 return (DDI_FAILURE);
6654 }
6655 return (DDI_SUCCESS);
6656 } else {
6657 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6658 fcp_trace, FCP_BUF_LEVEL_2, 0,
6659 "fcp_send_scsi,1: state change occured"
6660 " for D_ID=0x%x", ptgt->tgt_d_id);
6661 mutex_exit(&pptr->port_mutex);
6662 fcp_icmd_free(pptr, icmd);
6663 return (DDI_FAILURE);
6664 }
6665 }
6666
6667
6668 /*
6669 * called by fcp_scsi_callback to check to handle the case where
6670 * REPORT_LUN returns ILLEGAL REQUEST or a UNIT ATTENTION
6671 */
6672 static int
6673 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
6674 {
6675 uchar_t rqlen;
6676 int rval = DDI_FAILURE;
6677 struct scsi_extended_sense sense_info, *sense;
6678 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
6679 fpkt->pkt_ulp_private;
6680 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
6681 struct fcp_port *pptr = ptgt->tgt_port;
6682
6683 ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6684
6685 if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6686 /*
6687 * SCSI-II Reserve Release support. Some older FC drives return
6688 * Reservation conflict for Report Luns command.
6689 */
6690 if (icmd->ipkt_nodma) {
6691 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6692 rsp->fcp_u.fcp_status.sense_len_set = 0;
6693 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6694 } else {
6695 fcp_rsp_t new_resp;
6696
6697 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6698 fpkt->pkt_resp_acc, sizeof (new_resp));
6699
6700 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6701 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6702 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6703
6704 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6705 fpkt->pkt_resp_acc, sizeof (new_resp));
6706 }
6707
6708 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6709 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6710
6711 return (DDI_SUCCESS);
6712 }
6713
6714 sense = &sense_info;
6715 if (!rsp->fcp_u.fcp_status.sense_len_set) {
6716 /* no need to continue if sense length is not set */
6717 return (rval);
6718 }
6719
6720 /* casting 64-bit integer to 8-bit */
6721 rqlen = (uchar_t)min(rsp->fcp_sense_len,
6722 sizeof (struct scsi_extended_sense));
6723
6724 if (rqlen < 14) {
6725 /* no need to continue if request length isn't long enough */
6726 return (rval);
6727 }
6728
6729 if (icmd->ipkt_nodma) {
6730 /*
6731 * We can safely use fcp_response_len here since the
6732 * only path that calls fcp_check_reportlun,
6733 * fcp_scsi_callback, has already called
6734 * fcp_validate_fcp_response.
6735 */
6736 sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6737 sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6738 } else {
6739 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6740 rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6741 sizeof (struct scsi_extended_sense));
6742 }
6743
6744 if (!FCP_SENSE_NO_LUN(sense)) {
6745 mutex_enter(&ptgt->tgt_mutex);
6746 /* clear the flag if any */
6747 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6748 mutex_exit(&ptgt->tgt_mutex);
6749 }
6750
6751 if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6752 (sense->es_add_code == 0x20)) {
6753 if (icmd->ipkt_nodma) {
6754 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6755 rsp->fcp_u.fcp_status.sense_len_set = 0;
6756 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6757 } else {
6758 fcp_rsp_t new_resp;
6759
6760 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6761 fpkt->pkt_resp_acc, sizeof (new_resp));
6762
6763 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6764 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6765 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6766
6767 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6768 fpkt->pkt_resp_acc, sizeof (new_resp));
6769 }
6770
6771 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6772 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6773
6774 return (DDI_SUCCESS);
6775 }
6776
6777 /*
6778 * This is for the STK library which returns a check condition,
6779 * to indicate device is not ready, manual assistance needed.
6780 * This is to a report lun command when the door is open.
6781 */
6782 if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6783 if (icmd->ipkt_nodma) {
6784 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6785 rsp->fcp_u.fcp_status.sense_len_set = 0;
6786 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6787 } else {
6788 fcp_rsp_t new_resp;
6789
6790 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6791 fpkt->pkt_resp_acc, sizeof (new_resp));
6792
6793 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6794 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6795 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6796
6797 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6798 fpkt->pkt_resp_acc, sizeof (new_resp));
6799 }
6800
6801 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6802 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6803
6804 return (DDI_SUCCESS);
6805 }
6806
6807 if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6808 (FCP_SENSE_NO_LUN(sense))) {
6809 mutex_enter(&ptgt->tgt_mutex);
6810 if ((FCP_SENSE_NO_LUN(sense)) &&
6811 (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6812 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6813 mutex_exit(&ptgt->tgt_mutex);
6814 /*
6815 * reconfig was triggred by ILLEGAL REQUEST but
6816 * got ILLEGAL REQUEST again
6817 */
6818 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6819 fcp_trace, FCP_BUF_LEVEL_3, 0,
6820 "!FCP: Unable to obtain Report Lun data"
6821 " target=%x", ptgt->tgt_d_id);
6822 } else {
6823 if (ptgt->tgt_tid == NULL) {
6824 timeout_id_t tid;
6825 /*
6826 * REPORT LUN data has changed. Kick off
6827 * rediscovery
6828 */
6829 tid = timeout(fcp_reconfigure_luns,
6830 (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6831
6832 ptgt->tgt_tid = tid;
6833 ptgt->tgt_state |= FCP_TGT_BUSY;
6834 }
6835 if (FCP_SENSE_NO_LUN(sense)) {
6836 ptgt->tgt_state |= FCP_TGT_ILLREQ;
6837 }
6838 mutex_exit(&ptgt->tgt_mutex);
6839 if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6840 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6841 fcp_trace, FCP_BUF_LEVEL_3, 0,
6842 "!FCP:Report Lun Has Changed"
6843 " target=%x", ptgt->tgt_d_id);
6844 } else if (FCP_SENSE_NO_LUN(sense)) {
6845 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6846 fcp_trace, FCP_BUF_LEVEL_3, 0,
6847 "!FCP:LU Not Supported"
6848 " target=%x", ptgt->tgt_d_id);
6849 }
6850 }
6851 rval = DDI_SUCCESS;
6852 }
6853
6854 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6855 fcp_trace, FCP_BUF_LEVEL_5, 0,
6856 "D_ID=%x, sense=%x, status=%x",
6857 fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6858 rsp->fcp_u.fcp_status.scsi_status);
6859
6860 return (rval);
6861 }
6862
6863 /*
6864 * Function: fcp_scsi_callback
6865 *
6866 * Description: This is the callback routine set by fcp_send_scsi() after
6867 * it calls fcp_icmd_alloc(). The SCSI command completed here
6868 * and autogenerated by FCP are: REPORT_LUN, INQUIRY and
6869 * INQUIRY_PAGE83.
6870 *
6871 * Argument: *fpkt FC packet used to convey the command
6872 *
6873 * Return Value: None
6874 */
6875 static void
6876 fcp_scsi_callback(fc_packet_t *fpkt)
6877 {
6878 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
6879 fpkt->pkt_ulp_private;
6880 struct fcp_rsp_info fcp_rsp_err, *bep;
6881 struct fcp_port *pptr;
6882 struct fcp_tgt *ptgt;
6883 struct fcp_lun *plun;
6884 struct fcp_rsp response, *rsp;
6885
6886 ptgt = icmd->ipkt_tgt;
6887 pptr = ptgt->tgt_port;
6888 plun = icmd->ipkt_lun;
6889
6890 if (icmd->ipkt_nodma) {
6891 rsp = (struct fcp_rsp *)fpkt->pkt_resp;
6892 } else {
6893 rsp = &response;
6894 FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
6895 sizeof (struct fcp_rsp));
6896 }
6897
6898 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6899 fcp_trace, FCP_BUF_LEVEL_2, 0,
6900 "SCSI callback state=0x%x for %x, op_code=0x%x, "
6901 "status=%x, lun num=%x",
6902 fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
6903 rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);
6904
6905 /*
6906 * Pre-init LUN GUID with NWWN if it is not a device that
6907 * supports multiple luns and we know it's not page83
6908 * compliant. Although using a NWWN is not lun unique,
6909 * we will be fine since there is only one lun behind the taget
6910 * in this case.
6911 */
6912 if ((plun->lun_guid_size == 0) &&
6913 (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6914 (fcp_symmetric_device_probe(plun) == 0)) {
6915
6916 char ascii_wwn[FC_WWN_SIZE*2+1];
6917 fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
6918 (void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
6919 }
6920
6921 /*
6922 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
6923 * when thay have more data than what is asked in CDB. An overrun
6924 * is really when FCP_DL is smaller than the data length in CDB.
6925 * In the case here we know that REPORT LUN command we formed within
6926 * this binary has correct FCP_DL. So this OVERRUN is due to bad device
6927 * behavior. In reality this is FC_SUCCESS.
6928 */
6929 if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
6930 (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
6931 (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
6932 fpkt->pkt_state = FC_PKT_SUCCESS;
6933 }
6934
6935 if (fpkt->pkt_state != FC_PKT_SUCCESS) {
6936 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6937 fcp_trace, FCP_BUF_LEVEL_2, 0,
6938 "icmd failed with state=0x%x for %x", fpkt->pkt_state,
6939 ptgt->tgt_d_id);
6940
6941 if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
6942 /*
6943 * Inquiry VPD page command on A5K SES devices would
6944 * result in data CRC errors.
6945 */
6946 if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
6947 (void) fcp_handle_page83(fpkt, icmd, 1);
6948 return;
6949 }
6950 }
6951 if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
6952 FCP_MUST_RETRY(fpkt)) {
6953 fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
6954 fcp_retry_scsi_cmd(fpkt);
6955 return;
6956 }
6957
6958 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6959 FCP_TGT_TRACE_20);
6960
6961 mutex_enter(&pptr->port_mutex);
6962 mutex_enter(&ptgt->tgt_mutex);
6963 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6964 mutex_exit(&ptgt->tgt_mutex);
6965 mutex_exit(&pptr->port_mutex);
6966 fcp_print_error(fpkt);
6967 } else {
6968 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6969 fcp_trace, FCP_BUF_LEVEL_2, 0,
6970 "fcp_scsi_callback,1: state change occured"
6971 " for D_ID=0x%x", ptgt->tgt_d_id);
6972 mutex_exit(&ptgt->tgt_mutex);
6973 mutex_exit(&pptr->port_mutex);
6974 }
6975 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6976 icmd->ipkt_change_cnt, icmd->ipkt_cause);
6977 fcp_icmd_free(pptr, icmd);
6978 return;
6979 }
6980
6981 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);
6982
6983 mutex_enter(&pptr->port_mutex);
6984 mutex_enter(&ptgt->tgt_mutex);
6985 if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6986 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6987 fcp_trace, FCP_BUF_LEVEL_2, 0,
6988 "fcp_scsi_callback,2: state change occured"
6989 " for D_ID=0x%x", ptgt->tgt_d_id);
6990 mutex_exit(&ptgt->tgt_mutex);
6991 mutex_exit(&pptr->port_mutex);
6992 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6993 icmd->ipkt_change_cnt, icmd->ipkt_cause);
6994 fcp_icmd_free(pptr, icmd);
6995 return;
6996 }
6997 ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
6998
6999 mutex_exit(&ptgt->tgt_mutex);
7000 mutex_exit(&pptr->port_mutex);
7001
7002 if (icmd->ipkt_nodma) {
7003 bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
7004 sizeof (struct fcp_rsp));
7005 } else {
7006 bep = &fcp_rsp_err;
7007 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
7008 fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
7009 }
7010
7011 if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
7012 fcp_retry_scsi_cmd(fpkt);
7013 return;
7014 }
7015
7016 if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
7017 FCP_NO_FAILURE) {
7018 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7019 fcp_trace, FCP_BUF_LEVEL_2, 0,
7020 "rsp_code=0x%x, rsp_len_set=0x%x",
7021 bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
7022 fcp_retry_scsi_cmd(fpkt);
7023 return;
7024 }
7025
7026 if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
7027 rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
7028 fcp_queue_ipkt(pptr, fpkt);
7029 return;
7030 }
7031
7032 /*
7033 * Devices that do not support INQUIRY_PAGE83, return check condition
7034 * with illegal request as per SCSI spec.
7035 * Crossbridge is one such device and Daktari's SES node is another.
7036 * We want to ideally enumerate these devices as a non-mpxio devices.
7037 * SES nodes (Daktari only currently) are an exception to this.
7038 */
7039 if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
7040 (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {
7041
7042 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7043 fcp_trace, FCP_BUF_LEVEL_3, 0,
7044 "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
7045 "check condition. May enumerate as non-mpxio device",
7046 ptgt->tgt_d_id, plun->lun_type);
7047
7048 /*
7049 * If we let Daktari's SES be enumerated as a non-mpxio
7050 * device, there will be a discrepency in that the other
7051 * internal FC disks will get enumerated as mpxio devices.
7052 * Applications like luxadm expect this to be consistent.
7053 *
7054 * So, we put in a hack here to check if this is an SES device
7055 * and handle it here.
7056 */
7057 if (plun->lun_type == DTYPE_ESI) {
7058 /*
7059 * Since, pkt_state is actually FC_PKT_SUCCESS
7060 * at this stage, we fake a failure here so that
7061 * fcp_handle_page83 will create a device path using
7062 * the WWN instead of the GUID which is not there anyway
7063 */
7064 fpkt->pkt_state = FC_PKT_LOCAL_RJT;
7065 (void) fcp_handle_page83(fpkt, icmd, 1);
7066 return;
7067 }
7068
7069 mutex_enter(&ptgt->tgt_mutex);
7070 plun->lun_state &= ~(FCP_LUN_OFFLINE |
7071 FCP_LUN_MARK | FCP_LUN_BUSY);
7072 mutex_exit(&ptgt->tgt_mutex);
7073
7074 (void) fcp_call_finish_init(pptr, ptgt,
7075 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7076 icmd->ipkt_cause);
7077 fcp_icmd_free(pptr, icmd);
7078 return;
7079 }
7080
7081 if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7082 int rval = DDI_FAILURE;
7083
7084 /*
7085 * handle cases where report lun isn't supported
7086 * by faking up our own REPORT_LUN response or
7087 * UNIT ATTENTION
7088 */
7089 if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7090 rval = fcp_check_reportlun(rsp, fpkt);
7091
7092 /*
7093 * fcp_check_reportlun might have modified the
7094 * FCP response. Copy it in again to get an updated
7095 * FCP response
7096 */
7097 if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
7098 rsp = &response;
7099
7100 FCP_CP_IN(fpkt->pkt_resp, rsp,
7101 fpkt->pkt_resp_acc,
7102 sizeof (struct fcp_rsp));
7103 }
7104 }
7105
7106 if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7107 if (rval == DDI_SUCCESS) {
7108 (void) fcp_call_finish_init(pptr, ptgt,
7109 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7110 icmd->ipkt_cause);
7111 fcp_icmd_free(pptr, icmd);
7112 } else {
7113 fcp_retry_scsi_cmd(fpkt);
7114 }
7115
7116 return;
7117 }
7118 } else {
7119 if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7120 mutex_enter(&ptgt->tgt_mutex);
7121 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
7122 mutex_exit(&ptgt->tgt_mutex);
7123 }
7124 }
7125
7126 ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
7127 if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
7128 (void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0,
7129 DDI_DMA_SYNC_FORCPU);
7130 }
7131
7132 switch (icmd->ipkt_opcode) {
7133 case SCMD_INQUIRY:
7134 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
7135 fcp_handle_inquiry(fpkt, icmd);
7136 break;
7137
7138 case SCMD_REPORT_LUN:
7139 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
7140 FCP_TGT_TRACE_22);
7141 fcp_handle_reportlun(fpkt, icmd);
7142 break;
7143
7144 case SCMD_INQUIRY_PAGE83:
7145 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
7146 (void) fcp_handle_page83(fpkt, icmd, 0);
7147 break;
7148
7149 default:
7150 fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
7151 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7152 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7153 fcp_icmd_free(pptr, icmd);
7154 break;
7155 }
7156 }
7157
7158
7159 static void
7160 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7161 {
7162 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
7163 fpkt->pkt_ulp_private;
7164 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
7165 struct fcp_port *pptr = ptgt->tgt_port;
7166
7167 if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7168 fcp_is_retryable(icmd)) {
7169 mutex_enter(&pptr->port_mutex);
7170 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7171 mutex_exit(&pptr->port_mutex);
7172 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7173 fcp_trace, FCP_BUF_LEVEL_3, 0,
7174 "Retrying %s to %x; state=%x, reason=%x",
7175 (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7176 "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7177 fpkt->pkt_state, fpkt->pkt_reason);
7178
7179 fcp_queue_ipkt(pptr, fpkt);
7180 } else {
7181 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7182 fcp_trace, FCP_BUF_LEVEL_3, 0,
7183 "fcp_retry_scsi_cmd,1: state change occured"
7184 " for D_ID=0x%x", ptgt->tgt_d_id);
7185 mutex_exit(&pptr->port_mutex);
7186 (void) fcp_call_finish_init(pptr, ptgt,
7187 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7188 icmd->ipkt_cause);
7189 fcp_icmd_free(pptr, icmd);
7190 }
7191 } else {
7192 fcp_print_error(fpkt);
7193 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7194 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7195 fcp_icmd_free(pptr, icmd);
7196 }
7197 }
7198
7199 /*
7200 * Function: fcp_handle_page83
7201 *
7202 * Description: Treats the response to INQUIRY_PAGE83.
7203 *
7204 * Argument: *fpkt FC packet used to convey the command.
7205 * *icmd Original fcp_ipkt structure.
7206 * ignore_page83_data
7207 * if it's 1, that means it's a special devices's
7208 * page83 response, it should be enumerated under mpxio
7209 *
7210 * Return Value: None
7211 */
7212 static void
7213 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7214 int ignore_page83_data)
7215 {
7216 struct fcp_port *pptr;
7217 struct fcp_lun *plun;
7218 struct fcp_tgt *ptgt;
7219 uchar_t dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7220 int fail = 0;
7221 ddi_devid_t devid;
7222 char *guid = NULL;
7223 int ret;
7224
7225 ASSERT(icmd != NULL && fpkt != NULL);
7226
7227 pptr = icmd->ipkt_port;
7228 ptgt = icmd->ipkt_tgt;
7229 plun = icmd->ipkt_lun;
7230
7231 if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7232 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7233
7234 FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7235 SCMD_MAX_INQUIRY_PAGE83_SIZE);
7236
7237 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7238 fcp_trace, FCP_BUF_LEVEL_5, 0,
7239 "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7240 "dtype=0x%x, lun num=%x",
7241 pptr->port_instance, ptgt->tgt_d_id,
7242 dev_id_page[0], plun->lun_num);
7243
7244 ret = ddi_devid_scsi_encode(
7245 DEVID_SCSI_ENCODE_VERSION_LATEST,
7246 NULL, /* driver name */
7247 (unsigned char *) &plun->lun_inq, /* standard inquiry */
7248 sizeof (plun->lun_inq), /* size of standard inquiry */
7249 NULL, /* page 80 data */
7250 0, /* page 80 len */
7251 dev_id_page, /* page 83 data */
7252 SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7253 &devid);
7254
7255 if (ret == DDI_SUCCESS) {
7256
7257 guid = ddi_devid_to_guid(devid);
7258
7259 if (guid) {
7260 /*
7261 * Check our current guid. If it's non null
7262 * and it has changed, we need to copy it into
7263 * lun_old_guid since we might still need it.
7264 */
7265 if (plun->lun_guid &&
7266 strcmp(guid, plun->lun_guid)) {
7267 unsigned int len;
7268
7269 /*
7270 * If the guid of the LUN changes,
7271 * reconfiguration should be triggered
7272 * to reflect the changes.
7273 * i.e. we should offline the LUN with
7274 * the old guid, and online the LUN with
7275 * the new guid.
7276 */
7277 plun->lun_state |= FCP_LUN_CHANGED;
7278
7279 if (plun->lun_old_guid) {
7280 kmem_free(plun->lun_old_guid,
7281 plun->lun_old_guid_size);
7282 }
7283
7284 len = plun->lun_guid_size;
7285 plun->lun_old_guid_size = len;
7286
7287 plun->lun_old_guid = kmem_zalloc(len,
7288 KM_NOSLEEP);
7289
7290 if (plun->lun_old_guid) {
7291 /*
7292 * The alloc was successful then
7293 * let's do the copy.
7294 */
7295 bcopy(plun->lun_guid,
7296 plun->lun_old_guid, len);
7297 } else {
7298 fail = 1;
7299 plun->lun_old_guid_size = 0;
7300 }
7301 }
7302 if (!fail) {
7303 if (fcp_copy_guid_2_lun_block(
7304 plun, guid)) {
7305 fail = 1;
7306 }
7307 }
7308 ddi_devid_free_guid(guid);
7309
7310 } else {
7311 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7312 fcp_trace, FCP_BUF_LEVEL_2, 0,
7313 "fcp_handle_page83: unable to create "
7314 "GUID");
7315
7316 /* couldn't create good guid from devid */
7317 fail = 1;
7318 }
7319 ddi_devid_free(devid);
7320
7321 } else if (ret == DDI_NOT_WELL_FORMED) {
7322 /* NULL filled data for page 83 */
7323 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7324 fcp_trace, FCP_BUF_LEVEL_2, 0,
7325 "fcp_handle_page83: retry GUID");
7326
7327 icmd->ipkt_retries = 0;
7328 fcp_retry_scsi_cmd(fpkt);
7329 return;
7330 } else {
7331 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7332 fcp_trace, FCP_BUF_LEVEL_2, 0,
7333 "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7334 ret);
7335 /*
7336 * Since the page83 validation
7337 * introduced late, we are being
7338 * tolerant to the existing devices
7339 * that already found to be working
7340 * under mpxio, like A5200's SES device,
7341 * its page83 response will not be standard-compliant,
7342 * but we still want it to be enumerated under mpxio.
7343 */
7344 if (fcp_symmetric_device_probe(plun) != 0) {
7345 fail = 1;
7346 }
7347 }
7348
7349 } else {
7350 /* bad packet state */
7351 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7352
7353 /*
7354 * For some special devices (A5K SES and Daktari's SES devices),
7355 * they should be enumerated under mpxio
7356 * or "luxadm dis" will fail
7357 */
7358 if (ignore_page83_data) {
7359 fail = 0;
7360 } else {
7361 fail = 1;
7362 }
7363 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7364 fcp_trace, FCP_BUF_LEVEL_2, 0,
7365 "!Devid page cmd failed. "
7366 "fpkt_state: %x fpkt_reason: %x",
7367 "ignore_page83: %d",
7368 fpkt->pkt_state, fpkt->pkt_reason,
7369 ignore_page83_data);
7370 }
7371
7372 mutex_enter(&pptr->port_mutex);
7373 mutex_enter(&plun->lun_mutex);
7374 /*
7375 * If lun_cip is not NULL, then we needn't update lun_mpxio to avoid
7376 * mismatch between lun_cip and lun_mpxio.
7377 */
7378 if (plun->lun_cip == NULL) {
7379 /*
7380 * If we don't have a guid for this lun it's because we were
7381 * unable to glean one from the page 83 response. Set the
7382 * control flag to 0 here to make sure that we don't attempt to
7383 * enumerate it under mpxio.
7384 */
7385 if (fail || pptr->port_mpxio == 0) {
7386 plun->lun_mpxio = 0;
7387 } else {
7388 plun->lun_mpxio = 1;
7389 }
7390 }
7391 mutex_exit(&plun->lun_mutex);
7392 mutex_exit(&pptr->port_mutex);
7393
7394 mutex_enter(&ptgt->tgt_mutex);
7395 plun->lun_state &=
7396 ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7397 mutex_exit(&ptgt->tgt_mutex);
7398
7399 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7400 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7401
7402 fcp_icmd_free(pptr, icmd);
7403 }
7404
7405 /*
7406 * Function: fcp_handle_inquiry
7407 *
7408 * Description: Called by fcp_scsi_callback to handle the response to an
7409 * INQUIRY request.
7410 *
7411 * Argument: *fpkt FC packet used to convey the command.
7412 * *icmd Original fcp_ipkt structure.
7413 *
7414 * Return Value: None
7415 */
7416 static void
7417 fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7418 {
7419 struct fcp_port *pptr;
7420 struct fcp_lun *plun;
7421 struct fcp_tgt *ptgt;
7422 uchar_t dtype;
7423 uchar_t pqual;
7424 uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
7425
7426 ASSERT(icmd != NULL && fpkt != NULL);
7427
7428 pptr = icmd->ipkt_port;
7429 ptgt = icmd->ipkt_tgt;
7430 plun = icmd->ipkt_lun;
7431
7432 FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
7433 sizeof (struct scsi_inquiry));
7434
7435 dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
7436 pqual = plun->lun_inq.inq_dtype >> 5;
7437
7438 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7439 fcp_trace, FCP_BUF_LEVEL_5, 0,
7440 "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
7441 "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
7442 plun->lun_num, dtype, pqual);
7443
7444 if (pqual != 0) {
7445 /*
7446 * Non-zero peripheral qualifier
7447 */
7448 fcp_log(CE_CONT, pptr->port_dip,
7449 "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7450 "Device type=0x%x Peripheral qual=0x%x\n",
7451 ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7452
7453 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7454 fcp_trace, FCP_BUF_LEVEL_5, 0,
7455 "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7456 "Device type=0x%x Peripheral qual=0x%x\n",
7457 ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7458
7459 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);
7460
7461 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7462 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7463 fcp_icmd_free(pptr, icmd);
7464 return;
7465 }
7466
7467 /*
7468 * If the device is already initialized, check the dtype
7469 * for a change. If it has changed then update the flags
7470 * so the create_luns will offline the old device and
7471 * create the new device. Refer to bug: 4764752
7472 */
7473 if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
7474 plun->lun_state |= FCP_LUN_CHANGED;
7475 }
7476 plun->lun_type = plun->lun_inq.inq_dtype;
7477
7478 /*
7479 * This code is setting/initializing the throttling in the FCA
7480 * driver.
7481 */
7482 mutex_enter(&pptr->port_mutex);
7483 if (!pptr->port_notify) {
7484 if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
7485 uint32_t cmd = 0;
7486 cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
7487 ((cmd & 0xFFFFFF00 >> 8) |
7488 FCP_SVE_THROTTLE << 8));
7489 pptr->port_notify = 1;
7490 mutex_exit(&pptr->port_mutex);
7491 (void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
7492 mutex_enter(&pptr->port_mutex);
7493 }
7494 }
7495
7496 if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7497 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7498 fcp_trace, FCP_BUF_LEVEL_2, 0,
7499 "fcp_handle_inquiry,1:state change occured"
7500 " for D_ID=0x%x", ptgt->tgt_d_id);
7501 mutex_exit(&pptr->port_mutex);
7502
7503 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
7504 (void) fcp_call_finish_init(pptr, ptgt,
7505 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7506 icmd->ipkt_cause);
7507 fcp_icmd_free(pptr, icmd);
7508 return;
7509 }
7510 ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7511 mutex_exit(&pptr->port_mutex);
7512
7513 /* Retrieve the rscn count (if a valid one exists) */
7514 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7515 rscn_count = ((fc_ulp_rscn_info_t *)
7516 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
7517 } else {
7518 rscn_count = FC_INVALID_RSCN_COUNT;
7519 }
7520
7521 if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
7522 SCMD_MAX_INQUIRY_PAGE83_SIZE,
7523 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7524 icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7525 fcp_log(CE_WARN, NULL, "!failed to send page 83");
7526 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
7527 (void) fcp_call_finish_init(pptr, ptgt,
7528 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7529 icmd->ipkt_cause);
7530 }
7531
7532 /*
7533 * Read Inquiry VPD Page 0x83 to uniquely
7534 * identify this logical unit.
7535 */
7536 fcp_icmd_free(pptr, icmd);
7537 }
7538
7539 /*
7540 * Function: fcp_handle_reportlun
7541 *
7542 * Description: Called by fcp_scsi_callback to handle the response to a
7543 * REPORT_LUN request.
7544 *
7545 * Argument: *fpkt FC packet used to convey the command.
7546 * *icmd Original fcp_ipkt structure.
7547 *
7548 * Return Value: None
7549 */
7550 static void
7551 fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7552 {
7553 int i;
7554 int nluns_claimed;
7555 int nluns_bufmax;
7556 int len;
7557 uint16_t lun_num;
7558 uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
7559 struct fcp_port *pptr;
7560 struct fcp_tgt *ptgt;
7561 struct fcp_lun *plun;
7562 struct fcp_reportlun_resp *report_lun;
7563
7564 pptr = icmd->ipkt_port;
7565 ptgt = icmd->ipkt_tgt;
7566 len = fpkt->pkt_datalen;
7567
7568 if ((len < FCP_LUN_HEADER) ||
7569 ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
7570 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7571 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7572 fcp_icmd_free(pptr, icmd);
7573 return;
7574 }
7575
7576 FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
7577 fpkt->pkt_datalen);
7578
7579 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7580 fcp_trace, FCP_BUF_LEVEL_5, 0,
7581 "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
7582 pptr->port_instance, ptgt->tgt_d_id);
7583
7584 /*
7585 * Get the number of luns (which is supplied as LUNS * 8) the
7586 * device claims it has.
7587 */
7588 nluns_claimed = BE_32(report_lun->num_lun) >> 3;
7589
7590 /*
7591 * Get the maximum number of luns the buffer submitted can hold.
7592 */
7593 nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;
7594
7595 /*
7596 * Due to limitations of certain hardware, we support only 16 bit LUNs
7597 */
7598 if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
7599 kmem_free(report_lun, len);
7600
7601 fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
7602 " 0x%x number of LUNs for target=%x", nluns_claimed,
7603 ptgt->tgt_d_id);
7604
7605 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7606 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7607 fcp_icmd_free(pptr, icmd);
7608 return;
7609 }
7610
7611 /*
7612 * If there are more LUNs than we have allocated memory for,
7613 * allocate more space and send down yet another report lun if
7614 * the maximum number of attempts hasn't been reached.
7615 */
7616 mutex_enter(&ptgt->tgt_mutex);
7617
7618 if ((nluns_claimed > nluns_bufmax) &&
7619 (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {
7620
7621 struct fcp_lun *plun;
7622
7623 ptgt->tgt_report_lun_cnt++;
7624 plun = ptgt->tgt_lun;
7625 ASSERT(plun != NULL);
7626 mutex_exit(&ptgt->tgt_mutex);
7627
7628 kmem_free(report_lun, len);
7629
7630 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7631 fcp_trace, FCP_BUF_LEVEL_5, 0,
7632 "!Dynamically discovered %d LUNs for D_ID=%x",
7633 nluns_claimed, ptgt->tgt_d_id);
7634
7635 /* Retrieve the rscn count (if a valid one exists) */
7636 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7637 rscn_count = ((fc_ulp_rscn_info_t *)
7638 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7639 ulp_rscn_count;
7640 } else {
7641 rscn_count = FC_INVALID_RSCN_COUNT;
7642 }
7643
7644 if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
7645 FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
7646 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7647 icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7648 (void) fcp_call_finish_init(pptr, ptgt,
7649 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7650 icmd->ipkt_cause);
7651 }
7652
7653 fcp_icmd_free(pptr, icmd);
7654 return;
7655 }
7656
7657 if (nluns_claimed > nluns_bufmax) {
7658 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7659 fcp_trace, FCP_BUF_LEVEL_5, 0,
7660 "Target=%x:%x:%x:%x:%x:%x:%x:%x"
7661 " Number of LUNs lost=%x",
7662 ptgt->tgt_port_wwn.raw_wwn[0],
7663 ptgt->tgt_port_wwn.raw_wwn[1],
7664 ptgt->tgt_port_wwn.raw_wwn[2],
7665 ptgt->tgt_port_wwn.raw_wwn[3],
7666 ptgt->tgt_port_wwn.raw_wwn[4],
7667 ptgt->tgt_port_wwn.raw_wwn[5],
7668 ptgt->tgt_port_wwn.raw_wwn[6],
7669 ptgt->tgt_port_wwn.raw_wwn[7],
7670 nluns_claimed - nluns_bufmax);
7671
7672 nluns_claimed = nluns_bufmax;
7673 }
7674 ptgt->tgt_lun_cnt = nluns_claimed;
7675
7676 /*
7677 * Identify missing LUNs and print warning messages
7678 */
7679 for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
7680 int offline;
7681 int exists = 0;
7682
7683 offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;
7684
7685 for (i = 0; i < nluns_claimed && exists == 0; i++) {
7686 uchar_t *lun_string;
7687
7688 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7689
7690 switch (lun_string[0] & 0xC0) {
7691 case FCP_LUN_ADDRESSING:
7692 case FCP_PD_ADDRESSING:
7693 case FCP_VOLUME_ADDRESSING:
7694 lun_num = ((lun_string[0] & 0x3F) << 8) |
7695 lun_string[1];
7696 if (plun->lun_num == lun_num) {
7697 exists++;
7698 break;
7699 }
7700 break;
7701
7702 default:
7703 break;
7704 }
7705 }
7706
7707 if (!exists && !offline) {
7708 mutex_exit(&ptgt->tgt_mutex);
7709
7710 mutex_enter(&pptr->port_mutex);
7711 mutex_enter(&ptgt->tgt_mutex);
7712 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7713 /*
7714 * set disappear flag when device was connected
7715 */
7716 if (!(plun->lun_state &
7717 FCP_LUN_DEVICE_NOT_CONNECTED)) {
7718 plun->lun_state |= FCP_LUN_DISAPPEARED;
7719 }
7720 mutex_exit(&ptgt->tgt_mutex);
7721 mutex_exit(&pptr->port_mutex);
7722 if (!(plun->lun_state &
7723 FCP_LUN_DEVICE_NOT_CONNECTED)) {
7724 fcp_log(CE_NOTE, pptr->port_dip,
7725 "!Lun=%x for target=%x disappeared",
7726 plun->lun_num, ptgt->tgt_d_id);
7727 }
7728 mutex_enter(&ptgt->tgt_mutex);
7729 } else {
7730 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7731 fcp_trace, FCP_BUF_LEVEL_5, 0,
7732 "fcp_handle_reportlun,1: state change"
7733 " occured for D_ID=0x%x", ptgt->tgt_d_id);
7734 mutex_exit(&ptgt->tgt_mutex);
7735 mutex_exit(&pptr->port_mutex);
7736 kmem_free(report_lun, len);
7737 (void) fcp_call_finish_init(pptr, ptgt,
7738 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7739 icmd->ipkt_cause);
7740 fcp_icmd_free(pptr, icmd);
7741 return;
7742 }
7743 } else if (exists) {
7744 /*
7745 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
7746 * actually exists in REPORT_LUN response
7747 */
7748 if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
7749 plun->lun_state &=
7750 ~FCP_LUN_DEVICE_NOT_CONNECTED;
7751 }
7752 if (offline || plun->lun_num == 0) {
7753 if (plun->lun_state & FCP_LUN_DISAPPEARED) {
7754 plun->lun_state &= ~FCP_LUN_DISAPPEARED;
7755 mutex_exit(&ptgt->tgt_mutex);
7756 fcp_log(CE_NOTE, pptr->port_dip,
7757 "!Lun=%x for target=%x reappeared",
7758 plun->lun_num, ptgt->tgt_d_id);
7759 mutex_enter(&ptgt->tgt_mutex);
7760 }
7761 }
7762 }
7763 }
7764
7765 ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
7766 mutex_exit(&ptgt->tgt_mutex);
7767
7768 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7769 fcp_trace, FCP_BUF_LEVEL_5, 0,
7770 "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
7771 pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);
7772
7773 /* scan each lun */
7774 for (i = 0; i < nluns_claimed; i++) {
7775 uchar_t *lun_string;
7776
7777 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7778
7779 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7780 fcp_trace, FCP_BUF_LEVEL_5, 0,
7781 "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
7782 " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
7783 lun_string[0]);
7784
7785 switch (lun_string[0] & 0xC0) {
7786 case FCP_LUN_ADDRESSING:
7787 case FCP_PD_ADDRESSING:
7788 case FCP_VOLUME_ADDRESSING:
7789 lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
7790
7791 /* We will skip masked LUNs because of the blacklist. */
7792 if (fcp_lun_blacklist != NULL) {
7793 mutex_enter(&ptgt->tgt_mutex);
7794 if (fcp_should_mask(&ptgt->tgt_port_wwn,
7795 lun_num) == TRUE) {
7796 ptgt->tgt_lun_cnt--;
7797 mutex_exit(&ptgt->tgt_mutex);
7798 break;
7799 }
7800 mutex_exit(&ptgt->tgt_mutex);
7801 }
7802
7803 /* see if this LUN is already allocated */
7804 if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
7805 plun = fcp_alloc_lun(ptgt);
7806 if (plun == NULL) {
7807 fcp_log(CE_NOTE, pptr->port_dip,
7808 "!Lun allocation failed"
7809 " target=%x lun=%x",
7810 ptgt->tgt_d_id, lun_num);
7811 break;
7812 }
7813 }
7814
7815 mutex_enter(&plun->lun_tgt->tgt_mutex);
7816 /* convert to LUN */
7817 plun->lun_addr.ent_addr_0 =
7818 BE_16(*(uint16_t *)&(lun_string[0]));
7819 plun->lun_addr.ent_addr_1 =
7820 BE_16(*(uint16_t *)&(lun_string[2]));
7821 plun->lun_addr.ent_addr_2 =
7822 BE_16(*(uint16_t *)&(lun_string[4]));
7823 plun->lun_addr.ent_addr_3 =
7824 BE_16(*(uint16_t *)&(lun_string[6]));
7825
7826 plun->lun_num = lun_num;
7827 plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
7828 plun->lun_state &= ~FCP_LUN_OFFLINE;
7829 mutex_exit(&plun->lun_tgt->tgt_mutex);
7830
7831 /* Retrieve the rscn count (if a valid one exists) */
7832 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7833 rscn_count = ((fc_ulp_rscn_info_t *)
7834 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7835 ulp_rscn_count;
7836 } else {
7837 rscn_count = FC_INVALID_RSCN_COUNT;
7838 }
7839
7840 if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
7841 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7842 icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7843 mutex_enter(&pptr->port_mutex);
7844 mutex_enter(&plun->lun_tgt->tgt_mutex);
7845 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7846 fcp_log(CE_NOTE, pptr->port_dip,
7847 "!failed to send INQUIRY"
7848 " target=%x lun=%x",
7849 ptgt->tgt_d_id, plun->lun_num);
7850 } else {
7851 FCP_TRACE(fcp_logq,
7852 pptr->port_instbuf, fcp_trace,
7853 FCP_BUF_LEVEL_5, 0,
7854 "fcp_handle_reportlun,2: state"
7855 " change occured for D_ID=0x%x",
7856 ptgt->tgt_d_id);
7857 }
7858 mutex_exit(&plun->lun_tgt->tgt_mutex);
7859 mutex_exit(&pptr->port_mutex);
7860 } else {
7861 continue;
7862 }
7863 break;
7864
7865 default:
7866 fcp_log(CE_WARN, NULL,
7867 "!Unsupported LUN Addressing method %x "
7868 "in response to REPORT_LUN", lun_string[0]);
7869 break;
7870 }
7871
7872 /*
7873 * each time through this loop we should decrement
7874 * the tmp_cnt by one -- since we go through this loop
7875 * one time for each LUN, the tmp_cnt should never be <=0
7876 */
7877 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7878 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7879 }
7880
7881 if (i == 0) {
7882 fcp_log(CE_WARN, pptr->port_dip,
7883 "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
7884 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7885 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7886 }
7887
7888 kmem_free(report_lun, len);
7889 fcp_icmd_free(pptr, icmd);
7890 }
7891
7892
7893 /*
7894 * called internally to return a LUN given a target and a LUN number
7895 */
7896 static struct fcp_lun *
7897 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7898 {
7899 struct fcp_lun *plun;
7900
7901 mutex_enter(&ptgt->tgt_mutex);
7902 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7903 if (plun->lun_num == lun_num) {
7904 mutex_exit(&ptgt->tgt_mutex);
7905 return (plun);
7906 }
7907 }
7908 mutex_exit(&ptgt->tgt_mutex);
7909
7910 return (NULL);
7911 }
7912
7913
7914 /*
7915 * handle finishing one target for fcp_finish_init
7916 *
7917 * return true (non-zero) if we want finish_init to continue with the
7918 * next target
7919 *
7920 * called with the port mutex held
7921 */
7922 /*ARGSUSED*/
7923 static int
7924 fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
7925 int link_cnt, int tgt_cnt, int cause)
7926 {
7927 int rval = 1;
7928 ASSERT(pptr != NULL);
7929 ASSERT(ptgt != NULL);
7930
7931 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7932 fcp_trace, FCP_BUF_LEVEL_5, 0,
7933 "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
7934 ptgt->tgt_state);
7935
7936 ASSERT(mutex_owned(&pptr->port_mutex));
7937
7938 if ((pptr->port_link_cnt != link_cnt) ||
7939 (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
7940 /*
7941 * oh oh -- another link reset or target change
7942 * must have occurred while we are in here
7943 */
7944 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);
7945
7946 return (0);
7947 } else {
7948 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
7949 }
7950
7951 mutex_enter(&ptgt->tgt_mutex);
7952
7953 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
7954 /*
7955 * tgt is not offline -- is it marked (i.e. needs
7956 * to be offlined) ??
7957 */
7958 if (ptgt->tgt_state & FCP_TGT_MARK) {
7959 /*
7960 * this target not offline *and*
7961 * marked
7962 */
7963 ptgt->tgt_state &= ~FCP_TGT_MARK;
7964 rval = fcp_offline_target(pptr, ptgt, link_cnt,
7965 tgt_cnt, 0, 0);
7966 } else {
7967 ptgt->tgt_state &= ~FCP_TGT_BUSY;
7968
7969 /* create the LUNs */
7970 if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
7971 ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
7972 fcp_create_luns(ptgt, link_cnt, tgt_cnt,
7973 cause);
7974 ptgt->tgt_device_created = 1;
7975 } else {
7976 fcp_update_tgt_state(ptgt, FCP_RESET,
7977 FCP_LUN_BUSY);
7978 }
7979 }
7980 }
7981
7982 mutex_exit(&ptgt->tgt_mutex);
7983
7984 return (rval);
7985 }
7986
7987
7988 /*
7989 * this routine is called to finish port initialization
7990 *
7991 * Each port has a "temp" counter -- when a state change happens (e.g.
7992 * port online), the temp count is set to the number of devices in the map.
7993 * Then, as each device gets "discovered", the temp counter is decremented
7994 * by one. When this count reaches zero we know that all of the devices
7995 * in the map have been discovered (or an error has occurred), so we can
7996 * then finish initialization -- which is done by this routine (well, this
7997 * and fcp-finish_tgt())
7998 *
7999 * acquires and releases the global mutex
8000 *
8001 * called with the port mutex owned
8002 */
8003 static void
8004 fcp_finish_init(struct fcp_port *pptr)
8005 {
8006 #ifdef DEBUG
8007 bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
8008 pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
8009 FCP_STACK_DEPTH);
8010 #endif /* DEBUG */
8011
8012 ASSERT(mutex_owned(&pptr->port_mutex));
8013
8014 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8015 fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
8016 " entering; ipkt count=%d", pptr->port_ipkt_cnt);
8017
8018 if ((pptr->port_state & FCP_STATE_ONLINING) &&
8019 !(pptr->port_state & (FCP_STATE_SUSPENDED |
8020 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
8021 pptr->port_state &= ~FCP_STATE_ONLINING;
8022 pptr->port_state |= FCP_STATE_ONLINE;
8023 }
8024
8025 /* Wake up threads waiting on config done */
8026 cv_broadcast(&pptr->port_config_cv);
8027 }
8028
8029
8030 /*
8031 * called from fcp_finish_init to create the LUNs for a target
8032 *
8033 * called with the port mutex owned
8034 */
static void
fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
{
	struct fcp_lun	*plun;
	struct fcp_port	*pptr;
	child_info_t	*cip = NULL;

	ASSERT(ptgt != NULL);
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	pptr = ptgt->tgt_port;

	ASSERT(pptr != NULL);

	/* scan all LUNs for this target */
	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
		/* LUNs already offline need no further processing */
		if (plun->lun_state & FCP_LUN_OFFLINE) {
			continue;
		}

		/*
		 * A LUN still carrying FCP_LUN_MARK at this point was not
		 * confirmed by the latest discovery, so take it offline.
		 */
		if (plun->lun_state & FCP_LUN_MARK) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_create_luns: offlining marked LUN!");
			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
			continue;
		}

		plun->lun_state &= ~FCP_LUN_BUSY;

		/*
		 * There are conditions in which FCP_LUN_INIT flag is cleared
		 * but we have a valid plun->lun_cip. To cover this case also
		 * CLEAR_BUSY whenever we have a valid lun_cip.
		 */
		if (plun->lun_mpxio && plun->lun_cip &&
		    (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
		    0, 0))) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_create_luns: enable lun %p failed!",
			    plun);
		}

		/* initialized and unchanged LUNs need no (re)online */
		if (plun->lun_state & FCP_LUN_INIT &&
		    !(plun->lun_state & FCP_LUN_CHANGED)) {
			continue;
		}

		/* skip hotplug onlining for user-initiated creates */
		if (cause == FCP_CAUSE_USER_CREATE) {
			continue;
		}

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_6, 0,
		    "create_luns: passing ONLINE elem to HP thread");

		/*
		 * If lun has changed, prepare for offlining the old path.
		 * Do not offline the old path right now, since it may be
		 * still opened.
		 */
		if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
			fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
		}

		/* pass an ONLINE element to the hotplug thread */
		if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
		    link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {

			/*
			 * We can not synchronous attach (i.e pass
			 * NDI_ONLINE_ATTACH) here as we might be
			 * coming from an interrupt or callback
			 * thread.
			 */
			if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
			    link_cnt, tgt_cnt, 0, 0)) {
				fcp_log(CE_CONT, pptr->port_dip,
				    "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
				    plun->lun_tgt->tgt_d_id, plun->lun_num);
			}
		}
	}
}
8121
8122
8123 /*
8124 * function to online/offline devices
8125 */
8126 static int
8127 fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
8128 int online, int lcount, int tcount, int flags)
8129 {
8130 int rval = NDI_FAILURE;
8131 int circ;
8132 child_info_t *ccip;
8133 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
8134 int is_mpxio = pptr->port_mpxio;
8135 dev_info_t *cdip, *pdip;
8136 char *devname;
8137
8138 if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
8139 /*
8140 * When this event gets serviced, lun_cip and lun_mpxio
8141 * has changed, so it should be invalidated now.
8142 */
8143 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
8144 FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
8145 "plun: %p, cip: %p, what:%d", plun, cip, online);
8146 return (rval);
8147 }
8148
8149 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8150 fcp_trace, FCP_BUF_LEVEL_2, 0,
8151 "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
8152 "flags=%x mpxio=%x\n",
8153 plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
8154 plun->lun_mpxio);
8155
8156 /*
8157 * lun_mpxio needs checking here because we can end up in a race
8158 * condition where this task has been dispatched while lun_mpxio is
8159 * set, but an earlier FCP_ONLINE task for the same LUN tried to
8160 * enable MPXIO for the LUN, but was unable to, and hence cleared
8161 * the flag. We rely on the serialization of the tasks here. We return
8162 * NDI_SUCCESS so any callers continue without reporting spurious
8163 * errors, and the still think we're an MPXIO LUN.
8164 */
8165
8166 if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
8167 online == FCP_MPXIO_PATH_SET_BUSY) {
8168 if (plun->lun_mpxio) {
8169 rval = fcp_update_mpxio_path(plun, cip, online);
8170 } else {
8171 rval = NDI_SUCCESS;
8172 }
8173 return (rval);
8174 }
8175
8176 /*
8177 * Explicit devfs_clean() due to ndi_devi_offline() not
8178 * executing devfs_clean() if parent lock is held.
8179 */
8180 ASSERT(!servicing_interrupt());
8181 if (online == FCP_OFFLINE) {
8182 if (plun->lun_mpxio == 0) {
8183 if (plun->lun_cip == cip) {
8184 cdip = DIP(plun->lun_cip);
8185 } else {
8186 cdip = DIP(cip);
8187 }
8188 } else if ((plun->lun_cip == cip) && plun->lun_cip) {
8189 cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8190 } else if ((plun->lun_cip != cip) && cip) {
8191 /*
8192 * This means a DTYPE/GUID change, we shall get the
8193 * dip of the old cip instead of the current lun_cip.
8194 */
8195 cdip = mdi_pi_get_client(PIP(cip));
8196 }
8197 if (cdip) {
8198 if (i_ddi_devi_attached(cdip)) {
8199 pdip = ddi_get_parent(cdip);
8200 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
8201 ndi_devi_enter(pdip, &circ);
8202 (void) ddi_deviname(cdip, devname);
8203 /*
8204 * Release parent lock before calling
8205 * devfs_clean().
8206 */
8207 ndi_devi_exit(pdip, circ);
8208 (void) devfs_clean(pdip, devname + 1,
8209 DV_CLEAN_FORCE);
8210 kmem_free(devname, MAXNAMELEN + 1);
8211 }
8212 }
8213 }
8214
8215 if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
8216 return (NDI_FAILURE);
8217 }
8218
8219 if (is_mpxio) {
8220 mdi_devi_enter(pptr->port_dip, &circ);
8221 } else {
8222 ndi_devi_enter(pptr->port_dip, &circ);
8223 }
8224
8225 mutex_enter(&pptr->port_mutex);
8226 mutex_enter(&plun->lun_mutex);
8227
8228 if (online == FCP_ONLINE) {
8229 ccip = fcp_get_cip(plun, cip, lcount, tcount);
8230 if (ccip == NULL) {
8231 goto fail;
8232 }
8233 } else {
8234 if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
8235 goto fail;
8236 }
8237 ccip = cip;
8238 }
8239
8240 if (online == FCP_ONLINE) {
8241 rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
8242 &circ);
8243 fc_ulp_log_device_event(pptr->port_fp_handle,
8244 FC_ULP_DEVICE_ONLINE);
8245 } else {
8246 rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
8247 &circ);
8248 fc_ulp_log_device_event(pptr->port_fp_handle,
8249 FC_ULP_DEVICE_OFFLINE);
8250 }
8251
8252 fail: mutex_exit(&plun->lun_mutex);
8253 mutex_exit(&pptr->port_mutex);
8254
8255 if (is_mpxio) {
8256 mdi_devi_exit(pptr->port_dip, circ);
8257 } else {
8258 ndi_devi_exit(pptr->port_dip, circ);
8259 }
8260
8261 fc_ulp_idle_port(pptr->port_fp_handle);
8262
8263 return (rval);
8264 }
8265
8266
8267 /*
8268 * take a target offline by taking all of its LUNs offline
8269 */
/*ARGSUSED*/
static int
fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int nowait, int flags)
{
	struct fcp_tgt_elem *elem;

	ASSERT(mutex_owned(&pptr->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));

	/*
	 * A link reset or target change raced with this request;
	 * tell the caller not to proceed.  The target mutex is
	 * briefly dropped around the FCP_TGT_TRACE macro here and
	 * below.
	 */
	if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
	    ptgt->tgt_change_cnt)) {
		mutex_exit(&ptgt->tgt_mutex);
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
		mutex_enter(&ptgt->tgt_mutex);

		return (0);
	}

	ptgt->tgt_pd_handle = NULL;
	mutex_exit(&ptgt->tgt_mutex);
	FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
	mutex_enter(&ptgt->tgt_mutex);

	/* if the caller passed no target change count, use the current one */
	tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;

	/*
	 * If tgt_tcap is set and memory is available, defer the offline
	 * by queueing an element for the watchdog (delayed by
	 * fcp_offline_delay unless nowait was given); otherwise offline
	 * the target immediately.
	 */
	if (ptgt->tgt_tcap &&
	    (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
		elem->flags = flags;
		elem->time = fcp_watchdog_time;
		if (nowait == 0) {
			elem->time += fcp_offline_delay;
		}
		elem->ptgt = ptgt;
		elem->link_cnt = link_cnt;
		elem->tgt_cnt = tgt_cnt;
		elem->next = pptr->port_offline_tgts;
		pptr->port_offline_tgts = elem;
	} else {
		fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
	}

	return (1);
}
8316
8317
8318 static void
8319 fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8320 int link_cnt, int tgt_cnt, int flags)
8321 {
8322 ASSERT(mutex_owned(&pptr->port_mutex));
8323 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8324
8325 fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
8326 ptgt->tgt_state = FCP_TGT_OFFLINE;
8327 ptgt->tgt_pd_handle = NULL;
8328 fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
8329 }
8330
8331
8332 static void
8333 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
8334 int flags)
8335 {
8336 struct fcp_lun *plun;
8337
8338 ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
8339 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8340
8341 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8342 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
8343 fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
8344 }
8345 }
8346 }
8347
8348
8349 /*
8350 * take a LUN offline
8351 *
8352 * enters and leaves with the target mutex held, releasing it in the process
8353 *
8354 * allocates memory in non-sleep mode
8355 */
8356 static void
8357 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8358 int nowait, int flags)
8359 {
8360 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
8361 struct fcp_lun_elem *elem;
8362
8363 ASSERT(plun != NULL);
8364 ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8365
8366 if (nowait) {
8367 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8368 return;
8369 }
8370
8371 if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8372 elem->flags = flags;
8373 elem->time = fcp_watchdog_time;
8374 if (nowait == 0) {
8375 elem->time += fcp_offline_delay;
8376 }
8377 elem->plun = plun;
8378 elem->link_cnt = link_cnt;
8379 elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
8380 elem->next = pptr->port_offline_luns;
8381 pptr->port_offline_luns = elem;
8382 } else {
8383 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8384 }
8385 }
8386
8387
static void
fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
{
	struct fcp_pkt	*head = NULL;

	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	/*
	 * Drop the target mutex while this LUN's queued commands are
	 * pulled off the port packet queue and aborted;
	 * fcp_scan_commands() takes the port packet mutex and
	 * fcp_abort_commands() completes each command via its callback.
	 */
	mutex_exit(&LUN_TGT->tgt_mutex);

	head = fcp_scan_commands(plun);
	if (head != NULL) {
		fcp_abort_commands(head, LUN_PORT);
	}

	mutex_enter(&LUN_TGT->tgt_mutex);

	if (plun->lun_cip && plun->lun_mpxio) {
		/*
		 * Intimate MPxIO lun busy is cleared
		 */
		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
		    0, 0)) {
			fcp_log(CE_NOTE, LUN_PORT->port_dip,
			    "Can not ENABLE LUN; D_ID=%x, LUN=%x",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
		/*
		 * Intimate MPxIO that the lun is now marked for offline.
		 * The target mutex is dropped across the mdi call.
		 */
		mutex_exit(&LUN_TGT->tgt_mutex);
		(void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
		mutex_enter(&LUN_TGT->tgt_mutex);
	}
}
8423
static void
fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
    int flags)
{
	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	/*
	 * fcp_update_offline_flags() acquires the target mutex itself,
	 * so release it around the call.
	 */
	mutex_exit(&LUN_TGT->tgt_mutex);
	fcp_update_offline_flags(plun);
	mutex_enter(&LUN_TGT->tgt_mutex);

	/* abort outstanding commands and notify MPxIO of the offline */
	fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);

	FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_4, 0,
	    "offline_lun: passing OFFLINE elem to HP thread");

	/* if a child node exists, hand an OFFLINE element to hotplug */
	if (plun->lun_cip) {
		fcp_log(CE_NOTE, LUN_PORT->port_dip,
		    "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
		    plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
		    LUN_TGT->tgt_trace);

		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
		    link_cnt, tgt_cnt, flags, 0)) {
			fcp_log(CE_CONT, LUN_PORT->port_dip,
			    "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
	}
}
8454
static void
fcp_scan_offline_luns(struct fcp_port *pptr)
{
	struct fcp_lun_elem	*elem;
	struct fcp_lun_elem	*prev;
	struct fcp_lun_elem	*next;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	/*
	 * Walk the port's deferred-offline LUN list; process and unlink
	 * every element whose deadline has passed.
	 */
	prev = NULL;
	elem = pptr->port_offline_luns;
	while (elem) {
		next = elem->next;
		if (elem->time <= fcp_watchdog_time) {
			int changed = 1;
			struct fcp_tgt	*ptgt = elem->plun->lun_tgt;

			mutex_enter(&ptgt->tgt_mutex);
			/*
			 * Only act if neither the link nor the target has
			 * changed since the element was queued.
			 */
			if (pptr->port_link_cnt == elem->link_cnt &&
			    ptgt->tgt_change_cnt == elem->tgt_cnt) {
				changed = 0;
			}

			/*
			 * NOTE(review): lun_state is tested against the
			 * target flag FCP_TGT_OFFLINE rather than
			 * FCP_LUN_OFFLINE here -- confirm this is intended
			 * (the flag values may coincide).
			 */
			if (!changed &&
			    !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
				fcp_offline_lun_now(elem->plun,
				    elem->link_cnt, elem->tgt_cnt, elem->flags);
			}
			mutex_exit(&ptgt->tgt_mutex);

			kmem_free(elem, sizeof (*elem));

			/* unlink the consumed element from the list */
			if (prev) {
				prev->next = next;
			} else {
				pptr->port_offline_luns = next;
			}
		} else {
			prev = elem;
		}
		elem = next;
	}
}
8498
8499
8500 static void
8501 fcp_scan_offline_tgts(struct fcp_port *pptr)
8502 {
8503 struct fcp_tgt_elem *elem;
8504 struct fcp_tgt_elem *prev;
8505 struct fcp_tgt_elem *next;
8506
8507 ASSERT(MUTEX_HELD(&pptr->port_mutex));
8508
8509 prev = NULL;
8510 elem = pptr->port_offline_tgts;
8511 while (elem) {
8512 next = elem->next;
8513 if (elem->time <= fcp_watchdog_time) {
8514 int outdated = 1;
8515 struct fcp_tgt *ptgt = elem->ptgt;
8516
8517 mutex_enter(&ptgt->tgt_mutex);
8518
8519 if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
8520 /* No change on tgt since elem was created. */
8521 outdated = 0;
8522 } else if (ptgt->tgt_change_cnt == elem->tgt_cnt + 1 &&
8523 pptr->port_link_cnt == elem->link_cnt + 1 &&
8524 ptgt->tgt_statec_cause == FCP_CAUSE_LINK_DOWN) {
8525 /*
8526 * Exactly one thing happened to the target
8527 * inbetween: the local port went offline.
8528 * For fp the remote port is already gone so
8529 * it will not tell us again to offline the
8530 * target. We must offline it now.
8531 */
8532 outdated = 0;
8533 }
8534
8535 if (!outdated && !(ptgt->tgt_state &
8536 FCP_TGT_OFFLINE)) {
8537 fcp_offline_target_now(pptr,
8538 ptgt, elem->link_cnt, elem->tgt_cnt,
8539 elem->flags);
8540 }
8541
8542 mutex_exit(&ptgt->tgt_mutex);
8543
8544 kmem_free(elem, sizeof (*elem));
8545
8546 if (prev) {
8547 prev->next = next;
8548 } else {
8549 pptr->port_offline_tgts = next;
8550 }
8551 } else {
8552 prev = elem;
8553 }
8554 elem = next;
8555 }
8556 }
8557
8558
static void
fcp_update_offline_flags(struct fcp_lun *plun)
{
	struct fcp_port	*pptr = LUN_PORT;
	ASSERT(plun != NULL);

	/* mark the LUN offline and clear its transient state bits */
	mutex_enter(&LUN_TGT->tgt_mutex);
	plun->lun_state |= FCP_LUN_OFFLINE;
	plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);

	mutex_enter(&plun->lun_mutex);
	if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
		dev_info_t *cdip = NULL;

		/*
		 * Note the asymmetric unlocking: the target mutex is
		 * released here, the lun mutex after cdip is resolved.
		 */
		mutex_exit(&LUN_TGT->tgt_mutex);

		/* direct dip for non-MPxIO, MDI client dip otherwise */
		if (plun->lun_mpxio == 0) {
			cdip = DIP(plun->lun_cip);
		} else if (plun->lun_cip) {
			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
		}

		mutex_exit(&plun->lun_mutex);
		if (cdip) {
			/* deliver the FCAL removal event to the child */
			(void) ndi_event_retrieve_cookie(
			    pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
			    &fcp_remove_eid, NDI_EVENT_NOPASS);
			(void) ndi_event_run_callbacks(
			    pptr->port_ndi_event_hdl, cdip,
			    fcp_remove_eid, NULL);
		}
	} else {
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&LUN_TGT->tgt_mutex);
	}
}
8595
8596
8597 /*
8598 * Scan all of the command pkts for this port, moving pkts that
8599 * match our LUN onto our own list (headed by "head")
8600 */
static struct fcp_pkt *
fcp_scan_commands(struct fcp_lun *plun)
{
	struct fcp_port	*pptr = LUN_PORT;

	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
	struct fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
	struct fcp_pkt	*pcmd = NULL;	/* the previous command */

	struct fcp_pkt	*head = NULL;	/* head of our list */
	struct fcp_pkt	*tail = NULL;	/* tail of our list */

	int	cmds_found = 0;

	mutex_enter(&pptr->port_pkt_mutex);
	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
		struct fcp_lun *tlun =
		    ADDR2LUN(&cmd->cmd_pkt->pkt_address);

		ncmd = cmd->cmd_next;	/* set next command */

		/*
		 * if this pkt is for a different LUN or the
		 * command is sent down, skip it.
		 */
		if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
		    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
			pcmd = cmd;
			continue;
		}
		cmds_found++;
		/* unlink this command from the port queue */
		if (pcmd != NULL) {
			ASSERT(pptr->port_pkt_head != cmd);
			pcmd->cmd_next = cmd->cmd_next;
		} else {
			ASSERT(cmd == pptr->port_pkt_head);
			pptr->port_pkt_head = cmd->cmd_next;
		}

		/* if it was the queue tail, the previous entry is new tail */
		if (cmd == pptr->port_pkt_tail) {
			pptr->port_pkt_tail = pcmd;
			if (pcmd) {
				pcmd->cmd_next = NULL;
			}
		}

		/* append the command to our private list */
		if (head == NULL) {
			head = tail = cmd;
		} else {
			ASSERT(tail != NULL);

			tail->cmd_next = cmd;
			tail = cmd;
		}
		cmd->cmd_next = NULL;
	}
	mutex_exit(&pptr->port_pkt_mutex);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "scan commands: %d cmd(s) found", cmds_found);

	return (head);
}
8665
8666
8667 /*
8668 * Abort all the commands in the command queue
8669 */
8670 static void
8671 fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
8672 {
8673 struct fcp_pkt *cmd = NULL; /* pkt cmd ptr */
8674 struct fcp_pkt *ncmd = NULL; /* next pkt ptr */
8675
8676 ASSERT(mutex_owned(&pptr->port_mutex));
8677
8678 /* scan through the pkts and invalid them */
8679 for (cmd = head; cmd != NULL; cmd = ncmd) {
8680 struct scsi_pkt *pkt = cmd->cmd_pkt;
8681
8682 ncmd = cmd->cmd_next;
8683 ASSERT(pkt != NULL);
8684
8685 /*
8686 * The lun is going to be marked offline. Indicate
8687 * the target driver not to requeue or retry this command
8688 * as the device is going to be offlined pretty soon.
8689 */
8690 pkt->pkt_reason = CMD_DEV_GONE;
8691 pkt->pkt_statistics = 0;
8692 pkt->pkt_state = 0;
8693
8694 /* reset cmd flags/state */
8695 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
8696 cmd->cmd_state = FCP_PKT_IDLE;
8697
8698 /*
8699 * ensure we have a packet completion routine,
8700 * then call it.
8701 */
8702 ASSERT(pkt->pkt_comp != NULL);
8703
8704 mutex_exit(&pptr->port_mutex);
8705 fcp_post_callback(cmd);
8706 mutex_enter(&pptr->port_mutex);
8707 }
8708 }
8709
8710
8711 /*
8712 * the pkt_comp callback for command packets
8713 */
8714 static void
8715 fcp_cmd_callback(fc_packet_t *fpkt)
8716 {
8717 struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
8718 struct scsi_pkt *pkt = cmd->cmd_pkt;
8719 struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
8720
8721 ASSERT(cmd->cmd_state != FCP_PKT_IDLE);
8722
8723 if (cmd->cmd_state == FCP_PKT_IDLE) {
8724 cmn_err(CE_PANIC, "Packet already completed %p",
8725 (void *)cmd);
8726 }
8727
8728 /*
8729 * Watch thread should be freeing the packet, ignore the pkt.
8730 */
8731 if (cmd->cmd_state == FCP_PKT_ABORTING) {
8732 fcp_log(CE_CONT, pptr->port_dip,
8733 "!FCP: Pkt completed while aborting\n");
8734 return;
8735 }
8736 cmd->cmd_state = FCP_PKT_IDLE;
8737
8738 fcp_complete_pkt(fpkt);
8739
8740 #ifdef DEBUG
8741 mutex_enter(&pptr->port_pkt_mutex);
8742 pptr->port_npkts--;
8743 mutex_exit(&pptr->port_pkt_mutex);
8744 #endif /* DEBUG */
8745
8746 fcp_post_callback(cmd);
8747 }
8748
8749
/*
 * Function: fcp_complete_pkt
 *
 * Description: Translates the FC transport completion status of a command
 *		into SCSA terms (pkt_state, pkt_reason, pkt_statistics,
 *		pkt_resid and the status byte/ARQ data at pkt_scbp) so the
 *		target driver can interpret the result.  On a transport
 *		success, the FCP response IU is decoded (protocol errors,
 *		residuals, sense data); on a transport failure, the FC
 *		state/reason pair is mapped to the closest SCSA reason.
 *		This routine does not call the SCSA completion callback;
 *		the caller (e.g. fcp_cmd_callback()) does that.
 *
 * Argument:	*fpkt	Completed FC packet.
 *
 * Return Value: None
 */
static void
fcp_complete_pkt(fc_packet_t *fpkt)
{
	int error = 0;
	struct fcp_pkt *cmd = (struct fcp_pkt *)
	    fpkt->pkt_ulp_private;
	struct scsi_pkt *pkt = cmd->cmd_pkt;
	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
	struct fcp_lun *plun;
	struct fcp_tgt *ptgt;
	struct fcp_rsp *rsp;
	struct scsi_address save;

#ifdef DEBUG
	/*
	 * Snapshot the address so the ASSERT at the end can verify that
	 * nothing in this routine clobbered it (DEBUG kernels only).
	 */
	save = pkt->pkt_address;
#endif /* DEBUG */

	rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;

	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
		/* Copy in the FCP response IU if the FCA used DVMA. */
		if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));
		}

		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS;

		pkt->pkt_resid = 0;

		if (fpkt->pkt_datalen) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
			/* A data residual forces the slow-path decode below. */
			if (fpkt->pkt_data_resid) {
				error++;
			}
		}

		if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
		    rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
			/*
			 * The next two checks make sure that if there
			 * is no sense data or a valid response and
			 * the command came back with check condition,
			 * the command should be retried.
			 */
			if (!rsp->fcp_u.fcp_status.rsp_len_set &&
			    !rsp->fcp_u.fcp_status.sense_len_set) {
				pkt->pkt_state &= ~STATE_XFERRED_DATA;
				pkt->pkt_resid = cmd->cmd_dmacount;
			}
		}

		/* Fast path: clean status, no residual, no data error. */
		if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
			return;
		}

		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		/*
		 * Update the transfer resid, if appropriate
		 */
		if (rsp->fcp_u.fcp_status.resid_over ||
		    rsp->fcp_u.fcp_status.resid_under) {
			pkt->pkt_resid = rsp->fcp_resid;
		}

		/*
		 * First see if we got a FCP protocol error.
		 */
		if (rsp->fcp_u.fcp_status.rsp_len_set) {
			struct fcp_rsp_info *bep;
			bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
			    sizeof (struct fcp_rsp));

			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				/*
				 * Response lengths are inconsistent; force a
				 * CHECK CONDITION so the target driver retries.
				 */
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len, rsp->fcp_sense_len);

				return;
			}

			/* Copy in the response-info bytes following the rsp. */
			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp), bep,
				    fpkt->pkt_resp_acc,
				    sizeof (struct fcp_rsp_info));
			}

			if (bep->rsp_code != FCP_NO_FAILURE) {
				child_info_t *cip;

				pkt->pkt_reason = CMD_TRAN_ERR;

				mutex_enter(&plun->lun_mutex);
				cip = plun->lun_cip;
				mutex_exit(&plun->lun_mutex);

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "FCP response error on cmd=%p"
				    " target=0x%x, cip=%p", cmd,
				    ptgt->tgt_d_id, cip);
			}
		}

		/*
		 * See if we got a SCSI error with sense data
		 */
		if (rsp->fcp_u.fcp_status.sense_len_set) {
			uchar_t rqlen;
			caddr_t sense_from;
			child_info_t *cip;
			timeout_id_t tid;
			struct scsi_arq_status *arq;
			struct scsi_extended_sense *sense_to;

			arq = (struct scsi_arq_status *)pkt->pkt_scbp;
			sense_to = &arq->sts_sensedata;

			/* Clamp to what the SCSA sense structure can hold. */
			rqlen = (uchar_t)min(rsp->fcp_sense_len,
			    sizeof (struct scsi_extended_sense));

			/* Sense bytes follow the rsp IU and response info. */
			sense_from = (caddr_t)fpkt->pkt_resp +
			    sizeof (struct fcp_rsp) + rsp->fcp_response_len;

			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len, rsp->fcp_sense_len);

				return;
			}

			/*
			 * copy in sense information
			 */
			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(sense_from, sense_to,
				    fpkt->pkt_resp_acc, rqlen);
			} else {
				bcopy(sense_from, sense_to, rqlen);
			}

			/*
			 * Sense keys indicating a LUN inventory change
			 * trigger a deferred LUN rediscovery of the target
			 * (unless one is already pending via tgt_tid).
			 */
			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
			    (FCP_SENSE_NO_LUN(sense_to))) {
				mutex_enter(&ptgt->tgt_mutex);
				if (ptgt->tgt_tid == NULL) {
					/*
					 * Kick off rediscovery
					 */
					tid = timeout(fcp_reconfigure_luns,
					    (caddr_t)ptgt, drv_usectohz(1));

					ptgt->tgt_tid = tid;
					ptgt->tgt_state |= FCP_TGT_BUSY;
				}
				mutex_exit(&ptgt->tgt_mutex);
				if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
					FCP_TRACE(fcp_logq, pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: Report Lun Has Changed"
					    " target=%x", ptgt->tgt_d_id);
				} else if (FCP_SENSE_NO_LUN(sense_to)) {
					FCP_TRACE(fcp_logq, pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: LU Not Supported"
					    " target=%x", ptgt->tgt_d_id);
				}
			}
			ASSERT(pkt->pkt_scbp != NULL);

			/* Fill in the auto-request-sense (ARQ) status. */
			pkt->pkt_state |= STATE_ARQ_DONE;

			arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;

			*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
			arq->sts_rqpkt_reason = 0;
			arq->sts_rqpkt_statistics = 0;

			arq->sts_rqpkt_state = STATE_GOT_BUS |
			    STATE_GOT_TARGET | STATE_SENT_CMD |
			    STATE_GOT_STATUS | STATE_ARQ_DONE |
			    STATE_XFERRED_DATA;

			mutex_enter(&plun->lun_mutex);
			cip = plun->lun_cip;
			mutex_exit(&plun->lun_mutex);

			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_8, 0,
			    "SCSI Check condition on cmd=%p target=0x%x"
			    " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
			    " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
			    cmd->cmd_fcp_cmd.fcp_cdb[0],
			    rsp->fcp_u.fcp_status.scsi_status,
			    sense_to->es_key, sense_to->es_add_code,
			    sense_to->es_qual_code);
		}
	} else {
		/* Transport-level failure: map FC state/reason to SCSA. */
		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		/*
		 * Work harder to translate errors into target driver
		 * understandable ones. Note with despair that the target
		 * drivers don't decode pkt_state and pkt_reason exhaustively
		 * They resort to using the big hammer most often, which
		 * may not get fixed in the life time of this driver.
		 */
		pkt->pkt_state = 0;
		pkt->pkt_statistics = 0;

		switch (fpkt->pkt_state) {
		case FC_PKT_TRAN_ERROR:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OVERRUN:
				pkt->pkt_reason = CMD_CMD_OVR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_XCHG_BSY: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT;	/* Lie */

				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_ABORTED:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_ABORT_FAILED:
				pkt->pkt_reason = CMD_ABORT_FAIL;
				break;

			case FC_REASON_NO_SEQ_INIT:
			case FC_REASON_CRC_ERROR:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;
			default:
				pkt->pkt_reason = CMD_TRAN_ERR;
				break;
			}
			break;

		case FC_PKT_PORT_OFFLINE: {
			dev_info_t *cdip = NULL;
			caddr_t ptr;

			if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_8, 0,
				    "SCSI cmd; LOGIN REQUIRED from FCA for %x",
				    ptgt->tgt_d_id);
			}

			/* Locate the child devinfo (mpxio or not). */
			mutex_enter(&plun->lun_mutex);
			if (plun->lun_mpxio == 0) {
				cdip = DIP(plun->lun_cip);
			} else if (plun->lun_cip) {
				cdip = mdi_pi_get_client(PIP(plun->lun_cip));
			}

			mutex_exit(&plun->lun_mutex);

			/* Notify interested children of the removal event. */
			if (cdip) {
				(void) ndi_event_retrieve_cookie(
				    pptr->port_ndi_event_hdl, cdip,
				    FCAL_REMOVE_EVENT, &fcp_remove_eid,
				    NDI_EVENT_NOPASS);
				(void) ndi_event_run_callbacks(
				    pptr->port_ndi_event_hdl, cdip,
				    fcp_remove_eid, NULL);
			}

			/*
			 * If the link goes off-line for a lip,
			 * this will cause a error to the ST SG
			 * SGEN drivers. By setting BUSY we will
			 * give the drivers the chance to retry
			 * before it blows of the job. ST will
			 * remember how many times it has retried.
			 */

			if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
			    (plun->lun_type == DTYPE_CHANGER)) {
				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
			} else {
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;
			}
			break;
		}

		case FC_PKT_TRAN_BSY:
			/*
			 * Use the ssd Qfull handling here.
			 */
			*pkt->pkt_scbp = STATUS_INTERMEDIATE;
			pkt->pkt_state = STATE_GOT_BUS;
			break;

		case FC_PKT_TIMEOUT:
			pkt->pkt_reason = CMD_TIMEOUT;
			if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
				pkt->pkt_statistics |= STAT_TIMEOUT;
			} else {
				pkt->pkt_statistics |= STAT_ABORTED;
			}
			break;

		case FC_PKT_LOCAL_RJT:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OFFLINE: {
				dev_info_t *cdip = NULL;

				mutex_enter(&plun->lun_mutex);
				if (plun->lun_mpxio == 0) {
					cdip = DIP(plun->lun_cip);
				} else if (plun->lun_cip) {
					cdip = mdi_pi_get_client(
					    PIP(plun->lun_cip));
				}
				mutex_exit(&plun->lun_mutex);

				if (cdip) {
					(void) ndi_event_retrieve_cookie(
					    pptr->port_ndi_event_hdl, cdip,
					    FCAL_REMOVE_EVENT,
					    &fcp_remove_eid,
					    NDI_EVENT_NOPASS);
					(void) ndi_event_run_callbacks(
					    pptr->port_ndi_event_hdl,
					    cdip, fcp_remove_eid, NULL);
				}

				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;

				break;
			}

			case FC_REASON_NOMEM:
			case FC_REASON_QFULL: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_DMA_ERROR:
				pkt->pkt_reason = CMD_DMA_DERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_CRC_ERROR:
			case FC_REASON_UNDERRUN: {
				uchar_t status;
				/*
				 * Work around for Bugid: 4240945.
				 * IB on A5k doesn't set the Underrun bit
				 * in the fcp status, when it is transferring
				 * less than requested amount of data. Work
				 * around the ses problem to keep luxadm
				 * happy till ibfirmware is fixed.
				 */
				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
					FCP_CP_IN(fpkt->pkt_resp, rsp,
					    fpkt->pkt_resp_acc,
					    sizeof (struct fcp_rsp));
				}
				status = rsp->fcp_u.fcp_status.scsi_status;
				if (((plun->lun_type & DTYPE_MASK) ==
				    DTYPE_ESI) && (status == STATUS_GOOD)) {
					pkt->pkt_reason = CMD_CMPLT;
					*pkt->pkt_scbp = status;
					pkt->pkt_resid = 0;
				} else {
					pkt->pkt_reason = CMD_TRAN_ERR;
					pkt->pkt_statistics |= STAT_ABORTED;
				}
				break;
			}

			case FC_REASON_NO_CONNECTION:
			case FC_REASON_UNSUPPORTED:
			case FC_REASON_ILLEGAL_REQ:
			case FC_REASON_BAD_SID:
			case FC_REASON_DIAG_BUSY:
			case FC_REASON_FCAL_OPN_FAIL:
			case FC_REASON_BAD_XID:
			default:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			}
			break;

		case FC_PKT_NPORT_RJT:
		case FC_PKT_FABRIC_RJT:
		case FC_PKT_NPORT_BSY:
		case FC_PKT_FABRIC_BSY:
		default:
			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_8, 0,
			    "FC Status 0x%x, reason 0x%x",
			    fpkt->pkt_state, fpkt->pkt_reason);
			pkt->pkt_reason = CMD_TRAN_ERR;
			pkt->pkt_statistics |= STAT_ABORTED;
			break;
		}

		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_9, 0,
		    "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
		    " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
		    fpkt->pkt_reason);
	}

	/* 'save' is only initialized under DEBUG; ASSERT is a no-op otherwise. */
	ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
}
9213
9214
9215 static int
9216 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9217 {
9218 if (rsp->reserved_0 || rsp->reserved_1 ||
9219 rsp->fcp_u.fcp_status.reserved_0 ||
9220 rsp->fcp_u.fcp_status.reserved_1) {
9221 /*
9222 * These reserved fields should ideally be zero. FCP-2 does say
9223 * that the recipient need not check for reserved fields to be
9224 * zero. If they are not zero, we will not make a fuss about it
9225 * - just log it (in debug to both trace buffer and messages
9226 * file and to trace buffer only in non-debug) and move on.
9227 *
9228 * Non-zero reserved fields were seen with minnows.
9229 *
9230 * qlc takes care of some of this but we cannot assume that all
9231 * FCAs will do so.
9232 */
9233 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9234 FCP_BUF_LEVEL_5, 0,
9235 "Got fcp response packet with non-zero reserved fields "
9236 "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9237 "status.reserved_0:0x%x, status.reserved_1:0x%x",
9238 rsp->reserved_0, rsp->reserved_1,
9239 rsp->fcp_u.fcp_status.reserved_0,
9240 rsp->fcp_u.fcp_status.reserved_1);
9241 }
9242
9243 if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9244 (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9245 return (FC_FAILURE);
9246 }
9247
9248 if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9249 (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9250 sizeof (struct fcp_rsp))) {
9251 return (FC_FAILURE);
9252 }
9253
9254 return (FC_SUCCESS);
9255 }
9256
9257
9258 /*
9259 * This is called when there is a change the in device state. The case we're
9260 * handling here is, if the d_id s does not match, offline this tgt and online
9261 * a new tgt with the new d_id. called from fcp_handle_devices with
9262 * port_mutex held.
9263 */
9264 static int
9265 fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
9266 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
9267 {
9268 ASSERT(mutex_owned(&pptr->port_mutex));
9269
9270 FCP_TRACE(fcp_logq, pptr->port_instbuf,
9271 fcp_trace, FCP_BUF_LEVEL_3, 0,
9272 "Starting fcp_device_changed...");
9273
9274 /*
9275 * The two cases where the port_device_changed is called is
9276 * either it changes it's d_id or it's hard address.
9277 */
9278 if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
9279 (FC_TOP_EXTERNAL(pptr->port_topology) &&
9280 (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {
9281
9282 /* offline this target */
9283 mutex_enter(&ptgt->tgt_mutex);
9284 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
9285 (void) fcp_offline_target(pptr, ptgt, link_cnt,
9286 0, 1, NDI_DEVI_REMOVE);
9287 }
9288 mutex_exit(&ptgt->tgt_mutex);
9289
9290 fcp_log(CE_NOTE, pptr->port_dip,
9291 "Change in target properties: Old D_ID=%x New D_ID=%x"
9292 " Old HA=%x New HA=%x", ptgt->tgt_d_id,
9293 map_entry->map_did.port_id, ptgt->tgt_hard_addr,
9294 map_entry->map_hard_addr.hard_addr);
9295 }
9296
9297 return (fcp_handle_mapflags(pptr, ptgt, map_entry,
9298 link_cnt, tgt_cnt, cause));
9299 }
9300
9301 /*
9302 * Function: fcp_alloc_lun
9303 *
9304 * Description: Creates a new lun structure and adds it to the list
9305 * of luns of the target.
9306 *
9307 * Argument: ptgt Target the lun will belong to.
9308 *
9309 * Return Value: NULL Failed
9310 * Not NULL Succeeded
9311 *
9312 * Context: Kernel context
9313 */
9314 static struct fcp_lun *
9315 fcp_alloc_lun(struct fcp_tgt *ptgt)
9316 {
9317 struct fcp_lun *plun;
9318
9319 plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9320 if (plun != NULL) {
9321 /*
9322 * Initialize the mutex before putting in the target list
9323 * especially before releasing the target mutex.
9324 */
9325 mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9326 plun->lun_tgt = ptgt;
9327
9328 mutex_enter(&ptgt->tgt_mutex);
9329 plun->lun_next = ptgt->tgt_lun;
9330 ptgt->tgt_lun = plun;
9331 plun->lun_old_guid = NULL;
9332 plun->lun_old_guid_size = 0;
9333 mutex_exit(&ptgt->tgt_mutex);
9334 }
9335
9336 return (plun);
9337 }
9338
9339 /*
9340 * Function: fcp_dealloc_lun
9341 *
9342 * Description: Frees the LUN structure passed by the caller.
9343 *
9344 * Argument: plun LUN structure to free.
9345 *
9346 * Return Value: None
9347 *
9348 * Context: Kernel context.
9349 */
9350 static void
9351 fcp_dealloc_lun(struct fcp_lun *plun)
9352 {
9353 mutex_enter(&plun->lun_mutex);
9354 if (plun->lun_cip) {
9355 fcp_remove_child(plun);
9356 }
9357 mutex_exit(&plun->lun_mutex);
9358
9359 mutex_destroy(&plun->lun_mutex);
9360 if (plun->lun_guid) {
9361 kmem_free(plun->lun_guid, plun->lun_guid_size);
9362 }
9363 if (plun->lun_old_guid) {
9364 kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
9365 }
9366 kmem_free(plun, sizeof (*plun));
9367 }
9368
9369 /*
9370 * Function: fcp_alloc_tgt
9371 *
9372 * Description: Creates a new target structure and adds it to the port
9373 * hash list.
9374 *
9375 * Argument: pptr fcp port structure
9376 * *map_entry entry describing the target to create
9377 * link_cnt Link state change counter
9378 *
9379 * Return Value: NULL Failed
9380 * Not NULL Succeeded
9381 *
9382 * Context: Kernel context.
9383 */
9384 static struct fcp_tgt *
9385 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
9386 {
9387 int hash;
9388 uchar_t *wwn;
9389 struct fcp_tgt *ptgt;
9390
9391 ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
9392 if (ptgt != NULL) {
9393 mutex_enter(&pptr->port_mutex);
9394 if (link_cnt != pptr->port_link_cnt) {
9395 /*
9396 * oh oh -- another link reset
9397 * in progress -- give up
9398 */
9399 mutex_exit(&pptr->port_mutex);
9400 kmem_free(ptgt, sizeof (*ptgt));
9401 ptgt = NULL;
9402 } else {
9403 /*
9404 * initialize the mutex before putting in the port
9405 * wwn list, especially before releasing the port
9406 * mutex.
9407 */
9408 mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);
9409
9410 /* add new target entry to the port's hash list */
9411 wwn = (uchar_t *)&map_entry->map_pwwn;
9412 hash = FCP_HASH(wwn);
9413
9414 ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
9415 pptr->port_tgt_hash_table[hash] = ptgt;
9416
9417 /* save cross-ptr */
9418 ptgt->tgt_port = pptr;
9419
9420 ptgt->tgt_change_cnt = 1;
9421
9422 /* initialize the target manual_config_only flag */
9423 if (fcp_enable_auto_configuration) {
9424 ptgt->tgt_manual_config_only = 0;
9425 } else {
9426 ptgt->tgt_manual_config_only = 1;
9427 }
9428
9429 mutex_exit(&pptr->port_mutex);
9430 }
9431 }
9432
9433 return (ptgt);
9434 }
9435
9436 /*
9437 * Function: fcp_dealloc_tgt
9438 *
9439 * Description: Frees the target structure passed by the caller.
9440 *
9441 * Argument: ptgt Target structure to free.
9442 *
9443 * Return Value: None
9444 *
9445 * Context: Kernel context.
9446 */
9447 static void
9448 fcp_dealloc_tgt(struct fcp_tgt *ptgt)
9449 {
9450 mutex_destroy(&ptgt->tgt_mutex);
9451 kmem_free(ptgt, sizeof (*ptgt));
9452 }
9453
9454
9455 /*
9456 * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry
9457 *
9458 * Device discovery commands will not be retried for-ever as
9459 * this will have repercussions on other devices that need to
9460 * be submitted to the hotplug thread. After a quick glance
9461 * at the SCSI-3 spec, it was found that the spec doesn't
9462 * mandate a forever retry, rather recommends a delayed retry.
9463 *
9464 * Since Photon IB is single threaded, STATUS_BUSY is common
9465 * in a 4+initiator environment. Make sure the total time
9466 * spent on retries (including command timeout) does not
9467 * 60 seconds
9468 */
9469 static void
9470 fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
9471 {
9472 struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9473 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
9474
9475 mutex_enter(&pptr->port_mutex);
9476 mutex_enter(&ptgt->tgt_mutex);
9477 if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
9478 FCP_TRACE(fcp_logq, pptr->port_instbuf,
9479 fcp_trace, FCP_BUF_LEVEL_2, 0,
9480 "fcp_queue_ipkt,1:state change occured"
9481 " for D_ID=0x%x", ptgt->tgt_d_id);
9482 mutex_exit(&ptgt->tgt_mutex);
9483 mutex_exit(&pptr->port_mutex);
9484 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
9485 icmd->ipkt_change_cnt, icmd->ipkt_cause);
9486 fcp_icmd_free(pptr, icmd);
9487 return;
9488 }
9489 mutex_exit(&ptgt->tgt_mutex);
9490
9491 icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;
9492
9493 if (pptr->port_ipkt_list != NULL) {
9494 /* add pkt to front of doubly-linked list */
9495 pptr->port_ipkt_list->ipkt_prev = icmd;
9496 icmd->ipkt_next = pptr->port_ipkt_list;
9497 pptr->port_ipkt_list = icmd;
9498 icmd->ipkt_prev = NULL;
9499 } else {
9500 /* this is the first/only pkt on the list */
9501 pptr->port_ipkt_list = icmd;
9502 icmd->ipkt_next = NULL;
9503 icmd->ipkt_prev = NULL;
9504 }
9505 mutex_exit(&pptr->port_mutex);
9506 }
9507
9508 /*
9509 * Function: fcp_transport
9510 *
9511 * Description: This function submits the Fibre Channel packet to the transort
9512 * layer by calling fc_ulp_transport(). If fc_ulp_transport()
9513 * fails the submission, the treatment depends on the value of
9514 * the variable internal.
9515 *
9516 * Argument: port_handle fp/fctl port handle.
9517 * *fpkt Packet to submit to the transport layer.
9518 * internal Not zero when it's an internal packet.
9519 *
9520 * Return Value: FC_TRAN_BUSY
9521 * FC_STATEC_BUSY
9522 * FC_OFFLINE
9523 * FC_LOGINREQ
9524 * FC_DEVICE_BUSY
9525 * FC_SUCCESS
9526 */
9527 static int
9528 fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
9529 {
9530 int rval;
9531
9532 rval = fc_ulp_transport(port_handle, fpkt);
9533 if (rval == FC_SUCCESS) {
9534 return (rval);
9535 }
9536
9537 /*
9538 * LUN isn't marked BUSY or OFFLINE, so we got here to transport
9539 * a command, if the underlying modules see that there is a state
9540 * change, or if a port is OFFLINE, that means, that state change
9541 * hasn't reached FCP yet, so re-queue the command for deferred
9542 * submission.
9543 */
9544 if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
9545 (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
9546 (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
9547 /*
9548 * Defer packet re-submission. Life hang is possible on
9549 * internal commands if the port driver sends FC_STATEC_BUSY
9550 * for ever, but that shouldn't happen in a good environment.
9551 * Limiting re-transport for internal commands is probably a
9552 * good idea..
9553 * A race condition can happen when a port sees barrage of
9554 * link transitions offline to online. If the FCTL has
9555 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
9556 * internal commands should be queued to do the discovery.
9557 * The race condition is when an online comes and FCP starts
9558 * its internal discovery and the link goes offline. It is
9559 * possible that the statec_callback has not reached FCP
9560 * and FCP is carrying on with its internal discovery.
9561 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
9562 * that the link has gone offline. At this point FCP should
9563 * drop all the internal commands and wait for the
9564 * statec_callback. It will be facilitated by incrementing
9565 * port_link_cnt.
9566 *
9567 * For external commands, the (FC)pkt_timeout is decremented
9568 * by the QUEUE Delay added by our driver, Care is taken to
9569 * ensure that it doesn't become zero (zero means no timeout)
9570 * If the time expires right inside driver queue itself,
9571 * the watch thread will return it to the original caller
9572 * indicating that the command has timed-out.
9573 */
9574 if (internal) {
9575 char *op;
9576 struct fcp_ipkt *icmd;
9577
9578 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9579 switch (icmd->ipkt_opcode) {
9580 case SCMD_REPORT_LUN:
9581 op = "REPORT LUN";
9582 break;
9583
9584 case SCMD_INQUIRY:
9585 op = "INQUIRY";
9586 break;
9587
9588 case SCMD_INQUIRY_PAGE83:
9589 op = "INQUIRY-83";
9590 break;
9591
9592 default:
9593 op = "Internal SCSI COMMAND";
9594 break;
9595 }
9596
9597 if (fcp_handle_ipkt_errors(icmd->ipkt_port,
9598 icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
9599 rval = FC_SUCCESS;
9600 }
9601 } else {
9602 struct fcp_pkt *cmd;
9603 struct fcp_port *pptr;
9604
9605 cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
9606 cmd->cmd_state = FCP_PKT_IDLE;
9607 pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);
9608
9609 if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
9610 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9611 fcp_trace, FCP_BUF_LEVEL_9, 0,
9612 "fcp_transport: xport busy for pkt %p",
9613 cmd->cmd_pkt);
9614 rval = FC_TRAN_BUSY;
9615 } else {
9616 fcp_queue_pkt(pptr, cmd);
9617 rval = FC_SUCCESS;
9618 }
9619 }
9620 }
9621
9622 return (rval);
9623 }
9624
9625 /*VARARGS3*/
9626 static void
9627 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9628 {
9629 char buf[256];
9630 va_list ap;
9631
9632 if (dip == NULL) {
9633 dip = fcp_global_dip;
9634 }
9635
9636 va_start(ap, fmt);
9637 (void) vsprintf(buf, fmt, ap);
9638 va_end(ap);
9639
9640 scsi_log(dip, "fcp", level, buf);
9641 }
9642
9643 /*
9644 * This function retries NS registry of FC4 type.
9645 * It assumes that fcp_mutex is held.
9646 * The function does nothing if topology is not fabric
9647 * So, the topology has to be set before this function can be called
9648 */
9649 static void
9650 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9651 {
9652 int rval;
9653
9654 ASSERT(MUTEX_HELD(&pptr->port_mutex));
9655
9656 if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9657 ((pptr->port_topology != FC_TOP_FABRIC) &&
9658 (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9659 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9660 pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9661 }
9662 return;
9663 }
9664 mutex_exit(&pptr->port_mutex);
9665 rval = fcp_do_ns_registry(pptr, s_id);
9666 mutex_enter(&pptr->port_mutex);
9667
9668 if (rval == 0) {
9669 /* Registry successful. Reset flag */
9670 pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9671 }
9672 }
9673
9674 /*
9675 * This function registers the ULP with the switch by calling transport i/f
9676 */
9677 static int
9678 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9679 {
9680 fc_ns_cmd_t ns_cmd;
9681 ns_rfc_type_t rfc;
9682 uint32_t types[8];
9683
9684 /*
9685 * Prepare the Name server structure to
9686 * register with the transport in case of
9687 * Fabric configuration.
9688 */
9689 bzero(&rfc, sizeof (rfc));
9690 bzero(types, sizeof (types));
9691
9692 types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9693 (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
9694
9695 rfc.rfc_port_id.port_id = s_id;
9696 bcopy(types, rfc.rfc_types, sizeof (types));
9697
9698 ns_cmd.ns_flags = 0;
9699 ns_cmd.ns_cmd = NS_RFT_ID;
9700 ns_cmd.ns_req_len = sizeof (rfc);
9701 ns_cmd.ns_req_payload = (caddr_t)&rfc;
9702 ns_cmd.ns_resp_len = 0;
9703 ns_cmd.ns_resp_payload = NULL;
9704
9705 /*
9706 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9707 */
9708 if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9709 fcp_log(CE_WARN, pptr->port_dip,
9710 "!ns_registry: failed name server registration");
9711 return (1);
9712 }
9713
9714 return (0);
9715 }
9716
9717 /*
9718 * Function: fcp_handle_port_attach
9719 *
9720 * Description: This function is called from fcp_port_attach() to attach a
9721 * new port. This routine does the following:
9722 *
9723 * 1) Allocates an fcp_port structure and initializes it.
9724 * 2) Tries to register the new FC-4 (FCP) capablity with the name
9725 * server.
9726 * 3) Kicks off the enumeration of the targets/luns visible
9727 * through this new port. That is done by calling
9728 * fcp_statec_callback() if the port is online.
9729 *
9730 * Argument: ulph fp/fctl port handle.
9731 * *pinfo Port information.
9732 * s_id Port ID.
9733 * instance Device instance number for the local port
9734 * (returned by ddi_get_instance()).
9735 *
9736 * Return Value: DDI_SUCCESS
9737 * DDI_FAILURE
9738 *
9739 * Context: User and Kernel context.
9740 */
9741 /*ARGSUSED*/
9742 int
9743 fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
9744 uint32_t s_id, int instance)
9745 {
9746 int res = DDI_FAILURE;
9747 scsi_hba_tran_t *tran;
9748 int mutex_initted = FALSE;
9749 int hba_attached = FALSE;
9750 int soft_state_linked = FALSE;
9751 int event_bind = FALSE;
9752 struct fcp_port *pptr;
9753 fc_portmap_t *tmp_list = NULL;
9754 uint32_t max_cnt, alloc_cnt;
9755 uchar_t *boot_wwn = NULL;
9756 uint_t nbytes;
9757 int manual_cfg;
9758
9759 /*
9760 * this port instance attaching for the first time (or after
9761 * being detached before)
9762 */
9763 FCP_TRACE(fcp_logq, "fcp", fcp_trace,
9764 FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);
9765
9766 if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
9767 cmn_err(CE_WARN, "fcp: Softstate struct alloc failed"
9768 "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
9769 instance);
9770 return (res);
9771 }
9772
9773 if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
9774 /* this shouldn't happen */
9775 ddi_soft_state_free(fcp_softstate, instance);
9776 cmn_err(CE_WARN, "fcp: bad soft state");
9777 return (res);
9778 }
9779
9780 (void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);
9781
9782 /*
9783 * Make a copy of ulp_port_info as fctl allocates
9784 * a temp struct.
9785 */
9786 (void) fcp_cp_pinfo(pptr, pinfo);
9787
9788 /*
9789 * Check for manual_configuration_only property.
9790 * Enable manual configurtion if the property is
9791 * set to 1, otherwise disable manual configuration.
9792 */
9793 if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
9794 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9795 MANUAL_CFG_ONLY,
9796 -1)) != -1) {
9797 if (manual_cfg == 1) {
9798 char *pathname;
9799 pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
9800 (void) ddi_pathname(pptr->port_dip, pathname);
9801 cmn_err(CE_NOTE,
9802 "%s (%s%d) %s is enabled via %s.conf.",
9803 pathname,
9804 ddi_driver_name(pptr->port_dip),
9805 ddi_get_instance(pptr->port_dip),
9806 MANUAL_CFG_ONLY,
9807 ddi_driver_name(pptr->port_dip));
9808 fcp_enable_auto_configuration = 0;
9809 kmem_free(pathname, MAXPATHLEN);
9810 }
9811 }
9812 pptr->port_link_cnt = 1;
9813 pptr->port_id = s_id;
9814 pptr->port_instance = instance;
9815 pptr->port_state = FCP_STATE_INIT;
9816 if (pinfo->port_acc_attr == NULL) {
9817 /*
9818 * The corresponding FCA doesn't support DMA at all
9819 */
9820 pptr->port_state |= FCP_STATE_FCA_IS_NODMA;
9821 }
9822
9823 if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
9824 /*
9825 * If FCA supports DMA in SCSI data phase, we need preallocate
9826 * dma cookie, so stash the cookie size
9827 */
9828 pptr->port_dmacookie_sz = sizeof (ddi_dma_cookie_t) *
9829 pptr->port_data_dma_attr.dma_attr_sgllen;
9830 }
9831
9832 /*
9833 * The two mutexes of fcp_port are initialized. The variable
9834 * mutex_initted is incremented to remember that fact. That variable
9835 * is checked when the routine fails and the mutexes have to be
9836 * destroyed.
9837 */
9838 mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
9839 mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
9840 mutex_initted++;
9841
9842 /*
9843 * The SCSI tran structure is allocate and initialized now.
9844 */
9845 if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
9846 fcp_log(CE_WARN, pptr->port_dip,
9847 "!fcp%d: scsi_hba_tran_alloc failed", instance);
9848 goto fail;
9849 }
9850
9851 /* link in the transport structure then fill it in */
9852 pptr->port_tran = tran;
9853 tran->tran_hba_private = pptr;
9854 tran->tran_tgt_init = fcp_scsi_tgt_init;
9855 tran->tran_tgt_probe = NULL;
9856 tran->tran_tgt_free = fcp_scsi_tgt_free;
9857 tran->tran_start = fcp_scsi_start;
9858 tran->tran_reset = fcp_scsi_reset;
9859 tran->tran_abort = fcp_scsi_abort;
9860 tran->tran_getcap = fcp_scsi_getcap;
9861 tran->tran_setcap = fcp_scsi_setcap;
9862 tran->tran_init_pkt = NULL;
9863 tran->tran_destroy_pkt = NULL;
9864 tran->tran_dmafree = NULL;
9865 tran->tran_sync_pkt = NULL;
9866 tran->tran_reset_notify = fcp_scsi_reset_notify;
9867 tran->tran_get_bus_addr = fcp_scsi_get_bus_addr;
9868 tran->tran_get_name = fcp_scsi_get_name;
9869 tran->tran_clear_aca = NULL;
9870 tran->tran_clear_task_set = NULL;
9871 tran->tran_terminate_task = NULL;
9872 tran->tran_get_eventcookie = fcp_scsi_bus_get_eventcookie;
9873 tran->tran_add_eventcall = fcp_scsi_bus_add_eventcall;
9874 tran->tran_remove_eventcall = fcp_scsi_bus_remove_eventcall;
9875 tran->tran_post_event = fcp_scsi_bus_post_event;
9876 tran->tran_quiesce = NULL;
9877 tran->tran_unquiesce = NULL;
9878 tran->tran_bus_reset = NULL;
9879 tran->tran_bus_config = fcp_scsi_bus_config;
9880 tran->tran_bus_unconfig = fcp_scsi_bus_unconfig;
9881 tran->tran_bus_power = NULL;
9882 tran->tran_interconnect_type = INTERCONNECT_FABRIC;
9883
9884 tran->tran_pkt_constructor = fcp_kmem_cache_constructor;
9885 tran->tran_pkt_destructor = fcp_kmem_cache_destructor;
9886 tran->tran_setup_pkt = fcp_pkt_setup;
9887 tran->tran_teardown_pkt = fcp_pkt_teardown;
9888 tran->tran_hba_len = pptr->port_priv_pkt_len +
9889 sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
9890 if (pptr->port_state & FCP_STATE_FCA_IS_NODMA) {
9891 /*
9892 * If FCA don't support DMA, then we use different vectors to
9893 * minimize the effects on DMA code flow path
9894 */
9895 tran->tran_start = fcp_pseudo_start;
9896 tran->tran_init_pkt = fcp_pseudo_init_pkt;
9897 tran->tran_destroy_pkt = fcp_pseudo_destroy_pkt;
9898 tran->tran_sync_pkt = fcp_pseudo_sync_pkt;
9899 tran->tran_dmafree = fcp_pseudo_dmafree;
9900 tran->tran_setup_pkt = NULL;
9901 tran->tran_teardown_pkt = NULL;
9902 tran->tran_pkt_constructor = NULL;
9903 tran->tran_pkt_destructor = NULL;
9904 pptr->port_data_dma_attr = pseudo_fca_dma_attr;
9905 }
9906
9907 /*
9908 * Allocate an ndi event handle
9909 */
9910 pptr->port_ndi_event_defs = (ndi_event_definition_t *)
9911 kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);
9912
9913 bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
9914 sizeof (fcp_ndi_event_defs));
9915
9916 (void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
9917 &pptr->port_ndi_event_hdl, NDI_SLEEP);
9918
9919 pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
9920 pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
9921 pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;
9922
9923 if (DEVI_IS_ATTACHING(pptr->port_dip) &&
9924 (ndi_event_bind_set(pptr->port_ndi_event_hdl,
9925 &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
9926 goto fail;
9927 }
9928 event_bind++; /* Checked in fail case */
9929
9930 if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
9931 tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
9932 != DDI_SUCCESS) {
9933 fcp_log(CE_WARN, pptr->port_dip,
9934 "!fcp%d: scsi_hba_attach_setup failed", instance);
9935 goto fail;
9936 }
9937 hba_attached++; /* Checked in fail case */
9938
9939 pptr->port_mpxio = 0;
9940 if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
9941 MDI_SUCCESS) {
9942 pptr->port_mpxio++;
9943 }
9944
9945 /*
9946 * The following code is putting the new port structure in the global
9947 * list of ports and, if it is the first port to attach, it start the
9948 * fcp_watchdog_tick.
9949 *
9950 * Why put this new port in the global before we are done attaching it?
9951 * We are actually making the structure globally known before we are
9952 * done attaching it. The reason for that is: because of the code that
9953 * follows. At this point the resources to handle the port are
9954 * allocated. This function is now going to do the following:
9955 *
9956 * 1) It is going to try to register with the name server advertizing
9957 * the new FCP capability of the port.
9958 * 2) It is going to play the role of the fp/fctl layer by building
9959 * a list of worlwide names reachable through this port and call
9960 * itself on fcp_statec_callback(). That requires the port to
9961 * be part of the global list.
9962 */
9963 mutex_enter(&fcp_global_mutex);
9964 if (fcp_port_head == NULL) {
9965 fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
9966 }
9967 pptr->port_next = fcp_port_head;
9968 fcp_port_head = pptr;
9969 soft_state_linked++;
9970
9971 if (fcp_watchdog_init++ == 0) {
9972 fcp_watchdog_tick = fcp_watchdog_timeout *
9973 drv_usectohz(1000000);
9974 fcp_watchdog_id = timeout(fcp_watch, NULL,
9975 fcp_watchdog_tick);
9976 }
9977 mutex_exit(&fcp_global_mutex);
9978
9979 /*
9980 * Here an attempt is made to register with the name server, the new
9981 * FCP capability. That is done using an RTF_ID to the name server.
9982 * It is done synchronously. The function fcp_do_ns_registry()
9983 * doesn't return till the name server responded.
9984 * On failures, just ignore it for now and it will get retried during
9985 * state change callbacks. We'll set a flag to show this failure
9986 */
9987 if (fcp_do_ns_registry(pptr, s_id)) {
9988 mutex_enter(&pptr->port_mutex);
9989 pptr->port_state |= FCP_STATE_NS_REG_FAILED;
9990 mutex_exit(&pptr->port_mutex);
9991 } else {
9992 mutex_enter(&pptr->port_mutex);
9993 pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9994 mutex_exit(&pptr->port_mutex);
9995 }
9996
9997 /*
9998 * Lookup for boot WWN property
9999 */
10000 if (modrootloaded != 1) {
10001 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
10002 ddi_get_parent(pinfo->port_dip),
10003 DDI_PROP_DONTPASS, OBP_BOOT_WWN,
10004 &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
10005 (nbytes == FC_WWN_SIZE)) {
10006 bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
10007 }
10008 if (boot_wwn) {
10009 ddi_prop_free(boot_wwn);
10010 }
10011 }
10012
10013 /*
10014 * Handle various topologies and link states.
10015 */
10016 switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
10017 case FC_STATE_OFFLINE:
10018
10019 /*
10020 * we're attaching a port where the link is offline
10021 *
10022 * Wait for ONLINE, at which time a state
10023 * change will cause a statec_callback
10024 *
10025 * in the mean time, do not do anything
10026 */
10027 res = DDI_SUCCESS;
10028 pptr->port_state |= FCP_STATE_OFFLINE;
10029 break;
10030
10031 case FC_STATE_ONLINE: {
10032 if (pptr->port_topology == FC_TOP_UNKNOWN) {
10033 (void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
10034 res = DDI_SUCCESS;
10035 break;
10036 }
10037 /*
10038 * discover devices and create nodes (a private
10039 * loop or point-to-point)
10040 */
10041 ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
10042
10043 /*
10044 * At this point we are going to build a list of all the ports
10045 * that can be reached through this local port. It looks like
10046 * we cannot handle more than FCP_MAX_DEVICES per local port
10047 * (128).
10048 */
10049 if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
10050 sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
10051 KM_NOSLEEP)) == NULL) {
10052 fcp_log(CE_WARN, pptr->port_dip,
10053 "!fcp%d: failed to allocate portmap",
10054 instance);
10055 goto fail;
10056 }
10057
10058 /*
10059 * fc_ulp_getportmap() is going to provide us with the list of
10060 * remote ports in the buffer we just allocated. The way the
10061 * list is going to be retrieved depends on the topology.
10062 * However, if we are connected to a Fabric, a name server
10063 * request may be sent to get the list of FCP capable ports.
10064 * It should be noted that is the case the request is
10065 * synchronous. This means we are stuck here till the name
10066 * server replies. A lot of things can change during that time
10067 * and including, may be, being called on
10068 * fcp_statec_callback() for different reasons. I'm not sure
10069 * the code can handle that.
10070 */
10071 max_cnt = FCP_MAX_DEVICES;
10072 alloc_cnt = FCP_MAX_DEVICES;
10073 if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
10074 &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
10075 FC_SUCCESS) {
10076 caddr_t msg;
10077
10078 (void) fc_ulp_error(res, &msg);
10079
10080 /*
10081 * this just means the transport is
10082 * busy perhaps building a portmap so,
10083 * for now, succeed this port attach
10084 * when the transport has a new map,
10085 * it'll send us a state change then
10086 */
10087 fcp_log(CE_WARN, pptr->port_dip,
10088 "!failed to get port map : %s", msg);
10089
10090 res = DDI_SUCCESS;
10091 break; /* go return result */
10092 }
10093 if (max_cnt > alloc_cnt) {
10094 alloc_cnt = max_cnt;
10095 }
10096
10097 /*
10098 * We are now going to call fcp_statec_callback() ourselves.
10099 * By issuing this call we are trying to kick off the enumera-
10100 * tion process.
10101 */
10102 /*
10103 * let the state change callback do the SCSI device
10104 * discovery and create the devinfos
10105 */
10106 fcp_statec_callback(ulph, pptr->port_fp_handle,
10107 pptr->port_phys_state, pptr->port_topology, tmp_list,
10108 max_cnt, pptr->port_id);
10109
10110 res = DDI_SUCCESS;
10111 break;
10112 }
10113
10114 default:
10115 /* unknown port state */
10116 fcp_log(CE_WARN, pptr->port_dip,
10117 "!fcp%d: invalid port state at attach=0x%x",
10118 instance, pptr->port_phys_state);
10119
10120 mutex_enter(&pptr->port_mutex);
10121 pptr->port_phys_state = FCP_STATE_OFFLINE;
10122 mutex_exit(&pptr->port_mutex);
10123
10124 res = DDI_SUCCESS;
10125 break;
10126 }
10127
10128 /* free temp list if used */
10129 if (tmp_list != NULL) {
10130 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10131 }
10132
10133 /* note the attach time */
10134 pptr->port_attach_time = ddi_get_lbolt64();
10135
10136 /* all done */
10137 return (res);
10138
10139 /* a failure we have to clean up after */
10140 fail:
10141 fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");
10142
10143 if (soft_state_linked) {
10144 /* remove this fcp_port from the linked list */
10145 (void) fcp_soft_state_unlink(pptr);
10146 }
10147
10148 /* unbind and free event set */
10149 if (pptr->port_ndi_event_hdl) {
10150 if (event_bind) {
10151 (void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10152 &pptr->port_ndi_events, NDI_SLEEP);
10153 }
10154 (void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10155 }
10156
10157 if (pptr->port_ndi_event_defs) {
10158 (void) kmem_free(pptr->port_ndi_event_defs,
10159 sizeof (fcp_ndi_event_defs));
10160 }
10161
10162 /*
10163 * Clean up mpxio stuff
10164 */
10165 if (pptr->port_mpxio) {
10166 (void) mdi_phci_unregister(pptr->port_dip, 0);
10167 pptr->port_mpxio--;
10168 }
10169
10170 /* undo SCSI HBA setup */
10171 if (hba_attached) {
10172 (void) scsi_hba_detach(pptr->port_dip);
10173 }
10174 if (pptr->port_tran != NULL) {
10175 scsi_hba_tran_free(pptr->port_tran);
10176 }
10177
10178 mutex_enter(&fcp_global_mutex);
10179
10180 /*
10181 * We check soft_state_linked, because it is incremented right before
10182 * we call increment fcp_watchdog_init. Therefore, we know if
10183 * soft_state_linked is still FALSE, we do not want to decrement
10184 * fcp_watchdog_init or possibly call untimeout.
10185 */
10186
10187 if (soft_state_linked) {
10188 if (--fcp_watchdog_init == 0) {
10189 timeout_id_t tid = fcp_watchdog_id;
10190
10191 mutex_exit(&fcp_global_mutex);
10192 (void) untimeout(tid);
10193 } else {
10194 mutex_exit(&fcp_global_mutex);
10195 }
10196 } else {
10197 mutex_exit(&fcp_global_mutex);
10198 }
10199
10200 if (mutex_initted) {
10201 mutex_destroy(&pptr->port_mutex);
10202 mutex_destroy(&pptr->port_pkt_mutex);
10203 }
10204
10205 if (tmp_list != NULL) {
10206 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10207 }
10208
10209 /* this makes pptr invalid */
10210 ddi_soft_state_free(fcp_softstate, instance);
10211
10212 return (DDI_FAILURE);
10213 }
10214
10215
/*
 * Function: fcp_handle_port_detach
 *
 * Description: Quiesces and (for a real detach) tears down an FCP port.
 *		Called with flag set to FCP_STATE_DETACHING, or to a
 *		suspend/power-down state bit.  Waits up to FCP_ICMD_DEADLINE
 *		seconds for outstanding reconfig/internal-packet activity and
 *		the watchdog to drain before proceeding.
 *
 * Argument:	*pptr		FCP port to detach/suspend/power down.
 *		flag		FCP_STATE_DETACHING, or the state bit to set
 *				for a suspend/power-down request.
 *		instance	Instance number (passed to fcp_cleanup_port()).
 *
 * Return Value: FC_SUCCESS	Port quiesced (and cleaned up if detaching).
 *		FC_FAILURE	Port still busy (MDI operation in progress, or
 *				activity did not drain in time).
 */
static int
fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
{
	int count = 0;	/* seconds spent waiting for activity to drain */

	mutex_enter(&pptr->port_mutex);

	/*
	 * if the port is powered down or suspended, nothing else
	 * to do; just return.
	 */
	if (flag != FCP_STATE_DETACHING) {
		if (pptr->port_state & (FCP_STATE_POWER_DOWN |
		    FCP_STATE_SUSPENDED)) {
			pptr->port_state |= flag;
			mutex_exit(&pptr->port_mutex);
			return (FC_SUCCESS);
		}
	}

	/* An MDI operation is in progress on this port; refuse for now. */
	if (pptr->port_state & FCP_STATE_IN_MDI) {
		mutex_exit(&pptr->port_mutex);
		return (FC_FAILURE);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "fcp_handle_port_detach: port is detaching");

	/* Publish the new state so new activity sees it. */
	pptr->port_state |= flag;

	/*
	 * Wait for any ongoing reconfig/ipkt to complete, that
	 * ensures the freeing to targets/luns is safe.
	 * No more ref to this port should happen from statec/ioctl
	 * after that as it was removed from the global port list.
	 */
	while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
		/*
		 * Let's give sufficient time for reconfig/ipkt
		 * to complete.  One second per iteration, up to
		 * FCP_ICMD_DEADLINE iterations.
		 */
		if (count++ >= FCP_ICMD_DEADLINE) {
			break;
		}
		/* Drop the lock while sleeping so activity can drain. */
		mutex_exit(&pptr->port_mutex);
		delay(drv_usectohz(1000000));
		mutex_enter(&pptr->port_mutex);
	}

	/*
	 * if the driver is still busy then fail to
	 * suspend/power down.  Back out the state bit we set above.
	 */
	if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
		pptr->port_state &= ~flag;
		mutex_exit(&pptr->port_mutex);
		return (FC_FAILURE);
	}

	/*
	 * Remove the port from the global list; note that
	 * fcp_soft_state_unlink() returns the (still valid) port pointer.
	 */
	if (flag == FCP_STATE_DETACHING) {
		pptr = fcp_soft_state_unlink(pptr);
		ASSERT(pptr != NULL);
	}

	/* Mark the port offline and invalidate in-flight link state. */
	pptr->port_link_cnt++;
	pptr->port_state |= FCP_STATE_OFFLINE;
	pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);

	fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
	    FCP_CAUSE_LINK_DOWN);
	mutex_exit(&pptr->port_mutex);

	/* kill watch dog timer if we're the last */
	mutex_enter(&fcp_global_mutex);
	if (--fcp_watchdog_init == 0) {
		timeout_id_t tid = fcp_watchdog_id;

		/* untimeout() may block; never call it with a mutex held */
		mutex_exit(&fcp_global_mutex);
		(void) untimeout(tid);
	} else {
		mutex_exit(&fcp_global_mutex);
	}

	/* clean up the port structures */
	if (flag == FCP_STATE_DETACHING) {
		fcp_cleanup_port(pptr, instance);
	}

	return (FC_SUCCESS);
}
10308
10309
10310 static void
10311 fcp_cleanup_port(struct fcp_port *pptr, int instance)
10312 {
10313 ASSERT(pptr != NULL);
10314
10315 /* unbind and free event set */
10316 if (pptr->port_ndi_event_hdl) {
10317 (void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10318 &pptr->port_ndi_events, NDI_SLEEP);
10319 (void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10320 }
10321
10322 if (pptr->port_ndi_event_defs) {
10323 (void) kmem_free(pptr->port_ndi_event_defs,
10324 sizeof (fcp_ndi_event_defs));
10325 }
10326
10327 /* free the lun/target structures and devinfos */
10328 fcp_free_targets(pptr);
10329
10330 /*
10331 * Clean up mpxio stuff
10332 */
10333 if (pptr->port_mpxio) {
10334 (void) mdi_phci_unregister(pptr->port_dip, 0);
10335 pptr->port_mpxio--;
10336 }
10337
10338 /* clean up SCSA stuff */
10339 (void) scsi_hba_detach(pptr->port_dip);
10340 if (pptr->port_tran != NULL) {
10341 scsi_hba_tran_free(pptr->port_tran);
10342 }
10343
10344 #ifdef KSTATS_CODE
10345 /* clean up kstats */
10346 if (pptr->fcp_ksp != NULL) {
10347 kstat_delete(pptr->fcp_ksp);
10348 }
10349 #endif
10350
10351 /* clean up soft state mutexes/condition variables */
10352 mutex_destroy(&pptr->port_mutex);
10353 mutex_destroy(&pptr->port_pkt_mutex);
10354
10355 /* all done with soft state */
10356 ddi_soft_state_free(fcp_softstate, instance);
10357 }
10358
10359 /*
10360 * Function: fcp_kmem_cache_constructor
10361 *
10362 * Description: This function allocates and initializes the resources required
 *		to build a scsi_pkt structure for the target driver. The result
10364 * of the allocation and initialization will be cached in the
10365 * memory cache. As DMA resources may be allocated here, that
10366 * means DMA resources will be tied up in the cache manager.
10367 * This is a tradeoff that has been made for performance reasons.
10368 *
 * Argument:	*pkt		scsi_pkt whose HBA-private area is being
 *				preinitialized.
 *		*tran		SCSI HBA transport handle; its
 *				tran_hba_private field holds the FCP port
 *				structure (fcp_port).
 *		kmflags		Value passed to kmem_cache_alloc() and
 *				propagated to the constructor.
10373 *
10374 * Return Value: 0 Allocation/Initialization was successful.
10375 * -1 Allocation or Initialization failed.
10376 *
10377 *
10378 * If the returned value is 0, the buffer is initialized like this:
10379 *
10380 * +================================+
10381 * +----> | struct scsi_pkt |
10382 * | | |
10383 * | +--- | pkt_ha_private |
10384 * | | | |
10385 * | | +================================+
10386 * | |
10387 * | | +================================+
10388 * | +--> | struct fcp_pkt | <---------+
10389 * | | | |
10390 * +----- | cmd_pkt | |
10391 * | cmd_fp_pkt | ---+ |
10392 * +-------->| cmd_fcp_rsp[] | | |
10393 * | +--->| cmd_fcp_cmd[] | | |
10394 * | | |--------------------------------| | |
10395 * | | | struct fc_packet | <--+ |
10396 * | | | | |
10397 * | | | pkt_ulp_private | ----------+
10398 * | | | pkt_fca_private | -----+
10399 * | | | pkt_data_cookie | ---+ |
10400 * | | | pkt_cmdlen | | |
10401 * | |(a) | pkt_rsplen | | |
10402 * | +----| .......... pkt_cmd ........... | ---|-|---------------+
10403 * | (b) | pkt_cmd_cookie | ---|-|----------+ |
10404 * +---------| .......... pkt_resp .......... | ---|-|------+ | |
10405 * | pkt_resp_cookie | ---|-|--+ | | |
10406 * | pkt_cmd_dma | | | | | | |
10407 * | pkt_cmd_acc | | | | | | |
10408 * +================================+ | | | | | |
10409 * | dma_cookies | <--+ | | | | |
10410 * | | | | | | |
10411 * +================================+ | | | | |
10412 * | fca_private | <----+ | | | |
10413 * | | | | | |
10414 * +================================+ | | | |
10415 * | | | |
10416 * | | | |
10417 * +================================+ (d) | | | |
10418 * | fcp_resp cookies | <-------+ | | |
10419 * | | | | |
10420 * +================================+ | | |
10421 * | | |
10422 * +================================+ (d) | | |
10423 * | fcp_resp | <-----------+ | |
10424 * | (DMA resources associated) | | |
10425 * +================================+ | |
10426 * | |
10427 * | |
10428 * | |
10429 * +================================+ (c) | |
10430 * | fcp_cmd cookies | <---------------+ |
10431 * | | |
10432 * +================================+ |
10433 * |
10434 * +================================+ (c) |
10435 * | fcp_cmd | <--------------------+
10436 * | (DMA resources associated) |
10437 * +================================+
10438 *
10439 * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10440 * (b) Only if DMA is NOT used for the FCP_RESP buffer
10441 * (c) Only if DMA is used for the FCP_CMD buffer.
10442 * (d) Only if DMA is used for the FCP_RESP buffer
10443 */
10444 static int
10445 fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
10446 int kmflags)
10447 {
10448 struct fcp_pkt *cmd;
10449 struct fcp_port *pptr;
10450 fc_packet_t *fpkt;
10451
10452 pptr = (struct fcp_port *)tran->tran_hba_private;
10453 cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
10454 bzero(cmd, tran->tran_hba_len);
10455
10456 cmd->cmd_pkt = pkt;
10457 pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
10458 fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
10459 cmd->cmd_fp_pkt = fpkt;
10460
10461 cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
10462 cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
10463 cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
10464 sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);
10465
10466 fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
10467 sizeof (struct fcp_pkt));
10468
10469 fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
10470 fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
10471
10472 if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
10473 /*
10474 * The underlying HBA doesn't want to DMA the fcp_cmd or
10475 * fcp_resp. The transfer of information will be done by
10476 * bcopy.
10477 * The naming of the flags (that is actually a value) is
10478 * unfortunate. FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
10479 * DMA" but instead "NO DMA".
10480 */
10481 fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
10482 fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
10483 fpkt->pkt_resp = cmd->cmd_fcp_rsp;
10484 } else {
10485 /*
10486 * The underlying HBA will dma the fcp_cmd buffer and fcp_resp
10487 * buffer. A buffer is allocated for each one the ddi_dma_*
10488 * interfaces.
10489 */
10490 if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
10491 return (-1);
10492 }
10493 }
10494
10495 return (0);
10496 }
10497
10498 /*
10499 * Function: fcp_kmem_cache_destructor
10500 *
10501 * Description: Called by the destructor of the cache managed by SCSA.
10502 * All the resources pre-allocated in fcp_pkt_constructor
10503 * and the data also pre-initialized in fcp_pkt_constructor
10504 * are freed and uninitialized here.
10505 *
10506 * Argument: *buf Memory to uninitialize.
10507 * *arg FCP port structure (fcp_port).
10508 *
10509 * Return Value: None
10510 *
10511 * Context: kernel
10512 */
10513 static void
10514 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10515 {
10516 struct fcp_pkt *cmd;
10517 struct fcp_port *pptr;
10518
10519 pptr = (struct fcp_port *)(tran->tran_hba_private);
10520 cmd = pkt->pkt_ha_private;
10521
10522 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10523 /*
10524 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10525 * buffer and DMA resources allocated to do so are released.
10526 */
10527 fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10528 }
10529 }
10530
10531 /*
10532 * Function: fcp_alloc_cmd_resp
10533 *
10534 * Description: This function allocated an FCP_CMD and FCP_RESP buffer that
10535 * will be DMAed by the HBA. The buffer is allocated applying
10536 * the DMA requirements for the HBA. The buffers allocated will
10537 * also be bound. DMA resources are allocated in the process.
10538 * They will be released by fcp_free_cmd_resp().
10539 *
10540 * Argument: *pptr FCP port.
10541 * *fpkt fc packet for which the cmd and resp packet should be
10542 * allocated.
10543 * flags Allocation flags.
10544 *
10545 * Return Value: FC_FAILURE
10546 * FC_SUCCESS
10547 *
10548 * Context: User or Kernel context only if flags == KM_SLEEP.
10549 * Interrupt context if the KM_SLEEP is not specified.
10550 */
10551 static int
10552 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10553 {
10554 int rval;
10555 int cmd_len;
10556 int resp_len;
10557 ulong_t real_len;
10558 int (*cb) (caddr_t);
10559 ddi_dma_cookie_t pkt_cookie;
10560 ddi_dma_cookie_t *cp;
10561 uint32_t cnt;
10562
10563 cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10564
10565 cmd_len = fpkt->pkt_cmdlen;
10566 resp_len = fpkt->pkt_rsplen;
10567
10568 ASSERT(fpkt->pkt_cmd_dma == NULL);
10569
10570 /* Allocation of a DMA handle used in subsequent calls. */
10571 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10572 cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10573 return (FC_FAILURE);
10574 }
10575
10576 /* A buffer is allocated that satisfies the DMA requirements. */
10577 rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10578 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10579 (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10580
10581 if (rval != DDI_SUCCESS) {
10582 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10583 return (FC_FAILURE);
10584 }
10585
10586 if (real_len < cmd_len) {
10587 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10588 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10589 return (FC_FAILURE);
10590 }
10591
10592 /* The buffer allocated is DMA bound. */
10593 rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10594 fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10595 cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10596
10597 if (rval != DDI_DMA_MAPPED) {
10598 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10599 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10600 return (FC_FAILURE);
10601 }
10602
10603 if (fpkt->pkt_cmd_cookie_cnt >
10604 pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10605 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10606 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10607 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10608 return (FC_FAILURE);
10609 }
10610
10611 ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10612
10613 /*
10614 * The buffer where the scatter/gather list is going to be built is
10615 * allocated.
10616 */
10617 cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10618 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10619 KM_NOSLEEP);
10620
10621 if (cp == NULL) {
10622 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10623 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10624 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10625 return (FC_FAILURE);
10626 }
10627
10628 /*
10629 * The scatter/gather list for the buffer we just allocated is built
10630 * here.
10631 */
10632 *cp = pkt_cookie;
10633 cp++;
10634
10635 for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10636 ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10637 &pkt_cookie);
10638 *cp = pkt_cookie;
10639 }
10640
10641 ASSERT(fpkt->pkt_resp_dma == NULL);
10642 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10643 cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10644 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10645 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10646 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10647 return (FC_FAILURE);
10648 }
10649
10650 rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10651 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10652 (caddr_t *)&fpkt->pkt_resp, &real_len,
10653 &fpkt->pkt_resp_acc);
10654
10655 if (rval != DDI_SUCCESS) {
10656 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10657 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10658 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10659 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10660 kmem_free(fpkt->pkt_cmd_cookie,
10661 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10662 return (FC_FAILURE);
10663 }
10664
10665 if (real_len < resp_len) {
10666 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10667 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10668 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10669 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10670 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10671 kmem_free(fpkt->pkt_cmd_cookie,
10672 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10673 return (FC_FAILURE);
10674 }
10675
10676 rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10677 fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10678 cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10679
10680 if (rval != DDI_DMA_MAPPED) {
10681 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10682 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10683 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10684 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10685 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10686 kmem_free(fpkt->pkt_cmd_cookie,
10687 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10688 return (FC_FAILURE);
10689 }
10690
10691 if (fpkt->pkt_resp_cookie_cnt >
10692 pptr->port_resp_dma_attr.dma_attr_sgllen) {
10693 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10694 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10695 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10696 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10697 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10698 kmem_free(fpkt->pkt_cmd_cookie,
10699 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10700 return (FC_FAILURE);
10701 }
10702
10703 ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10704
10705 cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10706 fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10707 KM_NOSLEEP);
10708
10709 if (cp == NULL) {
10710 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10711 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10712 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10713 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10714 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10715 kmem_free(fpkt->pkt_cmd_cookie,
10716 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10717 return (FC_FAILURE);
10718 }
10719
10720 *cp = pkt_cookie;
10721 cp++;
10722
10723 for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10724 ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10725 &pkt_cookie);
10726 *cp = pkt_cookie;
10727 }
10728
10729 return (FC_SUCCESS);
10730 }
10731
10732 /*
10733 * Function: fcp_free_cmd_resp
10734 *
10735 * Description: This function releases the FCP_CMD and FCP_RESP buffer
10736 * allocated by fcp_alloc_cmd_resp() and all the resources
10737 * associated with them. That includes the DMA resources and the
10738 * buffer allocated for the cookies of each one of them.
10739 *
10740 * Argument: *pptr FCP port context.
10741 * *fpkt fc packet containing the cmd and resp packet
10742 * to be released.
10743 *
10744 * Return Value: None
10745 *
10746 * Context: Interrupt, User and Kernel context.
10747 */
10748 /* ARGSUSED */
10749 static void
10750 fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
10751 {
10752 ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
10753
10754 if (fpkt->pkt_resp_dma) {
10755 (void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
10756 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10757 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10758 }
10759
10760 if (fpkt->pkt_resp_cookie) {
10761 kmem_free(fpkt->pkt_resp_cookie,
10762 fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
10763 fpkt->pkt_resp_cookie = NULL;
10764 }
10765
10766 if (fpkt->pkt_cmd_dma) {
10767 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10768 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10769 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10770 }
10771
10772 if (fpkt->pkt_cmd_cookie) {
10773 kmem_free(fpkt->pkt_cmd_cookie,
10774 fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
10775 fpkt->pkt_cmd_cookie = NULL;
10776 }
10777 }
10778
10779
10780 /*
10781 * called by the transport to do our own target initialization
10782 *
10783 * can acquire and release the global mutex
10784 */
10785 /* ARGSUSED */
10786 static int
10787 fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10788 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10789 {
10790 uchar_t *bytes;
10791 uint_t nbytes;
10792 uint16_t lun_num;
10793 struct fcp_tgt *ptgt;
10794 struct fcp_lun *plun;
10795 struct fcp_port *pptr = (struct fcp_port *)
10796 hba_tran->tran_hba_private;
10797
10798 ASSERT(pptr != NULL);
10799
10800 FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10801 FCP_BUF_LEVEL_8, 0,
10802 "fcp_phys_tgt_init: called for %s (instance %d)",
10803 ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10804
10805 /* get our port WWN property */
10806 bytes = NULL;
10807 if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10808 PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10809 (nbytes != FC_WWN_SIZE)) {
10810 /* no port WWN property */
10811 FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10812 FCP_BUF_LEVEL_8, 0,
10813 "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
10814 " for %s (instance %d): bytes=%p nbytes=%x",
10815 ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
10816 nbytes);
10817
10818 if (bytes != NULL) {
10819 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10820 }
10821
10822 return (DDI_NOT_WELL_FORMED);
10823 }
10824 ASSERT(bytes != NULL);
10825
10826 lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10827 LUN_PROP, 0xFFFF);
10828 if (lun_num == 0xFFFF) {
10829 FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10830 FCP_BUF_LEVEL_8, 0,
10831 "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
10832 " for %s (instance %d)", ddi_get_name(tgt_dip),
10833 ddi_get_instance(tgt_dip));
10834
10835 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10836 return (DDI_NOT_WELL_FORMED);
10837 }
10838
10839 mutex_enter(&pptr->port_mutex);
10840 if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10841 mutex_exit(&pptr->port_mutex);
10842 FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10843 FCP_BUF_LEVEL_8, 0,
10844 "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
10845 " for %s (instance %d)", ddi_get_name(tgt_dip),
10846 ddi_get_instance(tgt_dip));
10847
10848 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10849 return (DDI_FAILURE);
10850 }
10851
10852 ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10853 FC_WWN_SIZE) == 0);
10854 ASSERT(plun->lun_num == lun_num);
10855
10856 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10857
10858 ptgt = plun->lun_tgt;
10859
10860 mutex_enter(&ptgt->tgt_mutex);
10861 plun->lun_tgt_count++;
10862 scsi_device_hba_private_set(sd, plun);
10863 plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10864 plun->lun_sd = sd;
10865 mutex_exit(&ptgt->tgt_mutex);
10866 mutex_exit(&pptr->port_mutex);
10867
10868 return (DDI_SUCCESS);
10869 }
10870
10871 /*ARGSUSED*/
static int
fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	uchar_t *bytes;
	uint_t nbytes;
	uint16_t lun_num;
	struct fcp_tgt *ptgt;
	struct fcp_lun *plun;
	struct fcp_port *pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;
	child_info_t *cip;

	ASSERT(pptr != NULL);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
	    " (tgt_dip %p)", ddi_get_name(tgt_dip),
	    ddi_get_instance(tgt_dip), hba_dip, tgt_dip);

	/* an mpxio client must already have a pathinfo node attached */
	cip = (child_info_t *)sd->sd_pathinfo;
	if (cip == NULL) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		return (DDI_NOT_WELL_FORMED);
	}

	/* get our port WWN property */
	bytes = NULL;
	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
	    (nbytes != FC_WWN_SIZE)) {
		/* free any partially allocated property buffer */
		if (bytes) {
			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		}
		return (DDI_NOT_WELL_FORMED);
	}

	ASSERT(bytes != NULL);

	/* 0xFFFF is used as the "no property" sentinel for the LUN number */
	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
	    LUN_PROP, 0xFFFF);
	if (lun_num == 0xFFFF) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_NOT_WELL_FORMED);
	}

	/* look up the LUN by (port WWN, LUN number) under the port mutex */
	mutex_enter(&pptr->port_mutex);
	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
		mutex_exit(&pptr->port_mutex);
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_FAILURE);
	}

	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
	    FC_WWN_SIZE) == 0);
	ASSERT(plun->lun_num == lun_num);

	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);

	ptgt = plun->lun_tgt;

	/*
	 * Bind the scsi_device to the LUN, mirroring fcp_phys_tgt_init:
	 * refcount, HBA-private pointer, init flag and sd back-pointer.
	 * Lock order here is port_mutex -> tgt_mutex.
	 */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_tgt_count++;
	scsi_device_hba_private_set(sd, plun);
	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
	plun->lun_sd = sd;
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	return (DDI_SUCCESS);
}
10961
10962
10963 /*
10964 * called by the transport to do our own target initialization
10965 *
10966 * can acquire and release the global mutex
10967 */
10968 /* ARGSUSED */
10969 static int
10970 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10971 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10972 {
10973 struct fcp_port *pptr = (struct fcp_port *)
10974 hba_tran->tran_hba_private;
10975 int rval;
10976
10977 ASSERT(pptr != NULL);
10978
10979 /*
10980 * Child node is getting initialized. Look at the mpxio component
10981 * type on the child device to see if this device is mpxio managed
10982 * or not.
10983 */
10984 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
10985 rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10986 } else {
10987 rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10988 }
10989
10990 return (rval);
10991 }
10992
10993
10994 /* ARGSUSED */
10995 static void
10996 fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10997 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10998 {
10999 struct fcp_lun *plun = scsi_device_hba_private_get(sd);
11000 struct fcp_tgt *ptgt;
11001
11002 FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
11003 fcp_trace, FCP_BUF_LEVEL_8, 0,
11004 "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
11005 ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
11006 ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
11007
11008 if (plun == NULL) {
11009 return;
11010 }
11011 ptgt = plun->lun_tgt;
11012
11013 ASSERT(ptgt != NULL);
11014
11015 mutex_enter(&ptgt->tgt_mutex);
11016 ASSERT(plun->lun_tgt_count > 0);
11017
11018 if (--plun->lun_tgt_count == 0) {
11019 plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
11020 }
11021 plun->lun_sd = NULL;
11022 mutex_exit(&ptgt->tgt_mutex);
11023 }
11024
11025 /*
11026 * Function: fcp_scsi_start
11027 *
11028 * Description: This function is called by the target driver to request a
11029 * command to be sent.
11030 *
11031 * Argument: *ap SCSI address of the device.
11032 * *pkt SCSI packet containing the cmd to send.
11033 *
11034 * Return Value: TRAN_ACCEPT
11035 * TRAN_BUSY
11036 * TRAN_BADPKT
11037 * TRAN_FATAL_ERROR
11038 */
static int
fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct fcp_port *pptr = ADDR2FCP(ap);
	struct fcp_lun *plun = ADDR2LUN(ap);
	struct fcp_pkt *cmd = PKT2CMD(pkt);
	struct fcp_tgt *ptgt = plun->lun_tgt;
	int rval;

	/* ensure command isn't already issued */
	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_9, 0,
	    "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);

	/*
	 * It is strange that we enter the fcp_port mutex and the target
	 * mutex to check the lun state (which has a mutex of its own).
	 */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);

	/*
	 * If the device is offline and is not in the process of coming
	 * online, fail the request.
	 */

	if ((plun->lun_state & FCP_LUN_OFFLINE) &&
	    !(plun->lun_state & FCP_LUN_ONLINING)) {
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		/* a NULL pd suggests the remote port is gone for good */
		if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
			pkt->pkt_reason = CMD_DEV_GONE;
		}

		return (TRAN_FATAL_ERROR);
	}
	cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;

	/*
	 * If we are suspended, kernel is trying to dump, so don't
	 * block, fail or defer requests - send them down right away.
	 * NOTE: If we are in panic (i.e. trying to dump), we can't
	 * assume we have been suspended. There is hardware such as
	 * the v880 that doesn't do PM. Thus, the check for
	 * ddi_in_panic.
	 *
	 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
	 * of changing. So, if we can queue the packet, do it. Eventually,
	 * either the device will have gone away or changed and we can fail
	 * the request, or we can proceed if the device didn't change.
	 *
	 * If the pd in the target or the packet is NULL it's probably
	 * because the device has gone away, we allow the request to be
	 * put on the internal queue here in case the device comes back within
	 * the offline timeout. fctl will fix up the pd's if the tgt_pd_handle
	 * has gone NULL, while fcp deals cases where pkt_pd is NULL. pkt_pd
	 * could be NULL because the device was disappearing during or since
	 * packet initialization.
	 */

	if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
	    FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
	    (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
	    (ptgt->tgt_pd_handle == NULL) ||
	    (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
		/*
		 * If ((LUN is busy AND
		 * LUN not suspended AND
		 * The system is not in panic state) OR
		 * (The port is coming up))
		 *
		 * We check to see if the any of the flags FLAG_NOINTR or
		 * FLAG_NOQUEUE is set. If one of them is set the value
		 * returned will be TRAN_BUSY. If not, the request is queued.
		 */
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		/* see if using interrupts is allowed (so queueing'll work) */
		if (pkt->pkt_flags & FLAG_NOINTR) {
			pkt->pkt_resid = 0;
			return (TRAN_BUSY);
		}
		if (pkt->pkt_flags & FLAG_NOQUEUE) {
			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_9, 0,
			    "fcp_scsi_start: lun busy for pkt %p", pkt);
			return (TRAN_BUSY);
		}
#ifdef	DEBUG
		mutex_enter(&pptr->port_pkt_mutex);
		pptr->port_npkts++;
		mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

		/* go queue up the pkt for later */
		fcp_queue_pkt(pptr, cmd);
		return (TRAN_ACCEPT);
	}
	cmd->cmd_state = FCP_PKT_ISSUED;

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/*
	 * Now that we released the mutexes, what was protected by them can
	 * change.
	 */

	/*
	 * If there is a reconfiguration in progress, wait for it to complete.
	 */
	fcp_reconfig_wait(pptr);

	/* deadline for the watchdog; 0 means "no timeout requested" */
	cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
	    pkt->pkt_time : 0;

	/* prepare the packet */

	fcp_prepare_pkt(pptr, cmd, plun);

	/*
	 * No caller-supplied timeout: fall back to a very large default
	 * (5*60*60 - presumably seconds, i.e. 5 hours; TODO confirm unit).
	 */
	if (cmd->cmd_pkt->pkt_time) {
		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
	} else {
		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
	}

	/*
	 * if interrupts aren't allowed (e.g. at dump time) then we'll
	 * have to do polled I/O
	 */
	if (pkt->pkt_flags & FLAG_NOINTR) {
		cmd->cmd_state &= ~FCP_PKT_ISSUED;
		return (fcp_dopoll(pptr, cmd));
	}

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts++;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
	if (rval == FC_SUCCESS) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_9, 0,
		    "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
		return (TRAN_ACCEPT);
	}

	/* transport refused the packet: undo the issued state */
	cmd->cmd_state = FCP_PKT_IDLE;

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts--;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	/*
	 * For lack of clearer definitions, choose
	 * between TRAN_BUSY and TRAN_FATAL_ERROR.
	 */

	if (rval == FC_TRAN_BUSY) {
		pkt->pkt_resid = 0;
		rval = TRAN_BUSY;
	} else {
		mutex_enter(&ptgt->tgt_mutex);
		if (plun->lun_state & FCP_LUN_OFFLINE) {
			child_info_t *cip;

			mutex_enter(&plun->lun_mutex);
			cip = plun->lun_cip;
			mutex_exit(&plun->lun_mutex);

			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fcp_transport failed 2 for %x: %x; dip=%p",
			    plun->lun_tgt->tgt_d_id, rval, cip);

			rval = TRAN_FATAL_ERROR;
		} else {
			/* LUN still online: queue unless the caller forbids it */
			if (pkt->pkt_flags & FLAG_NOQUEUE) {
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_9, 0,
				    "fcp_scsi_start: FC_BUSY for pkt %p",
				    pkt);
				rval = TRAN_BUSY;
			} else {
				rval = TRAN_ACCEPT;
				fcp_queue_pkt(pptr, cmd);
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
	}

	return (rval);
}
11240
11241 /*
11242 * called by the transport to abort a packet
11243 */
11244 /*ARGSUSED*/
11245 static int
11246 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11247 {
11248 int tgt_cnt;
11249 struct fcp_port *pptr = ADDR2FCP(ap);
11250 struct fcp_lun *plun = ADDR2LUN(ap);
11251 struct fcp_tgt *ptgt = plun->lun_tgt;
11252
11253 if (pkt == NULL) {
11254 if (ptgt) {
11255 mutex_enter(&ptgt->tgt_mutex);
11256 tgt_cnt = ptgt->tgt_change_cnt;
11257 mutex_exit(&ptgt->tgt_mutex);
11258 fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11259 return (TRUE);
11260 }
11261 }
11262 return (FALSE);
11263 }
11264
11265
11266 /*
11267 * Perform reset
11268 */
11269 int
11270 fcp_scsi_reset(struct scsi_address *ap, int level)
11271 {
11272 int rval = 0;
11273 struct fcp_port *pptr = ADDR2FCP(ap);
11274 struct fcp_lun *plun = ADDR2LUN(ap);
11275 struct fcp_tgt *ptgt = plun->lun_tgt;
11276
11277 if (level == RESET_ALL) {
11278 if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
11279 rval = 1;
11280 }
11281 } else if (level == RESET_TARGET || level == RESET_LUN) {
11282 /*
11283 * If we are in the middle of discovery, return
11284 * SUCCESS as this target will be rediscovered
11285 * anyway
11286 */
11287 mutex_enter(&ptgt->tgt_mutex);
11288 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11289 mutex_exit(&ptgt->tgt_mutex);
11290 return (1);
11291 }
11292 mutex_exit(&ptgt->tgt_mutex);
11293
11294 if (fcp_reset_target(ap, level) == FC_SUCCESS) {
11295 rval = 1;
11296 }
11297 }
11298 return (rval);
11299 }
11300
11301
11302 /*
11303 * called by the framework to get a SCSI capability
11304 */
static int
fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
{
	/* doset == 0: query the capability named by "cap" */
	return (fcp_commoncap(ap, cap, 0, whom, 0));
}
11310
11311
11312 /*
11313 * called by the framework to set a SCSI capability
11314 */
static int
fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	/* doset == 1: set the capability named by "cap" to "value" */
	return (fcp_commoncap(ap, cap, value, whom, 1));
}
11320
11321 /*
11322 * Function: fcp_pkt_setup
11323 *
11324 * Description: This function sets up the scsi_pkt structure passed by the
11325 * caller. This function assumes fcp_pkt_constructor has been
11326 * called previously for the packet passed by the caller. If
11327 * successful this call will have the following results:
11328 *
11329 * - The resources needed that will be constant through out
11330 * the whole transaction are allocated.
11331 * - The fields that will be constant through out the whole
11332 * transaction are initialized.
11333 * - The scsi packet will be linked to the LUN structure
11334 * addressed by the transaction.
11335 *
11336 * Argument:
11337 * *pkt Pointer to a scsi_pkt structure.
11338 * callback
11339 * arg
11340 *
11341 * Return Value: 0 Success
11342 * !0 Failure
11343 *
11344 * Context: Kernel context or interrupt context
11345 */
11346 /* ARGSUSED */
11347 static int
11348 fcp_pkt_setup(struct scsi_pkt *pkt,
11349 int (*callback)(caddr_t arg),
11350 caddr_t arg)
11351 {
11352 struct fcp_pkt *cmd;
11353 struct fcp_port *pptr;
11354 struct fcp_lun *plun;
11355 struct fcp_tgt *ptgt;
11356 int kf;
11357 fc_packet_t *fpkt;
11358 fc_frame_hdr_t *hp;
11359
11360 pptr = ADDR2FCP(&pkt->pkt_address);
11361 plun = ADDR2LUN(&pkt->pkt_address);
11362 ptgt = plun->lun_tgt;
11363
11364 cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11365 fpkt = cmd->cmd_fp_pkt;
11366
11367 /*
11368 * this request is for dma allocation only
11369 */
11370 /*
11371 * First step of fcp_scsi_init_pkt: pkt allocation
11372 * We determine if the caller is willing to wait for the
11373 * resources.
11374 */
11375 kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
11376
11377 /*
11378 * Selective zeroing of the pkt.
11379 */
11380 cmd->cmd_back = NULL;
11381 cmd->cmd_next = NULL;
11382
11383 /*
11384 * Zero out fcp command
11385 */
11386 bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));
11387
11388 cmd->cmd_state = FCP_PKT_IDLE;
11389
11390 fpkt = cmd->cmd_fp_pkt;
11391 fpkt->pkt_data_acc = NULL;
11392
11393 /*
11394 * When port_state is FCP_STATE_OFFLINE, remote_port (tgt_pd_handle)
11395 * could be destroyed. We need fail pkt_setup.
11396 */
11397 if (pptr->port_state & FCP_STATE_OFFLINE) {
11398 return (-1);
11399 }
11400
11401 mutex_enter(&ptgt->tgt_mutex);
11402 fpkt->pkt_pd = ptgt->tgt_pd_handle;
11403
11404 if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
11405 != FC_SUCCESS) {
11406 mutex_exit(&ptgt->tgt_mutex);
11407 return (-1);
11408 }
11409
11410 mutex_exit(&ptgt->tgt_mutex);
11411
11412 /* Fill in the Fabric Channel Header */
11413 hp = &fpkt->pkt_cmd_fhdr;
11414 hp->r_ctl = R_CTL_COMMAND;
11415 hp->rsvd = 0;
11416 hp->type = FC_TYPE_SCSI_FCP;
11417 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
11418 hp->seq_id = 0;
11419 hp->df_ctl = 0;
11420 hp->seq_cnt = 0;
11421 hp->ox_id = 0xffff;
11422 hp->rx_id = 0xffff;
11423 hp->ro = 0;
11424
11425 /*
11426 * A doubly linked list (cmd_forw, cmd_back) is built
11427 * out of every allocated packet on a per-lun basis
11428 *
11429 * The packets are maintained in the list so as to satisfy
11430 * scsi_abort() requests. At present (which is unlikely to
11431 * change in the future) nobody performs a real scsi_abort
11432 * in the SCSI target drivers (as they don't keep the packets
11433 * after doing scsi_transport - so they don't know how to
11434 * abort a packet other than sending a NULL to abort all
11435 * outstanding packets)
11436 */
11437 mutex_enter(&plun->lun_mutex);
11438 if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
11439 plun->lun_pkt_head->cmd_back = cmd;
11440 } else {
11441 plun->lun_pkt_tail = cmd;
11442 }
11443 plun->lun_pkt_head = cmd;
11444 mutex_exit(&plun->lun_mutex);
11445 return (0);
11446 }
11447
11448 /*
11449 * Function: fcp_pkt_teardown
11450 *
11451 * Description: This function releases a scsi_pkt structure and all the
11452 * resources attached to it.
11453 *
11454 * Argument: *pkt Pointer to a scsi_pkt structure.
11455 *
11456 * Return Value: None
11457 *
11458 * Context: User, Kernel or Interrupt context.
11459 */
static void
fcp_pkt_teardown(struct scsi_pkt *pkt)
{
	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
	struct fcp_lun *plun = ADDR2LUN(&pkt->pkt_address);
	struct fcp_pkt *cmd = (struct fcp_pkt *)pkt->pkt_ha_private;

	/*
	 * Remove the packet from the per-lun list
	 */
	mutex_enter(&plun->lun_mutex);
	/* unlink from the predecessor, or advance the list head */
	if (cmd->cmd_back) {
		ASSERT(cmd != plun->lun_pkt_head);
		cmd->cmd_back->cmd_forw = cmd->cmd_forw;
	} else {
		ASSERT(cmd == plun->lun_pkt_head);
		plun->lun_pkt_head = cmd->cmd_forw;
	}

	/* unlink from the successor, or retreat the list tail */
	if (cmd->cmd_forw) {
		cmd->cmd_forw->cmd_back = cmd->cmd_back;
	} else {
		ASSERT(cmd == plun->lun_pkt_tail);
		plun->lun_pkt_tail = cmd->cmd_back;
	}

	mutex_exit(&plun->lun_mutex);

	/* release the transport resources bound in fcp_pkt_setup() */
	(void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
}
11490
11491 /*
11492 * Routine for reset notification setup, to register or cancel.
11493 * This function is called by SCSA
11494 */
11495 /*ARGSUSED*/
static int
fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	struct fcp_port *pptr = ADDR2FCP(ap);

	/*
	 * Delegate registration/cancellation to the common SCSA helper;
	 * the per-port list is protected by port_mutex.
	 */
	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
	    &pptr->port_mutex, &pptr->port_reset_notify_listf));
}
11505
11506
11507 static int
11508 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11509 ddi_eventcookie_t *event_cookiep)
11510 {
11511 struct fcp_port *pptr = fcp_dip2port(dip);
11512
11513 if (pptr == NULL) {
11514 return (DDI_FAILURE);
11515 }
11516
11517 return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11518 event_cookiep, NDI_EVENT_NOPASS));
11519 }
11520
11521
11522 static int
11523 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11524 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11525 ddi_callback_id_t *cb_id)
11526 {
11527 struct fcp_port *pptr = fcp_dip2port(dip);
11528
11529 if (pptr == NULL) {
11530 return (DDI_FAILURE);
11531 }
11532
11533 return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11534 eventid, callback, arg, NDI_SLEEP, cb_id));
11535 }
11536
11537
11538 static int
11539 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11540 {
11541
11542 struct fcp_port *pptr = fcp_dip2port(dip);
11543
11544 if (pptr == NULL) {
11545 return (DDI_FAILURE);
11546 }
11547 return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11548 }
11549
11550
11551 /*
11552 * called by the transport to post an event
11553 */
11554 static int
11555 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11556 ddi_eventcookie_t eventid, void *impldata)
11557 {
11558 struct fcp_port *pptr = fcp_dip2port(dip);
11559
11560 if (pptr == NULL) {
11561 return (DDI_FAILURE);
11562 }
11563
11564 return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11565 eventid, impldata));
11566 }
11567
11568
11569 /*
 * A target in many cases in Fibre Channel has a one to one relation
11571 * with a port identifier (which is also known as D_ID and also as AL_PA
11572 * in private Loop) On Fibre Channel-to-SCSI bridge boxes a target reset
11573 * will most likely result in resetting all LUNs (which means a reset will
11574 * occur on all the SCSI devices connected at the other end of the bridge)
11575 * That is the latest favorite topic for discussion, for, one can debate as
11576 * hot as one likes and come up with arguably a best solution to one's
11577 * satisfaction
11578 *
11579 * To stay on track and not digress much, here are the problems stated
11580 * briefly:
11581 *
11582 * SCSA doesn't define RESET_LUN, It defines RESET_TARGET, but the
11583 * target drivers use RESET_TARGET even if their instance is on a
11584 * LUN. Doesn't that sound a bit broken ?
11585 *
11586 * FCP SCSI (the current spec) only defines RESET TARGET in the
11587 * control fields of an FCP_CMND structure. It should have been
11588 * fixed right there, giving flexibility to the initiators to
11589 * minimize havoc that could be caused by resetting a target.
11590 */
11591 static int
11592 fcp_reset_target(struct scsi_address *ap, int level)
11593 {
11594 int rval = FC_FAILURE;
11595 char lun_id[25];
11596 struct fcp_port *pptr = ADDR2FCP(ap);
11597 struct fcp_lun *plun = ADDR2LUN(ap);
11598 struct fcp_tgt *ptgt = plun->lun_tgt;
11599 struct scsi_pkt *pkt;
11600 struct fcp_pkt *cmd;
11601 struct fcp_rsp *rsp;
11602 uint32_t tgt_cnt;
11603 struct fcp_rsp_info *rsp_info;
11604 struct fcp_reset_elem *p;
11605 int bval;
11606
11607 if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
11608 KM_NOSLEEP)) == NULL) {
11609 return (rval);
11610 }
11611
11612 mutex_enter(&ptgt->tgt_mutex);
11613 if (level == RESET_TARGET) {
11614 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11615 mutex_exit(&ptgt->tgt_mutex);
11616 kmem_free(p, sizeof (struct fcp_reset_elem));
11617 return (rval);
11618 }
11619 fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
11620 (void) strcpy(lun_id, " ");
11621 } else {
11622 if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
11623 mutex_exit(&ptgt->tgt_mutex);
11624 kmem_free(p, sizeof (struct fcp_reset_elem));
11625 return (rval);
11626 }
11627 fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
11628
11629 (void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
11630 }
11631 tgt_cnt = ptgt->tgt_change_cnt;
11632
11633 mutex_exit(&ptgt->tgt_mutex);
11634
11635 if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
11636 0, 0, NULL, 0)) == NULL) {
11637 kmem_free(p, sizeof (struct fcp_reset_elem));
11638 mutex_enter(&ptgt->tgt_mutex);
11639 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11640 mutex_exit(&ptgt->tgt_mutex);
11641 return (rval);
11642 }
11643 pkt->pkt_time = FCP_POLL_TIMEOUT;
11644
11645 /* fill in cmd part of packet */
11646 cmd = PKT2CMD(pkt);
11647 if (level == RESET_TARGET) {
11648 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
11649 } else {
11650 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
11651 }
11652 cmd->cmd_fp_pkt->pkt_comp = NULL;
11653 cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
11654
11655 /* prepare a packet for transport */
11656 fcp_prepare_pkt(pptr, cmd, plun);
11657
11658 if (cmd->cmd_pkt->pkt_time) {
11659 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11660 } else {
11661 cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11662 }
11663
11664 (void) fc_ulp_busy_port(pptr->port_fp_handle);
11665 bval = fcp_dopoll(pptr, cmd);
11666 fc_ulp_idle_port(pptr->port_fp_handle);
11667
11668 /* submit the packet */
11669 if (bval == TRAN_ACCEPT) {
11670 int error = 3;
11671
11672 rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
11673 rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
11674 sizeof (struct fcp_rsp));
11675
11676 if (rsp->fcp_u.fcp_status.rsp_len_set) {
11677 if (fcp_validate_fcp_response(rsp, pptr) ==
11678 FC_SUCCESS) {
11679 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
11680 FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
11681 sizeof (struct fcp_rsp), rsp_info,
11682 cmd->cmd_fp_pkt->pkt_resp_acc,
11683 sizeof (struct fcp_rsp_info));
11684 }
11685 if (rsp_info->rsp_code == FCP_NO_FAILURE) {
11686 rval = FC_SUCCESS;
11687 error = 0;
11688 } else {
11689 error = 1;
11690 }
11691 } else {
11692 error = 2;
11693 }
11694 }
11695
11696 switch (error) {
11697 case 0:
11698 fcp_log(CE_WARN, pptr->port_dip,
11699 "!FCP: WWN 0x%08x%08x %s reset successfully",
11700 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11701 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11702 break;
11703
11704 case 1:
11705 fcp_log(CE_WARN, pptr->port_dip,
11706 "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11707 " response code=%x",
11708 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11709 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11710 rsp_info->rsp_code);
11711 break;
11712
11713 case 2:
11714 fcp_log(CE_WARN, pptr->port_dip,
11715 "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11716 " Bad FCP response values: rsvd1=%x,"
11717 " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
11718 " rsplen=%x, senselen=%x",
11719 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11720 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11721 rsp->reserved_0, rsp->reserved_1,
11722 rsp->fcp_u.fcp_status.reserved_0,
11723 rsp->fcp_u.fcp_status.reserved_1,
11724 rsp->fcp_response_len, rsp->fcp_sense_len);
11725 break;
11726
11727 default:
11728 fcp_log(CE_WARN, pptr->port_dip,
11729 "!FCP: Reset to WWN 0x%08x%08x %s failed",
11730 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11731 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11732 break;
11733 }
11734 }
11735 scsi_destroy_pkt(pkt);
11736
11737 if (rval == FC_FAILURE) {
11738 mutex_enter(&ptgt->tgt_mutex);
11739 if (level == RESET_TARGET) {
11740 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11741 } else {
11742 fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
11743 }
11744 mutex_exit(&ptgt->tgt_mutex);
11745 kmem_free(p, sizeof (struct fcp_reset_elem));
11746 return (rval);
11747 }
11748
11749 mutex_enter(&pptr->port_mutex);
11750 if (level == RESET_TARGET) {
11751 p->tgt = ptgt;
11752 p->lun = NULL;
11753 } else {
11754 p->tgt = NULL;
11755 p->lun = plun;
11756 }
11757 p->tgt = ptgt;
11758 p->tgt_cnt = tgt_cnt;
11759 p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
11760 p->next = pptr->port_reset_list;
11761 pptr->port_reset_list = p;
11762
11763 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11764 fcp_trace, FCP_BUF_LEVEL_3, 0,
11765 "Notify ssd of the reset to reinstate the reservations");
11766
11767 scsi_hba_reset_notify_callback(&pptr->port_mutex,
11768 &pptr->port_reset_notify_listf);
11769
11770 mutex_exit(&pptr->port_mutex);
11771
11772 return (rval);
11773 }
11774
11775
11776 /*
11777 * called by fcp_getcap and fcp_setcap to get and set (respectively)
11778 * SCSI capabilities
11779 */
11780 /* ARGSUSED */
static int
fcp_commoncap(struct scsi_address *ap, char *cap,
    int val, int tgtonly, int doset)
{
	struct fcp_port *pptr = ADDR2FCP(ap);
	struct fcp_lun *plun = ADDR2LUN(ap);
	struct fcp_tgt *ptgt = plun->lun_tgt;
	int cidx;
	int rval = FALSE;

	if (cap == (char *)0) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_commoncap: invalid arg");
		return (rval);
	}

	/* map the capability name to a SCSI_CAP_* index */
	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
		return (UNDEFINED);
	}

	/*
	 * Process setcap request.
	 */
	if (doset) {
		/*
		 * At present, we can only set binary (0/1) values
		 */
		switch (cidx) {
		case SCSI_CAP_ARQ:
			/* ARQ is always on; report the requested state back */
			if (val == 0) {
				rval = FALSE;
			} else {
				rval = TRUE;
			}
			break;

		case SCSI_CAP_LUN_RESET:
			/* record per-LUN whether LUN reset may be used */
			if (val) {
				plun->lun_cap |= FCP_LUN_CAP_RESET;
			} else {
				plun->lun_cap &= ~FCP_LUN_CAP_RESET;
			}
			rval = TRUE;
			break;

		case SCSI_CAP_SECTOR_SIZE:
			rval = TRUE;
			break;
		default:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_4, 0,
			    "fcp_setcap: unsupported %d", cidx);
			rval = UNDEFINED;
			break;
		}

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "set cap: cap=%s, val/tgtonly/doset/rval = "
		    "0x%x/0x%x/0x%x/%d",
		    cap, val, tgtonly, doset, rval);

	} else {
		/*
		 * Process getcap request.
		 */
		switch (cidx) {
		case SCSI_CAP_DMA_MAX:
			rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;

			/*
			 * Need to make an adjustment qlc is uint_t 64
			 * st is int, so we will make the adjustment here
			 * being as nobody wants to touch this.
			 * It still leaves the max single block length
			 * of 2 gig. This should last .
			 */

			if (rval == -1) {
				rval = MAX_INT_DMA;
			}

			break;

		case SCSI_CAP_INITIATOR_ID:
			rval = pptr->port_id;
			break;

		case SCSI_CAP_ARQ:
		case SCSI_CAP_RESET_NOTIFICATION:
		case SCSI_CAP_TAGGED_QING:
			rval = TRUE;
			break;

		case SCSI_CAP_SCSI_VERSION:
			rval = 3;
			break;

		case SCSI_CAP_INTERCONNECT_TYPE:
			/* hard address 0 implies fabric-assigned addressing */
			if (FC_TOP_EXTERNAL(pptr->port_topology) ||
			    (ptgt->tgt_hard_addr == 0)) {
				rval = INTERCONNECT_FABRIC;
			} else {
				rval = INTERCONNECT_FIBRE;
			}
			break;

		case SCSI_CAP_LUN_RESET:
			rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
			    TRUE : FALSE;
			break;

		default:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_4, 0,
			    "fcp_getcap: unsupported %d", cidx);
			rval = UNDEFINED;
			break;
		}

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "get cap: cap=%s, val/tgtonly/doset/rval = "
		    "0x%x/0x%x/0x%x/%d",
		    cap, val, tgtonly, doset, rval);
	}

	return (rval);
}
11911
11912 /*
11913 * called by the transport to get the port-wwn and lun
11914 * properties of this device, and to create a "name" based on them
11915 *
11916 * these properties don't exist on sun4m
11917 *
11918 * return 1 for success else return 0
11919 */
11920 /* ARGSUSED */
11921 static int
11922 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11923 {
11924 int i;
11925 int *lun;
11926 int numChars;
11927 uint_t nlun;
11928 uint_t count;
11929 uint_t nbytes;
11930 uchar_t *bytes;
11931 uint16_t lun_num;
11932 uint32_t tgt_id;
11933 char **conf_wwn;
11934 char tbuf[(FC_WWN_SIZE << 1) + 1];
11935 uchar_t barray[FC_WWN_SIZE];
11936 dev_info_t *tgt_dip;
11937 struct fcp_tgt *ptgt;
11938 struct fcp_port *pptr;
11939 struct fcp_lun *plun;
11940
11941 ASSERT(sd != NULL);
11942 ASSERT(name != NULL);
11943
11944 tgt_dip = sd->sd_dev;
11945 pptr = ddi_get_soft_state(fcp_softstate,
11946 ddi_get_instance(ddi_get_parent(tgt_dip)));
11947 if (pptr == NULL) {
11948 return (0);
11949 }
11950
11951 ASSERT(tgt_dip != NULL);
11952
11953 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11954 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11955 LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11956 name[0] = '\0';
11957 return (0);
11958 }
11959
11960 if (nlun == 0) {
11961 ddi_prop_free(lun);
11962 return (0);
11963 }
11964
11965 lun_num = lun[0];
11966 ddi_prop_free(lun);
11967
11968 /*
11969 * Lookup for .conf WWN property
11970 */
11971 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
11972 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
11973 &conf_wwn, &count) == DDI_PROP_SUCCESS) {
11974 ASSERT(count >= 1);
11975
11976 fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
11977 ddi_prop_free(conf_wwn);
11978 mutex_enter(&pptr->port_mutex);
11979 if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
11980 mutex_exit(&pptr->port_mutex);
11981 return (0);
11982 }
11983 ptgt = plun->lun_tgt;
11984 mutex_exit(&pptr->port_mutex);
11985
11986 (void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
11987 tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
11988
11989 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
11990 ptgt->tgt_hard_addr != 0) {
11991 tgt_id = (uint32_t)fcp_alpa_to_switch[
11992 ptgt->tgt_hard_addr];
11993 } else {
11994 tgt_id = ptgt->tgt_d_id;
11995 }
11996
11997 (void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
11998 TARGET_PROP, tgt_id);
11999 }
12000
12001 /* get the our port-wwn property */
12002 bytes = NULL;
12003 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
12004 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12005 &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
12006 if (bytes != NULL) {
12007 ddi_prop_free(bytes);
12008 }
12009 return (0);
12010 }
12011
12012 for (i = 0; i < FC_WWN_SIZE; i++) {
12013 (void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
12014 }
12015
12016 /* Stick in the address of the form "wWWN,LUN" */
12017 numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
12018
12019 ASSERT(numChars < len);
12020 if (numChars >= len) {
12021 fcp_log(CE_WARN, pptr->port_dip,
12022 "!fcp_scsi_get_name: "
12023 "name parameter length too small, it needs to be %d",
12024 numChars+1);
12025 }
12026
12027 ddi_prop_free(bytes);
12028
12029 return (1);
12030 }
12031
12032
12033 /*
12034 * called by the transport to get the SCSI target id value, returning
12035 * it in "name"
12036 *
12037 * this isn't needed/used on sun4m
12038 *
12039 * return 1 for success else return 0
12040 */
12041 /* ARGSUSED */
12042 static int
12043 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
12044 {
12045 struct fcp_lun *plun = ADDR2LUN(&sd->sd_address);
12046 struct fcp_tgt *ptgt;
12047 int numChars;
12048
12049 if (plun == NULL) {
12050 return (0);
12051 }
12052
12053 if ((ptgt = plun->lun_tgt) == NULL) {
12054 return (0);
12055 }
12056
12057 numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
12058
12059 ASSERT(numChars < len);
12060 if (numChars >= len) {
12061 fcp_log(CE_WARN, NULL,
12062 "!fcp_scsi_get_bus_addr: "
12063 "name parameter length too small, it needs to be %d",
12064 numChars+1);
12065 }
12066
12067 return (1);
12068 }
12069
12070
12071 /*
12072 * called internally to reset the link where the specified port lives
12073 */
12074 static int
12075 fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
12076 {
12077 la_wwn_t wwn;
12078 struct fcp_lun *plun;
12079 struct fcp_tgt *ptgt;
12080
12081 /* disable restart of lip if we're suspended */
12082 mutex_enter(&pptr->port_mutex);
12083
12084 if (pptr->port_state & (FCP_STATE_SUSPENDED |
12085 FCP_STATE_POWER_DOWN)) {
12086 mutex_exit(&pptr->port_mutex);
12087 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12088 fcp_trace, FCP_BUF_LEVEL_2, 0,
12089 "fcp_linkreset, fcp%d: link reset "
12090 "disabled due to DDI_SUSPEND",
12091 ddi_get_instance(pptr->port_dip));
12092 return (FC_FAILURE);
12093 }
12094
12095 if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
12096 mutex_exit(&pptr->port_mutex);
12097 return (FC_SUCCESS);
12098 }
12099
12100 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
12101 fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");
12102
12103 /*
12104 * If ap == NULL assume local link reset.
12105 */
12106 if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
12107 plun = ADDR2LUN(ap);
12108 ptgt = plun->lun_tgt;
12109 bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
12110 } else {
12111 bzero((caddr_t)&wwn, sizeof (wwn));
12112 }
12113 mutex_exit(&pptr->port_mutex);
12114
12115 return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
12116 }
12117
12118
12119 /*
12120 * called from fcp_port_attach() to resume a port
12121 * return DDI_* success/failure status
12122 * acquires and releases the global mutex
12123 * acquires and releases the port mutex
12124 */
12125 /*ARGSUSED*/
12126
12127 static int
12128 fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
12129 uint32_t s_id, fc_attach_cmd_t cmd, int instance)
12130 {
12131 int res = DDI_FAILURE; /* default result */
12132 struct fcp_port *pptr; /* port state ptr */
12133 uint32_t alloc_cnt;
12134 uint32_t max_cnt;
12135 fc_portmap_t *tmp_list = NULL;
12136
12137 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
12138 FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
12139 instance);
12140
12141 if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
12142 cmn_err(CE_WARN, "fcp: bad soft state");
12143 return (res);
12144 }
12145
12146 mutex_enter(&pptr->port_mutex);
12147 switch (cmd) {
12148 case FC_CMD_RESUME:
12149 ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
12150 pptr->port_state &= ~FCP_STATE_SUSPENDED;
12151 break;
12152
12153 case FC_CMD_POWER_UP:
12154 /*
12155 * If the port is DDI_SUSPENded, defer rediscovery
12156 * until DDI_RESUME occurs
12157 */
12158 if (pptr->port_state & FCP_STATE_SUSPENDED) {
12159 pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12160 mutex_exit(&pptr->port_mutex);
12161 return (DDI_SUCCESS);
12162 }
12163 pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12164 }
12165 pptr->port_id = s_id;
12166 pptr->port_state = FCP_STATE_INIT;
12167 mutex_exit(&pptr->port_mutex);
12168
12169 /*
12170 * Make a copy of ulp_port_info as fctl allocates
12171 * a temp struct.
12172 */
12173 (void) fcp_cp_pinfo(pptr, pinfo);
12174
12175 mutex_enter(&fcp_global_mutex);
12176 if (fcp_watchdog_init++ == 0) {
12177 fcp_watchdog_tick = fcp_watchdog_timeout *
12178 drv_usectohz(1000000);
12179 fcp_watchdog_id = timeout(fcp_watch,
12180 NULL, fcp_watchdog_tick);
12181 }
12182 mutex_exit(&fcp_global_mutex);
12183
12184 /*
12185 * Handle various topologies and link states.
12186 */
12187 switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
12188 case FC_STATE_OFFLINE:
12189 /*
12190 * Wait for ONLINE, at which time a state
12191 * change will cause a statec_callback
12192 */
12193 res = DDI_SUCCESS;
12194 break;
12195
12196 case FC_STATE_ONLINE:
12197
12198 if (pptr->port_topology == FC_TOP_UNKNOWN) {
12199 (void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
12200 res = DDI_SUCCESS;
12201 break;
12202 }
12203
12204 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
12205 !fcp_enable_auto_configuration) {
12206 tmp_list = fcp_construct_map(pptr, &alloc_cnt);
12207 if (tmp_list == NULL) {
12208 if (!alloc_cnt) {
12209 res = DDI_SUCCESS;
12210 }
12211 break;
12212 }
12213 max_cnt = alloc_cnt;
12214 } else {
12215 ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
12216
12217 alloc_cnt = FCP_MAX_DEVICES;
12218
12219 if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
12220 (sizeof (fc_portmap_t)) * alloc_cnt,
12221 KM_NOSLEEP)) == NULL) {
12222 fcp_log(CE_WARN, pptr->port_dip,
12223 "!fcp%d: failed to allocate portmap",
12224 instance);
12225 break;
12226 }
12227
12228 max_cnt = alloc_cnt;
12229 if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
12230 &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
12231 FC_SUCCESS) {
12232 caddr_t msg;
12233
12234 (void) fc_ulp_error(res, &msg);
12235
12236 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12237 fcp_trace, FCP_BUF_LEVEL_2, 0,
12238 "resume failed getportmap: reason=0x%x",
12239 res);
12240
12241 fcp_log(CE_WARN, pptr->port_dip,
12242 "!failed to get port map : %s", msg);
12243 break;
12244 }
12245 if (max_cnt > alloc_cnt) {
12246 alloc_cnt = max_cnt;
12247 }
12248 }
12249
12250 /*
12251 * do the SCSI device discovery and create
12252 * the devinfos
12253 */
12254 fcp_statec_callback(ulph, pptr->port_fp_handle,
12255 pptr->port_phys_state, pptr->port_topology, tmp_list,
12256 max_cnt, pptr->port_id);
12257
12258 res = DDI_SUCCESS;
12259 break;
12260
12261 default:
12262 fcp_log(CE_WARN, pptr->port_dip,
12263 "!fcp%d: invalid port state at attach=0x%x",
12264 instance, pptr->port_phys_state);
12265
12266 mutex_enter(&pptr->port_mutex);
12267 pptr->port_phys_state = FCP_STATE_OFFLINE;
12268 mutex_exit(&pptr->port_mutex);
12269 res = DDI_SUCCESS;
12270
12271 break;
12272 }
12273
12274 if (tmp_list != NULL) {
12275 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
12276 }
12277
12278 return (res);
12279 }
12280
12281
12282 static void
12283 fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
12284 {
12285 pptr->port_fp_modlinkage = *pinfo->port_linkage;
12286 pptr->port_dip = pinfo->port_dip;
12287 pptr->port_fp_handle = pinfo->port_handle;
12288 if (pinfo->port_acc_attr != NULL) {
12289 /*
12290 * FCA supports DMA
12291 */
12292 pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
12293 pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
12294 pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
12295 pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
12296 }
12297 pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
12298 pptr->port_max_exch = pinfo->port_fca_max_exch;
12299 pptr->port_phys_state = pinfo->port_state;
12300 pptr->port_topology = pinfo->port_flags;
12301 pptr->port_reset_action = pinfo->port_reset_action;
12302 pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
12303 pptr->port_fcp_dma = pinfo->port_fcp_dma;
12304 bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
12305 bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
12306
12307 /* Clear FMA caps to avoid fm-capability ereport */
12308 if (pptr->port_cmd_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12309 pptr->port_cmd_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12310 if (pptr->port_data_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12311 pptr->port_data_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12312 if (pptr->port_resp_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12313 pptr->port_resp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12314 }
12315
12316 /*
12317 * If the elements wait field is set to 1 then
12318 * another thread is waiting for the operation to complete. Once
12319 * it is complete, the waiting thread is signaled and the element is
12320 * freed by the waiting thread. If the elements wait field is set to 0
12321 * the element is freed.
12322 */
12323 static void
12324 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12325 {
12326 ASSERT(elem != NULL);
12327 mutex_enter(&elem->mutex);
12328 elem->result = result;
12329 if (elem->wait) {
12330 elem->wait = 0;
12331 cv_signal(&elem->cv);
12332 mutex_exit(&elem->mutex);
12333 } else {
12334 mutex_exit(&elem->mutex);
12335 cv_destroy(&elem->cv);
12336 mutex_destroy(&elem->mutex);
12337 kmem_free(elem, sizeof (struct fcp_hp_elem));
12338 }
12339 }
12340
12341 /*
12342 * This function is invoked from the taskq thread to allocate
12343 * devinfo nodes and to online/offline them.
12344 */
12345 static void
12346 fcp_hp_task(void *arg)
12347 {
12348 struct fcp_hp_elem *elem = (struct fcp_hp_elem *)arg;
12349 struct fcp_lun *plun = elem->lun;
12350 struct fcp_port *pptr = elem->port;
12351 int result;
12352
12353 ASSERT(elem->what == FCP_ONLINE ||
12354 elem->what == FCP_OFFLINE ||
12355 elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
12356 elem->what == FCP_MPXIO_PATH_SET_BUSY);
12357
12358 mutex_enter(&pptr->port_mutex);
12359 mutex_enter(&plun->lun_mutex);
12360 if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
12361 plun->lun_event_count != elem->event_cnt) ||
12362 pptr->port_state & (FCP_STATE_SUSPENDED |
12363 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
12364 mutex_exit(&plun->lun_mutex);
12365 mutex_exit(&pptr->port_mutex);
12366 fcp_process_elem(elem, NDI_FAILURE);
12367 return;
12368 }
12369 mutex_exit(&plun->lun_mutex);
12370 mutex_exit(&pptr->port_mutex);
12371
12372 result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
12373 elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
12374 fcp_process_elem(elem, result);
12375 }
12376
12377
12378 static child_info_t *
12379 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12380 int tcount)
12381 {
12382 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12383
12384 if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12385 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12386
12387 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12388 /*
12389 * Child has not been created yet. Create the child device
12390 * based on the per-Lun flags.
12391 */
12392 if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12393 plun->lun_cip =
12394 CIP(fcp_create_dip(plun, lcount, tcount));
12395 plun->lun_mpxio = 0;
12396 } else {
12397 plun->lun_cip =
12398 CIP(fcp_create_pip(plun, lcount, tcount));
12399 plun->lun_mpxio = 1;
12400 }
12401 } else {
12402 plun->lun_cip = cip;
12403 }
12404
12405 return (plun->lun_cip);
12406 }
12407
12408
12409 static int
12410 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12411 {
12412 int rval = FC_FAILURE;
12413 dev_info_t *pdip;
12414 struct dev_info *dip;
12415 int circular;
12416
12417 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12418
12419 pdip = plun->lun_tgt->tgt_port->port_dip;
12420
12421 if (plun->lun_cip == NULL) {
12422 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12423 fcp_trace, FCP_BUF_LEVEL_3, 0,
12424 "fcp_is_dip_present: plun->lun_cip is NULL: "
12425 "plun: %p lun state: %x num: %d target state: %x",
12426 plun, plun->lun_state, plun->lun_num,
12427 plun->lun_tgt->tgt_port->port_state);
12428 return (rval);
12429 }
12430 ndi_devi_enter(pdip, &circular);
12431 dip = DEVI(pdip)->devi_child;
12432 while (dip) {
12433 if (dip == DEVI(cdip)) {
12434 rval = FC_SUCCESS;
12435 break;
12436 }
12437 dip = dip->devi_sibling;
12438 }
12439 ndi_devi_exit(pdip, circular);
12440 return (rval);
12441 }
12442
12443 static int
12444 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12445 {
12446 int rval = FC_FAILURE;
12447
12448 ASSERT(plun != NULL);
12449 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12450
12451 if (plun->lun_mpxio == 0) {
12452 rval = fcp_is_dip_present(plun, DIP(cip));
12453 } else {
12454 rval = fcp_is_pip_present(plun, PIP(cip));
12455 }
12456
12457 return (rval);
12458 }
12459
12460 /*
12461 * Function: fcp_create_dip
12462 *
12463 * Description: Creates a dev_info_t structure for the LUN specified by the
12464 * caller.
12465 *
12466 * Argument: plun Lun structure
12467 * link_cnt Link state count.
12468 * tgt_cnt Target state change count.
12469 *
12470 * Return Value: NULL if it failed
12471 * dev_info_t structure address if it succeeded
12472 *
12473 * Context: Kernel context
12474 */
12475 static dev_info_t *
12476 fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
12477 {
12478 int failure = 0;
12479 uint32_t tgt_id;
12480 uint64_t sam_lun;
12481 struct fcp_tgt *ptgt = plun->lun_tgt;
12482 struct fcp_port *pptr = ptgt->tgt_port;
12483 dev_info_t *pdip = pptr->port_dip;
12484 dev_info_t *cdip = NULL;
12485 dev_info_t *old_dip = DIP(plun->lun_cip);
12486 char *nname = NULL;
12487 char **compatible = NULL;
12488 int ncompatible;
12489 char *scsi_binding_set;
12490 char t_pwwn[17];
12491
12492 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12493 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12494
12495 /* get the 'scsi-binding-set' property */
12496 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
12497 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
12498 &scsi_binding_set) != DDI_PROP_SUCCESS) {
12499 scsi_binding_set = NULL;
12500 }
12501
12502 /* determine the node name and compatible */
12503 scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12504 plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12505 if (scsi_binding_set) {
12506 ddi_prop_free(scsi_binding_set);
12507 }
12508
12509 if (nname == NULL) {
12510 #ifdef DEBUG
12511 cmn_err(CE_WARN, "%s%d: no driver for "
12512 "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12513 " compatible: %s",
12514 ddi_driver_name(pdip), ddi_get_instance(pdip),
12515 ptgt->tgt_port_wwn.raw_wwn[0],
12516 ptgt->tgt_port_wwn.raw_wwn[1],
12517 ptgt->tgt_port_wwn.raw_wwn[2],
12518 ptgt->tgt_port_wwn.raw_wwn[3],
12519 ptgt->tgt_port_wwn.raw_wwn[4],
12520 ptgt->tgt_port_wwn.raw_wwn[5],
12521 ptgt->tgt_port_wwn.raw_wwn[6],
12522 ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12523 *compatible);
12524 #endif /* DEBUG */
12525 failure++;
12526 goto end_of_fcp_create_dip;
12527 }
12528
12529 cdip = fcp_find_existing_dip(plun, pdip, nname);
12530
12531 /*
12532 * if the old_dip does not match the cdip, that means there is
12533 * some property change. since we'll be using the cdip, we need
12534 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12535 * then the dtype for the device has been updated. Offline the
12536 * the old device and create a new device with the new device type
12537 * Refer to bug: 4764752
12538 */
12539 if (old_dip && (cdip != old_dip ||
12540 plun->lun_state & FCP_LUN_CHANGED)) {
12541 plun->lun_state &= ~(FCP_LUN_INIT);
12542 mutex_exit(&plun->lun_mutex);
12543 mutex_exit(&pptr->port_mutex);
12544
12545 mutex_enter(&ptgt->tgt_mutex);
12546 (void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
12547 link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
12548 mutex_exit(&ptgt->tgt_mutex);
12549
12550 #ifdef DEBUG
12551 if (cdip != NULL) {
12552 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12553 fcp_trace, FCP_BUF_LEVEL_2, 0,
12554 "Old dip=%p; New dip=%p don't match", old_dip,
12555 cdip);
12556 } else {
12557 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12558 fcp_trace, FCP_BUF_LEVEL_2, 0,
12559 "Old dip=%p; New dip=NULL don't match", old_dip);
12560 }
12561 #endif
12562
12563 mutex_enter(&pptr->port_mutex);
12564 mutex_enter(&plun->lun_mutex);
12565 }
12566
12567 if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12568 plun->lun_state &= ~(FCP_LUN_CHANGED);
12569 if (ndi_devi_alloc(pptr->port_dip, nname,
12570 DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
12571 failure++;
12572 goto end_of_fcp_create_dip;
12573 }
12574 }
12575
12576 /*
12577 * Previously all the properties for the devinfo were destroyed here
12578 * with a call to ndi_prop_remove_all(). Since this may cause loss of
12579 * the devid property (and other properties established by the target
12580 * driver or framework) which the code does not always recreate, this
12581 * call was removed.
12582 * This opens a theoretical possibility that we may return with a
12583 * stale devid on the node if the scsi entity behind the fibre channel
12584 * lun has changed.
12585 */
12586
12587 /* decorate the node with compatible */
12588 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
12589 "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
12590 failure++;
12591 goto end_of_fcp_create_dip;
12592 }
12593
12594 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
12595 ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12596 failure++;
12597 goto end_of_fcp_create_dip;
12598 }
12599
12600 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
12601 ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12602 failure++;
12603 goto end_of_fcp_create_dip;
12604 }
12605
12606 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12607 t_pwwn[16] = '\0';
12608 if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
12609 != DDI_PROP_SUCCESS) {
12610 failure++;
12611 goto end_of_fcp_create_dip;
12612 }
12613
12614 /*
12615 * If there is no hard address - We might have to deal with
12616 * that by using WWN - Having said that it is important to
12617 * recognize this problem early so ssd can be informed of
12618 * the right interconnect type.
12619 */
12620 if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
12621 tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12622 } else {
12623 tgt_id = ptgt->tgt_d_id;
12624 }
12625
12626 if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
12627 tgt_id) != DDI_PROP_SUCCESS) {
12628 failure++;
12629 goto end_of_fcp_create_dip;
12630 }
12631
12632 if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
12633 (int)plun->lun_num) != DDI_PROP_SUCCESS) {
12634 failure++;
12635 goto end_of_fcp_create_dip;
12636 }
12637 bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12638 if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
12639 sam_lun) != DDI_PROP_SUCCESS) {
12640 failure++;
12641 goto end_of_fcp_create_dip;
12642 }
12643
12644 end_of_fcp_create_dip:
12645 scsi_hba_nodename_compatible_free(nname, compatible);
12646
12647 if (cdip != NULL && failure) {
12648 (void) ndi_prop_remove_all(cdip);
12649 (void) ndi_devi_free(cdip);
12650 cdip = NULL;
12651 }
12652
12653 return (cdip);
12654 }
12655
12656 /*
12657 * Function: fcp_create_pip
12658 *
12659 * Description: Creates a Path Id for the LUN specified by the caller.
12660 *
12661 * Argument: plun Lun structure
12662 * link_cnt Link state count.
12663 * tgt_cnt Target state count.
12664 *
12665 * Return Value: NULL if it failed
12666 * mdi_pathinfo_t structure address if it succeeded
12667 *
12668 * Context: Kernel context
12669 */
12670 static mdi_pathinfo_t *
12671 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12672 {
12673 int i;
12674 char buf[MAXNAMELEN];
12675 char uaddr[MAXNAMELEN];
12676 int failure = 0;
12677 uint32_t tgt_id;
12678 uint64_t sam_lun;
12679 struct fcp_tgt *ptgt = plun->lun_tgt;
12680 struct fcp_port *pptr = ptgt->tgt_port;
12681 dev_info_t *pdip = pptr->port_dip;
12682 mdi_pathinfo_t *pip = NULL;
12683 mdi_pathinfo_t *old_pip = PIP(plun->lun_cip);
12684 char *nname = NULL;
12685 char **compatible = NULL;
12686 int ncompatible;
12687 char *scsi_binding_set;
12688 char t_pwwn[17];
12689
12690 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12691 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12692
12693 scsi_binding_set = "vhci";
12694
12695 /* determine the node name and compatible */
12696 scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12697 plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12698
12699 if (nname == NULL) {
12700 #ifdef DEBUG
12701 cmn_err(CE_WARN, "fcp_create_dip: %s%d: no driver for "
12702 "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12703 " compatible: %s",
12704 ddi_driver_name(pdip), ddi_get_instance(pdip),
12705 ptgt->tgt_port_wwn.raw_wwn[0],
12706 ptgt->tgt_port_wwn.raw_wwn[1],
12707 ptgt->tgt_port_wwn.raw_wwn[2],
12708 ptgt->tgt_port_wwn.raw_wwn[3],
12709 ptgt->tgt_port_wwn.raw_wwn[4],
12710 ptgt->tgt_port_wwn.raw_wwn[5],
12711 ptgt->tgt_port_wwn.raw_wwn[6],
12712 ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12713 *compatible);
12714 #endif /* DEBUG */
12715 failure++;
12716 goto end_of_fcp_create_pip;
12717 }
12718
12719 pip = fcp_find_existing_pip(plun, pdip);
12720
12721 /*
12722 * if the old_dip does not match the cdip, that means there is
12723 * some property change. since we'll be using the cdip, we need
12724 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12725 * then the dtype for the device has been updated. Offline the
12726 * the old device and create a new device with the new device type
12727 * Refer to bug: 4764752
12728 */
12729 if (old_pip && (pip != old_pip ||
12730 plun->lun_state & FCP_LUN_CHANGED)) {
12731 plun->lun_state &= ~(FCP_LUN_INIT);
12732 mutex_exit(&plun->lun_mutex);
12733 mutex_exit(&pptr->port_mutex);
12734
12735 mutex_enter(&ptgt->tgt_mutex);
12736 (void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12737 FCP_OFFLINE, lcount, tcount,
12738 NDI_DEVI_REMOVE, 0);
12739 mutex_exit(&ptgt->tgt_mutex);
12740
12741 if (pip != NULL) {
12742 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12743 fcp_trace, FCP_BUF_LEVEL_2, 0,
12744 "Old pip=%p; New pip=%p don't match",
12745 old_pip, pip);
12746 } else {
12747 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12748 fcp_trace, FCP_BUF_LEVEL_2, 0,
12749 "Old pip=%p; New pip=NULL don't match",
12750 old_pip);
12751 }
12752
12753 mutex_enter(&pptr->port_mutex);
12754 mutex_enter(&plun->lun_mutex);
12755 }
12756
12757 /*
12758 * Since FC_WWN_SIZE is 8 bytes and its not like the
12759 * lun_guid_size which is dependent on the target, I don't
12760 * believe the same trancation happens here UNLESS the standards
12761 * change the FC_WWN_SIZE value to something larger than
12762 * MAXNAMELEN(currently 255 bytes).
12763 */
12764
12765 for (i = 0; i < FC_WWN_SIZE; i++) {
12766 (void) sprintf(&buf[i << 1], "%02x",
12767 ptgt->tgt_port_wwn.raw_wwn[i]);
12768 }
12769
12770 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12771 buf, plun->lun_num);
12772
12773 if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12774 /*
12775 * Release the locks before calling into
12776 * mdi_pi_alloc_compatible() since this can result in a
12777 * callback into fcp which can result in a deadlock
12778 * (see bug # 4870272).
12779 *
12780 * Basically, what we are trying to avoid is the scenario where
12781 * one thread does ndi_devi_enter() and tries to grab
12782 * fcp_mutex and another does it the other way round.
12783 *
12784 * But before we do that, make sure that nobody releases the
12785 * port in the meantime. We can do this by setting a flag.
12786 */
12787 plun->lun_state &= ~(FCP_LUN_CHANGED);
12788 pptr->port_state |= FCP_STATE_IN_MDI;
12789 mutex_exit(&plun->lun_mutex);
12790 mutex_exit(&pptr->port_mutex);
12791 if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12792 uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12793 fcp_log(CE_WARN, pptr->port_dip,
12794 "!path alloc failed:0x%x", plun);
12795 mutex_enter(&pptr->port_mutex);
12796 mutex_enter(&plun->lun_mutex);
12797 pptr->port_state &= ~FCP_STATE_IN_MDI;
12798 failure++;
12799 goto end_of_fcp_create_pip;
12800 }
12801 mutex_enter(&pptr->port_mutex);
12802 mutex_enter(&plun->lun_mutex);
12803 pptr->port_state &= ~FCP_STATE_IN_MDI;
12804 } else {
12805 (void) mdi_prop_remove(pip, NULL);
12806 }
12807
12808 mdi_pi_set_phci_private(pip, (caddr_t)plun);
12809
12810 if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12811 ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12812 != DDI_PROP_SUCCESS) {
12813 failure++;
12814 goto end_of_fcp_create_pip;
12815 }
12816
12817 if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12818 ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12819 != DDI_PROP_SUCCESS) {
12820 failure++;
12821 goto end_of_fcp_create_pip;
12822 }
12823
12824 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12825 t_pwwn[16] = '\0';
12826 if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12827 != DDI_PROP_SUCCESS) {
12828 failure++;
12829 goto end_of_fcp_create_pip;
12830 }
12831
12832 /*
12833 * If there is no hard address - We might have to deal with
12834 * that by using WWN - Having said that it is important to
12835 * recognize this problem early so ssd can be informed of
12836 * the right interconnect type.
12837 */
12838 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12839 ptgt->tgt_hard_addr != 0) {
12840 tgt_id = (uint32_t)
12841 fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12842 } else {
12843 tgt_id = ptgt->tgt_d_id;
12844 }
12845
12846 if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12847 != DDI_PROP_SUCCESS) {
12848 failure++;
12849 goto end_of_fcp_create_pip;
12850 }
12851
12852 if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12853 != DDI_PROP_SUCCESS) {
12854 failure++;
12855 goto end_of_fcp_create_pip;
12856 }
12857 bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12858 if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12859 != DDI_PROP_SUCCESS) {
12860 failure++;
12861 goto end_of_fcp_create_pip;
12862 }
12863
12864 end_of_fcp_create_pip:
12865 scsi_hba_nodename_compatible_free(nname, compatible);
12866
12867 if (pip != NULL && failure) {
12868 (void) mdi_prop_remove(pip, NULL);
12869 mutex_exit(&plun->lun_mutex);
12870 mutex_exit(&pptr->port_mutex);
12871 (void) mdi_pi_free(pip, 0);
12872 mutex_enter(&pptr->port_mutex);
12873 mutex_enter(&plun->lun_mutex);
12874 pip = NULL;
12875 }
12876
12877 return (pip);
12878 }
12879
/*
 * Search the port devinfo's children for an existing node whose node
 * name is 'name' and whose node-wwn, port-wwn, target and lun
 * properties all match the given LUN.  Returns the matching devinfo
 * or NULL if none matches.
 */
static dev_info_t *
fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
{
	uint_t nbytes;
	uchar_t *bytes;
	uint_t nwords;
	uint32_t tgt_id;
	int *words;
	dev_info_t *cdip;
	dev_info_t *ndip;
	struct fcp_tgt *ptgt = plun->lun_tgt;
	struct fcp_port *pptr = ptgt->tgt_port;
	int circular;

	/* hold the parent while scanning its child list */
	ndi_devi_enter(pdip, &circular);

	ndip = (dev_info_t *)DEVI(pdip)->devi_child;
	while ((cdip = ndip) != NULL) {
		ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;

		/* node name must match first */
		if (strcmp(DEVI(cdip)->devi_node_name, name)) {
			continue;
		}

		/* compare the node WWN property */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
		    &nbytes) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
			if (bytes != NULL) {
				ddi_prop_free(bytes);
			}
			continue;
		}
		ASSERT(bytes != NULL);

		if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
			ddi_prop_free(bytes);
			continue;
		}

		ddi_prop_free(bytes);

		/* compare the port WWN property */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
		    &nbytes) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
			if (bytes != NULL) {
				ddi_prop_free(bytes);
			}
			continue;
		}
		ASSERT(bytes != NULL);

		if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
			ddi_prop_free(bytes);
			continue;
		}

		ddi_prop_free(bytes);

		/* compare the target id property */
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
		    &nwords) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nwords != 1 || words == NULL) {
			if (words != NULL) {
				ddi_prop_free(words);
			}
			continue;
		}
		ASSERT(words != NULL);

		/*
		 * If there is no hard address - We might have to deal with
		 * that by using WWN - Having said that it is important to
		 * recognize this problem early so ssd can be informed of
		 * the right interconnect type.
		 */
		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
		    ptgt->tgt_hard_addr != 0) {
			tgt_id =
			    (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
		} else {
			tgt_id = ptgt->tgt_d_id;
		}

		if (tgt_id != (uint32_t)*words) {
			ddi_prop_free(words);
			continue;
		}
		ddi_prop_free(words);

		/* finally compare the LUN number property */
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
		    &nwords) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nwords != 1 || words == NULL) {
			if (words != NULL) {
				ddi_prop_free(words);
			}
			continue;
		}
		ASSERT(words != NULL);

		/* everything matched: cdip is the node we want */
		if (plun->lun_num == (uint16_t)*words) {
			ddi_prop_free(words);
			break;
		}
		ddi_prop_free(words);
	}
	ndi_devi_exit(pdip, circular);

	return (cdip);
}
13004
13005
13006 static int
13007 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
13008 {
13009 dev_info_t *pdip;
13010 char buf[MAXNAMELEN];
13011 char uaddr[MAXNAMELEN];
13012 int rval = FC_FAILURE;
13013
13014 ASSERT(MUTEX_HELD(&plun->lun_mutex));
13015
13016 pdip = plun->lun_tgt->tgt_port->port_dip;
13017
13018 /*
13019 * Check if pip (and not plun->lun_cip) is NULL. plun->lun_cip can be
13020 * non-NULL even when the LUN is not there as in the case when a LUN is
13021 * configured and then deleted on the device end (for T3/T4 case). In
13022 * such cases, pip will be NULL.
13023 *
13024 * If the device generates an RSCN, it will end up getting offlined when
13025 * it disappeared and a new LUN will get created when it is rediscovered
13026 * on the device. If we check for lun_cip here, the LUN will not end
13027 * up getting onlined since this function will end up returning a
13028 * FC_SUCCESS.
13029 *
13030 * The behavior is different on other devices. For instance, on a HDS,
13031 * there was no RSCN generated by the device but the next I/O generated
13032 * a check condition and rediscovery got triggered that way. So, in
13033 * such cases, this path will not be exercised
13034 */
13035 if (pip == NULL) {
13036 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
13037 fcp_trace, FCP_BUF_LEVEL_4, 0,
13038 "fcp_is_pip_present: plun->lun_cip is NULL: "
13039 "plun: %p lun state: %x num: %d target state: %x",
13040 plun, plun->lun_state, plun->lun_num,
13041 plun->lun_tgt->tgt_port->port_state);
13042 return (rval);
13043 }
13044
13045 fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
13046
13047 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13048
13049 if (plun->lun_old_guid) {
13050 if (mdi_pi_find(pdip, plun->lun_old_guid, uaddr) == pip) {
13051 rval = FC_SUCCESS;
13052 }
13053 } else {
13054 if (mdi_pi_find(pdip, plun->lun_guid, uaddr) == pip) {
13055 rval = FC_SUCCESS;
13056 }
13057 }
13058 return (rval);
13059 }
13060
13061 static mdi_pathinfo_t *
13062 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
13063 {
13064 char buf[MAXNAMELEN];
13065 char uaddr[MAXNAMELEN];
13066 mdi_pathinfo_t *pip;
13067 struct fcp_tgt *ptgt = plun->lun_tgt;
13068 struct fcp_port *pptr = ptgt->tgt_port;
13069
13070 ASSERT(MUTEX_HELD(&pptr->port_mutex));
13071
13072 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
13073 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13074
13075 pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
13076
13077 return (pip);
13078 }
13079
13080
/*
 * Function: fcp_online_child
 *
 * Description: Brings a LUN's child node online.  When plun->lun_mpxio is
 *     zero the child is a plain devinfo node and is onlined (or, if the
 *     parent is not attached yet, merely bound) through the NDI; otherwise
 *     the child is an MPxIO pathinfo node onlined through the MDI.  If MDI
 *     reports MDI_NOT_SUPPORTED, the pathinfo node is freed and the LUN is
 *     re-enumerated in legacy mode by creating a devinfo node and jumping
 *     back to the top ("again" label).
 *
 * Argument: *plun   LUN whose child is to be onlined.
 *     *cip    Child node (devinfo or pathinfo) of the LUN.
 *     lcount  Link state counter, used if a devinfo node must be created.
 *     tcount  Target state counter, used if a devinfo node must be created.
 *     flags   NDI/MDI online flags.
 *     *circ   Circular-dependency cookie for mdi_devi_enter/exit_phci.
 *
 * Return Value: NDI_SUCCESS or NDI_FAILURE.
 *
 * Context: Kernel context.  Entered with pptr->port_mutex and
 *     plun->lun_mutex held; both are dropped around the online calls and
 *     are held again on return.
 */
static int
fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags, int *circ)
{
    int rval;
    struct fcp_port *pptr = plun->lun_tgt->tgt_port;
    struct fcp_tgt *ptgt = plun->lun_tgt;
    dev_info_t *cdip = NULL;

    ASSERT(MUTEX_HELD(&pptr->port_mutex));
    ASSERT(MUTEX_HELD(&plun->lun_mutex));

    /* Nothing to online if the LUN no longer has a child node. */
    if (plun->lun_cip == NULL) {
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_3, 0,
            "fcp_online_child: plun->lun_cip is NULL: "
            "plun: %p state: %x num: %d target state: %x",
            plun, plun->lun_state, plun->lun_num,
            plun->lun_tgt->tgt_port->port_state);
        return (NDI_FAILURE);
    }
again:
    if (plun->lun_mpxio == 0) {
        /* Legacy (non-MPxIO) path: online the devinfo node via NDI. */
        cdip = DIP(cip);
        /* Drop both locks: ndi_devi_online may block/re-enter. */
        mutex_exit(&plun->lun_mutex);
        mutex_exit(&pptr->port_mutex);

        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_3, 0,
            "!Invoking ndi_devi_online for %s: target=%x lun=%x",
            ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);

        /*
         * We could check for FCP_LUN_INIT here but chances
         * of getting here when it's already in FCP_LUN_INIT
         * is rare and a duplicate ndi_devi_online wouldn't
         * hurt either (as the node would already have been
         * in CF2)
         */
        if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
            /* Parent not attached yet: only bind a driver now. */
            rval = ndi_devi_bind_driver(cdip, flags);
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_3, 0,
                "!Invoking ndi_devi_bind_driver: rval=%d", rval);
        } else {
            rval = ndi_devi_online(cdip, flags);
        }

        /*
         * We log the message into trace buffer if the device
         * is "ses" and into syslog for any other device
         * type. This is to prevent the ndi_devi_online failure
         * message that appears for V880/A5K ses devices.
         */
        if (rval == NDI_SUCCESS) {
            mutex_enter(&ptgt->tgt_mutex);
            plun->lun_state |= FCP_LUN_INIT;
            mutex_exit(&ptgt->tgt_mutex);
        } else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
            fcp_log(CE_NOTE, pptr->port_dip,
                "!ndi_devi_online:"
                " failed for %s: target=%x lun=%x %x",
                ddi_get_name(cdip), ptgt->tgt_d_id,
                plun->lun_num, rval);
        } else {
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_3, 0,
                " !ndi_devi_online:"
                " failed for %s: target=%x lun=%x %x",
                ddi_get_name(cdip), ptgt->tgt_d_id,
                plun->lun_num, rval);
        }
    } else {
        /* MPxIO path: online the pathinfo node via MDI. */
        cdip = mdi_pi_get_client(PIP(cip));
        mutex_exit(&plun->lun_mutex);
        mutex_exit(&pptr->port_mutex);

        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_3, 0,
            "!Invoking mdi_pi_online for %s: target=%x lun=%x",
            ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);

        /*
         * Hold path and exit phci to avoid deadlock with power
         * management code during mdi_pi_online.
         */
        mdi_hold_path(PIP(cip));
        mdi_devi_exit_phci(pptr->port_dip, *circ);

        rval = mdi_pi_online(PIP(cip), flags);

        mdi_devi_enter_phci(pptr->port_dip, circ);
        mdi_rele_path(PIP(cip));

        if (rval == MDI_SUCCESS) {
            mutex_enter(&ptgt->tgt_mutex);
            plun->lun_state |= FCP_LUN_INIT;
            mutex_exit(&ptgt->tgt_mutex);

            /*
             * Clear MPxIO path permanent disable in case
             * fcp hotplug dropped the offline event.
             */
            (void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);

        } else if (rval == MDI_NOT_SUPPORTED) {
            child_info_t *old_cip = cip;

            /*
             * MPxIO does not support this device yet.
             * Enumerate in legacy mode.
             */
            mutex_enter(&pptr->port_mutex);
            mutex_enter(&plun->lun_mutex);
            plun->lun_mpxio = 0;
            plun->lun_cip = NULL;
            /* Replace the pathinfo child with a devinfo node. */
            cdip = fcp_create_dip(plun, lcount, tcount);
            plun->lun_cip = cip = CIP(cdip);
            if (cip == NULL) {
                fcp_log(CE_WARN, pptr->port_dip,
                    "!fcp_online_child: "
                    "Create devinfo failed for LU=%p", plun);
                mutex_exit(&plun->lun_mutex);

                mutex_enter(&ptgt->tgt_mutex);
                plun->lun_state |= FCP_LUN_OFFLINE;
                mutex_exit(&ptgt->tgt_mutex);

                mutex_exit(&pptr->port_mutex);

                /*
                 * free the mdi_pathinfo node
                 */
                (void) mdi_pi_free(PIP(old_cip), 0);
            } else {
                FCP_TRACE(fcp_logq, pptr->port_instbuf,
                    fcp_trace, FCP_BUF_LEVEL_3, 0,
                    "fcp_online_child: creating devinfo "
                    "node 0x%p for plun 0x%p",
                    cip, plun);
                mutex_exit(&plun->lun_mutex);
                mutex_exit(&pptr->port_mutex);
                /*
                 * free the mdi_pathinfo node
                 */
                (void) mdi_pi_free(PIP(old_cip), 0);
                mutex_enter(&pptr->port_mutex);
                mutex_enter(&plun->lun_mutex);
                /* Retry the online with the new devinfo child. */
                goto again;
            }
        } else {
            if (cdip) {
                fcp_log(CE_NOTE, pptr->port_dip,
                    "!fcp_online_child: mdi_pi_online:"
                    " failed for %s: target=%x lun=%x %x",
                    ddi_get_name(cdip), ptgt->tgt_d_id,
                    plun->lun_num, rval);
            }
        }
        /* Normalize the MDI return code to the NDI convention. */
        rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
    }

    if (rval == NDI_SUCCESS) {
        if (cdip) {
            /* Fire the insert event callbacks for the new child. */
            (void) ndi_event_retrieve_cookie(
                pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
                &fcp_insert_eid, NDI_EVENT_NOPASS);
            (void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
                cdip, fcp_insert_eid, NULL);
        }
    }
    /* Re-acquire the locks the caller expects to still hold. */
    mutex_enter(&pptr->port_mutex);
    mutex_enter(&plun->lun_mutex);
    return (rval);
}
13256
/*
 * Function: fcp_offline_child
 *
 * Description: Takes a LUN's child node offline: the devinfo node via
 *     ndi_devi_offline for legacy enumeration, or the pathinfo node via
 *     mdi_pi_offline for MPxIO.  On success with NDI_DEVI_REMOVE the
 *     LUN's reference to the child is cleared (unless the GUID changed
 *     and lun_cip already points at the new child) and, for MPxIO, the
 *     pathinfo node is freed.
 *
 * Argument: *plun   LUN whose child is to be offlined.
 *     *cip    Child node (devinfo or pathinfo) of the LUN.
 *     lcount  Unused (see ARGSUSED).
 *     tcount  Unused (see ARGSUSED).
 *     flags   NDI/MDI offline flags (may include NDI_DEVI_REMOVE).
 *     *circ   Circular-dependency cookie for mdi_devi_enter/exit_phci.
 *
 * Return Value: NDI_SUCCESS or NDI_FAILURE.
 *
 * Context: Kernel context.  Entered with plun->lun_mutex and
 *     pptr->port_mutex held; both are dropped around the offline calls
 *     and are held again on return.
 */
/* ARGSUSED */
static int
fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags, int *circ)
{
    int rval;
    int lun_mpxio;
    struct fcp_port *pptr = plun->lun_tgt->tgt_port;
    struct fcp_tgt *ptgt = plun->lun_tgt;
    dev_info_t *cdip;

    ASSERT(MUTEX_HELD(&plun->lun_mutex));
    ASSERT(MUTEX_HELD(&pptr->port_mutex));

    if (plun->lun_cip == NULL) {
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_3, 0,
            "fcp_offline_child: plun->lun_cip is NULL: "
            "plun: %p lun state: %x num: %d target state: %x",
            plun, plun->lun_state, plun->lun_num,
            plun->lun_tgt->tgt_port->port_state);
        return (NDI_FAILURE);
    }

    /*
     * We will use this value twice. Make a copy to be sure we use
     * the same value in both places.
     */
    lun_mpxio = plun->lun_mpxio;

    if (lun_mpxio == 0) {
        /* Legacy path: offline the devinfo node via NDI. */
        cdip = DIP(cip);
        mutex_exit(&plun->lun_mutex);
        mutex_exit(&pptr->port_mutex);
        rval = ndi_devi_offline(DIP(cip), flags);
        if (rval != NDI_SUCCESS) {
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_3, 0,
                "fcp_offline_child: ndi_devi_offline failed "
                "rval=%x cip=%p", rval, cip);
        }
    } else {
        /* MPxIO path: offline the pathinfo node via MDI. */
        cdip = mdi_pi_get_client(PIP(cip));
        mutex_exit(&plun->lun_mutex);
        mutex_exit(&pptr->port_mutex);

        /*
         * Exit phci to avoid deadlock with power management code
         * during mdi_pi_offline
         */
        mdi_hold_path(PIP(cip));
        mdi_devi_exit_phci(pptr->port_dip, *circ);

        rval = mdi_pi_offline(PIP(cip), flags);

        mdi_devi_enter_phci(pptr->port_dip, circ);
        mdi_rele_path(PIP(cip));

        /* Normalize the MDI return code to the NDI convention. */
        rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
    }

    /* The child is no longer initialized, whatever the outcome. */
    mutex_enter(&ptgt->tgt_mutex);
    plun->lun_state &= ~FCP_LUN_INIT;
    mutex_exit(&ptgt->tgt_mutex);

    if (rval == NDI_SUCCESS) {
        /* cdip is reused below as a "log failure" flag; clear it. */
        cdip = NULL;
        if (flags & NDI_DEVI_REMOVE) {
            mutex_enter(&plun->lun_mutex);
            /*
             * If the guid of the LUN changes, lun_cip will not
             * equal to cip, and after offlining the LUN with the
             * old guid, we should keep lun_cip since it's the cip
             * of the LUN with the new guid.
             * Otherwise remove our reference to child node.
             *
             * This must be done before the child node is freed,
             * otherwise other threads could see a stale lun_cip
             * pointer.
             */
            if (plun->lun_cip == cip) {
                plun->lun_cip = NULL;
            }
            if (plun->lun_old_guid) {
                kmem_free(plun->lun_old_guid,
                    plun->lun_old_guid_size);
                plun->lun_old_guid = NULL;
                plun->lun_old_guid_size = 0;
            }
            mutex_exit(&plun->lun_mutex);
        }
    }

    if (lun_mpxio != 0) {
        if (rval == NDI_SUCCESS) {
            /*
             * Clear MPxIO path permanent disable as the path is
             * already offlined.
             */
            (void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);

            if (flags & NDI_DEVI_REMOVE) {
                (void) mdi_pi_free(PIP(cip), 0);
            }
        } else {
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_3, 0,
                "fcp_offline_child: mdi_pi_offline failed "
                "rval=%x cip=%p", rval, cip);
        }
    }

    /* Re-acquire the locks the caller expects to still hold. */
    mutex_enter(&pptr->port_mutex);
    mutex_enter(&plun->lun_mutex);

    /* Non-NULL cdip here means the offline failed: trace it. */
    if (cdip) {
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
            " target=%x lun=%x", "ndi_offline",
            ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
    }

    return (rval);
}
13381
/*
 * Function: fcp_remove_child
 *
 * Description: Removes a LUN's child node.  For a legacy devinfo child
 *     the node's properties are removed and the node freed; for an MPxIO
 *     pathinfo child the path is offlined (with NDI_DEVI_REMOVE) and
 *     freed, which requires temporarily dropping all three FCP mutexes
 *     and doing the mdi_devi_enter/exit + phci exit dance to avoid
 *     deadlocks with power management.  In all cases plun->lun_cip ends
 *     up NULL.
 *
 * Argument: *plun  LUN whose child is to be removed; lun_mutex (and, on
 *     the MPxIO path, tgt_mutex and port_mutex) must be held.
 *
 * Context: Kernel context.
 */
static void
fcp_remove_child(struct fcp_lun *plun)
{
    child_info_t *cip;
    int circ;

    ASSERT(MUTEX_HELD(&plun->lun_mutex));

    if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
        if (plun->lun_mpxio == 0) {
            /* Legacy devinfo child: strip properties and free. */
            (void) ndi_prop_remove_all(DIP(plun->lun_cip));
            (void) ndi_devi_free(DIP(plun->lun_cip));
            plun->lun_cip = NULL;
        } else {
            /*
             * Clear reference to the child node in the lun.
             * This must be done before freeing it with mdi_pi_free
             * and with lun_mutex held so that other threads always
             * see either valid lun_cip or NULL when holding
             * lun_mutex. We keep a copy in cip.
             */
            cip = plun->lun_cip;
            plun->lun_cip = NULL;

            /*
             * Drop all three locks (innermost first) before the
             * blocking MDI calls below; they are re-acquired in
             * the opposite order at the end.
             */
            mutex_exit(&plun->lun_mutex);
            mutex_exit(&plun->lun_tgt->tgt_mutex);
            mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);

            mdi_devi_enter(
                plun->lun_tgt->tgt_port->port_dip, &circ);

            /*
             * Exit phci to avoid deadlock with power management
             * code during mdi_pi_offline
             */
            mdi_hold_path(PIP(cip));
            mdi_devi_exit_phci(
                plun->lun_tgt->tgt_port->port_dip, circ);
            (void) mdi_pi_offline(PIP(cip),
                NDI_DEVI_REMOVE);
            mdi_devi_enter_phci(
                plun->lun_tgt->tgt_port->port_dip, &circ);
            mdi_rele_path(PIP(cip));

            mdi_devi_exit(
                plun->lun_tgt->tgt_port->port_dip, circ);

            FCP_TRACE(fcp_logq,
                plun->lun_tgt->tgt_port->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_3, 0,
                "lun=%p pip freed %p", plun, cip);

            (void) mdi_prop_remove(PIP(cip), NULL);
            (void) mdi_pi_free(PIP(cip), 0);

            /* Re-acquire the locks in outer-to-inner order. */
            mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
            mutex_enter(&plun->lun_tgt->tgt_mutex);
            mutex_enter(&plun->lun_mutex);
        }
    } else {
        /* Child already gone; just drop the stale reference. */
        plun->lun_cip = NULL;
    }
}
13445
13446 /*
13447 * called when a timeout occurs
13448 *
13449 * can be scheduled during an attach or resume (if not already running)
13450 *
13451 * one timeout is set up for all ports
13452 *
13453 * acquires and releases the global mutex
13454 */
13455 /*ARGSUSED*/
13456 static void
13457 fcp_watch(void *arg)
13458 {
13459 struct fcp_port *pptr;
13460 struct fcp_ipkt *icmd;
13461 struct fcp_ipkt *nicmd;
13462 struct fcp_pkt *cmd;
13463 struct fcp_pkt *ncmd;
13464 struct fcp_pkt *tail;
13465 struct fcp_pkt *pcmd;
13466 struct fcp_pkt *save_head;
13467 struct fcp_port *save_port;
13468
13469 /* increment global watchdog time */
13470 fcp_watchdog_time += fcp_watchdog_timeout;
13471
13472 mutex_enter(&fcp_global_mutex);
13473
13474 /* scan each port in our list */
13475 for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
13476 save_port = fcp_port_head;
13477 pptr->port_state |= FCP_STATE_IN_WATCHDOG;
13478 mutex_exit(&fcp_global_mutex);
13479
13480 mutex_enter(&pptr->port_mutex);
13481 if (pptr->port_ipkt_list == NULL &&
13482 (pptr->port_state & (FCP_STATE_SUSPENDED |
13483 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
13484 pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13485 mutex_exit(&pptr->port_mutex);
13486 mutex_enter(&fcp_global_mutex);
13487 goto end_of_watchdog;
13488 }
13489
13490 /*
13491 * We check if a list of targets need to be offlined.
13492 */
13493 if (pptr->port_offline_tgts) {
13494 fcp_scan_offline_tgts(pptr);
13495 }
13496
13497 /*
13498 * We check if a list of luns need to be offlined.
13499 */
13500 if (pptr->port_offline_luns) {
13501 fcp_scan_offline_luns(pptr);
13502 }
13503
13504 /*
13505 * We check if a list of targets or luns need to be reset.
13506 */
13507 if (pptr->port_reset_list) {
13508 fcp_check_reset_delay(pptr);
13509 }
13510
13511 mutex_exit(&pptr->port_mutex);
13512
13513 /*
13514 * This is where the pending commands (pkt) are checked for
13515 * timeout.
13516 */
13517 mutex_enter(&pptr->port_pkt_mutex);
13518 tail = pptr->port_pkt_tail;
13519
13520 for (pcmd = NULL, cmd = pptr->port_pkt_head;
13521 cmd != NULL; cmd = ncmd) {
13522 ncmd = cmd->cmd_next;
13523 /*
13524 * If a command is in this queue the bit CFLAG_IN_QUEUE
13525 * must be set.
13526 */
13527 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
13528 /*
13529 * FCP_INVALID_TIMEOUT will be set for those
13530 * command that need to be failed. Mostly those
13531 * cmds that could not be queued down for the
13532 * "timeout" value. cmd->cmd_timeout is used
13533 * to try and requeue the command regularly.
13534 */
13535 if (cmd->cmd_timeout >= fcp_watchdog_time) {
13536 /*
13537 * This command hasn't timed out yet. Let's
13538 * go to the next one.
13539 */
13540 pcmd = cmd;
13541 goto end_of_loop;
13542 }
13543
13544 if (cmd == pptr->port_pkt_head) {
13545 ASSERT(pcmd == NULL);
13546 pptr->port_pkt_head = cmd->cmd_next;
13547 } else {
13548 ASSERT(pcmd != NULL);
13549 pcmd->cmd_next = cmd->cmd_next;
13550 }
13551
13552 if (cmd == pptr->port_pkt_tail) {
13553 ASSERT(cmd->cmd_next == NULL);
13554 pptr->port_pkt_tail = pcmd;
13555 if (pcmd) {
13556 pcmd->cmd_next = NULL;
13557 }
13558 }
13559 cmd->cmd_next = NULL;
13560
13561 /*
13562 * save the current head before dropping the
13563 * mutex - If the head doesn't remain the
13564 * same after re acquiring the mutex, just
13565 * bail out and revisit on next tick.
13566 *
13567 * PS: The tail pointer can change as the commands
13568 * get requeued after failure to retransport
13569 */
13570 save_head = pptr->port_pkt_head;
13571 mutex_exit(&pptr->port_pkt_mutex);
13572
13573 if (cmd->cmd_fp_pkt->pkt_timeout ==
13574 FCP_INVALID_TIMEOUT) {
13575 struct scsi_pkt *pkt = cmd->cmd_pkt;
13576 struct fcp_lun *plun;
13577 struct fcp_tgt *ptgt;
13578
13579 plun = ADDR2LUN(&pkt->pkt_address);
13580 ptgt = plun->lun_tgt;
13581
13582 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13583 fcp_trace, FCP_BUF_LEVEL_2, 0,
13584 "SCSI cmd 0x%x to D_ID=%x timed out",
13585 pkt->pkt_cdbp[0], ptgt->tgt_d_id);
13586
13587 cmd->cmd_state == FCP_PKT_ABORTING ?
13588 fcp_fail_cmd(cmd, CMD_RESET,
13589 STAT_DEV_RESET) : fcp_fail_cmd(cmd,
13590 CMD_TIMEOUT, STAT_ABORTED);
13591 } else {
13592 fcp_retransport_cmd(pptr, cmd);
13593 }
13594 mutex_enter(&pptr->port_pkt_mutex);
13595 if (save_head && save_head != pptr->port_pkt_head) {
13596 /*
13597 * Looks like linked list got changed (mostly
13598 * happens when an an OFFLINE LUN code starts
13599 * returning overflow queue commands in
13600 * parallel. So bail out and revisit during
13601 * next tick
13602 */
13603 break;
13604 }
13605 end_of_loop:
13606 /*
13607 * Scan only upto the previously known tail pointer
13608 * to avoid excessive processing - lots of new packets
13609 * could have been added to the tail or the old ones
13610 * re-queued.
13611 */
13612 if (cmd == tail) {
13613 break;
13614 }
13615 }
13616 mutex_exit(&pptr->port_pkt_mutex);
13617
13618 mutex_enter(&pptr->port_mutex);
13619 for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
13620 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
13621
13622 nicmd = icmd->ipkt_next;
13623 if ((icmd->ipkt_restart != 0) &&
13624 (icmd->ipkt_restart >= fcp_watchdog_time)) {
13625 /* packet has not timed out */
13626 continue;
13627 }
13628
13629 /* time for packet re-transport */
13630 if (icmd == pptr->port_ipkt_list) {
13631 pptr->port_ipkt_list = icmd->ipkt_next;
13632 if (pptr->port_ipkt_list) {
13633 pptr->port_ipkt_list->ipkt_prev =
13634 NULL;
13635 }
13636 } else {
13637 icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
13638 if (icmd->ipkt_next) {
13639 icmd->ipkt_next->ipkt_prev =
13640 icmd->ipkt_prev;
13641 }
13642 }
13643 icmd->ipkt_next = NULL;
13644 icmd->ipkt_prev = NULL;
13645 mutex_exit(&pptr->port_mutex);
13646
13647 if (fcp_is_retryable(icmd)) {
13648 fc_ulp_rscn_info_t *rscnp =
13649 (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
13650 pkt_ulp_rscn_infop;
13651
13652 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13653 fcp_trace, FCP_BUF_LEVEL_2, 0,
13654 "%x to D_ID=%x Retrying..",
13655 icmd->ipkt_opcode,
13656 icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);
13657
13658 /*
13659 * Update the RSCN count in the packet
13660 * before resending.
13661 */
13662
13663 if (rscnp != NULL) {
13664 rscnp->ulp_rscn_count =
13665 fc_ulp_get_rscn_count(pptr->
13666 port_fp_handle);
13667 }
13668
13669 mutex_enter(&pptr->port_mutex);
13670 mutex_enter(&ptgt->tgt_mutex);
13671 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
13672 mutex_exit(&ptgt->tgt_mutex);
13673 mutex_exit(&pptr->port_mutex);
13674 switch (icmd->ipkt_opcode) {
13675 int rval;
13676 case LA_ELS_PLOGI:
13677 if ((rval = fc_ulp_login(
13678 pptr->port_fp_handle,
13679 &icmd->ipkt_fpkt, 1)) ==
13680 FC_SUCCESS) {
13681 mutex_enter(
13682 &pptr->port_mutex);
13683 continue;
13684 }
13685 if (fcp_handle_ipkt_errors(
13686 pptr, ptgt, icmd, rval,
13687 "PLOGI") == DDI_SUCCESS) {
13688 mutex_enter(
13689 &pptr->port_mutex);
13690 continue;
13691 }
13692 break;
13693
13694 case LA_ELS_PRLI:
13695 if ((rval = fc_ulp_issue_els(
13696 pptr->port_fp_handle,
13697 icmd->ipkt_fpkt)) ==
13698 FC_SUCCESS) {
13699 mutex_enter(
13700 &pptr->port_mutex);
13701 continue;
13702 }
13703 if (fcp_handle_ipkt_errors(
13704 pptr, ptgt, icmd, rval,
13705 "PRLI") == DDI_SUCCESS) {
13706 mutex_enter(
13707 &pptr->port_mutex);
13708 continue;
13709 }
13710 break;
13711
13712 default:
13713 if ((rval = fcp_transport(
13714 pptr->port_fp_handle,
13715 icmd->ipkt_fpkt, 1)) ==
13716 FC_SUCCESS) {
13717 mutex_enter(
13718 &pptr->port_mutex);
13719 continue;
13720 }
13721 if (fcp_handle_ipkt_errors(
13722 pptr, ptgt, icmd, rval,
13723 "PRLI") == DDI_SUCCESS) {
13724 mutex_enter(
13725 &pptr->port_mutex);
13726 continue;
13727 }
13728 break;
13729 }
13730 } else {
13731 mutex_exit(&ptgt->tgt_mutex);
13732 mutex_exit(&pptr->port_mutex);
13733 }
13734 } else {
13735 fcp_print_error(icmd->ipkt_fpkt);
13736 }
13737
13738 (void) fcp_call_finish_init(pptr, ptgt,
13739 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
13740 icmd->ipkt_cause);
13741 fcp_icmd_free(pptr, icmd);
13742 mutex_enter(&pptr->port_mutex);
13743 }
13744
13745 pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13746 mutex_exit(&pptr->port_mutex);
13747 mutex_enter(&fcp_global_mutex);
13748
13749 end_of_watchdog:
13750 /*
13751 * Bail out early before getting into trouble
13752 */
13753 if (save_port != fcp_port_head) {
13754 break;
13755 }
13756 }
13757
13758 if (fcp_watchdog_init > 0) {
13759 /* reschedule timeout to go again */
13760 fcp_watchdog_id =
13761 timeout(fcp_watch, NULL, fcp_watchdog_tick);
13762 }
13763 mutex_exit(&fcp_global_mutex);
13764 }
13765
13766
/*
 * Function: fcp_check_reset_delay
 *
 * Description: Walks the port's reset-delay list (built when a target or
 *     LUN reset was issued), unlinks each element selected for
 *     processing, clears the FCP_RESET/FCP_LUN_BUSY state on the
 *     target or LUN, and aborts all outstanding commands for it via
 *     fcp_abort_all (which requires dropping port_mutex).
 *
 * Argument: *pptr  Port whose reset list is processed; port_mutex must
 *     be held (dropped and re-acquired around fcp_abort_all).
 *
 * Context: Kernel context (called from fcp_watch).
 */
static void
fcp_check_reset_delay(struct fcp_port *pptr)
{
    uint32_t tgt_cnt;
    int level;
    struct fcp_tgt *ptgt;
    struct fcp_lun *plun;
    struct fcp_reset_elem *cur = NULL;
    struct fcp_reset_elem *next = NULL;
    struct fcp_reset_elem *prev = NULL;

    ASSERT(mutex_owned(&pptr->port_mutex));

    next = pptr->port_reset_list;
    while ((cur = next) != NULL) {
        next = cur->next;

        /*
         * NOTE(review): elements with timeout < fcp_watchdog_time
         * are skipped and the rest processed — presumably the
         * element's timeout field marks the end of the reset-delay
         * window; confirm against where port_reset_list entries
         * are created before changing this comparison.
         */
        if (cur->timeout < fcp_watchdog_time) {
            prev = cur;
            continue;
        }

        ptgt = cur->tgt;
        plun = cur->lun;
        tgt_cnt = cur->tgt_cnt;

        /* An element carries either a target or a LUN, not both. */
        if (ptgt) {
            level = RESET_TARGET;
        } else {
            ASSERT(plun != NULL);
            level = RESET_LUN;
            ptgt = plun->lun_tgt;
        }
        if (prev) {
            prev->next = next;
        } else {
            /*
             * Because we drop port mutex while doing aborts for
             * packets, we can't rely on reset_list pointing to
             * our head
             */
            if (cur == pptr->port_reset_list) {
                pptr->port_reset_list = next;
            } else {
                struct fcp_reset_elem *which;

                /* re-locate cur's predecessor in the list */
                which = pptr->port_reset_list;
                while (which && which->next != cur) {
                    which = which->next;
                }
                ASSERT(which != NULL);

                which->next = next;
                prev = which;
            }
        }

        kmem_free(cur, sizeof (*cur));

        /* Only act if the target generation hasn't moved on. */
        if (tgt_cnt == ptgt->tgt_change_cnt) {
            mutex_enter(&ptgt->tgt_mutex);
            if (level == RESET_TARGET) {
                fcp_update_tgt_state(ptgt,
                    FCP_RESET, FCP_LUN_BUSY);
            } else {
                fcp_update_lun_state(plun,
                    FCP_RESET, FCP_LUN_BUSY);
            }
            mutex_exit(&ptgt->tgt_mutex);

            /* fcp_abort_all must run without port_mutex held */
            mutex_exit(&pptr->port_mutex);
            fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
            mutex_enter(&pptr->port_mutex);
        }
    }
}
13843
13844
/*
 * Function: fcp_abort_all
 *
 * Description: After a target or LUN reset, completes or aborts all
 *     commands outstanding for that target (or single LUN).  First,
 *     queued-but-untransported commands on the port's overflow queue
 *     are unlinked and completed with CMD_RESET/STAT_DEV_RESET.  Then,
 *     unless the FCA returns everything itself (FC_RESET_RETURN_ALL),
 *     each issued command on the affected LUN(s) is aborted via
 *     fc_ulp_abort; on abort failure the command is marked
 *     FCP_PKT_ABORTING and parked on the overflow queue so fcp_watch
 *     can time it out later.
 *
 * Argument: *pptr    Port the commands belong to.
 *     *ttgt    Target being reset, or NULL for a LUN-level reset.
 *     *rlun    LUN being reset, or NULL for a target-level reset.
 *     tgt_cnt  Target change counter at reset time; work stops if
 *              the target's counter moves past it.
 *
 * Context: Kernel context.  Called with no FCP mutexes held; acquires
 *     and releases port_pkt_mutex, port_mutex, tgt_mutex and lun_mutex
 *     internally.
 */
static void
fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
    struct fcp_lun *rlun, int tgt_cnt)
{
    int rval;
    struct fcp_lun *tlun, *nlun;
    struct fcp_pkt *pcmd = NULL, *ncmd = NULL,
        *cmd = NULL, *head = NULL,
        *tail = NULL;

    /* Pass 1: pull matching commands off the overflow queue. */
    mutex_enter(&pptr->port_pkt_mutex);
    for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
        struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
        struct fcp_tgt *ptgt = plun->lun_tgt;

        ncmd = cmd->cmd_next;

        /* keep commands that belong to neither the target nor LUN */
        if (ptgt != ttgt && plun != rlun) {
            pcmd = cmd;
            continue;
        }

        /* unlink cmd from the overflow queue */
        if (pcmd != NULL) {
            ASSERT(pptr->port_pkt_head != cmd);
            pcmd->cmd_next = ncmd;
        } else {
            ASSERT(cmd == pptr->port_pkt_head);
            pptr->port_pkt_head = ncmd;
        }
        if (pptr->port_pkt_tail == cmd) {
            ASSERT(cmd->cmd_next == NULL);
            pptr->port_pkt_tail = pcmd;
            if (pcmd != NULL) {
                pcmd->cmd_next = NULL;
            }
        }

        /* append cmd to the local list of commands to complete */
        if (head == NULL) {
            head = tail = cmd;
        } else {
            ASSERT(tail != NULL);
            tail->cmd_next = cmd;
            tail = cmd;
        }
        cmd->cmd_next = NULL;
    }
    mutex_exit(&pptr->port_pkt_mutex);

    /* Complete the collected commands as device resets. */
    for (cmd = head; cmd != NULL; cmd = ncmd) {
        struct scsi_pkt *pkt = cmd->cmd_pkt;

        ncmd = cmd->cmd_next;
        ASSERT(pkt != NULL);

        mutex_enter(&pptr->port_mutex);
        if (ttgt->tgt_change_cnt == tgt_cnt) {
            mutex_exit(&pptr->port_mutex);
            cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
            pkt->pkt_reason = CMD_RESET;
            pkt->pkt_statistics |= STAT_DEV_RESET;
            cmd->cmd_state = FCP_PKT_IDLE;
            fcp_post_callback(cmd);
        } else {
            mutex_exit(&pptr->port_mutex);
        }
    }

    /*
     * If the FCA will return all the commands in its queue then our
     * work is easy, just return.
     */

    if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
        return;
    }

    /*
     * For RESET_LUN get hold of target pointer
     */
    if (ttgt == NULL) {
        ASSERT(rlun != NULL);

        ttgt = rlun->lun_tgt;

        ASSERT(ttgt != NULL);
    }

    /*
     * There are some severe race conditions here.
     * While we are trying to abort the pkt, it might be completing
     * so mark it aborted and if the abort does not succeed then
     * handle it in the watch thread.
     */
    mutex_enter(&ttgt->tgt_mutex);
    nlun = ttgt->tgt_lun;
    mutex_exit(&ttgt->tgt_mutex);
    while ((tlun = nlun) != NULL) {
        int restart = 0;
        /* for a LUN-level reset, only touch the named LUN */
        if (rlun && rlun != tlun) {
            mutex_enter(&ttgt->tgt_mutex);
            nlun = tlun->lun_next;
            mutex_exit(&ttgt->tgt_mutex);
            continue;
        }
        mutex_enter(&tlun->lun_mutex);
        cmd = tlun->lun_pkt_head;
        while (cmd != NULL) {
            if (cmd->cmd_state == FCP_PKT_ISSUED) {
                struct scsi_pkt *pkt;

                restart = 1;
                cmd->cmd_state = FCP_PKT_ABORTING;
                /* drop lun_mutex: fc_ulp_abort can sleep */
                mutex_exit(&tlun->lun_mutex);
                rval = fc_ulp_abort(pptr->port_fp_handle,
                    cmd->cmd_fp_pkt, KM_SLEEP);
                if (rval == FC_SUCCESS) {
                    pkt = cmd->cmd_pkt;
                    pkt->pkt_reason = CMD_RESET;
                    pkt->pkt_statistics |= STAT_DEV_RESET;
                    cmd->cmd_state = FCP_PKT_IDLE;
                    fcp_post_callback(cmd);
                } else {
                    caddr_t msg;

                    (void) fc_ulp_error(rval, &msg);

                    /*
                     * This part is tricky. The abort
                     * failed and now the command could
                     * be completing. The cmd_state ==
                     * FCP_PKT_ABORTING should save
                     * us in fcp_cmd_callback. If we
                     * are already aborting ignore the
                     * command in fcp_cmd_callback.
                     * Here we leave this packet for 20
                     * sec to be aborted in the
                     * fcp_watch thread.
                     */
                    fcp_log(CE_WARN, pptr->port_dip,
                        "!Abort failed after reset %s",
                        msg);

                    cmd->cmd_timeout =
                        fcp_watchdog_time +
                        cmd->cmd_pkt->pkt_time +
                        FCP_FAILED_DELAY;

                    cmd->cmd_fp_pkt->pkt_timeout =
                        FCP_INVALID_TIMEOUT;
                    /*
                     * This is a hack, cmd is put in the
                     * overflow queue so that it can be
                     * timed out finally
                     */
                    cmd->cmd_flags |= CFLAG_IN_QUEUE;

                    mutex_enter(&pptr->port_pkt_mutex);
                    if (pptr->port_pkt_head) {
                        ASSERT(pptr->port_pkt_tail
                            != NULL);
                        pptr->port_pkt_tail->cmd_next
                            = cmd;
                        pptr->port_pkt_tail = cmd;
                    } else {
                        ASSERT(pptr->port_pkt_tail
                            == NULL);
                        pptr->port_pkt_head =
                            pptr->port_pkt_tail
                            = cmd;
                    }
                    cmd->cmd_next = NULL;
                    mutex_exit(&pptr->port_pkt_mutex);
                }
                /* list may have changed: restart the scan */
                mutex_enter(&tlun->lun_mutex);
                cmd = tlun->lun_pkt_head;
            } else {
                cmd = cmd->cmd_forw;
            }
        }
        mutex_exit(&tlun->lun_mutex);

        /*
         * If we dropped lun_mutex (restart), the LUN list may
         * have changed: rescan from the target's first LUN.
         */
        mutex_enter(&ttgt->tgt_mutex);
        restart == 1 ? (nlun = ttgt->tgt_lun) : (nlun = tlun->lun_next);
        mutex_exit(&ttgt->tgt_mutex);

        /* stop if the target generation has moved on */
        mutex_enter(&pptr->port_mutex);
        if (tgt_cnt != ttgt->tgt_change_cnt) {
            mutex_exit(&pptr->port_mutex);
            return;
        } else {
            mutex_exit(&pptr->port_mutex);
        }
    }
}
14039
14040
14041 /*
14042 * unlink the soft state, returning the soft state found (if any)
14043 *
14044 * acquires and releases the global mutex
14045 */
14046 struct fcp_port *
14047 fcp_soft_state_unlink(struct fcp_port *pptr)
14048 {
14049 struct fcp_port *hptr; /* ptr index */
14050 struct fcp_port *tptr; /* prev hptr */
14051
14052 mutex_enter(&fcp_global_mutex);
14053 for (hptr = fcp_port_head, tptr = NULL;
14054 hptr != NULL;
14055 tptr = hptr, hptr = hptr->port_next) {
14056 if (hptr == pptr) {
14057 /* we found a match -- remove this item */
14058 if (tptr == NULL) {
14059 /* we're at the head of the list */
14060 fcp_port_head = hptr->port_next;
14061 } else {
14062 tptr->port_next = hptr->port_next;
14063 }
14064 break; /* success */
14065 }
14066 }
14067 if (fcp_port_head == NULL) {
14068 fcp_cleanup_blacklist(&fcp_lun_blacklist);
14069 }
14070 mutex_exit(&fcp_global_mutex);
14071 return (hptr);
14072 }
14073
14074
14075 /*
14076 * called by fcp_scsi_hba_tgt_init to find a LUN given a
14077 * WWN and a LUN number
14078 */
14079 /* ARGSUSED */
14080 static struct fcp_lun *
14081 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
14082 {
14083 int hash;
14084 struct fcp_tgt *ptgt;
14085 struct fcp_lun *plun;
14086
14087 ASSERT(mutex_owned(&pptr->port_mutex));
14088
14089 hash = FCP_HASH(wwn);
14090 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
14091 ptgt = ptgt->tgt_next) {
14092 if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
14093 sizeof (ptgt->tgt_port_wwn)) == 0) {
14094 mutex_enter(&ptgt->tgt_mutex);
14095 for (plun = ptgt->tgt_lun;
14096 plun != NULL;
14097 plun = plun->lun_next) {
14098 if (plun->lun_num == lun) {
14099 mutex_exit(&ptgt->tgt_mutex);
14100 return (plun);
14101 }
14102 }
14103 mutex_exit(&ptgt->tgt_mutex);
14104 return (NULL);
14105 }
14106 }
14107 return (NULL);
14108 }
14109
14110 /*
14111 * Function: fcp_prepare_pkt
14112 *
14113 * Description: This function prepares the SCSI cmd pkt, passed by the caller,
14114 * for fcp_start(). It binds the data or partially maps it.
14115 * Builds the FCP header and starts the initialization of the
14116 * Fibre Channel header.
14117 *
14118 * Argument: *pptr FCP port.
14119 * *cmd FCP packet.
14120 * *plun LUN the command will be sent to.
14121 *
14122 * Context: User, Kernel and Interrupt context.
14123 */
14124 static void
14125 fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
14126 struct fcp_lun *plun)
14127 {
14128 fc_packet_t *fpkt = cmd->cmd_fp_pkt;
14129 struct fcp_tgt *ptgt = plun->lun_tgt;
14130 struct fcp_cmd *fcmd = &cmd->cmd_fcp_cmd;
14131
14132 ASSERT(cmd->cmd_pkt->pkt_comp ||
14133 (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));
14134
14135 if (cmd->cmd_pkt->pkt_numcookies) {
14136 if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
14137 fcmd->fcp_cntl.cntl_read_data = 1;
14138 fcmd->fcp_cntl.cntl_write_data = 0;
14139 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
14140 } else {
14141 fcmd->fcp_cntl.cntl_read_data = 0;
14142 fcmd->fcp_cntl.cntl_write_data = 1;
14143 fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
14144 }
14145
14146 fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;
14147
14148 fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
14149 ASSERT(fpkt->pkt_data_cookie_cnt <=
14150 pptr->port_data_dma_attr.dma_attr_sgllen);
14151
14152 cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;
14153
14154 /* FCA needs pkt_datalen to be set */
14155 fpkt->pkt_datalen = cmd->cmd_dmacount;
14156 fcmd->fcp_data_len = cmd->cmd_dmacount;
14157 } else {
14158 fcmd->fcp_cntl.cntl_read_data = 0;
14159 fcmd->fcp_cntl.cntl_write_data = 0;
14160 fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
14161 fpkt->pkt_datalen = 0;
14162 fcmd->fcp_data_len = 0;
14163 }
14164
14165 /* set up the Tagged Queuing type */
14166 if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
14167 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
14168 } else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
14169 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
14170 } else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
14171 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
14172 } else {
14173 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
14174 }
14175
14176 fcmd->fcp_ent_addr = plun->lun_addr;
14177
14178 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
14179 FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
14180 fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
14181 } else {
14182 ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
14183 }
14184
14185 cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
14186 cmd->cmd_pkt->pkt_state = 0;
14187 cmd->cmd_pkt->pkt_statistics = 0;
14188 cmd->cmd_pkt->pkt_resid = 0;
14189
14190 cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;
14191
14192 if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
14193 fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
14194 fpkt->pkt_comp = NULL;
14195 } else {
14196 fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
14197 if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
14198 fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
14199 }
14200 fpkt->pkt_comp = fcp_cmd_callback;
14201 }
14202
14203 mutex_enter(&pptr->port_mutex);
14204 if (pptr->port_state & FCP_STATE_SUSPENDED) {
14205 fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
14206 }
14207 mutex_exit(&pptr->port_mutex);
14208
14209 fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
14210 fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
14211
14212 /*
14213 * Save a few kernel cycles here
14214 */
14215 fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
14216 }
14217
14218 static void
14219 fcp_post_callback(struct fcp_pkt *cmd)
14220 {
14221 scsi_hba_pkt_comp(cmd->cmd_pkt);
14222 }
14223
14224
14225 /*
14226 * called to do polled I/O by fcp_start()
14227 *
14228 * return a transport status value, i.e. TRAN_ACCECPT for success
14229 */
14230 static int
14231 fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
14232 {
14233 int rval;
14234
14235 #ifdef DEBUG
14236 mutex_enter(&pptr->port_pkt_mutex);
14237 pptr->port_npkts++;
14238 mutex_exit(&pptr->port_pkt_mutex);
14239 #endif /* DEBUG */
14240
14241 if (cmd->cmd_fp_pkt->pkt_timeout) {
14242 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
14243 } else {
14244 cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
14245 }
14246
14247 ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);
14248
14249 cmd->cmd_state = FCP_PKT_ISSUED;
14250
14251 rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);
14252
14253 #ifdef DEBUG
14254 mutex_enter(&pptr->port_pkt_mutex);
14255 pptr->port_npkts--;
14256 mutex_exit(&pptr->port_pkt_mutex);
14257 #endif /* DEBUG */
14258
14259 cmd->cmd_state = FCP_PKT_IDLE;
14260
14261 switch (rval) {
14262 case FC_SUCCESS:
14263 if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
14264 fcp_complete_pkt(cmd->cmd_fp_pkt);
14265 rval = TRAN_ACCEPT;
14266 } else {
14267 rval = TRAN_FATAL_ERROR;
14268 }
14269 break;
14270
14271 case FC_TRAN_BUSY:
14272 rval = TRAN_BUSY;
14273 cmd->cmd_pkt->pkt_resid = 0;
14274 break;
14275
14276 case FC_BADPACKET:
14277 rval = TRAN_BADPKT;
14278 break;
14279
14280 default:
14281 rval = TRAN_FATAL_ERROR;
14282 break;
14283 }
14284
14285 return (rval);
14286 }
14287
14288
14289 /*
14290 * called by some of the following transport-called routines to convert
14291 * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14292 */
14293 static struct fcp_port *
14294 fcp_dip2port(dev_info_t *dip)
14295 {
14296 int instance;
14297
14298 instance = ddi_get_instance(dip);
14299 return (ddi_get_soft_state(fcp_softstate, instance));
14300 }
14301
14302
14303 /*
14304 * called internally to return a LUN given a dip
14305 */
14306 struct fcp_lun *
14307 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14308 {
14309 struct fcp_tgt *ptgt;
14310 struct fcp_lun *plun;
14311 int i;
14312
14313
14314 ASSERT(mutex_owned(&pptr->port_mutex));
14315
14316 for (i = 0; i < FCP_NUM_HASH; i++) {
14317 for (ptgt = pptr->port_tgt_hash_table[i];
14318 ptgt != NULL;
14319 ptgt = ptgt->tgt_next) {
14320 mutex_enter(&ptgt->tgt_mutex);
14321 for (plun = ptgt->tgt_lun; plun != NULL;
14322 plun = plun->lun_next) {
14323 mutex_enter(&plun->lun_mutex);
14324 if (plun->lun_cip == cip) {
14325 mutex_exit(&plun->lun_mutex);
14326 mutex_exit(&ptgt->tgt_mutex);
14327 return (plun); /* match found */
14328 }
14329 mutex_exit(&plun->lun_mutex);
14330 }
14331 mutex_exit(&ptgt->tgt_mutex);
14332 }
14333 }
14334 return (NULL); /* no LUN found */
14335 }
14336
14337 /*
14338 * pass an element to the hotplug list, kick the hotplug thread
14339 * and wait for the element to get processed by the hotplug thread.
14340 * on return the element is freed.
14341 *
14342 * return zero success and non-zero on failure
14343 *
14344 * acquires/releases the target mutex
14345 *
14346 */
14347 static int
14348 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14349 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14350 {
14351 struct fcp_hp_elem *elem;
14352 int rval;
14353
14354 mutex_enter(&plun->lun_tgt->tgt_mutex);
14355 if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14356 what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14357 mutex_exit(&plun->lun_tgt->tgt_mutex);
14358 fcp_log(CE_CONT, pptr->port_dip,
14359 "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14360 what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14361 return (NDI_FAILURE);
14362 }
14363 mutex_exit(&plun->lun_tgt->tgt_mutex);
14364 mutex_enter(&elem->mutex);
14365 if (elem->wait) {
14366 while (elem->wait) {
14367 cv_wait(&elem->cv, &elem->mutex);
14368 }
14369 }
14370 rval = (elem->result);
14371 mutex_exit(&elem->mutex);
14372 mutex_destroy(&elem->mutex);
14373 cv_destroy(&elem->cv);
14374 kmem_free(elem, sizeof (struct fcp_hp_elem));
14375 return (rval);
14376 }
14377
14378 /*
14379 * pass an element to the hotplug list, and then
14380 * kick the hotplug thread
14381 *
14382 * return Boolean success, i.e. non-zero if all goes well, else zero on error
14383 *
14384 * acquires/releases the hotplug mutex
14385 *
14386 * called with the target mutex owned
14387 *
14388 * memory acquired in NOSLEEP mode
14389 * NOTE: if wait is set to 1 then the caller is responsible for waiting on
14390 * for the hp daemon to process the request and is responsible for
14391 * freeing the element
14392 */
14393 static struct fcp_hp_elem *
14394 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14395 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14396 {
14397 struct fcp_hp_elem *elem;
14398 dev_info_t *pdip;
14399
14400 ASSERT(pptr != NULL);
14401 ASSERT(plun != NULL);
14402 ASSERT(plun->lun_tgt != NULL);
14403 ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14404
14405 /* create space for a hotplug element */
14406 if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14407 == NULL) {
14408 fcp_log(CE_WARN, NULL,
14409 "!can't allocate memory for hotplug element");
14410 return (NULL);
14411 }
14412
14413 /* fill in hotplug element */
14414 elem->port = pptr;
14415 elem->lun = plun;
14416 elem->cip = cip;
14417 elem->old_lun_mpxio = plun->lun_mpxio;
14418 elem->what = what;
14419 elem->flags = flags;
14420 elem->link_cnt = link_cnt;
14421 elem->tgt_cnt = tgt_cnt;
14422 elem->wait = wait;
14423 mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14424 cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14425
14426 /* schedule the hotplug task */
14427 pdip = pptr->port_dip;
14428 mutex_enter(&plun->lun_mutex);
14429 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14430 plun->lun_event_count++;
14431 elem->event_cnt = plun->lun_event_count;
14432 }
14433 mutex_exit(&plun->lun_mutex);
14434 if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14435 (void *)elem, KM_NOSLEEP) == NULL) {
14436 mutex_enter(&plun->lun_mutex);
14437 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14438 plun->lun_event_count--;
14439 }
14440 mutex_exit(&plun->lun_mutex);
14441 kmem_free(elem, sizeof (*elem));
14442 return (0);
14443 }
14444
14445 return (elem);
14446 }
14447
14448
/*
 * Retry transport of a previously queued command.  If the LUN is usable
 * and the port is not onlining, the packet is re-prepared and handed to
 * the FC transport; on transport failure (or while the LUN is busy or
 * offline) the command is put back on the port's pending queue.
 */
static void
fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
{
	int			rval;
	struct scsi_address	*ap;
	struct fcp_lun		*plun;
	struct fcp_tgt		*ptgt;
	fc_packet_t		*fpkt;

	ap = &cmd->cmd_pkt->pkt_address;
	plun = ADDR2LUN(ap);
	ptgt = plun->lun_tgt;

	/* only commands sitting on the port queue may be retransported */
	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);

	cmd->cmd_state = FCP_PKT_IDLE;

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
	    (!(pptr->port_state & FCP_STATE_ONLINING))) {
		fc_ulp_rscn_info_t *rscnp;

		cmd->cmd_state = FCP_PKT_ISSUED;

		/*
		 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
		 * originally NULL, hence we try to set it to the pd pointed
		 * to by the SCSI device we're trying to get to.
		 */

		fpkt = cmd->cmd_fp_pkt;
		if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
			fpkt->pkt_pd = ptgt->tgt_pd_handle;
			/*
			 * We need to notify the transport that we now have a
			 * reference to the remote port handle.
			 */
			fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
		}

		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		/* polled commands are never queued, so never retried here */
		ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);

		/* prepare the packet */

		fcp_prepare_pkt(pptr, cmd, plun);

		rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
		    pkt_ulp_rscn_infop;

		/* a zero pkt_time means the command never times out */
		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
		    fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;

		if (rscnp != NULL) {
			/* stamp the packet with the current RSCN count */
			rscnp->ulp_rscn_count =
			    fc_ulp_get_rscn_count(pptr->
			    port_fp_handle);
		}

		rval = fcp_transport(pptr->port_fp_handle,
		    cmd->cmd_fp_pkt, 0);

		if (rval == FC_SUCCESS) {
			return;
		}
		cmd->cmd_state &= ~FCP_PKT_ISSUED;
	} else {
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
	}

	/* transport failed or LUN not ready: keep the command queued */
	fcp_queue_pkt(pptr, cmd);
}
14525
14526
14527 static void
14528 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14529 {
14530 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14531
14532 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14533 cmd->cmd_state = FCP_PKT_IDLE;
14534
14535 cmd->cmd_pkt->pkt_reason = reason;
14536 cmd->cmd_pkt->pkt_state = 0;
14537 cmd->cmd_pkt->pkt_statistics = statistics;
14538
14539 fcp_post_callback(cmd);
14540 }
14541
14542 /*
14543 * Function: fcp_queue_pkt
14544 *
14545 * Description: This function queues the packet passed by the caller into
14546 * the list of packets of the FCP port.
14547 *
14548 * Argument: *pptr FCP port.
14549 * *cmd FCP packet to queue.
14550 *
14551 * Return Value: None
14552 *
14553 * Context: User, Kernel and Interrupt context.
14554 */
14555 static void
14556 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14557 {
14558 ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == NULL);
14559
14560 mutex_enter(&pptr->port_pkt_mutex);
14561 cmd->cmd_flags |= CFLAG_IN_QUEUE;
14562 ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14563 cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14564
14565 /*
14566 * zero pkt_time means hang around for ever
14567 */
14568 if (cmd->cmd_pkt->pkt_time) {
14569 if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14570 cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14571 } else {
14572 /*
14573 * Indicate the watch thread to fail the
14574 * command by setting it to highest value
14575 */
14576 cmd->cmd_timeout = fcp_watchdog_time;
14577 cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14578 }
14579 }
14580
14581 if (pptr->port_pkt_head) {
14582 ASSERT(pptr->port_pkt_tail != NULL);
14583
14584 pptr->port_pkt_tail->cmd_next = cmd;
14585 pptr->port_pkt_tail = cmd;
14586 } else {
14587 ASSERT(pptr->port_pkt_tail == NULL);
14588
14589 pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14590 }
14591 cmd->cmd_next = NULL;
14592 mutex_exit(&pptr->port_pkt_mutex);
14593 }
14594
14595 /*
14596 * Function: fcp_update_targets
14597 *
14598 * Description: This function applies the specified change of state to all
14599 * the targets listed. The operation applied is 'set'.
14600 *
14601 * Argument: *pptr FCP port.
14602 * *dev_list Array of fc_portmap_t structures.
14603 * count Length of dev_list.
14604 * state State bits to update.
14605 * cause Reason for the update.
14606 *
14607 * Return Value: None
14608 *
14609 * Context: User, Kernel and Interrupt context.
14610 * The mutex pptr->port_mutex must be held.
14611 */
14612 static void
14613 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14614 uint32_t count, uint32_t state, int cause)
14615 {
14616 fc_portmap_t *map_entry;
14617 struct fcp_tgt *ptgt;
14618
14619 ASSERT(MUTEX_HELD(&pptr->port_mutex));
14620
14621 while (count--) {
14622 map_entry = &(dev_list[count]);
14623 ptgt = fcp_lookup_target(pptr,
14624 (uchar_t *)&(map_entry->map_pwwn));
14625 if (ptgt == NULL) {
14626 continue;
14627 }
14628
14629 mutex_enter(&ptgt->tgt_mutex);
14630 ptgt->tgt_trace = 0;
14631 ptgt->tgt_change_cnt++;
14632 ptgt->tgt_statec_cause = cause;
14633 ptgt->tgt_tmp_cnt = 1;
14634 fcp_update_tgt_state(ptgt, FCP_SET, state);
14635 mutex_exit(&ptgt->tgt_mutex);
14636 }
14637 }
14638
14639 static int
14640 fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14641 int lcount, int tcount, int cause)
14642 {
14643 int rval;
14644
14645 mutex_enter(&pptr->port_mutex);
14646 rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
14647 mutex_exit(&pptr->port_mutex);
14648
14649 return (rval);
14650 }
14651
14652
/*
 * Bookkeeping step of the discovery process: decrement the per-target
 * temporary counter and, when appropriate, finish target and/or port
 * initialization.  Returns FCP_DEV_CHANGE when the target change count
 * no longer matches tcount, else FCP_NO_CHANGE.
 *
 * Called with pptr->port_mutex held.
 */
static int
fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int lcount, int tcount, int cause)
{
	int	finish_init = 0;	/* nonzero: count toward port init */
	int	finish_tgt = 0;		/* nonzero: finish this target */
	int	do_finish_init = 0;	/* cause qualifies for port init */
	int	rval = FCP_NO_CHANGE;

	/* only link events participate in finishing port initialization */
	if (cause == FCP_CAUSE_LINK_CHANGE ||
	    cause == FCP_CAUSE_LINK_DOWN) {
		do_finish_init = 1;
	}

	if (ptgt != NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0,
		    "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
		    " cause = %d, d_id = 0x%x, tgt_done = %d",
		    pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
		    pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
		    ptgt->tgt_d_id, ptgt->tgt_done);

		mutex_enter(&ptgt->tgt_mutex);

		if (tcount && (ptgt->tgt_change_cnt != tcount)) {
			/*
			 * A newer change superseded the one the caller was
			 * working on; count this target done only once.
			 */
			rval = FCP_DEV_CHANGE;
			if (do_finish_init && ptgt->tgt_done == 0) {
				ptgt->tgt_done++;
				finish_init = 1;
			}
		} else {
			/* last outstanding piece of work for this target? */
			if (--ptgt->tgt_tmp_cnt <= 0) {
				ptgt->tgt_tmp_cnt = 0;
				finish_tgt = 1;

				if (do_finish_init) {
					finish_init = 1;
				}
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0,
		    "Call Finish Init for NO target");

		if (do_finish_init) {
			finish_init = 1;
		}
	}

	if (finish_tgt) {
		ASSERT(ptgt != NULL);

		mutex_enter(&ptgt->tgt_mutex);
#ifdef	DEBUG
		/* record who zeroed tgt_tmp_cnt, for post-mortem debugging */
		bzero(ptgt->tgt_tmp_cnt_stack,
		    sizeof (ptgt->tgt_tmp_cnt_stack));

		ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
		    FCP_STACK_DEPTH);
#endif /* DEBUG */
		mutex_exit(&ptgt->tgt_mutex);

		(void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
	}

	/* finish port init only when the link generation is still current */
	if (finish_init && lcount == pptr->port_link_cnt) {
		ASSERT(pptr->port_tmp_cnt > 0);
		if (--pptr->port_tmp_cnt == 0) {
			fcp_finish_init(pptr);
		}
	} else if (lcount != pptr->port_link_cnt) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_call_finish_init_held,1: state change occured"
		    " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
	}

	return (rval);
}
14735
/*
 * Timeout callback: rediscover the LUNs of a target after a
 * REPORT LUNS data change, by synthesizing a single-entry device
 * change event and feeding it through fcp_statec_callback().
 */
static void
fcp_reconfigure_luns(void * tgt_handle)
{
	uint32_t	dev_cnt;
	fc_portmap_t	*devlist;
	struct fcp_tgt	*ptgt = (struct fcp_tgt *)tgt_handle;
	struct fcp_port	*pptr = ptgt->tgt_port;

	/*
	 * If the timer that fires this off got canceled too late, the
	 * target could have been destroyed.
	 */

	/*
	 * NOTE(review): tgt_tid is read here without tgt_mutex; presumably
	 * the untimeout() in fcp_free_target() makes this safe -- confirm.
	 */
	if (ptgt->tgt_tid == NULL) {
		return;
	}

	devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
	if (devlist == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: failed to allocate for portmap",
		    pptr->port_instance);
		return;
	}

	/* build a one-entry portmap describing this target */
	dev_cnt = 1;
	devlist->map_pd = ptgt->tgt_pd_handle;
	devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
	devlist->map_did.port_id = ptgt->tgt_d_id;

	bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
	bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);

	devlist->map_state = PORT_DEVICE_LOGGED_IN;
	devlist->map_type = PORT_DEVICE_REPORTLUN_CHANGED;
	devlist->map_flags = 0;

	/* run the normal state-change machinery on the synthetic event */
	fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
	    pptr->port_topology, devlist, dev_cnt, pptr->port_id);

	/*
	 * Clear the tgt_tid after no more references to
	 * the fcp_tgt
	 */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_tid = NULL;
	mutex_exit(&ptgt->tgt_mutex);

	kmem_free(devlist, sizeof (*devlist));
}
14786
14787
14788 static void
14789 fcp_free_targets(struct fcp_port *pptr)
14790 {
14791 int i;
14792 struct fcp_tgt *ptgt;
14793
14794 mutex_enter(&pptr->port_mutex);
14795 for (i = 0; i < FCP_NUM_HASH; i++) {
14796 ptgt = pptr->port_tgt_hash_table[i];
14797 while (ptgt != NULL) {
14798 struct fcp_tgt *next_tgt = ptgt->tgt_next;
14799
14800 fcp_free_target(ptgt);
14801 ptgt = next_tgt;
14802 }
14803 }
14804 mutex_exit(&pptr->port_mutex);
14805 }
14806
14807
/*
 * Free a single target: cancel its pending reconfiguration timeout (if
 * any), deallocate all its LUNs, then deallocate the target itself.
 */
static void
fcp_free_target(struct fcp_tgt *ptgt)
{
	struct fcp_lun	*plun;
	timeout_id_t	tid;

	mutex_enter(&ptgt->tgt_mutex);
	tid = ptgt->tgt_tid;

	/*
	 * Cancel any pending timeouts for this target.
	 */

	if (tid != NULL) {
		/*
		 * Set tgt_tid to NULL first to avoid a race in the callback.
		 * If tgt_tid is NULL, the callback will simply return.
		 */
		ptgt->tgt_tid = NULL;
		/*
		 * The mutex is dropped across untimeout() because the
		 * timeout callback (fcp_reconfigure_luns) takes tgt_mutex
		 * itself; holding it here could deadlock.
		 */
		mutex_exit(&ptgt->tgt_mutex);
		(void) untimeout(tid);
		mutex_enter(&ptgt->tgt_mutex);
	}

	/* free every LUN on this target, saving each link before freeing */
	plun = ptgt->tgt_lun;
	while (plun != NULL) {
		struct fcp_lun *next_lun = plun->lun_next;

		fcp_dealloc_lun(plun);
		plun = next_lun;
	}

	mutex_exit(&ptgt->tgt_mutex);
	fcp_dealloc_tgt(ptgt);
}
14843
14844 /*
14845 * Function: fcp_is_retryable
14846 *
14847 * Description: Indicates if the internal packet is retryable.
14848 *
14849 * Argument: *icmd FCP internal packet.
14850 *
14851 * Return Value: 0 Not retryable
14852 * 1 Retryable
14853 *
14854 * Context: User, Kernel and Interrupt context
14855 */
14856 static int
14857 fcp_is_retryable(struct fcp_ipkt *icmd)
14858 {
14859 if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14860 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
14861 return (0);
14862 }
14863
14864 return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14865 icmd->ipkt_port->port_deadline) ? 1 : 0);
14866 }
14867
14868 /*
14869 * Function: fcp_create_on_demand
14870 *
14871 * Argument: *pptr FCP port.
14872 * *pwwn Port WWN.
14873 *
14874 * Return Value: 0 Success
14875 * EIO
14876 * ENOMEM
14877 * EBUSY
14878 * EINVAL
14879 *
14880 * Context: User and Kernel context
14881 */
14882 static int
14883 fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
14884 {
14885 int wait_ms;
14886 int tcount;
14887 int lcount;
14888 int ret;
14889 int error;
14890 int rval = EIO;
14891 int ntries;
14892 fc_portmap_t *devlist;
14893 opaque_t pd;
14894 struct fcp_lun *plun;
14895 struct fcp_tgt *ptgt;
14896 int old_manual = 0;
14897
14898 /* Allocates the fc_portmap_t structure. */
14899 devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);
14900
14901 /*
14902 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown
14903 * in the commented statement below:
14904 *
14905 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
14906 *
14907 * Below, the deadline for the discovery process is set.
14908 */
14909 mutex_enter(&pptr->port_mutex);
14910 pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
14911 mutex_exit(&pptr->port_mutex);
14912
14913 /*
14914 * We try to find the remote port based on the WWN provided by the
14915 * caller. We actually ask fp/fctl if it has it.
14916 */
14917 pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
14918 (la_wwn_t *)pwwn, &error, 1);
14919
14920 if (pd == NULL) {
14921 kmem_free(devlist, sizeof (*devlist));
14922 return (rval);
14923 }
14924
14925 /*
14926 * The remote port was found. We ask fp/fctl to update our
14927 * fc_portmap_t structure.
14928 */
14929 ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
14930 (la_wwn_t *)pwwn, devlist);
14931 if (ret != FC_SUCCESS) {
14932 kmem_free(devlist, sizeof (*devlist));
14933 return (rval);
14934 }
14935
14936 /*
14937 * The map flag field is set to indicates that the creation is being
14938 * done at the user request (Ioclt probably luxadm or cfgadm).
14939 */
14940 devlist->map_type = PORT_DEVICE_USER_CREATE;
14941
14942 mutex_enter(&pptr->port_mutex);
14943
14944 /*
14945 * We check to see if fcp already has a target that describes the
14946 * device being created. If not it is created.
14947 */
14948 ptgt = fcp_lookup_target(pptr, pwwn);
14949 if (ptgt == NULL) {
14950 lcount = pptr->port_link_cnt;
14951 mutex_exit(&pptr->port_mutex);
14952
14953 ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
14954 if (ptgt == NULL) {
14955 fcp_log(CE_WARN, pptr->port_dip,
14956 "!FC target allocation failed");
14957 return (ENOMEM);
14958 }
14959
14960 mutex_enter(&pptr->port_mutex);
14961 }
14962
14963 mutex_enter(&ptgt->tgt_mutex);
14964 ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
14965 ptgt->tgt_tmp_cnt = 1;
14966 ptgt->tgt_device_created = 0;
14967 /*
14968 * If fabric and auto config is set but the target was
14969 * manually unconfigured then reset to the manual_config_only to
14970 * 0 so the device will get configured.
14971 */
14972 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14973 fcp_enable_auto_configuration &&
14974 ptgt->tgt_manual_config_only == 1) {
14975 old_manual = 1;
14976 ptgt->tgt_manual_config_only = 0;
14977 }
14978 mutex_exit(&ptgt->tgt_mutex);
14979
14980 fcp_update_targets(pptr, devlist, 1,
14981 FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);
14982
14983 lcount = pptr->port_link_cnt;
14984 tcount = ptgt->tgt_change_cnt;
14985
14986 if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
14987 tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
14988 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14989 fcp_enable_auto_configuration && old_manual) {
14990 mutex_enter(&ptgt->tgt_mutex);
14991 ptgt->tgt_manual_config_only = 1;
14992 mutex_exit(&ptgt->tgt_mutex);
14993 }
14994
14995 if (pptr->port_link_cnt != lcount ||
14996 ptgt->tgt_change_cnt != tcount) {
14997 rval = EBUSY;
14998 }
14999 mutex_exit(&pptr->port_mutex);
15000
15001 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15002 FCP_BUF_LEVEL_3, 0,
15003 "fcp_create_on_demand: mapflags ptgt=%x, "
15004 "lcount=%x::port_link_cnt=%x, "
15005 "tcount=%x: tgt_change_cnt=%x, rval=%x",
15006 ptgt, lcount, pptr->port_link_cnt,
15007 tcount, ptgt->tgt_change_cnt, rval);
15008 return (rval);
15009 }
15010
15011 /*
15012 * Due to lack of synchronization mechanisms, we perform
15013 * periodic monitoring of our request; Because requests
15014 * get dropped when another one supercedes (either because
15015 * of a link change or a target change), it is difficult to
15016 * provide a clean synchronization mechanism (such as a
15017 * semaphore or a conditional variable) without exhaustively
15018 * rewriting the mainline discovery code of this driver.
15019 */
15020 wait_ms = 500;
15021
15022 ntries = fcp_max_target_retries;
15023
15024 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15025 FCP_BUF_LEVEL_3, 0,
15026 "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
15027 "lcount=%x::port_link_cnt=%x, "
15028 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15029 "tgt_tmp_cnt =%x",
15030 ntries, ptgt, lcount, pptr->port_link_cnt,
15031 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15032 ptgt->tgt_tmp_cnt);
15033
15034 mutex_enter(&ptgt->tgt_mutex);
15035 while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
15036 ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
15037 mutex_exit(&ptgt->tgt_mutex);
15038 mutex_exit(&pptr->port_mutex);
15039
15040 delay(drv_usectohz(wait_ms * 1000));
15041
15042 mutex_enter(&pptr->port_mutex);
15043 mutex_enter(&ptgt->tgt_mutex);
15044 }
15045
15046
15047 if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
15048 rval = EBUSY;
15049 } else {
15050 if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
15051 FCP_TGT_NODE_PRESENT) {
15052 rval = 0;
15053 }
15054 }
15055
15056 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15057 FCP_BUF_LEVEL_3, 0,
15058 "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
15059 "lcount=%x::port_link_cnt=%x, "
15060 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15061 "tgt_tmp_cnt =%x",
15062 ntries, ptgt, lcount, pptr->port_link_cnt,
15063 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15064 ptgt->tgt_tmp_cnt);
15065
15066 if (rval) {
15067 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15068 fcp_enable_auto_configuration && old_manual) {
15069 ptgt->tgt_manual_config_only = 1;
15070 }
15071 mutex_exit(&ptgt->tgt_mutex);
15072 mutex_exit(&pptr->port_mutex);
15073 kmem_free(devlist, sizeof (*devlist));
15074
15075 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15076 FCP_BUF_LEVEL_3, 0,
15077 "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
15078 "lcount=%x::port_link_cnt=%x, "
15079 "tcount=%x::tgt_change_cnt=%x, rval=%x, "
15080 "tgt_device_created=%x, tgt D_ID=%x",
15081 ntries, ptgt, lcount, pptr->port_link_cnt,
15082 tcount, ptgt->tgt_change_cnt, rval,
15083 ptgt->tgt_device_created, ptgt->tgt_d_id);
15084 return (rval);
15085 }
15086
15087 if ((plun = ptgt->tgt_lun) != NULL) {
15088 tcount = plun->lun_tgt->tgt_change_cnt;
15089 } else {
15090 rval = EINVAL;
15091 }
15092 lcount = pptr->port_link_cnt;
15093
15094 /*
15095 * Configuring the target with no LUNs will fail. We
15096 * should reset the node state so that it is not
15097 * automatically configured when the LUNs are added
15098 * to this target.
15099 */
15100 if (ptgt->tgt_lun_cnt == 0) {
15101 ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
15102 }
15103 mutex_exit(&ptgt->tgt_mutex);
15104 mutex_exit(&pptr->port_mutex);
15105
15106 while (plun) {
15107 child_info_t *cip;
15108
15109 mutex_enter(&plun->lun_mutex);
15110 cip = plun->lun_cip;
15111 mutex_exit(&plun->lun_mutex);
15112
15113 mutex_enter(&ptgt->tgt_mutex);
15114 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
15115 mutex_exit(&ptgt->tgt_mutex);
15116
15117 rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
15118 FCP_ONLINE, lcount, tcount,
15119 NDI_ONLINE_ATTACH);
15120 if (rval != NDI_SUCCESS) {
15121 FCP_TRACE(fcp_logq,
15122 pptr->port_instbuf, fcp_trace,
15123 FCP_BUF_LEVEL_3, 0,
15124 "fcp_create_on_demand: "
15125 "pass_to_hp_and_wait failed "
15126 "rval=%x", rval);
15127 rval = EIO;
15128 } else {
15129 mutex_enter(&LUN_TGT->tgt_mutex);
15130 plun->lun_state &= ~(FCP_LUN_OFFLINE |
15131 FCP_LUN_BUSY);
15132 mutex_exit(&LUN_TGT->tgt_mutex);
15133 }
15134 mutex_enter(&ptgt->tgt_mutex);
15135 }
15136
15137 plun = plun->lun_next;
15138 mutex_exit(&ptgt->tgt_mutex);
15139 }
15140
15141 kmem_free(devlist, sizeof (*devlist));
15142
15143 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15144 fcp_enable_auto_configuration && old_manual) {
15145 mutex_enter(&ptgt->tgt_mutex);
15146 /* if successful then set manual to 0 */
15147 if (rval == 0) {
15148 ptgt->tgt_manual_config_only = 0;
15149 } else {
15150 /* reset to 1 so the user has to do the config */
15151 ptgt->tgt_manual_config_only = 1;
15152 }
15153 mutex_exit(&ptgt->tgt_mutex);
15154 }
15155
15156 return (rval);
15157 }
15158
15159
15160 static void
15161 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
15162 {
15163 int count;
15164 uchar_t byte;
15165
15166 count = 0;
15167 while (*string) {
15168 byte = FCP_ATOB(*string); string++;
15169 byte = byte << 4 | FCP_ATOB(*string); string++;
15170 bytes[count++] = byte;
15171
15172 if (count >= byte_len) {
15173 break;
15174 }
15175 }
15176 }
15177
15178 static void
15179 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
15180 {
15181 int i;
15182
15183 for (i = 0; i < FC_WWN_SIZE; i++) {
15184 (void) sprintf(string + (i * 2),
15185 "%02x", wwn[i]);
15186 }
15187
15188 }
15189
/*
 * fcp_print_error
 *
 * Log a CE_WARN message explaining why an internal packet (internal
 * SCSI command or ELS) failed.  A base message is selected from the
 * ipkt opcode; detail is then appended from either the FCP response
 * and sense data (when the transport reports FC_PKT_SUCCESS) or from
 * the transport state/reason strings (when it does not).
 *
 * Conversion specifications are written "%%x" because buf is itself
 * used later as the format string passed to fcp_log(); the doubled
 * '%' collapses to a single '%' when sprintf() builds buf.
 */
static void
fcp_print_error(fc_packet_t *fpkt)
{
	struct fcp_ipkt *icmd = (struct fcp_ipkt *)
	    fpkt->pkt_ulp_private;
	struct fcp_port *pptr;
	struct fcp_tgt *ptgt;
	struct fcp_lun *plun;
	caddr_t buf;
	int scsi_cmd = 0;

	ptgt = icmd->ipkt_tgt;
	plun = icmd->ipkt_lun;
	pptr = ptgt->tgt_port;

	/* Logging is best-effort: silently give up if no memory. */
	buf = kmem_zalloc(256, KM_NOSLEEP);
	if (buf == NULL) {
		return;
	}

	/* scsi_cmd distinguishes SCSI commands (which log a LUN) from ELS. */
	switch (icmd->ipkt_opcode) {
	case SCMD_REPORT_LUN:
		(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY_PAGE83:
		(void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY:
		(void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case LA_ELS_PLOGI:
		(void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
		break;

	case LA_ELS_PRLI:
		(void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
		break;
	}

	if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
		/*
		 * Transport delivered the command; dig into the FCP
		 * response (and sense data, if any) for the reason.
		 */
		struct fcp_rsp response, *rsp;
		uchar_t asc, ascq;
		caddr_t sense_key = NULL;
		struct fcp_rsp_info fcp_rsp_err, *bep;

		if (icmd->ipkt_nodma) {
			/* Response buffer is directly addressable. */
			rsp = (struct fcp_rsp *)fpkt->pkt_resp;
			bep = (struct fcp_rsp_info *)((caddr_t)rsp +
			    sizeof (struct fcp_rsp));
		} else {
			/* Copy the response out of DMA-mapped memory. */
			rsp = &response;
			bep = &fcp_rsp_err;

			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));

			FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
			    bep, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp_info));
		}


		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
			/* Malformed response: dump the raw fields and stop. */
			(void) sprintf(buf + strlen(buf),
			    " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
			    " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
			    " senselen=%%x. Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
			    rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
			    rsp->fcp_u.fcp_status.reserved_1,
			    rsp->fcp_response_len, rsp->fcp_sense_len);

			kmem_free(buf, 256);
			return;
		}

		if (rsp->fcp_u.fcp_status.rsp_len_set &&
		    bep->rsp_code != FCP_NO_FAILURE) {
			(void) sprintf(buf + strlen(buf),
			    " FCP Response code = 0x%x", bep->rsp_code);
		}

		if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
			/* CHECK CONDITION: decode the sense data. */
			struct scsi_extended_sense sense_info, *sense_ptr;

			if (icmd->ipkt_nodma) {
				/* Sense data follows rsp + response_len. */
				sense_ptr = (struct scsi_extended_sense *)
				    ((caddr_t)fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len);
			} else {
				sense_ptr = &sense_info;

				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len, &sense_info,
				    fpkt->pkt_resp_acc,
				    sizeof (struct scsi_extended_sense));
			}

			/* Translate the sense key to a printable name. */
			if (sense_ptr->es_key < NUM_SENSE_KEYS +
			    NUM_IMPL_SENSE_KEYS) {
				sense_key = sense_keys[sense_ptr->es_key];
			} else {
				sense_key = "Undefined";
			}

			asc = sense_ptr->es_add_code;
			ascq = sense_ptr->es_qual_code;

			(void) sprintf(buf + strlen(buf),
			    ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
			    " Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, sense_key,
			    asc, ascq);
		} else {
			(void) sprintf(buf + strlen(buf),
			    " : SCSI status=%%x. Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num,
			    rsp->fcp_u.fcp_status.scsi_status);
		}
	} else {
		/*
		 * Transport-level failure: report the transport's state
		 * and reason strings instead of FCP response data.
		 */
		caddr_t state, reason, action, expln;

		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
		    &action, &expln);

		(void) sprintf(buf + strlen(buf), ": State:%%s,"
		    " Reason:%%s. Giving up");

		if (scsi_cmd) {
			/* SCSI command messages also carry the LUN number. */
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, state, reason);
		} else {
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, state, reason);
		}
	}

	kmem_free(buf, 256);
}
15346
15347
/*
 * fcp_handle_ipkt_errors
 *
 * Handle a transport failure (rval) for the internal packet icmd sent
 * to target ptgt.  Busy/offline style errors are requeued for retry
 * (subject to FCP_MAX_RETRIES and fcp_is_retryable()); a PRLI failing
 * with FC_LOGINREQ is converted into a PLOGI; any other error is
 * logged unless a link/target state change already explains it.  'op'
 * is the human-readable operation name used in the failure message.
 *
 * Returns DDI_SUCCESS when the packet was requeued or resent, and
 * DDI_FAILURE when the caller must treat the command as failed.
 */
static int
fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, int rval, caddr_t op)
{
	int ret = DDI_FAILURE;
	char *error;

	switch (rval) {
	case FC_DEVICE_BUSY_NEW_RSCN:
		/*
		 * This means that there was a new RSCN that the transport
		 * knows about (which the ULP *may* know about too) but the
		 * pkt that was sent down was related to an older RSCN. So, we
		 * are just going to reset the retry count and deadline and
		 * continue to retry. The idea is that transport is currently
		 * working on the new RSCN and will soon let the ULPs know
		 * about it and when it does the existing logic will kick in
		 * where it will change the tcount to indicate that something
		 * changed on the target. So, rediscovery will start and there
		 * will not be an infinite retry.
		 *
		 * For a full flow of how the RSCN info is transferred back and
		 * forth, see fp.c
		 */
		icmd->ipkt_retries = 0;
		icmd->ipkt_port->port_deadline = fcp_watchdog_time +
		    FCP_ICMD_DEADLINE;

		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		/* FALLTHROUGH */

	case FC_STATEC_BUSY:
	case FC_DEVICE_BUSY:
	case FC_PBUSY:
	case FC_FBUSY:
	case FC_TRAN_BUSY:
	case FC_OFFLINE:
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		/* Transient condition: requeue if retries remain. */
		if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
		    fcp_is_retryable(icmd)) {
			fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
			ret = DDI_SUCCESS;
		}
		break;

	case FC_LOGINREQ:
		/*
		 * FC_LOGINREQ used to be handled just like all the cases
		 * above. It has been changed to handled a PRLI that fails
		 * with FC_LOGINREQ different than other ipkts that fail
		 * with FC_LOGINREQ. If a PRLI fails with FC_LOGINREQ it is
		 * a simple matter to turn it into a PLOGI instead, so that's
		 * exactly what we do here.
		 */
		if (icmd->ipkt_opcode == LA_ELS_PRLI) {
			ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
			    icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
			    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
			    FCP_BUF_LEVEL_3, 0,
			    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
			    rval, ptgt->tgt_d_id);
			/* Non-PRLI: fall back to the plain retry path. */
			if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
			    fcp_is_retryable(icmd)) {
				fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
				ret = DDI_SUCCESS;
			}
		}
		break;

	default:
		/* port_mutex before tgt_mutex: the driver's lock order. */
		mutex_enter(&pptr->port_mutex);
		mutex_enter(&ptgt->tgt_mutex);
		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
			/* No state change explains this error: report it. */
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);

			(void) fc_ulp_error(rval, &error);
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to send %s to D_ID=%x error=%s",
			    op, ptgt->tgt_d_id, error);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_handle_ipkt_errors,1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
		}
		break;
	}

	return (ret);
}
15449
15450
15451 /*
15452 * Check of outstanding commands on any LUN for this target
15453 */
15454 static int
15455 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15456 {
15457 struct fcp_lun *plun;
15458 struct fcp_pkt *cmd;
15459
15460 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15461 mutex_enter(&plun->lun_mutex);
15462 for (cmd = plun->lun_pkt_head; cmd != NULL;
15463 cmd = cmd->cmd_forw) {
15464 if (cmd->cmd_state == FCP_PKT_ISSUED) {
15465 mutex_exit(&plun->lun_mutex);
15466 return (FC_SUCCESS);
15467 }
15468 }
15469 mutex_exit(&plun->lun_mutex);
15470 }
15471
15472 return (FC_FAILURE);
15473 }
15474
/*
 * fcp_construct_map
 *
 * Build an fc_portmap_t array covering every non-orphan target known
 * to this port and set *dev_cnt to the number of entries.  Each entry
 * is first filled in by the transport via fc_ulp_pwwn_to_portmap();
 * when that fails the entry is synthesized here from cached target
 * data and marked PORT_DEVICE_INVALID / PORT_DEVICE_OLD.
 *
 * Returns the array (the caller frees sizeof (fc_portmap_t) *
 * *dev_cnt bytes) or NULL on allocation failure.
 *
 * NOTE(review): if every target is an orphan, *dev_cnt is 0 and
 * kmem_zalloc() is invoked with a zero size -- presumably callers
 * only get here with at least one live target; verify.
 */
static fc_portmap_t *
fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
{
	int i;
	fc_portmap_t *devlist;
	fc_portmap_t *devptr = NULL;
	struct fcp_tgt *ptgt;

	mutex_enter(&pptr->port_mutex);
	/* First pass: count the targets worth reporting. */
	for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
				++*dev_cnt;
			}
		}
	}

	devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
	    KM_NOSLEEP);
	if (devlist == NULL) {
		mutex_exit(&pptr->port_mutex);
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: failed to allocate for portmap for construct map",
		    pptr->port_instance);
		return (devptr);
	}

	/* Second pass: fill in one entry per non-orphan target. */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
				int ret;

				ret = fc_ulp_pwwn_to_portmap(
				    pptr->port_fp_handle,
				    (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
				    devlist);

				if (ret == FC_SUCCESS) {
					devlist++;
					continue;
				}

				/*
				 * The transport has no current info for
				 * this WWN: build the entry from our
				 * cached data and flag it invalid/old.
				 */
				devlist->map_pd = NULL;
				devlist->map_did.port_id = ptgt->tgt_d_id;
				devlist->map_hard_addr.hard_addr =
				    ptgt->tgt_hard_addr;

				devlist->map_state = PORT_DEVICE_INVALID;
				devlist->map_type = PORT_DEVICE_OLD;

				bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
				    &devlist->map_nwwn, FC_WWN_SIZE);

				bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
				    &devlist->map_pwwn, FC_WWN_SIZE);

				devlist++;
			}
		}
	}

	mutex_exit(&pptr->port_mutex);

	return (devptr);
}
15542 /*
15543 * Inimate MPxIO that the lun is busy and cannot accept regular IO
15544 */
15545 static void
15546 fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
15547 {
15548 int i;
15549 struct fcp_tgt *ptgt;
15550 struct fcp_lun *plun;
15551
15552 for (i = 0; i < FCP_NUM_HASH; i++) {
15553 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15554 ptgt = ptgt->tgt_next) {
15555 mutex_enter(&ptgt->tgt_mutex);
15556 for (plun = ptgt->tgt_lun; plun != NULL;
15557 plun = plun->lun_next) {
15558 if (plun->lun_mpxio &&
15559 plun->lun_state & FCP_LUN_BUSY) {
15560 if (!fcp_pass_to_hp(pptr, plun,
15561 plun->lun_cip,
15562 FCP_MPXIO_PATH_SET_BUSY,
15563 pptr->port_link_cnt,
15564 ptgt->tgt_change_cnt, 0, 0)) {
15565 FCP_TRACE(fcp_logq,
15566 pptr->port_instbuf,
15567 fcp_trace,
15568 FCP_BUF_LEVEL_2, 0,
15569 "path_verifybusy: "
15570 "disable lun %p failed!",
15571 plun);
15572 }
15573 }
15574 }
15575 mutex_exit(&ptgt->tgt_mutex);
15576 }
15577 }
15578 }
15579
15580 static int
15581 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
15582 {
15583 dev_info_t *cdip = NULL;
15584 dev_info_t *pdip = NULL;
15585
15586 ASSERT(plun);
15587
15588 mutex_enter(&plun->lun_mutex);
15589 if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
15590 mutex_exit(&plun->lun_mutex);
15591 return (NDI_FAILURE);
15592 }
15593 mutex_exit(&plun->lun_mutex);
15594 cdip = mdi_pi_get_client(PIP(cip));
15595 pdip = mdi_pi_get_phci(PIP(cip));
15596
15597 ASSERT(cdip != NULL);
15598 ASSERT(pdip != NULL);
15599
15600 if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
15601 /* LUN ready for IO */
15602 (void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15603 } else {
15604 /* LUN busy to accept IO */
15605 (void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15606 }
15607 return (NDI_SUCCESS);
15608 }
15609
15610 /*
15611 * Caller must free the returned string of MAXPATHLEN len
15612 * If the device is offline (-1 instance number) NULL
15613 * will be returned.
15614 */
15615 static char *
15616 fcp_get_lun_path(struct fcp_lun *plun)
15617 {
15618 dev_info_t *dip = NULL;
15619 char *path = NULL;
15620 mdi_pathinfo_t *pip = NULL;
15621
15622 if (plun == NULL) {
15623 return (NULL);
15624 }
15625
15626 mutex_enter(&plun->lun_mutex);
15627 if (plun->lun_mpxio == 0) {
15628 dip = DIP(plun->lun_cip);
15629 mutex_exit(&plun->lun_mutex);
15630 } else {
15631 /*
15632 * lun_cip must be accessed with lun_mutex held. Here
15633 * plun->lun_cip either points to a valid node or it is NULL.
15634 * Make a copy so that we can release lun_mutex.
15635 */
15636 pip = PIP(plun->lun_cip);
15637
15638 /*
15639 * Increase ref count on the path so that we can release
15640 * lun_mutex and still be sure that the pathinfo node (and thus
15641 * also the client) is not deallocated. If pip is NULL, this
15642 * has no effect.
15643 */
15644 mdi_hold_path(pip);
15645
15646 mutex_exit(&plun->lun_mutex);
15647
15648 /* Get the client. If pip is NULL, we get NULL. */
15649 dip = mdi_pi_get_client(pip);
15650 }
15651
15652 if (dip == NULL)
15653 goto out;
15654 if (ddi_get_instance(dip) < 0)
15655 goto out;
15656
15657 path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
15658 if (path == NULL)
15659 goto out;
15660
15661 (void) ddi_pathname(dip, path);
15662
15663 /* Clean up. */
15664 out:
15665 if (pip != NULL)
15666 mdi_rele_path(pip);
15667
15668 /*
15669 * In reality, the user wants a fully valid path (one they can open)
15670 * but this string is lacking the mount point, and the minor node.
15671 * It would be nice if we could "figure these out" somehow
15672 * and fill them in. Otherwise, the userland code has to understand
15673 * driver specific details of which minor node is the "best" or
15674 * "right" one to expose. (Ex: which slice is the whole disk, or
15675 * which tape doesn't rewind)
15676 */
15677 return (path);
15678 }
15679
/*
 * fcp_scsi_bus_config
 *
 * SCSA bus_config(9E) entry point for the fcp nexus.
 *
 * BUS_CONFIG_ONE is retried up to fcp_max_bus_config_retries times,
 * each attempt waiting up to the remainder of the port's
 * FCP_INIT_WAIT_TIMEOUT settle window, so fabric devices needed for
 * root can show up.  BUS_CONFIG_DRIVER/BUS_CONFIG_ALL first waits for
 * all discovered devices to report in (port_tmp_cnt == 0) or for the
 * window to expire.  In both paths the parent's taskq is drained so
 * that child node creation has finished before the final
 * ndi_busop_bus_config() call.
 *
 * Returns the NDI status from ndi_busop_bus_config(), or NDI_FAILURE
 * for an unknown op.
 */
static int
fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int64_t reset_delay;
	int rval, retry = 0;
	struct fcp_port *pptr = fcp_dip2port(parent);

	/* Time left in the post-attach settle window, in ticks. */
	reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
	    (ddi_get_lbolt64() - pptr->port_attach_time);
	if (reset_delay < 0) {
		reset_delay = 0;
	}

	if (fcp_bus_config_debug) {
		flag |= NDI_DEVI_DEBUG;
	}

	switch (op) {
	case BUS_CONFIG_ONE:
		/*
		 * Retry the command since we need to ensure
		 * the fabric devices are available for root
		 */
		while (retry++ < fcp_max_bus_config_retries) {
			rval = (ndi_busop_bus_config(parent,
			    flag | NDI_MDI_FALLBACK, op,
			    arg, childp, (clock_t)reset_delay));
			if (rval == 0) {
				return (rval);
			}
		}

		/*
		 * drain taskq to make sure nodes are created and then
		 * try again.
		 */
		taskq_wait(DEVI(parent)->devi_taskq);
		return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
		    op, arg, childp, 0));

	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL: {
		/*
		 * delay till all devices report in (port_tmp_cnt == 0)
		 * or FCP_INIT_WAIT_TIMEOUT
		 */
		mutex_enter(&pptr->port_mutex);
		while ((reset_delay > 0) && pptr->port_tmp_cnt) {
			(void) cv_timedwait(&pptr->port_config_cv,
			    &pptr->port_mutex,
			    ddi_get_lbolt() + (clock_t)reset_delay);
			/* Recompute how much of the window remains. */
			reset_delay =
			    (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
			    (ddi_get_lbolt64() - pptr->port_attach_time);
		}
		mutex_exit(&pptr->port_mutex);
		/* drain taskq to make sure nodes are created */
		taskq_wait(DEVI(parent)->devi_taskq);
		return (ndi_busop_bus_config(parent, flag, op,
		    arg, childp, 0));
	}

	default:
		return (NDI_FAILURE);
	}
	/*NOTREACHED*/
}
15748
15749 static int
15750 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15751 ddi_bus_config_op_t op, void *arg)
15752 {
15753 if (fcp_bus_config_debug) {
15754 flag |= NDI_DEVI_DEBUG;
15755 }
15756
15757 return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15758 }
15759
15760
15761 /*
15762 * Routine to copy GUID into the lun structure.
15763 * returns 0 if copy was successful and 1 if encountered a
15764 * failure and did not copy the guid.
15765 */
15766 static int
15767 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
15768 {
15769
15770 int retval = 0;
15771
15772 /* add one for the null terminator */
15773 const unsigned int len = strlen(guidp) + 1;
15774
15775 if ((guidp == NULL) || (plun == NULL)) {
15776 return (1);
15777 }
15778
15779 /*
15780 * if the plun->lun_guid already has been allocated,
15781 * then check the size. if the size is exact, reuse
15782 * it....if not free it an allocate the required size.
15783 * The reallocation should NOT typically happen
15784 * unless the GUIDs reported changes between passes.
15785 * We free up and alloc again even if the
15786 * size was more than required. This is due to the
15787 * fact that the field lun_guid_size - serves
15788 * dual role of indicating the size of the wwn
15789 * size and ALSO the allocation size.
15790 */
15791 if (plun->lun_guid) {
15792 if (plun->lun_guid_size != len) {
15793 /*
15794 * free the allocated memory and
15795 * initialize the field
15796 * lun_guid_size to 0.
15797 */
15798 kmem_free(plun->lun_guid, plun->lun_guid_size);
15799 plun->lun_guid = NULL;
15800 plun->lun_guid_size = 0;
15801 }
15802 }
15803 /*
15804 * alloc only if not already done.
15805 */
15806 if (plun->lun_guid == NULL) {
15807 plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15808 if (plun->lun_guid == NULL) {
15809 cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block:"
15810 "Unable to allocate"
15811 "Memory for GUID!!! size %d", len);
15812 retval = 1;
15813 } else {
15814 plun->lun_guid_size = len;
15815 }
15816 }
15817 if (plun->lun_guid) {
15818 /*
15819 * now copy the GUID
15820 */
15821 bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15822 }
15823 return (retval);
15824 }
15825
15826 /*
15827 * fcp_reconfig_wait
15828 *
15829 * Wait for a rediscovery/reconfiguration to complete before continuing.
15830 */
15831
15832 static void
15833 fcp_reconfig_wait(struct fcp_port *pptr)
15834 {
15835 clock_t reconfig_start, wait_timeout;
15836
15837 /*
15838 * Quick check. If pptr->port_tmp_cnt is 0, there is no
15839 * reconfiguration in progress.
15840 */
15841
15842 mutex_enter(&pptr->port_mutex);
15843 if (pptr->port_tmp_cnt == 0) {
15844 mutex_exit(&pptr->port_mutex);
15845 return;
15846 }
15847 mutex_exit(&pptr->port_mutex);
15848
15849 /*
15850 * If we cause a reconfig by raising power, delay until all devices
15851 * report in (port_tmp_cnt returns to 0)
15852 */
15853
15854 reconfig_start = ddi_get_lbolt();
15855 wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);
15856
15857 mutex_enter(&pptr->port_mutex);
15858
15859 while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
15860 pptr->port_tmp_cnt) {
15861
15862 (void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
15863 reconfig_start + wait_timeout);
15864 }
15865
15866 mutex_exit(&pptr->port_mutex);
15867
15868 /*
15869 * Even if fcp_tmp_count isn't 0, continue without error. The port
15870 * we want may still be ok. If not, it will error out later
15871 */
15872 }
15873
15874 /*
15875 * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
15876 * We rely on the fcp_global_mutex to provide protection against changes to
15877 * the fcp_lun_blacklist.
15878 *
15879 * You can describe a list of target port WWNs and LUN numbers which will
15880 * not be configured. LUN numbers will be interpreted as decimal. White
15881 * spaces and ',' can be used in the list of LUN numbers.
15882 *
15883 * To prevent LUNs 1 and 2 from being configured for target
15884 * port 510000f010fd92a1 and target port 510000e012079df1, set:
15885 *
15886 * pwwn-lun-blacklist=
15887 * "510000f010fd92a1,1,2",
15888 * "510000e012079df1,1,2";
15889 */
15890 static void
15891 fcp_read_blacklist(dev_info_t *dip,
15892 struct fcp_black_list_entry **pplun_blacklist)
15893 {
15894 char **prop_array = NULL;
15895 char *curr_pwwn = NULL;
15896 char *curr_lun = NULL;
15897 uint32_t prop_item = 0;
15898 int idx = 0;
15899 int len = 0;
15900
15901 ASSERT(mutex_owned(&fcp_global_mutex));
15902 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
15903 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
15904 LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
15905 return;
15906 }
15907
15908 for (idx = 0; idx < prop_item; idx++) {
15909
15910 curr_pwwn = prop_array[idx];
15911 while (*curr_pwwn == ' ') {
15912 curr_pwwn++;
15913 }
15914 if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
15915 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15916 ", please check.", curr_pwwn);
15917 continue;
15918 }
15919 if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
15920 (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
15921 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15922 ", please check.", curr_pwwn);
15923 continue;
15924 }
15925 for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
15926 if (isxdigit(curr_pwwn[len]) != TRUE) {
15927 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
15928 "blacklist, please check.", curr_pwwn);
15929 break;
15930 }
15931 }
15932 if (len != sizeof (la_wwn_t) * 2) {
15933 continue;
15934 }
15935
15936 curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
15937 *(curr_lun - 1) = '\0';
15938 fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
15939 }
15940
15941 ddi_prop_free(prop_array);
15942 }
15943
15944 /*
15945 * Get the masking info about one remote target port designated by wwn.
15946 * Lun ids could be separated by ',' or white spaces.
15947 */
15948 static void
15949 fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
15950 struct fcp_black_list_entry **pplun_blacklist)
15951 {
15952 int idx = 0;
15953 uint32_t offset = 0;
15954 unsigned long lun_id = 0;
15955 char lunid_buf[16];
15956 char *pend = NULL;
15957 int illegal_digit = 0;
15958
15959 while (offset < strlen(curr_lun)) {
15960 while ((curr_lun[offset + idx] != ',') &&
15961 (curr_lun[offset + idx] != '\0') &&
15962 (curr_lun[offset + idx] != ' ')) {
15963 if (isdigit(curr_lun[offset + idx]) == 0) {
15964 illegal_digit++;
15965 }
15966 idx++;
15967 }
15968 if (illegal_digit > 0) {
15969 offset += (idx+1); /* To the start of next lun */
15970 idx = 0;
15971 illegal_digit = 0;
15972 fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15973 "the blacklist, please check digits.",
15974 curr_lun, curr_pwwn);
15975 continue;
15976 }
15977 if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
15978 fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15979 "the blacklist, please check the length of LUN#.",
15980 curr_lun, curr_pwwn);
15981 break;
15982 }
15983 if (idx == 0) { /* ignore ' ' or ',' or '\0' */
15984 offset++;
15985 continue;
15986 }
15987
15988 bcopy(curr_lun + offset, lunid_buf, idx);
15989 lunid_buf[idx] = '\0';
15990 if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
15991 fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
15992 } else {
15993 fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15994 "the blacklist, please check %s.",
15995 curr_lun, curr_pwwn, lunid_buf);
15996 }
15997 offset += (idx+1); /* To the start of next lun */
15998 idx = 0;
15999 }
16000 }
16001
16002 /*
16003 * Add one masking record
16004 */
16005 static void
16006 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
16007 struct fcp_black_list_entry **pplun_blacklist)
16008 {
16009 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
16010 struct fcp_black_list_entry *new_entry = NULL;
16011 la_wwn_t wwn;
16012
16013 fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
16014 while (tmp_entry) {
16015 if ((bcmp(&tmp_entry->wwn, &wwn,
16016 sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
16017 return;
16018 }
16019
16020 tmp_entry = tmp_entry->next;
16021 }
16022
16023 /* add to black list */
16024 new_entry = (struct fcp_black_list_entry *)kmem_zalloc
16025 (sizeof (struct fcp_black_list_entry), KM_SLEEP);
16026 bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
16027 new_entry->lun = lun_id;
16028 new_entry->masked = 0;
16029 new_entry->next = *pplun_blacklist;
16030 *pplun_blacklist = new_entry;
16031 }
16032
16033 /*
16034 * Check if we should mask the specified lun of this fcp_tgt
16035 */
16036 static int
16037 fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id)
16038 {
16039 struct fcp_black_list_entry *remote_port;
16040
16041 remote_port = fcp_lun_blacklist;
16042 while (remote_port != NULL) {
16043 if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
16044 if (remote_port->lun == lun_id) {
16045 remote_port->masked++;
16046 if (remote_port->masked == 1) {
16047 fcp_log(CE_NOTE, NULL, "LUN %d of port "
16048 "%02x%02x%02x%02x%02x%02x%02x%02x "
16049 "is masked due to black listing.\n",
16050 lun_id, wwn->raw_wwn[0],
16051 wwn->raw_wwn[1], wwn->raw_wwn[2],
16052 wwn->raw_wwn[3], wwn->raw_wwn[4],
16053 wwn->raw_wwn[5], wwn->raw_wwn[6],
16054 wwn->raw_wwn[7]);
16055 }
16056 return (TRUE);
16057 }
16058 }
16059 remote_port = remote_port->next;
16060 }
16061 return (FALSE);
16062 }
16063
16064 /*
16065 * Release all allocated resources
16066 */
16067 static void
16068 fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist)
16069 {
16070 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
16071 struct fcp_black_list_entry *current_entry = NULL;
16072
16073 ASSERT(mutex_owned(&fcp_global_mutex));
16074 /*
16075 * Traverse all luns
16076 */
16077 while (tmp_entry) {
16078 current_entry = tmp_entry;
16079 tmp_entry = tmp_entry->next;
16080 kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
16081 }
16082 *pplun_blacklist = NULL;
16083 }
16084
16085 /*
16086 * In fcp module,
16087 * pkt@scsi_pkt, cmd@fcp_pkt, icmd@fcp_ipkt, fpkt@fc_packet, pptr@fcp_port
16088 */
16089 static struct scsi_pkt *
16090 fcp_pseudo_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
16091 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
16092 int flags, int (*callback)(), caddr_t arg)
16093 {
16094 fcp_port_t *pptr = ADDR2FCP(ap);
16095 fcp_pkt_t *cmd = NULL;
16096 fc_frame_hdr_t *hp;
16097
16098 /*
16099 * First step: get the packet
16100 */
16101 if (pkt == NULL) {
16102 pkt = scsi_hba_pkt_alloc(pptr->port_dip, ap, cmdlen, statuslen,
16103 tgtlen, sizeof (fcp_pkt_t) + pptr->port_priv_pkt_len,
16104 callback, arg);
16105 if (pkt == NULL) {
16106 return (NULL);
16107 }
16108
16109 /*
16110 * All fields in scsi_pkt will be initialized properly or
16111 * set to zero. We need do nothing for scsi_pkt.
16112 */
16113 /*
16114 * But it's our responsibility to link other related data
16115 * structures. Their initialization will be done, just
16116 * before the scsi_pkt will be sent to FCA.
16117 */
16118 cmd = PKT2CMD(pkt);
16119 cmd->cmd_pkt = pkt;
16120 cmd->cmd_fp_pkt = &cmd->cmd_fc_packet;
16121 /*
16122 * fc_packet_t
16123 */
16124 cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
16125 cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
16126 sizeof (struct fcp_pkt));
16127 cmd->cmd_fp_pkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
16128 cmd->cmd_fp_pkt->pkt_cmdlen = sizeof (struct fcp_cmd);
16129 cmd->cmd_fp_pkt->pkt_resp = cmd->cmd_fcp_rsp;
16130 cmd->cmd_fp_pkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
16131 /*
16132 * Fill in the Fabric Channel Header
16133 */
16134 hp = &cmd->cmd_fp_pkt->pkt_cmd_fhdr;
16135 hp->r_ctl = R_CTL_COMMAND;
16136 hp->rsvd = 0;
16137 hp->type = FC_TYPE_SCSI_FCP;
16138 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
16139 hp->seq_id = 0;
16140 hp->df_ctl = 0;
16141 hp->seq_cnt = 0;
16142 hp->ox_id = 0xffff;
16143 hp->rx_id = 0xffff;
16144 hp->ro = 0;
16145 } else {
16146 /*
16147 * We need think if we should reset any elements in
16148 * related data structures.
16149 */
16150 FCP_TRACE(fcp_logq, pptr->port_instbuf,
16151 fcp_trace, FCP_BUF_LEVEL_6, 0,
16152 "reusing pkt, flags %d", flags);
16153 cmd = PKT2CMD(pkt);
16154 if (cmd->cmd_fp_pkt->pkt_pd) {
16155 cmd->cmd_fp_pkt->pkt_pd = NULL;
16156 }
16157 }
16158
16159 /*
16160 * Second step: dma allocation/move
16161 */
16162 if (bp && bp->b_bcount != 0) {
16163 /*
16164 * Mark if it's read or write
16165 */
16166 if (bp->b_flags & B_READ) {
16167 cmd->cmd_flags |= CFLAG_IS_READ;
16168 } else {
16169 cmd->cmd_flags &= ~CFLAG_IS_READ;
16170 }
16171
16172 bp_mapin(bp);
16173 cmd->cmd_fp_pkt->pkt_data = bp->b_un.b_addr;
16174 cmd->cmd_fp_pkt->pkt_datalen = bp->b_bcount;
16175 cmd->cmd_fp_pkt->pkt_data_resid = 0;
16176 } else {
16177 /*
16178 * It seldom happens, except when CLUSTER or SCSI_VHCI wants
16179 * to send zero-length read/write.
16180 */
16181 cmd->cmd_fp_pkt->pkt_data = NULL;
16182 cmd->cmd_fp_pkt->pkt_datalen = 0;
16183 }
16184
16185 return (pkt);
16186 }
16187
16188 static void
16189 fcp_pseudo_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16190 {
16191 fcp_port_t *pptr = ADDR2FCP(ap);
16192
16193 /*
16194 * First we let FCA to uninitilize private part.
16195 */
16196 (void) fc_ulp_uninit_packet(pptr->port_fp_handle,
16197 PKT2CMD(pkt)->cmd_fp_pkt);
16198
16199 /*
16200 * Then we uninitialize fc_packet.
16201 */
16202
16203 /*
16204 * Thirdly, we uninitializae fcp_pkt.
16205 */
16206
16207 /*
16208 * In the end, we free scsi_pkt.
16209 */
16210 scsi_hba_pkt_free(ap, pkt);
16211 }
16212
/*
 * fcp_pseudo_start
 *
 * SCSA tran_start(9E) entry point for pseudo FC HBAs: finish
 * initializing the fcp_cmd payload, the fc_packet header and flags,
 * and the scsi_pkt result fields, then hand the packet to the
 * transport.  FLAG_NOINTR packets are polled to completion.
 *
 * Returns TRAN_ACCEPT, TRAN_BUSY or TRAN_FATAL_ERROR (or, for polled
 * packets, the result of fcp_dopoll()).
 */
static int
fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	fcp_port_t *pptr = ADDR2FCP(ap);
	fcp_lun_t *plun = ADDR2LUN(ap);
	fcp_tgt_t *ptgt = plun->lun_tgt;
	fcp_pkt_t *cmd = PKT2CMD(pkt);
	fcp_cmd_t *fcmd = &cmd->cmd_fcp_cmd;
	fc_packet_t *fpkt = cmd->cmd_fp_pkt;
	int rval;

	/* Bind the packet to the target and let the FCA init its part. */
	fpkt->pkt_pd = ptgt->tgt_pd_handle;
	(void) fc_ulp_init_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt, 1);

	/*
	 * Firstly, we need initialize fcp_pkt_t
	 * Secondly, we need initialize fcp_cmd_t.
	 */
	bcopy(pkt->pkt_cdbp, fcmd->fcp_cdb, pkt->pkt_cdblen);
	fcmd->fcp_data_len = fpkt->pkt_datalen;
	fcmd->fcp_ent_addr = plun->lun_addr;
	/* Map the scsi_pkt tag flags onto the FCP task queue type. */
	if (pkt->pkt_flags & FLAG_HTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
	} else if (pkt->pkt_flags & FLAG_OTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
	} else if (pkt->pkt_flags & FLAG_STAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
	} else {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	}

	if (cmd->cmd_flags & CFLAG_IS_READ) {
		fcmd->fcp_cntl.cntl_read_data = 1;
		fcmd->fcp_cntl.cntl_write_data = 0;
	} else {
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 1;
	}

	/*
	 * Then we need initialize fc_packet_t too.
	 */
	/* Extra slack over the SCSI timeout for transport latency. */
	fpkt->pkt_timeout = pkt->pkt_time + 2;
	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
	if (cmd->cmd_flags & CFLAG_IS_READ) {
		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
	} else {
		fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
	}

	if (pkt->pkt_flags & FLAG_NOINTR) {
		/* Polled I/O: no completion callback. */
		fpkt->pkt_comp = NULL;
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
	} else {
		fpkt->pkt_comp = fcp_cmd_callback;
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
		if (pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
		}
	}

	/*
	 * Lastly, we need initialize scsi_pkt
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;
	pkt->pkt_resid = 0;

	/*
	 * if interrupts aren't allowed (e.g. at dump time) then we'll
	 * have to do polled I/O
	 */
	if (pkt->pkt_flags & FLAG_NOINTR) {
		return (fcp_dopoll(pptr, cmd));
	}

	cmd->cmd_state = FCP_PKT_ISSUED;
	rval = fcp_transport(pptr->port_fp_handle, fpkt, 0);
	if (rval == FC_SUCCESS) {
		return (TRAN_ACCEPT);
	}

	/*
	 * Need more consideration
	 *
	 * pkt->pkt_flags & FLAG_NOQUEUE could abort other pkt
	 */
	cmd->cmd_state = FCP_PKT_IDLE;
	if (rval == FC_TRAN_BUSY) {
		return (TRAN_BUSY);
	} else {
		return (TRAN_FATAL_ERROR);
	}
}
16309
16310 /*
16311 * scsi_poll will always call tran_sync_pkt for pseudo FC-HBAs
16312 * SCSA will initialize it to scsi_sync_cache_pkt for physical FC-HBAs
16313 */
16314 static void
16315 fcp_pseudo_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16316 {
16317 FCP_TRACE(fcp_logq, "fcp_pseudo_sync_pkt", fcp_trace,
16318 FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16319 }
16320
16321 /*
16322 * scsi_dmafree will always call tran_dmafree, when STATE_ARQ_DONE
16323 */
16324 static void
16325 fcp_pseudo_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
16326 {
16327 FCP_TRACE(fcp_logq, "fcp_pseudo_dmafree", fcp_trace,
16328 FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16329 }